From 6264628c113188af3a69ec16dcc4401884d95868 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 15 Feb 2020 22:42:21 +0100 Subject: [PATCH 0001/1727] Initial commit --- .gitignore | 2 + Cargo.lock | 925 ++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 13 + src/main.rs | 26 ++ src/ruma_wrapper.rs | 63 +++ 5 files changed, 1029 insertions(+) create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 src/main.rs create mode 100644 src/ruma_wrapper.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..53eaa21 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/target +**/*.rs.bk diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..22b2a71 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,925 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "aho-corasick" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "base64" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "base64" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bytes" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "c2-chacha" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cc" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cookie" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "devise" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "devise_codegen 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "devise_codegen" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "devise_core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dtoa" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fnv" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hermit-abi" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "httparse" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper" +version = "0.10.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "traitobject 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "idna" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "indexmap" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itoa" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "js_int" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "language-tags" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.66" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "matrixserver" +version = "0.1.0" +dependencies = [ + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-client-api 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "mime" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num_cpus" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pear" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "pear_codegen 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pear_codegen" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "yansi 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ppv-lite86" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_chacha" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_hc" +version = 
"0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "regex" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ring" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rocket" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rocket_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rocket_codegen" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "devise 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rocket_http" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cookie 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-api" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-api-macros 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-api-macros" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-client-api" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-api 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-events" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-events-macros" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ruma-identifiers" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ryu" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] 
+name = "safemem" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_urlencoded" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smallvec" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smallvec" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "state" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termcolor" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "time" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml" +version = "0.4.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "traitobject" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "typeable" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicase" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "untrusted" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "url" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "url" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "version_check" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "version_check" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "yansi" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "yansi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811" +"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" +"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" +"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" +"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum cookie 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d9fac5e7bdefb6160fb181ee0eaa6f96704b625c70e6d61c465cb35750a4ea12" +"checksum devise 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e04ba2d03c5fa0d954c061fc8c9c288badadffc272ebb87679a89846de3ed3" +"checksum devise_codegen 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "066ceb7928ca93a9bedc6d0e612a8a0424048b0ab1f75971b203d01420c055d7" +"checksum devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf41c59b22b5e3ec0ea55c7847e5f358d340f3a8d6d53a5cf4f1564967f96487" +"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" +"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" +"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" +"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +"checksum humantime 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +"checksum hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)" = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" +"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +"checksum js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7a28645cf69403534c8b3c961783cdc227c4c4fa5d31468464de1f43be0efcfc" +"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +"checksum memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53445de381a1f436797497c61d851644d0e8e88e6140f22872ad33a704933978" +"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" +"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +"checksum pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c26d2b92e47063ffce70d3e3b1bd097af121a9e0db07ca38a6cc1cf0cc85ff25" +"checksum pear_codegen 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "336db4a192cc7f54efeb0c4e11a9245394824cc3bcbd37ba3ff51240c35d7a6e" +"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +"checksum pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +"checksum proc-macro2 0.4.30 
(registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" +"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" +"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" +"checksum ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2c4db68a2e35f3497146b7e4563df7d4773a2433230c5e4b448328e31740458a" +"checksum rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "42c1e9deb3ef4fa430d307bfccd4231434b707ca1328fae339c43ad1201cc6f7" +"checksum rocket_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "79aa1366f9b2eccddc05971e17c5de7bb75a5431eb12c2b5c66545fd348647f4" +"checksum rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b1391457ee4e80b40d4b57fa5765c0f2836b20d73bcbee4e3f35d93cf3b80817" +"checksum ruma-api 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b56cf718a9b575a9ce4fae92399c5c00b9059b2d3fbc6bfb4ff4f00431e50290" +"checksum ruma-api-macros 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e2c60086b570af5d8b88c9e2f10b3c4c950436658a104c3e60117eca2b1c466" +"checksum ruma-client-api 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa35e24b165401c1066416d804d830313f27d934dcd71f4388396a42f98ba020" +"checksum ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4a196cbeaa7bffe3756448ee9cf142645790e2a0ece78dfaf10a311eaf5e2a5e" +"checksum ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" +"checksum ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" +"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +"checksum safemem 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" +"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" +"checksum state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" +"checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" +"checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" +"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" +"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum url 2.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb"
+"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"
+"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce"
+"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
+"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
+"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+"checksum yansi 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d60c3b48c9cdec42fb06b3b84b5b087405e1fa1c644a1af3930e4dfafe93de48"
+"checksum yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..4174c38
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "matrixserver"
+version = "0.1.0"
+authors = ["timokoesters <timo@koesters.xyz>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rocket = "0.4.2"
+http = "0.2.0"
+ruma-client-api = "0.6.0"
+pretty_env_logger = "0.4.0"
diff --git a/src/main.rs b/src/main.rs
new file mode 100644
index 0000000..a38b9e2
--- /dev/null
+++ b/src/main.rs
@@ -0,0 +1,26 @@
+#![feature(proc_macro_hygiene, decl_macro)]
+mod ruma_wrapper;
+
+use {
+    rocket::{get, post, routes},
+    ruma_client_api::r0::account::register,
+    ruma_wrapper::Ruma,
+    std::convert::TryInto,
+};
+
+#[post("/_matrix/client/r0/register", data = "<body>")]
+fn register_route(body: Ruma<register::Request>) -> Ruma<register::Response> {
+    Ruma(register::Response {
+        access_token: "42".to_owned(),
+        home_server: "deprecated".to_owned(),
+        user_id: "@yourrequestedid:homeserver.com".try_into().unwrap(),
+        device_id: body.device_id.clone().unwrap_or_default(),
+    })
+}
+
+fn main() {
+    pretty_env_logger::init();
+    rocket::ignite()
+        .mount("/", routes![register_route])
+        .launch();
+}
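The Ruma<T> data guard introduced in src/ruma_wrapper.rs below leans on a single bound: T must implement TryFrom<http::Request<Vec<u8>>>, which ruma-api derives for request types such as the register::Request used above. A minimal sketch of that contract, assuming the crate versions pinned in this commit (the standalone function, the example JSON body, and its field names are illustrative and not part of the patch):

use std::convert::TryFrom;

fn sketch_request_parsing() {
    // Build the kind of raw request that Ruma::from_data assembles from
    // Rocket's URI, headers and body before handing it to ruma-api.
    let raw: http::Request<Vec<u8>> = http::Request::builder()
        .method("POST")
        .uri("/_matrix/client/r0/register")
        .body(br#"{"username": "alice", "password": "secret"}"#.to_vec())
        .unwrap();

    // This conversion is exactly what the T::try_from call in from_data performs;
    // whether this particular body parses depends on the ruma-client-api version.
    let _parsed = ruma_client_api::r0::account::register::Request::try_from(raw);
}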
diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs
new file mode 100644
index 0000000..e09a125
--- /dev/null
+++ b/src/ruma_wrapper.rs
@@ -0,0 +1,63 @@
+use {
+    rocket::data::{FromDataSimple, Outcome},
+    rocket::http::Status,
+    rocket::response::Responder,
+    rocket::Request,
+    rocket::{Data, Outcome::*},
+    std::ops::Deref,
+    std::{
+        convert::{TryFrom, TryInto},
+        io::{Cursor, Read},
+    },
+};
+
+const MESSAGE_LIMIT: u64 = 65535;
+
+pub struct Ruma<T>(pub T);
+impl<T: TryFrom<http::Request<Vec<u8>>>> FromDataSimple for Ruma<T> {
+    type Error = ();
+
+    fn from_data(request: &Request, data: Data) -> Outcome<Self, Self::Error> {
+        let mut handle = data.open().take(MESSAGE_LIMIT);
+        let mut body = Vec::new();
+        handle.read_to_end(&mut body).unwrap();
+        dbg!(&body);
+        let mut http_request = http::Request::builder().uri(request.uri().to_string());
+        for header in request.headers().iter() {
+            http_request = http_request.header(header.name.as_str(), &*header.value);
+        }
+
+        let http_request = http_request.body(body).unwrap();
+
+        match T::try_from(http_request) {
+            Ok(r) => Success(Ruma(r)),
+            Err(_) => Failure((Status::InternalServerError, ())),
+        }
+    }
+}
+
+impl<T> Deref for Ruma<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'r, T: TryInto<http::Response<Vec<u8>>>> Responder<'r> for Ruma<T> {
+    fn respond_to(self, _: &Request) -> rocket::response::Result<'r> {
+        match self.0.try_into() {
+            Ok(http_response) => {
+                let mut response = rocket::response::Response::build();
+                response.sized_body(Cursor::new(http_response.body().clone()));
+
+                for header in http_response.headers() {
+                    response
+                        .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned());
+                }
+                response.ok()
+            }
+            Err(_) => Err(Status::InternalServerError),
+        }
+    }
+}

From cd777af41c32cd0aba56b93d0e8fd6c42bc9a3ac Mon Sep 17 00:00:00 2001
From: timokoesters <timo@koesters.xyz>
Date: Tue, 18 Feb 2020 22:07:57 +0100
Subject: [PATCH 0002/1727] feat: simple endpoint handlers

---
 Cargo.lock          |  58 ++++++++++++++++++++----
 Cargo.toml          |   3 +-
 src/main.rs         | 108 +++++++++++++++++++++++++++++++++++++++-----
 src/ruma_wrapper.rs |  42 +++++++++++++----
 4 files changed, 181 insertions(+), 30 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 22b2a71..f85838e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -145,6 +145,14 @@ dependencies = [
  "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "heck"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "hermit-abi"
 version = "0.1.6"
@@ -276,9 +284,10 @@ name = "matrixserver"
 version = "0.1.0"
 dependencies = [
  "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "ruma-client-api 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ruma-client-api 0.6.0 (git+https://github.com/ruma/ruma-client-api)",
 ]
 
 [[package]]
@@ -509,22 +518,23 @@ dependencies = [
 
 [[package]]
 name = "ruma-api"
-version = "0.13.1"
+version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ruma-api-macros 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ruma-api-macros 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ruma-api-macros"
-version = "0.10.1"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -535,14 +545,16 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +source = "git+https://github.com/ruma/ruma-client-api#8e9a6ffededb89bc87c6ac5d067d8d4249eabf04" dependencies = [ + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-api 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-api 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", "ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -645,6 +657,25 @@ name = "state" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "strum" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "strum_macros 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "strum_macros" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "syn" version = "0.15.44" @@ -733,6 +764,11 @@ dependencies = [ "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-xid" version = "0.1.0" @@ -841,6 +877,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" "checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" "checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" "checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" @@ -883,9 +920,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"42c1e9deb3ef4fa430d307bfccd4231434b707ca1328fae339c43ad1201cc6f7" "checksum rocket_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "79aa1366f9b2eccddc05971e17c5de7bb75a5431eb12c2b5c66545fd348647f4" "checksum rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b1391457ee4e80b40d4b57fa5765c0f2836b20d73bcbee4e3f35d93cf3b80817" -"checksum ruma-api 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b56cf718a9b575a9ce4fae92399c5c00b9059b2d3fbc6bfb4ff4f00431e50290" -"checksum ruma-api-macros 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e2c60086b570af5d8b88c9e2f10b3c4c950436658a104c3e60117eca2b1c466" -"checksum ruma-client-api 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa35e24b165401c1066416d804d830313f27d934dcd71f4388396a42f98ba020" +"checksum ruma-api 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3652d110c06f4ca71815d043c7aee3d9d90de5b4b687c037a27563d266fccd5b" +"checksum ruma-api-macros 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "876dac1a0fdc5495849219542b4b4ea024153f32dc486e65a613f37c69018627" +"checksum ruma-client-api 0.6.0 (git+https://github.com/ruma/ruma-client-api)" = "" "checksum ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4a196cbeaa7bffe3756448ee9cf142645790e2a0ece78dfaf10a311eaf5e2a5e" "checksum ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" "checksum ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" @@ -898,6 +935,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" "checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" "checksum state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +"checksum strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "530efb820d53b712f4e347916c5e7ed20deb76a4f0457943b3182fb889b06d2c" +"checksum strum_macros 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6e163a520367c465f59e0a61a23cfae3b10b6546d78b6f672a382be79f7110" "checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" "checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" "checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" @@ -909,6 +948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" diff --git a/Cargo.toml b/Cargo.toml index 4174c38..6b4918b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,5 +9,6 @@ edition = "2018" [dependencies] rocket = "0.4.2" http = "0.2.0" -ruma-client-api = "0.6.0" +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api" } pretty_env_logger = "0.4.0" +log = "0.4.8" diff --git a/src/main.rs b/src/main.rs index a38b9e2..5fa7ff1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,25 +2,111 @@ mod ruma_wrapper; use { - rocket::{get, post, routes}, - ruma_client_api::r0::account::register, - ruma_wrapper::Ruma, + rocket::{get, post, put, routes}, + ruma_client_api::{ + error::{Error, ErrorKind}, + r0::{ + account::register, alias::get_alias, membership::join_room_by_id, + message::create_message_event, + }, + unversioned::get_supported_versions, + }, + ruma_wrapper::{MatrixResult, Ruma}, std::convert::TryInto, }; +#[get("/_matrix/client/versions")] +fn get_supported_versions_route() -> MatrixResult { + MatrixResult(Ok(get_supported_versions::Response { + versions: vec!["r0.6.0".to_owned()], + })) +} + #[post("/_matrix/client/r0/register", data = "")] -fn register_route(body: Ruma) -> Ruma { - Ruma(register::Response { - access_token: "42".to_owned(), - home_server: "deprecated".to_owned(), - user_id: "@yourrequestedid:homeserver.com".try_into().unwrap(), - device_id: body.device_id.clone().unwrap_or_default(), - }) +fn register_route(body: Ruma) -> MatrixResult { + let user_id = match (*format!( + "@{}:localhost", + body.username.clone().unwrap_or("randomname".to_owned()) + )) + .try_into() + { + Err(_) => { + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidUsername, + message: "Username was invalid. 
".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } + Ok(user_id) => user_id, + }; + + MatrixResult(Ok(register::Response { + access_token: "randomtoken".to_owned(), + home_server: "localhost".to_owned(), + user_id, + device_id: body.device_id.clone().unwrap_or("randomid".to_owned()), + })) +} + +#[get("/_matrix/client/r0/directory/room/")] +fn get_alias_route(room_alias: String) -> MatrixResult { + let room_id = match &*room_alias { + "#room:localhost" => "!xclkjvdlfj:localhost", + _ => { + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } + } + .try_into() + .unwrap(); + + MatrixResult(Ok(get_alias::Response { + room_id, + servers: vec!["localhost".to_owned()], + })) +} + +#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] +fn join_room_by_id_route( + _room_id: String, + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(join_room_by_id::Response { + room_id: body.room_id.clone(), + })) +} + +#[put( + "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", + data = "" +)] +fn create_message_event_route( + _room_id: String, + _event_type: String, + _txn_id: String, + body: Ruma, +) -> MatrixResult { + dbg!(body.0); + MatrixResult(Ok(create_message_event::Response { + event_id: "$randomeventid".try_into().unwrap(), + })) } fn main() { pretty_env_logger::init(); rocket::ignite() - .mount("/", routes![register_route]) + .mount( + "/", + routes![ + get_supported_versions_route, + register_route, + get_alias_route, + join_room_by_id_route, + create_message_event_route, + ], + ) .launch(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index e09a125..dda584f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -4,6 +4,8 @@ use { rocket::response::Responder, rocket::Request, rocket::{Data, Outcome::*}, + ruma_client_api::error::Error, + std::fmt::Debug, std::ops::Deref, std::{ convert::{TryFrom, TryInto}, @@ -14,24 +16,33 @@ use { const MESSAGE_LIMIT: u64 = 65535; pub struct Ruma(pub T); -impl>>> FromDataSimple for Ruma { +impl>>> FromDataSimple for Ruma +where + T::Error: Debug, +{ type Error = (); fn from_data(request: &Request, data: Data) -> Outcome { - let mut handle = data.open().take(MESSAGE_LIMIT); - let mut body = Vec::new(); - handle.read_to_end(&mut body).unwrap(); - dbg!(&body); - let mut http_request = http::Request::builder().uri(request.uri().to_string()); + let mut http_request = http::Request::builder() + .uri(request.uri().to_string()) + .method(&*request.method().to_string()); for header in request.headers().iter() { http_request = http_request.header(header.name.as_str(), &*header.value); } + let mut handle = data.open().take(MESSAGE_LIMIT); + let mut body = Vec::new(); + handle.read_to_end(&mut body).unwrap(); + let http_request = http_request.body(body).unwrap(); + log::info!("{:?}", http_request); match T::try_from(http_request) { Ok(r) => Success(Ruma(r)), - Err(_) => Failure((Status::InternalServerError, ())), + Err(e) => { + log::error!("{:?}", e); + Failure((Status::InternalServerError, ())) + } } } } @@ -44,9 +55,22 @@ impl Deref for Ruma { } } -impl<'r, T: TryInto>>> Responder<'r> for Ruma { +pub struct MatrixResult(pub std::result::Result); +impl>>> TryInto>> for MatrixResult { + type Error = T::Error; + + fn try_into(self) -> Result>, T::Error> { + match self.0 { + Ok(t) => t.try_into(), + Err(e) => Ok(e.into()), + } + } +} + +impl<'r, T: TryInto>>> Responder<'r> for MatrixResult { fn respond_to(self, 
_: &Request) -> rocket::response::Result<'r> { - match self.0.try_into() { + let http_response: Result, _> = self.try_into(); + match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); response.sized_body(Cursor::new(http_response.body().clone())); From c2c18b46517c351bbd68ec2619231b46f384b5b2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 20 Feb 2020 10:12:13 +0100 Subject: [PATCH 0003/1727] feat: database --- Cargo.lock | 239 ++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 3 + src/main.rs | 74 ++++++++++++++-- 3 files changed, 311 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f85838e..f5e24c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,6 +8,16 @@ dependencies = [ "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "atty" version = "0.2.14" @@ -18,6 +28,11 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "autocfg" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "autocfg" version = "1.0.0" @@ -40,11 +55,26 @@ dependencies = [ "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "blake2b_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "byteorder" version = "1.3.4" @@ -73,6 +103,19 @@ name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cookie" version = "0.11.2" @@ -84,6 +127,37 @@ dependencies = [ "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crc32fast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + 
"scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "devise" version = "0.2.0" @@ -113,6 +187,26 @@ dependencies = [ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "directories" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dirs-sys" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "dtoa" version = "0.4.5" @@ -135,6 +229,23 @@ name = "fnv" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "getrandom" version = "0.1.14" @@ -258,6 +369,14 @@ name = "libc" version = "0.2.66" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "lock_api" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "log" version = "0.3.9" @@ -283,11 +402,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "matrixserver" version = "0.1.0" dependencies = [ + "directories 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ruma-client-api 0.6.0 (git+https://github.com/ruma/ruma-client-api)", + "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", + "sled 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -300,6 +422,14 @@ name = "memchr" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "memoffset" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] 
name = "mime" version = "0.2.6" @@ -317,6 +447,28 @@ dependencies = [ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parking_lot" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pear" version = "0.1.2" @@ -440,6 +592,16 @@ name = "redox_syscall" version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "redox_users" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "1.3.4" @@ -590,6 +752,25 @@ dependencies = [ "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rust-argon2" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ryu" version = "1.0.2" @@ -600,6 +781,24 @@ name = "safemem" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "serde" version = "1.0.104" @@ -639,6 +838,21 @@ dependencies = [ "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sled" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "smallvec" version = "0.6.13" @@ -859,23 +1073,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [metadata] "checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811" +"checksum arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +"checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" "checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" "checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" "checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" "checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +"checksum constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" "checksum cookie 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d9fac5e7bdefb6160fb181ee0eaa6f96704b625c70e6d61c465cb35750a4ea12" +"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" +"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" "checksum devise 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e04ba2d03c5fa0d954c061fc8c9c288badadffc272ebb87679a89846de3ed3" "checksum devise_codegen 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "066ceb7928ca93a9bedc6d0e612a8a0424048b0ab1f75971b203d01420c055d7" "checksum devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf41c59b22b5e3ec0ea55c7847e5f358d340f3a8d6d53a5cf4f1564967f96487" +"checksum directories 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" +"checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" "checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" "checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +"checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +"checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" "checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" "checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" @@ -891,13 +1119,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" "checksum maybe-uninit 2.0.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53445de381a1f436797497c61d851644d0e8e88e6140f22872ad33a704933978" +"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +"checksum parking_lot 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" +"checksum parking_lot_core 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" "checksum pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c26d2b92e47063ffce70d3e3b1bd097af121a9e0db07ca38a6cc1cf0cc85ff25" "checksum pear_codegen 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "336db4a192cc7f54efeb0c4e11a9245394824cc3bcbd37ba3ff51240c35d7a6e" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" @@ -914,6 +1146,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +"checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" "checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" "checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" "checksum ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2c4db68a2e35f3497146b7e4563df7d4773a2433230c5e4b448328e31740458a" @@ -926,12 +1159,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4a196cbeaa7bffe3756448ee9cf142645790e2a0ece78dfaf10a311eaf5e2a5e" "checksum ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" "checksum ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" +"checksum rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum ryu 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" "checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" "checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" "checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" "checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" "checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +"checksum sled 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb6824dde66ad33bf20c6e8476f5b82b871bc8bc3c129a10ea2f7dae5060fa3" "checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" "checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" "checksum state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" diff --git a/Cargo.toml b/Cargo.toml index 6b4918b..7c9a716 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,3 +12,6 @@ http = "0.2.0" ruma-client-api = { git = "https://github.com/ruma/ruma-client-api" } pretty_env_logger = "0.4.0" log = "0.4.8" +sled = "0.31.0" +directories = "2.0.2" +ruma-identifiers = "0.14.1" diff --git a/src/main.rs b/src/main.rs index 5fa7ff1..bb63646 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,16 +2,19 @@ mod ruma_wrapper; use { - rocket::{get, post, put, routes}, + directories::ProjectDirs, + rocket::{get, post, put, routes, State}, ruma_client_api::{ error::{Error, ErrorKind}, r0::{ account::register, alias::get_alias, membership::join_room_by_id, - message::create_message_event, + message::create_message_event, session::login, }, unversioned::get_supported_versions, }, + ruma_identifiers::UserId, ruma_wrapper::{MatrixResult, Ruma}, + sled::Db, std::convert::TryInto, }; @@ -23,8 +26,13 @@ fn get_supported_versions_route() -> MatrixResult) -> MatrixResult { - let user_id = match (*format!( +fn register_route( + db: State, + body: Ruma, +) -> MatrixResult { + let users = db.open_tree("users").unwrap(); + + let user_id: UserId = match (*format!( "@{}:localhost", body.username.clone().unwrap_or("randomname".to_owned()) )) @@ -33,13 +41,28 @@ fn register_route(body: Ruma) -> MatrixResult { return MatrixResult(Err(Error { kind: ErrorKind::InvalidUsername, - message: "Username was invalid. 
".to_owned(), + message: "Username was invalid.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })) } Ok(user_id) => user_id, }; + if users.contains_key(user_id.to_string()).unwrap() { + return MatrixResult(Err(Error { + kind: ErrorKind::UserInUse, + message: "Desired user ID is already taken.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + users + .insert( + user_id.to_string(), + &*body.password.clone().unwrap_or_default(), + ) + .unwrap(); + MatrixResult(Ok(register::Response { access_token: "randomtoken".to_owned(), home_server: "localhost".to_owned(), @@ -48,6 +71,38 @@ fn register_route(body: Ruma) -> MatrixResult, body: Ruma) -> MatrixResult { + let user_id = if let login::UserInfo::MatrixId(username) = &body.user { + let user_id = format!("@{}:localhost", username); + let users = db.open_tree("users").unwrap(); + if !users.contains_key(user_id.clone()).unwrap() { + dbg!(); + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "UserId not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + user_id + } else { + dbg!(); + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Bad login type.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + }; + + return MatrixResult(Ok(login::Response { + user_id: (*user_id).try_into().unwrap(), // User id is correct because the user is already registered + access_token: "randomtoken".to_owned(), + home_server: Some("localhost".to_owned()), + device_id: body.device_id.clone().unwrap_or("randomid".to_owned()), + well_known: None, + })); +} + #[get("/_matrix/client/r0/directory/room/")] fn get_alias_route(room_alias: String) -> MatrixResult { let room_id = match &*room_alias { @@ -97,16 +152,25 @@ fn create_message_event_route( fn main() { pretty_env_logger::init(); + let db = sled::open( + ProjectDirs::from("xyz", "koesters", "matrixserver") + .unwrap() + .data_dir(), + ) + .unwrap(); + rocket::ignite() .mount( "/", routes![ get_supported_versions_route, register_route, + login_route, get_alias_route, join_room_by_id_route, create_message_event_route, ], ) + .manage(db) .launch(); } From 6fffcecf8e801b9e741e667ae699a1cbf4439008 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 27 Mar 2020 21:00:10 +0100 Subject: [PATCH 0004/1727] Updates --- Cargo.lock | 1075 ++++++++++++++++++++++++++++++---------------------- Cargo.toml | 4 +- 2 files changed, 633 insertions(+), 446 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5e24c2..4e7af27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,1203 +2,1390 @@ # It is not intended for manual editing. 
[[package]] name = "aho-corasick" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" dependencies = [ - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] [[package]] name = "arrayref" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", + "winapi", ] -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "autocfg" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "base-x" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" [[package]] name = "base64" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" dependencies = [ - "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder", + "safemem", ] [[package]] name = "base64" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + +[[package]] +name = "binascii" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2b_simd" version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" dependencies = [ - "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref", + "arrayvec", + "constant_time_eq", ] +[[package]] +name = "bumpalo" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" + [[package]] name = "byteorder" version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" [[package]] name = "cc" version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", ] [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.11.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c60ef6d0bbf56ad2674249b6bb74f2c6aeb98b98dd57b5d3e37cace33011d69" dependencies = [ - "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0", + "percent-encoding 2.1.0", + "ring", + "time 0.2.9", ] [[package]] name = "crc32fast" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] name = "crossbeam-epoch" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "maybe-uninit", + "memoffset", + "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "cfg-if", + "lazy_static", ] [[package]] name = "devise" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ - "devise_codegen 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "devise_codegen", + "devise_core", ] [[package]] name = "devise_codegen" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ - "devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "devise_core", + "quote 1.0.3", ] [[package]] name = "devise_core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "directories" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "dirs-sys", ] [[package]] name = "dirs-sys" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "redox_users", + "winapi", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "dtoa" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" [[package]] name = "env_logger" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ - "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "atty", + "humantime", + "log 0.4.8", + "regex", + "termcolor", ] [[package]] name = "fnv" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" [[package]] name = "fs2" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "winapi", ] [[package]] name = "fxhash" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" dependencies = [ - "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder", ] [[package]] name = "getrandom" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "wasi", ] +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "heck" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "http" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "fnv", + "itoa", ] [[package]] name = "httparse" version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] name = "humantime" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error", ] [[package]] name = "hyper" version = "0.10.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" dependencies = [ - "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.9.3", + "httparse", + "language-tags", + "log 0.3.9", + "mime", + "num_cpus", + "time 0.1.42", + "traitobject", + "typeable", + "unicase", + "url 1.7.2", ] [[package]] name = "idna" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "idna" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "indexmap" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", ] [[package]] name = "itoa" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" + +[[package]] +name = "js-sys" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" +dependencies = [ + "wasm-bindgen", +] [[package]] name = "js_int" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4937d8b6672d78c0dd9689d671e3faf2c9744fa36cbcb437e22cc8b1bd59ac25" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde", ] [[package]] name = "language-tags" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" 
-version = "0.2.66" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" [[package]] name = "lock_api" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" dependencies = [ - "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard", ] [[package]] name = "log" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8", ] [[package]] name = "log" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] name = "matches" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "matrixserver" version = "0.1.0" dependencies = [ - "directories 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-client-api 0.6.0 (git+https://github.com/ruma/ruma-client-api)", - "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", - "sled 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)", + "directories", + "http", + "log 0.4.8", + "pretty_env_logger", + "rocket", + "ruma-client-api", + "ruma-identifiers", + "sled", ] [[package]] name = "maybe-uninit" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memoffset" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", ] [[package]] name = "mime" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.9", ] [[package]] name = "num_cpus" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", ] [[package]] name = "parking_lot" version = "0.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" dependencies = [ - "lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lock_api", + "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "smallvec", + "winapi", ] [[package]] name = "pear" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c26d2b92e47063ffce70d3e3b1bd097af121a9e0db07ca38a6cc1cf0cc85ff25" dependencies = [ - "pear_codegen 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "pear_codegen", ] [[package]] name = "pear_codegen" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "336db4a192cc7f54efeb0c4e11a9245394824cc3bcbd37ba3ff51240c35d7a6e" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "yansi 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", + "version_check 0.1.5", + "yansi 0.4.0", ] [[package]] name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "ppv-lite86" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" [[package]] name = "pretty_env_logger" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" dependencies = [ - "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger", + "log 0.4.8", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcfdefadc3d57ca21cf17990a28ef4c0f7c61383a28cb7604cf4a18e6ede1420" + [[package]] name = "proc-macro2" version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" dependencies = [ - "unicode-xid 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0", ] [[package]] name = "proc-macro2" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0", ] [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30", ] [[package]] name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9", ] [[package]] name = "rand" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core", ] [[package]] name = "redox_syscall" version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" [[package]] name = "redox_users" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rust-argon2 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "redox_syscall", + "rust-argon2", ] [[package]] name = "regex" -version = "1.3.4" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" dependencies = [ - "aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", ] [[package]] name = "regex-syntax" -version = "0.6.14" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" [[package]] name = "ring" -version = "0.13.5" +version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "lazy_static", + "libc", + "spin", + "untrusted", + "web-sys", + "winapi", ] [[package]] name = "rocket" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" dependencies = [ - "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rocket_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "atty", + "binascii", + "log 0.4.8", + "memchr", + "num_cpus", + "pear", + "rocket_codegen", + "rocket_http", + "state", + "time 0.2.9", + "toml", + "version_check 0.9.1", + "yansi 0.5.0", ] [[package]] name = "rocket_codegen" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" dependencies = [ - "devise 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - 
"version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "devise", + "glob", + "indexmap", + "quote 1.0.3", + "rocket_http", + "version_check 0.9.1", + "yansi 0.5.0", ] [[package]] name = "rocket_http" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" dependencies = [ - "cookie 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cookie", + "hyper", + "indexmap", + "pear", + "percent-encoding 1.0.1", + "smallvec", + "state", + "time 0.2.9", + "unicode-xid 0.2.0", ] [[package]] name = "ruma-api" -version = "0.14.0" +version = "0.15.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44987d5fefcf801a6fb5c5843c17f876a53852fa07e5e4d99e0dca3670f1441a" dependencies = [ - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-api-macros 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http", + "percent-encoding 2.1.0", + "ruma-api-macros", + "ruma-identifiers", + "serde", + "serde_json", + "serde_urlencoded", + "strum", + "url 2.1.1", ] [[package]] name = "ruma-api-macros" -version = "0.11.0" +version = "0.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36931db94874129f9202f650d91447d8317b099bae1e12cdd5769ba4eced07d2" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "ruma-client-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma-client-api#8e9a6ffededb89bc87c6ac5d067d8d4249eabf04" +source = "git+https://github.com/ruma/ruma-client-api#57f5e8d66168a54128426c8e34b26fa78f739c3e" dependencies = [ - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-api 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", - 
"ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http", + "js_int", + "ruma-api", + "ruma-events", + "ruma-identifiers", + "serde", + "serde_json", + "strum", + "url 2.1.1", ] [[package]] name = "ruma-events" -version = "0.15.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11951235b25c72a82eb988aabf5af23cae883562665e0cb73954ffe4ae81f11c" dependencies = [ - "js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "js_int", + "ruma-events-macros", + "ruma-identifiers", + "serde", + "serde_json", ] [[package]] name = "ruma-events-macros" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "ruma-identifiers" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" dependencies = [ - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand", + "serde", + "url 2.1.1", ] [[package]] name = "rust-argon2" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", ] [[package]] name = "rustc_version" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" +dependencies = [ + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "ryu" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" [[package]] name = "safemem" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver-parser", ] [[package]] name = "semver-parser" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" dependencies = [ - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "serde_json" version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" dependencies = [ - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa", + "ryu", + "serde", ] [[package]] name = "serde_urlencoded" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" dependencies = [ - "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa", + "itoa", + "serde", + "url 2.1.1", ] +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + [[package]] name = "sled" version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fb6824dde66ad33bf20c6e8476f5b82b871bc8bc3c129a10ea2f7dae5060fa3" dependencies = [ - "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fxhash 
0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log 0.4.8", + "parking_lot", ] [[package]] name = "smallvec" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "standback" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4edf667ea8f60afc06d6aeec079d20d5800351109addec1faea678a8663da4e1" [[package]] name = "state" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" + +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2 1.0.9", + "quote 1.0.3", + "serde", + "serde_derive", + "syn 1.0.17", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2 1.0.9", + "quote 1.0.3", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn 1.0.17", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "strum" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57bd81eb48f4c437cadc685403cad539345bf703d78e63707418431cecd4522b" dependencies = [ - "strum_macros 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", + "strum_macros", ] [[package]] name = "strum_macros" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "heck", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] 
[[package]] name = "syn" version = "0.15.44" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", ] [[package]] name = "syn" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9", + "quote 1.0.3", + "unicode-xid 0.2.0", ] [[package]] name = "termcolor" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" dependencies = [ - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util", ] [[package]] name = "thread_local" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static", ] [[package]] name = "time" version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "redox_syscall", + "winapi", +] + +[[package]] +name = "time" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6329a7835505d46f5f3a9a2c237f8d6bf5ca6f0015decb3698ba57fcdbb609ba" +dependencies = [ + "cfg-if", + "libc", + "rustversion", + "standback", + "stdweb", + "time-macros", + "winapi", +] + +[[package]] +name = "time-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e987cfe0537f575b5fc99909de6185f6c19c3ad8889e2275e686a873d0869ba1" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "toml" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde", ] [[package]] name = "traitobject" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" [[package]] name = "typeable" version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" [[package]] name = "unicase" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" dependencies = [ - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.1.5", ] [[package]] name = "unicode-bidi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", ] [[package]] name = "unicode-normalization" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" dependencies = [ - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec", ] [[package]] name = "unicode-segmentation" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" [[package]] name = "unicode-xid" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "untrusted" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" [[package]] name = "url" version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" dependencies = [ - "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "idna 0.1.5", + "matches", + "percent-encoding 1.0.1", ] [[package]] name = "url" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "idna 0.2.0", + "matches", + "percent-encoding 2.1.0", + "serde", ] [[package]] name = "version_check" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" [[package]] name = "version_check" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" 
+ +[[package]] +name = "wasm-bindgen" +version = "0.2.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" +dependencies = [ + "bumpalo", + "lazy_static", + "log 0.4.8", + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" +dependencies = [ + "quote 1.0.3", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" +dependencies = [ + "proc-macro2 1.0.9", + "quote 1.0.3", + "syn 1.0.17", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" + +[[package]] +name = "web-sys" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] [[package]] name = "winapi" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "yansi" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d60c3b48c9cdec42fb06b3b84b5b087405e1fa1c644a1af3930e4dfafe93de48" [[package]] name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811" -"checksum arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" -"checksum arrayvec 0.5.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" -"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" -"checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" -"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -"checksum cookie 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d9fac5e7bdefb6160fb181ee0eaa6f96704b625c70e6d61c465cb35750a4ea12" -"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" -"checksum devise 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e04ba2d03c5fa0d954c061fc8c9c288badadffc272ebb87679a89846de3ed3" -"checksum devise_codegen 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "066ceb7928ca93a9bedc6d0e612a8a0424048b0ab1f75971b203d01420c055d7" -"checksum devise_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf41c59b22b5e3ec0ea55c7847e5f358d340f3a8d6d53a5cf4f1564967f96487" -"checksum directories 2.0.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" -"checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" -"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" -"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -"checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" -"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" -"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -"checksum hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)" = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" -"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" -"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" -"checksum js_int 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7a28645cf69403534c8b3c961783cdc227c4c4fa5d31468464de1f43be0efcfc" -"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" -"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53445de381a1f436797497c61d851644d0e8e88e6140f22872ad33a704933978" -"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum parking_lot 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" -"checksum parking_lot_core 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" -"checksum pear 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c26d2b92e47063ffce70d3e3b1bd097af121a9e0db07ca38a6cc1cf0cc85ff25" -"checksum pear_codegen 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "336db4a192cc7f54efeb0c4e11a9245394824cc3bcbd37ba3ff51240c35d7a6e" -"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" -"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" -"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" -"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" -"checksum ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2c4db68a2e35f3497146b7e4563df7d4773a2433230c5e4b448328e31740458a" -"checksum rocket 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "42c1e9deb3ef4fa430d307bfccd4231434b707ca1328fae339c43ad1201cc6f7" -"checksum rocket_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "79aa1366f9b2eccddc05971e17c5de7bb75a5431eb12c2b5c66545fd348647f4" -"checksum rocket_http 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b1391457ee4e80b40d4b57fa5765c0f2836b20d73bcbee4e3f35d93cf3b80817" -"checksum ruma-api 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3652d110c06f4ca71815d043c7aee3d9d90de5b4b687c037a27563d266fccd5b" -"checksum ruma-api-macros 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "876dac1a0fdc5495849219542b4b4ea024153f32dc486e65a613f37c69018627" -"checksum ruma-client-api 0.6.0 (git+https://github.com/ruma/ruma-client-api)" = "" -"checksum ruma-events 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4a196cbeaa7bffe3756448ee9cf142645790e2a0ece78dfaf10a311eaf5e2a5e" -"checksum ruma-events-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" -"checksum ruma-identifiers 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" -"checksum rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" -"checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" -"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" 
-"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" -"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -"checksum sled 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb6824dde66ad33bf20c6e8476f5b82b871bc8bc3c129a10ea2f7dae5060fa3" -"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" -"checksum state 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" -"checksum strum 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "530efb820d53b712f4e347916c5e7ed20deb76a4f0457943b3182fb889b06d2c" -"checksum strum_macros 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6e163a520367c465f59e0a61a23cfae3b10b6546d78b6f672a382be79f7110" -"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" -"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" -"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" -"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" -"checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" -"checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum 
untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" -"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -"checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum yansi 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d60c3b48c9cdec42fb06b3b84b5b087405e1fa1c644a1af3930e4dfafe93de48" -"checksum yansi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" diff --git a/Cargo.toml b/Cargo.toml index 7c9a716..31cfd30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = "0.4.2" -http = "0.2.0" +rocket = { git = "https://github.com/SergioBenitez/Rocket.git" } +http = "0.2.1" ruma-client-api = { git = "https://github.com/ruma/ruma-client-api" } pretty_env_logger = "0.4.0" log = "0.4.8" From 1679da7784afa624e2bf597704974d9c64f3525b Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 27 Mar 2020 21:00:40 +0100 Subject: [PATCH 0005/1727] RUST_LOG=info by default --- src/main.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index bb63646..1bf0b56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -15,13 +15,14 @@ use { ruma_identifiers::UserId, ruma_wrapper::{MatrixResult, Ruma}, sled::Db, - std::convert::TryInto, + std::{collections::HashMap, convert::TryInto}, }; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { MatrixResult(Ok(get_supported_versions::Response { versions: vec!["r0.6.0".to_owned()], + unstable_features: HashMap::new(), })) } @@ -151,7 +152,12 @@ fn create_message_event_route( } fn main() { + // Log info by default + if let Err(_) = std::env::var("RUST_LOG") { + std::env::set_var("RUST_LOG", "info"); + } pretty_env_logger::init(); + let db = sled::open( ProjectDirs::from("xyz", "koesters", "matrixserver") .unwrap() From 6d27f1551667071eef949840036e70e36c63e73c Mon Sep 17 00:00:00 2001 From: 
timokoesters Date: Sat, 28 Mar 2020 15:16:18 +0100 Subject: [PATCH 0006/1727] More logging --- src/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 1bf0b56..fa6cb14 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ mod ruma_wrapper; use { directories::ProjectDirs, + log::debug, rocket::{get, post, put, routes, State}, ruma_client_api::{ error::{Error, ErrorKind}, @@ -40,16 +41,18 @@ fn register_route( .try_into() { Err(_) => { + debug!("Username invalid"); return MatrixResult(Err(Error { kind: ErrorKind::InvalidUsername, message: "Username was invalid.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, - })) + })); } Ok(user_id) => user_id, }; if users.contains_key(user_id.to_string()).unwrap() { + debug!("ID already taken"); return MatrixResult(Err(Error { kind: ErrorKind::UserInUse, message: "Desired user ID is already taken.".to_owned(), From 34a53ce20a49d9a9d6ffd98a04d0164a73a16a79 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 28 Mar 2020 18:50:02 +0100 Subject: [PATCH 0007/1727] Better database structure --- Cargo.lock | 69 +++++++++++++++++++++++++++++- Cargo.toml | 2 +- Rocket.toml | 3 ++ rust-toolchain | 1 + src/data.rs | 39 +++++++++++++++++ src/main.rs | 102 +++++++++++++++++++++++--------------------- src/ruma_wrapper.rs | 58 +++++++++++++++++-------- 7 files changed, 206 insertions(+), 68 deletions(-) create mode 100644 Rocket.toml create mode 100644 rust-toolchain create mode 100644 src/data.rs diff --git a/Cargo.lock b/Cargo.lock index 4e7af27..d024f97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,6 +54,15 @@ dependencies = [ "safemem", ] +[[package]] +name = "base64" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +dependencies = [ + "byteorder", +] + [[package]] name = "base64" version = "0.11.0" @@ -356,6 +365,18 @@ dependencies = [ "url 1.7.2", ] +[[package]] +name = "hyper-sync-rustls" +version = "0.3.0-rc.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53be239c980459955c0f0af3f13190ead511d7d4bdaeab8127c011b94d8558de" +dependencies = [ + "hyper", + "rustls", + "webpki", + "webpki-roots", +] + [[package]] name = "idna" version = "0.1.5" @@ -770,9 +791,11 @@ source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d dependencies = [ "cookie", "hyper", + "hyper-sync-rustls", "indexmap", "pear", "percent-encoding 1.0.1", + "rustls", "smallvec", "state", "time 0.2.9", @@ -879,6 +902,19 @@ dependencies = [ "semver", ] +[[package]] +name = "rustls" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +dependencies = [ + "base64 0.10.1", + "log 0.4.8", + "ring", + "sct", + "webpki", +] + [[package]] name = "rustversion" version = "1.0.2" @@ -908,6 +944,16 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "semver" version = "0.9.0" @@ -945,9 +991,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.48" +version = "1.0.49" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" +checksum = "02044a6a92866fd61624b3db4d2c9dccc2feabbc6be490b87611bf285edbac55" dependencies = [ "itoa", "ryu", @@ -1347,6 +1393,25 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" +dependencies = [ + "webpki", +] + [[package]] name = "winapi" version = "0.3.8" diff --git a/Cargo.toml b/Cargo.toml index 31cfd30..32c71a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git" } +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", features = ["tls"] } http = "0.2.1" ruma-client-api = { git = "https://github.com/ruma/ruma-client-api" } pretty_env_logger = "0.4.0" diff --git a/Rocket.toml b/Rocket.toml new file mode 100644 index 0000000..d18ee97 --- /dev/null +++ b/Rocket.toml @@ -0,0 +1,3 @@ +#[global.tls] +#certs = "/etc/ssl/certs/ssl-cert-snakeoil.pem" +#key = "/etc/ssl/private/ssl-cert-snakeoil.key" diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000..bf867e0 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +nightly diff --git a/src/data.rs b/src/data.rs new file mode 100644 index 0000000..52fe7af --- /dev/null +++ b/src/data.rs @@ -0,0 +1,39 @@ +use directories::ProjectDirs; +use ruma_identifiers::UserId; + +pub struct Data(sled::Db); + +impl Data { + pub fn set_hostname(&self, hostname: &str) { + self.0.insert("hostname", hostname).unwrap(); + } + pub fn hostname(&self) -> String { + String::from_utf8(self.0.get("hostname").unwrap().unwrap().to_vec()).unwrap() + } + pub fn load_or_create() -> Self { + Data( + sled::open( + ProjectDirs::from("xyz", "koesters", "matrixserver") + .unwrap() + .data_dir(), + ) + .unwrap(), + ) + } + + pub fn user_exists(&self, user_id: &UserId) -> bool { + self.0 + .open_tree("username_password") + .unwrap() + .contains_key(user_id.to_string()) + .unwrap() + } + + pub fn user_add(&self, user_id: UserId, password: Option) { + self.0 + .open_tree("username_password") + .unwrap() + .insert(user_id.to_string(), &*password.unwrap_or_default()) + .unwrap(); + } +} diff --git a/src/main.rs b/src/main.rs index fa6cb14..419a01c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,42 +1,47 @@ #![feature(proc_macro_hygiene, decl_macro)] +mod data; mod ruma_wrapper; -use { - directories::ProjectDirs, - log::debug, - rocket::{get, post, put, routes, State}, - ruma_client_api::{ - error::{Error, ErrorKind}, - r0::{ - account::register, alias::get_alias, membership::join_room_by_id, - message::create_message_event, session::login, - }, - unversioned::get_supported_versions, +use data::Data; +use log::debug; +use rocket::{get, post, put, routes, State}; +use ruma_client_api::{ + error::{Error, ErrorKind}, + r0::{ + account::register, alias::get_alias, membership::join_room_by_id, + message::create_message_event, session::login, }, - ruma_identifiers::UserId, - ruma_wrapper::{MatrixResult, 
Ruma}, - sled::Db, - std::{collections::HashMap, convert::TryInto}, + unversioned::get_supported_versions, }; +use ruma_identifiers::UserId; +use ruma_wrapper::{MatrixResult, Ruma}; +use std::{collections::HashMap, convert::TryInto}; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { MatrixResult(Ok(get_supported_versions::Response { - versions: vec!["r0.6.0".to_owned()], + versions: vec![ + "r0.0.1".to_owned(), + "r0.1.0".to_owned(), + "r0.2.0".to_owned(), + "r0.3.0".to_owned(), + "r0.4.0".to_owned(), + "r0.5.0".to_owned(), + "r0.6.0".to_owned(), + ], unstable_features: HashMap::new(), })) } #[post("/_matrix/client/r0/register", data = "")] fn register_route( - db: State, + data: State, body: Ruma, ) -> MatrixResult { - let users = db.open_tree("users").unwrap(); - let user_id: UserId = match (*format!( - "@{}:localhost", - body.username.clone().unwrap_or("randomname".to_owned()) + "@{}:{}", + body.username.clone().unwrap_or("randomname".to_owned()), + data.hostname() )) .try_into() { @@ -51,7 +56,7 @@ fn register_route( Ok(user_id) => user_id, }; - if users.contains_key(user_id.to_string()).unwrap() { + if data.user_exists(&user_id) { debug!("ID already taken"); return MatrixResult(Err(Error { kind: ErrorKind::UserInUse, @@ -60,37 +65,42 @@ fn register_route( })); } - users - .insert( - user_id.to_string(), - &*body.password.clone().unwrap_or_default(), - ) - .unwrap(); + data.user_add(user_id.clone(), body.password.clone()); MatrixResult(Ok(register::Response { access_token: "randomtoken".to_owned(), - home_server: "localhost".to_owned(), + home_server: data.hostname(), user_id, device_id: body.device_id.clone().unwrap_or("randomid".to_owned()), })) } #[post("/_matrix/client/r0/login", data = "")] -fn login_route(db: State, body: Ruma) -> MatrixResult { - let user_id = if let login::UserInfo::MatrixId(username) = &body.user { - let user_id = format!("@{}:localhost", username); - let users = db.open_tree("users").unwrap(); - if !users.contains_key(user_id.clone()).unwrap() { - dbg!(); +fn login_route(data: State, body: Ruma) -> MatrixResult { + let username = if let login::UserInfo::MatrixId(mut username) = body.user.clone() { + if !username.contains(':') { + username = format!("@{}:{}", username, data.hostname()); + } + if let Ok(user_id) = (*username).try_into() { + if !data.user_exists(&user_id) { + debug!("Userid does not exist. 
Can't log in."); + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "UserId not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + user_id + } else { + debug!("Invalid UserId."); return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "UserId not found.".to_owned(), + kind: ErrorKind::Unknown, + message: "Bad login type.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); } - user_id } else { - dbg!(); + debug!("Bad login type"); return MatrixResult(Err(Error { kind: ErrorKind::Unknown, message: "Bad login type.".to_owned(), @@ -99,7 +109,7 @@ fn login_route(db: State, body: Ruma) -> MatrixResult, ) -> MatrixResult { - dbg!(body.0); + dbg!(body); MatrixResult(Ok(create_message_event::Response { event_id: "$randomeventid".try_into().unwrap(), })) @@ -161,12 +171,8 @@ fn main() { } pretty_env_logger::init(); - let db = sled::open( - ProjectDirs::from("xyz", "koesters", "matrixserver") - .unwrap() - .data_dir(), - ) - .unwrap(); + let data = Data::load_or_create(); + data.set_hostname("localhost"); rocket::ignite() .mount( @@ -180,6 +186,6 @@ fn main() { create_message_event_route, ], ) - .manage(db) + .manage(data) .launch(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index dda584f..1f29450 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,24 +1,28 @@ -use { - rocket::data::{FromDataSimple, Outcome}, - rocket::http::Status, - rocket::response::Responder, - rocket::Request, - rocket::{Data, Outcome::*}, - ruma_client_api::error::Error, - std::fmt::Debug, - std::ops::Deref, - std::{ - convert::{TryFrom, TryInto}, - io::{Cursor, Read}, - }, +use rocket::{ + data::{FromDataSimple, Outcome}, + http::Status, + response::Responder, + Data, + Outcome::*, + Request, +}; +use ruma_client_api::error::Error; +use std::{ + convert::{TryFrom, TryInto}, + fmt, + io::{Cursor, Read}, + ops::Deref, }; const MESSAGE_LIMIT: u64 = 65535; -pub struct Ruma(pub T); +pub struct Ruma { + body: T, + headers: http::HeaderMap, +} impl>>> FromDataSimple for Ruma where - T::Error: Debug, + T::Error: fmt::Debug, { type Error = (); @@ -35,10 +39,11 @@ where handle.read_to_end(&mut body).unwrap(); let http_request = http_request.body(body).unwrap(); + let headers = http_request.headers().clone(); log::info!("{:?}", http_request); match T::try_from(http_request) { - Ok(r) => Success(Ruma(r)), + Ok(t) => Success(Ruma { body: t, headers }), Err(e) => { log::error!("{:?}", e); Failure((Status::InternalServerError, ())) @@ -51,7 +56,16 @@ impl Deref for Ruma { type Target = T; fn deref(&self) -> &Self::Target { - &self.0 + &self.body + } +} + +impl fmt::Debug for Ruma { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Ruma") + .field("body", &self.body) + .field("headers", &self.headers) + .finish() } } @@ -79,6 +93,16 @@ impl<'r, T: TryInto>>> Responder<'r> for MatrixResult response .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); } + + response.raw_header("Access-Control-Allow-Origin", "*"); + response.raw_header( + "Access-Control-Allow-Methods", + "GET, POST, PUT, DELETE, OPTIONS", + ); + response.raw_header( + "Access-Control-Allow-Headers", + "Origin, X-Requested-With, Content-Type, Accept, Authorization", + ); response.ok() } Err(_) => Err(Status::InternalServerError), From 744e0adfcf3ddaeab51c6e847e5201410d8aa34d Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 28 Mar 2020 23:08:59 +0100 Subject: [PATCH 0008/1727] Try to impl auth in ruma_wrapper --- 
Cargo.lock | 1 + Cargo.toml | 1 + src/main.rs | 2 +- src/ruma_wrapper.rs | 42 ++++++++++++++++++++++++++---------------- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d024f97..91c2441 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -492,6 +492,7 @@ dependencies = [ "log 0.4.8", "pretty_env_logger", "rocket", + "ruma-api", "ruma-client-api", "ruma-identifiers", "sled", diff --git a/Cargo.toml b/Cargo.toml index 32c71a2..62322d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,3 +15,4 @@ log = "0.4.8" sled = "0.31.0" directories = "2.0.2" ruma-identifiers = "0.14.1" +ruma-api = "0.15.0-dev.1" diff --git a/src/main.rs b/src/main.rs index 419a01c..ba95319 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ mod data; mod ruma_wrapper; -use data::Data; +pub use data::Data; use log::debug; use rocket::{get, post, put, routes, State}; use ruma_client_api::{ diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1f29450..ceaec3d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,32 +1,35 @@ -use rocket::{ - data::{FromDataSimple, Outcome}, - http::Status, - response::Responder, - Data, - Outcome::*, - Request, -}; -use ruma_client_api::error::Error; -use std::{ - convert::{TryFrom, TryInto}, - fmt, - io::{Cursor, Read}, - ops::Deref, +use { + rocket::data::{FromDataSimple, Outcome}, + rocket::http::Status, + rocket::response::Responder, + rocket::Outcome::*, + rocket::Request, + rocket::State, + ruma_client_api::error::Error, + std::ops::Deref, + std::{ + convert::{TryFrom, TryInto}, + fmt, + io::{Cursor, Read}, + }, }; const MESSAGE_LIMIT: u64 = 65535; +/// This struct converts rocket requests into ruma structs by converting them into http requests +/// first. pub struct Ruma { body: T, headers: http::HeaderMap, } + impl>>> FromDataSimple for Ruma where T::Error: fmt::Debug, { type Error = (); - fn from_data(request: &Request, data: Data) -> Outcome { + fn from_data(request: &Request, data: rocket::Data) -> Outcome { let mut http_request = http::Request::builder() .uri(request.uri().to_string()) .method(&*request.method().to_string()); @@ -43,7 +46,13 @@ where log::info!("{:?}", http_request); match T::try_from(http_request) { - Ok(t) => Success(Ruma { body: t, headers }), + Ok(t) => { + //if T::METADATA.requires_authentication { + //let data = request.guard::>(); + // TODO: auth + //} + Success(Ruma { body: t, headers }) + } Err(e) => { log::error!("{:?}", e); Failure((Status::InternalServerError, ())) @@ -69,6 +78,7 @@ impl fmt::Debug for Ruma { } } +/// This struct converts ruma responses into rocket http responses. 
pub struct MatrixResult(pub std::result::Result); impl>>> TryInto>> for MatrixResult { type Error = T::Error; From 73e04e71d7715dda62197ae67267cbdeef0b32c0 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 29 Mar 2020 13:48:44 +0200 Subject: [PATCH 0009/1727] Start work on event creation --- Cargo.lock | 2 ++ Cargo.toml | 2 ++ src/data.rs | 3 +++ src/main.rs | 23 ++++++++++++++++++++--- src/utils.rs | 9 +++++++++ 5 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 91c2441..b194092 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -489,11 +489,13 @@ version = "0.1.0" dependencies = [ "directories", "http", + "js_int", "log 0.4.8", "pretty_env_logger", "rocket", "ruma-api", "ruma-client-api", + "ruma-events", "ruma-identifiers", "sled", ] diff --git a/Cargo.toml b/Cargo.toml index 62322d7..b693378 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,3 +16,5 @@ sled = "0.31.0" directories = "2.0.2" ruma-identifiers = "0.14.1" ruma-api = "0.15.0-dev.1" +ruma-events = "0.17.0" +js_int = "0.1.3" diff --git a/src/data.rs b/src/data.rs index 52fe7af..de063ab 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,4 +1,5 @@ use directories::ProjectDirs; +use ruma_events::collections::all::RoomEvent; use ruma_identifiers::UserId; pub struct Data(sled::Db); @@ -36,4 +37,6 @@ impl Data { .insert(user_id.to_string(), &*password.unwrap_or_default()) .unwrap(); } + + pub fn room_event_add(&self, room_event: &RoomEvent) {} } diff --git a/src/main.rs b/src/main.rs index ba95319..8b488d5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,7 @@ #![feature(proc_macro_hygiene, decl_macro)] mod data; mod ruma_wrapper; +mod utils; pub use data::Data; use log::debug; @@ -13,8 +14,10 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_identifiers::UserId; +use ruma_events::room::message::MessageEvent; +use ruma_identifiers::{EventId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; +use std::convert::TryFrom; use std::{collections::HashMap, convert::TryInto}; #[get("/_matrix/client/versions")] @@ -153,14 +156,28 @@ fn join_room_by_id_route( data = "" )] fn create_message_event_route( + data: State, _room_id: String, _event_type: String, _txn_id: String, body: Ruma, ) -> MatrixResult { - dbg!(body); + dbg!(&body); + if let Ok(content) = body.data.clone().into_result() { + data.room_event_add( + &MessageEvent { + content, + event_id: EventId::try_from("$randomeventid:localhost").unwrap(), + origin_server_ts: utils::millis_since_unix_epoch(), + room_id: Some(body.room_id.clone()), + sender: UserId::try_from("@TODO:localhost").unwrap(), + unsigned: None, + } + .into(), + ); + } MatrixResult(Ok(create_message_event::Response { - event_id: "$randomeventid".try_into().unwrap(), + event_id: "$randomeventid:localhost".try_into().unwrap(), })) } diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 0000000..2905088 --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,9 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +pub fn millis_since_unix_epoch() -> js_int::UInt { + (SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u32) + .into() +} From 1183105f15306a2594f09c7280b958119e985693 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Mar 2020 01:29:47 +0100 Subject: [PATCH 0010/1727] Make Endpoint bound work for ruma_wrapper::Ruma --- src/main.rs | 2 +- src/ruma_wrapper.rs | 37 +++++++++++++++++++++++++------------ 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/main.rs 
b/src/main.rs index 8b488d5..0097109 100644 --- a/src/main.rs +++ b/src/main.rs @@ -160,7 +160,7 @@ fn create_message_event_route( _room_id: String, _event_type: String, _txn_id: String, - body: Ruma, + body: Ruma, ) -> MatrixResult { dbg!(&body); if let Ok(content) = body.data.clone().into_result() { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index ceaec3d..5b71925 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -5,6 +5,10 @@ use { rocket::Outcome::*, rocket::Request, rocket::State, + ruma_api::{ + error::{FromHttpRequestError, FromHttpResponseError}, + Endpoint, Outgoing, + }, ruma_client_api::error::Error, std::ops::Deref, std::{ @@ -18,14 +22,20 @@ const MESSAGE_LIMIT: u64 = 65535; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { - body: T, +pub struct Ruma { + body: T::Incoming, headers: http::HeaderMap, } -impl>>> FromDataSimple for Ruma +impl FromDataSimple for Ruma where - T::Error: fmt::Debug, + // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. + // See https://github.com/rust-lang/rust/issues/54149 + ::Incoming: TryFrom>, Error = FromHttpRequestError>, + ::Incoming: TryFrom< + http::Response>, + Error = FromHttpResponseError<::ResponseError>, + >, { type Error = (); @@ -45,12 +55,12 @@ where let headers = http_request.headers().clone(); log::info!("{:?}", http_request); - match T::try_from(http_request) { + match T::Incoming::try_from(http_request) { Ok(t) => { - //if T::METADATA.requires_authentication { - //let data = request.guard::>(); - // TODO: auth - //} + if T::METADATA.requires_authentication { + let data = request.guard::>(); + // TODO: auth + } Success(Ruma { body: t, headers }) } Err(e) => { @@ -61,15 +71,18 @@ where } } -impl Deref for Ruma { - type Target = T; +impl Deref for Ruma { + type Target = T::Incoming; fn deref(&self) -> &Self::Target { &self.body } } -impl fmt::Debug for Ruma { +impl fmt::Debug for Ruma +where + T::Incoming: fmt::Debug, +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Ruma") .field("body", &self.body) From 533260edd84e18c1c773d43b6fd71d2e99032ac0 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 29 Mar 2020 21:05:20 +0200 Subject: [PATCH 0011/1727] Add auth --- Cargo.lock | 26 ++++---- Cargo.toml | 7 +- src/data.rs | 110 ++++++++++++++++++++++++++++--- src/main.rs | 154 +++++++++++++++++++++++++++++++------------- src/ruma_wrapper.rs | 53 ++++++++------- src/utils.rs | 18 ++++++ 6 files changed, 274 insertions(+), 94 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b194092..1a1da0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -497,6 +497,7 @@ dependencies = [ "ruma-client-api", "ruma-events", "ruma-identifiers", + "serde_json", "sled", ] @@ -807,9 +808,9 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.15.0-dev.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44987d5fefcf801a6fb5c5843c17f876a53852fa07e5e4d99e0dca3670f1441a" +checksum = "120f0cd8625b842423ef3a63cabb8c309ca35a02de87cc4b377fb2cdd43f1fe5" dependencies = [ "http", "percent-encoding 2.1.0", @@ -824,9 +825,9 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.12.0-dev.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36931db94874129f9202f650d91447d8317b099bae1e12cdd5769ba4eced07d2" +checksum = "bfc523efc9c1ba7033ff17888551c1d378e12eae087cfbe4fcee938ff516759e" 
dependencies = [ "proc-macro2 1.0.9", "quote 1.0.3", @@ -835,8 +836,9 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma-client-api#57f5e8d66168a54128426c8e34b26fa78f739c3e" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a64241cdc0cff76038484451d7a5d2689f8ea4e59b6695cd3c8448af7bcc016" dependencies = [ "http", "js_int", @@ -851,9 +853,9 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11951235b25c72a82eb988aabf5af23cae883562665e0cb73954ffe4ae81f11c" +checksum = "80e34bfc20462f18d7f0beb6f1863db62d29438f2dcf390b625e9b20696cb2b3" dependencies = [ "js_int", "ruma-events-macros", @@ -864,9 +866,9 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "962d93056619ed61826a9d8872c863560e4892ff6a69b70f593baa5ae8b19dc8" +checksum = "ff95b6b4480c570db471b490b35ad70add5470651654e75faf0b97052b4f29e1" dependencies = [ "proc-macro2 1.0.9", "quote 1.0.3", @@ -994,9 +996,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02044a6a92866fd61624b3db4d2c9dccc2feabbc6be490b87611bf285edbac55" +checksum = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index b693378..3adca54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,12 +9,13 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api" } +ruma-client-api = "0.7.0" pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" directories = "2.0.2" ruma-identifiers = "0.14.1" -ruma-api = "0.15.0-dev.1" -ruma-events = "0.17.0" +ruma-api = "0.15.0" +ruma-events = "0.18.0" js_int = "0.1.3" +serde_json = "1.0.50" diff --git a/src/data.rs b/src/data.rs index de063ab..9a0a9c2 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,16 +1,18 @@ +use crate::utils; use directories::ProjectDirs; use ruma_events::collections::all::RoomEvent; use ruma_identifiers::UserId; +use std::convert::TryInto; + +const USERID_PASSWORD: &str = "userid_password"; +const USERID_DEVICEIDS: &str = "userid_deviceids"; +const DEVICEID_TOKEN: &str = "deviceid_token"; +const TOKEN_USERID: &str = "token_userid"; pub struct Data(sled::Db); impl Data { - pub fn set_hostname(&self, hostname: &str) { - self.0.insert("hostname", hostname).unwrap(); - } - pub fn hostname(&self) -> String { - String::from_utf8(self.0.get("hostname").unwrap().unwrap().to_vec()).unwrap() - } + /// Load an existing database or create a new one. pub fn load_or_create() -> Self { Data( sled::open( @@ -22,21 +24,109 @@ impl Data { ) } + /// Set the hostname of the server. Warning: Hostname changes will likely break things. + pub fn set_hostname(&self, hostname: &str) { + self.0.insert("hostname", hostname).unwrap(); + } + + /// Get the hostname of the server. + pub fn hostname(&self) -> String { + utils::bytes_to_string(&self.0.get("hostname").unwrap().unwrap()) + } + + /// Check if a user has an account by looking for an assigned password. 
pub fn user_exists(&self, user_id: &UserId) -> bool { self.0 - .open_tree("username_password") + .open_tree(USERID_PASSWORD) .unwrap() .contains_key(user_id.to_string()) .unwrap() } - pub fn user_add(&self, user_id: UserId, password: Option) { + /// Create a new user account by assigning them a password. + pub fn user_add(&self, user_id: &UserId, password: Option) { self.0 - .open_tree("username_password") + .open_tree(USERID_PASSWORD) .unwrap() .insert(user_id.to_string(), &*password.unwrap_or_default()) .unwrap(); } - pub fn room_event_add(&self, room_event: &RoomEvent) {} + /// Find out which user an access token belongs to. + pub fn user_from_token(&self, token: &str) -> Option { + self.0 + .open_tree(TOKEN_USERID) + .unwrap() + .get(token) + .unwrap() + .and_then(|bytes| (*utils::bytes_to_string(&bytes)).try_into().ok()) + } + + /// Checks if the given password is equal to the one in the database. + pub fn password_get(&self, user_id: &UserId) -> Option { + self.0 + .open_tree(USERID_PASSWORD) + .unwrap() + .get(user_id.to_string()) + .unwrap() + .map(|bytes| utils::bytes_to_string(&bytes)) + } + + /// Add a new device to a user. + pub fn device_add(&self, user_id: &UserId, device_id: &str) { + self.0 + .open_tree(USERID_DEVICEIDS) + .unwrap() + .insert(user_id.to_string(), device_id) + .unwrap(); + } + + /// Replace the access token of one device. + pub fn token_replace(&self, user_id: &UserId, device_id: &String, token: String) { + // Make sure the device id belongs to the user + debug_assert!(self + .0 + .open_tree(USERID_DEVICEIDS) + .unwrap() + .get(&user_id.to_string()) // Does the user exist? + .unwrap() + .map(|bytes| utils::bytes_to_vec(&bytes)) + .filter(|devices| devices.contains(device_id)) // Does the user have that device? + .is_some()); + + // Remove old token + if let Some(old_token) = self + .0 + .open_tree(DEVICEID_TOKEN) + .unwrap() + .get(device_id) + .unwrap() + { + self.0 + .open_tree(TOKEN_USERID) + .unwrap() + .remove(old_token) + .unwrap(); + // It will be removed from DEVICEID_TOKEN by the insert later + } + + // Assign token to device_id + self.0 + .open_tree(DEVICEID_TOKEN) + .unwrap() + .insert(device_id, &*token) + .unwrap(); + + // Assign token to user + self.0 + .open_tree(TOKEN_USERID) + .unwrap() + .insert(token, &*user_id.to_string()) + .unwrap(); + } + + /// Create a new room event. 
+ pub fn room_event_add(&self, _room_event: &RoomEvent) { + todo!(); + } } diff --git a/src/main.rs b/src/main.rs index 0097109..7cb7c67 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,9 +14,10 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::room::message::MessageEvent; +use ruma_events::{room::message::MessageEvent, EventResult}; use ruma_identifiers::{EventId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; +use serde_json::map::Map; use std::convert::TryFrom; use std::{collections::HashMap, convert::TryInto}; @@ -41,6 +42,7 @@ fn register_route( data: State, body: Ruma, ) -> MatrixResult { + // Validate user id let user_id: UserId = match (*format!( "@{}:{}", body.username.clone().unwrap_or("randomname".to_owned()), @@ -59,6 +61,7 @@ fn register_route( Ok(user_id) => user_id, }; + // Check if username is creative enough if data.user_exists(&user_id) { debug!("ID already taken"); return MatrixResult(Err(Error { @@ -68,68 +71,115 @@ fn register_route( })); } - data.user_add(user_id.clone(), body.password.clone()); + // Create user + data.user_add(&user_id, body.password.clone()); + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or_else(|| "TODO:randomdeviceid".to_owned()); + + // Add device + data.device_add(&user_id, &device_id); + + // Generate new token for the device + let token = "TODO:randomtoken".to_owned(); + data.token_replace(&user_id, &device_id, token.clone()); MatrixResult(Ok(register::Response { - access_token: "randomtoken".to_owned(), + access_token: token, home_server: data.hostname(), user_id, - device_id: body.device_id.clone().unwrap_or("randomid".to_owned()), + device_id, })) } #[post("/_matrix/client/r0/login", data = "")] fn login_route(data: State, body: Ruma) -> MatrixResult { - let username = if let login::UserInfo::MatrixId(mut username) = body.user.clone() { - if !username.contains(':') { - username = format!("@{}:{}", username, data.hostname()); - } - if let Ok(user_id) = (*username).try_into() { - if !data.user_exists(&user_id) { - debug!("Userid does not exist. Can't log in."); + // Validate login method + let user_id = + if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = + (body.user.clone(), body.login_info.clone()) + { + if !username.contains(':') { + username = format!("@{}:{}", username, data.hostname()); + } + if let Ok(user_id) = (*username).try_into() { + if !data.user_exists(&user_id) {} + + // Check password + if let Some(correct_password) = data.password_get(&user_id) { + if password == correct_password { + // Success! + user_id + } else { + debug!("Invalid password."); + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + })); + } + } else { + debug!("UserId does not exist (has no assigned password). 
Can't log in."); + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + })); + } + } else { + debug!("Invalid UserId."); return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "UserId not found.".to_owned(), + kind: ErrorKind::Unknown, + message: "Bad login type.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); } - user_id } else { - debug!("Invalid UserId."); + debug!("Bad login type"); return MatrixResult(Err(Error { kind: ErrorKind::Unknown, message: "Bad login type.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); - } - } else { - debug!("Bad login type"); - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Bad login type.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - }; + }; + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or("TODO:randomdeviceid".to_owned()); + + // Add device (TODO: We might not want to call it when using an existing device) + data.device_add(&user_id, &device_id); + + // Generate a new token for the device + let token = "TODO:randomtoken".to_owned(); + data.token_replace(&user_id, &device_id, token.clone()); return MatrixResult(Ok(login::Response { - user_id: username.try_into().unwrap(), // Unwrap is okay because the user is already registered - access_token: "randomtoken".to_owned(), - home_server: Some("localhost".to_owned()), - device_id: body.device_id.clone().unwrap_or("randomid".to_owned()), + user_id, + access_token: token, + home_server: Some(data.hostname()), + device_id, well_known: None, })); } #[get("/_matrix/client/r0/directory/room/")] fn get_alias_route(room_alias: String) -> MatrixResult { + // TODO let room_id = match &*room_alias { "#room:localhost" => "!xclkjvdlfj:localhost", _ => { + debug!("Room not found."); return MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room not found.".to_owned(), status_code: http::StatusCode::NOT_FOUND, - })) + })); } } .try_into() @@ -146,6 +196,7 @@ fn join_room_by_id_route( _room_id: String, body: Ruma, ) -> MatrixResult { + // TODO MatrixResult(Ok(join_room_by_id::Response { room_id: body.room_id.clone(), })) @@ -162,23 +213,34 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { - dbg!(&body); - if let Ok(content) = body.data.clone().into_result() { - data.room_event_add( - &MessageEvent { - content, - event_id: EventId::try_from("$randomeventid:localhost").unwrap(), - origin_server_ts: utils::millis_since_unix_epoch(), - room_id: Some(body.room_id.clone()), - sender: UserId::try_from("@TODO:localhost").unwrap(), - unsigned: None, - } - .into(), - ); - } - MatrixResult(Ok(create_message_event::Response { - event_id: "$randomeventid:localhost".try_into().unwrap(), - })) + // Check if content is valid + let content = match body.data.clone() { + EventResult::Ok(content) => content, + EventResult::Err(_) => { + debug!("No content."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "No content.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + }; + + let event_id = EventId::try_from("$TODOrandomeventid:localhost").unwrap(); + + data.room_event_add( + &MessageEvent { + content, + event_id: event_id.clone(), + origin_server_ts: utils::millis_since_unix_epoch(), + room_id: Some(body.room_id.clone()), + sender: body.user_id.expect("user is authenticated"), + unsigned: Map::default(), + } + 
.into(), + ); + + MatrixResult(Ok(create_message_event::Response { event_id })) } fn main() { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 5b71925..0b42ceb 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -10,10 +10,10 @@ use { Endpoint, Outgoing, }, ruma_client_api::error::Error, + ruma_identifiers::UserId, std::ops::Deref, std::{ convert::{TryFrom, TryInto}, - fmt, io::{Cursor, Read}, }, }; @@ -22,9 +22,10 @@ const MESSAGE_LIMIT: u64 = 65535; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. +#[derive(Debug)] pub struct Ruma { body: T::Incoming, - headers: http::HeaderMap, + pub user_id: Option, } impl FromDataSimple for Ruma @@ -37,9 +38,34 @@ where Error = FromHttpResponseError<::ResponseError>, >, { - type Error = (); + type Error = (); // TODO: Better error handling fn from_data(request: &Request, data: rocket::Data) -> Outcome { + let user_id = if T::METADATA.requires_authentication { + let data = request.guard::>().unwrap(); + + // Get token from header or query value + let token = match request + .headers() + .get_one("Authorization") + .map(|s| s.to_owned()) + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) + { + // TODO: M_MISSING_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(token) => token, + }; + + // Check if token is valid + match data.user_from_token(&token) { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(user_id) => Some(user_id), + } + } else { + None + }; + let mut http_request = http::Request::builder() .uri(request.uri().to_string()) .method(&*request.method().to_string()); @@ -52,17 +78,10 @@ where handle.read_to_end(&mut body).unwrap(); let http_request = http_request.body(body).unwrap(); - let headers = http_request.headers().clone(); log::info!("{:?}", http_request); match T::Incoming::try_from(http_request) { - Ok(t) => { - if T::METADATA.requires_authentication { - let data = request.guard::>(); - // TODO: auth - } - Success(Ruma { body: t, headers }) - } + Ok(t) => Success(Ruma { body: t, user_id }), Err(e) => { log::error!("{:?}", e); Failure((Status::InternalServerError, ())) @@ -79,18 +98,6 @@ impl Deref for Ruma { } } -impl fmt::Debug for Ruma -where - T::Incoming: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Ruma") - .field("body", &self.body) - .field("headers", &self.headers) - .finish() - } -} - /// This struct converts ruma responses into rocket http responses. 
pub struct MatrixResult(pub std::result::Result); impl>>> TryInto>> for MatrixResult { diff --git a/src/utils.rs b/src/utils.rs index 2905088..fd7b4cb 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -7,3 +7,21 @@ pub fn millis_since_unix_epoch() -> js_int::UInt { .as_millis() as u32) .into() } + +pub fn bytes_to_string(bytes: &[u8]) -> String { + String::from_utf8(bytes.to_vec()).expect("convert bytes to string") +} + +pub fn vec_to_bytes(vec: Vec) -> Vec { + vec.into_iter() + .map(|string| string.into_bytes()) + .collect::>>() + .join(&0) +} + +pub fn bytes_to_vec(bytes: &[u8]) -> Vec { + bytes + .split(|&b| b == 0) + .map(|bytes_string| bytes_to_string(bytes_string)) + .collect::>() +} From b508b4d1e7f55281e2561d50dcffcf8bf9134ab7 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 30 Mar 2020 00:10:15 +0200 Subject: [PATCH 0012/1727] Start work on message events --- src/data.rs | 8 +++++--- src/main.rs | 31 ++++++++++--------------------- src/ruma_wrapper.rs | 13 ++++++++++--- 3 files changed, 25 insertions(+), 27 deletions(-) diff --git a/src/data.rs b/src/data.rs index 9a0a9c2..0fa24d4 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,7 +1,8 @@ use crate::utils; use directories::ProjectDirs; -use ruma_events::collections::all::RoomEvent; -use ruma_identifiers::UserId; +use log::debug; +use ruma_events::collections::all::Event; +use ruma_identifiers::{EventId, RoomId, UserId}; use std::convert::TryInto; const USERID_PASSWORD: &str = "userid_password"; @@ -126,7 +127,8 @@ impl Data { } /// Create a new room event. - pub fn room_event_add(&self, _room_event: &RoomEvent) { + pub fn event_add(&self, event: &Event, room_id: &RoomId, event_id: &EventId) { + debug!("{}", serde_json::to_string(event).unwrap()); todo!(); } } diff --git a/src/main.rs b/src/main.rs index 7cb7c67..06f7ca3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,7 +14,8 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::{room::message::MessageEvent, EventResult}; +use ruma_events::collections::all::Event; +use ruma_events::room::message::MessageEvent; use ruma_identifiers::{EventId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::map::Map; @@ -213,31 +214,19 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { - // Check if content is valid - let content = match body.data.clone() { - EventResult::Ok(content) => content, - EventResult::Err(_) => { - debug!("No content."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "No content.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - }; - + // Generate event id let event_id = EventId::try_from("$TODOrandomeventid:localhost").unwrap(); - - data.room_event_add( - &MessageEvent { - content, + data.event_add( + &Event::RoomMessage(MessageEvent { + content: body.data.clone().into_result().unwrap(), event_id: event_id.clone(), origin_server_ts: utils::millis_since_unix_epoch(), room_id: Some(body.room_id.clone()), - sender: body.user_id.expect("user is authenticated"), + sender: body.user_id.clone().expect("user is authenticated"), unsigned: Map::default(), - } - .into(), + }), + &body.room_id, + &event_id, ); MatrixResult(Ok(create_message_event::Response { event_id })) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0b42ceb..e898137 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -26,6 +26,7 @@ const MESSAGE_LIMIT: u64 = 65535; pub struct Ruma { body: T::Incoming, pub user_id: Option, + pub json_body: serde_json::Value, } 
impl FromDataSimple for Ruma @@ -77,11 +78,17 @@ where let mut body = Vec::new(); handle.read_to_end(&mut body).unwrap(); - let http_request = http_request.body(body).unwrap(); - + let http_request = http_request.body(body.clone()).unwrap(); log::info!("{:?}", http_request); + match T::Incoming::try_from(http_request) { - Ok(t) => Success(Ruma { body: t, user_id }), + Ok(t) => Success(Ruma { + body: t, + user_id, + // TODO: Can we avoid parsing it again? + json_body: serde_json::from_slice(&body) + .expect("Ruma already parsed it successfuly"), + }), Err(e) => { log::error!("{:?}", e); Failure((Status::InternalServerError, ())) From dba6c466674d2e6dd462178a249fe45bde98d5a2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 30 Mar 2020 13:46:18 +0200 Subject: [PATCH 0013/1727] Use sled::Tree::prefix_search for deviceids --- rustfmt.toml | 1 + src/data.rs | 133 +++++++++++++++++++++--------------------------- src/database.rs | 117 ++++++++++++++++++++++++++++++++++++++++++ src/main.rs | 26 ++++++---- src/utils.rs | 32 ++++++------ 5 files changed, 207 insertions(+), 102 deletions(-) create mode 100644 rustfmt.toml create mode 100644 src/database.rs diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..7d2cf54 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1 @@ +merge_imports = true diff --git a/src/data.rs b/src/data.rs index 0fa24d4..b7b9845 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,134 +1,115 @@ -use crate::utils; -use directories::ProjectDirs; -use log::debug; +use crate::{utils, Database}; use ruma_events::collections::all::Event; use ruma_identifiers::{EventId, RoomId, UserId}; use std::convert::TryInto; -const USERID_PASSWORD: &str = "userid_password"; -const USERID_DEVICEIDS: &str = "userid_deviceids"; -const DEVICEID_TOKEN: &str = "deviceid_token"; -const TOKEN_USERID: &str = "token_userid"; - -pub struct Data(sled::Db); +pub struct Data { + hostname: String, + db: Database, +} impl Data { /// Load an existing database or create a new one. - pub fn load_or_create() -> Self { - Data( - sled::open( - ProjectDirs::from("xyz", "koesters", "matrixserver") - .unwrap() - .data_dir(), - ) - .unwrap(), - ) - } - - /// Set the hostname of the server. Warning: Hostname changes will likely break things. - pub fn set_hostname(&self, hostname: &str) { - self.0.insert("hostname", hostname).unwrap(); + pub fn load_or_create(hostname: &str) -> Self { + Self { + hostname: hostname.to_owned(), + db: Database::load_or_create(hostname), + } } /// Get the hostname of the server. - pub fn hostname(&self) -> String { - utils::bytes_to_string(&self.0.get("hostname").unwrap().unwrap()) + pub fn hostname(&self) -> &str { + &self.hostname } /// Check if a user has an account by looking for an assigned password. pub fn user_exists(&self, user_id: &UserId) -> bool { - self.0 - .open_tree(USERID_PASSWORD) - .unwrap() + self.db + .userid_password .contains_key(user_id.to_string()) .unwrap() } /// Create a new user account by assigning them a password. pub fn user_add(&self, user_id: &UserId, password: Option) { - self.0 - .open_tree(USERID_PASSWORD) - .unwrap() + self.db + .userid_password .insert(user_id.to_string(), &*password.unwrap_or_default()) .unwrap(); } /// Find out which user an access token belongs to. 
pub fn user_from_token(&self, token: &str) -> Option { - self.0 - .open_tree(TOKEN_USERID) - .unwrap() + self.db + .token_userid .get(token) .unwrap() - .and_then(|bytes| (*utils::bytes_to_string(&bytes)).try_into().ok()) + .and_then(|bytes| (*utils::string_from_bytes(&bytes)).try_into().ok()) } /// Checks if the given password is equal to the one in the database. pub fn password_get(&self, user_id: &UserId) -> Option { - self.0 - .open_tree(USERID_PASSWORD) - .unwrap() + self.db + .userid_password .get(user_id.to_string()) .unwrap() - .map(|bytes| utils::bytes_to_string(&bytes)) + .map(|bytes| utils::string_from_bytes(&bytes)) } /// Add a new device to a user. pub fn device_add(&self, user_id: &UserId, device_id: &str) { - self.0 - .open_tree(USERID_DEVICEIDS) - .unwrap() - .insert(user_id.to_string(), device_id) - .unwrap(); + if self + .db + .userid_deviceids + .get_iter(&user_id.to_string().as_bytes()) + .filter_map(|item| item.ok()) + .map(|(_key, value)| value) + .all(|device| device != device_id) + { + self.db + .userid_deviceids + .add(user_id.to_string().as_bytes(), device_id.into()); + } } /// Replace the access token of one device. pub fn token_replace(&self, user_id: &UserId, device_id: &String, token: String) { // Make sure the device id belongs to the user debug_assert!(self - .0 - .open_tree(USERID_DEVICEIDS) - .unwrap() - .get(&user_id.to_string()) // Does the user exist? - .unwrap() - .map(|bytes| utils::bytes_to_vec(&bytes)) - .filter(|devices| devices.contains(device_id)) // Does the user have that device? - .is_some()); + .db + .userid_deviceids + .get_iter(&user_id.to_string().as_bytes()) + .filter_map(|item| item.ok()) + .map(|(_key, value)| value) + .any(|device| device == device_id.as_bytes())); // Does the user have that device? // Remove old token - if let Some(old_token) = self - .0 - .open_tree(DEVICEID_TOKEN) - .unwrap() - .get(device_id) - .unwrap() - { - self.0 - .open_tree(TOKEN_USERID) - .unwrap() - .remove(old_token) - .unwrap(); - // It will be removed from DEVICEID_TOKEN by the insert later + if let Some(old_token) = self.db.deviceid_token.get(device_id).unwrap() { + self.db.token_userid.remove(old_token).unwrap(); + // It will be removed from deviceid_token by the insert later } // Assign token to device_id - self.0 - .open_tree(DEVICEID_TOKEN) - .unwrap() - .insert(device_id, &*token) - .unwrap(); + self.db.deviceid_token.insert(device_id, &*token).unwrap(); // Assign token to user - self.0 - .open_tree(TOKEN_USERID) - .unwrap() + self.db + .token_userid .insert(token, &*user_id.to_string()) .unwrap(); } /// Create a new room event. - pub fn event_add(&self, event: &Event, room_id: &RoomId, event_id: &EventId) { - debug!("{}", serde_json::to_string(event).unwrap()); - todo!(); + pub fn event_add(&self, room_id: &RoomId, event_id: &EventId, event: &Event) { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.extend_from_slice(event_id.to_string().as_bytes()); + self.db + .roomid_eventid_event + .insert(&key, &*serde_json::to_string(event).unwrap()) + .unwrap(); + } + + pub fn debug(&self) { + self.db.debug(); } } diff --git a/src/database.rs b/src/database.rs new file mode 100644 index 0000000..34ed72b --- /dev/null +++ b/src/database.rs @@ -0,0 +1,117 @@ +use crate::utils; +use directories::ProjectDirs; +use sled::IVec; + +pub struct MultiValue(sled::Tree); + +impl MultiValue { + /// Get an iterator over all values. + pub fn iter_all(&self) -> sled::Iter { + self.0.iter() + } + + /// Get an iterator over all values of this id. 
+ pub fn get_iter(&self, id: &[u8]) -> sled::Iter { + // Data keys start with d + let mut key = vec![b'd']; + key.extend_from_slice(id.as_ref()); + key.push(0xff); // Add delimiter so we don't find usernames starting with the same id + + self.0.scan_prefix(key) + } + + /// Add another value to the id. + pub fn add(&self, id: &[u8], value: IVec) { + // The new value will need a new index. We store the last used index in 'n' + id + let mut count_key: Vec = vec![b'n']; + count_key.extend_from_slice(id.as_ref()); + + // Increment the last index and use that + let index = self + .0 + .update_and_fetch(&count_key, utils::increment) + .unwrap() + .unwrap(); + + // Data keys start with d + let mut key = vec![b'd']; + key.extend_from_slice(id.as_ref()); + key.push(0xff); + key.extend_from_slice(&index); + + self.0.insert(key, value).unwrap(); + } +} + +pub struct Database { + pub userid_password: sled::Tree, + pub userid_deviceids: MultiValue, + pub deviceid_token: sled::Tree, + pub token_userid: sled::Tree, + pub roomid_eventid_event: sled::Tree, + _db: sled::Db, +} + +impl Database { + /// Load an existing database or create a new one. + pub fn load_or_create(hostname: &str) -> Self { + let mut path = ProjectDirs::from("xyz", "koesters", "matrixserver") + .unwrap() + .data_dir() + .to_path_buf(); + path.push(hostname); + let db = sled::open(&path).unwrap(); + + Self { + userid_password: db.open_tree("userid_password").unwrap(), + userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), + deviceid_token: db.open_tree("deviceid_token").unwrap(), + token_userid: db.open_tree("token_userid").unwrap(), + roomid_eventid_event: db.open_tree("roomid_eventid_event").unwrap(), + _db: db, + } + } + + pub fn debug(&self) { + println!("# UserId -> Password:"); + for (k, v) in self.userid_password.iter().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("# UserId -> DeviceIds:"); + for (k, v) in self.userid_deviceids.iter_all().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("# DeviceId -> Token:"); + for (k, v) in self.deviceid_token.iter().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("# Token -> UserId:"); + for (k, v) in self.token_userid.iter().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("# RoomId + EventId -> Event:"); + for (k, v) in self.roomid_eventid_event.iter().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + } +} diff --git a/src/main.rs b/src/main.rs index 06f7ca3..cf1f37f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,12 @@ #![feature(proc_macro_hygiene, decl_macro)] mod data; +mod database; mod ruma_wrapper; mod utils; pub use data::Data; +pub use database::Database; + use log::debug; use rocket::{get, post, put, routes, State}; use ruma_client_api::{ @@ -14,13 +17,14 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::collections::all::Event; -use ruma_events::room::message::MessageEvent; +use ruma_events::{collections::all::Event, room::message::MessageEvent}; use ruma_identifiers::{EventId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::map::Map; -use std::convert::TryFrom; -use std::{collections::HashMap, 
convert::TryInto}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, +}; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { @@ -90,7 +94,7 @@ fn register_route( MatrixResult(Ok(register::Response { access_token: token, - home_server: data.hostname(), + home_server: data.hostname().to_owned(), user_id, device_id, })) @@ -153,7 +157,7 @@ fn login_route(data: State, body: Ruma) -> MatrixResult, body: Ruma) -> MatrixResult js_int::UInt { (SystemTime::now() @@ -8,20 +11,19 @@ pub fn millis_since_unix_epoch() -> js_int::UInt { .into() } -pub fn bytes_to_string(bytes: &[u8]) -> String { - String::from_utf8(bytes.to_vec()).expect("convert bytes to string") +pub fn increment(old: Option<&[u8]>) -> Option> { + let number = match old { + Some(bytes) => { + let array: [u8; 8] = bytes.try_into().unwrap(); + let number = u64::from_be_bytes(array); + number + 1 + } + None => 0, + }; + + Some(number.to_be_bytes().to_vec()) } -pub fn vec_to_bytes(vec: Vec) -> Vec { - vec.into_iter() - .map(|string| string.into_bytes()) - .collect::>>() - .join(&0) -} - -pub fn bytes_to_vec(bytes: &[u8]) -> Vec { - bytes - .split(|&b| b == 0) - .map(|bytes_string| bytes_to_string(bytes_string)) - .collect::>() +pub fn string_from_bytes(bytes: &[u8]) -> String { + String::from_utf8(bytes.to_vec()).expect("bytes are valid utf8") } From 22cca206ba50c7eb44bbcd0e330ff3adf117db8f Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 30 Mar 2020 15:38:25 +0200 Subject: [PATCH 0014/1727] Todo: ruma signatures This commit will get force pushed away later --- Cargo.toml | 1 + src/main.rs | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3adca54..13b38f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,3 +19,4 @@ ruma-api = "0.15.0" ruma-events = "0.18.0" js_int = "0.1.3" serde_json = "1.0.50" +ruma-signatures = "0.5.0" diff --git a/src/main.rs b/src/main.rs index cf1f37f..6cf5477 100644 --- a/src/main.rs +++ b/src/main.rs @@ -218,20 +218,21 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { + // Construct event + let event = Event::RoomMessage(MessageEvent { + content: body.data.clone().into_result().unwrap(), + event_id: event_id.clone(), + origin_server_ts: utils::millis_since_unix_epoch(), + room_id: Some(body.room_id.clone()), + sender: body.user_id.clone().expect("user is authenticated"), + unsigned: Map::default(), + }); + // Generate event id + dbg!(ruma_signatures::reference_hash(event)); + let event_id = EventId::try_from("$TODOrandomeventid:localhost").unwrap(); - data.event_add( - &body.room_id, - &event_id, - &Event::RoomMessage(MessageEvent { - content: body.data.clone().into_result().unwrap(), - event_id: event_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch(), - room_id: Some(body.room_id.clone()), - sender: body.user_id.clone().expect("user is authenticated"), - unsigned: Map::default(), - }), - ); + data.event_add(&body.room_id, &event_id, &event); MatrixResult(Ok(create_message_event::Response { event_id })) } From fa3226898ccbd31475bf3f2467e729f3e19dcdfa Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 3 Apr 2020 17:27:08 +0200 Subject: [PATCH 0015/1727] feat: save pdus PDUs are saved in a pduid -> pdus map. roomid -> pduleaves keeps track of the leaves of the event graph and eventid -> pduid maps event ids to pdus. 
--- Cargo.lock | 26 ++++++++ Cargo.toml | 3 +- Rocket.toml | 8 ++- src/data.rs | 154 ++++++++++++++++++++++++++++++++++++++++++-- src/database.rs | 42 +++++++++--- src/main.rs | 89 ++++++++++++++++++++----- src/ruma_wrapper.rs | 36 +++++------ src/utils.rs | 5 ++ 8 files changed, 309 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a1da0c..19276fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -496,7 +496,9 @@ dependencies = [ "ruma-api", "ruma-client-api", "ruma-events", + "ruma-federation-api", "ruma-identifiers", + "ruma-signatures", "serde_json", "sled", ] @@ -875,6 +877,19 @@ dependencies = [ "syn 1.0.17", ] +[[package]] +name = "ruma-federation-api" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2a73a23c4d9243be91e101e1942f4d9cd913ef5156d756bafdfe2409ee23d72" +dependencies = [ + "js_int", + "ruma-events", + "ruma-identifiers", + "serde", + "serde_json", +] + [[package]] name = "ruma-identifiers" version = "0.14.1" @@ -886,6 +901,17 @@ dependencies = [ "url 2.1.1", ] +[[package]] +name = "ruma-signatures" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma-signatures.git#a08fc01c0bce63f913e1b4b1a673169d59738b63" +dependencies = [ + "base64 0.11.0", + "ring", + "serde_json", + "untrusted", +] + [[package]] name = "rust-argon2" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index 13b38f3..e01ca0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,4 +19,5 @@ ruma-api = "0.15.0" ruma-events = "0.18.0" js_int = "0.1.3" serde_json = "1.0.50" -ruma-signatures = "0.5.0" +ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } +ruma-federation-api = "0.0.1" diff --git a/Rocket.toml b/Rocket.toml index d18ee97..f55e107 100644 --- a/Rocket.toml +++ b/Rocket.toml @@ -1,3 +1,7 @@ +[global] +address = "0.0.0.0" +port = 14004 + #[global.tls] -#certs = "/etc/ssl/certs/ssl-cert-snakeoil.pem" -#key = "/etc/ssl/private/ssl-cert-snakeoil.key" +#certs = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/fullchain.pem" +#key = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/privkey.pem" diff --git a/src/data.rs b/src/data.rs index b7b9845..28b8d05 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,7 +1,9 @@ use crate::{utils, Database}; +use log::debug; use ruma_events::collections::all::Event; +use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; pub struct Data { hostname: String, @@ -99,14 +101,152 @@ impl Data { .unwrap(); } - /// Create a new room event. 
- pub fn event_add(&self, room_id: &RoomId, event_id: &EventId, event: &Event) { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.extend_from_slice(event_id.to_string().as_bytes()); + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db - .roomid_eventid_event - .insert(&key, &*serde_json::to_string(event).unwrap()) + .eventid_pduid + .get(event_id.to_string().as_bytes()) + .unwrap() + .map(|pdu_id| { + serde_json::from_slice( + &self + .db + .pduid_pdus + .get(pdu_id) + .unwrap() + .expect("eventid_pduid in db is valid"), + ) + .expect("pdu is valid") + }) + } + + // TODO: Make sure this isn't called twice in parallel + pub fn pdu_leaves_replace(&self, room_id: &RoomId, event_id: &EventId) -> Vec { + let event_ids = self + .db + .roomid_pduleaves + .get_iter(room_id.to_string().as_bytes()) + .values() + .map(|pdu_id| { + EventId::try_from(&*utils::string_from_bytes(&pdu_id.unwrap())) + .expect("pdu leaves are valid event ids") + }) + .collect(); + + self.db + .roomid_pduleaves + .clear(room_id.to_string().as_bytes()); + + self.db.roomid_pduleaves.add( + &room_id.to_string().as_bytes(), + (*event_id.to_string()).into(), + ); + + event_ids + } + + /// Add a persisted data unit from this homeserver + pub fn pdu_append(&self, event_id: &EventId, room_id: &RoomId, event: Event) { + // prev_events are the leaves of the current graph. This method removes all leaves from the + // room and replaces them with our event + let prev_events = self.pdu_leaves_replace(room_id, event_id); + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .map(|event_id| { + self.pdu_get(event_id) + .expect("pdu in prev_events is valid") + .depth + .into() + }) + .max() + .unwrap_or(0_u64) + + 1; + + let mut pdu_value = serde_json::to_value(&event).expect("message event can be serialized"); + let pdu = pdu_value.as_object_mut().unwrap(); + + pdu.insert( + "prev_events".to_owned(), + prev_events + .iter() + .map(|id| id.to_string()) + .collect::>() + .into(), + ); + pdu.insert("origin".to_owned(), self.hostname().into()); + pdu.insert("depth".to_owned(), depth.into()); + pdu.insert("auth_events".to_owned(), vec!["$auth_eventid"].into()); // TODO + pdu.insert( + "hashes".to_owned(), + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".into(), + ); // TODO + pdu.insert("signatures".to_owned(), "signature".into()); // TODO + + // The new value will need a new index. We store the last used index in 'n' + id + let mut count_key: Vec = vec![b'n']; + count_key.extend_from_slice(&room_id.to_string().as_bytes()); + + // Increment the last index and use that + let index = utils::u64_from_bytes( + &self + .db + .pduid_pdus + .update_and_fetch(&count_key, utils::increment) + .unwrap() + .unwrap(), + ); + + let mut pdu_id = vec![b'd']; + pdu_id.extend_from_slice(room_id.to_string().as_bytes()); + + pdu_id.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + pdu_id.extend_from_slice(index.to_string().as_bytes()); + + self.db + .pduid_pdus + .insert(&pdu_id, dbg!(&*serde_json::to_string(&pdu).unwrap())) .unwrap(); + + self.db + .eventid_pduid + .insert(event_id.to_string(), pdu_id.clone()) + .unwrap(); + } + + /// Returns a vector of all PDUs. 
+ pub fn pdus_all(&self) -> Vec { + self.pdus_since( + self.db + .eventid_pduid + .iter() + .values() + .next() + .unwrap() + .map(|key| utils::string_from_bytes(&key)) + .expect("there should be at least one pdu"), + ) + } + + /// Returns a vector of all events that happened after the event with id `since`. + pub fn pdus_since(&self, since: String) -> Vec { + let mut pdus = Vec::new(); + + if let Some(room_id) = since.rsplitn(2, '#').nth(1) { + let mut current = since.clone(); + + while let Some((key, value)) = self.db.pduid_pdus.get_gt(current).unwrap() { + if key.starts_with(&room_id.to_string().as_bytes()) { + current = utils::string_from_bytes(&key); + } else { + break; + } + pdus.push(serde_json::from_slice(&value).expect("pdu is valid")); + } + } else { + debug!("event at `since` not found"); + } + pdus } pub fn debug(&self) { diff --git a/src/database.rs b/src/database.rs index 34ed72b..b08dd3c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -15,11 +15,17 @@ impl MultiValue { // Data keys start with d let mut key = vec![b'd']; key.extend_from_slice(id.as_ref()); - key.push(0xff); // Add delimiter so we don't find usernames starting with the same id + key.push(0xff); // Add delimiter so we don't find keys starting with the same id self.0.scan_prefix(key) } + pub fn clear(&self, id: &[u8]) { + for key in self.get_iter(id).keys() { + self.0.remove(key.unwrap()).unwrap(); + } + } + /// Add another value to the id. pub fn add(&self, id: &[u8], value: IVec) { // The new value will need a new index. We store the last used index in 'n' + id @@ -48,7 +54,9 @@ pub struct Database { pub userid_deviceids: MultiValue, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, - pub roomid_eventid_event: sled::Tree, + pub pduid_pdus: sled::Tree, + pub roomid_pduleaves: MultiValue, + pub eventid_pduid: sled::Tree, _db: sled::Db, } @@ -67,7 +75,9 @@ impl Database { userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), - roomid_eventid_event: db.open_tree("roomid_eventid_event").unwrap(), + pduid_pdus: db.open_tree("pduid_pdus").unwrap(), + roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), + eventid_pduid: db.open_tree("eventid_pduid").unwrap(), _db: db, } } @@ -81,7 +91,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# UserId -> DeviceIds:"); + println!("\n# UserId -> DeviceIds:"); for (k, v) in self.userid_deviceids.iter_all().map(|r| r.unwrap()) { println!( "{} -> {}", @@ -89,7 +99,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# DeviceId -> Token:"); + println!("\n# DeviceId -> Token:"); for (k, v) in self.deviceid_token.iter().map(|r| r.unwrap()) { println!( "{} -> {}", @@ -97,7 +107,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# Token -> UserId:"); + println!("\n# Token -> UserId:"); for (k, v) in self.token_userid.iter().map(|r| r.unwrap()) { println!( "{} -> {}", @@ -105,8 +115,24 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# RoomId + EventId -> Event:"); - for (k, v) in self.roomid_eventid_event.iter().map(|r| r.unwrap()) { + println!("\n# RoomId -> PDU leaves:"); + for (k, v) in self.roomid_pduleaves.iter_all().map(|r| r.unwrap()) { + println!( + "{} -> {}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# PDU Id -> PDUs:"); + for (k, v) in self.pduid_pdus.iter().map(|r| r.unwrap()) { + println!( + "{} -> {}", + 
String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# EventId -> PDU Id:"); + for (k, v) in self.eventid_pduid.iter().map(|r| r.unwrap()) { println!( "{} -> {}", String::from_utf8_lossy(&k), diff --git a/src/main.rs b/src/main.rs index 6cf5477..c8b1cc8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -8,12 +8,12 @@ pub use data::Data; pub use database::Database; use log::debug; -use rocket::{get, post, put, routes, State}; +use rocket::{get, options, post, put, routes, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ account::register, alias::get_alias, membership::join_room_by_id, - message::create_message_event, session::login, + message::create_message_event, session::login, sync::sync_events, }, unversioned::get_supported_versions, }; @@ -24,20 +24,13 @@ use serde_json::map::Map; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, + path::PathBuf, }; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { MatrixResult(Ok(get_supported_versions::Response { - versions: vec![ - "r0.0.1".to_owned(), - "r0.1.0".to_owned(), - "r0.2.0".to_owned(), - "r0.3.0".to_owned(), - "r0.4.0".to_owned(), - "r0.5.0".to_owned(), - "r0.6.0".to_owned(), - ], + versions: vec!["r0.6.0".to_owned()], unstable_features: HashMap::new(), })) } @@ -219,9 +212,9 @@ fn create_message_event_route( body: Ruma, ) -> MatrixResult { // Construct event - let event = Event::RoomMessage(MessageEvent { + let mut event = Event::RoomMessage(MessageEvent { content: body.data.clone().into_result().unwrap(), - event_id: event_id.clone(), + event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), origin_server_ts: utils::millis_since_unix_epoch(), room_id: Some(body.room_id.clone()), sender: body.user_id.clone().expect("user is authenticated"), @@ -229,18 +222,78 @@ fn create_message_event_route( }); // Generate event id - dbg!(ruma_signatures::reference_hash(event)); + let event_id = EventId::try_from(&*format!( + "${}", + ruma_signatures::reference_hash(&serde_json::to_value(&event).unwrap()) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are correct"); - let event_id = EventId::try_from("$TODOrandomeventid:localhost").unwrap(); - data.event_add(&body.room_id, &event_id, &event); + // Insert event id + if let Event::RoomMessage(message) = &mut event { + message.event_id = event_id.clone(); + } + + // Add PDU to the graph + data.pdu_append(&event_id, &body.room_id, event); MatrixResult(Ok(create_message_event::Response { event_id })) } +#[get("/_matrix/client/r0/sync")] +fn sync_route(data: State) -> MatrixResult { + let pdus = data.pdus_all(); + let mut joined_rooms = HashMap::new(); + joined_rooms.insert( + "!roomid:localhost".try_into().unwrap(), + sync_events::JoinedRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + summary: sync_events::RoomSummary { + heroes: Vec::new(), + joined_member_count: None, + invited_member_count: None, + }, + unread_notifications: sync_events::UnreadNotificationsCount { + highlight_count: None, + notification_count: None, + }, + timeline: sync_events::Timeline { + limited: None, + prev_batch: None, + events: todo!(), + }, + state: sync_events::State { events: Vec::new() }, + ephemeral: sync_events::Ephemeral { events: Vec::new() }, + }, + ); + + MatrixResult(Ok(sync_events::Response { + next_batch: String::new(), + rooms: sync_events::Rooms { + leave: Default::default(), + join: joined_rooms, + invite: Default::default(), + }, + presence: 
sync_events::Presence { events: Vec::new() }, + device_lists: Default::default(), + device_one_time_keys_count: Default::default(), + to_device: sync_events::ToDevice { events: Vec::new() }, + })) +} + +#[options("/<_segments..>")] +fn options_route(_segments: PathBuf) -> MatrixResult { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + fn main() { // Log info by default if let Err(_) = std::env::var("RUST_LOG") { - std::env::set_var("RUST_LOG", "info"); + std::env::set_var("RUST_LOG", "matrixserver=debug,info"); } pretty_env_logger::init(); @@ -257,6 +310,8 @@ fn main() { get_alias_route, join_room_by_id_route, create_message_event_route, + sync_route, + options_route, ], ) .manage(data) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index e898137..eda648e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,28 +1,26 @@ -use { - rocket::data::{FromDataSimple, Outcome}, - rocket::http::Status, - rocket::response::Responder, - rocket::Outcome::*, - rocket::Request, - rocket::State, - ruma_api::{ - error::{FromHttpRequestError, FromHttpResponseError}, - Endpoint, Outgoing, - }, - ruma_client_api::error::Error, - ruma_identifiers::UserId, - std::ops::Deref, - std::{ - convert::{TryFrom, TryInto}, - io::{Cursor, Read}, - }, +use rocket::{ + data::{FromDataSimple, Outcome}, + http::Status, + response::Responder, + Outcome::*, + Request, State, +}; +use ruma_api::{ + error::{FromHttpRequestError, FromHttpResponseError}, + Endpoint, Outgoing, +}; +use ruma_client_api::error::Error; +use ruma_identifiers::UserId; +use std::{ + convert::{TryFrom, TryInto}, + io::{Cursor, Read}, + ops::Deref, }; const MESSAGE_LIMIT: u64 = 65535; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -#[derive(Debug)] pub struct Ruma { body: T::Incoming, pub user_id: Option, diff --git a/src/utils.rs b/src/utils.rs index f2ef6c4..19f3f02 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -24,6 +24,11 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } +pub fn u64_from_bytes(bytes: &[u8]) -> u64 { + let array: [u8; 8] = bytes.try_into().expect("bytes are valid u64"); + u64::from_be_bytes(array) +} + pub fn string_from_bytes(bytes: &[u8]) -> String { String::from_utf8(bytes.to_vec()).expect("bytes are valid utf8") } From 2855d1acdf0dd4c5156d7bf6fab4afd80cb6b386 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 3 Apr 2020 17:53:06 +0200 Subject: [PATCH 0016/1727] Add README.md --- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..bda2435 --- /dev/null +++ b/README.md @@ -0,0 +1,18 @@ +# Matrix Homeserver in Rust + +#### Goals + +A Matrix Homeserver that's faster than others. + +#### What is it build on? 
+ +- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and responses that can be (de)serialized +- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with good performance +- [Rocket](https://rocket.rs): A flexible web framework + +#### Roadmap + +- [x] Register, login, authentication tokens +- [ ] Create room messages +- [ ] Sync room messages +- [ ] Join rooms, lookup room ids From f9cfede2a8508c1b804461cd09d63b8c14440295 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 3 Apr 2020 21:17:27 +0200 Subject: [PATCH 0017/1727] fix: deserialize sync event body --- Cargo.lock | 66 ++++++++++++++++++++++++--------------------- Cargo.toml | 4 +-- src/main.rs | 7 +++-- src/ruma_wrapper.rs | 7 +++-- 4 files changed, 48 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19276fe..c4c6419 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -69,6 +69,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +[[package]] +name = "base64" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" + [[package]] name = "binascii" version = "0.1.4" @@ -208,7 +214,7 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -313,9 +319,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" dependencies = [ "libc", ] @@ -425,9 +431,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4937d8b6672d78c0dd9689d671e3faf2c9744fa36cbcb437e22cc8b1bd59ac25" +checksum = "f97dc0d13f3bf0369f00504ad806499490045d6f93524a6ead4081c380703a2f" dependencies = [ "serde", ] @@ -619,9 +625,9 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfdefadc3d57ca21cf17990a28ef4c0f7c61383a28cb7604cf4a18e6ede1420" +checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" [[package]] name = "proc-macro2" @@ -634,9 +640,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" dependencies = [ "unicode-xid 0.2.0", ] @@ -662,7 +668,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", ] [[package]] @@ -831,16 +837,16 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfc523efc9c1ba7033ff17888551c1d378e12eae087cfbe4fcee938ff516759e" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 
1.0.3", "syn 1.0.17", ] [[package]] name = "ruma-client-api" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a64241cdc0cff76038484451d7a5d2689f8ea4e59b6695cd3c8448af7bcc016" +checksum = "b390a86d36e87cc56111802bfd281eed1095f5097a89677101d0271d8e6b1306" dependencies = [ "http", "js_int", @@ -872,7 +878,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff95b6b4480c570db471b490b35ad70add5470651654e75faf0b97052b4f29e1" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -903,10 +909,10 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma-signatures.git#a08fc01c0bce63f913e1b4b1a673169d59738b63" +version = "0.6.0-dev.1" +source = "git+https://github.com/ruma/ruma-signatures.git#9947e94cb28daea456904197f7cd754a8e48797a" dependencies = [ - "base64 0.11.0", + "base64 0.12.0", "ring", "serde_json", "untrusted", @@ -952,7 +958,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -1015,7 +1021,7 @@ version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -1079,9 +1085,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4edf667ea8f60afc06d6aeec079d20d5800351109addec1faea678a8663da4e1" +checksum = "ee531c64ad0f80d289504bd32fb047f42a9e957cda584276ab96eb587e9abac3" [[package]] name = "state" @@ -1109,7 +1115,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "serde", "serde_derive", @@ -1123,7 +1129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "serde", "serde_derive", @@ -1154,7 +1160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -1176,7 +1182,7 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "unicode-xid 0.2.0", ] @@ -1242,7 +1248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e987cfe0537f575b5fc99909de6185f6c19c3ad8889e2275e686a873d0869ba1" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", ] @@ -1379,7 +1385,7 @@ dependencies = [ "bumpalo", "lazy_static", "log 0.4.8", - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 
1.0.3", "syn 1.0.17", "wasm-bindgen-shared", @@ -1401,7 +1407,7 @@ version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" dependencies = [ - "proc-macro2 1.0.9", + "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", "wasm-bindgen-backend", @@ -1461,9 +1467,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" dependencies = [ "winapi", ] diff --git a/Cargo.toml b/Cargo.toml index e01ca0d..b76a9c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", features = ["tls"] } http = "0.2.1" -ruma-client-api = "0.7.0" +ruma-client-api = "0.7.1" pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" @@ -17,7 +17,7 @@ directories = "2.0.2" ruma-identifiers = "0.14.1" ruma-api = "0.15.0" ruma-events = "0.18.0" -js_int = "0.1.3" +js_int = "0.1.4" serde_json = "1.0.50" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = "0.0.1" diff --git a/src/main.rs b/src/main.rs index c8b1cc8..8af476d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -240,8 +240,11 @@ fn create_message_event_route( MatrixResult(Ok(create_message_event::Response { event_id })) } -#[get("/_matrix/client/r0/sync")] -fn sync_route(data: State) -> MatrixResult { +#[get("/_matrix/client/r0/sync", data = "")] +fn sync_route( + data: State, + body: Ruma, +) -> MatrixResult { let pdus = data.pdus_all(); let mut joined_rooms = HashMap::new(); joined_rooms.insert( diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index eda648e..507f620 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -84,8 +84,11 @@ where body: t, user_id, // TODO: Can we avoid parsing it again? 
- json_body: serde_json::from_slice(&body) - .expect("Ruma already parsed it successfuly"), + json_body: if !body.is_empty() { + serde_json::from_slice(&body).expect("Ruma already parsed it successfully") + } else { + serde_json::Value::default() + }, }), Err(e) => { log::error!("{:?}", e); From 8b8381bcc030c844557a758690ec6e1cbc0a9ac4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 4 Apr 2020 11:53:37 +0200 Subject: [PATCH 0018/1727] New PduEvent struct --- Cargo.lock | 1 + Cargo.toml | 1 + src/data.rs | 55 +++++++++++++++++++++++-------------------- src/main.rs | 68 ++++++++++++++++++++++++++++++----------------------- src/pdu.rs | 42 +++++++++++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 55 deletions(-) create mode 100644 src/pdu.rs diff --git a/Cargo.lock b/Cargo.lock index c4c6419..ced79c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,6 +505,7 @@ dependencies = [ "ruma-federation-api", "ruma-identifiers", "ruma-signatures", + "serde", "serde_json", "sled", ] diff --git a/Cargo.toml b/Cargo.toml index b76a9c9..d259707 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,3 +21,4 @@ js_int = "0.1.4" serde_json = "1.0.50" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = "0.0.1" +serde = "1.0.105" diff --git a/src/data.rs b/src/data.rs index 28b8d05..f0917ff 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,9 +1,12 @@ -use crate::{utils, Database}; +use crate::{utils, Database, PduEvent}; use log::debug; -use ruma_events::collections::all::Event; +use ruma_events::{room::message::MessageEvent, EventType}; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, +}; pub struct Data { hostname: String, @@ -145,7 +148,7 @@ impl Data { } /// Add a persisted data unit from this homeserver - pub fn pdu_append(&self, event_id: &EventId, room_id: &RoomId, event: Event) { + pub fn pdu_append_message(&self, event_id: &EventId, room_id: &RoomId, event: MessageEvent) { // prev_events are the leaves of the current graph. 
This method removes all leaves from the // room and replaces them with our event let prev_events = self.pdu_leaves_replace(room_id, event_id); @@ -163,25 +166,25 @@ impl Data { .unwrap_or(0_u64) + 1; - let mut pdu_value = serde_json::to_value(&event).expect("message event can be serialized"); - let pdu = pdu_value.as_object_mut().unwrap(); - - pdu.insert( - "prev_events".to_owned(), - prev_events - .iter() - .map(|id| id.to_string()) - .collect::>() - .into(), - ); - pdu.insert("origin".to_owned(), self.hostname().into()); - pdu.insert("depth".to_owned(), depth.into()); - pdu.insert("auth_events".to_owned(), vec!["$auth_eventid"].into()); // TODO - pdu.insert( - "hashes".to_owned(), - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".into(), - ); // TODO - pdu.insert("signatures".to_owned(), "signature".into()); // TODO + let pdu = PduEvent { + event_id: event_id.clone(), + room_id: room_id.clone(), + sender: event.sender, + origin: self.hostname.clone(), + origin_server_ts: event.origin_server_ts, + kind: EventType::RoomMessage, + content: serde_json::to_value(event.content).unwrap(), + state_key: None, + prev_events, + depth: depth.try_into().unwrap(), + auth_events: Vec::new(), + redacts: None, + unsigned: Default::default(), + hashes: ruma_federation_api::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: HashMap::new(), + }; // The new value will need a new index. We store the last used index in 'n' + id let mut count_key: Vec = vec![b'n']; @@ -205,7 +208,7 @@ impl Data { self.db .pduid_pdus - .insert(&pdu_id, dbg!(&*serde_json::to_string(&pdu).unwrap())) + .insert(&pdu_id, &*serde_json::to_string(&pdu).unwrap()) .unwrap(); self.db @@ -215,7 +218,7 @@ impl Data { } /// Returns a vector of all PDUs. - pub fn pdus_all(&self) -> Vec { + pub fn pdus_all(&self) -> Vec { self.pdus_since( self.db .eventid_pduid @@ -229,7 +232,7 @@ impl Data { } /// Returns a vector of all events that happened after the event with id `since`. 
- pub fn pdus_since(&self, since: String) -> Vec { + pub fn pdus_since(&self, since: String) -> Vec { let mut pdus = Vec::new(); if let Some(room_id) = since.rsplitn(2, '#').nth(1) { diff --git a/src/main.rs b/src/main.rs index 8af476d..44ff413 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,13 +1,15 @@ #![feature(proc_macro_hygiene, decl_macro)] mod data; mod database; +mod pdu; mod ruma_wrapper; mod utils; pub use data::Data; pub use database::Database; +pub use pdu::PduEvent; -use log::debug; +use log::{debug, error}; use rocket::{get, options, post, put, routes, State}; use ruma_client_api::{ error::{Error, ErrorKind}, @@ -17,7 +19,7 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::{collections::all::Event, room::message::MessageEvent}; +use ruma_events::{collections::all::RoomEvent, room::message::MessageEvent, EventResult}; use ruma_identifiers::{EventId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::map::Map; @@ -212,7 +214,7 @@ fn create_message_event_route( body: Ruma, ) -> MatrixResult { // Construct event - let mut event = Event::RoomMessage(MessageEvent { + let mut event = RoomEvent::RoomMessage(MessageEvent { content: body.data.clone().into_result().unwrap(), event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), origin_server_ts: utils::millis_since_unix_epoch(), @@ -230,13 +232,13 @@ fn create_message_event_route( .expect("ruma's reference hashes are correct"); // Insert event id - if let Event::RoomMessage(message) = &mut event { + if let RoomEvent::RoomMessage(message) = &mut event { message.event_id = event_id.clone(); + data.pdu_append_message(&event_id, &body.room_id, message.clone()); + } else { + error!("only roommessages are handled currently"); } - // Add PDU to the graph - data.pdu_append(&event_id, &body.room_id, event); - MatrixResult(Ok(create_message_event::Response { event_id })) } @@ -245,30 +247,38 @@ fn sync_route( data: State, body: Ruma, ) -> MatrixResult { - let pdus = data.pdus_all(); let mut joined_rooms = HashMap::new(); - joined_rooms.insert( - "!roomid:localhost".try_into().unwrap(), - sync_events::JoinedRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - summary: sync_events::RoomSummary { - heroes: Vec::new(), - joined_member_count: None, - invited_member_count: None, + { + let pdus = data.pdus_all(); + let mut room_events = Vec::new(); + + for pdu in pdus { + room_events.push(pdu.to_room_event()); + } + + joined_rooms.insert( + "!roomid:localhost".try_into().unwrap(), + sync_events::JoinedRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + summary: sync_events::RoomSummary { + heroes: Vec::new(), + joined_member_count: None, + invited_member_count: None, + }, + unread_notifications: sync_events::UnreadNotificationsCount { + highlight_count: None, + notification_count: None, + }, + timeline: sync_events::Timeline { + limited: None, + prev_batch: None, + events: room_events, + }, + state: sync_events::State { events: Vec::new() }, + ephemeral: sync_events::Ephemeral { events: Vec::new() }, }, - unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, - notification_count: None, - }, - timeline: sync_events::Timeline { - limited: None, - prev_batch: None, - events: todo!(), - }, - state: sync_events::State { events: Vec::new() }, - ephemeral: sync_events::Ephemeral { events: Vec::new() }, - }, - ); + ); + } MatrixResult(Ok(sync_events::Response { next_batch: String::new(), diff --git a/src/pdu.rs b/src/pdu.rs 
new file mode 100644 index 0000000..588242b --- /dev/null +++ b/src/pdu.rs @@ -0,0 +1,42 @@ +use js_int::UInt; +use ruma_events::{collections::all::RoomEvent, EventResult, EventType}; +use ruma_federation_api::EventHash; +use ruma_identifiers::{EventId, RoomId, UserId}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Deserialize, Serialize)] +pub struct PduEvent { + pub event_id: EventId, + pub room_id: RoomId, + pub sender: UserId, + pub origin: String, + pub origin_server_ts: UInt, + #[serde(rename = "type")] + pub kind: EventType, + pub content: serde_json::Value, + #[serde(skip_serializing_if = "Option::is_none")] + pub state_key: Option, + pub prev_events: Vec, + pub depth: UInt, + pub auth_events: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub redacts: Option, + #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] + pub unsigned: serde_json::Map, + pub hashes: EventHash, + pub signatures: HashMap>, +} + +impl PduEvent { + pub fn to_room_event(&self) -> RoomEvent { + // Can only fail in rare circumstances that won't ever happen here, see + // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html + let json = serde_json::to_string(&self).unwrap(); + // EventResult's deserialize implementation always returns `Ok(...)` + serde_json::from_str::>(&json) + .unwrap() + .into_result() + .unwrap() + } +} From 884dc2867db1ad957a24477a0217a11721c3cef5 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 4 Apr 2020 20:50:01 +0200 Subject: [PATCH 0019/1727] Move to rocket's async branch --- Cargo.lock | 665 ++++++++++++++++++++++++++------------------ Cargo.toml | 5 +- src/main.rs | 7 +- src/ruma_wrapper.rs | 129 +++++---- 4 files changed, 479 insertions(+), 327 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ced79c6..4d22411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9,6 +9,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "arc-swap" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" + [[package]] name = "arrayref" version = "0.3.6" @@ -21,6 +27,17 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +[[package]] +name = "async-trait" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab5c215748dc1ad11a145359b1067107ae0f8ca5e99844fa64067ed5bf198e3" +dependencies = [ + "proc-macro2 1.0.10", + "quote 1.0.3", + "syn 1.0.17", +] + [[package]] name = "atty" version = "0.2.14" @@ -29,7 +46,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi", + "winapi 0.3.8", ] [[package]] @@ -39,20 +56,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] -name = "base-x" -version = "0.2.6" +name = "base16" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" - -[[package]] -name = "base64" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder", - "safemem", -] +checksum = 
"d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" [[package]] name = "base64" @@ -75,12 +82,6 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" -[[package]] -name = "binascii" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" - [[package]] name = "bitflags" version = "1.2.1" @@ -145,14 +146,13 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c60ef6d0bbf56ad2674249b6bb74f2c6aeb98b98dd57b5d3e37cace33011d69" +version = "0.12.0" +source = "git+https://github.com/SergioBenitez/cookie-rs?rev=e0f3e6c#e0f3e6c4daea108d55838c56da777b36898bd223" dependencies = [ - "base64 0.11.0", + "base64 0.10.1", "percent-encoding 2.1.0", "ring", - "time 0.2.9", + "time", ] [[package]] @@ -238,15 +238,9 @@ dependencies = [ "cfg-if", "libc", "redox_users", - "winapi", + "winapi 0.3.8", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "dtoa" version = "0.4.5" @@ -261,7 +255,7 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime", - "log 0.4.8", + "log", "regex", "termcolor", ] @@ -279,7 +273,114 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi", + "winapi 0.3.8", +] + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" + +[[package]] +name = "futures" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" + +[[package]] +name = "futures-executor" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" + +[[package]] +name = "futures-macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.10", + "quote 1.0.3", + "syn 1.0.17", +] + +[[package]] +name = "futures-sink" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" + +[[package]] +name = "futures-task" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" + +[[package]] +name = "futures-util" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", ] [[package]] @@ -303,10 +404,23 @@ dependencies = [ ] [[package]] -name = "glob" -version = "0.3.0" +name = "h2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "log", + "slab", + "tokio", + "tokio-util", +] [[package]] name = "heck" @@ -337,6 +451,16 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +dependencies = [ + "bytes", + "http", +] + [[package]] name = "httparse" version = "1.3.4" @@ -354,44 +478,25 @@ dependencies = [ [[package]] name = "hyper" -version = "0.10.16" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" +checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" dependencies = [ - "base64 0.9.3", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", "httparse", - "language-tags", - "log 0.3.9", - "mime", - "num_cpus", - "time 0.1.42", - "traitobject", - "typeable", - "unicase", - "url 1.7.2", -] - -[[package]] -name = "hyper-sync-rustls" -version = "0.3.0-rc.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53be239c980459955c0f0af3f13190ead511d7d4bdaeab8127c011b94d8558de" -dependencies = [ - "hyper", - "rustls", - "webpki", - "webpki-roots", -] - -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", + "itoa", + "log", + "pin-project", + "time", + "tokio", + "tower-service", + "want", ] [[package]] @@ -414,6 +519,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + [[package]] name = "itoa" version = "0.4.5" @@ -439,10 +553,14 @@ dependencies = [ ] [[package]] -name = "language-tags" +name = "kernel32-sys" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] [[package]] name = "lazy_static" @@ -465,15 +583,6 @@ dependencies = [ "scopeguard", ] -[[package]] -name = "log" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.8", -] - [[package]] name = "log" version = "0.4.8" @@ -496,7 +605,7 @@ dependencies = [ "directories", "http", "js_int", - "log 0.4.8", + "log", "pretty_env_logger", "rocket", "ruma-api", @@ -508,6 +617,7 @@ dependencies = [ "serde", "serde_json", "sled", + "tokio", ] [[package]] @@ -533,11 +643,61 @@ dependencies = [ [[package]] name = "mime" -version = "0.2.6" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "mio" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" dependencies = [ - "log 0.3.9", + "cfg-if", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio-uds" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +dependencies = [ + "iovec", + "libc", + "mio", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "net2" +version = "0.2.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +dependencies = [ + "cfg-if", + "libc", + "winapi 0.3.8", ] [[package]] @@ -571,7 +731,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "winapi", + "winapi 0.3.8", ] [[package]] @@ -608,6 +768,38 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pin-project" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" +dependencies = [ + "proc-macro2 1.0.10", + "quote 1.0.3", + "syn 1.0.17", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" + +[[package]] +name = "pin-utils" +version = "0.1.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -621,7 +813,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" dependencies = [ "env_logger", - "log 0.4.8", + "log", ] [[package]] @@ -630,6 +822,12 @@ version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" +[[package]] +name = "proc-macro-nested" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" + [[package]] name = "proc-macro2" version = "0.4.30" @@ -760,24 +958,29 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi", + "winapi 0.3.8", ] [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" +source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" dependencies = [ + "async-trait", "atty", - "binascii", - "log 0.4.8", + "base16", + "base64 0.11.0", + "futures", + "futures-util", + "log", "memchr", "num_cpus", "pear", "rocket_codegen", "rocket_http", "state", - "time 0.2.9", + "time", + "tokio", "toml", "version_check 0.9.1", "yansi 0.5.0", @@ -786,10 +989,9 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" +source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" dependencies = [ "devise", - "glob", "indexmap", "quote 1.0.3", "rocket_http", @@ -800,18 +1002,21 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git#06e146e7d18d7c4aab423d289090261f548ea69d" +source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" dependencies = [ "cookie", + "http", "hyper", - "hyper-sync-rustls", "indexmap", + "log", + "mime", "pear", "percent-encoding 1.0.1", - "rustls", "smallvec", "state", - "time 0.2.9", + "time", + "tokio", + "tokio-rustls", "unicode-xid 0.2.0", ] @@ -829,7 +1034,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "strum", - "url 2.1.1", + "url", ] [[package]] @@ -857,7 +1062,7 @@ dependencies = [ "serde", "serde_json", "strum", - "url 2.1.1", + "url", ] [[package]] @@ -905,7 +1110,7 @@ checksum = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" dependencies = [ "rand", "serde", - "url 2.1.1", + "url", ] [[package]] @@ -931,15 +1136,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - [[package]] name = "rustls" version = "0.16.0" @@ -947,35 +1143,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" dependencies = [ "base64 0.10.1", - "log 0.4.8", + "log", "ring", "sct", "webpki", ] -[[package]] -name = "rustversion" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" -dependencies = [ - "proc-macro2 1.0.10", - "quote 1.0.3", - "syn 1.0.17", -] - [[package]] name = "ryu" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "scopeguard" version = "1.1.0" @@ -992,35 +1171,20 @@ dependencies = [ "untrusted", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" +checksum = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" +checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1047,14 +1211,24 @@ dependencies = [ "dtoa", "itoa", "serde", - "url 2.1.1", + "url", ] [[package]] -name = "sha1" -version = "0.6.0" +name = "signal-hook-registry" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +dependencies = [ + "arc-swap", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" @@ -1068,7 +1242,7 @@ dependencies = [ "fs2", "fxhash", "libc", - "log 0.4.8", + "log", "parking_lot", ] @@ -1084,67 +1258,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "standback" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee531c64ad0f80d289504bd32fb047f42a9e957cda584276ab96eb587e9abac3" - [[package]] name = "state" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2 1.0.10", - "quote 1.0.3", - "serde", - "serde_derive", - "syn 1.0.17", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2 1.0.10", - "quote 1.0.3", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn 1.0.17", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - [[package]] name = "strum" version = "0.18.0" @@ -1214,44 +1333,55 @@ checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" dependencies = [ "libc", "redox_syscall", - "winapi", + "winapi 0.3.8", ] [[package]] -name = "time" -version = "0.2.9" +name = "tokio" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6329a7835505d46f5f3a9a2c237f8d6bf5ca6f0015decb3698ba57fcdbb609ba" +checksum = "ee5a0dd887e37d37390c13ff8ac830f992307fe30a1fff0ab8427af67211ba28" dependencies = [ - "cfg-if", + "bytes", + "fnv", + "futures-core", + "iovec", + "lazy_static", "libc", - "rustversion", - "standback", - "stdweb", - "time-macros", - "winapi", + "memchr", + "mio", + "mio-uds", + "num_cpus", + "pin-project-lite", + "signal-hook-registry", + "slab", + "winapi 0.3.8", ] [[package]] -name = "time-macros" -version = "0.1.0" +name = "tokio-rustls" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +checksum = "141afec0978abae6573065a48882c6bae44c5cc61db9b511ac4abf6a09bfd9cc" dependencies = [ - "proc-macro-hack", - "time-macros-impl", + "futures-core", + "rustls", + "tokio", + "webpki", ] [[package]] -name = "time-macros-impl" -version = "0.1.0" +name = "tokio-util" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987cfe0537f575b5fc99909de6185f6c19c3ad8889e2275e686a873d0869ba1" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "proc-macro-hack", - "proc-macro2 1.0.10", - "quote 1.0.3", - "syn 1.0.17", + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", ] [[package]] @@ -1264,25 +1394,16 @@ dependencies = [ ] [[package]] -name = "traitobject" -version = "0.1.0" +name = "tower-service" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" +checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] -name = "typeable" -version = "0.1.2" +name = "try-lock" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" - -[[package]] -name = "unicase" -version = "1.4.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] +checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "unicode-bidi" @@ -1326,24 +1447,13 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = [ - "idna 0.1.5", - "matches", - "percent-encoding 1.0.1", -] - [[package]] name = "url" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" dependencies = [ - "idna 0.2.0", + "idna", "matches", "percent-encoding 2.1.0", "serde", @@ -1361,6 +1471,16 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -1385,7 +1505,7 @@ checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.8", + "log", "proc-macro2 1.0.10", "quote 1.0.3", "syn 1.0.17", @@ -1442,13 +1562,10 @@ dependencies = [ ] [[package]] -name = "webpki-roots" -version = "0.18.0" +name = "winapi" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" -dependencies = [ - "webpki", -] +checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" @@ -1460,6 +1577,12 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -1472,7 +1595,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" dependencies = [ - "winapi", + "winapi 0.3.8", ] [[package]] @@ -1481,6 +1604,16 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + [[package]] name = "yansi" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index d259707..f61995b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", features = ["tls"] } 
+rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" ruma-client-api = "0.7.1" pretty_env_logger = "0.4.0" @@ -21,4 +21,5 @@ js_int = "0.1.4" serde_json = "1.0.50" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = "0.0.1" -serde = "1.0.105" +serde = "1.0.106" +tokio = "0.2.16" diff --git a/src/main.rs b/src/main.rs index 44ff413..491a1b7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -270,8 +270,8 @@ fn sync_route( notification_count: None, }, timeline: sync_events::Timeline { - limited: None, - prev_batch: None, + limited: Some(false), + prev_batch: Some("".to_owned()), events: room_events, }, state: sync_events::State { events: Vec::new() }, @@ -328,5 +328,6 @@ fn main() { ], ) .manage(data) - .launch(); + .launch() + .unwrap(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 507f620..b3c8fdf 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,7 +1,7 @@ use rocket::{ - data::{FromDataSimple, Outcome}, + data::{Data, FromData, FromDataFuture, Transform, Transformed, TransformFuture}, http::Status, - response::Responder, + response::{self, Responder}, Outcome::*, Request, State, }; @@ -13,9 +13,10 @@ use ruma_client_api::error::Error; use ruma_identifiers::UserId; use std::{ convert::{TryFrom, TryInto}, - io::{Cursor, Read}, + io::Cursor, ops::Deref, }; +use tokio::io::AsyncReadExt; const MESSAGE_LIMIT: u64 = 65535; @@ -27,7 +28,7 @@ pub struct Ruma { pub json_body: serde_json::Value, } -impl FromDataSimple for Ruma +impl<'a, T: Endpoint> FromData<'a> for Ruma where // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. // See https://github.com/rust-lang/rust/issues/54149 @@ -38,63 +39,76 @@ where >, { type Error = (); // TODO: Better error handling + type Owned = Data; + type Borrowed = Self::Owned; - fn from_data(request: &Request, data: rocket::Data) -> Outcome { - let user_id = if T::METADATA.requires_authentication { - let data = request.guard::>().unwrap(); + fn transform<'r>(_req: &'r Request, data: Data) -> TransformFuture<'r, Self::Owned, Self::Error> { + Box::pin(async move { Transform::Owned(Success(data)) }) + } - // Get token from header or query value - let token = match request - .headers() - .get_one("Authorization") - .map(|s| s.to_owned()) - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) - { - // TODO: M_MISSING_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some(token) => token, + fn from_data( + request: &'a Request, + outcome: Transformed<'a, Self>, + ) -> FromDataFuture<'a, Self, Self::Error> { + Box::pin(async move { + let data = rocket::try_outcome!(outcome.owned()); + + let user_id = if T::METADATA.requires_authentication { + let data = request.guard::>().await.unwrap(); + + // Get token from header or query value + let token = match request + .headers() + .get_one("Authorization") + .map(|s| s.to_owned()) + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) + { + // TODO: M_MISSING_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(token) => token, + }; + + // Check if token is valid + match data.user_from_token(&token) { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(user_id) => Some(user_id), + } + } else { + None }; - // Check if token is valid - match data.user_from_token(&token) { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), - 
Some(user_id) => Some(user_id), + let mut http_request = http::Request::builder() + .uri(request.uri().to_string()) + .method(&*request.method().to_string()); + for header in request.headers().iter() { + http_request = http_request.header(header.name.as_str(), &*header.value); } - } else { - None - }; - let mut http_request = http::Request::builder() - .uri(request.uri().to_string()) - .method(&*request.method().to_string()); - for header in request.headers().iter() { - http_request = http_request.header(header.name.as_str(), &*header.value); - } + let mut handle = data.open().take(MESSAGE_LIMIT); + let mut body = Vec::new(); + handle.read_to_end(&mut body).await.unwrap(); - let mut handle = data.open().take(MESSAGE_LIMIT); - let mut body = Vec::new(); - handle.read_to_end(&mut body).unwrap(); + let http_request = http_request.body(body.clone()).unwrap(); + log::info!("{:?}", http_request); - let http_request = http_request.body(body.clone()).unwrap(); - log::info!("{:?}", http_request); - - match T::Incoming::try_from(http_request) { - Ok(t) => Success(Ruma { - body: t, - user_id, - // TODO: Can we avoid parsing it again? - json_body: if !body.is_empty() { - serde_json::from_slice(&body).expect("Ruma already parsed it successfully") - } else { - serde_json::Value::default() - }, - }), - Err(e) => { - log::error!("{:?}", e); - Failure((Status::InternalServerError, ())) + match T::Incoming::try_from(http_request) { + Ok(t) => Success(Ruma { + body: t, + user_id, + // TODO: Can we avoid parsing it again? + json_body: if !body.is_empty() { + serde_json::from_slice(&body).expect("Ruma already parsed it successfully") + } else { + serde_json::Value::default() + }, + }), + Err(e) => { + log::error!("{:?}", e); + Failure((Status::InternalServerError, ())) + } } - } + }) } } @@ -108,7 +122,9 @@ impl Deref for Ruma { /// This struct converts ruma responses into rocket http responses. 
pub struct MatrixResult(pub std::result::Result); -impl>>> TryInto>> for MatrixResult { + +impl>>> TryInto>> for MatrixResult +{ type Error = T::Error; fn try_into(self) -> Result>, T::Error> { @@ -119,13 +135,14 @@ impl>>> TryInto>> for M } } -impl<'r, T: TryInto>>> Responder<'r> for MatrixResult { - fn respond_to(self, _: &Request) -> rocket::response::Result<'r> { +#[rocket::async_trait] +impl<'r, T: Send + TryInto>>> Responder<'r> for MatrixResult where T::Error: Send{ + async fn respond_to(self, _: &'r Request<'_>) -> response::Result<'r> { let http_response: Result, _> = self.try_into(); match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); - response.sized_body(Cursor::new(http_response.body().clone())); + response.sized_body(Cursor::new(http_response.body().clone())).await; for header in http_response.headers() { response From 4d4cff712015651acf9579df33c14999f876cfb1 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 5 Apr 2020 23:06:43 +0200 Subject: [PATCH 0020/1727] Allow all kinds of messages in /send --- src/data.rs | 53 ++++++++++++++++++++++++++++++++++++++--------------- src/main.rs | 45 +++++++++++++++++---------------------------- 2 files changed, 55 insertions(+), 43 deletions(-) diff --git a/src/data.rs b/src/data.rs index f0917ff..86a7aa9 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,6 +1,9 @@ use crate::{utils, Database, PduEvent}; use log::debug; -use ruma_events::{room::message::MessageEvent, EventType}; +use ruma_events::{ + room::message::{MessageEvent, MessageEventContent}, + EventType, +}; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; use std::{ @@ -122,8 +125,7 @@ impl Data { }) } - // TODO: Make sure this isn't called twice in parallel - pub fn pdu_leaves_replace(&self, room_id: &RoomId, event_id: &EventId) -> Vec { + pub fn pdu_leaves_get(&self, room_id: &RoomId) -> Vec { let event_ids = self .db .roomid_pduleaves @@ -135,6 +137,10 @@ impl Data { }) .collect(); + event_ids + } + + pub fn pdu_leaves_replace(&self, room_id: &RoomId, event_id: &EventId) { self.db .roomid_pduleaves .clear(room_id.to_string().as_bytes()); @@ -143,15 +149,20 @@ impl Data { &room_id.to_string().as_bytes(), (*event_id.to_string()).into(), ); - - event_ids } /// Add a persisted data unit from this homeserver - pub fn pdu_append_message(&self, event_id: &EventId, room_id: &RoomId, event: MessageEvent) { + pub fn pdu_append( + &self, + room_id: RoomId, + sender: UserId, + event_type: EventType, + content: MessageEventContent, + ) -> EventId { // prev_events are the leaves of the current graph. 
This method removes all leaves from the // room and replaces them with our event - let prev_events = self.pdu_leaves_replace(room_id, event_id); + // TODO: Make sure this isn't called twice in parallel + let prev_events = self.pdu_leaves_get(&room_id); // Our depth is the maximum depth of prev_events + 1 let depth = prev_events @@ -166,26 +177,36 @@ impl Data { .unwrap_or(0_u64) + 1; - let pdu = PduEvent { - event_id: event_id.clone(), + let mut pdu = PduEvent { + event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), room_id: room_id.clone(), - sender: event.sender, + sender: sender.clone(), origin: self.hostname.clone(), - origin_server_ts: event.origin_server_ts, - kind: EventType::RoomMessage, - content: serde_json::to_value(event.content).unwrap(), + origin_server_ts: utils::millis_since_unix_epoch(), + kind: event_type, + content: serde_json::to_value(content).expect("message content is valid json"), state_key: None, prev_events, depth: depth.try_into().unwrap(), auth_events: Vec::new(), redacts: None, - unsigned: Default::default(), + unsigned: Default::default(), // TODO hashes: ruma_federation_api::EventHash { sha256: "aaa".to_owned(), }, signatures: HashMap::new(), }; + // Generate event id + pdu.event_id = EventId::try_from(&*format!( + "${}", + ruma_signatures::reference_hash(&serde_json::to_value(&pdu).unwrap()) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are correct"); + + self.pdu_leaves_replace(&room_id, &pdu.event_id); + // The new value will need a new index. We store the last used index in 'n' + id let mut count_key: Vec = vec![b'n']; count_key.extend_from_slice(&room_id.to_string().as_bytes()); @@ -213,8 +234,10 @@ impl Data { self.db .eventid_pduid - .insert(event_id.to_string(), pdu_id.clone()) + .insert(pdu.event_id.to_string(), pdu_id.clone()) .unwrap(); + + pdu.event_id } /// Returns a vector of all PDUs. 
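Note on the counter bookkeeping in `pdu_append` above: the pattern is "store the last used index under a counter key, bump it atomically, and build the PDU key from the room id plus the new index". The following self-contained sketch summarizes that pattern. It is illustrative only: `next_pdu_id` is a hypothetical helper and is not part of this patch; it assumes sled's `Tree::update_and_fetch` and an `increment` function shaped like the one added to `utils.rs` (big-endian u64 stored as bytes).

use std::convert::TryInto;

// Same idea as utils::increment: read the old value as a big-endian u64 and
// return it incremented, starting at 1 when no counter exists yet.
fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old {
        Some(bytes) => {
            let array: [u8; 8] = bytes.try_into().ok()?;
            u64::from_be_bytes(array) + 1
        }
        None => 1,
    };
    Some(number.to_be_bytes().to_vec())
}

// Hypothetical helper (not in the patch): derive the next PDU id for a room.
fn next_pdu_id(tree: &sled::Tree, room_id: &str) -> sled::Result<Vec<u8>> {
    // 'n' + room_id stores the last index used for this room.
    let mut count_key = vec![b'n'];
    count_key.extend_from_slice(room_id.as_bytes());

    // update_and_fetch applies `increment` atomically and returns the new counter.
    let index = tree
        .update_and_fetch(count_key, increment)?
        .expect("increment always returns Some");

    // PDU id = room_id + delimiter + counter.
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(b'#');
    pdu_id.extend_from_slice(&index);
    Ok(pdu_id)
}

Encoding the counter as big-endian bytes (rather than a decimal string) keeps sled's lexicographic key order identical to numeric insertion order, so a prefix scan over a room's keys returns PDUs oldest-first; a later patch in this series switches the stored pdu_id suffix to exactly that encoding.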
diff --git a/src/main.rs b/src/main.rs index 491a1b7..5a8c066 100644 --- a/src/main.rs +++ b/src/main.rs @@ -213,39 +213,28 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { - // Construct event - let mut event = RoomEvent::RoomMessage(MessageEvent { - content: body.data.clone().into_result().unwrap(), - event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), - origin_server_ts: utils::millis_since_unix_epoch(), - room_id: Some(body.room_id.clone()), - sender: body.user_id.clone().expect("user is authenticated"), - unsigned: Map::default(), - }); - - // Generate event id - let event_id = EventId::try_from(&*format!( - "${}", - ruma_signatures::reference_hash(&serde_json::to_value(&event).unwrap()) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are correct"); - - // Insert event id - if let RoomEvent::RoomMessage(message) = &mut event { - message.event_id = event_id.clone(); - data.pdu_append_message(&event_id, &body.room_id, message.clone()); + if let Ok(content) = body.data.clone().into_result() { + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + content, + ); + MatrixResult(Ok(create_message_event::Response { event_id })) } else { - error!("only roommessages are handled currently"); + error!("No data found"); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) } - - MatrixResult(Ok(create_message_event::Response { event_id })) } -#[get("/_matrix/client/r0/sync", data = "")] +#[get("/_matrix/client/r0/sync", data = "<_body>")] fn sync_route( data: State, - body: Ruma, + _body: Ruma, ) -> MatrixResult { let mut joined_rooms = HashMap::new(); { @@ -298,7 +287,7 @@ fn sync_route( fn options_route(_segments: PathBuf) -> MatrixResult { MatrixResult(Err(Error { kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), + message: "This is the options route.".to_owned(), status_code: http::StatusCode::NOT_FOUND, })) } From 10bb96fcf7fe9da7f4e5001082348caf620cf3c0 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 6 Apr 2020 13:46:46 +0200 Subject: [PATCH 0021/1727] feat: room joining, room based /sync responses --- Cargo.lock | 9 +++--- Cargo.toml | 2 +- src/data.rs | 79 ++++++++++++++++++++++++++++------------------ src/database.rs | 38 ++++++++++++++++------ src/main.rs | 83 +++++++++++++++++++++++++++++++------------------ 5 files changed, 134 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d22411..2978456 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1050,9 +1050,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b390a86d36e87cc56111802bfd281eed1095f5097a89677101d0271d8e6b1306" +version = "0.7.2" +source = "git+https://github.com/ruma/ruma-client-api.git#fe92c2940a2db80509e9a9f162c0f68f3ec3d0a4" dependencies = [ "http", "js_int", @@ -1193,9 +1192,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867" +checksum = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index f61995b..631e6ee 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = "0.7.1" +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" diff --git a/src/data.rs b/src/data.rs index 86a7aa9..0467d22 100644 --- a/src/data.rs +++ b/src/data.rs @@ -107,6 +107,29 @@ impl Data { .unwrap(); } + pub fn room_join(&self, room_id: &RoomId, user_id: &UserId) { + self.db.userid_roomids.add( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + self.db.roomid_userids.add( + room_id.to_string().as_bytes(), + user_id.to_string().as_bytes().into(), + ); + } + + pub fn rooms_joined(&self, user_id: &UserId) -> Vec { + self.db + .userid_roomids + .get_iter(user_id.to_string().as_bytes()) + .values() + .map(|room_id| { + RoomId::try_from(&*utils::string_from_bytes(&room_id.unwrap())) + .expect("user joined valid room ids") + }) + .collect() + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid @@ -157,7 +180,7 @@ impl Data { room_id: RoomId, sender: UserId, event_type: EventType, - content: MessageEventContent, + content: serde_json::Value, ) -> EventId { // prev_events are the leaves of the current graph. This method removes all leaves from the // room and replaces them with our event @@ -184,7 +207,7 @@ impl Data { origin: self.hostname.clone(), origin_server_ts: utils::millis_since_unix_epoch(), kind: event_type, - content: serde_json::to_value(content).expect("message content is valid json"), + content, state_key: None, prev_events, depth: depth.try_into().unwrap(), @@ -207,9 +230,10 @@ impl Data { self.pdu_leaves_replace(&room_id, &pdu.event_id); - // The new value will need a new index. We store the last used index in 'n' + id - let mut count_key: Vec = vec![b'n']; - count_key.extend_from_slice(&room_id.to_string().as_bytes()); + // The new value will need a new index. We store the last used index in 'n' + // The count will go up regardless of the room_id + // This is also the next_batch/since value + let count_key: Vec = vec![b'n']; // Increment the last index and use that let index = utils::u64_from_bytes( @@ -225,7 +249,7 @@ impl Data { pdu_id.extend_from_slice(room_id.to_string().as_bytes()); pdu_id.push(b'#'); // Add delimiter so we don't find rooms starting with the same id - pdu_id.extend_from_slice(index.to_string().as_bytes()); + pdu_id.extend_from_slice(&index.to_be_bytes()); self.db .pduid_pdus @@ -240,37 +264,30 @@ impl Data { pdu.event_id } - /// Returns a vector of all PDUs. - pub fn pdus_all(&self) -> Vec { - self.pdus_since( - self.db - .eventid_pduid - .iter() - .values() - .next() - .unwrap() - .map(|key| utils::string_from_bytes(&key)) - .expect("there should be at least one pdu"), - ) + /// Returns a vector of all PDUs in a room. + pub fn pdus_all(&self, room_id: &RoomId) -> Vec { + self.pdus_since(room_id, "".to_owned()) } - /// Returns a vector of all events that happened after the event with id `since`. - pub fn pdus_since(&self, since: String) -> Vec { + /// Returns a vector of all events in a room that happened after the event with id `since`. 
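/// Implemented as a cursor walk: the cursor starts at the room prefix plus `since`,
/// `get_gt` fetches the next key, the cursor advances to that key, and the loop stops
/// as soon as a key no longer carries the room prefix.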
+ pub fn pdus_since(&self, room_id: &RoomId, since: String) -> Vec { let mut pdus = Vec::new(); - if let Some(room_id) = since.rsplitn(2, '#').nth(1) { - let mut current = since.clone(); + // Create the first part of the full pdu id + let mut pdu_id = vec![b'd']; + pdu_id.extend_from_slice(room_id.to_string().as_bytes()); + pdu_id.push(b'#'); // Add delimiter so we don't find rooms starting with the same id - while let Some((key, value)) = self.db.pduid_pdus.get_gt(current).unwrap() { - if key.starts_with(&room_id.to_string().as_bytes()) { - current = utils::string_from_bytes(&key); - } else { - break; - } - pdus.push(serde_json::from_slice(&value).expect("pdu is valid")); + let mut current = pdu_id.clone(); + current.extend_from_slice(since.as_bytes()); + + while let Some((key, value)) = self.db.pduid_pdus.get_gt(¤t).unwrap() { + if key.starts_with(&pdu_id) { + current = key.to_vec(); + pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); + } else { + break; } - } else { - debug!("event at `since` not found"); } pdus } diff --git a/src/database.rs b/src/database.rs index b08dd3c..dee2e94 100644 --- a/src/database.rs +++ b/src/database.rs @@ -55,8 +55,10 @@ pub struct Database { pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, pub pduid_pdus: sled::Tree, - pub roomid_pduleaves: MultiValue, pub eventid_pduid: sled::Tree, + pub roomid_pduleaves: MultiValue, + pub roomid_userids: MultiValue, + pub userid_roomids: MultiValue, _db: sled::Db, } @@ -76,8 +78,10 @@ impl Database { deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), pduid_pdus: db.open_tree("pduid_pdus").unwrap(), - roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), eventid_pduid: db.open_tree("eventid_pduid").unwrap(), + roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), + roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), + userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), _db: db, } } @@ -86,7 +90,7 @@ impl Database { println!("# UserId -> Password:"); for (k, v) in self.userid_password.iter().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -94,7 +98,7 @@ impl Database { println!("\n# UserId -> DeviceIds:"); for (k, v) in self.userid_deviceids.iter_all().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -102,7 +106,7 @@ impl Database { println!("\n# DeviceId -> Token:"); for (k, v) in self.deviceid_token.iter().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -110,7 +114,7 @@ impl Database { println!("\n# Token -> UserId:"); for (k, v) in self.token_userid.iter().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -118,7 +122,23 @@ impl Database { println!("\n# RoomId -> PDU leaves:"); for (k, v) in self.roomid_pduleaves.iter_all().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# RoomId -> UserIds:"); + for (k, v) in self.roomid_userids.iter_all().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# UserId -> RoomIds:"); + for (k, v) in 
self.userid_roomids.iter_all().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -126,7 +146,7 @@ impl Database { println!("\n# PDU Id -> PDUs:"); for (k, v) in self.pduid_pdus.iter().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); @@ -134,7 +154,7 @@ impl Database { println!("\n# EventId -> PDU Id:"); for (k, v) in self.eventid_pduid.iter().map(|r| r.unwrap()) { println!( - "{} -> {}", + "{:?} -> {:?}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v), ); diff --git a/src/main.rs b/src/main.rs index 5a8c066..a504930 100644 --- a/src/main.rs +++ b/src/main.rs @@ -15,14 +15,16 @@ use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ account::register, alias::get_alias, membership::join_room_by_id, - message::create_message_event, session::login, sync::sync_events, + message::create_message_event, room::create_room, session::login, sync::sync_events, }, unversioned::get_supported_versions, }; -use ruma_events::{collections::all::RoomEvent, room::message::MessageEvent, EventResult}; -use ruma_identifiers::{EventId, UserId}; +use ruma_events::{ + collections::all::RoomEvent, room::message::MessageEvent, EventResult, EventType, +}; +use ruma_identifiers::{EventId, RoomId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; -use serde_json::map::Map; +use serde_json::{json, map::Map}; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -168,6 +170,29 @@ fn login_route(data: State, body: Ruma) -> MatrixResult, + body: Ruma, +) -> MatrixResult { + // TODO: check if room is unique + let room_id = RoomId::new(data.hostname()).expect("host is valid"); + + data.room_join( + &room_id, + body.user_id.as_ref().expect("user is authenticated"), + ); + + data.pdu_append( + room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + EventType::RoomMessage, + json!({"msgtype": "m.text", "body": "Hello"}), + ); + + MatrixResult(Ok(create_room::Response { room_id })) +} + #[get("/_matrix/client/r0/directory/room/")] fn get_alias_route(room_alias: String) -> MatrixResult { // TODO @@ -193,10 +218,14 @@ fn get_alias_route(room_alias: String) -> MatrixResult { #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] fn join_room_by_id_route( - _room_id: String, + data: State, body: Ruma, + _room_id: String, ) -> MatrixResult { - // TODO + data.room_join( + &body.room_id, + body.user_id.as_ref().expect("user is authenticated"), + ); MatrixResult(Ok(join_room_by_id::Response { room_id: body.room_id.clone(), })) @@ -213,37 +242,28 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { - if let Ok(content) = body.data.clone().into_result() { - let event_id = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - content, - ); - MatrixResult(Ok(create_message_event::Response { event_id })) - } else { - error!("No data found"); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) - } + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body, + ); + MatrixResult(Ok(create_message_event::Response { event_id })) } -#[get("/_matrix/client/r0/sync", data = "<_body>")] +#[get("/_matrix/client/r0/sync", data = "")] fn sync_route( data: State, - _body: Ruma, + 
body: Ruma, ) -> MatrixResult { let mut joined_rooms = HashMap::new(); - { - let pdus = data.pdus_all(); - let mut room_events = Vec::new(); - - for pdu in pdus { - room_events.push(pdu.to_room_event()); - } + let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); + for room_id in joined_roomids { + let room_events = data + .pdus_all(&room_id) + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect(); joined_rooms.insert( "!roomid:localhost".try_into().unwrap(), @@ -309,6 +329,7 @@ fn main() { get_supported_versions_route, register_route, login_route, + create_room_route, get_alias_route, join_room_by_id_route, create_message_event_route, From e55a63629cda9e6f4d5cae25c39eb7552b1db017 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 6 Apr 2020 14:33:02 +0200 Subject: [PATCH 0022/1727] feat: state event support --- src/data.rs | 9 +++---- src/main.rs | 73 +++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 63 insertions(+), 19 deletions(-) diff --git a/src/data.rs b/src/data.rs index 0467d22..56878d7 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,9 +1,5 @@ use crate::{utils, Database, PduEvent}; -use log::debug; -use ruma_events::{ - room::message::{MessageEvent, MessageEventContent}, - EventType, -}; +use ruma_events::EventType; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; use std::{ @@ -181,6 +177,7 @@ impl Data { sender: UserId, event_type: EventType, content: serde_json::Value, + state_key: Option, ) -> EventId { // prev_events are the leaves of the current graph. This method removes all leaves from the // room and replaces them with our event @@ -208,7 +205,7 @@ impl Data { origin_server_ts: utils::millis_since_unix_epoch(), kind: event_type, content, - state_key: None, + state_key, prev_events, depth: depth.try_into().unwrap(), auth_events: Vec::new(), diff --git a/src/main.rs b/src/main.rs index a504930..72f3ce7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,27 +9,27 @@ pub use data::Data; pub use database::Database; pub use pdu::PduEvent; -use log::{debug, error}; +use log::debug; use rocket::{get, options, post, put, routes, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ - account::register, alias::get_alias, membership::join_room_by_id, - message::create_message_event, room::create_room, session::login, sync::sync_events, + account::register, + alias::get_alias, + membership::join_room_by_id, + message::create_message_event, + room::create_room, + session::login, + state::{create_state_event_for_empty_key, create_state_event_for_key}, + sync::sync_events, }, unversioned::get_supported_versions, }; -use ruma_events::{ - collections::all::RoomEvent, room::message::MessageEvent, EventResult, EventType, -}; -use ruma_identifiers::{EventId, RoomId, UserId}; +use ruma_events::EventType; +use ruma_identifiers::{RoomId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; -use serde_json::{json, map::Map}; -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - path::PathBuf, -}; +use serde_json::json; +use std::{collections::HashMap, convert::TryInto, path::PathBuf}; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { @@ -188,6 +188,7 @@ fn create_room_route( body.user_id.clone().expect("user is authenticated"), EventType::RoomMessage, json!({"msgtype": "m.text", "body": "Hello"}), + None, ); MatrixResult(Ok(create_room::Response { room_id })) @@ -247,10 +248,54 @@ fn create_message_event_route( 
body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), body.json_body, + None, ); MatrixResult(Ok(create_message_event::Response { event_id })) } +#[put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" +)] +fn create_state_event_for_key_route( + data: State, + _room_id: String, + _event_type: String, + _state_key: String, + body: Ruma, +) -> MatrixResult { + // Reponse of with/without key is the same + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body.clone(), + Some(body.state_key.clone()), + ); + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) +} + +#[put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" +)] +fn create_state_event_for_empty_key_route( + data: State, + _room_id: String, + _event_type: String, + body: Ruma, +) -> MatrixResult { + // Reponse of with/without key is the same + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body, + Some("".to_owned()), + ); + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) +} + #[get("/_matrix/client/r0/sync", data = "")] fn sync_route( data: State, @@ -333,6 +378,8 @@ fn main() { get_alias_route, join_room_by_id_route, create_message_event_route, + create_state_event_for_key_route, + create_state_event_for_empty_key_route, sync_route, options_route, ], From eb7c5b79be35fe8c278527c9efc129a21cbe24f6 Mon Sep 17 00:00:00 2001 From: timo Date: Mon, 6 Apr 2020 19:57:04 +0200 Subject: [PATCH 0023/1727] Update 'README.md' --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bda2435..4e42852 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ A Matrix Homeserver that's faster than others. #### Roadmap - [x] Register, login, authentication tokens -- [ ] Create room messages -- [ ] Sync room messages +- [x] Create room messages +- [x] Sync room messages - [ ] Join rooms, lookup room ids +- [ ] Riot web support \ No newline at end of file From 64223b8812ac6ec94b472f620d3030fbdc748df3 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 6 Apr 2020 17:37:13 +0200 Subject: [PATCH 0024/1727] register login flow --- src/main.rs | 17 ++++++++++++++++- src/ruma_wrapper.rs | 19 +++++++++++++------ 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/main.rs b/src/main.rs index 72f3ce7..2d87943 100644 --- a/src/main.rs +++ b/src/main.rs @@ -44,6 +44,21 @@ fn register_route( data: State, body: Ruma, ) -> MatrixResult { + if body.auth.is_none() { + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidUsername, + message: serde_json::to_string(&json!({ + "flows": [ + { "stages": [ "m.login.dummy" ] }, + ], + "params": {}, + "session": "TODO:randomsessionid", + })) + .unwrap(), + status_code: http::StatusCode::UNAUTHORIZED, + })); + } + // Validate user id let user_id: UserId = match (*format!( "@{}:{}", @@ -353,7 +368,7 @@ fn options_route(_segments: PathBuf) -> MatrixResult(_req: &'r Request, data: Data) -> TransformFuture<'r, Self::Owned, Self::Error> { + fn transform<'r>( + _req: &'r Request, + data: Data, + ) -> TransformFuture<'r, Self::Owned, Self::Error> { Box::pin(async move { Transform::Owned(Success(data)) }) } @@ -123,8 +126,7 @@ impl Deref for Ruma { /// This struct converts ruma responses into rocket http responses. 
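/// Roughly: the ruma response is first converted into an `http::Response<Vec<u8>>` via the
/// `TryInto` impl below, and the Rocket responder then copies that body and its headers
/// into a `rocket::Response`.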
pub struct MatrixResult(pub std::result::Result); -impl>>> TryInto>> for MatrixResult -{ +impl>>> TryInto>> for MatrixResult { type Error = T::Error; fn try_into(self) -> Result>, T::Error> { @@ -136,13 +138,18 @@ impl>>> TryInto>> for M } #[rocket::async_trait] -impl<'r, T: Send + TryInto>>> Responder<'r> for MatrixResult where T::Error: Send{ +impl<'r, T: Send + TryInto>>> Responder<'r> for MatrixResult +where + T::Error: Send, +{ async fn respond_to(self, _: &'r Request<'_>) -> response::Result<'r> { let http_response: Result, _> = self.try_into(); match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); - response.sized_body(Cursor::new(http_response.body().clone())).await; + response + .sized_body(Cursor::new(http_response.body().clone())) + .await; for header in http_response.headers() { response From ddcd423e6fbbee901473f63e75fce644d9a52c2e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 6 Apr 2020 22:57:58 +0200 Subject: [PATCH 0025/1727] feat: random tokens, sessions, guest usernames and device ids --- Cargo.lock | 3 ++- Cargo.toml | 1 + src/main.rs | 39 +++++++++++++++++++++++++++------------ src/utils.rs | 8 ++++++++ 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2978456..cd519ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -607,6 +607,7 @@ dependencies = [ "js_int", "log", "pretty_env_logger", + "rand", "rocket", "ruma-api", "ruma-client-api", @@ -1051,7 +1052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.7.2" -source = "git+https://github.com/ruma/ruma-client-api.git#fe92c2940a2db80509e9a9f162c0f68f3ec3d0a4" +source = "git+https://github.com/ruma/ruma-client-api.git#dc582758e4f846b3751d84d21eb321e8eb4faf51" dependencies = [ "http", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 631e6ee..7fda8ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,3 +23,4 @@ ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = "0.0.1" serde = "1.0.106" tokio = "0.2.16" +rand = "0.7.3" diff --git a/src/main.rs b/src/main.rs index 2d87943..6b97534 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,7 +19,7 @@ use ruma_client_api::{ membership::join_room_by_id, message::create_message_event, room::create_room, - session::login, + session::{get_login_types, login}, state::{create_state_event_for_empty_key, create_state_event_for_key}, sync::sync_events, }, @@ -31,6 +31,11 @@ use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; use std::{collections::HashMap, convert::TryInto, path::PathBuf}; +const DEVICE_ID_LENGTH: usize = 16; +const SESSION_ID_LENGTH: usize = 16; +const TOKEN_LENGTH: usize = 16; +const GUEST_NAME_LENGTH: usize = 16; + #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { MatrixResult(Ok(get_supported_versions::Response { @@ -47,14 +52,14 @@ fn register_route( if body.auth.is_none() { return MatrixResult(Err(Error { kind: ErrorKind::InvalidUsername, - message: serde_json::to_string(&json!({ + message: json!({ "flows": [ { "stages": [ "m.login.dummy" ] }, ], "params": {}, - "session": "TODO:randomsessionid", - })) - .unwrap(), + "session": utils::random_string(SESSION_ID_LENGTH), + }) + .to_string(), status_code: http::StatusCode::UNAUTHORIZED, })); } @@ -62,7 +67,9 @@ fn register_route( // Validate user id let user_id: UserId = match (*format!( "@{}:{}", - body.username.clone().unwrap_or("randomname".to_owned()), + body.username + .clone() + .unwrap_or_else(|| 
utils::random_string(GUEST_NAME_LENGTH)), data.hostname() )) .try_into() @@ -95,13 +102,13 @@ fn register_route( let device_id = body .device_id .clone() - .unwrap_or_else(|| "TODO:randomdeviceid".to_owned()); + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Add device data.device_add(&user_id, &device_id); // Generate new token for the device - let token = "TODO:randomtoken".to_owned(); + let token = utils::random_string(TOKEN_LENGTH); data.token_replace(&user_id, &device_id, token.clone()); MatrixResult(Ok(register::Response { @@ -112,6 +119,13 @@ fn register_route( })) } +#[get("/_matrix/client/r0/login", data = "<_body>")] +fn get_login_route(_body: Ruma) -> MatrixResult { + MatrixResult(Ok(get_login_types::Response { + flows: vec![get_login_types::LoginType::Password], + })) +} + #[post("/_matrix/client/r0/login", data = "")] fn login_route(data: State, body: Ruma) -> MatrixResult { // Validate login method @@ -167,22 +181,22 @@ fn login_route(data: State, body: Ruma) -> MatrixResult u64 { pub fn string_from_bytes(bytes: &[u8]) -> String { String::from_utf8(bytes.to_vec()).expect("bytes are valid utf8") } + +pub fn random_string(length: usize) -> String { + thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(length) + .collect() +} From 215a31c513efc4208be5cc7e3fcff280e88b8cf9 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 7 Apr 2020 13:21:05 +0200 Subject: [PATCH 0026/1727] Add a few dummy endpoints to make riot progress further --- src/main.rs | 68 ++++++++++++++++++++++++++++++++++++++------- src/ruma_wrapper.rs | 7 +++-- 2 files changed, 63 insertions(+), 12 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6b97534..ef4f08a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -16,8 +16,12 @@ use ruma_client_api::{ r0::{ account::register, alias::get_alias, + filter::create_filter, + keys::get_keys, membership::join_room_by_id, message::create_message_event, + presence::set_presence, + push::get_pushrules_all, room::create_room, session::{get_login_types, login}, state::{create_state_event_for_empty_key, create_state_event_for_key}, @@ -31,10 +35,10 @@ use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; use std::{collections::HashMap, convert::TryInto, path::PathBuf}; -const DEVICE_ID_LENGTH: usize = 16; -const SESSION_ID_LENGTH: usize = 16; -const TOKEN_LENGTH: usize = 16; -const GUEST_NAME_LENGTH: usize = 16; +const GUEST_NAME_LENGTH: usize = 10; +const DEVICE_ID_LENGTH: usize = 10; +const SESSION_ID_LENGTH: usize = 256; +const TOKEN_LENGTH: usize = 256; #[get("/_matrix/client/versions")] fn get_supported_versions_route() -> MatrixResult { @@ -49,9 +53,10 @@ fn register_route( data: State, body: Ruma, ) -> MatrixResult { + /* if body.auth.is_none() { return MatrixResult(Err(Error { - kind: ErrorKind::InvalidUsername, + kind: ErrorKind::Unknown, message: json!({ "flows": [ { "stages": [ "m.login.dummy" ] }, @@ -62,7 +67,7 @@ fn register_route( .to_string(), status_code: http::StatusCode::UNAUTHORIZED, })); - } + }*/ // Validate user id let user_id: UserId = match (*format!( @@ -120,7 +125,9 @@ fn register_route( } #[get("/_matrix/client/r0/login", data = "<_body>")] -fn get_login_route(_body: Ruma) -> MatrixResult { +fn get_login_route( + _body: Ruma, +) -> MatrixResult { MatrixResult(Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], })) @@ -147,7 +154,7 @@ fn login_route(data: State, body: Ruma) -> MatrixResult, body: Ruma) -> MatrixResult, body: Ruma) -> MatrixResult MatrixResult { + // TODO + 
MatrixResult(Ok(get_pushrules_all::Response { + global: HashMap::new(), + })) +} + +#[post("/_matrix/client/r0/user/<_user_id>/filter", data = "")] +fn create_filter_route( + body: Ruma, + _user_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(create_filter::Response { + filter_id: utils::random_string(10), + })) +} + +#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +fn set_presence_route( + body: Ruma, + _user_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(set_presence::Response)) +} + +#[post("/_matrix/client/r0/keys/query", data = "")] +fn get_keys_route(body: Ruma) -> MatrixResult { + // TODO + MatrixResult(Ok(get_keys::Response { + failures: HashMap::new(), + device_keys: HashMap::new(), + })) +} + #[post("/_matrix/client/r0/createRoom", data = "")] fn create_room_route( data: State, @@ -404,6 +448,10 @@ fn main() { register_route, get_login_route, login_route, + get_pushrules_all_route, + create_filter_route, + set_presence_route, + get_keys_route, create_room_route, get_alias_route, join_room_by_id_route, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index a44a53c..e73c4da 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -63,7 +63,7 @@ where let token = match request .headers() .get_one("Authorization") - .map(|s| s.to_owned()) + .map(|s| s[7..].to_owned()) // Split off "Bearer " .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) { // TODO: M_MISSING_TOKEN @@ -95,7 +95,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); log::info!("{:?}", http_request); - match T::Incoming::try_from(http_request) { + match T::Incoming::try_from(dbg!(http_request)) { Ok(t) => Success(Ruma { body: t, user_id, @@ -151,6 +151,9 @@ where .sized_body(Cursor::new(http_response.body().clone())) .await; + let status = http_response.status(); + response.raw_status(status.into(), ""); + for header in http_response.headers() { response .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); From fdeee7fdb58414631464dbd211780167895022e1 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 7 Apr 2020 14:43:24 +0200 Subject: [PATCH 0027/1727] More dummy endpoints --- src/main.rs | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index ef4f08a..b9bd352 100644 --- a/src/main.rs +++ b/src/main.rs @@ -16,9 +16,9 @@ use ruma_client_api::{ r0::{ account::register, alias::get_alias, - filter::create_filter, + filter::{self, create_filter, get_filter}, keys::get_keys, - membership::join_room_by_id, + membership::{join_room_by_id, join_room_by_id_or_alias}, message::create_message_event, presence::set_presence, push::get_pushrules_all, @@ -30,7 +30,7 @@ use ruma_client_api::{ unversioned::get_supported_versions, }; use ruma_events::EventType; -use ruma_identifiers::{RoomId, UserId}; +use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; use std::{collections::HashMap, convert::TryInto, path::PathBuf}; @@ -214,6 +214,27 @@ fn get_pushrules_all_route() -> MatrixResult { })) } +#[get( + "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", + data = "" +)] +fn get_filter_route( + body: Ruma, + _user_id: String, + _filter_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(get_filter::Response { + filter: filter::FilterDefinition { + event_fields: None, + event_format: None, + account_data: None, + room: None, + presence: None, + 
}, + })) +} + #[post("/_matrix/client/r0/user/<_user_id>/filter", data = "")] fn create_filter_route( body: Ruma, @@ -305,6 +326,37 @@ fn join_room_by_id_route( })) } +#[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] +fn join_room_by_id_or_alias_route( + data: State, + body: Ruma, + _room_id_or_alias: String, +) -> MatrixResult { + let room_id = match &body.room_id_or_alias { + RoomIdOrAliasId::RoomAliasId(alias) => match alias.alias() { + "#room:localhost" => "!xclkjvdlfj:localhost".try_into().unwrap(), + _ => { + debug!("Room not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } + }, + + RoomIdOrAliasId::RoomId(id) => id.clone(), + }; + + data.room_join( + &room_id, + body.user_id.as_ref().expect("user is authenticated"), + ); + MatrixResult(Ok(join_room_by_id_or_alias::Response { + room_id: room_id.clone(), + })) +} + #[put( "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", data = "" @@ -449,12 +501,14 @@ fn main() { get_login_route, login_route, get_pushrules_all_route, + get_filter_route, create_filter_route, set_presence_route, get_keys_route, create_room_route, get_alias_route, join_room_by_id_route, + join_room_by_id_or_alias_route, create_message_event_route, create_state_event_for_key_route, create_state_event_for_empty_key_route, From 8557278b9056fa181e0716d87f3d997022b87135 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 8 Apr 2020 15:05:00 +0200 Subject: [PATCH 0028/1727] better riot.im support --- README.md | 11 +++++-- src/data.rs | 58 +++++++++++++++++++++++++++++------ src/main.rs | 74 ++++++++++++++++++++++++++++++--------------- src/ruma_wrapper.rs | 2 +- src/utils.rs | 5 +-- 5 files changed, 110 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 4e42852..0502e30 100644 --- a/README.md +++ b/README.md @@ -15,5 +15,12 @@ A Matrix Homeserver that's faster than others. - [x] Register, login, authentication tokens - [x] Create room messages - [x] Sync room messages -- [ ] Join rooms, lookup room ids -- [ ] Riot web support \ No newline at end of file +- [x] Join rooms, lookup room ids +- [x] Basic Riot web support +- [ ] Riot room discovery +- [ ] Riot read receipts +- [ ] Riot presence +- [ ] Proper room creation +- [ ] Riot E2EE +- [ ] Basic federation +- [ ] State resolution diff --git a/src/data.rs b/src/data.rs index 56878d7..a9a0ba5 100644 --- a/src/data.rs +++ b/src/data.rs @@ -103,7 +103,11 @@ impl Data { .unwrap(); } - pub fn room_join(&self, room_id: &RoomId, user_id: &UserId) { + pub fn room_join(&self, room_id: &RoomId, user_id: &UserId) -> bool { + if !self.room_exists(room_id) { + return false; + } + self.db.userid_roomids.add( user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), @@ -112,6 +116,8 @@ impl Data { room_id.to_string().as_bytes(), user_id.to_string().as_bytes().into(), ); + + true } pub fn rooms_joined(&self, user_id: &UserId) -> Vec { @@ -126,6 +132,24 @@ impl Data { .collect() } + /// Check if a room exists by looking for PDUs in that room. 
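/// A single `get_gt` probe on the `d` + room_id + `#` prefix is enough: if the first key at
/// or after that prefix still starts with it, the room has at least one PDU.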
+ pub fn room_exists(&self, room_id: &RoomId) -> bool { + // Create the first part of the full pdu id + let mut prefix = vec![b'd']; + prefix.extend_from_slice(room_id.to_string().as_bytes()); + prefix.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + + if let Some((key, _)) = self.db.pduid_pdus.get_gt(&prefix).unwrap() { + if key.starts_with(&prefix) { + true + } else { + false + } + } else { + false + } + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid @@ -177,6 +201,7 @@ impl Data { sender: UserId, event_type: EventType, content: serde_json::Value, + unsigned: Option>, state_key: Option, ) -> EventId { // prev_events are the leaves of the current graph. This method removes all leaves from the @@ -210,7 +235,7 @@ impl Data { depth: depth.try_into().unwrap(), auth_events: Vec::new(), redacts: None, - unsigned: Default::default(), // TODO + unsigned: unsigned.unwrap_or_default(), hashes: ruma_federation_api::EventHash { sha256: "aaa".to_owned(), }, @@ -263,29 +288,42 @@ impl Data { /// Returns a vector of all PDUs in a room. pub fn pdus_all(&self, room_id: &RoomId) -> Vec { - self.pdus_since(room_id, "".to_owned()) + self.pdus_since(room_id, 0) + } + + pub fn last_pdu_index(&self) -> u64 { + let count_key: Vec = vec![b'n']; + utils::u64_from_bytes( + &self + .db + .pduid_pdus + .get(&count_key) + .unwrap() + .unwrap_or_else(|| (&0_u64.to_be_bytes()).into()), + ) } /// Returns a vector of all events in a room that happened after the event with id `since`. - pub fn pdus_since(&self, room_id: &RoomId, since: String) -> Vec { + pub fn pdus_since(&self, room_id: &RoomId, since: u64) -> Vec { let mut pdus = Vec::new(); // Create the first part of the full pdu id - let mut pdu_id = vec![b'd']; - pdu_id.extend_from_slice(room_id.to_string().as_bytes()); - pdu_id.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + let mut prefix = vec![b'd']; + prefix.extend_from_slice(room_id.to_string().as_bytes()); + prefix.push(b'#'); // Add delimiter so we don't find rooms starting with the same id - let mut current = pdu_id.clone(); - current.extend_from_slice(since.as_bytes()); + let mut current = prefix.clone(); + current.extend_from_slice(&since.to_be_bytes()); while let Some((key, value)) = self.db.pduid_pdus.get_gt(¤t).unwrap() { - if key.starts_with(&pdu_id) { + if key.starts_with(&prefix) { current = key.to_vec(); pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); } else { break; } } + pdus } diff --git a/src/main.rs b/src/main.rs index b9bd352..a2cfc10 100644 --- a/src/main.rs +++ b/src/main.rs @@ -33,7 +33,7 @@ use ruma_events::EventType; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; -use std::{collections::HashMap, convert::TryInto, path::PathBuf}; +use std::{collections::HashMap, convert::TryInto, path::PathBuf, time::Duration}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -67,7 +67,8 @@ fn register_route( .to_string(), status_code: http::StatusCode::UNAUTHORIZED, })); - }*/ + } + */ // Validate user id let user_id: UserId = match (*format!( @@ -272,17 +273,18 @@ fn create_room_route( // TODO: check if room is unique let room_id = RoomId::new(data.hostname()).expect("host is valid"); - data.room_join( - &room_id, - body.user_id.as_ref().expect("user is authenticated"), - ); - data.pdu_append( room_id.clone(), body.user_id.clone().expect("user is authenticated"), EventType::RoomMessage, 
json!({"msgtype": "m.text", "body": "Hello"}), None, + None, + ); + + data.room_join( + &room_id, + body.user_id.as_ref().expect("user is authenticated"), ); MatrixResult(Ok(create_room::Response { room_id })) @@ -317,13 +319,20 @@ fn join_room_by_id_route( body: Ruma, _room_id: String, ) -> MatrixResult { - data.room_join( + if data.room_join( &body.room_id, body.user_id.as_ref().expect("user is authenticated"), - ); - MatrixResult(Ok(join_room_by_id::Response { - room_id: body.room_id.clone(), - })) + ) { + MatrixResult(Ok(join_room_by_id::Response { + room_id: body.room_id.clone(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } } #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] @@ -348,13 +357,18 @@ fn join_room_by_id_or_alias_route( RoomIdOrAliasId::RoomId(id) => id.clone(), }; - data.room_join( + if data.room_join( &room_id, body.user_id.as_ref().expect("user is authenticated"), - ); - MatrixResult(Ok(join_room_by_id_or_alias::Response { - room_id: room_id.clone(), - })) + ) { + MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } } #[put( @@ -368,11 +382,15 @@ fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { + let mut unsigned = serde_json::Map::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); + let event_id = data.pdu_append( body.room_id.clone(), body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), - body.json_body, + body.json_body.clone(), + Some(unsigned), None, ); MatrixResult(Ok(create_message_event::Response { event_id })) @@ -395,6 +413,7 @@ fn create_state_event_for_key_route( body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), body.json_body.clone(), + None, Some(body.state_key.clone()), ); MatrixResult(Ok(create_state_event_for_key::Response { event_id })) @@ -416,6 +435,7 @@ fn create_state_event_for_empty_key_route( body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), body.json_body, + None, Some("".to_owned()), ); MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) @@ -426,17 +446,21 @@ fn sync_route( data: State, body: Ruma, ) -> MatrixResult { + std::thread::sleep(Duration::from_millis(200)); + let next_batch = data.last_pdu_index().to_string(); + let mut joined_rooms = HashMap::new(); let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); for room_id in joined_roomids { - let room_events = data - .pdus_all(&room_id) - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect(); + let pdus = if let Some(since) = body.since.clone().and_then(|string| string.parse().ok()) { + data.pdus_since(&room_id, since) + } else { + data.pdus_all(&room_id) + }; + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); joined_rooms.insert( - "!roomid:localhost".try_into().unwrap(), + room_id.try_into().unwrap(), sync_events::JoinedRoom { account_data: sync_events::AccountData { events: Vec::new() }, summary: sync_events::RoomSummary { @@ -460,7 +484,7 @@ fn sync_route( } MatrixResult(Ok(sync_events::Response { - next_batch: String::new(), + next_batch, rooms: sync_events::Rooms { leave: Default::default(), join: joined_rooms, diff --git 
a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index e73c4da..0bdcfae 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -95,7 +95,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); log::info!("{:?}", http_request); - match T::Incoming::try_from(dbg!(http_request)) { + match T::Incoming::try_from(http_request) { Ok(t) => Success(Ruma { body: t, user_id, diff --git a/src/utils.rs b/src/utils.rs index b32b0f6..e08e09f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -8,8 +8,9 @@ pub fn millis_since_unix_epoch() -> js_int::UInt { (SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() - .as_millis() as u32) - .into() + .as_millis() as u64) + .try_into() + .expect("time millis are <= MAX_SAFE_UINT") } pub fn increment(old: Option<&[u8]>) -> Option> { From d0920f0fa3afec3fcc57b57da215ed08bdc1fd82 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 8 Apr 2020 22:28:03 +0200 Subject: [PATCH 0029/1727] Add license --- .gitea/PULL_REQUEST_TEMPLATE.md | 1 + LICENSE | 661 ++++++++++++++++++++++++++++++++ 2 files changed, 662 insertions(+) create mode 100644 .gitea/PULL_REQUEST_TEMPLATE.md create mode 100644 LICENSE diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..6c87aec --- /dev/null +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1 @@ +[ ] I agree to release my code and all other changes of this PR under the AGPL-3.0 license diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..be3f7b2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. 
+The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
From c60402bf0d86e851c2d0768ff32ac1fda6361075 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 8 Apr 2020 23:13:30 +0200 Subject: [PATCH 0030/1727] Update ruma-client-api --- Cargo.lock | 2 +- src/main.rs | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd519ba..12c6626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1052,7 +1052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.7.2" -source = "git+https://github.com/ruma/ruma-client-api.git#dc582758e4f846b3751d84d21eb321e8eb4faf51" +source = "git+https://github.com/ruma/ruma-client-api.git#aeb4e237b7f13a068a92929fdb5c5adac4f346e1" dependencies = [ "http", "js_int", diff --git a/src/main.rs b/src/main.rs index a2cfc10..2102c1d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -118,10 +118,9 @@ fn register_route( data.token_replace(&user_id, &device_id, token.clone()); MatrixResult(Ok(register::Response { - access_token: token, - home_server: data.hostname().to_owned(), + access_token: Some(token), user_id, - device_id, + device_id: Some(device_id), })) } From 38ab7c843eb50bf9d8878d51a4fa8f091c2b5d32 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 8 Apr 2020 23:25:19 +0200 Subject: [PATCH 0031/1727] Update error type of /register route --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/main.rs | 55 ++++++++++++++++++++++++--------------------- src/ruma_wrapper.rs | 13 +++++++---- 4 files changed, 40 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12c6626..f39a719 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1052,7 +1052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.7.2" -source = "git+https://github.com/ruma/ruma-client-api.git#aeb4e237b7f13a068a92929fdb5c5adac4f346e1" +source = "git+https://github.com/ruma/ruma-client-api.git?branch=uiaa-error-type#a7136c06285864dadcc0b0c6371d181002727c55" dependencies = [ "http", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 7fda8ee..2f9d5ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", branch = "uiaa-error-type" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" diff --git a/src/main.rs b/src/main.rs index 2102c1d..09c8aac 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,7 +14,10 @@ use rocket::{get, options, post, put, routes, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ - account::register, + account::{ + register, AuthenticationFlow, UserInteractiveAuthenticationInfo, + UserInteractiveAuthenticationResponse, + }, alias::get_alias, filter::{self, create_filter, get_filter}, keys::get_keys, @@ -52,23 +55,19 @@ fn get_supported_versions_route() -> MatrixResult, body: Ruma, -) -> MatrixResult { - /* +) -> MatrixResult { if body.auth.is_none() { - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: json!({ - "flows": [ - { "stages": [ "m.login.dummy" ] }, - ], - "params": {}, - "session": utils::random_string(SESSION_ID_LENGTH), - }) - .to_string(), - status_code: http::StatusCode::UNAUTHORIZED, - })); + return MatrixResult(Err(UserInteractiveAuthenticationResponse::AuthResponse( + UserInteractiveAuthenticationInfo { + flows: vec![AuthenticationFlow { + stages: vec!["m.login.dummy".to_owned()], + }], + completed: vec![], + 
params: json!({}), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + }, + ))); } - */ // Validate user id let user_id: UserId = match (*format!( @@ -82,11 +81,13 @@ fn register_route( { Err(_) => { debug!("Username invalid"); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidUsername, - message: "Username was invalid.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( + Error { + kind: ErrorKind::InvalidUsername, + message: "Username was invalid.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }, + ))); } Ok(user_id) => user_id, }; @@ -94,11 +95,13 @@ fn register_route( // Check if username is creative enough if data.user_exists(&user_id) { debug!("ID already taken"); - return MatrixResult(Err(Error { - kind: ErrorKind::UserInUse, - message: "Desired user ID is already taken.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( + Error { + kind: ErrorKind::UserInUse, + message: "Desired user ID is already taken.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }, + ))); } // Create user diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0bdcfae..f39ef75 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -9,7 +9,6 @@ use ruma_api::{ error::{FromHttpRequestError, FromHttpResponseError}, Endpoint, Outgoing, }; -use ruma_client_api::error::Error; use ruma_identifiers::UserId; use std::{ convert::{TryFrom, TryInto}, @@ -124,9 +123,13 @@ impl Deref for Ruma { } /// This struct converts ruma responses into rocket http responses. -pub struct MatrixResult(pub std::result::Result); +pub struct MatrixResult(pub std::result::Result); -impl>>> TryInto>> for MatrixResult { +impl TryInto>> for MatrixResult +where + T: TryInto>>, + E: Into>>, +{ type Error = T::Error; fn try_into(self) -> Result>, T::Error> { @@ -138,9 +141,11 @@ impl>>> TryInto>> for M } #[rocket::async_trait] -impl<'r, T: Send + TryInto>>> Responder<'r> for MatrixResult +impl<'r, T, E> Responder<'r> for MatrixResult where + T: Send + TryInto>>, T::Error: Send, + E: Into>> + Send, { async fn respond_to(self, _: &'r Request<'_>) -> response::Result<'r> { let http_response: Result, _> = self.try_into(); From a1b2b4e4fe309abac60feb0bf6fbf20260d821ed Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 8 Apr 2020 22:28:03 +0200 Subject: [PATCH 0032/1727] Add liberapay, change pr template --- .gitea/PULL_REQUEST_TEMPLATE.md | 2 +- README.md | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 6c87aec..7b15cba 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -1 +1 @@ -[ ] I agree to release my code and all other changes of this PR under the AGPL-3.0 license +- [ ] I agree to release my code and all other changes of this PR under the AGPL-3.0 license diff --git a/README.md b/README.md index 0502e30..d4d6bdf 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # Matrix Homeserver in Rust +![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay) + #### Goals A Matrix Homeserver that's faster than others. @@ -24,3 +26,7 @@ A Matrix Homeserver that's faster than others. 
- [ ] Riot E2EE - [ ] Basic federation - [ ] State resolution + +#### Donate + +Liberapay: From 9214a62dde288e157d717fa787a8bf1c2a4962e7 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 9 Apr 2020 11:00:58 +0200 Subject: [PATCH 0033/1727] better room creation --- src/main.rs | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/src/main.rs b/src/main.rs index 09c8aac..6beb5ab 100644 --- a/src/main.rs +++ b/src/main.rs @@ -274,20 +274,34 @@ fn create_room_route( ) -> MatrixResult { // TODO: check if room is unique let room_id = RoomId::new(data.hostname()).expect("host is valid"); + let user_id = body.user_id.clone().expect("user is authenticated"); data.pdu_append( room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - EventType::RoomMessage, - json!({"msgtype": "m.text", "body": "Hello"}), + user_id.clone(), + EventType::RoomCreate, + json!({ "creator": user_id }), None, + Some("".to_owned()), + ); + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomName, + json!({"name": body.name}), None, + Some("".to_owned()), + ); + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomTopic, + json!({"topic": body.topic}), + None, + Some("".to_owned()), ); - data.room_join( - &room_id, - body.user_id.as_ref().expect("user is authenticated"), - ); + data.room_join(&room_id, &user_id); MatrixResult(Ok(create_room::Response { room_id })) } From 5ac45eac599b00afbf51eee1d79687835278463c Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 9 Apr 2020 12:22:39 +0200 Subject: [PATCH 0034/1727] public rooms dir --- src/data.rs | 28 ++++++++++++++++++++++++++++ src/main.rs | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/src/data.rs b/src/data.rs index a9a0ba5..84a93df 100644 --- a/src/data.rs +++ b/src/data.rs @@ -150,6 +150,34 @@ impl Data { } } + pub fn rooms_all(&self) -> Vec { + let mut room_ids = self + .db + .roomid_pduleaves + .iter_all() + .keys() + .map(|key| { + serde_json::from_slice( + &key.unwrap() + .iter() + .copied() + .take_while(|&x| x != 0xff) + .collect::>(), + ) + .unwrap() + }) + .collect::>(); + room_ids.dedup(); + room_ids + } + + pub fn room_users(&self, room_id: &RoomId) -> u32 { + self.db + .roomid_userids + .get_iter(room_id.to_string().as_bytes()) + .count() as u32 + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid diff --git a/src/main.rs b/src/main.rs index 6beb5ab..a0eb137 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,6 +19,7 @@ use ruma_client_api::{ UserInteractiveAuthenticationResponse, }, alias::get_alias, + directory::{self, get_public_rooms}, filter::{self, create_filter, get_filter}, keys::get_keys, membership::{join_room_by_id, join_room_by_id_or_alias}, @@ -387,6 +388,37 @@ fn join_room_by_id_or_alias_route( } } +#[get("/_matrix/client/r0/publicRooms", data = "")] +fn get_public_rooms_route( + data: State, + body: Ruma, +) -> MatrixResult { + let chunk = data + .rooms_all() + .into_iter() + .map(|room_id| directory::PublicRoomsChunk { + aliases: None, + canonical_alias: None, + name: None, + num_joined_members: data.room_users(&room_id).into(), + room_id, + topic: None, + world_readable: false, + guest_can_join: true, + avatar_url: None, + }) + .collect::>(); + + let total_room_count_estimate = (chunk.len() as u32).into(); + + MatrixResult(Ok(get_public_rooms::Response { + chunk: chunk, + prev_batch: None, + next_batch: None, + total_room_count_estimate: Some(total_room_count_estimate), + 
})) +} + #[put( "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", data = "" @@ -549,6 +581,7 @@ fn main() { get_alias_route, join_room_by_id_route, join_room_by_id_or_alias_route, + get_public_rooms_route, create_message_event_route, create_state_event_for_key_route, create_state_event_for_empty_key_route, From 11e75e708136a54e90f3d2ca67488f19cdf89f35 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 9 Apr 2020 14:39:50 +0200 Subject: [PATCH 0035/1727] make room directory work --- src/data.rs | 7 ++-- src/main.rs | 113 ++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 91 insertions(+), 29 deletions(-) diff --git a/src/data.rs b/src/data.rs index 84a93df..dffb1e0 100644 --- a/src/data.rs +++ b/src/data.rs @@ -157,13 +157,14 @@ impl Data { .iter_all() .keys() .map(|key| { - serde_json::from_slice( + RoomId::try_from(&*utils::string_from_bytes( &key.unwrap() .iter() + .skip(1) // skip "d" .copied() - .take_while(|&x| x != 0xff) + .take_while(|&x| x != 0xff) // until delimiter .collect::>(), - ) + )) .unwrap() }) .collect::>(); diff --git a/src/main.rs b/src/main.rs index a0eb137..92cb6d1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,9 +19,10 @@ use ruma_client_api::{ UserInteractiveAuthenticationResponse, }, alias::get_alias, - directory::{self, get_public_rooms}, + config::{get_global_account_data, set_global_account_data}, + directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, - keys::get_keys, + keys::{get_keys, upload_keys}, membership::{join_room_by_id, join_room_by_id_or_alias}, message::create_message_event, presence::set_presence, @@ -30,10 +31,11 @@ use ruma_client_api::{ session::{get_login_types, login}, state::{create_state_event_for_empty_key, create_state_event_for_key}, sync::sync_events, + thirdparty::get_protocols, }, unversioned::get_supported_versions, }; -use ruma_events::EventType; +use ruma_events::{collections::only::Event, EventType}; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; @@ -250,6 +252,36 @@ fn create_filter_route( })) } +#[put( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] +fn set_global_account_data_route( + body: Ruma, + _user_id: String, + _type: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(set_global_account_data::Response)) +} + +#[get( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] +fn get_global_account_data_route( + body: Ruma, + _user_id: String, + _type: String, +) -> MatrixResult { + // TODO + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Data not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + #[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] fn set_presence_route( body: Ruma, @@ -268,6 +300,16 @@ fn get_keys_route(body: Ruma) -> MatrixResult, + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(upload_keys::Response { + one_time_key_counts: HashMap::new(), + })) +} + #[post("/_matrix/client/r0/createRoom", data = "")] fn create_room_route( data: State, @@ -285,22 +327,28 @@ fn create_room_route( None, Some("".to_owned()), ); - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomName, - json!({"name": body.name}), - None, - Some("".to_owned()), - ); - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomTopic, - json!({"topic": body.topic}), - None, - Some("".to_owned()), - ); + + if let Some(name) = &body.name { + 
data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomName, + json!({ "name": name }), + None, + Some("".to_owned()), + ); + } + + if let Some(topic) = &body.topic { + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomTopic, + json!({ "topic": topic }), + None, + Some("".to_owned()), + ); + } data.room_join(&room_id, &user_id); @@ -388,11 +436,11 @@ fn join_room_by_id_or_alias_route( } } -#[get("/_matrix/client/r0/publicRooms", data = "")] -fn get_public_rooms_route( +#[post("/_matrix/client/r0/publicRooms", data = "")] +fn get_public_rooms_filtered_route( data: State, - body: Ruma, -) -> MatrixResult { + body: Ruma, +) -> MatrixResult { let chunk = data .rooms_all() .into_iter() @@ -411,14 +459,23 @@ fn get_public_rooms_route( let total_room_count_estimate = (chunk.len() as u32).into(); - MatrixResult(Ok(get_public_rooms::Response { - chunk: chunk, + MatrixResult(Ok(get_public_rooms_filtered::Response { + chunk, prev_batch: None, next_batch: None, total_room_count_estimate: Some(total_room_count_estimate), })) } +#[get("/_matrix/client/r0/thirdparty/protocols", data = "")] +fn get_protocols_route( + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(dbg!(get_protocols::Response { + protocols: HashMap::new(), + }))) +} + #[put( "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", data = "" @@ -575,13 +632,17 @@ fn main() { get_pushrules_all_route, get_filter_route, create_filter_route, + set_global_account_data_route, + get_global_account_data_route, set_presence_route, get_keys_route, + upload_keys_route, create_room_route, get_alias_route, join_room_by_id_route, join_room_by_id_or_alias_route, - get_public_rooms_route, + get_public_rooms_filtered_route, + get_protocols_route, create_message_event_route, create_state_event_for_key_route, create_state_event_for_empty_key_route, From b12ae15a45336f4cf9232956cab6623590c68da8 Mon Sep 17 00:00:00 2001 From: timo Date: Thu, 9 Apr 2020 17:39:45 +0200 Subject: [PATCH 0036/1727] Update 'README.md' --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d4d6bdf..cffeb5e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -# Matrix Homeserver in Rust +# Conduit +### A Matrix homeserver written in Rust ![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay) From 062c5521f026ac98d899ded4137297476dad8988 Mon Sep 17 00:00:00 2001 From: Marcel Date: Thu, 9 Apr 2020 18:49:27 +0200 Subject: [PATCH 0037/1727] Add displayname and avatar_url endpoints Add PUT and GET /_matrix/client/r0/profile/{userId}/displayname Endpoint Add PUT and GET /_matrix/client/r0/profile/{userId}/avatar_url Endpoint Add GET /_matrix/client/r0/profile/{userId} Endpoint Took 2 hours 16 minutes --- src/data.rs | 34 ++++++++++++ src/database.rs | 20 +++++++ src/main.rs | 136 +++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 189 insertions(+), 1 deletion(-) diff --git a/src/data.rs b/src/data.rs index dffb1e0..3ec3b44 100644 --- a/src/data.rs +++ b/src/data.rs @@ -60,6 +60,40 @@ impl Data { .map(|bytes| utils::string_from_bytes(&bytes)) } + /// Set a new displayname. + pub fn displayname_set(&self, user_id: &UserId, displayname: Option) { + self.db + .profile_displayname + .insert(user_id.to_string(), &*displayname.unwrap_or_default()) + .unwrap(); + } + + /// Get a the displayname of a user. 
+ pub fn displayname_get(&self, user_id: &UserId) -> Option { + self.db + .profile_displayname + .get(user_id.to_string()) + .unwrap() + .map(|bytes| utils::string_from_bytes(&bytes)) + } + + /// Set a new avatar_url. + pub fn avatar_url_set(&self, user_id: &UserId, avatar_url: String) { + self.db + .profile_avatar_url + .insert(user_id.to_string(), &*avatar_url) + .unwrap(); + } + + /// Get a the avatar_url of a user. + pub fn avatar_url_get(&self, user_id: &UserId) -> Option { + self.db + .profile_avatar_url + .get(user_id.to_string()) + .unwrap() + .map(|bytes| utils::string_from_bytes(&bytes)) + } + /// Add a new device to a user. pub fn device_add(&self, user_id: &UserId, device_id: &str) { if self diff --git a/src/database.rs b/src/database.rs index dee2e94..a6ddd55 100644 --- a/src/database.rs +++ b/src/database.rs @@ -52,6 +52,8 @@ impl MultiValue { pub struct Database { pub userid_password: sled::Tree, pub userid_deviceids: MultiValue, + pub profile_displayname: sled::Tree, + pub profile_avatar_url: sled::Tree, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, pub pduid_pdus: sled::Tree, @@ -75,6 +77,8 @@ impl Database { Self { userid_password: db.open_tree("userid_password").unwrap(), userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), + profile_displayname: db.open_tree("profile_displayname").unwrap(), + profile_avatar_url: db.open_tree("profile_avatar_url").unwrap(), deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), pduid_pdus: db.open_tree("pduid_pdus").unwrap(), @@ -103,6 +107,22 @@ impl Database { String::from_utf8_lossy(&v), ); } + println!("# AccountData -> Displayname:"); + for (k, v) in self.profile_displayname.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("# AccountData -> AvatarURL:"); + for (k, v) in self.profile_avatar_url.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } println!("\n# DeviceId -> Token:"); for (k, v) in self.deviceid_token.iter().map(|r| r.unwrap()) { println!( diff --git a/src/main.rs b/src/main.rs index 92cb6d1..41c7e6c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,6 +26,9 @@ use ruma_client_api::{ membership::{join_room_by_id, join_room_by_id_or_alias}, message::create_message_event, presence::set_presence, + profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, push::get_pushrules_all, room::create_room, session::{get_login_types, login}, @@ -39,7 +42,12 @@ use ruma_events::{collections::only::Event, EventType}; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use ruma_wrapper::{MatrixResult, Ruma}; use serde_json::json; -use std::{collections::HashMap, convert::TryInto, path::PathBuf, time::Duration}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + path::PathBuf, + time::Duration, +}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -282,6 +290,127 @@ fn get_global_account_data_route( })) } +#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +fn set_displayname_route( + data: State, + body: Ruma, + _user_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + if body.displayname.is_none() { + debug!("Request was missing the displayname payload."); + return MatrixResult(Err(Error { + kind: 
ErrorKind::MissingParam, + message: "Missing displayname".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + data.displayname_set(&user_id, body.displayname.clone()); + // TODO send a new m.room.member join event with the updated displayname + // TODO send a new m.presence event with the updated displayname + + MatrixResult(Ok(set_display_name::Response)) +} + +#[get( + "/_matrix/client/r0/profile//displayname", + data = "" +)] +fn get_displayname_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + if let Some(displayname) = data.displayname_get(&user_id) { + return MatrixResult(Ok(get_display_name::Response { + displayname: Some(displayname), + })); + } + + // Return 404 if we don't have any + debug!("Profile was not found."); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} +#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +fn set_avatar_url_route( + data: State, + body: Ruma, + _user_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + if body.avatar_url == "" { + debug!("Request was missing the avatar_url payload."); + return MatrixResult(Err(Error { + kind: ErrorKind::MissingParam, + message: "Missing avatar_url".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + // TODO in the future when we can handle media uploads make sure that this url is our own server + // TODO also make sure this is mxc:// format + + data.avatar_url_set(&user_id, body.avatar_url.clone()); + // TODO send a new m.room.member join event with the updated avatar_url + // TODO send a new m.presence event with the updated avatar_url + + MatrixResult(Ok(set_avatar_url::Response)) +} + +#[get("/_matrix/client/r0/profile//avatar_url", data = "")] +fn get_avatar_url_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + if let Some(avatar_url) = data.avatar_url_get(&user_id) { + return MatrixResult(Ok(get_avatar_url::Response { + avatar_url: Some(avatar_url), + })); + } + + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + +#[get("/_matrix/client/r0/profile/", data = "")] +fn get_profile_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + let avatar_url = data.avatar_url_get(&user_id); + let displayname = data.displayname_get(&user_id); + + if avatar_url.is_some() && displayname.is_some() { + return MatrixResult(Ok(get_profile::Response { + avatar_url, + displayname, + })); + } + + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + #[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] fn set_presence_route( body: Ruma, @@ -634,6 +763,11 @@ fn main() { create_filter_route, set_global_account_data_route, get_global_account_data_route, + set_displayname_route, + get_displayname_route, + set_avatar_url_route, + get_avatar_url_route, + get_profile_route, set_presence_route, get_keys_route, 
upload_keys_route, From cd55220a756762abcfa8dc24eccc1833aaf36524 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 9 Apr 2020 19:38:15 +0200 Subject: [PATCH 0038/1727] fix: room directory duplicates --- src/database.rs | 2 +- src/main.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database.rs b/src/database.rs index dee2e94..0fe8e52 100644 --- a/src/database.rs +++ b/src/database.rs @@ -7,7 +7,7 @@ pub struct MultiValue(sled::Tree); impl MultiValue { /// Get an iterator over all values. pub fn iter_all(&self) -> sled::Iter { - self.0.iter() + self.0.scan_prefix(b"d") } /// Get an iterator over all values of this id. diff --git a/src/main.rs b/src/main.rs index 92cb6d1..519d32b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -471,9 +471,9 @@ fn get_public_rooms_filtered_route( fn get_protocols_route( body: Ruma, ) -> MatrixResult { - MatrixResult(Ok(dbg!(get_protocols::Response { + MatrixResult(Ok(get_protocols::Response { protocols: HashMap::new(), - }))) + })) } #[put( @@ -618,7 +618,7 @@ fn main() { } pretty_env_logger::init(); - let data = Data::load_or_create("localhost"); + let data = Data::load_or_create("matrixtesting.koesters.xyz"); data.debug(); rocket::ignite() From 9f3368daa457cdfbcf056e4dad21e05cf8d30ffe Mon Sep 17 00:00:00 2001 From: timo Date: Thu, 9 Apr 2020 19:51:52 +0200 Subject: [PATCH 0039/1727] Update 'README.md' --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cffeb5e..89a0ea3 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Conduit ### A Matrix homeserver written in Rust -![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay) +[![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay)](https://liberapay.com/timokoesters) #### Goals From b1284fd50966058f388fa672be79ba0412491eee Mon Sep 17 00:00:00 2001 From: Marcel Date: Thu, 9 Apr 2020 20:47:03 +0200 Subject: [PATCH 0040/1727] Replace profile_* with userid_* Add a missing dot in the errormessage Require mxc:// to be present at the start of an avatar_url Update mxc check TODO Show displayname or avatar_url if either is available when getting the profile Endpoint Return the correct data in case of a empty displayname or an empty avatar_url Took 50 minutes Took 34 seconds --- src/data.rs | 18 ++++++-- src/database.rs | 12 ++--- src/main.rs | 113 ++++++++++++++++++++++++++++++------------------ 3 files changed, 91 insertions(+), 52 deletions(-) diff --git a/src/data.rs b/src/data.rs index 3ec3b44..ef30e27 100644 --- a/src/data.rs +++ b/src/data.rs @@ -60,10 +60,15 @@ impl Data { .map(|bytes| utils::string_from_bytes(&bytes)) } + /// Removes a displayname. + pub fn displayname_remove(&self, user_id: &UserId) { + self.db.userid_displayname.remove(user_id).unwrap(); + } + /// Set a new displayname. pub fn displayname_set(&self, user_id: &UserId, displayname: Option) { self.db - .profile_displayname + .userid_displayname .insert(user_id.to_string(), &*displayname.unwrap_or_default()) .unwrap(); } @@ -71,16 +76,21 @@ impl Data { /// Get a the displayname of a user. pub fn displayname_get(&self, user_id: &UserId) -> Option { self.db - .profile_displayname + .userid_displayname .get(user_id.to_string()) .unwrap() .map(|bytes| utils::string_from_bytes(&bytes)) } + /// Removes a avatar_url. + pub fn avatar_url_remove(&self, user_id: &UserId) { + self.db.userid_avatar_url.remove(user_id).unwrap(); + } + /// Set a new avatar_url. 
pub fn avatar_url_set(&self, user_id: &UserId, avatar_url: String) { self.db - .profile_avatar_url + .userid_avatar_url .insert(user_id.to_string(), &*avatar_url) .unwrap(); } @@ -88,7 +98,7 @@ impl Data { /// Get a the avatar_url of a user. pub fn avatar_url_get(&self, user_id: &UserId) -> Option { self.db - .profile_avatar_url + .userid_avatar_url .get(user_id.to_string()) .unwrap() .map(|bytes| utils::string_from_bytes(&bytes)) diff --git a/src/database.rs b/src/database.rs index a6ddd55..bdfab5b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -52,8 +52,8 @@ impl MultiValue { pub struct Database { pub userid_password: sled::Tree, pub userid_deviceids: MultiValue, - pub profile_displayname: sled::Tree, - pub profile_avatar_url: sled::Tree, + pub userid_displayname: sled::Tree, + pub userid_avatar_url: sled::Tree, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, pub pduid_pdus: sled::Tree, @@ -77,8 +77,8 @@ impl Database { Self { userid_password: db.open_tree("userid_password").unwrap(), userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), - profile_displayname: db.open_tree("profile_displayname").unwrap(), - profile_avatar_url: db.open_tree("profile_avatar_url").unwrap(), + userid_displayname: db.open_tree("userid_displayname").unwrap(), + userid_avatar_url: db.open_tree("userid_avatar_url").unwrap(), deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), pduid_pdus: db.open_tree("pduid_pdus").unwrap(), @@ -108,7 +108,7 @@ impl Database { ); } println!("# AccountData -> Displayname:"); - for (k, v) in self.profile_displayname.iter().map(|r| r.unwrap()) { + for (k, v) in self.userid_displayname.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), @@ -116,7 +116,7 @@ impl Database { ); } println!("# AccountData -> AvatarURL:"); - for (k, v) in self.profile_avatar_url.iter().map(|r| r.unwrap()) { + for (k, v) in self.userid_avatar_url.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), diff --git a/src/main.rs b/src/main.rs index 41c7e6c..3bc1e4b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,5 @@ #![feature(proc_macro_hygiene, decl_macro)] + mod data; mod database; mod pdu; @@ -88,7 +89,7 @@ fn register_route( .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), data.hostname() )) - .try_into() + .try_into() { Err(_) => { debug!("Username invalid"); @@ -152,7 +153,7 @@ fn login_route(data: State, body: Ruma) -> MatrixResult MatrixResult { } #[get( - "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", - data = "" +"/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", +data = "" )] fn get_filter_route( body: Ruma, @@ -261,8 +262,8 @@ fn create_filter_route( } #[put( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" +"/_matrix/client/r0/user/<_user_id>/account_data/<_type>", +data = "" )] fn set_global_account_data_route( body: Ruma, @@ -274,8 +275,8 @@ fn set_global_account_data_route( } #[get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" +"/_matrix/client/r0/user/<_user_id>/account_data/<_type>", +data = "" )] fn get_global_account_data_route( body: Ruma, @@ -297,25 +298,35 @@ fn set_displayname_route( _user_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); + + // Send error on None and accept Some("") as valid username + // Synapse returns a parsing error but the spec doesn't require this if 
body.displayname.is_none() { debug!("Request was missing the displayname payload."); return MatrixResult(Err(Error { kind: ErrorKind::MissingParam, - message: "Missing displayname".to_owned(), + message: "Missing displayname.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); } - data.displayname_set(&user_id, body.displayname.clone()); - // TODO send a new m.room.member join event with the updated displayname - // TODO send a new m.presence event with the updated displayname + if let Some(displayname) = body.displayname { + if displayname == "" { + data.displayname_remove(&user_id); + } else { + data.displayname_set(&user_id, body.displayname.clone()); + // TODO send a new m.room.member join event with the updated displayname + // TODO send a new m.presence event with the updated displayname + + } + } MatrixResult(Ok(set_display_name::Response)) } #[get( - "/_matrix/client/r0/profile//displayname", - data = "" +"/_matrix/client/r0/profile//displayname", +data = "" )] fn get_displayname_route( data: State, @@ -323,20 +334,26 @@ fn get_displayname_route( user_id_raw: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); + if !data.user_exists(&user_id) { + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } if let Some(displayname) = data.displayname_get(&user_id) { return MatrixResult(Ok(get_display_name::Response { displayname: Some(displayname), })); } - // Return 404 if we don't have any - debug!("Profile was not found."); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + MatrixResult(Ok(get_display_name::Response { + displayname: None, })) } + #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] fn set_avatar_url_route( data: State, @@ -344,21 +361,28 @@ fn set_avatar_url_route( _user_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - if body.avatar_url == "" { - debug!("Request was missing the avatar_url payload."); + + if !body.avatar_url.starts_with("mxc://") { + debug!("Request contains an invalid avatar_url."); return MatrixResult(Err(Error { - kind: ErrorKind::MissingParam, + kind: ErrorKind::InvalidParam, message: "Missing avatar_url".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); } // TODO in the future when we can handle media uploads make sure that this url is our own server - // TODO also make sure this is mxc:// format + // TODO also make sure this is valid mxc:// format (not only starting with it) - data.avatar_url_set(&user_id, body.avatar_url.clone()); - // TODO send a new m.room.member join event with the updated avatar_url - // TODO send a new m.presence event with the updated avatar_url + + if body.avatar_url == "" { + data.avatar_url_remove(&user_id); + } else { + data.avatar_url_set(&user_id, body.displayname.clone()); + // TODO send a new m.room.member join event with the updated avatar_url + // TODO send a new m.presence event with the updated avatar_url + + } MatrixResult(Ok(set_avatar_url::Response)) } @@ -370,18 +394,23 @@ fn get_avatar_url_route( user_id_raw: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); + if !data.user_exists(&user_id) { + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + 
MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } if let Some(avatar_url) = data.avatar_url_get(&user_id) { return MatrixResult(Ok(get_avatar_url::Response { avatar_url: Some(avatar_url), })); } - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + MatrixResult(Ok(get_avatar_url::Response { + avatar_url: None, })) } @@ -395,7 +424,7 @@ fn get_profile_route( let avatar_url = data.avatar_url_get(&user_id); let displayname = data.displayname_get(&user_id); - if avatar_url.is_some() && displayname.is_some() { + if avatar_url.is_some() || displayname.is_some() { return MatrixResult(Ok(get_profile::Response { avatar_url, displayname, @@ -498,8 +527,8 @@ fn get_alias_route(room_alias: String) -> MatrixResult { })); } } - .try_into() - .unwrap(); + .try_into() + .unwrap(); MatrixResult(Ok(get_alias::Response { room_id, @@ -606,8 +635,8 @@ fn get_protocols_route( } #[put( - "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", - data = "" +"/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", +data = "" )] fn create_message_event_route( data: State, @@ -631,8 +660,8 @@ fn create_message_event_route( } #[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", - data = "" +"/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", +data = "" )] fn create_state_event_for_key_route( data: State, @@ -654,8 +683,8 @@ fn create_state_event_for_key_route( } #[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" +"/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", +data = "" )] fn create_state_event_for_empty_key_route( data: State, From 49fe5e32710ee502bd10da843571c6e7cba2390a Mon Sep 17 00:00:00 2001 From: Marcel Date: Thu, 9 Apr 2020 21:01:39 +0200 Subject: [PATCH 0041/1727] Rename userid_avatar_url to userid_avatarurl and fix debug print Run cargo fmt Took 5 minutes --- src/data.rs | 6 +++--- src/database.rs | 10 +++++----- src/main.rs | 47 ++++++++++++++++++++--------------------------- 3 files changed, 28 insertions(+), 35 deletions(-) diff --git a/src/data.rs b/src/data.rs index ef30e27..70cd2f8 100644 --- a/src/data.rs +++ b/src/data.rs @@ -84,13 +84,13 @@ impl Data { /// Removes a avatar_url. pub fn avatar_url_remove(&self, user_id: &UserId) { - self.db.userid_avatar_url.remove(user_id).unwrap(); + self.db.userid_avatarurl.remove(user_id).unwrap(); } /// Set a new avatar_url. pub fn avatar_url_set(&self, user_id: &UserId, avatar_url: String) { self.db - .userid_avatar_url + .userid_avatarurl .insert(user_id.to_string(), &*avatar_url) .unwrap(); } @@ -98,7 +98,7 @@ impl Data { /// Get a the avatar_url of a user. 
pub fn avatar_url_get(&self, user_id: &UserId) -> Option { self.db - .userid_avatar_url + .userid_avatarurl .get(user_id.to_string()) .unwrap() .map(|bytes| utils::string_from_bytes(&bytes)) diff --git a/src/database.rs b/src/database.rs index bdfab5b..b8a4151 100644 --- a/src/database.rs +++ b/src/database.rs @@ -53,7 +53,7 @@ pub struct Database { pub userid_password: sled::Tree, pub userid_deviceids: MultiValue, pub userid_displayname: sled::Tree, - pub userid_avatar_url: sled::Tree, + pub userid_avatarurl: sled::Tree, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, pub pduid_pdus: sled::Tree, @@ -78,7 +78,7 @@ impl Database { userid_password: db.open_tree("userid_password").unwrap(), userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), userid_displayname: db.open_tree("userid_displayname").unwrap(), - userid_avatar_url: db.open_tree("userid_avatar_url").unwrap(), + userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), pduid_pdus: db.open_tree("pduid_pdus").unwrap(), @@ -107,7 +107,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# AccountData -> Displayname:"); + println!("# UserId -> Displayname:"); for (k, v) in self.userid_displayname.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", @@ -115,8 +115,8 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# AccountData -> AvatarURL:"); - for (k, v) in self.userid_avatar_url.iter().map(|r| r.unwrap()) { + println!("# UserId -> AvatarURL:"); + for (k, v) in self.userid_avatarurl.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), diff --git a/src/main.rs b/src/main.rs index 3bc1e4b..aee2d47 100644 --- a/src/main.rs +++ b/src/main.rs @@ -89,7 +89,7 @@ fn register_route( .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), data.hostname() )) - .try_into() + .try_into() { Err(_) => { debug!("Username invalid"); @@ -153,7 +153,7 @@ fn login_route(data: State, body: Ruma) -> MatrixResult MatrixResult { } #[get( -"/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", -data = "" + "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", + data = "" )] fn get_filter_route( body: Ruma, @@ -262,8 +262,8 @@ fn create_filter_route( } #[put( -"/_matrix/client/r0/user/<_user_id>/account_data/<_type>", -data = "" + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" )] fn set_global_account_data_route( body: Ruma, @@ -275,8 +275,8 @@ fn set_global_account_data_route( } #[get( -"/_matrix/client/r0/user/<_user_id>/account_data/<_type>", -data = "" + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" )] fn get_global_account_data_route( body: Ruma, @@ -317,7 +317,6 @@ fn set_displayname_route( data.displayname_set(&user_id, body.displayname.clone()); // TODO send a new m.room.member join event with the updated displayname // TODO send a new m.presence event with the updated displayname - } } @@ -325,8 +324,8 @@ fn set_displayname_route( } #[get( -"/_matrix/client/r0/profile//displayname", -data = "" + "/_matrix/client/r0/profile//displayname", + data = "" )] fn get_displayname_route( data: State, @@ -349,9 +348,7 @@ fn get_displayname_route( })); } - MatrixResult(Ok(get_display_name::Response { - displayname: None, - })) + MatrixResult(Ok(get_display_name::Response { displayname: None })) } #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] @@ -374,14 +371,12 @@ fn 
set_avatar_url_route( // TODO in the future when we can handle media uploads make sure that this url is our own server // TODO also make sure this is valid mxc:// format (not only starting with it) - if body.avatar_url == "" { data.avatar_url_remove(&user_id); } else { data.avatar_url_set(&user_id, body.displayname.clone()); // TODO send a new m.room.member join event with the updated avatar_url // TODO send a new m.presence event with the updated avatar_url - } MatrixResult(Ok(set_avatar_url::Response)) @@ -409,9 +404,7 @@ fn get_avatar_url_route( })); } - MatrixResult(Ok(get_avatar_url::Response { - avatar_url: None, - })) + MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) } #[get("/_matrix/client/r0/profile/", data = "")] @@ -527,8 +520,8 @@ fn get_alias_route(room_alias: String) -> MatrixResult { })); } } - .try_into() - .unwrap(); + .try_into() + .unwrap(); MatrixResult(Ok(get_alias::Response { room_id, @@ -635,8 +628,8 @@ fn get_protocols_route( } #[put( -"/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", -data = "" + "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", + data = "" )] fn create_message_event_route( data: State, @@ -660,8 +653,8 @@ fn create_message_event_route( } #[put( -"/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", -data = "" + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" )] fn create_state_event_for_key_route( data: State, @@ -683,8 +676,8 @@ fn create_state_event_for_key_route( } #[put( -"/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", -data = "" + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" )] fn create_state_event_for_empty_key_route( data: State, From 8bcbc983c381d8318aeb85fabfacd29563332f14 Mon Sep 17 00:00:00 2001 From: Marcel Date: Thu, 9 Apr 2020 21:11:21 +0200 Subject: [PATCH 0042/1727] Change and add Comments to the profile endpoints Add missing docs Took 9 minutes --- src/main.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index aee2d47..e85d90b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -299,7 +299,7 @@ fn set_displayname_route( ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - // Send error on None and accept Some("") as valid username + // Send error on None // Synapse returns a parsing error but the spec doesn't require this if body.displayname.is_none() { debug!("Request was missing the displayname payload."); @@ -311,6 +311,7 @@ fn set_displayname_route( } if let Some(displayname) = body.displayname { + // Some("") will clear the displayname if displayname == "" { data.displayname_remove(&user_id); } else { @@ -338,7 +339,7 @@ fn get_displayname_route( debug!("Profile was not found."); MatrixResult(Err(Error { kind: ErrorKind::NotFound, - message: "Profile was not found".to_owned(), + message: "Profile was not found.".to_owned(), status_code: http::StatusCode::NOT_FOUND, })) } @@ -348,6 +349,7 @@ fn get_displayname_route( })); } + // The user has no displayname MatrixResult(Ok(get_display_name::Response { displayname: None })) } @@ -394,7 +396,7 @@ fn get_avatar_url_route( debug!("Profile was not found."); MatrixResult(Err(Error { kind: ErrorKind::NotFound, - message: "Profile was not found".to_owned(), + message: "Profile was not found.".to_owned(), status_code: http::StatusCode::NOT_FOUND, })) } @@ -404,6 +406,7 @@ fn get_avatar_url_route( })); } + // The user has no avatar 
MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) } @@ -428,7 +431,7 @@ fn get_profile_route( debug!("Profile was not found."); MatrixResult(Err(Error { kind: ErrorKind::NotFound, - message: "Profile was not found".to_owned(), + message: "Profile was not found.".to_owned(), status_code: http::StatusCode::NOT_FOUND, })) } From 57264f72e91f2fef41345e6166e9b1adebd5fc8a Mon Sep 17 00:00:00 2001 From: Marcel Date: Thu, 9 Apr 2020 21:19:27 +0200 Subject: [PATCH 0043/1727] Fix avatar_url error message if not starting with mxc:// Took 8 minutes --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index e85d90b..17d01f5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -365,7 +365,7 @@ fn set_avatar_url_route( debug!("Request contains an invalid avatar_url."); return MatrixResult(Err(Error { kind: ErrorKind::InvalidParam, - message: "Missing avatar_url".to_owned(), + message: "avatar_url has to start with mxc://.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); } From 8f41a4d30676e18f65ab9bbebf1f181554ae654e Mon Sep 17 00:00:00 2001 From: timo Date: Thu, 9 Apr 2020 23:23:16 +0200 Subject: [PATCH 0044/1727] Update 'README.md' --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 89a0ea3..b555f5b 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ ### A Matrix homeserver written in Rust [![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay)](https://liberapay.com/timokoesters) +[![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz)](https://matrix.to/#/#conduit:koesters.xyz) #### Goals From 93b1d97166ddfe9e5fda9acb2c48000be7ecf759 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 10 Apr 2020 07:53:20 +0200 Subject: [PATCH 0045/1727] Update readme and cargo.toml --- Cargo.lock | 46 +++++++++++++++++++++++----------------------- Cargo.toml | 8 ++++++-- README.md | 7 ++++--- 3 files changed, 33 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f39a719..d8d3bc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,6 +138,29 @@ dependencies = [ "bitflags", ] +[[package]] +name = "conduit" +version = "0.1.0" +dependencies = [ + "directories", + "http", + "js_int", + "log", + "pretty_env_logger", + "rand", + "rocket", + "ruma-api", + "ruma-client-api", + "ruma-events", + "ruma-federation-api", + "ruma-identifiers", + "ruma-signatures", + "serde", + "serde_json", + "sled", + "tokio", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -598,29 +621,6 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -[[package]] -name = "matrixserver" -version = "0.1.0" -dependencies = [ - "directories", - "http", - "js_int", - "log", - "pretty_env_logger", - "rand", - "rocket", - "ruma-api", - "ruma-client-api", - "ruma-events", - "ruma-federation-api", - "ruma-identifiers", - "ruma-signatures", - "serde", - "serde_json", - "sled", - "tokio", -] - [[package]] name = "maybe-uninit" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 2f9d5ce..92a6681 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,11 @@ [package] -name = "matrixserver" -version = "0.1.0" +name = "conduit" +description = "A Matrix homeserver written in Rust" +license = "AGPL-3.0" authors = ["timokoesters "] +homepage = "https://conduit.rs" +readme = "README.md" +version = "0.1.0" edition = "2018" # See more keys 
and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/README.md b/README.md index b555f5b..dc91303 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Conduit ### A Matrix homeserver written in Rust -[![Liberapay](http://img.shields.io/liberapay/receives/timokoesters.svg?logo=liberapay)](https://liberapay.com/timokoesters) -[![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz)](https://matrix.to/#/#conduit:koesters.xyz) +[![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) +[![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) #### Goals @@ -21,9 +21,10 @@ A Matrix Homeserver that's faster than others. - [x] Sync room messages - [x] Join rooms, lookup room ids - [x] Basic Riot web support -- [ ] Riot room discovery +- [x] Riot room discovery - [ ] Riot read receipts - [ ] Riot presence +- [ ] Password hashing - [ ] Proper room creation - [ ] Riot E2EE - [ ] Basic federation From 040296c7112d805fba187fc4547e12326e74c414 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 10 Apr 2020 13:36:57 +0200 Subject: [PATCH 0046/1727] Add test support and impl dummy /read_markers --- Cargo.lock | 12 ++++++++++++ Cargo.toml | 2 +- src/database.rs | 11 +++++++++++ src/main.rs | 44 +++++++++++++++++++++++++++++--------------- src/test.rs | 32 ++++++++++++++++++++++++++++++++ 5 files changed, 85 insertions(+), 16 deletions(-) create mode 100644 src/test.rs diff --git a/Cargo.lock b/Cargo.lock index d8d3bc7..3101ab1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1355,9 +1355,21 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", + "tokio-macros", "winapi 0.3.8", ] +[[package]] +name = "tokio-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +dependencies = [ + "proc-macro2 1.0.10", + "quote 1.0.3", + "syn 1.0.17", +] + [[package]] name = "tokio-rustls" version = "0.12.2" diff --git a/Cargo.toml b/Cargo.toml index 92a6681..7618ac0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,5 +26,5 @@ serde_json = "1.0.50" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = "0.0.1" serde = "1.0.106" -tokio = "0.2.16" +tokio = { version = "0.2.16", features = ["macros"] } #rt-threaded rand = "0.7.3" diff --git a/src/database.rs b/src/database.rs index a19ec43..f17e76f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,6 +1,7 @@ use crate::utils; use directories::ProjectDirs; use sled::IVec; +use std::fs::remove_dir_all; pub struct MultiValue(sled::Tree); @@ -65,6 +66,16 @@ pub struct Database { } impl Database { + /// Tries to remove the old database but ignores all errors. + pub fn try_remove(hostname: &str) { + let mut path = ProjectDirs::from("xyz", "koesters", "matrixserver") + .unwrap() + .data_dir() + .to_path_buf(); + path.push(hostname); + let _ = remove_dir_all(path); + } + /// Load an existing database or create a new one. 
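+    /// The sled trees are stored in the project data directory, in a subfolder named after the server hostname.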
pub fn load_or_create(hostname: &str) -> Self { let mut path = ProjectDirs::from("xyz", "koesters", "matrixserver") diff --git a/src/main.rs b/src/main.rs index 4b1f72e..e7b49c7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,9 @@ mod pdu; mod ruma_wrapper; mod utils; +#[cfg(test)] +mod test; + pub use data::Data; pub use database::Database; pub use pdu::PduEvent; @@ -31,6 +34,7 @@ use ruma_client_api::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, push::get_pushrules_all, + read_marker::set_read_marker, room::create_room, session::{get_login_types, login}, state::{create_state_event_for_empty_key, create_state_event_for_key}, @@ -159,9 +163,7 @@ fn login_route(data: State, body: Ruma) -> MatrixResult/read_markers", data = "")] +fn set_read_marker_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + MatrixResult(Ok(set_read_marker::Response)) +} + #[post("/_matrix/client/r0/createRoom", data = "")] fn create_room_route( data: State, @@ -765,16 +776,7 @@ fn options_route(_segments: PathBuf) -> MatrixResult rocket::Rocket { rocket::ignite() .mount( "/", @@ -796,6 +798,7 @@ fn main() { set_presence_route, get_keys_route, upload_keys_route, + set_read_marker_route, create_room_route, get_alias_route, join_room_by_id_route, @@ -810,6 +813,17 @@ fn main() { ], ) .manage(data) - .launch() - .unwrap(); +} + +fn main() { + // Log info by default + if let Err(_) = std::env::var("RUST_LOG") { + std::env::set_var("RUST_LOG", "matrixserver=debug,info"); + } + pretty_env_logger::init(); + + let data = Data::load_or_create("matrixtesting.koesters.xyz"); + data.debug(); + + setup_rocket(data).launch().unwrap(); } diff --git a/src/test.rs b/src/test.rs new file mode 100644 index 0000000..6131eb2 --- /dev/null +++ b/src/test.rs @@ -0,0 +1,32 @@ +use super::*; +use rocket::{local::Client, http::Status}; + +fn setup_client() -> Client { + Database::try_remove("temp"); + let data = Data::load_or_create("temp"); + + let rocket = setup_rocket(data); + Client::new(rocket).expect("valid rocket instance") +} + +#[tokio::test] +async fn register_login() { + let client = setup_client(); + let mut response = client + .post("/_matrix/client/r0/register?kind=user") + .body( + r#"{ + "username": "cheeky_monkey", + "password": "ilovebananas", + "device_id": "GHTYAJCE", + "initial_device_display_name": "Jungle Phone", + "inhibit_login": false + }"#, + ) + .dispatch().await; + let body = serde_json::to_value(&response.body_string().await.unwrap()).unwrap(); + + assert_eq!(response.status().code, 401); + assert!(dbg!(&body["flows"]).as_array().unwrap().len() > 0); + assert!(body["session"].as_str().unwrap().len() > 0); +} From fd829bfa0d8538d9ebb1a33493399bb066221144 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 11 Apr 2020 09:30:11 +0200 Subject: [PATCH 0047/1727] move client_server code into own file --- src/client_server.rs | 763 +++++++++++++++++++++++++++++++++++++++ src/main.rs | 823 ++----------------------------------------- 2 files changed, 795 insertions(+), 791 deletions(-) create mode 100644 src/client_server.rs diff --git a/src/client_server.rs b/src/client_server.rs new file mode 100644 index 0000000..4d81066 --- /dev/null +++ b/src/client_server.rs @@ -0,0 +1,763 @@ +use crate::{utils, Data, Database, MatrixResult, PduEvent, Ruma}; + +use log::debug; +use rocket::{get, options, post, put, routes, State}; +use ruma_client_api::{ + error::{Error, ErrorKind}, + r0::{ + account::{ + register, AuthenticationFlow, 
UserInteractiveAuthenticationInfo, + UserInteractiveAuthenticationResponse, + }, + alias::get_alias, + config::{get_global_account_data, set_global_account_data}, + directory::{self, get_public_rooms_filtered}, + filter::{self, create_filter, get_filter}, + keys::{get_keys, upload_keys}, + membership::{join_room_by_id, join_room_by_id_or_alias}, + message::create_message_event, + presence::set_presence, + profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, + push::get_pushrules_all, + read_marker::set_read_marker, + room::create_room, + session::{get_login_types, login}, + state::{create_state_event_for_empty_key, create_state_event_for_key}, + sync::sync_events, + thirdparty::get_protocols, + }, + unversioned::get_supported_versions, +}; +use ruma_events::{collections::only::Event, EventType}; +use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; +use serde_json::json; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + path::PathBuf, + time::Duration, +}; + +const GUEST_NAME_LENGTH: usize = 10; +const DEVICE_ID_LENGTH: usize = 10; +const SESSION_ID_LENGTH: usize = 256; +const TOKEN_LENGTH: usize = 256; + +#[get("/_matrix/client/versions")] +pub fn get_supported_versions_route() -> MatrixResult { + MatrixResult(Ok(get_supported_versions::Response { + versions: vec!["r0.6.0".to_owned()], + unstable_features: HashMap::new(), + })) +} + +#[post("/_matrix/client/r0/register", data = "")] +pub fn register_route( + data: State, + body: Ruma, +) -> MatrixResult { + if body.auth.is_none() { + return MatrixResult(Err(UserInteractiveAuthenticationResponse::AuthResponse( + UserInteractiveAuthenticationInfo { + flows: vec![AuthenticationFlow { + stages: vec!["m.login.dummy".to_owned()], + }], + completed: vec![], + params: json!({}), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + }, + ))); + } + + // Validate user id + let user_id: UserId = match (*format!( + "@{}:{}", + body.username + .clone() + .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), + data.hostname() + )) + .try_into() + { + Err(_) => { + debug!("Username invalid"); + return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( + Error { + kind: ErrorKind::InvalidUsername, + message: "Username was invalid.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }, + ))); + } + Ok(user_id) => user_id, + }; + + // Check if username is creative enough + if data.user_exists(&user_id) { + debug!("ID already taken"); + return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( + Error { + kind: ErrorKind::UserInUse, + message: "Desired user ID is already taken.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }, + ))); + } + + // Create user + data.user_add(&user_id, body.password.clone()); + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); + + // Add device + data.device_add(&user_id, &device_id); + + // Generate new token for the device + let token = utils::random_string(TOKEN_LENGTH); + data.token_replace(&user_id, &device_id, token.clone()); + + MatrixResult(Ok(register::Response { + access_token: Some(token), + user_id, + device_id: Some(device_id), + })) +} + +#[get("/_matrix/client/r0/login", data = "<_body>")] +pub fn get_login_route( + _body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(get_login_types::Response { + flows: vec![get_login_types::LoginType::Password], + })) +} + 
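+// Log in an existing user.
+//
+// Only the m.login.password flow is handled below: bare usernames are
+// qualified with our hostname, the password is checked against the one
+// stored at registration, and on success the device is registered and a
+// fresh access token is generated for it.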
+#[post("/_matrix/client/r0/login", data = "")] +pub fn login_route(data: State, body: Ruma) -> MatrixResult { + // Validate login method + let user_id = + if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = + (body.user.clone(), body.login_info.clone()) + { + if !username.contains(':') { + username = format!("@{}:{}", username, data.hostname()); + } + if let Ok(user_id) = (*username).try_into() { + // Check password (this also checks if the user exists + if let Some(correct_password) = data.password_get(&user_id) { + if password == correct_password { + // Success! + user_id + } else { + debug!("Invalid password."); + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + })); + } + } else { + debug!("UserId does not exist (has no assigned password). Can't log in."); + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + })); + } + } else { + debug!("Invalid UserId."); + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidUsername, + message: "Bad user id.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + } else { + debug!("Bad login type"); + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Bad login type.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + }; + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); + + // Add device + data.device_add(&user_id, &device_id); + + // Generate a new token for the device + let token = utils::random_string(TOKEN_LENGTH); + data.token_replace(&user_id, &device_id, token.clone()); + + MatrixResult(Ok(login::Response { + user_id, + access_token: token, + home_server: Some(data.hostname().to_owned()), + device_id, + well_known: None, + })) +} + +#[get("/_matrix/client/r0/pushrules")] +pub fn get_pushrules_all_route() -> MatrixResult { + // TODO + MatrixResult(Ok(get_pushrules_all::Response { + global: HashMap::new(), + })) +} + +#[get( + "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", + data = "" +)] +pub fn get_filter_route( + body: Ruma, + _user_id: String, + _filter_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(get_filter::Response { + filter: filter::FilterDefinition { + event_fields: None, + event_format: None, + account_data: None, + room: None, + presence: None, + }, + })) +} + +#[post("/_matrix/client/r0/user/<_user_id>/filter", data = "")] +pub fn create_filter_route( + body: Ruma, + _user_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(create_filter::Response { + filter_id: utils::random_string(10), + })) +} + +#[put( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] +pub fn set_global_account_data_route( + body: Ruma, + _user_id: String, + _type: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(set_global_account_data::Response)) +} + +#[get( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] +pub fn get_global_account_data_route( + body: Ruma, + _user_id: String, + _type: String, +) -> MatrixResult { + // TODO + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Data not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + +#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +pub fn set_displayname_route( + 
data: State, + body: Ruma, + _user_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + + // Send error on None + // Synapse returns a parsing error but the spec doesn't require this + if body.displayname.is_none() { + debug!("Request was missing the displayname payload."); + return MatrixResult(Err(Error { + kind: ErrorKind::MissingParam, + message: "Missing displayname.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + if let Some(displayname) = &body.displayname { + // Some("") will clear the displayname + if displayname == "" { + data.displayname_remove(&user_id); + } else { + data.displayname_set(&user_id, body.displayname.clone()); + // TODO send a new m.room.member join event with the updated displayname + // TODO send a new m.presence event with the updated displayname + } + } + + MatrixResult(Ok(set_display_name::Response)) +} + +#[get( + "/_matrix/client/r0/profile//displayname", + data = "" +)] +pub fn get_displayname_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + if !data.user_exists(&user_id) { + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } + if let Some(displayname) = data.displayname_get(&user_id) { + return MatrixResult(Ok(get_display_name::Response { + displayname: Some(displayname), + })); + } + + // The user has no displayname + MatrixResult(Ok(get_display_name::Response { displayname: None })) +} + +#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +pub fn set_avatar_url_route( + data: State, + body: Ruma, + _user_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + + if !body.avatar_url.starts_with("mxc://") { + debug!("Request contains an invalid avatar_url."); + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidParam, + message: "avatar_url has to start with mxc://.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + // TODO in the future when we can handle media uploads make sure that this url is our own server + // TODO also make sure this is valid mxc:// format (not only starting with it) + + if body.avatar_url == "" { + data.avatar_url_remove(&user_id); + } else { + data.avatar_url_set(&user_id, body.avatar_url.clone()); + // TODO send a new m.room.member join event with the updated avatar_url + // TODO send a new m.presence event with the updated avatar_url + } + + MatrixResult(Ok(set_avatar_url::Response)) +} + +#[get("/_matrix/client/r0/profile//avatar_url", data = "")] +pub fn get_avatar_url_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + if !data.user_exists(&user_id) { + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } + if let Some(avatar_url) = data.avatar_url_get(&user_id) { + return MatrixResult(Ok(get_avatar_url::Response { + avatar_url: Some(avatar_url), + })); + } + + // The user has no avatar + MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) +} + +#[get("/_matrix/client/r0/profile/", data = "")] +pub fn 
get_profile_route( + data: State, + body: Ruma, + user_id_raw: String, +) -> MatrixResult { + let user_id = (*body).user_id.clone(); + let avatar_url = data.avatar_url_get(&user_id); + let displayname = data.displayname_get(&user_id); + + if avatar_url.is_some() || displayname.is_some() { + return MatrixResult(Ok(get_profile::Response { + avatar_url, + displayname, + })); + } + + // Return 404 if we don't have a profile for this id + debug!("Profile was not found."); + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Profile was not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + +#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +pub fn set_presence_route( + body: Ruma, + _user_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(set_presence::Response)) +} + +#[post("/_matrix/client/r0/keys/query", data = "")] +pub fn get_keys_route(body: Ruma) -> MatrixResult { + // TODO + MatrixResult(Ok(get_keys::Response { + failures: HashMap::new(), + device_keys: HashMap::new(), + })) +} + +#[post("/_matrix/client/r0/keys/upload", data = "")] +pub fn upload_keys_route( + data: State, + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(upload_keys::Response { + one_time_key_counts: HashMap::new(), + })) +} + +#[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] +pub fn set_read_marker_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + MatrixResult(Ok(set_read_marker::Response)) +} + +#[post("/_matrix/client/r0/createRoom", data = "")] +pub fn create_room_route( + data: State, + body: Ruma, +) -> MatrixResult { + // TODO: check if room is unique + let room_id = RoomId::new(data.hostname()).expect("host is valid"); + let user_id = body.user_id.clone().expect("user is authenticated"); + + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomCreate, + json!({ "creator": user_id }), + None, + Some("".to_owned()), + ); + + if let Some(name) = &body.name { + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomName, + json!({ "name": name }), + None, + Some("".to_owned()), + ); + } + + if let Some(topic) = &body.topic { + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomTopic, + json!({ "topic": topic }), + None, + Some("".to_owned()), + ); + } + + data.room_join(&room_id, &user_id); + + MatrixResult(Ok(create_room::Response { room_id })) +} + +#[get("/_matrix/client/r0/directory/room/")] +pub fn get_alias_route(room_alias: String) -> MatrixResult { + // TODO + let room_id = match &*room_alias { + "#room:localhost" => "!xclkjvdlfj:localhost", + _ => { + debug!("Room not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } + } + .try_into() + .unwrap(); + + MatrixResult(Ok(get_alias::Response { + room_id, + servers: vec!["localhost".to_owned()], + })) +} + +#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] +pub fn join_room_by_id_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + if data.room_join( + &body.room_id, + body.user_id.as_ref().expect("user is authenticated"), + ) { + MatrixResult(Ok(join_room_by_id::Response { + room_id: body.room_id.clone(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } +} + +#[post("/_matrix/client/r0/join/<_room_id_or_alias>", 
data = "")] +pub fn join_room_by_id_or_alias_route( + data: State, + body: Ruma, + _room_id_or_alias: String, +) -> MatrixResult { + let room_id = match &body.room_id_or_alias { + RoomIdOrAliasId::RoomAliasId(alias) => match alias.alias() { + "#room:localhost" => "!xclkjvdlfj:localhost".try_into().unwrap(), + _ => { + debug!("Room not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } + }, + + RoomIdOrAliasId::RoomId(id) => id.clone(), + }; + + if data.room_join( + &room_id, + body.user_id.as_ref().expect("user is authenticated"), + ) { + MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } +} + +#[post("/_matrix/client/r0/publicRooms", data = "")] +pub fn get_public_rooms_filtered_route( + data: State, + body: Ruma, +) -> MatrixResult { + let chunk = data + .rooms_all() + .into_iter() + .map(|room_id| directory::PublicRoomsChunk { + aliases: None, + canonical_alias: None, + name: None, + num_joined_members: data.room_users(&room_id).into(), + room_id, + topic: None, + world_readable: false, + guest_can_join: true, + avatar_url: None, + }) + .collect::>(); + + let total_room_count_estimate = (chunk.len() as u32).into(); + + MatrixResult(Ok(get_public_rooms_filtered::Response { + chunk, + prev_batch: None, + next_batch: None, + total_room_count_estimate: Some(total_room_count_estimate), + })) +} + +#[get("/_matrix/client/r0/thirdparty/protocols", data = "")] +pub fn get_protocols_route( + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(get_protocols::Response { + protocols: HashMap::new(), + })) +} + +#[put( + "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", + data = "" +)] +pub fn create_message_event_route( + data: State, + _room_id: String, + _event_type: String, + _txn_id: String, + body: Ruma, +) -> MatrixResult { + let mut unsigned = serde_json::Map::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); + + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body.clone(), + Some(unsigned), + None, + ); + MatrixResult(Ok(create_message_event::Response { event_id })) +} + +#[put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" +)] +pub fn create_state_event_for_key_route( + data: State, + _room_id: String, + _event_type: String, + _state_key: String, + body: Ruma, +) -> MatrixResult { + // Reponse of with/without key is the same + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body.clone(), + None, + Some(body.state_key.clone()), + ); + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) +} + +#[put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" +)] +pub fn create_state_event_for_empty_key_route( + data: State, + _room_id: String, + _event_type: String, + body: Ruma, +) -> MatrixResult { + // Reponse of with/without key is the same + let event_id = data.pdu_append( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + body.event_type.clone(), + body.json_body, + None, + Some("".to_owned()), + ); + 
MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) +} + +#[get("/_matrix/client/r0/sync", data = "")] +pub fn sync_route( + data: State, + body: Ruma, +) -> MatrixResult { + std::thread::sleep(Duration::from_millis(200)); + let next_batch = data.last_pdu_index().to_string(); + + let mut joined_rooms = HashMap::new(); + let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); + for room_id in joined_roomids { + let pdus = if let Some(since) = body.since.clone().and_then(|string| string.parse().ok()) { + data.pdus_since(&room_id, since) + } else { + data.pdus_all(&room_id) + }; + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); + + joined_rooms.insert( + room_id.try_into().unwrap(), + sync_events::JoinedRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + summary: sync_events::RoomSummary { + heroes: Vec::new(), + joined_member_count: None, + invited_member_count: None, + }, + unread_notifications: sync_events::UnreadNotificationsCount { + highlight_count: None, + notification_count: None, + }, + timeline: sync_events::Timeline { + limited: Some(false), + prev_batch: Some("".to_owned()), + events: room_events, + }, + state: sync_events::State { events: Vec::new() }, + ephemeral: sync_events::Ephemeral { events: Vec::new() }, + }, + ); + } + + MatrixResult(Ok(sync_events::Response { + next_batch, + rooms: sync_events::Rooms { + leave: Default::default(), + join: joined_rooms, + invite: Default::default(), + }, + presence: sync_events::Presence { events: Vec::new() }, + device_lists: Default::default(), + device_one_time_keys_count: Default::default(), + to_device: sync_events::ToDevice { events: Vec::new() }, + })) +} + +#[options("/<_segments..>")] +pub fn options_route(_segments: PathBuf) -> MatrixResult { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "This is the options route.".to_owned(), + status_code: http::StatusCode::OK, + })) +} diff --git a/src/main.rs b/src/main.rs index e7b49c7..5a5eaa0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ #![feature(proc_macro_hygiene, decl_macro)] +mod client_server; mod data; mod database; mod pdu; @@ -12,804 +13,44 @@ mod test; pub use data::Data; pub use database::Database; pub use pdu::PduEvent; +pub use ruma_wrapper::{MatrixResult, Ruma}; -use log::debug; -use rocket::{get, options, post, put, routes, State}; -use ruma_client_api::{ - error::{Error, ErrorKind}, - r0::{ - account::{ - register, AuthenticationFlow, UserInteractiveAuthenticationInfo, - UserInteractiveAuthenticationResponse, - }, - alias::get_alias, - config::{get_global_account_data, set_global_account_data}, - directory::{self, get_public_rooms_filtered}, - filter::{self, create_filter, get_filter}, - keys::{get_keys, upload_keys}, - membership::{join_room_by_id, join_room_by_id_or_alias}, - message::create_message_event, - presence::set_presence, - profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, - }, - push::get_pushrules_all, - read_marker::set_read_marker, - room::create_room, - session::{get_login_types, login}, - state::{create_state_event_for_empty_key, create_state_event_for_key}, - sync::sync_events, - thirdparty::get_protocols, - }, - unversioned::get_supported_versions, -}; -use ruma_events::{collections::only::Event, EventType}; -use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; -use ruma_wrapper::{MatrixResult, Ruma}; -use serde_json::json; -use std::{ - collections::HashMap, - 
convert::{TryFrom, TryInto}, - path::PathBuf, - time::Duration, -}; - -const GUEST_NAME_LENGTH: usize = 10; -const DEVICE_ID_LENGTH: usize = 10; -const SESSION_ID_LENGTH: usize = 256; -const TOKEN_LENGTH: usize = 256; - -#[get("/_matrix/client/versions")] -fn get_supported_versions_route() -> MatrixResult { - MatrixResult(Ok(get_supported_versions::Response { - versions: vec!["r0.6.0".to_owned()], - unstable_features: HashMap::new(), - })) -} - -#[post("/_matrix/client/r0/register", data = "")] -fn register_route( - data: State, - body: Ruma, -) -> MatrixResult { - if body.auth.is_none() { - return MatrixResult(Err(UserInteractiveAuthenticationResponse::AuthResponse( - UserInteractiveAuthenticationInfo { - flows: vec![AuthenticationFlow { - stages: vec!["m.login.dummy".to_owned()], - }], - completed: vec![], - params: json!({}), - session: Some(utils::random_string(SESSION_ID_LENGTH)), - }, - ))); - } - - // Validate user id - let user_id: UserId = match (*format!( - "@{}:{}", - body.username - .clone() - .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), - data.hostname() - )) - .try_into() - { - Err(_) => { - debug!("Username invalid"); - return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( - Error { - kind: ErrorKind::InvalidUsername, - message: "Username was invalid.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }, - ))); - } - Ok(user_id) => user_id, - }; - - // Check if username is creative enough - if data.user_exists(&user_id) { - debug!("ID already taken"); - return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( - Error { - kind: ErrorKind::UserInUse, - message: "Desired user ID is already taken.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }, - ))); - } - - // Create user - data.user_add(&user_id, body.password.clone()); - - // Generate new device id if the user didn't specify one - let device_id = body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); - - // Add device - data.device_add(&user_id, &device_id); - - // Generate new token for the device - let token = utils::random_string(TOKEN_LENGTH); - data.token_replace(&user_id, &device_id, token.clone()); - - MatrixResult(Ok(register::Response { - access_token: Some(token), - user_id, - device_id: Some(device_id), - })) -} - -#[get("/_matrix/client/r0/login", data = "<_body>")] -fn get_login_route( - _body: Ruma, -) -> MatrixResult { - MatrixResult(Ok(get_login_types::Response { - flows: vec![get_login_types::LoginType::Password], - })) -} - -#[post("/_matrix/client/r0/login", data = "")] -fn login_route(data: State, body: Ruma) -> MatrixResult { - // Validate login method - let user_id = - if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = - (body.user.clone(), body.login_info.clone()) - { - if !username.contains(':') { - username = format!("@{}:{}", username, data.hostname()); - } - if let Ok(user_id) = (*username).try_into() { - // Check password (this also checks if the user exists - if let Some(correct_password) = data.password_get(&user_id) { - if password == correct_password { - // Success! - user_id - } else { - debug!("Invalid password."); - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); - } - } else { - debug!("UserId does not exist (has no assigned password). 
Can't log in."); - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); - } - } else { - debug!("Invalid UserId."); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidUsername, - message: "Bad user id.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - } else { - debug!("Bad login type"); - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Bad login type.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - }; - - // Generate new device id if the user didn't specify one - let device_id = body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); - - // Add device - data.device_add(&user_id, &device_id); - - // Generate a new token for the device - let token = utils::random_string(TOKEN_LENGTH); - data.token_replace(&user_id, &device_id, token.clone()); - - MatrixResult(Ok(login::Response { - user_id, - access_token: token, - home_server: Some(data.hostname().to_owned()), - device_id, - well_known: None, - })) -} - -#[get("/_matrix/client/r0/pushrules")] -fn get_pushrules_all_route() -> MatrixResult { - // TODO - MatrixResult(Ok(get_pushrules_all::Response { - global: HashMap::new(), - })) -} - -#[get( - "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", - data = "" -)] -fn get_filter_route( - body: Ruma, - _user_id: String, - _filter_id: String, -) -> MatrixResult { - // TODO - MatrixResult(Ok(get_filter::Response { - filter: filter::FilterDefinition { - event_fields: None, - event_format: None, - account_data: None, - room: None, - presence: None, - }, - })) -} - -#[post("/_matrix/client/r0/user/<_user_id>/filter", data = "")] -fn create_filter_route( - body: Ruma, - _user_id: String, -) -> MatrixResult { - // TODO - MatrixResult(Ok(create_filter::Response { - filter_id: utils::random_string(10), - })) -} - -#[put( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" -)] -fn set_global_account_data_route( - body: Ruma, - _user_id: String, - _type: String, -) -> MatrixResult { - // TODO - MatrixResult(Ok(set_global_account_data::Response)) -} - -#[get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" -)] -fn get_global_account_data_route( - body: Ruma, - _user_id: String, - _type: String, -) -> MatrixResult { - // TODO - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Data not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) -} - -#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] -fn set_displayname_route( - data: State, - body: Ruma, - _user_id: String, -) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); - - // Send error on None - // Synapse returns a parsing error but the spec doesn't require this - if body.displayname.is_none() { - debug!("Request was missing the displayname payload."); - return MatrixResult(Err(Error { - kind: ErrorKind::MissingParam, - message: "Missing displayname.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - - if let Some(displayname) = &body.displayname { - // Some("") will clear the displayname - if displayname == "" { - data.displayname_remove(&user_id); - } else { - data.displayname_set(&user_id, body.displayname.clone()); - // TODO send a new m.room.member join event with the updated displayname - // TODO send a new m.presence event with the updated displayname - } - } - - 
MatrixResult(Ok(set_display_name::Response)) -} - -#[get( - "/_matrix/client/r0/profile//displayname", - data = "" -)] -fn get_displayname_route( - data: State, - body: Ruma, - user_id_raw: String, -) -> MatrixResult { - let user_id = (*body).user_id.clone(); - if !data.user_exists(&user_id) { - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - if let Some(displayname) = data.displayname_get(&user_id) { - return MatrixResult(Ok(get_display_name::Response { - displayname: Some(displayname), - })); - } - - // The user has no displayname - MatrixResult(Ok(get_display_name::Response { displayname: None })) -} - -#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] -fn set_avatar_url_route( - data: State, - body: Ruma, - _user_id: String, -) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); - - if !body.avatar_url.starts_with("mxc://") { - debug!("Request contains an invalid avatar_url."); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidParam, - message: "avatar_url has to start with mxc://.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - - // TODO in the future when we can handle media uploads make sure that this url is our own server - // TODO also make sure this is valid mxc:// format (not only starting with it) - - if body.avatar_url == "" { - data.avatar_url_remove(&user_id); - } else { - data.avatar_url_set(&user_id, body.avatar_url.clone()); - // TODO send a new m.room.member join event with the updated avatar_url - // TODO send a new m.presence event with the updated avatar_url - } - - MatrixResult(Ok(set_avatar_url::Response)) -} - -#[get("/_matrix/client/r0/profile//avatar_url", data = "")] -fn get_avatar_url_route( - data: State, - body: Ruma, - user_id_raw: String, -) -> MatrixResult { - let user_id = (*body).user_id.clone(); - if !data.user_exists(&user_id) { - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - if let Some(avatar_url) = data.avatar_url_get(&user_id) { - return MatrixResult(Ok(get_avatar_url::Response { - avatar_url: Some(avatar_url), - })); - } - - // The user has no avatar - MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) -} - -#[get("/_matrix/client/r0/profile/", data = "")] -fn get_profile_route( - data: State, - body: Ruma, - user_id_raw: String, -) -> MatrixResult { - let user_id = (*body).user_id.clone(); - let avatar_url = data.avatar_url_get(&user_id); - let displayname = data.displayname_get(&user_id); - - if avatar_url.is_some() || displayname.is_some() { - return MatrixResult(Ok(get_profile::Response { - avatar_url, - displayname, - })); - } - - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) -} - -#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] -fn set_presence_route( - body: Ruma, - _user_id: String, -) -> MatrixResult { - // TODO - MatrixResult(Ok(set_presence::Response)) -} - -#[post("/_matrix/client/r0/keys/query", data = "")] 
-fn get_keys_route(body: Ruma) -> MatrixResult { - // TODO - MatrixResult(Ok(get_keys::Response { - failures: HashMap::new(), - device_keys: HashMap::new(), - })) -} - -#[post("/_matrix/client/r0/keys/upload", data = "")] -fn upload_keys_route( - data: State, - body: Ruma, -) -> MatrixResult { - MatrixResult(Ok(upload_keys::Response { - one_time_key_counts: HashMap::new(), - })) -} - -#[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] -fn set_read_marker_route( - data: State, - body: Ruma, - _room_id: String, -) -> MatrixResult { - MatrixResult(Ok(set_read_marker::Response)) -} - -#[post("/_matrix/client/r0/createRoom", data = "")] -fn create_room_route( - data: State, - body: Ruma, -) -> MatrixResult { - // TODO: check if room is unique - let room_id = RoomId::new(data.hostname()).expect("host is valid"); - let user_id = body.user_id.clone().expect("user is authenticated"); - - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomCreate, - json!({ "creator": user_id }), - None, - Some("".to_owned()), - ); - - if let Some(name) = &body.name { - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomName, - json!({ "name": name }), - None, - Some("".to_owned()), - ); - } - - if let Some(topic) = &body.topic { - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomTopic, - json!({ "topic": topic }), - None, - Some("".to_owned()), - ); - } - - data.room_join(&room_id, &user_id); - - MatrixResult(Ok(create_room::Response { room_id })) -} - -#[get("/_matrix/client/r0/directory/room/")] -fn get_alias_route(room_alias: String) -> MatrixResult { - // TODO - let room_id = match &*room_alias { - "#room:localhost" => "!xclkjvdlfj:localhost", - _ => { - debug!("Room not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - } - .try_into() - .unwrap(); - - MatrixResult(Ok(get_alias::Response { - room_id, - servers: vec!["localhost".to_owned()], - })) -} - -#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] -fn join_room_by_id_route( - data: State, - body: Ruma, - _room_id: String, -) -> MatrixResult { - if data.room_join( - &body.room_id, - body.user_id.as_ref().expect("user is authenticated"), - ) { - MatrixResult(Ok(join_room_by_id::Response { - room_id: body.room_id.clone(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) - } -} - -#[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] -fn join_room_by_id_or_alias_route( - data: State, - body: Ruma, - _room_id_or_alias: String, -) -> MatrixResult { - let room_id = match &body.room_id_or_alias { - RoomIdOrAliasId::RoomAliasId(alias) => match alias.alias() { - "#room:localhost" => "!xclkjvdlfj:localhost".try_into().unwrap(), - _ => { - debug!("Room not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - }, - - RoomIdOrAliasId::RoomId(id) => id.clone(), - }; - - if data.room_join( - &room_id, - body.user_id.as_ref().expect("user is authenticated"), - ) { - MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) - } -} - 
-#[post("/_matrix/client/r0/publicRooms", data = "")] -fn get_public_rooms_filtered_route( - data: State, - body: Ruma, -) -> MatrixResult { - let chunk = data - .rooms_all() - .into_iter() - .map(|room_id| directory::PublicRoomsChunk { - aliases: None, - canonical_alias: None, - name: None, - num_joined_members: data.room_users(&room_id).into(), - room_id, - topic: None, - world_readable: false, - guest_can_join: true, - avatar_url: None, - }) - .collect::>(); - - let total_room_count_estimate = (chunk.len() as u32).into(); - - MatrixResult(Ok(get_public_rooms_filtered::Response { - chunk, - prev_batch: None, - next_batch: None, - total_room_count_estimate: Some(total_room_count_estimate), - })) -} - -#[get("/_matrix/client/r0/thirdparty/protocols", data = "")] -fn get_protocols_route( - body: Ruma, -) -> MatrixResult { - MatrixResult(Ok(get_protocols::Response { - protocols: HashMap::new(), - })) -} - -#[put( - "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", - data = "" -)] -fn create_message_event_route( - data: State, - _room_id: String, - _event_type: String, - _txn_id: String, - body: Ruma, -) -> MatrixResult { - let mut unsigned = serde_json::Map::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - - let event_id = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body.clone(), - Some(unsigned), - None, - ); - MatrixResult(Ok(create_message_event::Response { event_id })) -} - -#[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", - data = "" -)] -fn create_state_event_for_key_route( - data: State, - _room_id: String, - _event_type: String, - _state_key: String, - body: Ruma, -) -> MatrixResult { - // Reponse of with/without key is the same - let event_id = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body.clone(), - None, - Some(body.state_key.clone()), - ); - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) -} - -#[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" -)] -fn create_state_event_for_empty_key_route( - data: State, - _room_id: String, - _event_type: String, - body: Ruma, -) -> MatrixResult { - // Reponse of with/without key is the same - let event_id = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body, - None, - Some("".to_owned()), - ); - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) -} - -#[get("/_matrix/client/r0/sync", data = "")] -fn sync_route( - data: State, - body: Ruma, -) -> MatrixResult { - std::thread::sleep(Duration::from_millis(200)); - let next_batch = data.last_pdu_index().to_string(); - - let mut joined_rooms = HashMap::new(); - let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); - for room_id in joined_roomids { - let pdus = if let Some(since) = body.since.clone().and_then(|string| string.parse().ok()) { - data.pdus_since(&room_id, since) - } else { - data.pdus_all(&room_id) - }; - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); - - joined_rooms.insert( - room_id.try_into().unwrap(), - sync_events::JoinedRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - summary: sync_events::RoomSummary { - heroes: Vec::new(), - joined_member_count: None, - 
invited_member_count: None, - }, - unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, - notification_count: None, - }, - timeline: sync_events::Timeline { - limited: Some(false), - prev_batch: Some("".to_owned()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, - ephemeral: sync_events::Ephemeral { events: Vec::new() }, - }, - ); - } - - MatrixResult(Ok(sync_events::Response { - next_batch, - rooms: sync_events::Rooms { - leave: Default::default(), - join: joined_rooms, - invite: Default::default(), - }, - presence: sync_events::Presence { events: Vec::new() }, - device_lists: Default::default(), - device_one_time_keys_count: Default::default(), - to_device: sync_events::ToDevice { events: Vec::new() }, - })) -} - -#[options("/<_segments..>")] -fn options_route(_segments: PathBuf) -> MatrixResult { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "This is the options route.".to_owned(), - status_code: http::StatusCode::OK, - })) -} +use rocket::routes; fn setup_rocket(data: Data) -> rocket::Rocket { rocket::ignite() .mount( "/", routes![ - get_supported_versions_route, - register_route, - get_login_route, - login_route, - get_pushrules_all_route, - get_filter_route, - create_filter_route, - set_global_account_data_route, - get_global_account_data_route, - set_displayname_route, - get_displayname_route, - set_avatar_url_route, - get_avatar_url_route, - get_profile_route, - set_presence_route, - get_keys_route, - upload_keys_route, - set_read_marker_route, - create_room_route, - get_alias_route, - join_room_by_id_route, - join_room_by_id_or_alias_route, - get_public_rooms_filtered_route, - get_protocols_route, - create_message_event_route, - create_state_event_for_key_route, - create_state_event_for_empty_key_route, - sync_route, - options_route, + client_server::get_supported_versions_route, + client_server::register_route, + client_server::get_login_route, + client_server::login_route, + client_server::get_pushrules_all_route, + client_server::get_filter_route, + client_server::create_filter_route, + client_server::set_global_account_data_route, + client_server::get_global_account_data_route, + client_server::set_displayname_route, + client_server::get_displayname_route, + client_server::set_avatar_url_route, + client_server::get_avatar_url_route, + client_server::get_profile_route, + client_server::set_presence_route, + client_server::get_keys_route, + client_server::upload_keys_route, + client_server::set_read_marker_route, + client_server::create_room_route, + client_server::get_alias_route, + client_server::join_room_by_id_route, + client_server::join_room_by_id_or_alias_route, + client_server::get_public_rooms_filtered_route, + client_server::get_protocols_route, + client_server::create_message_event_route, + client_server::create_state_event_for_key_route, + client_server::create_state_event_for_empty_key_route, + client_server::sync_route, + client_server::options_route, ], ) .manage(data) From ab8c2f1e5e3d395def2d1d9f4ac9f6ab7924516e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 11 Apr 2020 09:35:06 +0200 Subject: [PATCH 0048/1727] Rename pduid_pdus to pduid_pdu --- src/data.rs | 12 ++++++------ src/database.rs | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/data.rs b/src/data.rs index 583a986..b26b899 100644 --- a/src/data.rs +++ b/src/data.rs @@ -189,7 +189,7 @@ impl Data { prefix.extend_from_slice(room_id.to_string().as_bytes()); prefix.push(b'#'); // 
Add delimiter so we don't find rooms starting with the same id - if let Some((key, _)) = self.db.pduid_pdus.get_gt(&prefix).unwrap() { + if let Some((key, _)) = self.db.pduid_pdu.get_gt(&prefix).unwrap() { if key.starts_with(&prefix) { true } else { @@ -238,7 +238,7 @@ impl Data { serde_json::from_slice( &self .db - .pduid_pdus + .pduid_pdu .get(pdu_id) .unwrap() .expect("eventid_pduid in db is valid"), @@ -340,7 +340,7 @@ impl Data { let index = utils::u64_from_bytes( &self .db - .pduid_pdus + .pduid_pdu .update_and_fetch(&count_key, utils::increment) .unwrap() .unwrap(), @@ -353,7 +353,7 @@ impl Data { pdu_id.extend_from_slice(&index.to_be_bytes()); self.db - .pduid_pdus + .pduid_pdu .insert(&pdu_id, &*serde_json::to_string(&pdu).unwrap()) .unwrap(); @@ -375,7 +375,7 @@ impl Data { utils::u64_from_bytes( &self .db - .pduid_pdus + .pduid_pdu .get(&count_key) .unwrap() .unwrap_or_else(|| (&0_u64.to_be_bytes()).into()), @@ -394,7 +394,7 @@ impl Data { let mut current = prefix.clone(); current.extend_from_slice(&since.to_be_bytes()); - while let Some((key, value)) = self.db.pduid_pdus.get_gt(¤t).unwrap() { + while let Some((key, value)) = self.db.pduid_pdu.get_gt(¤t).unwrap() { if key.starts_with(&prefix) { current = key.to_vec(); pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); diff --git a/src/database.rs b/src/database.rs index f17e76f..7a30861 100644 --- a/src/database.rs +++ b/src/database.rs @@ -57,7 +57,7 @@ pub struct Database { pub userid_avatarurl: sled::Tree, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, - pub pduid_pdus: sled::Tree, + pub pduid_pdu: sled::Tree, // PduId = RoomId + Since pub eventid_pduid: sled::Tree, pub roomid_pduleaves: MultiValue, pub roomid_userids: MultiValue, @@ -92,7 +92,7 @@ impl Database { userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), deviceid_token: db.open_tree("deviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), - pduid_pdus: db.open_tree("pduid_pdus").unwrap(), + pduid_pdu: db.open_tree("pduid_pdu").unwrap(), eventid_pduid: db.open_tree("eventid_pduid").unwrap(), roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), @@ -174,8 +174,8 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("\n# PDU Id -> PDUs:"); - for (k, v) in self.pduid_pdus.iter().map(|r| r.unwrap()) { + println!("\n# PDU Id -> PDU:"); + for (k, v) in self.pduid_pdu.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), From 3b9cadeec2ac139f1dfe489bafe10ed244526ca8 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 11 Apr 2020 20:03:22 +0200 Subject: [PATCH 0049/1727] feat: read receipts --- src/client_server.rs | 49 ++++++++++++++++---- src/data.rs | 107 ++++++++++++++++++++++++++++++++++++++++--- src/database.rs | 57 +++++++++++++++++++++-- src/ruma_wrapper.rs | 3 +- 4 files changed, 195 insertions(+), 21 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 4d81066..3c36c7c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -30,7 +30,10 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::{collections::only::Event, EventType}; +use ruma_events::{ + collections::only::{Event as EduEvent, Event}, + EventType, +}; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use serde_json::json; use std::{ @@ -458,6 +461,33 @@ pub fn set_read_marker_route( body: Ruma, _room_id: String, ) -> MatrixResult { + let 
user_id = body.user_id.clone().expect("user is authenticated"); + // TODO: Fully read + if let Some(event) = &body.read_receipt { + let mut user_receipts = HashMap::new(); + user_receipts.insert( + user_id.clone(), + ruma_events::receipt::Receipt { + ts: Some(utils::millis_since_unix_epoch()), + }, + ); + let mut receipt_content = HashMap::new(); + receipt_content.insert( + event.clone(), + ruma_events::receipt::Receipts { + read: Some(user_receipts), + }, + ); + + data.roomlatest_update( + &user_id, + &body.room_id, + EduEvent::Receipt(ruma_events::receipt::ReceiptEvent { + content: receipt_content, + room_id: None, // None because it can be inferred + }), + ); + } MatrixResult(Ok(set_read_marker::Response)) } @@ -707,16 +737,17 @@ pub fn sync_route( let mut joined_rooms = HashMap::new(); let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); + let since = body + .since + .clone() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); for room_id in joined_roomids { - let pdus = if let Some(since) = body.since.clone().and_then(|string| string.parse().ok()) { - data.pdus_since(&room_id, since) - } else { - data.pdus_all(&room_id) - }; + let pdus = { data.pdus_since(&room_id, since) }; let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); joined_rooms.insert( - room_id.try_into().unwrap(), + room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { account_data: sync_events::AccountData { events: Vec::new() }, summary: sync_events::RoomSummary { @@ -734,7 +765,9 @@ pub fn sync_route( events: room_events, }, state: sync_events::State { events: Vec::new() }, - ephemeral: sync_events::Ephemeral { events: Vec::new() }, + ephemeral: sync_events::Ephemeral { + events: data.roomlatests_since(&room_id, since), + }, }, ); } diff --git a/src/data.rs b/src/data.rs index b26b899..6d43278 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,7 +1,8 @@ use crate::{utils, Database, PduEvent}; -use ruma_events::EventType; +use ruma_events::{collections::only::Event as EduEvent, EventResult, EventType}; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; +use serde_json::json; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -167,6 +168,15 @@ impl Data { user_id.to_string().as_bytes().into(), ); + self.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "join"}), + None, + Some(user_id.to_string()), + ); + true } @@ -187,7 +197,7 @@ impl Data { // Create the first part of the full pdu id let mut prefix = vec![b'd']; prefix.extend_from_slice(room_id.to_string().as_bytes()); - prefix.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id if let Some((key, _)) = self.db.pduid_pdu.get_gt(&prefix).unwrap() { if key.starts_with(&prefix) { @@ -334,14 +344,12 @@ impl Data { // The new value will need a new index. 
We store the last used index in 'n' // The count will go up regardless of the room_id // This is also the next_batch/since value - let count_key: Vec = vec![b'n']; - // Increment the last index and use that let index = utils::u64_from_bytes( &self .db .pduid_pdu - .update_and_fetch(&count_key, utils::increment) + .update_and_fetch(b"n", utils::increment) .unwrap() .unwrap(), ); @@ -349,7 +357,7 @@ impl Data { let mut pdu_id = vec![b'd']; pdu_id.extend_from_slice(room_id.to_string().as_bytes()); - pdu_id.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id pdu_id.extend_from_slice(&index.to_be_bytes()); self.db @@ -389,7 +397,7 @@ impl Data { // Create the first part of the full pdu id let mut prefix = vec![b'd']; prefix.extend_from_slice(room_id.to_string().as_bytes()); - prefix.push(b'#'); // Add delimiter so we don't find rooms starting with the same id + prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id let mut current = prefix.clone(); current.extend_from_slice(&since.to_be_bytes()); @@ -406,6 +414,91 @@ impl Data { pdus } + pub fn roomlatest_update(&self, user_id: &UserId, room_id: &RoomId, event: EduEvent) { + let mut prefix = vec![b'd']; + prefix.extend_from_slice(room_id.to_string().as_bytes()); + prefix.push(0xff); + + // Start with last + if let Some(mut current) = self + .db + .roomlatestid_roomlatest + .scan_prefix(&prefix) + .keys() + .next_back() + .map(|c| c.unwrap()) + { + // Remove old marker (There should at most one) + loop { + if !current.starts_with(&prefix) { + // We're in another room + break; + } + if current.rsplitn(2, |&b| b == 0xff).next().unwrap() + == user_id.to_string().as_bytes() + { + // This is the old room_latest + self.db.roomlatestid_roomlatest.remove(current).unwrap(); + break; + } + // Else, try the event before that + if let Some((k, _)) = self.db.roomlatestid_roomlatest.get_lt(current).unwrap() { + current = k; + } else { + break; + } + } + } + + // Increment the last index and use that + let index = utils::u64_from_bytes( + &self + .db + .pduid_pdu + .update_and_fetch(b"n", utils::increment) + .unwrap() + .unwrap(), + ); + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&index.to_be_bytes()); + room_latest_id.push(0xff); + room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + + self.db + .roomlatestid_roomlatest + .insert(room_latest_id, &*serde_json::to_string(&event).unwrap()) + .unwrap(); + } + + /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. 
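// Illustrative sketch (not part of this patch): every "since"-style query in data.rs
// follows the same pattern over a sled tree. Keys are laid out as
//   room_id bytes + 0xff delimiter + big-endian u64 counter (+ optional suffix),
// so seeking with get_gt(prefix + since) and walking forward while the key still starts
// with the prefix yields exactly the entries newer than `since`. The helper name
// `entries_since` is made up for illustration; the real implementations are `pdus_since`
// above and `roomlatests_since`, which follows.
fn entries_since(tree: &sled::Tree, room_id: &str, since: u64) -> Vec<Vec<u8>> {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff); // delimiter, so one room id cannot be a prefix of another match

    let mut current = prefix.clone();
    current.extend_from_slice(&since.to_be_bytes());

    let mut values = Vec::new();
    // get_gt returns the first entry whose key is strictly greater than `current`
    while let Some((key, value)) = tree.get_gt(&current).unwrap() {
        if !key.starts_with(&prefix) {
            break; // left this room's key range
        }
        current = key.to_vec();
        values.push(value.to_vec());
    }
    values
}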
+ pub fn roomlatests_since(&self, room_id: &RoomId, since: u64) -> Vec { + let mut room_latests = Vec::new(); + + let mut prefix = vec![b'd']; + prefix.extend_from_slice(room_id.to_string().as_bytes()); + prefix.push(0xff); + + let mut current = prefix.clone(); + current.extend_from_slice(&since.to_be_bytes()); + + while let Some((key, value)) = self.db.roomlatestid_roomlatest.get_gt(¤t).unwrap() { + if key.starts_with(&prefix) { + current = key.to_vec(); + room_latests.push( + serde_json::from_slice::>(&value) + .expect("room_latest in db is valid") + .into_result() + .expect("room_latest in db is valid"), + ); + } else { + break; + } + } + + room_latests + } + pub fn debug(&self) { self.db.debug(); } diff --git a/src/database.rs b/src/database.rs index 7a30861..18de222 100644 --- a/src/database.rs +++ b/src/database.rs @@ -57,18 +57,23 @@ pub struct Database { pub userid_avatarurl: sled::Tree, pub deviceid_token: sled::Tree, pub token_userid: sled::Tree, - pub pduid_pdu: sled::Tree, // PduId = RoomId + Since + pub pduid_pdu: sled::Tree, // PduId = 'd' + RoomId + Since (global since counter is at 'n') pub eventid_pduid: sled::Tree, pub roomid_pduleaves: MultiValue, pub roomid_userids: MultiValue, pub userid_roomids: MultiValue, + // EDUs: + pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Since + UserId TODO: Types + pub timeofremoval_roomrelevants: MultiValue, // Typing + pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Since + pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Since + Type + UserId _db: sled::Db, } impl Database { /// Tries to remove the old database but ignores all errors. pub fn try_remove(hostname: &str) { - let mut path = ProjectDirs::from("xyz", "koesters", "matrixserver") + let mut path = ProjectDirs::from("xyz", "koesters", "conduit") .unwrap() .data_dir() .to_path_buf(); @@ -78,7 +83,7 @@ impl Database { /// Load an existing database or create a new one. 
pub fn load_or_create(hostname: &str) -> Self { - let mut path = ProjectDirs::from("xyz", "koesters", "matrixserver") + let mut path = ProjectDirs::from("xyz", "koesters", "conduit") .unwrap() .data_dir() .to_path_buf(); @@ -97,6 +102,12 @@ impl Database { roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), + roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), + timeofremoval_roomrelevants: MultiValue( + db.open_tree("timeofremoval_roomrelevants").unwrap(), + ), + globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), + globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), _db: db, } } @@ -118,7 +129,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# UserId -> Displayname:"); + println!("\n# UserId -> Displayname:"); for (k, v) in self.userid_displayname.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", @@ -126,7 +137,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("# UserId -> AvatarURL:"); + println!("\n# UserId -> AvatarURL:"); for (k, v) in self.userid_avatarurl.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", @@ -190,5 +201,41 @@ impl Database { String::from_utf8_lossy(&v), ); } + println!("\n# RoomLatestId -> RoomLatest"); + for (k, v) in self.roomlatestid_roomlatest.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# TimeOfRemoval -> RoomRelevants Id:"); + for (k, v) in self + .timeofremoval_roomrelevants + .iter_all() + .map(|r| r.unwrap()) + { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# GlobalAllId -> GlobalAll:"); + for (k, v) in self.globalallid_globalall.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } + println!("\n# GlobalLatestId -> GlobalLatest:"); + for (k, v) in self.globallatestid_globallatest.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index f39ef75..81b7ea8 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,3 +1,4 @@ +use log::warn; use rocket::{ data::{Data, FromData, FromDataFuture, Transform, TransformFuture, Transformed}, http::Status, @@ -106,7 +107,7 @@ where }, }), Err(e) => { - log::error!("{:?}", e); + warn!("{:?}", e); Failure((Status::InternalServerError, ())) } } From 3debb6203cae400873c7b55335fc95ee13e277e7 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 12 Apr 2020 21:12:50 +0200 Subject: [PATCH 0050/1727] feat: handle typing events --- README.md | 3 +- Rocket.toml | 6 +-- src/client_server.rs | 41 +++++++++++++++-- src/data.rs | 104 ++++++++++++++++++++++++++++++++++++++++--- src/database.rs | 16 +++---- src/main.rs | 1 + src/utils.rs | 8 ++-- 7 files changed, 150 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index dc91303..acd70d2 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,8 @@ A Matrix Homeserver that's faster than others. 
- [x] Join rooms, lookup room ids - [x] Basic Riot web support - [x] Riot room discovery -- [ ] Riot read receipts +- [x] Riot read receipts +- [x] Typing indications - [ ] Riot presence - [ ] Password hashing - [ ] Proper room creation diff --git a/Rocket.toml b/Rocket.toml index f55e107..99c136d 100644 --- a/Rocket.toml +++ b/Rocket.toml @@ -2,6 +2,6 @@ address = "0.0.0.0" port = 14004 -#[global.tls] -#certs = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/fullchain.pem" -#key = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/privkey.pem" +[global.tls] +certs = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/fullchain.pem" +key = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/privkey.pem" diff --git a/src/client_server.rs b/src/client_server.rs index 3c36c7c..660da6c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -27,6 +27,7 @@ use ruma_client_api::{ state::{create_state_event_for_empty_key, create_state_event_for_key}, sync::sync_events, thirdparty::get_protocols, + typing::create_typing_event, }, unversioned::get_supported_versions, }; @@ -468,7 +469,7 @@ pub fn set_read_marker_route( user_receipts.insert( user_id.clone(), ruma_events::receipt::Receipt { - ts: Some(utils::millis_since_unix_epoch()), + ts: Some(utils::millis_since_unix_epoch().try_into().unwrap()), }, ); let mut receipt_content = HashMap::new(); @@ -491,6 +492,38 @@ pub fn set_read_marker_route( MatrixResult(Ok(set_read_marker::Response)) } +#[put( + "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", + data = "" +)] +pub fn create_typing_event_route( + data: State, + body: Ruma, + _room_id: String, + _user_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + let edu = EduEvent::Typing(ruma_events::typing::TypingEvent { + content: ruma_events::typing::TypingEventContent { + user_ids: vec![user_id.clone()], + }, + room_id: None, // None because it can be inferred + }); + + if body.typing { + data.roomactive_add( + edu, + &body.room_id, + body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + + utils::millis_since_unix_epoch().try_into().unwrap_or(0), + ); + } else { + data.roomactive_remove(edu, &body.room_id); + } + + MatrixResult(Ok(create_typing_event::Response)) +} + #[post("/_matrix/client/r0/createRoom", data = "")] pub fn create_room_route( data: State, @@ -745,6 +778,8 @@ pub fn sync_route( for room_id in joined_roomids { let pdus = { data.pdus_since(&room_id, since) }; let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); + let mut edus = data.roomlatests_since(&room_id, since); + edus.extend_from_slice(&data.roomactives_in(&room_id)); joined_rooms.insert( room_id.clone().try_into().unwrap(), @@ -765,9 +800,7 @@ pub fn sync_route( events: room_events, }, state: sync_events::State { events: Vec::new() }, - ephemeral: sync_events::Ephemeral { - events: data.roomlatests_since(&room_id, since), - }, + ephemeral: sync_events::Ephemeral { events: edus }, }, ); } diff --git a/src/data.rs b/src/data.rs index 6d43278..85909f8 100644 --- a/src/data.rs +++ b/src/data.rs @@ -316,7 +316,7 @@ impl Data { room_id: room_id.clone(), sender: sender.clone(), origin: self.hostname.clone(), - origin_server_ts: utils::millis_since_unix_epoch(), + origin_server_ts: utils::millis_since_unix_epoch().try_into().unwrap(), kind: event_type, content, state_key, @@ -415,8 +415,7 @@ impl Data { } pub fn roomlatest_update(&self, user_id: &UserId, room_id: &RoomId, event: EduEvent) { - let mut prefix = vec![b'd']; - 
prefix.extend_from_slice(room_id.to_string().as_bytes()); + let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // Start with last @@ -475,8 +474,7 @@ impl Data { pub fn roomlatests_since(&self, room_id: &RoomId, since: u64) -> Vec { let mut room_latests = Vec::new(); - let mut prefix = vec![b'd']; - prefix.extend_from_slice(room_id.to_string().as_bytes()); + let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); let mut current = prefix.clone(); @@ -499,6 +497,102 @@ impl Data { room_latests } + pub fn roomactive_add(&self, event: EduEvent, room_id: &RoomId, timeout: u64) { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut current = prefix.clone(); + + while let Some((key, _)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { + if key.starts_with(&prefix) + && utils::u64_from_bytes(key.split(|&c| c == 0xff).nth(1).unwrap()) + > utils::millis_since_unix_epoch().try_into().unwrap() + { + current = key.to_vec(); + self.db.roomactiveid_roomactive.remove(¤t).unwrap(); + } else { + break; + } + } + + // Increment the last index and use that + let index = utils::u64_from_bytes( + &self + .db + .pduid_pdu + .update_and_fetch(b"n", utils::increment) + .unwrap() + .unwrap(), + ); + + let mut room_active_id = prefix; + room_active_id.extend_from_slice(&timeout.to_be_bytes()); + room_active_id.push(0xff); + room_active_id.extend_from_slice(&index.to_be_bytes()); + + self.db + .roomactiveid_roomactive + .insert(room_active_id, &*serde_json::to_string(&event).unwrap()) + .unwrap(); + } + + pub fn roomactive_remove(&self, event: EduEvent, room_id: &RoomId) { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut current = prefix.clone(); + + let json = serde_json::to_string(&event).unwrap(); + + while let Some((key, value)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { + if key.starts_with(&prefix) { + current = key.to_vec(); + if value == json.as_bytes() { + self.db.roomactiveid_roomactive.remove(¤t).unwrap(); + break; + } + } else { + break; + } + } + } + + /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. 
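// Illustrative sketch (not part of this patch): `roomactive_add` stores each typing EDU
// under a key of the form room_id + 0xff + big-endian expiry millis + 0xff + counter, so
// the expiry time can be recovered from the key alone and stale entries dropped when the
// room is read. The helpers below only illustrate that decoding (they assume, like the
// split used above, that the timestamp bytes contain no 0xff); the real cleanup lives in
// `roomactive_add` and in `roomactives_in`, which follows.
use std::convert::TryInto;

fn typing_expiry(key: &[u8]) -> Option<u64> {
    // Segment 0 is the room id, segment 1 the big-endian expiry timestamp.
    let expiry_bytes = key.split(|&b| b == 0xff).nth(1)?;
    expiry_bytes.try_into().ok().map(u64::from_be_bytes)
}

fn is_still_active(key: &[u8], now_millis: u64) -> bool {
    typing_expiry(key).map_or(false, |expiry| expiry > now_millis)
}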
+ pub fn roomactives_in(&self, room_id: &RoomId) -> Vec { + let mut room_actives = Vec::new(); + + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut current = prefix.clone(); + current.extend_from_slice(&utils::millis_since_unix_epoch().to_be_bytes()); + + while let Some((key, value)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { + if key.starts_with(&prefix) { + current = key.to_vec(); + room_actives.push( + serde_json::from_slice::>(&value) + .expect("room_active in db is valid") + .into_result() + .expect("room_active in db is valid"), + ); + } else { + break; + } + } + + if room_actives.is_empty() { + return vec![EduEvent::Typing(ruma_events::typing::TypingEvent { + content: ruma_events::typing::TypingEventContent { + user_ids: Vec::new(), + }, + room_id: None, // None because it can be inferred + })]; + } else { + room_actives + } + } + pub fn debug(&self) { self.db.debug(); } diff --git a/src/database.rs b/src/database.rs index 18de222..c7a8623 100644 --- a/src/database.rs +++ b/src/database.rs @@ -64,7 +64,7 @@ pub struct Database { pub userid_roomids: MultiValue, // EDUs: pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Since + UserId TODO: Types - pub timeofremoval_roomrelevants: MultiValue, // Typing + pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Since pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Since pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Since + Type + UserId _db: sled::Db, @@ -103,9 +103,7 @@ impl Database { roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), - timeofremoval_roomrelevants: MultiValue( - db.open_tree("timeofremoval_roomrelevants").unwrap(), - ), + roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), _db: db, @@ -201,7 +199,7 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("\n# RoomLatestId -> RoomLatest"); + println!("\n# RoomLatestId -> RoomLatest:"); for (k, v) in self.roomlatestid_roomlatest.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", @@ -209,12 +207,8 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("\n# TimeOfRemoval -> RoomRelevants Id:"); - for (k, v) in self - .timeofremoval_roomrelevants - .iter_all() - .map(|r| r.unwrap()) - { + println!("\n# RoomActiveId -> RoomActives:"); + for (k, v) in self.roomactiveid_roomactive.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), diff --git a/src/main.rs b/src/main.rs index 5a5eaa0..d09a787 100644 --- a/src/main.rs +++ b/src/main.rs @@ -40,6 +40,7 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::get_keys_route, client_server::upload_keys_route, client_server::set_read_marker_route, + client_server::create_typing_event_route, client_server::create_room_route, client_server::get_alias_route, client_server::join_room_by_id_route, diff --git a/src/utils.rs b/src/utils.rs index e08e09f..5e94172 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -4,13 +4,11 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; -pub fn millis_since_unix_epoch() -> js_int::UInt { - (SystemTime::now() +pub fn millis_since_unix_epoch() -> u64 
{ + SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() - .as_millis() as u64) - .try_into() - .expect("time millis are <= MAX_SAFE_UINT") + .as_millis() as u64 } pub fn increment(old: Option<&[u8]>) -> Option> { From af1def50acf404edd1bc73d78782d177114df5f4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 12 Apr 2020 22:29:48 +0200 Subject: [PATCH 0051/1727] Update README.md --- README.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index acd70d2..169a1df 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ [![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) [![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) -#### Goals +#### What is the goal -A Matrix Homeserver that's faster than others. +A fast Matrix homeserver that's optimized for smaller, personal servers, instead of one server that has high scalability. #### What is it build on? @@ -14,7 +14,7 @@ A Matrix Homeserver that's faster than others. - [Sled](https://github.com/spacejam/sled): A simple (key, value) database with good performance - [Rocket](https://rocket.rs): A flexible web framework -#### Roadmap +#### What are the next steps? - [x] Register, login, authentication tokens - [x] Create room messages @@ -31,6 +31,10 @@ A Matrix Homeserver that's faster than others. - [ ] Basic federation - [ ] State resolution +#### How can I contribute? + +The best way to find something to work on is by joining the #conduit:koesters.xyz Matrix room and asking. + #### Donate Liberapay: From abcce95dd8eadcef233b96e082b865fa3cf764f3 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 14 Apr 2020 13:54:32 +0200 Subject: [PATCH 0052/1727] feat: invites, better public room dir, user search --- README.md | 1 + src/client_server.rs | 164 +++++++++++++++++++++++++++++++++++-------- src/data.rs | 82 ++++++++++++++++++++-- src/database.rs | 32 +++++++-- src/main.rs | 4 ++ src/pdu.rs | 16 ++++- 6 files changed, 257 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 169a1df..b2b42b0 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [x] Riot room discovery - [x] Riot read receipts - [x] Typing indications +- [x] Invites, user search - [ ] Riot presence - [ ] Password hashing - [ ] Proper room creation diff --git a/src/client_server.rs b/src/client_server.rs index 660da6c..f1c8a85 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1,7 +1,7 @@ -use crate::{utils, Data, Database, MatrixResult, PduEvent, Ruma}; +use crate::{utils, Data, MatrixResult, Ruma}; use log::debug; -use rocket::{get, options, post, put, routes, State}; +use rocket::{get, options, post, put, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ @@ -14,7 +14,7 @@ use ruma_client_api::{ directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{get_keys, upload_keys}, - membership::{join_room_by_id, join_room_by_id_or_alias}, + membership::{invite_user, join_room_by_id, join_room_by_id_or_alias}, message::create_message_event, presence::set_presence, profile::{ @@ -28,21 +28,14 @@ use ruma_client_api::{ sync::sync_events, thirdparty::get_protocols, typing::create_typing_event, + user_directory::search_users, }, unversioned::get_supported_versions, }; -use ruma_events::{ - 
collections::only::{Event as EduEvent, Event}, - EventType, -}; +use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use serde_json::json; -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - path::PathBuf, - time::Duration, -}; +use std::{collections::HashMap, convert::TryInto, path::PathBuf, time::Duration}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -133,10 +126,8 @@ pub fn register_route( })) } -#[get("/_matrix/client/r0/login", data = "<_body>")] -pub fn get_login_route( - _body: Ruma, -) -> MatrixResult { +#[get("/_matrix/client/r0/login")] +pub fn get_login_route() -> MatrixResult { MatrixResult(Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], })) @@ -451,6 +442,7 @@ pub fn upload_keys_route( data: State, body: Ruma, ) -> MatrixResult { + // TODO MatrixResult(Ok(upload_keys::Response { one_time_key_counts: HashMap::new(), })) @@ -542,6 +534,24 @@ pub fn create_room_route( Some("".to_owned()), ); + data.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomPowerLevels, + json!({ + "ban": 50, + "events_default": 0, + "invite": 50, + "kick": 50, + "redact": 50, + "state_default": 50, + "users": { user_id.to_string(): 100 }, + "users_default": 0 + }), + None, + Some("".to_owned()), + ); + if let Some(name) = &body.name { data.pdu_append( room_id.clone(), @@ -564,8 +574,14 @@ pub fn create_room_route( ); } + dbg!(&*body); + data.room_join(&room_id, &user_id); + for user in &body.invite { + data.room_invite(&user_id, &room_id, user); + } + MatrixResult(Ok(create_room::Response { room_id })) } @@ -650,27 +666,58 @@ pub fn join_room_by_id_or_alias_route( } } +#[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] +pub fn invite_user_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { + data.room_invite( + &body.user_id.as_ref().expect("user is authenticated"), + &body.room_id, + &user_id, + ); + MatrixResult(Ok(invite_user::Response)) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "User not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } +} + #[post("/_matrix/client/r0/publicRooms", data = "")] pub fn get_public_rooms_filtered_route( data: State, body: Ruma, ) -> MatrixResult { - let chunk = data + let mut chunk = data .rooms_all() .into_iter() - .map(|room_id| directory::PublicRoomsChunk { - aliases: None, - canonical_alias: None, - name: None, - num_joined_members: data.room_users(&room_id).into(), - room_id, - topic: None, - world_readable: false, - guest_can_join: true, - avatar_url: None, + .map(|room_id| { + let state = data.room_state(&room_id); + directory::PublicRoomsChunk { + aliases: None, + canonical_alias: None, + name: state + .get(&(EventType::RoomName, "".to_owned())) + .and_then(|s| s.content.get("name")) + .and_then(|n| n.as_str()) + .map(|n| n.to_owned()), + num_joined_members: data.room_users(&room_id).into(), + room_id, + topic: None, + world_readable: false, + guest_can_join: true, + avatar_url: None, + } }) .collect::>(); + chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + let total_room_count_estimate = (chunk.len() as u32).into(); MatrixResult(Ok(get_public_rooms_filtered::Response { @@ -681,10 +728,31 @@ pub fn get_public_rooms_filtered_route( })) } +#[post("/_matrix/client/r0/user_directory/search", data = 
"")] +pub fn search_users_route( + data: State, + body: Ruma, +) -> MatrixResult { + MatrixResult(Ok(search_users::Response { + results: data + .users_all() + .into_iter() + .filter(|user_id| user_id.to_string().contains(&body.search_term)) + .map(|user_id| search_users::User { + user_id, + display_name: None, + avatar_url: None, + }) + .collect(), + limited: false, + })) +} + #[get("/_matrix/client/r0/thirdparty/protocols", data = "")] pub fn get_protocols_route( body: Ruma, ) -> MatrixResult { + // TODO MatrixResult(Ok(get_protocols::Response { protocols: HashMap::new(), })) @@ -776,7 +844,7 @@ pub fn sync_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); for room_id in joined_roomids { - let pdus = { data.pdus_since(&room_id, since) }; + let pdus = data.pdus_since(&room_id, since); let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -805,12 +873,28 @@ pub fn sync_route( ); } + let mut invited_rooms = HashMap::new(); + for room_id in data.rooms_invited(body.user_id.as_ref().expect("user is authenticated")) { + let events = data + .pdus_since(&room_id, since) + .into_iter() + .filter_map(|pdu| pdu.to_stripped_state_event()) + .collect(); + + invited_rooms.insert( + room_id, + sync_events::InvitedRoom { + invite_state: sync_events::InviteState { events }, + }, + ); + } + MatrixResult(Ok(sync_events::Response { next_batch, rooms: sync_events::Rooms { leave: Default::default(), join: joined_rooms, - invite: Default::default(), + invite: invited_rooms, }, presence: sync_events::Presence { events: Vec::new() }, device_lists: Default::default(), @@ -819,6 +903,26 @@ pub fn sync_route( })) } +#[get("/_matrix/client/r0/voip/turnServer")] +pub fn turn_server_route() -> MatrixResult { + // TODO + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "There is no turn server yet.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + +#[post("/_matrix/client/r0/publicised_groups")] +pub fn publicised_groups_route() -> MatrixResult { + // TODO + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "There are no publicised groups yet.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) +} + #[options("/<_segments..>")] pub fn options_route(_segments: PathBuf) -> MatrixResult { MatrixResult(Err(Error { diff --git a/src/data.rs b/src/data.rs index 85909f8..815586f 100644 --- a/src/data.rs +++ b/src/data.rs @@ -52,6 +52,15 @@ impl Data { .and_then(|bytes| (*utils::string_from_bytes(&bytes)).try_into().ok()) } + pub fn users_all(&self) -> Vec { + self.db + .userid_password + .iter() + .keys() + .map(|k| UserId::try_from(&*utils::string_from_bytes(&k.unwrap())).unwrap()) + .collect() + } + /// Checks if the given password is equal to the one in the database. pub fn password_get(&self, user_id: &UserId) -> Option { self.db @@ -139,13 +148,16 @@ impl Data { .any(|device| device == device_id.as_bytes())); // Does the user have that device? 
// Remove old token - if let Some(old_token) = self.db.deviceid_token.get(device_id).unwrap() { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + if let Some(old_token) = self.db.userdeviceid_token.get(&key).unwrap() { self.db.token_userid.remove(old_token).unwrap(); // It will be removed from deviceid_token by the insert later } // Assign token to device_id - self.db.deviceid_token.insert(device_id, &*token).unwrap(); + self.db.userdeviceid_token.insert(key, &*token).unwrap(); // Assign token to user self.db @@ -167,6 +179,10 @@ impl Data { room_id.to_string().as_bytes(), user_id.to_string().as_bytes().into(), ); + self.db.userid_inviteroomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes(), + ); self.pdu_append( room_id.clone(), @@ -239,6 +255,52 @@ impl Data { .count() as u32 } + pub fn room_state(&self, room_id: &RoomId) -> HashMap<(EventType, String), PduEvent> { + let mut hashmap = HashMap::new(); + for pdu in self + .db + .roomstateid_pdu + .scan_prefix(&room_id.to_string().as_bytes()) + .values() + .map(|value| serde_json::from_slice::(&value.unwrap()).unwrap()) + { + hashmap.insert( + ( + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("state events have a state key"), + ), + pdu, + ); + } + hashmap + } + + pub fn room_invite(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { + self.pdu_append( + room_id.clone(), + sender.clone(), + EventType::RoomMember, + json!({"membership": "invite"}), + None, + Some(user_id.to_string()), + ); + self.db.userid_inviteroomids.add( + &user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + } + + pub fn rooms_invited(&self, user_id: &UserId) -> Vec { + self.db + .userid_inviteroomids + .get_iter(&user_id.to_string().as_bytes()) + .values() + .map(|key| RoomId::try_from(&*utils::string_from_bytes(&key.unwrap())).unwrap()) + .collect() + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid @@ -360,16 +422,24 @@ impl Data { pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id pdu_id.extend_from_slice(&index.to_be_bytes()); - self.db - .pduid_pdu - .insert(&pdu_id, &*serde_json::to_string(&pdu).unwrap()) - .unwrap(); + let pdu_json = serde_json::to_string(&pdu).unwrap(); + + self.db.pduid_pdu.insert(&pdu_id, &*pdu_json).unwrap(); self.db .eventid_pduid .insert(pdu.event_id.to_string(), pdu_id.clone()) .unwrap(); + if let Some(state_key) = pdu.state_key { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(dbg!(pdu.kind.to_string().as_bytes())); + key.push(0xff); + key.extend_from_slice(state_key.to_string().as_bytes()); + self.db.roomstateid_pdu.insert(key, &*pdu_json).unwrap(); + } + pdu.event_id } diff --git a/src/database.rs b/src/database.rs index c7a8623..041a215 100644 --- a/src/database.rs +++ b/src/database.rs @@ -27,6 +27,16 @@ impl MultiValue { } } + pub fn remove_value(&self, id: &[u8], value: &[u8]) { + if let Some(key) = self + .get_iter(id) + .find(|t| t.as_ref().unwrap().1 == value) + .map(|t| t.unwrap().0) + { + self.0.remove(key).unwrap(); + } + } + /// Add another value to the id. pub fn add(&self, id: &[u8], value: IVec) { // The new value will need a new index. 
We store the last used index in 'n' + id @@ -52,16 +62,18 @@ impl MultiValue { pub struct Database { pub userid_password: sled::Tree, - pub userid_deviceids: MultiValue, pub userid_displayname: sled::Tree, pub userid_avatarurl: sled::Tree, - pub deviceid_token: sled::Tree, + pub userid_deviceids: MultiValue, + pub userdeviceid_token: sled::Tree, pub token_userid: sled::Tree, pub pduid_pdu: sled::Tree, // PduId = 'd' + RoomId + Since (global since counter is at 'n') pub eventid_pduid: sled::Tree, pub roomid_pduleaves: MultiValue, + pub roomstateid_pdu: sled::Tree, // Room + StateType + StateKey pub roomid_userids: MultiValue, pub userid_roomids: MultiValue, + pub userid_inviteroomids: MultiValue, // EDUs: pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Since + UserId TODO: Types pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Since @@ -95,13 +107,15 @@ impl Database { userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), userid_displayname: db.open_tree("userid_displayname").unwrap(), userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), - deviceid_token: db.open_tree("deviceid_token").unwrap(), + userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), token_userid: db.open_tree("token_userid").unwrap(), pduid_pdu: db.open_tree("pduid_pdu").unwrap(), eventid_pduid: db.open_tree("eventid_pduid").unwrap(), roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), + roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), + userid_inviteroomids: MultiValue(db.open_tree("userid_inviteroomids").unwrap()), roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), @@ -143,8 +157,8 @@ impl Database { String::from_utf8_lossy(&v), ); } - println!("\n# DeviceId -> Token:"); - for (k, v) in self.deviceid_token.iter().map(|r| r.unwrap()) { + println!("\n# UserId+DeviceId -> Token:"); + for (k, v) in self.userdeviceid_token.iter().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), @@ -167,6 +181,14 @@ impl Database { String::from_utf8_lossy(&v), ); } + println!("\n# RoomStateId -> PDU:"); + for (k, v) in self.roomstateid_pdu.iter().map(|r| r.unwrap()) { + println!( + "{:?} -> {:?}", + String::from_utf8_lossy(&k), + String::from_utf8_lossy(&v), + ); + } println!("\n# RoomId -> UserIds:"); for (k, v) in self.roomid_userids.iter_all().map(|r| r.unwrap()) { println!( diff --git a/src/main.rs b/src/main.rs index d09a787..f79a0b4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -45,12 +45,16 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::get_alias_route, client_server::join_room_by_id_route, client_server::join_room_by_id_or_alias_route, + client_server::invite_user_route, client_server::get_public_rooms_filtered_route, + client_server::search_users_route, client_server::get_protocols_route, client_server::create_message_event_route, client_server::create_state_event_for_key_route, client_server::create_state_event_for_empty_key_route, client_server::sync_route, + client_server::turn_server_route, + client_server::publicised_groups_route, client_server::options_route, ], ) diff --git a/src/pdu.rs b/src/pdu.rs index 588242b..47f94ac 100644 --- a/src/pdu.rs 
+++ b/src/pdu.rs @@ -1,5 +1,7 @@ use js_int::UInt; -use ruma_events::{collections::all::RoomEvent, EventResult, EventType}; +use ruma_events::{ + collections::all::RoomEvent, stripped::AnyStrippedStateEvent, EventResult, EventType, +}; use ruma_federation_api::EventHash; use ruma_identifiers::{EventId, RoomId, UserId}; use serde::{Deserialize, Serialize}; @@ -39,4 +41,16 @@ impl PduEvent { .into_result() .unwrap() } + + pub fn to_stripped_state_event(&self) -> Option { + // Can only fail in rare circumstances that won't ever happen here, see + // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html + let json = serde_json::to_string(&self).unwrap(); + + // EventResult's deserialize implementation always returns `Ok(...)` + serde_json::from_str::>(&json) + .unwrap() + .into_result() + .ok() + } } From fa9e127a1e7d36c5f36b6186182173d2ab921cf5 Mon Sep 17 00:00:00 2001 From: gnieto Date: Tue, 14 Apr 2020 22:25:44 +0200 Subject: [PATCH 0053/1727] Store hashed passwords (#7) Use if let instead of unwrap Default to invalid password if could not calculate Move hash password methdo and return Result Rename get_password method Default to empty password when no pwd is received Store hashed passwords Store passwords hashed with Argon2 and verify password with that stored hash. Co-authored-by: Guillem Nieto --- Cargo.lock | 15 ++++++- Cargo.toml | 1 + src/client_server.rs | 27 +++++++++--- src/data.rs | 8 ++-- src/test.rs | 102 ++++++++++++++++++++++++++++++++++++++----- src/utils.rs | 16 +++++++ 6 files changed, 148 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3101ab1..d606655 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,6 +155,7 @@ dependencies = [ "ruma-federation-api", "ruma-identifiers", "ruma-signatures", + "rust-argon2 0.8.2", "serde", "serde_json", "sled", @@ -926,7 +927,7 @@ checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" dependencies = [ "getrandom", "redox_syscall", - "rust-argon2", + "rust-argon2 0.7.0", ] [[package]] @@ -1136,6 +1137,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rust-argon2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +dependencies = [ + "base64 0.12.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + [[package]] name = "rustls" version = "0.16.0" diff --git a/Cargo.toml b/Cargo.toml index 7618ac0..3b8da6c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,3 +28,4 @@ ruma-federation-api = "0.0.1" serde = "1.0.106" tokio = { version = "0.2.16", features = ["macros"] } #rt-threaded rand = "0.7.3" +rust-argon2 = "0.8.2" \ No newline at end of file diff --git a/src/client_server.rs b/src/client_server.rs index f1c8a85..1859b6f 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -36,6 +36,7 @@ use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; use serde_json::json; use std::{collections::HashMap, convert::TryInto, path::PathBuf, time::Duration}; +use argon2::{Config, Variant}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -103,8 +104,20 @@ pub fn register_route( ))); } - // Create user - data.user_add(&user_id, body.password.clone()); + let password = body.password.clone().unwrap_or_default(); + + if let Ok(hash) = utils::calculate_hash(&password) { + // Create user + data.user_add(&user_id, &hash); + } else { + return 
MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( + Error { + kind: ErrorKind::InvalidParam, + message: "Password did not met requirements".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }, + ))); + } // Generate new device id if the user didn't specify one let device_id = body @@ -144,9 +157,11 @@ pub fn login_route(data: State, body: Ruma) -> MatrixResul username = format!("@{}:{}", username, data.hostname()); } if let Ok(user_id) = (*username).try_into() { - // Check password (this also checks if the user exists - if let Some(correct_password) = data.password_get(&user_id) { - if password == correct_password { + if let Some(hash) = data.password_hash_get(&user_id) { + let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()) + .unwrap_or(false); + + if hash_matches { // Success! user_id } else { @@ -930,4 +945,4 @@ pub fn options_route(_segments: PathBuf) -> MatrixResult) { + pub fn user_add(&self, user_id: &UserId, hash: &str) { self.db .userid_password - .insert(user_id.to_string(), &*password.unwrap_or_default()) + .insert(user_id.to_string(), hash) .unwrap(); } @@ -61,8 +61,8 @@ impl Data { .collect() } - /// Checks if the given password is equal to the one in the database. - pub fn password_get(&self, user_id: &UserId) -> Option { + /// Gets password hash for given user id. + pub fn password_hash_get(&self, user_id: &UserId) -> Option { self.db .userid_password .get(user_id.to_string()) diff --git a/src/test.rs b/src/test.rs index 6131eb2..c3cefb1 100644 --- a/src/test.rs +++ b/src/test.rs @@ -1,5 +1,9 @@ use super::*; use rocket::{local::Client, http::Status}; +use serde_json::Value; +use serde_json::json; +use ruma_client_api::error::ErrorKind; +use std::time::Duration; fn setup_client() -> Client { Database::try_remove("temp"); @@ -14,19 +18,97 @@ async fn register_login() { let client = setup_client(); let mut response = client .post("/_matrix/client/r0/register?kind=user") - .body( - r#"{ - "username": "cheeky_monkey", - "password": "ilovebananas", - "device_id": "GHTYAJCE", - "initial_device_display_name": "Jungle Phone", - "inhibit_login": false - }"#, - ) + .body(registration_init()) .dispatch().await; - let body = serde_json::to_value(&response.body_string().await.unwrap()).unwrap(); + let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); assert_eq!(response.status().code, 401); assert!(dbg!(&body["flows"]).as_array().unwrap().len() > 0); assert!(body["session"].as_str().unwrap().len() > 0); } + +#[tokio::test] +async fn login_after_register_correct_password() { + let client = setup_client(); + let mut response = client + .post("/_matrix/client/r0/register?kind=user") + .body(registration_init()) + .dispatch().await; + let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); + let session = body["session"].clone(); + + let response = client + .post("/_matrix/client/r0/register?kind=user") + .body(registration(session.as_str().unwrap())) + .dispatch().await; + assert_eq!(response.status().code, 200); + + let login_response = client + .post("/_matrix/client/r0/login") + .body(login_with_password("ilovebananas")) + .dispatch() + .await; + assert_eq!(login_response.status().code, 200); +} + +#[tokio::test] +async fn login_after_register_incorrect_password() { + let client = setup_client(); + let mut response = client + .post("/_matrix/client/r0/register?kind=user") + .body(registration_init()) + .dispatch().await; + let body = 
serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); + let session = body["session"].clone(); + + let response = client + .post("/_matrix/client/r0/register?kind=user") + .body(registration(session.as_str().unwrap())) + .dispatch().await; + assert_eq!(response.status().code, 200); + + let mut login_response = client + .post("/_matrix/client/r0/login") + .body(login_with_password("idontlovebananas")) + .dispatch() + .await; + let body = serde_json::from_str::(&login_response.body_string().await.unwrap()).unwrap(); + assert_eq!(body.as_object().unwrap().get("errcode").unwrap().as_str().unwrap(), "M_FORBIDDEN"); + assert_eq!(login_response.status().code, 403); +} + +fn registration_init() -> &'static str { + r#"{ + "username": "cheeky_monkey", + "password": "ilovebananas", + "device_id": "GHTYAJCE", + "initial_device_display_name": "Jungle Phone", + "inhibit_login": false + }"# +} + +fn registration(session: &str) -> String { + json!({ + "auth": { + "session": session, + "type": "m.login.dummy" + }, + "username": "cheeky_monkey", + "password": "ilovebananas", + "device_id": "GHTYAJCE", + "initial_device_display_name": "Jungle Phone", + "inhibit_login": false + }).to_string() +} + +fn login_with_password(password: &str) -> String { + json!({ + "type": "m.login.password", + "identifier": { + "type": "m.id.user", + "user": "cheeky_monkey" + }, + "password": password, + "initial_device_display_name": "Jungle Phone" + }).to_string() +} \ No newline at end of file diff --git a/src/utils.rs b/src/utils.rs index 5e94172..3b3ed92 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -3,6 +3,7 @@ use std::{ convert::TryInto, time::{SystemTime, UNIX_EPOCH}, }; +use argon2::{Config, Variant}; pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() @@ -39,3 +40,18 @@ pub fn random_string(length: usize) -> String { .take(length) .collect() } + +/// Calculate a new hash for the given password +pub fn calculate_hash(password: &str) -> Result { + let hashing_config = Config { + variant: Variant::Argon2id, + ..Default::default() + }; + + let salt = random_string(32); + argon2::hash_encoded( + password.as_bytes(), + salt.as_bytes(), + &hashing_config, + ) +} \ No newline at end of file From 2f638889956b4c9b94545bf0b8f5ee4463fe5686 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 14 Apr 2020 22:44:45 +0200 Subject: [PATCH 0054/1727] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b2b42b0..557c121 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,8 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [x] Riot read receipts - [x] Typing indications - [x] Invites, user search +- [x] Password hashing - [ ] Riot presence -- [ ] Password hashing - [ ] Proper room creation - [ ] Riot E2EE - [ ] Basic federation From 6d858c39ca95f14826173dfb8ce9abc2ed05d9ee Mon Sep 17 00:00:00 2001 From: TheCycoONE Date: Wed, 15 Apr 2020 21:17:33 +0200 Subject: [PATCH 0055/1727] Fix typo in password message --- src/client_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 1859b6f..2548d25 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -113,7 +113,7 @@ pub fn register_route( return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( Error { kind: ErrorKind::InvalidParam, - message: "Password did not met requirements".to_owned(), + message: "Password did not meet requirements".to_owned(), 
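// Illustrative sketch (not part of any patch above): the password handling introduced in
// "Store hashed passwords (#7)" is an argon2 encode/verify round trip using the
// rust-argon2 0.8 API (`hash_encoded` / `verify_encoded`). The values below are made up;
// the patch itself salts with utils::random_string(32) rather than a fixed salt.
use argon2::{Config, Variant};

fn password_round_trip() {
    let config = Config {
        variant: Variant::Argon2id,
        ..Default::default()
    };
    let hash = argon2::hash_encoded(b"ilovebananas", b"illustrative-fixed-salt", &config)
        .expect("hashing succeeds");

    // login_route later verifies the submitted password against the stored encoded hash.
    assert!(argon2::verify_encoded(&hash, b"ilovebananas").unwrap_or(false));
    assert!(!argon2::verify_encoded(&hash, b"idontlovebananas").unwrap_or(false));
}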
status_code: http::StatusCode::BAD_REQUEST, }, ))); From 80ddf80f17c57d2f29d13788e8b5424d4b5598a8 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 19 Apr 2020 14:14:47 +0200 Subject: [PATCH 0056/1727] work --- Cargo.lock | 366 +++++++++++++++++++++++++++++++++++++------ Cargo.toml | 19 +-- src/client_server.rs | 192 +++++++++++++++-------- src/data.rs | 62 +++++++- src/database.rs | 12 +- src/main.rs | 8 +- src/pdu.rs | 13 +- src/server_server.rs | 25 +++ src/stateres.rs | 59 +++++++ src/utils.rs | 17 +- 10 files changed, 632 insertions(+), 141 deletions(-) create mode 100644 src/server_server.rs create mode 100644 src/stateres.rs diff --git a/Cargo.lock b/Cargo.lock index d606655..9d7eaf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,9 +29,9 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.29" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab5c215748dc1ad11a145359b1067107ae0f8ca5e99844fa64067ed5bf198e3" +checksum = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -148,6 +148,7 @@ dependencies = [ "log", "pretty_env_logger", "rand", + "reqwest", "rocket", "ruma-api", "ruma-client-api", @@ -179,6 +180,22 @@ dependencies = [ "time", ] +[[package]] +name = "core-foundation" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" + [[package]] name = "crc32fast" version = "1.2.0" @@ -271,6 +288,15 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" +[[package]] +name = "encoding_rs" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" +dependencies = [ + "cfg-if", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -290,6 +316,21 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "fs2" version = "0.4.3" @@ -457,9 +498,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" +checksum = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15" dependencies = [ "libc", ] @@ -502,9 +543,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.4" +version = "0.13.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" +checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" dependencies = [ "bytes", "futures-channel", @@ -516,6 +557,7 @@ dependencies = [ "httparse", "itoa", "log", + "net2", "pin-project", "time", "tokio", @@ -523,6 +565,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-tls" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-tls", +] + [[package]] name = "idna" version = "0.2.0" @@ -594,15 +649,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" +checksum = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" [[package]] name = "lock_api" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ "scopeguard", ] @@ -649,6 +704,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "mio" version = "0.6.21" @@ -691,6 +756,24 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "native-tls" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "net2" version = "0.2.33" @@ -704,19 +787,52 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", ] [[package]] -name = "parking_lot" -version = "0.10.0" +name = "openssl" +version = "0.10.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" +checksum = "cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "lazy_static", + "libc", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "openssl-sys" +version = "0.9.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ "lock_api", "parking_lot_core", @@ -724,9 +840,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" +checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" dependencies = [ "cfg-if", "cloudabi", @@ -772,18 +888,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" +checksum = "6f6a7f5eee6292c559c793430c55c00aea9d3b3d1905e855806ca4d7253426a2" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" +checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -802,6 +918,12 @@ version = "0.1.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" +[[package]] +name = "pkg-config" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -932,9 +1054,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.6" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" +checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" dependencies = [ "aho-corasick", "memchr", @@ -948,6 +1070,50 @@ version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +[[package]] +name = "remove_dir_all" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +dependencies = [ + "winapi 0.3.8", +] + +[[package]] +name = "reqwest" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" +dependencies = [ + "base64 0.11.0", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "hyper-tls", + "js-sys", + "lazy_static", + "log", + "mime", + "mime_guess", + "native-tls", + "percent-encoding 2.1.0", + "pin-project-lite", + "serde", + "serde_urlencoded", + "time", + "tokio", + "tokio-tls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.12" @@ -1024,9 +1190,9 @@ dependencies = [ [[package]] name = "ruma-api" 
-version = "0.15.0" +version = "0.16.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120f0cd8625b842423ef3a63cabb8c309ca35a02de87cc4b377fb2cdd43f1fe5" +checksum = "7769e934360383f91d68a2b9132610d02436ef3272cbdd46de239c9025198a36" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1036,14 +1202,13 @@ dependencies = [ "serde_json", "serde_urlencoded", "strum", - "url", ] [[package]] name = "ruma-api-macros" -version = "0.12.0" +version = "0.16.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc523efc9c1ba7033ff17888551c1d378e12eae087cfbe4fcee938ff516759e" +checksum = "0f8dad3311d0bee6d43da684ba03f9a84cddd39f9b3e7e88cab5d726419ec22e" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1052,14 +1217,15 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.7.2" -source = "git+https://github.com/ruma/ruma-client-api.git?branch=uiaa-error-type#a7136c06285864dadcc0b0c6371d181002727c55" +version = "0.8.0-rc.2" +source = "git+https://github.com/ruma/ruma-client-api.git#1b7863dc36e6e043ae365791cc719dc190abd660" dependencies = [ "http", "js_int", "ruma-api", "ruma-events", "ruma-identifiers", + "ruma-serde", "serde", "serde_json", "strum", @@ -1068,22 +1234,23 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e34bfc20462f18d7f0beb6f1863db62d29438f2dcf390b625e9b20696cb2b3" +checksum = "844b5d38397c945395c7a1eaf89d55714c3d22983b870085a1a67d51fb6611cf" dependencies = [ "js_int", "ruma-events-macros", "ruma-identifiers", + "ruma-serde", "serde", "serde_json", ] [[package]] name = "ruma-events-macros" -version = "0.3.0" +version = "0.19.0-final" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff95b6b4480c570db471b490b35ad70add5470651654e75faf0b97052b4f29e1" +checksum = "5477046b734fde45dd7913dbc8d7b260af3b1c31ea2bc329bd2f0b44e37368be" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1093,10 +1260,10 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a73a23c4d9243be91e101e1942f4d9cd913ef5156d756bafdfe2409ee23d72" +source = "git+https://github.com/ruma/ruma-federation-api.git#5448c650f0a583382152d0f43f2dcf720d495390" dependencies = [ "js_int", + "ruma-api", "ruma-events", "ruma-identifiers", "serde", @@ -1105,19 +1272,28 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.14.1" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e442c700a3b33fc4dd4a1c4b463ebdd252d2c2db31b83da6bb3009307039b9" +checksum = "63db5545f38077ea141fb112df070773e6ab9b7025174d732f56c6b37525ccc0" dependencies = [ - "rand", "serde", - "url", +] + +[[package]] +name = "ruma-serde" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09901d608958f63618546957134dd4242d2ca07a885a28f794ad4574a937c22c" +dependencies = [ + "js_int", + "serde", + "serde_json", ] [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma-signatures.git#9947e94cb28daea456904197f7cd754a8e48797a" +source = "git+https://github.com/ruma/ruma-signatures.git#c3f8399c268695464730afd6077c7ce50155b8d5" dependencies = [ "base64 0.12.0", "ring", @@ -1168,6 +1344,16 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" +[[package]] +name = "schannel" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +dependencies = [ + "lazy_static", + "winapi 0.3.8", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -1184,6 +1370,29 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "serde" version = "1.0.106" @@ -1261,9 +1470,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" +checksum = "05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a" [[package]] name = "spin" @@ -1320,6 +1529,20 @@ dependencies = [ "unicode-xid 0.2.0", ] +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi 0.3.8", +] + [[package]] name = "termcolor" version = "1.1.0" @@ -1340,20 +1563,19 @@ dependencies = [ [[package]] name = "time" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "redox_syscall", "winapi 0.3.8", ] [[package]] name = "tokio" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee5a0dd887e37d37390c13ff8ac830f992307fe30a1fff0ab8427af67211ba28" +checksum = "34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713" dependencies = [ "bytes", "fnv", @@ -1395,6 +1617,16 @@ dependencies = [ "webpki", ] +[[package]] +name = "tokio-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bde02a3a5291395f59b06ec6945a3077602fac2b07eeeaf0dee2122f3619828" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-util" version = "0.3.1" @@ -1430,6 +1662,15 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check 0.9.1", +] + [[package]] name = "unicode-bidi" version = "0.3.4" @@ -1484,6 +1725,12 @@ dependencies = [ "serde", ] +[[package]] +name = "vcpkg" +version = "0.2.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" + [[package]] name = "version_check" version = "0.1.5" @@ -1519,6 +1766,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" dependencies = [ "cfg-if", + "serde", + "serde_json", "wasm-bindgen-macro", ] @@ -1537,6 +1786,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.60" @@ -1629,6 +1890,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.8", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 3b8da6c..5cf5412 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,19 +13,20 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", branch = "uiaa-error-type" } +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } +ruma-identifiers = "0.15.1" +ruma-api = "0.16.0-rc.1" +ruma-events = "0.19.0" +ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } +ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" directories = "2.0.2" -ruma-identifiers = "0.14.1" -ruma-api = "0.15.0" -ruma-events = "0.18.0" js_int = "0.1.4" -serde_json = "1.0.50" -ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } -ruma-federation-api = "0.0.1" +serde_json = "1.0.51" serde = "1.0.106" -tokio = { version = "0.2.16", features = ["macros"] } #rt-threaded +tokio = { version = "0.2.18", features = ["macros"] } rand = "0.7.3" -rust-argon2 = "0.8.2" \ No newline at end of file +rust-argon2 = "0.8.2" +reqwest = "0.10.4" diff --git a/src/client_server.rs b/src/client_server.rs index 2548d25..a019142 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -5,16 +5,16 @@ use rocket::{get, options, post, put, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ - account::{ - register, AuthenticationFlow, UserInteractiveAuthenticationInfo, - UserInteractiveAuthenticationResponse, - }, + account::register, alias::get_alias, + capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{get_keys, upload_keys}, - membership::{invite_user, join_room_by_id, join_room_by_id_or_alias}, + membership::{ + get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, leave_room, + }, message::create_message_event, presence::set_presence, profile::{ @@ -28,15 +28,20 @@ use ruma_client_api::{ sync::sync_events, thirdparty::get_protocols, 
typing::create_typing_event, + uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, user_directory::search_users, }, unversioned::get_supported_versions, }; use ruma_events::{collections::only::Event as EduEvent, EventType}; -use ruma_identifiers::{RoomId, RoomIdOrAliasId, UserId}; +use ruma_identifiers::{RoomId, UserId}; use serde_json::json; -use std::{collections::HashMap, convert::TryInto, path::PathBuf, time::Duration}; -use argon2::{Config, Variant}; +use std::{ + collections::{BTreeMap, HashMap}, + convert::{TryFrom, TryInto}, + path::PathBuf, + time::{Duration, SystemTime}, +}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -46,8 +51,8 @@ const TOKEN_LENGTH: usize = 256; #[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> MatrixResult { MatrixResult(Ok(get_supported_versions::Response { - versions: vec!["r0.6.0".to_owned()], - unstable_features: HashMap::new(), + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + unstable_features: BTreeMap::new(), })) } @@ -55,18 +60,17 @@ pub fn get_supported_versions_route() -> MatrixResult, body: Ruma, -) -> MatrixResult { +) -> MatrixResult { if body.auth.is_none() { - return MatrixResult(Err(UserInteractiveAuthenticationResponse::AuthResponse( - UserInteractiveAuthenticationInfo { - flows: vec![AuthenticationFlow { - stages: vec!["m.login.dummy".to_owned()], - }], - completed: vec![], - params: json!({}), - session: Some(utils::random_string(SESSION_ID_LENGTH)), - }, - ))); + return MatrixResult(Err(UiaaResponse::AuthResponse(UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.dummy".to_owned()], + }], + completed: vec![], + params: json!({}), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + auth_error: None, + }))); } // Validate user id @@ -81,13 +85,11 @@ pub fn register_route( { Err(_) => { debug!("Username invalid"); - return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( - Error { - kind: ErrorKind::InvalidUsername, - message: "Username was invalid.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }, - ))); + return MatrixResult(Err(UiaaResponse::MatrixError(Error { + kind: ErrorKind::InvalidUsername, + message: "Username was invalid.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }))); } Ok(user_id) => user_id, }; @@ -95,13 +97,11 @@ pub fn register_route( // Check if username is creative enough if data.user_exists(&user_id) { debug!("ID already taken"); - return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( - Error { - kind: ErrorKind::UserInUse, - message: "Desired user ID is already taken.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }, - ))); + return MatrixResult(Err(UiaaResponse::MatrixError(Error { + kind: ErrorKind::UserInUse, + message: "Desired user ID is already taken.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }))); } let password = body.password.clone().unwrap_or_default(); @@ -110,13 +110,11 @@ pub fn register_route( // Create user data.user_add(&user_id, &hash); } else { - return MatrixResult(Err(UserInteractiveAuthenticationResponse::MatrixError( - Error { - kind: ErrorKind::InvalidParam, - message: "Password did not meet requirements".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }, - ))); + return MatrixResult(Err(UiaaResponse::MatrixError(Error { + kind: ErrorKind::InvalidParam, + message: "Password did not met requirements".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + }))); } // Generate new device id if the user 
didn't specify one @@ -158,8 +156,8 @@ pub fn login_route(data: State, body: Ruma) -> MatrixResul } if let Ok(user_id) = (*username).try_into() { if let Some(hash) = data.password_hash_get(&user_id) { - let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()) - .unwrap_or(false); + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if hash_matches { // Success! @@ -219,11 +217,25 @@ pub fn login_route(data: State, body: Ruma) -> MatrixResul })) } +#[get("/_matrix/client/r0/capabilities", data = "")] +pub fn get_capabilities_route( + body: Ruma, +) -> MatrixResult { + // TODO + MatrixResult(Ok(get_capabilities::Response { + capabilities: get_capabilities::Capabilities { + change_password: None, + room_versions: None, + custom_capabilities: BTreeMap::new(), + }, + })) +} + #[get("/_matrix/client/r0/pushrules")] pub fn get_pushrules_all_route() -> MatrixResult { // TODO MatrixResult(Ok(get_pushrules_all::Response { - global: HashMap::new(), + global: BTreeMap::new(), })) } @@ -447,8 +459,8 @@ pub fn set_presence_route( pub fn get_keys_route(body: Ruma) -> MatrixResult { // TODO MatrixResult(Ok(get_keys::Response { - failures: HashMap::new(), - device_keys: HashMap::new(), + failures: BTreeMap::new(), + device_keys: BTreeMap::new(), })) } @@ -459,7 +471,7 @@ pub fn upload_keys_route( ) -> MatrixResult { // TODO MatrixResult(Ok(upload_keys::Response { - one_time_key_counts: HashMap::new(), + one_time_key_counts: BTreeMap::new(), })) } @@ -472,14 +484,14 @@ pub fn set_read_marker_route( let user_id = body.user_id.clone().expect("user is authenticated"); // TODO: Fully read if let Some(event) = &body.read_receipt { - let mut user_receipts = HashMap::new(); + let mut user_receipts = BTreeMap::new(); user_receipts.insert( user_id.clone(), ruma_events::receipt::Receipt { - ts: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + ts: Some(SystemTime::now()), }, ); - let mut receipt_content = HashMap::new(); + let mut receipt_content = BTreeMap::new(); receipt_content.insert( event.clone(), ruma_events::receipt::Receipts { @@ -537,7 +549,7 @@ pub fn create_room_route( body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::new(data.hostname()).expect("host is valid"); + let room_id = RoomId::try_from(data.hostname()).expect("host is valid"); let user_id = body.user_id.clone().expect("user is authenticated"); data.pdu_append( @@ -589,8 +601,6 @@ pub fn create_room_route( ); } - dbg!(&*body); - data.room_join(&room_id, &user_id); for user in &body.invite { @@ -651,8 +661,8 @@ pub fn join_room_by_id_or_alias_route( body: Ruma, _room_id_or_alias: String, ) -> MatrixResult { - let room_id = match &body.room_id_or_alias { - RoomIdOrAliasId::RoomAliasId(alias) => match alias.alias() { + let room_id = if body.room_id_or_alias.is_room_alias_id() { + match body.room_id_or_alias.as_ref() { "#room:localhost" => "!xclkjvdlfj:localhost".try_into().unwrap(), _ => { debug!("Room not found."); @@ -662,9 +672,9 @@ pub fn join_room_by_id_or_alias_route( status_code: http::StatusCode::NOT_FOUND, })); } - }, - - RoomIdOrAliasId::RoomId(id) => id.clone(), + } + } else { + body.room_id_or_alias.try_into().unwrap() }; if data.room_join( @@ -681,6 +691,17 @@ pub fn join_room_by_id_or_alias_route( } } +#[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] +pub fn leave_room_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + 
data.room_leave(&user_id, &body.room_id, &user_id); + MatrixResult(Ok(leave_room::Response)) +} + #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] pub fn invite_user_route( data: State, @@ -714,7 +735,7 @@ pub fn get_public_rooms_filtered_route( .map(|room_id| { let state = data.room_state(&room_id); directory::PublicRoomsChunk { - aliases: None, + aliases: Vec::new(), canonical_alias: None, name: state .get(&(EventType::RoomName, "".to_owned())) @@ -763,13 +784,22 @@ pub fn search_users_route( })) } +#[get("/_matrix/client/r0/rooms/<_room_id>/members", data = "")] +pub fn get_member_events_route( + body: Ruma, + _room_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(get_member_events::Response { chunk: Vec::new() })) +} + #[get("/_matrix/client/r0/thirdparty/protocols", data = "")] pub fn get_protocols_route( body: Ruma, ) -> MatrixResult { // TODO MatrixResult(Ok(get_protocols::Response { - protocols: HashMap::new(), + protocols: BTreeMap::new(), })) } @@ -851,7 +881,7 @@ pub fn sync_route( std::thread::sleep(Duration::from_millis(200)); let next_batch = data.last_pdu_index().to_string(); - let mut joined_rooms = HashMap::new(); + let mut joined_rooms = BTreeMap::new(); let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); let since = body .since @@ -860,7 +890,11 @@ pub fn sync_route( .unwrap_or(0); for room_id in joined_roomids { let pdus = data.pdus_since(&room_id, since); - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); + let room_events = pdus + .into_iter() + .map(|pdu| pdu.to_room_event()) + .filter_map(|e| e) + .collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -888,7 +922,33 @@ pub fn sync_route( ); } - let mut invited_rooms = HashMap::new(); + let mut left_rooms = BTreeMap::new(); + let left_roomids = data.rooms_left(body.user_id.as_ref().expect("user is authenticated")); + for room_id in left_roomids { + let pdus = data.pdus_since(&room_id, since); + let room_events = pdus + .into_iter() + .map(|pdu| pdu.to_room_event()) + .filter_map(|e| e) + .collect(); + let mut edus = data.roomlatests_since(&room_id, since); + edus.extend_from_slice(&data.roomactives_in(&room_id)); + + left_rooms.insert( + room_id.clone().try_into().unwrap(), + sync_events::LeftRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + timeline: sync_events::Timeline { + limited: Some(false), + prev_batch: Some("".to_owned()), + events: room_events, + }, + state: sync_events::State { events: Vec::new() }, + }, + ); + } + + let mut invited_rooms = BTreeMap::new(); for room_id in data.rooms_invited(body.user_id.as_ref().expect("user is authenticated")) { let events = data .pdus_since(&room_id, since) @@ -907,7 +967,7 @@ pub fn sync_route( MatrixResult(Ok(sync_events::Response { next_batch, rooms: sync_events::Rooms { - leave: Default::default(), + leave: left_rooms, join: joined_rooms, invite: invited_rooms, }, @@ -945,4 +1005,4 @@ pub fn options_route(_segments: PathBuf) -> MatrixResult Self { + let db = Database::load_or_create(hostname); Self { hostname: hostname.to_owned(), - db: Database::load_or_create(hostname), + reqwest_client: reqwest::Client::new(), + db, } } @@ -27,6 +30,15 @@ impl Data { &self.hostname } + /// Get the hostname of the server. 
+ pub fn reqwest_client(&self) -> &reqwest::Client { + &self.reqwest_client + } + + pub fn keypair(&self) -> &ruma_signatures::Ed25519KeyPair { + &self.db.keypair + } + /// Check if a user has an account by looking for an assigned password. pub fn user_exists(&self, user_id: &UserId) -> bool { self.db @@ -183,6 +195,10 @@ impl Data { user_id.to_string().as_bytes(), room_id.to_string().as_bytes(), ); + self.db.userid_leftroomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); self.pdu_append( room_id.clone(), @@ -277,6 +293,33 @@ impl Data { hashmap } + pub fn room_leave(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { + self.pdu_append( + room_id.clone(), + sender.clone(), + EventType::RoomMember, + json!({"membership": "leave"}), + None, + Some(user_id.to_string()), + ); + self.db.userid_inviteroomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + self.db.userid_roomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + self.db.roomid_userids.remove_value( + room_id.to_string().as_bytes(), + user_id.to_string().as_bytes().into(), + ); + self.db.userid_leftroomids.add( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + } + pub fn room_invite(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { self.pdu_append( room_id.clone(), @@ -287,9 +330,13 @@ impl Data { Some(user_id.to_string()), ); self.db.userid_inviteroomids.add( - &user_id.to_string().as_bytes(), + user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), ); + self.db.roomid_userids.add( + room_id.to_string().as_bytes(), + user_id.to_string().as_bytes().into(), + ); } pub fn rooms_invited(&self, user_id: &UserId) -> Vec { @@ -301,6 +348,15 @@ impl Data { .collect() } + pub fn rooms_left(&self, user_id: &UserId) -> Vec { + self.db + .userid_leftroomids + .get_iter(&user_id.to_string().as_bytes()) + .values() + .map(|key| RoomId::try_from(&*utils::string_from_bytes(&key.unwrap())).unwrap()) + .collect() + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid @@ -434,7 +490,7 @@ impl Data { if let Some(state_key) = pdu.state_key { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(dbg!(pdu.kind.to_string().as_bytes())); + key.extend_from_slice(pdu.kind.to_string().as_bytes()); key.push(0xff); key.extend_from_slice(state_key.to_string().as_bytes()); self.db.roomstateid_pdu.insert(key, &*pdu_json).unwrap(); diff --git a/src/database.rs b/src/database.rs index 041a215..bc32847 100644 --- a/src/database.rs +++ b/src/database.rs @@ -30,7 +30,7 @@ impl MultiValue { pub fn remove_value(&self, id: &[u8], value: &[u8]) { if let Some(key) = self .get_iter(id) - .find(|t| t.as_ref().unwrap().1 == value) + .find(|t| &t.as_ref().unwrap().1 == value) .map(|t| t.unwrap().0) { self.0.remove(key).unwrap(); @@ -74,11 +74,13 @@ pub struct Database { pub roomid_userids: MultiValue, pub userid_roomids: MultiValue, pub userid_inviteroomids: MultiValue, + pub userid_leftroomids: MultiValue, // EDUs: pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Since + UserId TODO: Types pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Since pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Since pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Since + Type + UserId + pub keypair: 
ruma_signatures::Ed25519KeyPair, _db: sled::Db, } @@ -116,10 +118,18 @@ impl Database { roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), userid_inviteroomids: MultiValue(db.open_tree("userid_inviteroomids").unwrap()), + userid_leftroomids: MultiValue(db.open_tree("userid_leftroomids").unwrap()), roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), + keypair: ruma_signatures::Ed25519KeyPair::new( + &*db.update_and_fetch("keypair", utils::generate_keypair) + .unwrap() + .unwrap(), + "0.0.0".to_owned(), + ) + .unwrap(), _db: db, } } diff --git a/src/main.rs b/src/main.rs index f79a0b4..f26ed91 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,6 +5,7 @@ mod data; mod database; mod pdu; mod ruma_wrapper; +mod server_server; mod utils; #[cfg(test)] @@ -26,6 +27,7 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::register_route, client_server::get_login_route, client_server::login_route, + client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::get_filter_route, client_server::create_filter_route, @@ -45,9 +47,11 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::get_alias_route, client_server::join_room_by_id_route, client_server::join_room_by_id_or_alias_route, + client_server::leave_room_route, client_server::invite_user_route, client_server::get_public_rooms_filtered_route, client_server::search_users_route, + client_server::get_member_events_route, client_server::get_protocols_route, client_server::create_message_event_route, client_server::create_state_event_for_key_route, @@ -64,12 +68,12 @@ fn setup_rocket(data: Data) -> rocket::Rocket { fn main() { // Log info by default if let Err(_) = std::env::var("RUST_LOG") { - std::env::set_var("RUST_LOG", "matrixserver=debug,info"); + std::env::set_var("RUST_LOG", "warn"); } pretty_env_logger::init(); let data = Data::load_or_create("matrixtesting.koesters.xyz"); - data.debug(); + //data.debug(); setup_rocket(data).launch().unwrap(); } diff --git a/src/pdu.rs b/src/pdu.rs index 47f94ac..3f15aa2 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -31,15 +31,18 @@ pub struct PduEvent { } impl PduEvent { - pub fn to_room_event(&self) -> RoomEvent { + // TODO: This shouldn't be an option + pub fn to_room_event(&self) -> Option { // Can only fail in rare circumstances that won't ever happen here, see // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html let json = serde_json::to_string(&self).unwrap(); // EventResult's deserialize implementation always returns `Ok(...)` - serde_json::from_str::>(&json) - .unwrap() - .into_result() - .unwrap() + Some( + serde_json::from_str::>(&json) + .unwrap() + .into_result() + .ok()?, + ) } pub fn to_stripped_state_event(&self) -> Option { diff --git a/src/server_server.rs b/src/server_server.rs new file mode 100644 index 0000000..57f76ce --- /dev/null +++ b/src/server_server.rs @@ -0,0 +1,25 @@ +use std::convert::TryInto; + +pub fn send_request>>>( + data: &crate::Data, + method: http::Method, + uri: String, + destination: String, + request: T, +) where + T::Error: std::fmt::Debug, +{ + let mut http_request: http::Request<_> = request.try_into().unwrap(); + let request_json = 
serde_json::to_value(http_request.body()).unwrap(); + + let request_map = request_json.as_object_mut().unwrap(); + + request_map.insert("method".to_owned(), method.to_string().into()); + request_map.insert("uri".to_owned(), uri.to_string().into()); + //TODO: request_map.insert("origin".to_owned(), data.origin().to_string().into()); + request_map.insert("destination".to_owned(), destination.to_string().into()); + + ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); + let signature = request_json["signatures"]; + data.reqwest_client().execute(http_request.into()); +} diff --git a/src/stateres.rs b/src/stateres.rs new file mode 100644 index 0000000..ee47099 --- /dev/null +++ b/src/stateres.rs @@ -0,0 +1,59 @@ +use std::collections::HashMap; + +fn stateres(state_a: HashMap, state_b: HashMap) { + let mut unconflicted = todo!("state at fork event"); + + let mut conflicted: HashMap = state_a + .iter() + .filter(|(key_a, value_a)| match state_b.remove(key_a) { + Some(value_b) if value_a == value_b => unconflicted.insert(key_a, value_a), + _ => false, + }) + .collect(); + + // We removed unconflicted from state_b, now we can easily insert all events that are only in fork b + conflicted.extend(state_b); + + let partial_state = unconflicted.clone(); + + let full_conflicted = conflicted.clone(); // TODO: auth events + + let output_rev = Vec::new(); + let event_map = HashMap::new(); + let incoming_edges = HashMap::new(); + + for event in full_conflicted { + event_map.insert(event.event_id, event); + incoming_edges.insert(event.event_id, 0); + } + + for e in conflicted_control_events { + for a in e.auth_events { + incoming_edges[a.event_id] += 1; + } + } + + while incoming_edges.len() > 0 { + let mut count_0 = incoming_edges + .iter() + .filter(|(_, c)| c == 0) + .collect::>(); + + count_0.sort_by(|(x, _), (y, _)| { + x.power_level + .cmp(&a.power_level) + .then_with(|| x.origin_server.ts.cmp(&y.origin_server_ts)) + .then_with(|| x.event_id.cmp(&y.event_id)) + }); + + for (id, count) in count_0 { + output_rev.push(event_map[id]); + + for auth_event in event_map[id].auth_events { + incoming_edges[auth_event.event_id] -= 1; + } + + incoming_edges.remove(id); + } + } +} diff --git a/src/utils.rs b/src/utils.rs index 3b3ed92..a360036 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,9 +1,9 @@ +use argon2::{Config, Variant}; use rand::prelude::*; use std::{ convert::TryInto, time::{SystemTime, UNIX_EPOCH}, }; -use argon2::{Config, Variant}; pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() @@ -25,6 +25,13 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } +pub fn generate_keypair(old: Option<&[u8]>) -> Option> { + Some( + old.map(|s| s.to_vec()) + .unwrap_or_else(|| ruma_signatures::Ed25519KeyPair::generate().unwrap()), + ) +} + pub fn u64_from_bytes(bytes: &[u8]) -> u64 { let array: [u8; 8] = bytes.try_into().expect("bytes are valid u64"); u64::from_be_bytes(array) @@ -49,9 +56,5 @@ pub fn calculate_hash(password: &str) -> Result { }; let salt = random_string(32); - argon2::hash_encoded( - password.as_bytes(), - salt.as_bytes(), - &hashing_config, - ) -} \ No newline at end of file + argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) +} From 9b79798e56c772fe338815d97e7d423e4e920806 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 6 Apr 2020 14:53:34 +0200 Subject: [PATCH 0057/1727] start work on signing --- src/data.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff 
--git a/src/data.rs b/src/data.rs index 2aa003a..08e5e59 100644 --- a/src/data.rs +++ b/src/data.rs @@ -457,6 +457,9 @@ impl Data { )) .expect("ruma's reference hashes are correct"); + let mut pdu_json = serde_json::to_value(pdu).unwrap(); + ruma_signatures::hash_and_sign_event(self.hostname(), self.keypair(), &mut pdu_json); + self.pdu_leaves_replace(&room_id, &pdu.event_id); // The new value will need a new index. We store the last used index in 'n' @@ -478,9 +481,10 @@ impl Data { pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id pdu_id.extend_from_slice(&index.to_be_bytes()); - let pdu_json = serde_json::to_string(&pdu).unwrap(); - - self.db.pduid_pdu.insert(&pdu_id, &*pdu_json).unwrap(); + self.db + .pduid_pdu + .insert(&pdu_id, &*serde_json::to_string(&pdu_json).unwrap()) + .unwrap(); self.db .eventid_pduid From b0d9ccdb2db31e685e05b828669bec2a00b6b589 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 22 Apr 2020 11:53:06 +0200 Subject: [PATCH 0058/1727] Signing, basis for federation --- Cargo.lock | 1 - Cargo.toml | 2 +- src/client_server.rs | 28 ++++++++++++-- src/data.rs | 9 +++-- src/database.rs | 2 +- src/server_server.rs | 88 ++++++++++++++++++++++++++++++++++++++------ src/test.rs | 38 +++++++++++++------ src/utils.rs | 4 +- 8 files changed, 137 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d7eaf2..5e6dc73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1260,7 +1260,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma-federation-api.git#5448c650f0a583382152d0f43f2dcf720d495390" dependencies = [ "js_int", "ruma-api", diff --git a/Cargo.toml b/Cargo.toml index 5cf5412..2a3a0b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ ruma-identifiers = "0.15.1" ruma-api = "0.16.0-rc.1" ruma-events = "0.19.0" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } -ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } +ruma-federation-api = { path = "../ruma-federation-api" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" diff --git a/src/client_server.rs b/src/client_server.rs index a019142..c97ba80 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1,4 +1,4 @@ -use crate::{utils, Data, MatrixResult, Ruma}; +use crate::{server_server, utils, Data, MatrixResult, Ruma}; use log::debug; use rocket::{get, options, post, put, State}; @@ -674,7 +674,8 @@ pub fn join_room_by_id_or_alias_route( } } } else { - body.room_id_or_alias.try_into().unwrap() + todo!(); + //body.room_id_or_alias.try_into().unwrap() }; if data.room_join( @@ -725,8 +726,8 @@ pub fn invite_user_route( } #[post("/_matrix/client/r0/publicRooms", data = "")] -pub fn get_public_rooms_filtered_route( - data: State, +pub async fn get_public_rooms_filtered_route( + data: State<'_, Data>, body: Ruma, ) -> MatrixResult { let mut chunk = data @@ -752,6 +753,25 @@ pub fn get_public_rooms_filtered_route( }) .collect::>(); + chunk.extend_from_slice( + &server_server::send_request( + &data, + "https://matrix.org".to_owned(), + ruma_federation_api::v1::get_public_rooms::Request { + limit: None, + since: None, + include_all_networks: None, + third_party_instance_id: None, + }, + ) + .await + .unwrap() + .chunk + .into_iter() + .map(|c| serde_json::from_str(&serde_json::to_string(dbg!(&c)).unwrap()).unwrap()) + .collect::>(), + ); + chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); let 
total_room_count_estimate = (chunk.len() as u32).into(); diff --git a/src/data.rs b/src/data.rs index 08e5e59..8b24c9d 100644 --- a/src/data.rs +++ b/src/data.rs @@ -457,7 +457,7 @@ impl Data { )) .expect("ruma's reference hashes are correct"); - let mut pdu_json = serde_json::to_value(pdu).unwrap(); + let mut pdu_json = serde_json::to_value(&pdu).unwrap(); ruma_signatures::hash_and_sign_event(self.hostname(), self.keypair(), &mut pdu_json); self.pdu_leaves_replace(&room_id, &pdu.event_id); @@ -483,7 +483,7 @@ impl Data { self.db .pduid_pdu - .insert(&pdu_id, &*serde_json::to_string(&pdu_json).unwrap()) + .insert(&pdu_id, &*pdu_json.to_string()) .unwrap(); self.db @@ -497,7 +497,10 @@ impl Data { key.extend_from_slice(pdu.kind.to_string().as_bytes()); key.push(0xff); key.extend_from_slice(state_key.to_string().as_bytes()); - self.db.roomstateid_pdu.insert(key, &*pdu_json).unwrap(); + self.db + .roomstateid_pdu + .insert(key, &*pdu_json.to_string()) + .unwrap(); } pdu.event_id diff --git a/src/database.rs b/src/database.rs index bc32847..73406c1 100644 --- a/src/database.rs +++ b/src/database.rs @@ -127,7 +127,7 @@ impl Database { &*db.update_and_fetch("keypair", utils::generate_keypair) .unwrap() .unwrap(), - "0.0.0".to_owned(), + "key1".to_owned(), ) .unwrap(), _db: db, diff --git a/src/server_server.rs b/src/server_server.rs index 57f76ce..8f355c0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,25 +1,89 @@ -use std::convert::TryInto; +use log::error; +use http::header::{HeaderValue, AUTHORIZATION}; +use ruma_api::{ + error::{FromHttpRequestError, FromHttpResponseError}, + Endpoint, Outgoing, +}; +use std::convert::{TryFrom, TryInto}; -pub fn send_request>>>( +pub async fn send_request( data: &crate::Data, - method: http::Method, - uri: String, destination: String, request: T, -) where +) -> Option<::Incoming> +where + // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. 
+ // See https://github.com/rust-lang/rust/issues/54149 + ::Incoming: TryFrom>, Error = FromHttpRequestError>, + ::Incoming: TryFrom< + http::Response>, + Error = FromHttpResponseError<::ResponseError>, + >, T::Error: std::fmt::Debug, { let mut http_request: http::Request<_> = request.try_into().unwrap(); - let request_json = serde_json::to_value(http_request.body()).unwrap(); + let uri = destination.clone() + T::METADATA.path; + *http_request.uri_mut() = uri.parse().unwrap(); + + let body = http_request.body(); + let mut request_json = if !body.is_empty() { + serde_json::to_value(http_request.body()).unwrap() + } else { + serde_json::Map::new().into() + }; let request_map = request_json.as_object_mut().unwrap(); - request_map.insert("method".to_owned(), method.to_string().into()); - request_map.insert("uri".to_owned(), uri.to_string().into()); - //TODO: request_map.insert("origin".to_owned(), data.origin().to_string().into()); + request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map.insert("uri".to_owned(), uri.into()); + request_map.insert("origin".to_owned(), data.hostname().into()); request_map.insert("destination".to_owned(), destination.to_string().into()); - ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); - let signature = request_json["signatures"]; - data.reqwest_client().execute(http_request.into()); + ruma_signatures::sign_json(data.hostname(), data.keypair(), dbg!(&mut request_json)).unwrap(); + let signatures = request_json["signatures"] + .as_object() + .unwrap() + .values() + .next() + .unwrap() + .as_object() + .unwrap() + .iter() + .map(|(k, v)| (k, v.as_str().unwrap())); + + for s in signatures { + http_request.headers_mut().insert(AUTHORIZATION, HeaderValue::from_str(dbg!(&format!("X-Matrix origin={},key=\"{}\",sig=\"{}\"", data.hostname(), s.0, s.1))).unwrap()); + } + + let reqwest_response = data + .reqwest_client() + .execute(dbg!(http_request.into())) + .await; + + // Because reqwest::Response -> http::Response is complicated: + match reqwest_response { + Ok(mut reqwest_response) => { + let status = reqwest_response.status(); + let mut http_response = http::Response::builder().status(status); + let headers = http_response.headers_mut().unwrap(); + + for (k, v) in reqwest_response.headers_mut().drain() { + if let Some(key) = k { + headers.insert(key, v); + } + } + + let body = reqwest_response + .bytes() + .await + .unwrap() + .into_iter() + .collect(); + Some(::Incoming::try_from(dbg!(http_response.body(body).unwrap())).ok().unwrap()) + } + Err(e) => { + println!("ERROR: {}", e); + None + } + } } diff --git a/src/test.rs b/src/test.rs index c3cefb1..8756436 100644 --- a/src/test.rs +++ b/src/test.rs @@ -1,8 +1,7 @@ use super::*; -use rocket::{local::Client, http::Status}; -use serde_json::Value; -use serde_json::json; +use rocket::{http::Status, local::Client}; use ruma_client_api::error::ErrorKind; +use serde_json::{json, Value}; use std::time::Duration; fn setup_client() -> Client { @@ -19,7 +18,8 @@ async fn register_login() { let mut response = client .post("/_matrix/client/r0/register?kind=user") .body(registration_init()) - .dispatch().await; + .dispatch() + .await; let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); assert_eq!(response.status().code, 401); @@ -33,14 +33,16 @@ async fn login_after_register_correct_password() { let mut response = client .post("/_matrix/client/r0/register?kind=user") .body(registration_init()) - .dispatch().await; + 
.dispatch() + .await; let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); let session = body["session"].clone(); let response = client .post("/_matrix/client/r0/register?kind=user") .body(registration(session.as_str().unwrap())) - .dispatch().await; + .dispatch() + .await; assert_eq!(response.status().code, 200); let login_response = client @@ -57,14 +59,16 @@ async fn login_after_register_incorrect_password() { let mut response = client .post("/_matrix/client/r0/register?kind=user") .body(registration_init()) - .dispatch().await; + .dispatch() + .await; let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); let session = body["session"].clone(); let response = client .post("/_matrix/client/r0/register?kind=user") .body(registration(session.as_str().unwrap())) - .dispatch().await; + .dispatch() + .await; assert_eq!(response.status().code, 200); let mut login_response = client @@ -73,7 +77,15 @@ async fn login_after_register_incorrect_password() { .dispatch() .await; let body = serde_json::from_str::(&login_response.body_string().await.unwrap()).unwrap(); - assert_eq!(body.as_object().unwrap().get("errcode").unwrap().as_str().unwrap(), "M_FORBIDDEN"); + assert_eq!( + body.as_object() + .unwrap() + .get("errcode") + .unwrap() + .as_str() + .unwrap(), + "M_FORBIDDEN" + ); assert_eq!(login_response.status().code, 403); } @@ -98,7 +110,8 @@ fn registration(session: &str) -> String { "device_id": "GHTYAJCE", "initial_device_display_name": "Jungle Phone", "inhibit_login": false - }).to_string() + }) + .to_string() } fn login_with_password(password: &str) -> String { @@ -110,5 +123,6 @@ fn login_with_password(password: &str) -> String { }, "password": password, "initial_device_display_name": "Jungle Phone" - }).to_string() -} \ No newline at end of file + }) + .to_string() +} diff --git a/src/utils.rs b/src/utils.rs index a360036..07a99f1 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -27,8 +27,10 @@ pub fn increment(old: Option<&[u8]>) -> Option> { pub fn generate_keypair(old: Option<&[u8]>) -> Option> { Some( + /* old.map(|s| s.to_vec()) - .unwrap_or_else(|| ruma_signatures::Ed25519KeyPair::generate().unwrap()), + .unwrap_or_else(|| */ + ruma_signatures::Ed25519KeyPair::generate().unwrap(), ) } From 1af6dd984ab021d1a7eb253111e1773fc1b6c370 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 22 Apr 2020 20:55:11 +0200 Subject: [PATCH 0059/1727] More work on federation --- Cargo.lock | 3 +- Cargo.toml | 3 +- src/client_server.rs | 2 +- src/main.rs | 6 +++- src/server_server.rs | 86 +++++++++++++++++++++++++++++++++++++++++--- 5 files changed, 91 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e6dc73..df7e88f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,7 @@ dependencies = [ name = "conduit" version = "0.1.0" dependencies = [ + "base64 0.12.0", "directories", "http", "js_int", @@ -1265,6 +1266,7 @@ dependencies = [ "ruma-api", "ruma-events", "ruma-identifiers", + "ruma-serde", "serde", "serde_json", ] @@ -1292,7 +1294,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma-signatures.git#c3f8399c268695464730afd6077c7ce50155b8d5" dependencies = [ "base64 0.12.0", "ring", diff --git a/Cargo.toml b/Cargo.toml index 2a3a0b6..afe23f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } ruma-identifiers = "0.15.1" ruma-api = "0.16.0-rc.1" ruma-events = 
"0.19.0" -ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } +ruma-signatures = { path = "../ruma-signatures" } ruma-federation-api = { path = "../ruma-federation-api" } pretty_env_logger = "0.4.0" log = "0.4.8" @@ -30,3 +30,4 @@ tokio = { version = "0.2.18", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" +base64 = "0.12.0" diff --git a/src/client_server.rs b/src/client_server.rs index c97ba80..a2cc575 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -756,7 +756,7 @@ pub async fn get_public_rooms_filtered_route( chunk.extend_from_slice( &server_server::send_request( &data, - "https://matrix.org".to_owned(), + "https://matrix.koesters.xyz".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { limit: None, since: None, diff --git a/src/main.rs b/src/main.rs index f26ed91..0794fcf 100644 --- a/src/main.rs +++ b/src/main.rs @@ -60,6 +60,10 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::turn_server_route, client_server::publicised_groups_route, client_server::options_route, + server_server::well_known_server, + server_server::get_server_version, + server_server::get_server_keys, + server_server::get_server_keys_deprecated, ], ) .manage(data) @@ -72,7 +76,7 @@ fn main() { } pretty_env_logger::init(); - let data = Data::load_or_create("matrixtesting.koesters.xyz"); + let data = Data::load_or_create("matrixtesting.koesters.xyz:14004"); //data.debug(); setup_rocket(data).launch().unwrap(); diff --git a/src/server_server.rs b/src/server_server.rs index 8f355c0..cbdcb6a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,10 +1,20 @@ -use log::error; +use crate::{utils, Data, MatrixResult, Ruma}; use http::header::{HeaderValue, AUTHORIZATION}; +use log::error; +use rocket::{get, options, post, put, response::content::Json, State}; use ruma_api::{ error::{FromHttpRequestError, FromHttpResponseError}, Endpoint, Outgoing, }; -use std::convert::{TryFrom, TryInto}; +use ruma_client_api::error::{Error, ErrorKind}; +use ruma_federation_api::{v1::get_server_version, v2::get_server_keys}; +use serde_json::json; +use std::{ + collections::{BTreeMap, HashMap}, + convert::{TryFrom, TryInto}, + path::PathBuf, + time::{Duration, SystemTime}, +}; pub async fn send_request( data: &crate::Data, @@ -39,7 +49,7 @@ where request_map.insert("origin".to_owned(), data.hostname().into()); request_map.insert("destination".to_owned(), destination.to_string().into()); - ruma_signatures::sign_json(data.hostname(), data.keypair(), dbg!(&mut request_json)).unwrap(); + ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); let signatures = request_json["signatures"] .as_object() .unwrap() @@ -52,7 +62,16 @@ where .map(|(k, v)| (k, v.as_str().unwrap())); for s in signatures { - http_request.headers_mut().insert(AUTHORIZATION, HeaderValue::from_str(dbg!(&format!("X-Matrix origin={},key=\"{}\",sig=\"{}\"", data.hostname(), s.0, s.1))).unwrap()); + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + data.hostname(), + s.0, + s.1 + )) + .unwrap(), + ); } let reqwest_response = data @@ -79,7 +98,13 @@ where .unwrap() .into_iter() .collect(); - Some(::Incoming::try_from(dbg!(http_response.body(body).unwrap())).ok().unwrap()) + Some( + ::Incoming::try_from( + dbg!(http_response.body(body)).unwrap(), + ) + .ok() + .unwrap(), + ) } Err(e) => { println!("ERROR: {}", e); @@ -87,3 +112,54 @@ where } } } + 
+#[get("/.well-known/matrix/server")] +pub fn well_known_server(data: State) -> Json { + rocket::response::content::Json( + json!({ "m.server": "matrixtesting.koesters.xyz:14004"}).to_string(), + ) +} + +#[get("/_matrix/federation/v1/version")] +pub fn get_server_version(data: State) -> MatrixResult { + MatrixResult(Ok(get_server_version::Response { + server: get_server_version::Server { + name: Some("Conduit".to_owned()), + version: Some(env!("CARGO_PKG_VERSION").to_owned()), + }, + })) +} + +#[get("/_matrix/key/v2/server", data = "")] +pub fn get_server_keys(data: State, body: Ruma) -> Json { + let mut verify_keys = BTreeMap::new(); + verify_keys.insert( + format!("ed25519:{}", data.keypair().version()), + get_server_keys::VerifyKey { + key: base64::encode_config(data.keypair().public_key(), base64::STANDARD_NO_PAD), + }, + ); + let mut response = serde_json::from_slice( + http::Response::try_from(get_server_keys::Response { + server_name: data.hostname().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 60 * 24), + }) + .unwrap() + .body(), + ) + .unwrap(); + ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut response).unwrap(); + Json(dbg!(response.to_string())) +} + +#[get("/_matrix/key/v2/server/<_key_id>", data = "")] +pub fn get_server_keys_deprecated( + data: State, + body: Ruma, + _key_id: String, +) -> Json { + get_server_keys(data, body) +} From 873d191569985737ccc815dbb031bbbc18e7e15e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 22 Apr 2020 21:14:40 +0200 Subject: [PATCH 0060/1727] fix: http body as content when signing --- src/client_server.rs | 2 +- src/server_server.rs | 24 +++++++++++++----------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index a2cc575..dd609f2 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -756,7 +756,7 @@ pub async fn get_public_rooms_filtered_route( chunk.extend_from_slice( &server_server::send_request( &data, - "https://matrix.koesters.xyz".to_owned(), + "matrix.koesters.xyz".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { limit: None, since: None, diff --git a/src/server_server.rs b/src/server_server.rs index cbdcb6a..f0fdf65 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -32,24 +32,26 @@ where T::Error: std::fmt::Debug, { let mut http_request: http::Request<_> = request.try_into().unwrap(); - let uri = destination.clone() + T::METADATA.path; - *http_request.uri_mut() = uri.parse().unwrap(); + + *http_request.uri_mut() = ("https://".to_owned() + &destination.clone() + T::METADATA.path).parse().unwrap(); - let body = http_request.body(); - let mut request_json = if !body.is_empty() { - serde_json::to_value(http_request.body()).unwrap() - } else { - serde_json::Map::new().into() + let mut request_map = serde_json::Map::new(); + + if !http_request.body().is_empty() { + request_map.insert("content".to_owned(), + serde_json::to_value(http_request.body()).unwrap()); }; - let request_map = request_json.as_object_mut().unwrap(); - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert("uri".to_owned(), uri.into()); + request_map.insert("uri".to_owned(), T::METADATA.path.into()); request_map.insert("origin".to_owned(), data.hostname().into()); request_map.insert("destination".to_owned(), destination.to_string().into()); + //request_map.insert("signatures".to_owned(), 
json!({})); + + let mut request_json = request_map.into(); + ruma_signatures::sign_json(data.hostname(), data.keypair(), dbg!(&mut request_json)).unwrap(); + dbg!(&request_json); - ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); let signatures = request_json["signatures"] .as_object() .unwrap() From 120b6f4b9542bc46c6335b3f36c8875aa352a228 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 23 Apr 2020 14:27:50 +0200 Subject: [PATCH 0061/1727] Bump dependencies and fixes --- Cargo.lock | 51 +++++++++++++++++++++++--------------------- Cargo.toml | 10 ++++----- src/client_server.rs | 8 +++---- src/data.rs | 23 +++++++++++--------- src/pdu.rs | 23 ++++++-------------- src/ruma_wrapper.rs | 23 ++++++-------------- src/server_server.rs | 19 +++++------------ 7 files changed, 67 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df7e88f..0b659c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,9 +11,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" +checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62" [[package]] name = "arrayref" @@ -119,9 +119,9 @@ checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" [[package]] name = "cc" -version = "1.0.50" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "c3d87b23d6a92cd03af510a5ade527033f6aa6fa92161e2d5863a907d4c5e31d" [[package]] name = "cfg-if" @@ -841,9 +841,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ "cfg-if", "cloudabi", @@ -915,9 +915,9 @@ checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" [[package]] name = "pin-utils" -version = "0.1.0-alpha.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" @@ -1191,9 +1191,9 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.16.0-rc.1" +version = "0.16.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7769e934360383f91d68a2b9132610d02436ef3272cbdd46de239c9025198a36" +checksum = "c296a951625ccc8c04d5188f1791d1628503c8614073a05833af9fed18b029c1" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1207,9 +1207,9 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.16.0-rc.1" +version = "0.16.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8dad3311d0bee6d43da684ba03f9a84cddd39f9b3e7e88cab5d726419ec22e" +checksum = "0f6b02a6a860a96e3c2081c8aea88b37b2918b53e539856b73aadde1908b65ad" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1218,8 +1218,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.8.0-rc.2" -source = "git+https://github.com/ruma/ruma-client-api.git#1b7863dc36e6e043ae365791cc719dc190abd660" +version = 
"0.8.0-rc.5" +source = "git+https://github.com/ruma/ruma-client-api.git#473cf2386178781688f12c538ee6419869d14a46" dependencies = [ "http", "js_int", @@ -1235,9 +1235,9 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.19.0" +version = "0.21.0-beta.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "844b5d38397c945395c7a1eaf89d55714c3d22983b870085a1a67d51fb6611cf" +checksum = "d4802476bbe517f2ac6cb7b1cf4869d54586c10e86e2ddc00806cafa32a96553" dependencies = [ "js_int", "ruma-events-macros", @@ -1249,9 +1249,9 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.19.0-final" +version = "0.21.0-beta.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5477046b734fde45dd7913dbc8d7b260af3b1c31ea2bc329bd2f0b44e37368be" +checksum = "abd3cfe96c9887fe2eebfa2e5e7d3a4afff02c374874d4e718f46dab5fd3320d" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1261,6 +1261,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.1" +source = "git+https://git.koesters.xyz/timo/ruma-federation-api.git#6f1c5a6a714d6be2f420f3832d31e214fe4fb229" dependencies = [ "js_int", "ruma-api", @@ -1273,10 +1274,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63db5545f38077ea141fb112df070773e6ab9b7025174d732f56c6b37525ccc0" +checksum = "6ee548c5dbb5a92a93bb435fd3b66484cd19b0f37450c9b93677338cbe9550d2" dependencies = [ + "rand", "serde", ] @@ -1294,6 +1296,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" +source = "git+https://git.koesters.xyz/timo/ruma-signatures.git#e2cd0927c14133b8d49a8cd9fbd96f8af5c60f07" dependencies = [ "base64 0.12.0", "ring", @@ -1340,9 +1343,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" +checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" [[package]] name = "schannel" @@ -1877,9 +1880,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi 0.3.8", ] diff --git a/Cargo.toml b/Cargo.toml index afe23f7..9ae2a0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } -ruma-identifiers = "0.15.1" -ruma-api = "0.16.0-rc.1" -ruma-events = "0.19.0" -ruma-signatures = { path = "../ruma-signatures" } -ruma-federation-api = { path = "../ruma-federation-api" } +ruma-identifiers = { version = "0.16.0", features = ["rand"] } +ruma-api = "0.16.0-rc.3" +ruma-events = "0.21.0-beta.1" +ruma-signatures = { git = "https://git.koesters.xyz/timo/ruma-signatures.git" } +ruma-federation-api = { git = "https://git.koesters.xyz/timo/ruma-federation-api.git" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" diff --git a/src/client_server.rs b/src/client_server.rs index 
dd609f2..2c88daa 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -549,7 +549,7 @@ pub fn create_room_route( body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::try_from(data.hostname()).expect("host is valid"); + let room_id = RoomId::new(data.hostname()).expect("host is valid"); let user_id = body.user_id.clone().expect("user is authenticated"); data.pdu_append( @@ -756,7 +756,7 @@ pub async fn get_public_rooms_filtered_route( chunk.extend_from_slice( &server_server::send_request( &data, - "matrix.koesters.xyz".to_owned(), + "koesters.xyz".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { limit: None, since: None, @@ -913,7 +913,6 @@ pub fn sync_route( let room_events = pdus .into_iter() .map(|pdu| pdu.to_room_event()) - .filter_map(|e| e) .collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -949,7 +948,6 @@ pub fn sync_route( let room_events = pdus .into_iter() .map(|pdu| pdu.to_room_event()) - .filter_map(|e| e) .collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -973,7 +971,7 @@ pub fn sync_route( let events = data .pdus_since(&room_id, since) .into_iter() - .filter_map(|pdu| pdu.to_stripped_state_event()) + .map(|pdu| pdu.to_stripped_state_event()) .collect(); invited_rooms.insert( diff --git a/src/data.rs b/src/data.rs index 8b24c9d..864536f 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,5 +1,5 @@ use crate::{utils, Database, PduEvent}; -use ruma_events::{collections::only::Event as EduEvent, EventResult, EventType}; +use ruma_events::{collections::only::Event as EduEvent, EventJson, EventType}; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; use serde_json::json; @@ -604,7 +604,7 @@ impl Data { } /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. - pub fn roomlatests_since(&self, room_id: &RoomId, since: u64) -> Vec { + pub fn roomlatests_since(&self, room_id: &RoomId, since: u64) -> Vec> { let mut room_latests = Vec::new(); let mut prefix = room_id.to_string().as_bytes().to_vec(); @@ -617,10 +617,11 @@ impl Data { if key.starts_with(&prefix) { current = key.to_vec(); room_latests.push( - serde_json::from_slice::>(&value) + serde_json::from_slice::>(&value) .expect("room_latest in db is valid") - .into_result() - .expect("room_latest in db is valid"), + .deserialize() + .expect("room_latest in db is valid") + .into(), ); } else { break; @@ -691,7 +692,7 @@ impl Data { } /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. 
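// Illustrative sketch, not part of the original patch: the EventJson pattern these
// read-receipt helpers are migrated to in this commit. A raw JSON value read from the
// database is parsed into an EventJson wrapper and only fully deserialized when a caller
// needs the typed event. The function name and the &[u8] input are assumptions standing
// in for a value as it would come out of sled.
use ruma_events::{collections::only::Event as EduEvent, EventJson};

fn parse_edu(stored: &[u8]) -> EventJson<EduEvent> {
    // Same call shape the patch itself uses when reading room_latest/room_active entries.
    serde_json::from_slice::<EventJson<EduEvent>>(stored).expect("edu in db is valid")
}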
- pub fn roomactives_in(&self, room_id: &RoomId) -> Vec { + pub fn roomactives_in(&self, room_id: &RoomId) -> Vec> { let mut room_actives = Vec::new(); let mut prefix = room_id.to_string().as_bytes().to_vec(); @@ -704,10 +705,11 @@ impl Data { if key.starts_with(&prefix) { current = key.to_vec(); room_actives.push( - serde_json::from_slice::>(&value) + serde_json::from_slice::>(&value) .expect("room_active in db is valid") - .into_result() - .expect("room_active in db is valid"), + .deserialize() + .expect("room_active in db is valid") + .into(), ); } else { break; @@ -720,7 +722,8 @@ impl Data { user_ids: Vec::new(), }, room_id: None, // None because it can be inferred - })]; + }) + .into()]; } else { room_actives } diff --git a/src/pdu.rs b/src/pdu.rs index 3f15aa2..b6aa45d 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,6 +1,6 @@ use js_int::UInt; use ruma_events::{ - collections::all::RoomEvent, stripped::AnyStrippedStateEvent, EventResult, EventType, + collections::all::RoomEvent, stripped::AnyStrippedStateEvent, EventJson, EventType, }; use ruma_federation_api::EventHash; use ruma_identifiers::{EventId, RoomId, UserId}; @@ -31,29 +31,20 @@ pub struct PduEvent { } impl PduEvent { - // TODO: This shouldn't be an option - pub fn to_room_event(&self) -> Option { + pub fn to_room_event(&self) -> EventJson { // Can only fail in rare circumstances that won't ever happen here, see // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html let json = serde_json::to_string(&self).unwrap(); - // EventResult's deserialize implementation always returns `Ok(...)` - Some( - serde_json::from_str::>(&json) - .unwrap() - .into_result() - .ok()?, - ) + // EventJson's deserialize implementation always returns `Ok(...)` + serde_json::from_str::>(&json).unwrap() } - pub fn to_stripped_state_event(&self) -> Option { + pub fn to_stripped_state_event(&self) -> EventJson { // Can only fail in rare circumstances that won't ever happen here, see // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html let json = serde_json::to_string(&self).unwrap(); - // EventResult's deserialize implementation always returns `Ok(...)` - serde_json::from_str::>(&json) - .unwrap() - .into_result() - .ok() + // EventJson's deserialize implementation always returns `Ok(...)` + serde_json::from_str::>(&json).unwrap() } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 81b7ea8..81294a4 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -7,12 +7,11 @@ use rocket::{ Request, State, }; use ruma_api::{ - error::{FromHttpRequestError, FromHttpResponseError}, - Endpoint, Outgoing, + Endpoint }; use ruma_identifiers::UserId; use std::{ - convert::{TryFrom, TryInto}, + convert::{TryInto}, io::Cursor, ops::Deref, }; @@ -22,21 +21,13 @@ const MESSAGE_LIMIT: u64 = 65535; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { - body: T::Incoming, +pub struct Ruma { + body: T, pub user_id: Option, pub json_body: serde_json::Value, } impl<'a, T: Endpoint> FromData<'a> for Ruma -where - // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. 
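// Illustrative sketch, not part of the original patch: the conversion step this wrapper
// performs once the where-clause workaround below is dropped. The body collected from
// Rocket is wrapped in an http::Request and turned into the typed ruma request `T`
// directly via TryFrom (previously this went through `T::Incoming`). The function name
// and the plain Vec<u8> body type are assumptions made for the sketch.
fn into_ruma_request<T>(http_request: http::Request<Vec<u8>>) -> Option<T>
where
    T: std::convert::TryFrom<http::Request<Vec<u8>>>,
{
    // Returns None instead of panicking when the request does not match the endpoint.
    T::try_from(http_request).ok()
}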
- // See https://github.com/rust-lang/rust/issues/54149 - ::Incoming: TryFrom>, Error = FromHttpRequestError>, - ::Incoming: TryFrom< - http::Response>, - Error = FromHttpResponseError<::ResponseError>, - >, { type Error = (); // TODO: Better error handling type Owned = Data; @@ -95,7 +86,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); log::info!("{:?}", http_request); - match T::Incoming::try_from(http_request) { + match T::try_from(http_request) { Ok(t) => Success(Ruma { body: t, user_id, @@ -115,8 +106,8 @@ where } } -impl Deref for Ruma { - type Target = T::Incoming; +impl Deref for Ruma { + type Target = T; fn deref(&self) -> &Self::Target { &self.body diff --git a/src/server_server.rs b/src/server_server.rs index f0fdf65..13e1a3f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,7 +4,7 @@ use log::error; use rocket::{get, options, post, put, response::content::Json, State}; use ruma_api::{ error::{FromHttpRequestError, FromHttpResponseError}, - Endpoint, Outgoing, + Endpoint, }; use ruma_client_api::error::{Error, ErrorKind}; use ruma_federation_api::{v1::get_server_version, v2::get_server_keys}; @@ -20,20 +20,11 @@ pub async fn send_request( data: &crate::Data, destination: String, request: T, -) -> Option<::Incoming> -where - // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. - // See https://github.com/rust-lang/rust/issues/54149 - ::Incoming: TryFrom>, Error = FromHttpRequestError>, - ::Incoming: TryFrom< - http::Response>, - Error = FromHttpResponseError<::ResponseError>, - >, - T::Error: std::fmt::Debug, +) -> Option { let mut http_request: http::Request<_> = request.try_into().unwrap(); - *http_request.uri_mut() = ("https://".to_owned() + &destination.clone() + T::METADATA.path).parse().unwrap(); + *http_request.uri_mut() = format!("https://{}:8448{}", &destination.clone(), T::METADATA.path).parse().unwrap(); let mut request_map = serde_json::Map::new(); @@ -50,7 +41,7 @@ where let mut request_json = request_map.into(); ruma_signatures::sign_json(data.hostname(), data.keypair(), dbg!(&mut request_json)).unwrap(); - dbg!(&request_json); + println!("{}", &request_json); let signatures = request_json["signatures"] .as_object() @@ -101,7 +92,7 @@ where .into_iter() .collect(); Some( - ::Incoming::try_from( + ::try_from( dbg!(http_response.body(body)).unwrap(), ) .ok() From 720cc0cffce3363dee2aacbccc6fd53b76ac8397 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 25 Apr 2020 11:47:32 +0200 Subject: [PATCH 0062/1727] feat: federated room directory --- src/client_server.rs | 22 ++++++--------- src/data.rs | 3 +- src/ruma_wrapper.rs | 13 ++------- src/server_server.rs | 67 +++++++++++++++++++------------------------- src/utils.rs | 4 +-- 5 files changed, 43 insertions(+), 66 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 2c88daa..dac5e47 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -37,8 +37,8 @@ use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, UserId}; use serde_json::json; use std::{ - collections::{BTreeMap, HashMap}, - convert::{TryFrom, TryInto}, + collections::BTreeMap, + convert::TryInto, path::PathBuf, time::{Duration, SystemTime}, }; @@ -753,10 +753,12 @@ pub async fn get_public_rooms_filtered_route( }) .collect::>(); + chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + chunk.extend_from_slice( &server_server::send_request( &data, - "koesters.xyz".to_owned(), + 
"chat.privacytools.io".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { limit: None, since: None, @@ -768,12 +770,10 @@ pub async fn get_public_rooms_filtered_route( .unwrap() .chunk .into_iter() - .map(|c| serde_json::from_str(&serde_json::to_string(dbg!(&c)).unwrap()).unwrap()) + .map(|c| serde_json::from_str(&serde_json::to_string(&c).unwrap()).unwrap()) .collect::>(), ); - chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = (chunk.len() as u32).into(); MatrixResult(Ok(get_public_rooms_filtered::Response { @@ -910,10 +910,7 @@ pub fn sync_route( .unwrap_or(0); for room_id in joined_roomids { let pdus = data.pdus_since(&room_id, since); - let room_events = pdus - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect(); + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -945,10 +942,7 @@ pub fn sync_route( let left_roomids = data.rooms_left(body.user_id.as_ref().expect("user is authenticated")); for room_id in left_roomids { let pdus = data.pdus_since(&room_id, since); - let room_events = pdus - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect(); + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); diff --git a/src/data.rs b/src/data.rs index 864536f..8096a08 100644 --- a/src/data.rs +++ b/src/data.rs @@ -458,7 +458,8 @@ impl Data { .expect("ruma's reference hashes are correct"); let mut pdu_json = serde_json::to_value(&pdu).unwrap(); - ruma_signatures::hash_and_sign_event(self.hostname(), self.keypair(), &mut pdu_json); + ruma_signatures::hash_and_sign_event(self.hostname(), self.keypair(), &mut pdu_json) + .unwrap(); self.pdu_leaves_replace(&room_id, &pdu.event_id); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 81294a4..1d3328b 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -6,15 +6,9 @@ use rocket::{ Outcome::*, Request, State, }; -use ruma_api::{ - Endpoint -}; +use ruma_api::Endpoint; use ruma_identifiers::UserId; -use std::{ - convert::{TryInto}, - io::Cursor, - ops::Deref, -}; +use std::{convert::TryInto, io::Cursor, ops::Deref}; use tokio::io::AsyncReadExt; const MESSAGE_LIMIT: u64 = 65535; @@ -27,8 +21,7 @@ pub struct Ruma { pub json_body: serde_json::Value, } -impl<'a, T: Endpoint> FromData<'a> for Ruma -{ +impl<'a, T: Endpoint> FromData<'a> for Ruma { type Error = (); // TODO: Better error handling type Owned = Data; type Borrowed = Self::Owned; diff --git a/src/server_server.rs b/src/server_server.rs index 13e1a3f..a62f23b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,18 +1,14 @@ -use crate::{utils, Data, MatrixResult, Ruma}; +use crate::{Data, MatrixResult}; use http::header::{HeaderValue, AUTHORIZATION}; use log::error; -use rocket::{get, options, post, put, response::content::Json, State}; -use ruma_api::{ - error::{FromHttpRequestError, FromHttpResponseError}, - Endpoint, -}; -use ruma_client_api::error::{Error, ErrorKind}; +use rocket::{get, post, put, response::content::Json, State}; +use ruma_api::Endpoint; +use ruma_client_api::error::Error; use ruma_federation_api::{v1::get_server_version, v2::get_server_keys}; use serde_json::json; use std::{ - collections::{BTreeMap, HashMap}, - convert::{TryFrom, TryInto}, - path::PathBuf, + collections::BTreeMap, + 
convert::TryFrom, time::{Duration, SystemTime}, }; @@ -20,28 +16,29 @@ pub async fn send_request( data: &crate::Data, destination: String, request: T, -) -> Option -{ +) -> Option { let mut http_request: http::Request<_> = request.try_into().unwrap(); - - *http_request.uri_mut() = format!("https://{}:8448{}", &destination.clone(), T::METADATA.path).parse().unwrap(); + + *http_request.uri_mut() = format!("https://{}:8448{}", &destination.clone(), T::METADATA.path) + .parse() + .unwrap(); let mut request_map = serde_json::Map::new(); if !http_request.body().is_empty() { - request_map.insert("content".to_owned(), - serde_json::to_value(http_request.body()).unwrap()); + request_map.insert( + "content".to_owned(), + serde_json::to_value(http_request.body()).unwrap(), + ); }; request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); request_map.insert("uri".to_owned(), T::METADATA.path.into()); request_map.insert("origin".to_owned(), data.hostname().into()); - request_map.insert("destination".to_owned(), destination.to_string().into()); - //request_map.insert("signatures".to_owned(), json!({})); + request_map.insert("destination".to_owned(), "privacytools.io".into()); let mut request_json = request_map.into(); - ruma_signatures::sign_json(data.hostname(), data.keypair(), dbg!(&mut request_json)).unwrap(); - println!("{}", &request_json); + ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); let signatures = request_json["signatures"] .as_object() @@ -67,10 +64,7 @@ pub async fn send_request( ); } - let reqwest_response = data - .reqwest_client() - .execute(dbg!(http_request.into())) - .await; + let reqwest_response = data.reqwest_client().execute(http_request.into()).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -92,15 +86,13 @@ pub async fn send_request( .into_iter() .collect(); Some( - ::try_from( - dbg!(http_response.body(body)).unwrap(), - ) - .ok() - .unwrap(), + ::try_from(http_response.body(body).unwrap()) + .ok() + .unwrap(), ) } Err(e) => { - println!("ERROR: {}", e); + error!("{}", e); None } } @@ -114,7 +106,7 @@ pub fn well_known_server(data: State) -> Json { } #[get("/_matrix/federation/v1/version")] -pub fn get_server_version(data: State) -> MatrixResult { +pub fn get_server_version() -> MatrixResult { MatrixResult(Ok(get_server_version::Response { server: get_server_version::Server { name: Some("Conduit".to_owned()), @@ -123,8 +115,8 @@ pub fn get_server_version(data: State) -> MatrixResult, body: Ruma) -> Json { +#[get("/_matrix/key/v2/server")] +pub fn get_server_keys(data: State) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", data.keypair().version()), @@ -138,21 +130,20 @@ pub fn get_server_keys(data: State, body: Ruma) verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), - valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 60 * 24), + valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), }) .unwrap() .body(), ) .unwrap(); ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut response).unwrap(); - Json(dbg!(response.to_string())) + Json(response.to_string()) } -#[get("/_matrix/key/v2/server/<_key_id>", data = "")] +#[get("/_matrix/key/v2/server/<_key_id>")] pub fn get_server_keys_deprecated( data: State, - body: Ruma, _key_id: String, ) -> Json { - get_server_keys(data, body) + get_server_keys(data) } diff --git a/src/utils.rs b/src/utils.rs index 07a99f1..a360036 100644 
--- a/src/utils.rs +++ b/src/utils.rs @@ -27,10 +27,8 @@ pub fn increment(old: Option<&[u8]>) -> Option> { pub fn generate_keypair(old: Option<&[u8]>) -> Option> { Some( - /* old.map(|s| s.to_vec()) - .unwrap_or_else(|| */ - ruma_signatures::Ed25519KeyPair::generate().unwrap(), + .unwrap_or_else(|| ruma_signatures::Ed25519KeyPair::generate().unwrap()), ) } From 4cc0a070929bb714429a9df3ee501d84fce65a9d Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 26 Apr 2020 22:39:15 +0200 Subject: [PATCH 0063/1727] feat: user renaming --- src/client_server.rs | 67 ++++++++++++++++++++++++-------------------- src/data.rs | 37 +++++++++++++++++++++--- src/main.rs | 2 +- src/ruma_wrapper.rs | 8 ++++++ src/server_server.rs | 26 +++++++++++++---- 5 files changed, 98 insertions(+), 42 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index dac5e47..873d3ec 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -38,7 +38,7 @@ use ruma_identifiers::{RoomId, UserId}; use serde_json::json; use std::{ collections::BTreeMap, - convert::TryInto, + convert::{TryFrom, TryInto}, path::PathBuf, time::{Duration, SystemTime}, }; @@ -325,8 +325,7 @@ pub fn set_displayname_route( if displayname == "" { data.displayname_remove(&user_id); } else { - data.displayname_set(&user_id, body.displayname.clone()); - // TODO send a new m.room.member join event with the updated displayname + data.displayname_set(&user_id, displayname.clone()); // TODO send a new m.presence event with the updated displayname } } @@ -610,26 +609,34 @@ pub fn create_room_route( MatrixResult(Ok(create_room::Response { room_id })) } -#[get("/_matrix/client/r0/directory/room/")] -pub fn get_alias_route(room_alias: String) -> MatrixResult { +#[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +pub fn get_alias_route( + data: State, + body: Ruma, + _room_alias: String, +) -> MatrixResult { // TODO - let room_id = match &*room_alias { - "#room:localhost" => "!xclkjvdlfj:localhost", - _ => { - debug!("Room not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); + let room_id = if body.room_alias.server_name() == data.hostname() { + match body.room_alias.alias() { + "conduit" => "!lgOCCXQKtXOAPlAlG5:conduit.rs", + _ => { + debug!("Room alias not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } } + } else { + todo!("ask remote server"); } .try_into() .unwrap(); MatrixResult(Ok(get_alias::Response { room_id, - servers: vec!["localhost".to_owned()], + servers: vec!["conduit.rs".to_owned()], })) } @@ -661,21 +668,19 @@ pub fn join_room_by_id_or_alias_route( body: Ruma, _room_id_or_alias: String, ) -> MatrixResult { - let room_id = if body.room_id_or_alias.is_room_alias_id() { - match body.room_id_or_alias.as_ref() { - "#room:localhost" => "!xclkjvdlfj:localhost".try_into().unwrap(), - _ => { - debug!("Room not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } + let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { + Ok(room_id) => room_id, + Err(room_alias) => if room_alias.server_name() == data.hostname() { + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room alias not found.".to_owned(), + status_code: 
http::StatusCode::NOT_FOUND, + })); + } else { + // Ask creator server of the room to join TODO ask someone else when not available + //server_server::send_request(data, destination, request) + todo!(); } - } else { - todo!(); - //body.room_id_or_alias.try_into().unwrap() }; if data.room_join( @@ -758,9 +763,9 @@ pub async fn get_public_rooms_filtered_route( chunk.extend_from_slice( &server_server::send_request( &data, - "chat.privacytools.io".to_owned(), + "privacytools.io".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { - limit: None, + limit: Some(20_u32.into()), since: None, include_all_networks: None, third_party_instance_id: None, diff --git a/src/data.rs b/src/data.rs index 8096a08..88facca 100644 --- a/src/data.rs +++ b/src/data.rs @@ -91,11 +91,21 @@ impl Data { } /// Set a new displayname. - pub fn displayname_set(&self, user_id: &UserId, displayname: Option) { + pub fn displayname_set(&self, user_id: &UserId, displayname: String) { self.db .userid_displayname - .insert(user_id.to_string(), &*displayname.unwrap_or_default()) + .insert(user_id.to_string(), &*displayname) .unwrap(); + for room_id in self.rooms_joined(user_id) { + self.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "join", "displayname": displayname}), + None, + Some(user_id.to_string()), + ); + } } /// Get a the displayname of a user. @@ -200,11 +210,19 @@ impl Data { room_id.to_string().as_bytes().into(), ); + let mut content = json!({"membership": "join"}); + if let Some(displayname) = self.displayname_get(user_id) { + content + .as_object_mut() + .unwrap() + .insert("displayname".to_owned(), displayname.into()); + } + self.pdu_append( room_id.clone(), user_id.clone(), EventType::RoomMember, - json!({"membership": "join"}), + content, None, Some(user_id.to_string()), ); @@ -429,6 +447,17 @@ impl Data { .unwrap_or(0_u64) + 1; + let mut unsigned = unsigned.unwrap_or_default(); + // TODO: Optimize this to not load the whole room state? 
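// Illustrative note, not part of the original patch: the lookup below copies the content
// of the state event being replaced into `unsigned`, so clients receive the usual Matrix
// prev_content field. For a hypothetical membership change the result would look roughly
// like: unsigned == json!({ "prev_content": { "membership": "invite" } }).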
+ if let Some(state_key) = &state_key { + if let Some(prev_pdu) = self + .room_state(&room_id) + .get(&(event_type.clone(), state_key.clone())) + { + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + } + } + let mut pdu = PduEvent { event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), room_id: room_id.clone(), @@ -442,7 +471,7 @@ impl Data { depth: depth.try_into().unwrap(), auth_events: Vec::new(), redacts: None, - unsigned: unsigned.unwrap_or_default(), + unsigned, hashes: ruma_federation_api::EventHash { sha256: "aaa".to_owned(), }, diff --git a/src/main.rs b/src/main.rs index 0794fcf..324e23f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -76,7 +76,7 @@ fn main() { } pretty_env_logger::init(); - let data = Data::load_or_create("matrixtesting.koesters.xyz:14004"); + let data = Data::load_or_create("conduit.rs"); //data.debug(); setup_rocket(data).launch().unwrap(); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1d3328b..eec9d84 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -164,3 +164,11 @@ where } } } + +impl Deref for MatrixResult { + type Target = Result; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/src/server_server.rs b/src/server_server.rs index a62f23b..93c3c22 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -12,6 +12,22 @@ use std::{ time::{Duration, SystemTime}, }; +pub async fn request_well_known(data: &crate::Data, destination: &str) -> Option { + let body: serde_json::Value = serde_json::from_str(&data + .reqwest_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? + .text() + .await + .ok()?).ok()?; + Some(body.get("m.server")?.as_str()?.to_owned()) +} + pub async fn send_request( data: &crate::Data, destination: String, @@ -19,7 +35,8 @@ pub async fn send_request( ) -> Option { let mut http_request: http::Request<_> = request.try_into().unwrap(); - *http_request.uri_mut() = format!("https://{}:8448{}", &destination.clone(), T::METADATA.path) + let actual_destination = "https://".to_owned() + &request_well_known(data, &destination).await.unwrap_or(destination.clone() + ":8448"); + *http_request.uri_mut() = (actual_destination + T::METADATA.path) .parse() .unwrap(); @@ -35,7 +52,7 @@ pub async fn send_request( request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); request_map.insert("uri".to_owned(), T::METADATA.path.into()); request_map.insert("origin".to_owned(), data.hostname().into()); - request_map.insert("destination".to_owned(), "privacytools.io".into()); + request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); @@ -141,9 +158,6 @@ pub fn get_server_keys(data: State) -> Json { } #[get("/_matrix/key/v2/server/<_key_id>")] -pub fn get_server_keys_deprecated( - data: State, - _key_id: String, -) -> Json { +pub fn get_server_keys_deprecated(data: State, _key_id: String) -> Json { get_server_keys(data) } From 23cb550d00ca41eb0447e3e84b5e9e2ef1f88047 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 28 Apr 2020 19:56:34 +0200 Subject: [PATCH 0064/1727] forget rooms, load history --- src/client_server.rs | 59 ++++++++++++++++++++++++++++++++++++++------ src/data.rs | 39 +++++++++++++++++++++++++++++ src/main.rs | 2 ++ 3 files changed, 92 insertions(+), 8 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 
873d3ec..80bf61c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -13,9 +13,9 @@ use ruma_client_api::{ filter::{self, create_filter, get_filter}, keys::{get_keys, upload_keys}, membership::{ - get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, leave_room, + get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, forget_room, leave_room, }, - message::create_message_event, + message::{get_message_events, create_message_event}, presence::set_presence, profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, @@ -222,10 +222,13 @@ pub fn get_capabilities_route( body: Ruma, ) -> MatrixResult { // TODO + //let mut available = BTreeMap::new(); + //available.insert("5".to_owned(), get_capabilities::RoomVersionStability::Unstable); + MatrixResult(Ok(get_capabilities::Response { capabilities: get_capabilities::Capabilities { change_password: None, - room_versions: None, + room_versions: None, //Some(get_capabilities::RoomVersionsCapability { default: "5".to_owned(), available }), custom_capabilities: BTreeMap::new(), }, })) @@ -708,6 +711,17 @@ pub fn leave_room_route( MatrixResult(Ok(leave_room::Response)) } +#[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] +pub fn forget_room_route( + data: State, + body: Ruma, + _room_id: String, +) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + data.room_forget(&body.room_id, &user_id); + MatrixResult(Ok(forget_room::Response)) +} + #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] pub fn invite_user_route( data: State, @@ -903,7 +917,7 @@ pub fn sync_route( data: State, body: Ruma, ) -> MatrixResult { - std::thread::sleep(Duration::from_millis(200)); + std::thread::sleep(Duration::from_millis(300)); let next_batch = data.last_pdu_index().to_string(); let mut joined_rooms = BTreeMap::new(); @@ -915,7 +929,8 @@ pub fn sync_route( .unwrap_or(0); for room_id in joined_roomids { let pdus = data.pdus_since(&room_id, since); - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect::>(); + let is_first_pdu = data.room_pdu_first(&room_id, since); let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -933,8 +948,8 @@ pub fn sync_route( notification_count: None, }, timeline: sync_events::Timeline { - limited: Some(false), - prev_batch: Some("".to_owned()), + limited: None, + prev_batch: Some(since.to_string()), events: room_events, }, state: sync_events::State { events: Vec::new() }, @@ -957,7 +972,7 @@ pub fn sync_route( account_data: sync_events::AccountData { events: Vec::new() }, timeline: sync_events::Timeline { limited: Some(false), - prev_batch: Some("".to_owned()), + prev_batch: Some(next_batch.clone()), events: room_events, }, state: sync_events::State { events: Vec::new() }, @@ -995,6 +1010,34 @@ pub fn sync_route( })) } +#[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] +pub fn get_message_events_route( + data: State, + body: Ruma, + _room_id: String) -> MatrixResult { + if let get_message_events::Direction::Forward = body.dir {todo!();} + + if let Ok(from) = body + .from + .clone() + .parse() { + let pdus = data.pdus_until(&body.room_id, from); + let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect::>(); + MatrixResult(Ok(get_message_events::Response { + start: body.from.clone(), + end: "".to_owned(), + 
chunk: room_events, + + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Invalid from.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } +} + #[get("/_matrix/client/r0/voip/turnServer")] pub fn turn_server_route() -> MatrixResult { // TODO diff --git a/src/data.rs b/src/data.rs index 88facca..6e09d07 100644 --- a/src/data.rs +++ b/src/data.rs @@ -338,6 +338,13 @@ impl Data { ); } + pub fn room_forget(&self, room_id: &RoomId, user_id: &UserId) { + self.db.userid_leftroomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + } + pub fn room_invite(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { self.pdu_append( room_id.clone(), @@ -375,6 +382,15 @@ impl Data { .collect() } + pub fn room_pdu_first(&self, room_id: &RoomId, pdu_index: u64) -> bool { + let mut pdu_id = vec![b'd']; + pdu_id.extend_from_slice(room_id.to_string().as_bytes()); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&pdu_index.to_be_bytes()); + + self.db.pduid_pdu.get_lt(&pdu_id).unwrap().is_none() + } + pub fn pdu_get(&self, event_id: &EventId) -> Option { self.db .eventid_pduid @@ -577,6 +593,29 @@ impl Data { pdus } + pub fn pdus_until(&self, room_id: &RoomId, until: u64) -> Vec { + let mut pdus = Vec::new(); + + // Create the first part of the full pdu id + let mut prefix = vec![b'd']; + prefix.extend_from_slice(room_id.to_string().as_bytes()); + prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id + + let mut current = prefix.clone(); + current.extend_from_slice(&until.to_be_bytes()); + + while let Some((key, value)) = self.db.pduid_pdu.get_lt(¤t).unwrap() { + if key.starts_with(&prefix) { + current = key.to_vec(); + pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); + } else { + break; + } + } + + pdus + } + pub fn roomlatest_update(&self, user_id: &UserId, room_id: &RoomId, event: EduEvent) { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/main.rs b/src/main.rs index 324e23f..1399802 100644 --- a/src/main.rs +++ b/src/main.rs @@ -48,6 +48,7 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::join_room_by_id_route, client_server::join_room_by_id_or_alias_route, client_server::leave_room_route, + client_server::forget_room_route, client_server::invite_user_route, client_server::get_public_rooms_filtered_route, client_server::search_users_route, @@ -57,6 +58,7 @@ fn setup_rocket(data: Data) -> rocket::Rocket { client_server::create_state_event_for_key_route, client_server::create_state_event_for_empty_key_route, client_server::sync_route, + client_server::get_message_events_route, client_server::turn_server_route, client_server::publicised_groups_route, client_server::options_route, From 1cdf30f38cab3bc0051fa1550ec93db837caf889 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Apr 2020 08:48:56 +0200 Subject: [PATCH 0065/1727] Add hostname to Rocket.toml config --- Rocket.toml | 3 ++- src/main.rs | 16 +++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/Rocket.toml b/Rocket.toml index 99c136d..5db4a3d 100644 --- a/Rocket.toml +++ b/Rocket.toml @@ -1,6 +1,7 @@ [global] -address = "0.0.0.0" +hostname = "conduit.rs" port = 14004 +address = "0.0.0.0" [global.tls] certs = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/fullchain.pem" diff --git a/src/main.rs b/src/main.rs index 1399802..2f0a2f0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -16,9 +16,9 @@ pub use 
database::Database; pub use pdu::PduEvent; pub use ruma_wrapper::{MatrixResult, Ruma}; -use rocket::routes; +use rocket::{fairing::AdHoc, routes}; -fn setup_rocket(data: Data) -> rocket::Rocket { +fn setup_rocket() -> rocket::Rocket { rocket::ignite() .mount( "/", @@ -68,7 +68,12 @@ fn setup_rocket(data: Data) -> rocket::Rocket { server_server::get_server_keys_deprecated, ], ) - .manage(data) + .attach(AdHoc::on_attach("Config", |rocket| { + let hostname = rocket.config().get_str("hostname").unwrap_or("localhost"); + let data = Data::load_or_create(&hostname); + + Ok(rocket.manage(data)) + })) } fn main() { @@ -78,8 +83,5 @@ fn main() { } pretty_env_logger::init(); - let data = Data::load_or_create("conduit.rs"); - //data.debug(); - - setup_rocket(data).launch().unwrap(); + setup_rocket().launch().unwrap(); } From fd1aea7e3630a4f9016612e57ecc19fcf8ed86d3 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 28 Apr 2020 20:03:14 +0200 Subject: [PATCH 0066/1727] Update dependencies --- Cargo.lock | 64 ++++++++++++++++++++++---------------------- Cargo.toml | 10 +++---- src/client_server.rs | 9 +++---- src/server_server.rs | 4 +-- 4 files changed, 43 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b659c3..d205cd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ checksum = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -258,7 +258,7 @@ dependencies = [ "bitflags", "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -415,7 +415,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15" +checksum = "61565ff7aaace3525556587bd2dc31d4a07071957be715e63ce7b1eccf51a8f4" dependencies = [ "libc", ] @@ -625,9 +625,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97dc0d13f3bf0369f00504ad806499490045d6f93524a6ead4081c380703a2f" +checksum = "77ab7bb370a788ad675863e035fd9bfa56a66a030a16a88ab80aeb6b18cbdf31" dependencies = [ "serde", ] @@ -904,7 +904,7 @@ checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -1213,13 +1213,13 @@ checksum = "0f6b02a6a860a96e3c2081c8aea88b37b2918b53e539856b73aadde1908b65ad" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] name = "ruma-client-api" version = "0.8.0-rc.5" -source = "git+https://github.com/ruma/ruma-client-api.git#473cf2386178781688f12c538ee6419869d14a46" +source = "git+https://github.com/ruma/ruma-client-api.git#5a26c387646e17ba076e478d1e7b896b7e47137d" dependencies = [ "http", "js_int", @@ -1255,13 +1255,13 @@ checksum = "abd3cfe96c9887fe2eebfa2e5e7d3a4afff02c374874d4e718f46dab5fd3320d" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "git+https://git.koesters.xyz/timo/ruma-federation-api.git#6f1c5a6a714d6be2f420f3832d31e214fe4fb229" +source = 
"git+https://github.com/ruma/ruma-federation-api.git#263f2ffc75be6542bd68161e446adf588505fb56" dependencies = [ "js_int", "ruma-api", @@ -1296,7 +1296,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://git.koesters.xyz/timo/ruma-signatures.git#e2cd0927c14133b8d49a8cd9fbd96f8af5c60f07" +source = "git+https://github.com/ruma/ruma-signatures.git#1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" dependencies = [ "base64 0.12.0", "ring", @@ -1375,9 +1375,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" +checksum = "3f331b9025654145cd425b9ded0caf8f5ae0df80d418b326e2dc1c3dc5eb0620" dependencies = [ "bitflags", "core-foundation", @@ -1388,9 +1388,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" +checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" dependencies = [ "core-foundation-sys", "libc", @@ -1413,14 +1413,14 @@ checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] name = "serde_json" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" +checksum = "a7894c8ed05b7a3a279aeb79025fdec1d3158080b75b98a08faf2806bb799edd" dependencies = [ "itoa", "ryu", @@ -1473,9 +1473,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a" +checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" [[package]] name = "spin" @@ -1507,7 +1507,7 @@ dependencies = [ "heck", "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -1523,9 +1523,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" +checksum = "410a7488c0a728c7ceb4ad59b9567eb4053d02e8cc7f5c0e0eeeb39518369213" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", @@ -1576,9 +1576,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713" +checksum = "7d9c43f1bb96970e153bcbae39a65e249ccb942bd9d36dbdf086024920417c9c" dependencies = [ "bytes", "fnv", @@ -1605,7 +1605,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", ] [[package]] @@ -1712,9 +1712,9 @@ checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "untrusted" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" +checksum = 
"a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" @@ -1785,7 +1785,7 @@ dependencies = [ "log", "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", "wasm-bindgen-shared", ] @@ -1819,7 +1819,7 @@ checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.3", - "syn 1.0.17", + "syn 1.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 9ae2a0c..5ceb72f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,16 +17,16 @@ ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } ruma-identifiers = { version = "0.16.0", features = ["rand"] } ruma-api = "0.16.0-rc.3" ruma-events = "0.21.0-beta.1" -ruma-signatures = { git = "https://git.koesters.xyz/timo/ruma-signatures.git" } -ruma-federation-api = { git = "https://git.koesters.xyz/timo/ruma-federation-api.git" } +ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } +ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" directories = "2.0.2" -js_int = "0.1.4" -serde_json = "1.0.51" +js_int = "0.1.5" +serde_json = "1.0.52" serde = "1.0.106" -tokio = { version = "0.2.18", features = ["macros"] } +tokio = { version = "0.2.19", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" diff --git a/src/client_server.rs b/src/client_server.rs index 80bf61c..cc6a163 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -781,8 +781,7 @@ pub async fn get_public_rooms_filtered_route( ruma_federation_api::v1::get_public_rooms::Request { limit: Some(20_u32.into()), since: None, - include_all_networks: None, - third_party_instance_id: None, + room_network: ruma_federation_api::v1::get_public_rooms::RoomNetwork::Matrix, }, ) .await @@ -1024,10 +1023,10 @@ pub fn get_message_events_route( let pdus = data.pdus_until(&body.room_id, from); let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect::>(); MatrixResult(Ok(get_message_events::Response { - start: body.from.clone(), - end: "".to_owned(), + start: Some(body.from.clone()), + end: None, chunk: room_events, - + state: Vec::new(), })) } else { MatrixResult(Err(Error { diff --git a/src/server_server.rs b/src/server_server.rs index 93c3c22..da0b8a0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -125,10 +125,10 @@ pub fn well_known_server(data: State) -> Json { #[get("/_matrix/federation/v1/version")] pub fn get_server_version() -> MatrixResult { MatrixResult(Ok(get_server_version::Response { - server: get_server_version::Server { + server: Some(get_server_version::Server { name: Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), - }, + }), })) } From c769283953343d23a1b8811b713de13f60e8c028 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Apr 2020 10:08:55 +0200 Subject: [PATCH 0067/1727] Update readme --- README.md | 12 +++++++----- src/client_server.rs | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 557c121..4c9baf5 100644 --- a/README.md +++ b/README.md @@ -20,17 +20,19 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [x] Create room messages - [x] Sync room messages - [x] Join rooms, lookup room ids -- [x] Basic Riot web support -- [x] Riot room discovery -- [x] Riot read receipts +- [x] Riot web support +- [x] Room discovery +- [x] 
Read receipts - [x] Typing indications - [x] Invites, user search - [x] Password hashing +- [ ] Basic federation +- [ ] State resolution +- [ ] Permission system +- [ ] Notifications (push rules) - [ ] Riot presence - [ ] Proper room creation - [ ] Riot E2EE -- [ ] Basic federation -- [ ] State resolution #### How can I contribute? diff --git a/src/client_server.rs b/src/client_server.rs index cc6a163..0103208 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1058,7 +1058,7 @@ pub fn publicised_groups_route() -> MatrixResult } #[options("/<_segments..>")] -pub fn options_route(_segments: PathBuf) -> MatrixResult { +pub fn options_route(_segments: rocket::http::uri::Segments) -> MatrixResult { MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "This is the options route.".to_owned(), From 169dbe6c370883bb48d4c9ed02903d62d1e8f42f Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Apr 2020 12:18:45 +0200 Subject: [PATCH 0068/1727] fix: send full state after joining a room --- src/client_server.rs | 102 +++++++++++++++++++++++++++++++------------ src/data.rs | 31 +++++++++---- src/database.rs | 14 +++--- src/pdu.rs | 14 +++--- src/server_server.rs | 36 ++++++++------- src/test.rs | 6 +-- 6 files changed, 134 insertions(+), 69 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 0103208..27db78c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -13,9 +13,10 @@ use ruma_client_api::{ filter::{self, create_filter, get_filter}, keys::{get_keys, upload_keys}, membership::{ - get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, forget_room, leave_room, + forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, + leave_room, }, - message::{get_message_events, create_message_event}, + message::{create_message_event, get_message_events}, presence::set_presence, profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, @@ -673,16 +674,18 @@ pub fn join_room_by_id_or_alias_route( ) -> MatrixResult { let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, - Err(room_alias) => if room_alias.server_name() == data.hostname() { - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room alias not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } else { - // Ask creator server of the room to join TODO ask someone else when not available - //server_server::send_request(data, destination, request) - todo!(); + Err(room_alias) => { + if room_alias.server_name() == data.hostname() { + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room alias not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })); + } else { + // Ask creator server of the room to join TODO ask someone else when not available + //server_server::send_request(data, destination, request) + todo!(); + } } }; @@ -762,7 +765,7 @@ pub async fn get_public_rooms_filtered_route( .and_then(|s| s.content.get("name")) .and_then(|n| n.as_str()) .map(|n| n.to_owned()), - num_joined_members: data.room_users(&room_id).into(), + num_joined_members: data.room_users_joined(&room_id).into(), room_id, topic: None, world_readable: false, @@ -917,19 +920,37 @@ pub fn sync_route( body: Ruma, ) -> MatrixResult { std::thread::sleep(Duration::from_millis(300)); + let user_id = body.user_id.clone().expect("user is authenticated"); let next_batch = data.last_pdu_index().to_string(); let mut 
joined_rooms = BTreeMap::new(); - let joined_roomids = data.rooms_joined(body.user_id.as_ref().expect("user is authenticated")); + let joined_roomids = data.rooms_joined(&user_id); let since = body .since .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); + for room_id in joined_roomids { let pdus = data.pdus_since(&room_id, since); - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect::>(); - let is_first_pdu = data.room_pdu_first(&room_id, since); + + let mut send_member_count = false; + let mut send_full_state = false; + for pdu in &pdus { + if pdu.kind == EventType::RoomMember { + if pdu.state_key == Some(user_id.to_string()) && pdu.content["membership"] == "join" + { + send_full_state = true; + } + send_member_count = true; + } + } + + let room_events = pdus + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + let mut edus = data.roomlatests_since(&room_id, since); edus.extend_from_slice(&data.roomactives_in(&room_id)); @@ -939,8 +960,16 @@ pub fn sync_route( account_data: sync_events::AccountData { events: Vec::new() }, summary: sync_events::RoomSummary { heroes: Vec::new(), - joined_member_count: None, - invited_member_count: None, + joined_member_count: if send_member_count { + Some(data.room_users_joined(&room_id).into()) + } else { + None + }, + invited_member_count: if send_member_count { + Some(data.room_users_invited(&room_id).into()) + } else { + None + }, }, unread_notifications: sync_events::UnreadNotificationsCount { highlight_count: None, @@ -951,14 +980,24 @@ pub fn sync_route( prev_batch: Some(since.to_string()), events: room_events, }, - state: sync_events::State { events: Vec::new() }, + // TODO: state before timeline + state: sync_events::State { + events: if send_full_state { + data.room_state(&room_id) + .into_iter() + .map(|(_, pdu)| pdu.to_state_event()) + .collect() + } else { + Vec::new() + }, + }, ephemeral: sync_events::Ephemeral { events: edus }, }, ); } let mut left_rooms = BTreeMap::new(); - let left_roomids = data.rooms_left(body.user_id.as_ref().expect("user is authenticated")); + let left_roomids = data.rooms_left(&user_id); for room_id in left_roomids { let pdus = data.pdus_since(&room_id, since); let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); @@ -980,7 +1019,7 @@ pub fn sync_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in data.rooms_invited(body.user_id.as_ref().expect("user is authenticated")) { + for room_id in data.rooms_invited(&user_id) { let events = data .pdus_since(&room_id, since) .into_iter() @@ -1013,15 +1052,18 @@ pub fn sync_route( pub fn get_message_events_route( data: State, body: Ruma, - _room_id: String) -> MatrixResult { - if let get_message_events::Direction::Forward = body.dir {todo!();} + _room_id: String, +) -> MatrixResult { + if let get_message_events::Direction::Forward = body.dir { + todo!(); + } - if let Ok(from) = body - .from - .clone() - .parse() { + if let Ok(from) = body.from.clone().parse() { let pdus = data.pdus_until(&body.room_id, from); - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect::>(); + let room_events = pdus + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); MatrixResult(Ok(get_message_events::Response { start: Some(body.from.clone()), end: None, @@ -1058,7 +1100,9 @@ pub fn publicised_groups_route() -> MatrixResult } #[options("/<_segments..>")] -pub fn options_route(_segments: rocket::http::uri::Segments) -> MatrixResult { +pub fn options_route( + _segments: 
rocket::http::uri::Segments, +) -> MatrixResult { MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "This is the options route.".to_owned(), diff --git a/src/data.rs b/src/data.rs index 6e09d07..3b652ba 100644 --- a/src/data.rs +++ b/src/data.rs @@ -193,11 +193,11 @@ impl Data { return false; } - self.db.userid_roomids.add( + self.db.userid_joinroomids.add( user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), ); - self.db.roomid_userids.add( + self.db.roomid_joinuserids.add( room_id.to_string().as_bytes(), user_id.to_string().as_bytes().into(), ); @@ -205,6 +205,10 @@ impl Data { user_id.to_string().as_bytes(), room_id.to_string().as_bytes(), ); + self.db.roomid_inviteuserids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes(), + ); self.db.userid_leftroomids.remove_value( user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), @@ -232,7 +236,7 @@ impl Data { pub fn rooms_joined(&self, user_id: &UserId) -> Vec { self.db - .userid_roomids + .userid_joinroomids .get_iter(user_id.to_string().as_bytes()) .values() .map(|room_id| { @@ -282,9 +286,16 @@ impl Data { room_ids } - pub fn room_users(&self, room_id: &RoomId) -> u32 { + pub fn room_users_joined(&self, room_id: &RoomId) -> u32 { self.db - .roomid_userids + .roomid_joinuserids + .get_iter(room_id.to_string().as_bytes()) + .count() as u32 + } + + pub fn room_users_invited(&self, room_id: &RoomId) -> u32 { + self.db + .roomid_inviteuserids .get_iter(room_id.to_string().as_bytes()) .count() as u32 } @@ -324,11 +335,15 @@ impl Data { user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), ); - self.db.userid_roomids.remove_value( + self.db.roomid_inviteuserids.remove_value( user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), ); - self.db.roomid_userids.remove_value( + self.db.userid_joinroomids.remove_value( + user_id.to_string().as_bytes(), + room_id.to_string().as_bytes().into(), + ); + self.db.roomid_joinuserids.remove_value( room_id.to_string().as_bytes(), user_id.to_string().as_bytes().into(), ); @@ -358,7 +373,7 @@ impl Data { user_id.to_string().as_bytes(), room_id.to_string().as_bytes().into(), ); - self.db.roomid_userids.add( + self.db.roomid_inviteuserids.add( room_id.to_string().as_bytes(), user_id.to_string().as_bytes().into(), ); diff --git a/src/database.rs b/src/database.rs index 73406c1..3dd7564 100644 --- a/src/database.rs +++ b/src/database.rs @@ -71,8 +71,9 @@ pub struct Database { pub eventid_pduid: sled::Tree, pub roomid_pduleaves: MultiValue, pub roomstateid_pdu: sled::Tree, // Room + StateType + StateKey - pub roomid_userids: MultiValue, - pub userid_roomids: MultiValue, + pub roomid_joinuserids: MultiValue, + pub roomid_inviteuserids: MultiValue, + pub userid_joinroomids: MultiValue, pub userid_inviteroomids: MultiValue, pub userid_leftroomids: MultiValue, // EDUs: @@ -115,8 +116,9 @@ impl Database { eventid_pduid: db.open_tree("eventid_pduid").unwrap(), roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), - roomid_userids: MultiValue(db.open_tree("roomid_userids").unwrap()), - userid_roomids: MultiValue(db.open_tree("userid_roomids").unwrap()), + roomid_joinuserids: MultiValue(db.open_tree("roomid_joinuserids").unwrap()), + roomid_inviteuserids: MultiValue(db.open_tree("roomid_inviteuserids").unwrap()), + userid_joinroomids: MultiValue(db.open_tree("userid_joinroomids").unwrap()), userid_inviteroomids: 
MultiValue(db.open_tree("userid_inviteroomids").unwrap()), userid_leftroomids: MultiValue(db.open_tree("userid_leftroomids").unwrap()), roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), @@ -200,7 +202,7 @@ impl Database { ); } println!("\n# RoomId -> UserIds:"); - for (k, v) in self.roomid_userids.iter_all().map(|r| r.unwrap()) { + for (k, v) in self.roomid_joinuserids.iter_all().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), @@ -208,7 +210,7 @@ impl Database { ); } println!("\n# UserId -> RoomIds:"); - for (k, v) in self.userid_roomids.iter_all().map(|r| r.unwrap()) { + for (k, v) in self.userid_joinroomids.iter_all().map(|r| r.unwrap()) { println!( "{:?} -> {:?}", String::from_utf8_lossy(&k), diff --git a/src/pdu.rs b/src/pdu.rs index b6aa45d..0e1b3de 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,6 +1,8 @@ use js_int::UInt; use ruma_events::{ - collections::all::RoomEvent, stripped::AnyStrippedStateEvent, EventJson, EventType, + collections::all::{RoomEvent, StateEvent}, + stripped::AnyStrippedStateEvent, + EventJson, EventType, }; use ruma_federation_api::EventHash; use ruma_identifiers::{EventId, RoomId, UserId}; @@ -39,12 +41,12 @@ impl PduEvent { serde_json::from_str::>(&json).unwrap() } - pub fn to_stripped_state_event(&self) -> EventJson { - // Can only fail in rare circumstances that won't ever happen here, see - // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html + pub fn to_state_event(&self) -> EventJson { + let json = serde_json::to_string(&self).unwrap(); + serde_json::from_str::>(&json).unwrap() + } + pub fn to_stripped_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).unwrap(); - - // EventJson's deserialize implementation always returns `Ok(...)` serde_json::from_str::>(&json).unwrap() } } diff --git a/src/server_server.rs b/src/server_server.rs index da0b8a0..394757a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -13,18 +13,21 @@ use std::{ }; pub async fn request_well_known(data: &crate::Data, destination: &str) -> Option { - let body: serde_json::Value = serde_json::from_str(&data - .reqwest_client() - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) - .send() - .await - .ok()? - .text() - .await - .ok()?).ok()?; + let body: serde_json::Value = serde_json::from_str( + &data + .reqwest_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? 
+ .text() + .await + .ok()?, + ) + .ok()?; Some(body.get("m.server")?.as_str()?.to_owned()) } @@ -35,10 +38,11 @@ pub async fn send_request( ) -> Option { let mut http_request: http::Request<_> = request.try_into().unwrap(); - let actual_destination = "https://".to_owned() + &request_well_known(data, &destination).await.unwrap_or(destination.clone() + ":8448"); - *http_request.uri_mut() = (actual_destination + T::METADATA.path) - .parse() - .unwrap(); + let actual_destination = "https://".to_owned() + + &request_well_known(data, &destination) + .await + .unwrap_or(destination.clone() + ":8448"); + *http_request.uri_mut() = (actual_destination + T::METADATA.path).parse().unwrap(); let mut request_map = serde_json::Map::new(); diff --git a/src/test.rs b/src/test.rs index 8756436..9f56214 100644 --- a/src/test.rs +++ b/src/test.rs @@ -5,10 +5,8 @@ use serde_json::{json, Value}; use std::time::Duration; fn setup_client() -> Client { - Database::try_remove("temp"); - let data = Data::load_or_create("temp"); - - let rocket = setup_rocket(data); + Database::try_remove("localhost"); + let rocket = setup_rocket(); Client::new(rocket).expect("valid rocket instance") } From b02c5689419548c62e038d1c022769e047cd4039 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 1 May 2020 20:26:57 +0200 Subject: [PATCH 0069/1727] feat: notifications, simple permission systems --- Cargo.lock | 93 +++++++------- Cargo.toml | 2 +- src/client_server.rs | 226 +++++++++++++++++++++++++++++----- src/data.rs | 287 ++++++++++++++++++++++++++++++++++++------- src/database.rs | 20 +-- src/main.rs | 4 + 6 files changed, 506 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d205cd2..3d2ef34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,7 +34,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -247,7 +247,7 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "devise_core", - "quote 1.0.3", + "quote 1.0.4", ] [[package]] @@ -257,7 +257,7 @@ source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9 dependencies = [ "bitflags", "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -414,7 +414,7 @@ checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -616,9 +616,9 @@ checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" [[package]] name = "js-sys" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" +checksum = "0b823ebafcee1632403f2782d28728aab353f7881547a700043ef455c078326f" dependencies = [ "wasm-bindgen", ] @@ -796,6 +796,12 @@ dependencies = [ "libc", ] +[[package]] +name = "once_cell" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" + [[package]] name = "openssl" version = "0.10.29" @@ -903,7 +909,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" dependencies = [ "proc-macro2 
1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -988,9 +994,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" +checksum = "4c1f4b0efa5fc5e8ceb705136bfee52cfdb6a4e3509f770b478cd6ed434232a7" dependencies = [ "proc-macro2 1.0.10", ] @@ -1117,13 +1123,13 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.12" +version = "0.16.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" +checksum = "703516ae74571f24b465b4a1431e81e2ad51336cb0ded733a55a1aa3eccac196" dependencies = [ "cc", - "lazy_static", "libc", + "once_cell", "spin", "untrusted", "web-sys", @@ -1162,7 +1168,7 @@ source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8cc dependencies = [ "devise", "indexmap", - "quote 1.0.3", + "quote 1.0.4", "rocket_http", "version_check 0.9.1", "yansi 0.5.0", @@ -1212,14 +1218,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f6b02a6a860a96e3c2081c8aea88b37b2918b53e539856b73aadde1908b65ad" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] [[package]] name = "ruma-client-api" version = "0.8.0-rc.5" -source = "git+https://github.com/ruma/ruma-client-api.git#5a26c387646e17ba076e478d1e7b896b7e47137d" +source = "git+https://github.com/ruma/ruma-client-api.git#dbb60142cf336784d809c6c4d79bd8de4c67fb5f" dependencies = [ "http", "js_int", @@ -1254,14 +1260,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abd3cfe96c9887fe2eebfa2e5e7d3a4afff02c374874d4e718f46dab5fd3320d" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma-federation-api.git#263f2ffc75be6542bd68161e446adf588505fb56" +source = "git+https://github.com/ruma/ruma-federation-api.git#ccbf216f39bbbaa59131cc200eae5bd18aa1947c" dependencies = [ "js_int", "ruma-api", @@ -1284,13 +1290,16 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09901d608958f63618546957134dd4242d2ca07a885a28f794ad4574a937c22c" +checksum = "6ce9a52acce7ed3809e1b47d9cc67ee93972a2b0fedaaa76d6e794456a79858b" dependencies = [ + "dtoa", + "itoa", "js_int", "serde", "serde_json", + "url", ] [[package]] @@ -1412,7 +1421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -1506,7 +1515,7 @@ checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -1528,7 +1537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "410a7488c0a728c7ceb4ad59b9567eb4053d02e8cc7f5c0e0eeeb39518369213" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "unicode-xid 0.2.0", ] @@ -1576,9 +1585,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7d9c43f1bb96970e153bcbae39a65e249ccb942bd9d36dbdf086024920417c9c" +checksum = "05c1d570eb1a36f0345a5ce9c6c6e665b70b73d11236912c0b477616aeec47b1" dependencies = [ "bytes", "fnv", @@ -1604,7 +1613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", ] @@ -1764,9 +1773,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.60" +version = "0.2.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" +checksum = "f56e97dbea16d5f56549d6c8ea7f36efb6be98507308650c1a5970574b3941b9" dependencies = [ "cfg-if", "serde", @@ -1776,24 +1785,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.60" +version = "0.2.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" +checksum = "7b75d4f3f9b81dfc7d66b955876b325b20e8affd4ce8d93e51162626fc5faadb" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +checksum = "736dcd8f8455458c82614f12116aabd0209d440c1a28d8824bcaed755ac3e058" dependencies = [ "cfg-if", "js-sys", @@ -1803,22 +1812,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.60" +version = "0.2.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" +checksum = "9dcde4b19e863521c1e78ecf100935132396291b09ae0ae2e155ff84ccbe9736" dependencies = [ - "quote 1.0.3", + "quote 1.0.4", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.60" +version = "0.2.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" +checksum = "13d87d2b117af2b86472402d70f7eb173bbe166beb5e727f3c0bebecdf356504" dependencies = [ "proc-macro2 1.0.10", - "quote 1.0.3", + "quote 1.0.4", "syn 1.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -1826,15 +1835,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.60" +version = "0.2.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" +checksum = "71f77b681efd0bca6f8ea356cdc2e497538b41d3e2a02afed18ce8f022231d29" [[package]] name = "web-sys" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +checksum = "07c5819dc39222a788ca169a81aef7d02739019256300534f493b5747d5469c2" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 5ceb72f..7001ada 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ directories = "2.0.2" js_int = "0.1.5" serde_json = "1.0.52" serde = "1.0.106" -tokio = { version = "0.2.19", features = ["macros"] } +tokio = { version = "0.2.20", features = 
["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" diff --git a/src/client_server.rs b/src/client_server.rs index 27db78c..6b56fb1 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -8,10 +8,12 @@ use ruma_client_api::{ account::register, alias::get_alias, capabilities::get_capabilities, + client_exchange::send_event_to_device, config::{get_global_account_data, set_global_account_data}, directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{get_keys, upload_keys}, + media::get_media_config, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, leave_room, @@ -21,7 +23,7 @@ use ruma_client_api::{ profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, - push::get_pushrules_all, + push::{self, get_pushrules_all, set_pushrule, set_pushrule_enabled}, read_marker::set_read_marker, room::create_room, session::{get_login_types, login}, @@ -40,7 +42,6 @@ use serde_json::json; use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, - path::PathBuf, time::{Duration, SystemTime}, }; @@ -238,9 +239,86 @@ pub fn get_capabilities_route( #[get("/_matrix/client/r0/pushrules")] pub fn get_pushrules_all_route() -> MatrixResult { // TODO - MatrixResult(Ok(get_pushrules_all::Response { - global: BTreeMap::new(), - })) + let mut global = BTreeMap::new(); + global.insert( + push::RuleKind::Underride, + vec![push::PushRule { + actions: vec![ + push::Action::Notify, + push::Action::SetTweak { + kind: push::TweakKind::Highlight, + value: Some(false.into()), + }, + ], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: Some(vec![push::PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }]), + pattern: None, + }], + ); + MatrixResult(Ok(get_pushrules_all::Response { global })) +} + +#[put( + "/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>", + data = "" +)] +pub fn set_pushrule_route( + data: State, + body: Ruma, + _scope: String, + _kind: String, + _rule_id: String, +) -> MatrixResult { + // TODO + let user_id = body.user_id.clone().expect("user is authenticated"); + data.room_userdata_update( + None, + &user_id, + EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + content: ruma_events::push_rules::PushRulesEventContent { + global: ruma_events::push_rules::Ruleset { + content: vec![], + override_rules: vec![], + room: vec![], + sender: vec![], + underride: vec![ruma_events::push_rules::ConditionalPushRule { + actions: vec![ + ruma_events::push_rules::Action::Notify, + ruma_events::push_rules::Action::SetTweak( + ruma_events::push_rules::Tweak::Highlight { value: false }, + ), + ], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( + ruma_events::push_rules::EventMatchCondition { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }, + )], + }], + }, + }, + }), + ); + + MatrixResult(Ok(set_pushrule::Response)) +} + +#[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] +pub fn set_pushrule_enabled_route( + _scope: String, + _kind: String, + _rule_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(set_pushrule_enabled::Response)) } #[get( @@ -284,7 +362,6 @@ pub fn set_global_account_data_route( _user_id: String, _type: String, ) -> MatrixResult { - // TODO MatrixResult(Ok(set_global_account_data::Response)) } @@ 
-485,8 +562,20 @@ pub fn set_read_marker_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - // TODO: Fully read + data.room_userdata_update( + Some(&body.room_id), + &user_id, + EduEvent::FullyRead(ruma_events::fully_read::FullyReadEvent { + content: ruma_events::fully_read::FullyReadEventContent { + event_id: body.fully_read.clone(), + }, + room_id: Some(body.room_id.clone()), + }), + ); + if let Some(event) = &body.read_receipt { + data.room_read_set(&body.room_id, &user_id, event); + let mut user_receipts = BTreeMap::new(); user_receipts.insert( user_id.clone(), @@ -564,6 +653,8 @@ pub fn create_room_route( Some("".to_owned()), ); + data.room_join(&room_id, &user_id); + data.pdu_append( room_id.clone(), user_id.clone(), @@ -604,8 +695,6 @@ pub fn create_room_route( ); } - data.room_join(&room_id, &user_id); - for user in &body.invite { data.room_invite(&user_id, &room_id, user); } @@ -855,17 +944,22 @@ pub fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body.clone(), - Some(unsigned), - None, - ); + let event_id = data + .pdu_append( + body.room_id.clone(), + user_id.clone(), + body.event_type.clone(), + body.json_body.clone(), + Some(unsigned), + None, + ) + .expect("message events are always okay"); + MatrixResult(Ok(create_message_event::Response { event_id })) } @@ -880,16 +974,21 @@ pub fn create_state_event_for_key_route( _state_key: String, body: Ruma, ) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + // Reponse of with/without key is the same - let event_id = data.pdu_append( + if let Some(event_id) = data.pdu_append( body.room_id.clone(), body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), body.json_body.clone(), None, Some(body.state_key.clone()), - ); - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) + ) { + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) + } else { + panic!("TODO: error missing permissions"); + } } #[put( @@ -902,16 +1001,21 @@ pub fn create_state_event_for_empty_key_route( _event_type: String, body: Ruma, ) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + // Reponse of with/without key is the same - let event_id = data.pdu_append( + if let Some(event_id) = data.pdu_append( body.room_id.clone(), body.user_id.clone().expect("user is authenticated"), body.event_type.clone(), - body.json_body, + body.json_body.clone(), None, Some("".to_owned()), - ); - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) + ) { + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) + } else { + panic!("TODO: error missing permissions"); + } } #[get("/_matrix/client/r0/sync", data = "")] @@ -919,7 +1023,7 @@ pub fn sync_route( data: State, body: Ruma, ) -> MatrixResult { - std::thread::sleep(Duration::from_millis(300)); + std::thread::sleep(Duration::from_millis(1500)); let user_id = body.user_id.clone().expect("user is authenticated"); let next_batch = data.last_pdu_index().to_string(); @@ -932,7 +1036,7 @@ pub fn sync_route( .unwrap_or(0); for room_id in 
joined_roomids { - let pdus = data.pdus_since(&room_id, since); + let mut pdus = data.pdus_since(&room_id, since); let mut send_member_count = false; let mut send_full_state = false; @@ -946,6 +1050,25 @@ pub fn sync_route( } } + let notification_count = if let Some(last_read) = data.room_read_get(&room_id, &user_id) { + Some((data.pdus_since(&room_id, last_read).len() as u32).into()) + } else { + None + }; + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are enough events + let mut limited = true; + pdus = pdus.split_off(pdus.len().checked_sub(10).unwrap_or_else(|| { + limited = false; + 0 + })); + + let prev_batch = pdus + .first() + .and_then(|e| data.pdu_get_count(&e.event_id)) + .map(|c| c.to_string()); + let room_events = pdus .into_iter() .map(|pdu| pdu.to_room_event()) @@ -957,7 +1080,13 @@ pub fn sync_route( joined_rooms.insert( room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { - account_data: sync_events::AccountData { events: Vec::new() }, + account_data: sync_events::AccountData { + events: data + .room_userdata_since(Some(&room_id), &user_id, since) + .into_iter() + .map(|(_, v)| v) + .collect(), + }, summary: sync_events::RoomSummary { heroes: Vec::new(), joined_member_count: if send_member_count { @@ -973,11 +1102,11 @@ pub fn sync_route( }, unread_notifications: sync_events::UnreadNotificationsCount { highlight_count: None, - notification_count: None, + notification_count, }, timeline: sync_events::Timeline { - limited: None, - prev_batch: Some(since.to_string()), + limited: if limited { Some(limited) } else { None }, + prev_batch, events: room_events, }, // TODO: state before timeline @@ -1042,6 +1171,13 @@ pub fn sync_route( invite: invited_rooms, }, presence: sync_events::Presence { events: Vec::new() }, + account_data: sync_events::AccountData { + events: data + .room_userdata_since(None, &user_id, since) + .into_iter() + .map(|(_, v)| v) + .collect(), + }, device_lists: Default::default(), device_one_time_keys_count: Default::default(), to_device: sync_events::ToDevice { events: Vec::new() }, @@ -1059,14 +1195,23 @@ pub fn get_message_events_route( } if let Ok(from) = body.from.clone().parse() { - let pdus = data.pdus_until(&body.room_id, from); + let pdus = data.pdus_until( + &body.room_id, + from, + body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10), + ); + let prev_batch = pdus + .last() + .and_then(|e| data.pdu_get_count(&e.event_id)) + .map(|c| c.to_string()); let room_events = pdus .into_iter() .map(|pdu| pdu.to_room_event()) .collect::>(); + MatrixResult(Ok(get_message_events::Response { start: Some(body.from.clone()), - end: None, + end: prev_batch, chunk: room_events, state: Vec::new(), })) @@ -1099,6 +1244,23 @@ pub fn publicised_groups_route() -> MatrixResult })) } +#[put("/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>")] +pub fn send_event_to_device_route( + _event_type: String, + _txn_id: String, +) -> MatrixResult { + // TODO + MatrixResult(Ok(send_event_to_device::Response)) +} + +#[get("/_matrix/media/r0/config")] +pub fn get_media_config_route() -> MatrixResult { + // TODO + MatrixResult(Ok(get_media_config::Response { + upload_size: 0_u32.into(), + })) +} + #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments, diff --git a/src/data.rs b/src/data.rs index 3b652ba..9b9c541 100644 --- a/src/data.rs +++ b/src/data.rs @@ -1,11 +1,15 @@ -use crate::{utils, Database, PduEvent}; -use ruma_events::{collections::only::Event as 
EduEvent, EventJson, EventType}; +use crate::{database::COUNTER, utils, Database, PduEvent}; +use ruma_events::{ + collections::only::Event as EduEvent, room::power_levels::PowerLevelsEventContent, EventJson, + EventType, +}; use ruma_federation_api::RoomV3Pdu; use ruma_identifiers::{EventId, RoomId, UserId}; use serde_json::json; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, + mem, }; pub struct Data { @@ -189,7 +193,14 @@ impl Data { } pub fn room_join(&self, room_id: &RoomId, user_id: &UserId) -> bool { - if !self.room_exists(room_id) { + if !self.room_exists(room_id) + && !self + .db + .userid_joinroomids + .get_iter(user_id.to_string().as_bytes()) + .values() + .any(|r| r.unwrap() == room_id.to_string().as_bytes()) + { return false; } @@ -249,8 +260,7 @@ impl Data { /// Check if a room exists by looking for PDUs in that room. pub fn room_exists(&self, room_id: &RoomId) -> bool { // Create the first part of the full pdu id - let mut prefix = vec![b'd']; - prefix.extend_from_slice(room_id.to_string().as_bytes()); + let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id if let Some((key, _)) = self.db.pduid_pdu.get_gt(&prefix).unwrap() { @@ -397,13 +407,14 @@ impl Data { .collect() } - pub fn room_pdu_first(&self, room_id: &RoomId, pdu_index: u64) -> bool { - let mut pdu_id = vec![b'd']; - pdu_id.extend_from_slice(room_id.to_string().as_bytes()); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&pdu_index.to_be_bytes()); - - self.db.pduid_pdu.get_lt(&pdu_id).unwrap().is_none() + pub fn pdu_get_count(&self, event_id: &EventId) -> Option { + self.db + .eventid_pduid + .get(event_id.to_string().as_bytes()) + .unwrap() + .map(|pdu_id| { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) + }) } pub fn pdu_get(&self, event_id: &EventId) -> Option { @@ -459,7 +470,39 @@ impl Data { content: serde_json::Value, unsigned: Option>, state_key: Option, - ) -> EventId { + ) -> Option { + // Is the event authorized? + if state_key.is_some() { + if let Some(pdu) = self + .room_state(&room_id) + .get(&(EventType::RoomPowerLevels, "".to_owned())) + { + let power_levels = serde_json::from_value::>( + pdu.content.clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + match event_type { + EventType::RoomMember => { + // Member events are okay for now (TODO) + } + _ if power_levels + .users + .get(&sender) + .unwrap_or(&power_levels.users_default) + <= &0.into() => + { + // Not authorized + return None; + } + // User has sufficient power + _ => {} + } + } + } + // prev_events are the leaves of the current graph. This method removes all leaves from the // room and replaces them with our event // TODO: Make sure this isn't called twice in parallel @@ -523,22 +566,19 @@ impl Data { self.pdu_leaves_replace(&room_id, &pdu.event_id); - // The new value will need a new index. 
We store the last used index in 'n' // The count will go up regardless of the room_id // This is also the next_batch/since value // Increment the last index and use that let index = utils::u64_from_bytes( &self .db - .pduid_pdu - .update_and_fetch(b"n", utils::increment) + .global + .update_and_fetch(COUNTER, utils::increment) .unwrap() .unwrap(), ); - let mut pdu_id = vec![b'd']; - pdu_id.extend_from_slice(room_id.to_string().as_bytes()); - + let mut pdu_id = room_id.to_string().as_bytes().to_vec(); pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id pdu_id.extend_from_slice(&index.to_be_bytes()); @@ -564,7 +604,9 @@ impl Data { .unwrap(); } - pdu.event_id + self.room_read_set(&room_id, &sender, &pdu.event_id); + + Some(pdu.event_id) } /// Returns a vector of all PDUs in a room. @@ -573,12 +615,11 @@ impl Data { } pub fn last_pdu_index(&self) -> u64 { - let count_key: Vec = vec![b'n']; utils::u64_from_bytes( &self .db - .pduid_pdu - .get(&count_key) + .global + .get(&COUNTER) .unwrap() .unwrap_or_else(|| (&0_u64.to_be_bytes()).into()), ) @@ -586,15 +627,23 @@ impl Data { /// Returns a vector of all events in a room that happened after the event with id `since`. pub fn pdus_since(&self, room_id: &RoomId, since: u64) -> Vec { + // Create the first part of the full pdu id + let mut pdu_id = room_id.to_string().as_bytes().to_vec(); + pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id + pdu_id.extend_from_slice(&(since).to_be_bytes()); + + self.pdus_since_pduid(room_id, pdu_id) + } + + /// Returns a vector of all events in a room that happened after the event with id `since`. + pub fn pdus_since_pduid(&self, room_id: &RoomId, pdu_id: Vec) -> Vec { let mut pdus = Vec::new(); // Create the first part of the full pdu id - let mut prefix = vec![b'd']; - prefix.extend_from_slice(room_id.to_string().as_bytes()); + let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id - let mut current = prefix.clone(); - current.extend_from_slice(&since.to_be_bytes()); + let mut current = pdu_id; while let Some((key, value)) = self.db.pduid_pdu.get_gt(¤t).unwrap() { if key.starts_with(&prefix) { @@ -608,19 +657,18 @@ impl Data { pdus } - pub fn pdus_until(&self, room_id: &RoomId, until: u64) -> Vec { + pub fn pdus_until(&self, room_id: &RoomId, until: u64, max: u32) -> Vec { let mut pdus = Vec::new(); // Create the first part of the full pdu id - let mut prefix = vec![b'd']; - prefix.extend_from_slice(room_id.to_string().as_bytes()); + let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id let mut current = prefix.clone(); current.extend_from_slice(&until.to_be_bytes()); while let Some((key, value)) = self.db.pduid_pdu.get_lt(¤t).unwrap() { - if key.starts_with(&prefix) { + if pdus.len() < max as usize && key.starts_with(&prefix) { current = key.to_vec(); pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); } else { @@ -670,8 +718,8 @@ impl Data { let index = utils::u64_from_bytes( &self .db - .pduid_pdu - .update_and_fetch(b"n", utils::increment) + .global + .update_and_fetch(COUNTER, utils::increment) .unwrap() .unwrap(), ); @@ -695,17 +743,14 @@ impl Data { prefix.push(0xff); let mut current = prefix.clone(); - current.extend_from_slice(&since.to_be_bytes()); + current.extend_from_slice(&(since + 1).to_be_bytes()); while let Some((key, value)) = 
self.db.roomlatestid_roomlatest.get_gt(¤t).unwrap() { if key.starts_with(&prefix) { current = key.to_vec(); room_latests.push( serde_json::from_slice::>(&value) - .expect("room_latest in db is valid") - .deserialize() - .expect("room_latest in db is valid") - .into(), + .expect("room_latest in db is valid"), ); } else { break; @@ -715,6 +760,11 @@ impl Data { room_latests } + /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. + pub fn roomlatests_all(&self, room_id: &RoomId) -> Vec> { + self.roomlatests_since(room_id, 0) + } + pub fn roomactive_add(&self, event: EduEvent, room_id: &RoomId, timeout: u64) { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -737,8 +787,8 @@ impl Data { let index = utils::u64_from_bytes( &self .db - .pduid_pdu - .update_and_fetch(b"n", utils::increment) + .global + .update_and_fetch(COUNTER, utils::increment) .unwrap() .unwrap(), ); @@ -790,10 +840,7 @@ impl Data { current = key.to_vec(); room_actives.push( serde_json::from_slice::>(&value) - .expect("room_active in db is valid") - .deserialize() - .expect("room_active in db is valid") - .into(), + .expect("room_active in db is valid"), ); } else { break; @@ -813,6 +860,158 @@ impl Data { } } + pub fn room_userdata_update( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event: EduEvent, + ) { + let mut prefix = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.push(0xff); + + // Start with last + if let Some(mut current) = self + .db + .roomuserdataid_accountdata + .scan_prefix(&prefix) + .keys() + .next_back() + .map(|c| c.unwrap()) + { + // Remove old entry (there should be at most one) + loop { + if !current.starts_with(&prefix) { + // We're in another room or user + break; + } + if current.rsplit(|&b| b == 0xff).nth(2).unwrap() == user_id.to_string().as_bytes() + { + // This is the old room_latest + self.db.roomuserdataid_accountdata.remove(current).unwrap(); + break; + } + // Else, try the event before that + if let Some((k, _)) = self.db.roomuserdataid_accountdata.get_lt(current).unwrap() { + current = k; + } else { + break; + } + } + } + + // Increment the last index and use that + let index = utils::u64_from_bytes( + &self + .db + .global + .update_and_fetch(COUNTER, utils::increment) + .unwrap() + .unwrap(), + ); + + let mut key = prefix; + key.extend_from_slice(&index.to_be_bytes()); + + let json = serde_json::to_value(&event).unwrap(); + key.extend_from_slice(json["type"].as_str().unwrap().as_bytes()); + + self.db + .roomuserdataid_accountdata + .insert(key, &*json.to_string()) + .unwrap(); + } + + pub fn room_userdata_get( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + kind: &str, + ) -> Option> { + self.room_userdata_all(room_id, user_id).remove(kind) + } + + pub fn room_userdata_since( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + since: u64, + ) -> HashMap> { + let mut userdata = HashMap::new(); + + let mut prefix = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.push(0xff); + + let mut current = prefix.clone(); + current.extend_from_slice(&(since + 1).to_be_bytes()); + + while let Some((key, value)) = self.db.roomuserdataid_accountdata.get_gt(¤t).unwrap() + { + if key.starts_with(&prefix) { + current = key.to_vec(); + let 
json = serde_json::from_slice::(&value).unwrap(); + userdata.insert( + json["type"].as_str().unwrap().to_owned(), + serde_json::from_value::>(json) + .expect("userdata in db is valid"), + ); + } else { + break; + } + } + + userdata + } + + pub fn room_userdata_all( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + ) -> HashMap> { + self.room_userdata_since(room_id, user_id, 0) + } + + pub fn room_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + event_id: &EventId, + ) -> Option<()> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + self.db + .roomuserid_lastread + .insert(key, &self.pdu_get_count(event_id)?.to_be_bytes()) + .unwrap(); + + Some(()) + } + + pub fn room_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Option { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + self.db + .roomuserid_lastread + .get(key) + .unwrap() + .map(|v| utils::u64_from_bytes(&v)) + } + pub fn debug(&self) { self.db.debug(); } diff --git a/src/database.rs b/src/database.rs index 3dd7564..4551bc0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -5,6 +5,8 @@ use std::fs::remove_dir_all; pub struct MultiValue(sled::Tree); +pub const COUNTER: &str = "c"; + impl MultiValue { /// Get an iterator over all values. pub fn iter_all(&self) -> sled::Iter { @@ -67,22 +69,24 @@ pub struct Database { pub userid_deviceids: MultiValue, pub userdeviceid_token: sled::Tree, pub token_userid: sled::Tree, - pub pduid_pdu: sled::Tree, // PduId = 'd' + RoomId + Since (global since counter is at 'n') + pub pduid_pdu: sled::Tree, // PduId = RoomId + Count pub eventid_pduid: sled::Tree, pub roomid_pduleaves: MultiValue, pub roomstateid_pdu: sled::Tree, // Room + StateType + StateKey + pub roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + Type + pub roomuserid_lastread: sled::Tree, // RoomUserId = Room + User pub roomid_joinuserids: MultiValue, pub roomid_inviteuserids: MultiValue, pub userid_joinroomids: MultiValue, pub userid_inviteroomids: MultiValue, pub userid_leftroomids: MultiValue, // EDUs: - pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Since + UserId TODO: Types - pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Since - pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Since - pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Since + Type + UserId + pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId TODO: Types + pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Count + pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count + pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + Type + UserId pub keypair: ruma_signatures::Ed25519KeyPair, - _db: sled::Db, + pub global: sled::Db, } impl Database { @@ -116,6 +120,8 @@ impl Database { eventid_pduid: db.open_tree("eventid_pduid").unwrap(), roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), + roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), + roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), roomid_joinuserids: MultiValue(db.open_tree("roomid_joinuserids").unwrap()), 
roomid_inviteuserids: MultiValue(db.open_tree("roomid_inviteuserids").unwrap()), userid_joinroomids: MultiValue(db.open_tree("userid_joinroomids").unwrap()), @@ -132,7 +138,7 @@ impl Database { "key1".to_owned(), ) .unwrap(), - _db: db, + global: db, } } diff --git a/src/main.rs b/src/main.rs index 2f0a2f0..1b6e7aa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,6 +29,8 @@ fn setup_rocket() -> rocket::Rocket { client_server::login_route, client_server::get_capabilities_route, client_server::get_pushrules_all_route, + client_server::set_pushrule_route, + client_server::set_pushrule_enabled_route, client_server::get_filter_route, client_server::create_filter_route, client_server::set_global_account_data_route, @@ -61,6 +63,8 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_message_events_route, client_server::turn_server_route, client_server::publicised_groups_route, + client_server::send_event_to_device_route, + client_server::get_media_config_route, client_server::options_route, server_server::well_known_server, server_server::get_server_version, From 4ca1ada73e1c2fde76b5dfa942a9660f1105881e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 1 May 2020 21:45:55 +0200 Subject: [PATCH 0070/1727] update ruma --- Cargo.lock | 38 +++++++++++++++++++------------------- src/client_server.rs | 6 +++--- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d2ef34..17a0ad5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -616,9 +616,9 @@ checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" [[package]] name = "js-sys" -version = "0.3.38" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b823ebafcee1632403f2782d28728aab353f7881547a700043ef455c078326f" +checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" dependencies = [ "wasm-bindgen", ] @@ -717,9 +717,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ "cfg-if", "fuchsia-zircon", @@ -1225,7 +1225,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.8.0-rc.5" -source = "git+https://github.com/ruma/ruma-client-api.git#dbb60142cf336784d809c6c4d79bd8de4c67fb5f" +source = "git+https://github.com/ruma/ruma-client-api.git#06f83742506e06d6d2731667eb9fa081654455cf" dependencies = [ "http", "js_int", @@ -1773,9 +1773,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.61" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56e97dbea16d5f56549d6c8ea7f36efb6be98507308650c1a5970574b3941b9" +checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" dependencies = [ "cfg-if", "serde", @@ -1785,9 +1785,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.61" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75d4f3f9b81dfc7d66b955876b325b20e8affd4ce8d93e51162626fc5faadb" +checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" dependencies = [ "bumpalo", "lazy_static", @@ -1800,9 +1800,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.11" +version = 
"0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736dcd8f8455458c82614f12116aabd0209d440c1a28d8824bcaed755ac3e058" +checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" dependencies = [ "cfg-if", "js-sys", @@ -1812,9 +1812,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.61" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcde4b19e863521c1e78ecf100935132396291b09ae0ae2e155ff84ccbe9736" +checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ "quote 1.0.4", "wasm-bindgen-macro-support", @@ -1822,9 +1822,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.61" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d87d2b117af2b86472402d70f7eb173bbe166beb5e727f3c0bebecdf356504" +checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ "proc-macro2 1.0.10", "quote 1.0.4", @@ -1835,15 +1835,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.61" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71f77b681efd0bca6f8ea356cdc2e497538b41d3e2a02afed18ce8f022231d29" +checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" [[package]] name = "web-sys" -version = "0.3.38" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c5819dc39222a788ca169a81aef7d02739019256300534f493b5747d5469c2" +checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/src/client_server.rs b/src/client_server.rs index 6b56fb1..f099631 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1080,13 +1080,13 @@ pub fn sync_route( joined_rooms.insert( room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { - account_data: sync_events::AccountData { + account_data: Some(sync_events::AccountData { events: data .room_userdata_since(Some(&room_id), &user_id, since) .into_iter() .map(|(_, v)| v) .collect(), - }, + }), summary: sync_events::RoomSummary { heroes: Vec::new(), joined_member_count: if send_member_count { @@ -1136,7 +1136,7 @@ pub fn sync_route( left_rooms.insert( room_id.clone().try_into().unwrap(), sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, + account_data: Some(sync_events::AccountData { events: Vec::new() }), timeline: sync_events::Timeline { limited: Some(false), prev_batch: Some(next_batch.clone()), From 4bc7712ee4996b2d9f379d6eba82fba829a47482 Mon Sep 17 00:00:00 2001 From: Marcel Date: Fri, 1 May 2020 23:17:25 +0200 Subject: [PATCH 0071/1727] [ClientServer] Add /_matrix/client/r0/register/available endpoint Took 1 hour 25 minutes --- src/client_server.rs | 50 +++++++++++++++++++++++++++++++++++++------- src/main.rs | 1 + 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6b56fb1..8113bce 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1,11 +1,15 @@ -use crate::{server_server, utils, Data, MatrixResult, Ruma}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + time::{Duration, SystemTime}, +}; use log::debug; use rocket::{get, options, post, put, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ - account::register, + 
account::{get_username_availability, register}, alias::get_alias, capabilities::get_capabilities, client_exchange::send_event_to_device, @@ -39,11 +43,8 @@ use ruma_client_api::{ use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, UserId}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - time::{Duration, SystemTime}, -}; + +use crate::{server_server, utils, Data, MatrixResult, Ruma}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -58,6 +59,41 @@ pub fn get_supported_versions_route() -> MatrixResult, + body: Ruma, +) -> MatrixResult { + // Validate user id + let user_id: UserId = + match (*format!("@{}:{}", body.username.clone(), data.hostname())).try_into() { + Err(_) => { + debug!("Username invalid"); + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidUsername, + message: "Username was invalid.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + Ok(user_id) => user_id, + }; + + // Check if username is creative enough + if data.user_exists(&user_id) { + debug!("ID already taken"); + return MatrixResult(Err(Error { + kind: ErrorKind::UserInUse, + message: "Desired user ID is already taken.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + // TODO add check for appservice namespaces + + // If no if check is true we have an username that's available to be used. + MatrixResult(Ok(get_username_availability::Response { available: true })) +} + #[post("/_matrix/client/r0/register", data = "")] pub fn register_route( data: State, diff --git a/src/main.rs b/src/main.rs index 1b6e7aa..db97599 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,6 +24,7 @@ fn setup_rocket() -> rocket::Rocket { "/", routes![ client_server::get_supported_versions_route, + client_server::get_register_available_route, client_server::register_route, client_server::get_login_route, client_server::login_route, From 4b191a93112a420b6b06bbcd38bd829bb4e8ba20 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 2 May 2020 09:24:09 +0200 Subject: [PATCH 0072/1727] improvement: set default push rules on register --- src/client_server.rs | 33 +++++++++++++++++++++++++++++++++ src/ruma_wrapper.rs | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 31ecb4e..58c5bda 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -168,6 +168,39 @@ pub fn register_route( let token = utils::random_string(TOKEN_LENGTH); data.token_replace(&user_id, &device_id, token.clone()); + // Initial data + data.room_userdata_update( + None, + &user_id, + EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + content: ruma_events::push_rules::PushRulesEventContent { + global: ruma_events::push_rules::Ruleset { + content: vec![], + override_rules: vec![], + room: vec![], + sender: vec![], + underride: vec![ruma_events::push_rules::ConditionalPushRule { + actions: vec![ + ruma_events::push_rules::Action::Notify, + ruma_events::push_rules::Action::SetTweak( + ruma_events::push_rules::Tweak::Highlight { value: false }, + ), + ], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( + ruma_events::push_rules::EventMatchCondition { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }, + )], + }], + }, + }, + }), + ); + MatrixResult(Ok(register::Response { access_token: Some(token), user_id, diff --git 
a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index eec9d84..753edea 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -92,7 +92,7 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { }), Err(e) => { warn!("{:?}", e); - Failure((Status::InternalServerError, ())) + Failure((Status::BadRequest, ())) } } }) From 8f67c01efd33027828573c4373b1a173f49f2007 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 3 May 2020 17:25:31 +0200 Subject: [PATCH 0073/1727] refactor: split database into multiple files, more error handling, cleaner code --- Cargo.lock | 94 +--- Cargo.toml | 2 +- Rocket.toml | 4 +- src/client_server.rs | 635 ++++++++++++--------- src/data.rs | 1018 ---------------------------------- src/database.rs | 287 ++-------- src/database/account_data.rs | 120 ++++ src/database/globals.rs | 61 ++ src/database/rooms.rs | 547 ++++++++++++++++++ src/database/rooms/edus.rs | 190 +++++++ src/database/users.rs | 144 +++++ src/error.rs | 36 ++ src/main.rs | 8 +- src/ruma_wrapper.rs | 8 +- src/server_server.rs | 36 +- src/test.rs | 4 +- src/utils.rs | 9 +- 17 files changed, 1573 insertions(+), 1630 deletions(-) delete mode 100644 src/data.rs create mode 100644 src/database/account_data.rs create mode 100644 src/database/globals.rs create mode 100644 src/database/rooms.rs create mode 100644 src/database/rooms/edus.rs create mode 100644 src/database/users.rs create mode 100644 src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 17a0ad5..642e805 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,14 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "aho-corasick" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" -dependencies = [ - "memchr", -] - [[package]] name = "arc-swap" version = "0.4.6" @@ -147,7 +138,6 @@ dependencies = [ "http", "js_int", "log", - "pretty_env_logger", "rand", "reqwest", "rocket", @@ -161,6 +151,7 @@ dependencies = [ "serde", "serde_json", "sled", + "thiserror", "tokio", ] @@ -298,19 +289,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "fnv" version = "1.0.6" @@ -533,15 +511,6 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "hyper" version = "0.13.5" @@ -937,16 +906,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -[[package]] -name = "pretty_env_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -dependencies = [ - "env_logger", - "log", -] - [[package]] name = "proc-macro-hack" version = "0.5.15" @@ -977,12 +936,6 @@ dependencies = [ "unicode-xid 0.2.0", ] -[[package]] -name = "quick-error" -version = 
"1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "0.6.13" @@ -1059,24 +1012,6 @@ dependencies = [ "rust-argon2 0.7.0", ] -[[package]] -name = "regex" -version = "1.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-syntax" -version = "0.6.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" - [[package]] name = "remove_dir_all" version = "0.5.2" @@ -1556,21 +1491,23 @@ dependencies = [ ] [[package]] -name = "termcolor" -version = "1.1.0" +name = "thiserror" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "d12a1dae4add0f0d568eebc7bf142f145ba1aa2544cafb195c76f0f409091b60" dependencies = [ - "winapi-util", + "thiserror-impl", ] [[package]] -name = "thread_local" -version = "1.0.1" +name = "thiserror-impl" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "3f34e0c1caaa462fd840ec6b768946ea1e7842620d94fe29d5b847138f521269" dependencies = [ - "lazy_static", + "proc-macro2 1.0.10", + "quote 1.0.4", + "syn 1.0.18", ] [[package]] @@ -1887,15 +1824,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi 0.3.8", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 7001ada..4aa2f15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,6 @@ ruma-api = "0.16.0-rc.3" ruma-events = "0.21.0-beta.1" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } -pretty_env_logger = "0.4.0" log = "0.4.8" sled = "0.31.0" directories = "2.0.2" @@ -31,3 +30,4 @@ rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" base64 = "0.12.0" +thiserror = "1.0.16" diff --git a/Rocket.toml b/Rocket.toml index 5db4a3d..4a7d79a 100644 --- a/Rocket.toml +++ b/Rocket.toml @@ -1,6 +1,6 @@ [global] -hostname = "conduit.rs" -port = 14004 +hostname = "matrixtesting.koesters.xyz:59003" +port = 59003 address = "0.0.0.0" [global.tls] diff --git a/src/client_server.rs b/src/client_server.rs index 58c5bda..3d63ffd 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -44,7 +44,7 @@ use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, UserId}; use serde_json::json; -use crate::{server_server, utils, Data, MatrixResult, Ruma}; +use crate::{server_server, utils, Database, MatrixResult, Ruma}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -61,12 +61,12 @@ pub fn get_supported_versions_route() -> MatrixResult, + db: State<'_, Database>, body: Ruma, ) -> MatrixResult { // 
Validate user id let user_id: UserId = - match (*format!("@{}:{}", body.username.clone(), data.hostname())).try_into() { + match (*format!("@{}:{}", body.username.clone(), db.globals.hostname())).try_into() { Err(_) => { debug!("Username invalid"); return MatrixResult(Err(Error { @@ -79,7 +79,7 @@ pub fn get_register_available_route( }; // Check if username is creative enough - if data.user_exists(&user_id) { + if db.users.exists(&user_id).unwrap() { debug!("ID already taken"); return MatrixResult(Err(Error { kind: ErrorKind::UserInUse, @@ -96,7 +96,7 @@ pub fn get_register_available_route( #[post("/_matrix/client/r0/register", data = "")] pub fn register_route( - data: State, + db: State<'_, Database>, body: Ruma, ) -> MatrixResult { if body.auth.is_none() { @@ -117,7 +117,7 @@ pub fn register_route( body.username .clone() .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), - data.hostname() + db.globals.hostname() )) .try_into() { @@ -133,7 +133,7 @@ pub fn register_route( }; // Check if username is creative enough - if data.user_exists(&user_id) { + if db.users.exists(&user_id).unwrap() { debug!("ID already taken"); return MatrixResult(Err(UiaaResponse::MatrixError(Error { kind: ErrorKind::UserInUse, @@ -146,7 +146,7 @@ pub fn register_route( if let Ok(hash) = utils::calculate_hash(&password) { // Create user - data.user_add(&user_id, &hash); + db.users.create(&user_id, &hash).unwrap(); } else { return MatrixResult(Err(UiaaResponse::MatrixError(Error { kind: ErrorKind::InvalidParam, @@ -161,15 +161,17 @@ pub fn register_route( .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); - // Add device - data.device_add(&user_id, &device_id); - // Generate new token for the device let token = utils::random_string(TOKEN_LENGTH); - data.token_replace(&user_id, &device_id, token.clone()); + + // Add device + db + .users + .create_device(&user_id, &device_id, &token) + .unwrap(); // Initial data - data.room_userdata_update( + db.account_data.update( None, &user_id, EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { @@ -199,7 +201,9 @@ pub fn register_route( }, }, }), - ); + &db.globals, + ) + .unwrap(); MatrixResult(Ok(register::Response { access_token: Some(token), @@ -216,17 +220,17 @@ pub fn get_login_route() -> MatrixResult { } #[post("/_matrix/client/r0/login", data = "")] -pub fn login_route(data: State, body: Ruma) -> MatrixResult { +pub fn login_route(db: State<'_, Database>, body: Ruma) -> MatrixResult { // Validate login method let user_id = if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = (body.user.clone(), body.login_info.clone()) { if !username.contains(':') { - username = format!("@{}:{}", username, data.hostname()); + username = format!("@{}:{}", username, db.globals.hostname()); } if let Ok(user_id) = (*username).try_into() { - if let Some(hash) = data.password_hash_get(&user_id) { + if let Some(hash) = db.users.password_hash(&user_id).unwrap() { let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); @@ -272,26 +276,26 @@ pub fn login_route(data: State, body: Ruma) -> MatrixResul .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); - // Add device - data.device_add(&user_id, &device_id); - // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); - data.token_replace(&user_id, &device_id, token.clone()); + + // Add device + db + .users + .create_device(&user_id, &device_id, &token) + .unwrap(); MatrixResult(Ok(login::Response { 
user_id, access_token: token, - home_server: Some(data.hostname().to_owned()), + home_server: Some(db.globals.hostname().to_owned()), device_id, well_known: None, })) } -#[get("/_matrix/client/r0/capabilities", data = "")] -pub fn get_capabilities_route( - body: Ruma, -) -> MatrixResult { +#[get("/_matrix/client/r0/capabilities")] +pub fn get_capabilities_route() -> MatrixResult { // TODO //let mut available = BTreeMap::new(); //available.insert("5".to_owned(), get_capabilities::RoomVersionStability::Unstable); @@ -337,7 +341,7 @@ pub fn get_pushrules_all_route() -> MatrixResult { data = "" )] pub fn set_pushrule_route( - data: State, + db: State<'_, Database>, body: Ruma, _scope: String, _kind: String, @@ -345,7 +349,7 @@ pub fn set_pushrule_route( ) -> MatrixResult { // TODO let user_id = body.user_id.clone().expect("user is authenticated"); - data.room_userdata_update( + db.account_data.update( None, &user_id, EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { @@ -375,7 +379,9 @@ pub fn set_pushrule_route( }, }, }), - ); + &db.globals + ) + .unwrap(); MatrixResult(Ok(set_pushrule::Response)) } @@ -392,10 +398,8 @@ pub fn set_pushrule_enabled_route( #[get( "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", - data = "" )] pub fn get_filter_route( - body: Ruma, _user_id: String, _filter_id: String, ) -> MatrixResult { @@ -411,9 +415,8 @@ pub fn get_filter_route( })) } -#[post("/_matrix/client/r0/user/<_user_id>/filter", data = "")] +#[post("/_matrix/client/r0/user/<_user_id>/filter")] pub fn create_filter_route( - body: Ruma, _user_id: String, ) -> MatrixResult { // TODO @@ -424,10 +427,8 @@ pub fn create_filter_route( #[put( "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" )] pub fn set_global_account_data_route( - body: Ruma, _user_id: String, _type: String, ) -> MatrixResult { @@ -436,10 +437,8 @@ pub fn set_global_account_data_route( #[get( "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" )] pub fn get_global_account_data_route( - body: Ruma, _user_id: String, _type: String, ) -> MatrixResult { @@ -453,15 +452,26 @@ pub fn get_global_account_data_route( #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn set_displayname_route( - data: State, + db: State<'_, Database>, body: Ruma, _user_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - // Send error on None - // Synapse returns a parsing error but the spec doesn't require this - if body.displayname.is_none() { + if let Some(displayname) = &body.displayname { + // Some("") will clear the displayname + if displayname == "" { + db.users.set_displayname(&user_id, None).unwrap(); + } else { + db + .users + .set_displayname(&user_id, Some(displayname.clone())) + .unwrap(); + // TODO: send a new m.presence event with the updated displayname + } + } else { + // Send error on None + // Synapse returns a parsing error but the spec doesn't require this debug!("Request was missing the displayname payload."); return MatrixResult(Err(Error { kind: ErrorKind::MissingParam, @@ -470,30 +480,17 @@ pub fn set_displayname_route( })); } - if let Some(displayname) = &body.displayname { - // Some("") will clear the displayname - if displayname == "" { - data.displayname_remove(&user_id); - } else { - data.displayname_set(&user_id, displayname.clone()); - // TODO send a new m.presence event with the updated displayname - } - } - MatrixResult(Ok(set_display_name::Response)) } -#[get( - 
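The register and login routes above fall back to utils::random_string for device ids and access tokens. A plausible implementation, assuming the rand crate (0.7-era API where Alphanumeric yields chars):

use rand::{distributions::Alphanumeric, thread_rng, Rng};

pub fn random_string(length: usize) -> String {
    // Alphanumeric token/device id of the requested length.
    thread_rng()
        .sample_iter(&Alphanumeric)
        .take(length)
        .collect()
}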
"/_matrix/client/r0/profile//displayname", - data = "" -)] +#[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn get_displayname_route( - data: State, + db: State<'_, Database>, body: Ruma, - user_id_raw: String, + _user_id: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); - if !data.user_exists(&user_id) { + if !db.users.exists(&user_id).unwrap() { // Return 404 if we don't have a profile for this id debug!("Profile was not found."); return MatrixResult(Err(Error { @@ -502,7 +499,7 @@ pub fn get_displayname_route( status_code: http::StatusCode::NOT_FOUND, })); } - if let Some(displayname) = data.displayname_get(&user_id) { + if let Some(displayname) = db.users.displayname(&user_id).unwrap() { return MatrixResult(Ok(get_display_name::Response { displayname: Some(displayname), })); @@ -514,7 +511,7 @@ pub fn get_displayname_route( #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn set_avatar_url_route( - data: State, + db: State<'_, Database>, body: Ruma, _user_id: String, ) -> MatrixResult { @@ -533,9 +530,12 @@ pub fn set_avatar_url_route( // TODO also make sure this is valid mxc:// format (not only starting with it) if body.avatar_url == "" { - data.avatar_url_remove(&user_id); + db.users.set_avatar_url(&user_id, None).unwrap(); } else { - data.avatar_url_set(&user_id, body.avatar_url.clone()); + db + .users + .set_avatar_url(&user_id, Some(body.avatar_url.clone())) + .unwrap(); // TODO send a new m.room.member join event with the updated avatar_url // TODO send a new m.presence event with the updated avatar_url } @@ -543,14 +543,14 @@ pub fn set_avatar_url_route( MatrixResult(Ok(set_avatar_url::Response)) } -#[get("/_matrix/client/r0/profile//avatar_url", data = "")] +#[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn get_avatar_url_route( - data: State, + db: State<'_, Database>, body: Ruma, - user_id_raw: String, + _user_id: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); - if !data.user_exists(&user_id) { + if !db.users.exists(&user_id).unwrap() { // Return 404 if we don't have a profile for this id debug!("Profile was not found."); return MatrixResult(Err(Error { @@ -559,7 +559,7 @@ pub fn get_avatar_url_route( status_code: http::StatusCode::NOT_FOUND, })); } - if let Some(avatar_url) = data.avatar_url_get(&user_id) { + if let Some(avatar_url) = db.users.avatar_url(&user_id).unwrap() { return MatrixResult(Ok(get_avatar_url::Response { avatar_url: Some(avatar_url), })); @@ -569,15 +569,15 @@ pub fn get_avatar_url_route( MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) } -#[get("/_matrix/client/r0/profile/", data = "")] +#[get("/_matrix/client/r0/profile/<_user_id>", data = "")] pub fn get_profile_route( - data: State, + db: State<'_, Database>, body: Ruma, - user_id_raw: String, + _user_id: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); - let avatar_url = data.avatar_url_get(&user_id); - let displayname = data.displayname_get(&user_id); + let avatar_url = db.users.avatar_url(&user_id).unwrap(); + let displayname = db.users.displayname(&user_id).unwrap(); if avatar_url.is_some() || displayname.is_some() { return MatrixResult(Ok(get_profile::Response { @@ -595,17 +595,16 @@ pub fn get_profile_route( })) } -#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +#[put("/_matrix/client/r0/presence/<_user_id>/status")] pub fn set_presence_route( - body: Ruma, _user_id: String, ) -> MatrixResult { // TODO 
MatrixResult(Ok(set_presence::Response)) } -#[post("/_matrix/client/r0/keys/query", data = "")] -pub fn get_keys_route(body: Ruma) -> MatrixResult { +#[post("/_matrix/client/r0/keys/query")] +pub fn get_keys_route() -> MatrixResult { // TODO MatrixResult(Ok(get_keys::Response { failures: BTreeMap::new(), @@ -613,11 +612,8 @@ pub fn get_keys_route(body: Ruma) -> MatrixResult, - body: Ruma, -) -> MatrixResult { +#[post("/_matrix/client/r0/keys/upload")] +pub fn upload_keys_route() -> MatrixResult { // TODO MatrixResult(Ok(upload_keys::Response { one_time_key_counts: BTreeMap::new(), @@ -626,12 +622,12 @@ pub fn upload_keys_route( #[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] pub fn set_read_marker_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - data.room_userdata_update( + db.account_data.update( Some(&body.room_id), &user_id, EduEvent::FullyRead(ruma_events::fully_read::FullyReadEvent { @@ -640,10 +636,24 @@ pub fn set_read_marker_route( }, room_id: Some(body.room_id.clone()), }), - ); + &db.globals + ) + .unwrap(); if let Some(event) = &body.read_receipt { - data.room_read_set(&body.room_id, &user_id, event); + db + .rooms + .edus + .room_read_set( + &body.room_id, + &user_id, + db + .rooms + .get_pdu_count(event) + .unwrap() + .expect("TODO: what if a client specifies an invalid event"), + ) + .unwrap(); let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -660,14 +670,19 @@ pub fn set_read_marker_route( }, ); - data.roomlatest_update( - &user_id, - &body.room_id, - EduEvent::Receipt(ruma_events::receipt::ReceiptEvent { - content: receipt_content, - room_id: None, // None because it can be inferred - }), - ); + db + .rooms + .edus + .roomlatest_update( + &user_id, + &body.room_id, + EduEvent::Receipt(ruma_events::receipt::ReceiptEvent { + content: receipt_content, + room_id: None, // None because it can be inferred + }), + &db.globals, + ) + .unwrap(); } MatrixResult(Ok(set_read_marker::Response)) } @@ -677,7 +692,7 @@ pub fn set_read_marker_route( data = "" )] pub fn create_typing_event_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, _user_id: String, @@ -691,14 +706,23 @@ pub fn create_typing_event_route( }); if body.typing { - data.roomactive_add( - edu, - &body.room_id, - body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) - + utils::millis_since_unix_epoch().try_into().unwrap_or(0), - ); + db + .rooms + .edus + .roomactive_add( + edu, + &body.room_id, + body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + + utils::millis_since_unix_epoch().try_into().unwrap_or(0), + &db.globals, + ) + .unwrap(); } else { - data.roomactive_remove(edu, &body.room_id); + db + .rooms + .edus + .roomactive_remove(edu, &body.room_id) + .unwrap(); } MatrixResult(Ok(create_typing_event::Response)) @@ -706,66 +730,93 @@ pub fn create_typing_event_route( #[post("/_matrix/client/r0/createRoom", data = "")] pub fn create_room_route( - data: State, + db: State<'_, Database>, body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::new(data.hostname()).expect("host is valid"); + let room_id = RoomId::new(db.globals.hostname()).expect("host is valid"); let user_id = body.user_id.clone().expect("user is authenticated"); - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomCreate, - json!({ "creator": user_id }), - None, - Some("".to_owned()), - ); - - 
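set_read_marker_route above maps the client-supplied event id to a numeric count via rooms.get_pdu_count before storing the read marker. A sketch of that lookup, mirroring the old pdu_get_count: the count is the trailing 8 bytes of the pdu_id stored in eventid_pduid:

use ruma_identifiers::EventId;
use std::convert::TryInto;

pub fn get_pdu_count(eventid_pduid: &sled::Tree, event_id: &EventId) -> sled::Result<Option<u64>> {
    Ok(eventid_pduid
        .get(event_id.to_string().as_bytes())?
        .map(|pdu_id| {
            // pdu_id = room_id + 0xff + big-endian count; take the last 8 bytes.
            let count = &pdu_id[pdu_id.len() - std::mem::size_of::<u64>()..];
            u64::from_be_bytes(count.try_into().expect("count is exactly 8 bytes"))
        }))
}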
data.room_join(&room_id, &user_id); - - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomPowerLevels, - json!({ - "ban": 50, - "events_default": 0, - "invite": 50, - "kick": 50, - "redact": 50, - "state_default": 50, - "users": { user_id.to_string(): 100 }, - "users_default": 0 - }), - None, - Some("".to_owned()), - ); - - if let Some(name) = &body.name { - data.pdu_append( + db + .rooms + .append_pdu( room_id.clone(), user_id.clone(), - EventType::RoomName, - json!({ "name": name }), + EventType::RoomCreate, + json!({ "creator": user_id }), None, Some("".to_owned()), - ); + &db.globals, + ) + .unwrap(); + + db + .rooms + .join( + &room_id, + &user_id, + db.users.displayname(&user_id).unwrap(), + &db.globals, + ) + .unwrap(); + + db + .rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomPowerLevels, + json!({ + "ban": 50, + "events_default": 0, + "invite": 50, + "kick": 50, + "redact": 50, + "state_default": 50, + "users": { user_id.to_string(): 100 }, + "users_default": 0 + }), + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); + + if let Some(name) = &body.name { + db + .rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomName, + json!({ "name": name }), + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); } if let Some(topic) = &body.topic { - data.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomTopic, - json!({ "topic": topic }), - None, - Some("".to_owned()), - ); + db + .rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomTopic, + json!({ "topic": topic }), + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); } for user in &body.invite { - data.room_invite(&user_id, &room_id, user); + db + .rooms + .invite(&user_id, &room_id, user, &db.globals) + .unwrap(); } MatrixResult(Ok(create_room::Response { room_id })) @@ -773,12 +824,12 @@ pub fn create_room_route( #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn get_alias_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_alias: String, ) -> MatrixResult { // TODO - let room_id = if body.room_alias.server_name() == data.hostname() { + let room_id = if body.room_alias.server_name() == db.globals.hostname() { match body.room_alias.alias() { "conduit" => "!lgOCCXQKtXOAPlAlG5:conduit.rs", _ => { @@ -804,14 +855,22 @@ pub fn get_alias_route( #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] pub fn join_room_by_id_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { - if data.room_join( - &body.room_id, - body.user_id.as_ref().expect("user is authenticated"), - ) { + let user_id = body.user_id.clone().expect("user is authenticated"); + + if db + .rooms + .join( + &body.room_id, + &user_id, + db.users.displayname(&user_id).unwrap(), + &db.globals, + ) + .is_ok() + { MatrixResult(Ok(join_room_by_id::Response { room_id: body.room_id.clone(), })) @@ -826,14 +885,16 @@ pub fn join_room_by_id_route( #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] pub fn join_room_by_id_or_alias_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id_or_alias: String, ) -> MatrixResult { + let user_id = body.user_id.clone().expect("user is authenticated"); + let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(room_alias) => { - if room_alias.server_name() == data.hostname() { + if room_alias.server_name() == db.globals.hostname() { return 
MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room alias not found.".to_owned(), @@ -847,10 +908,16 @@ pub fn join_room_by_id_or_alias_route( } }; - if data.room_join( - &room_id, - body.user_id.as_ref().expect("user is authenticated"), - ) { + if db + .rooms + .join( + &room_id, + &user_id, + db.users.displayname(&user_id).unwrap(), + &db.globals, + ) + .is_ok() + { MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) } else { MatrixResult(Err(Error { @@ -863,38 +930,45 @@ pub fn join_room_by_id_or_alias_route( #[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] pub fn leave_room_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - data.room_leave(&user_id, &body.room_id, &user_id); + db + .rooms + .leave(&user_id, &body.room_id, &user_id, &db.globals) + .unwrap(); MatrixResult(Ok(leave_room::Response)) } #[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] pub fn forget_room_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - data.room_forget(&body.room_id, &user_id); + db.rooms.forget(&body.room_id, &user_id).unwrap(); MatrixResult(Ok(forget_room::Response)) } #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] pub fn invite_user_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - data.room_invite( - &body.user_id.as_ref().expect("user is authenticated"), - &body.room_id, - &user_id, - ); + db + .rooms + .invite( + &body.user_id.as_ref().expect("user is authenticated"), + &body.room_id, + &user_id, + &db.globals, + ) + .unwrap(); MatrixResult(Ok(invite_user::Response)) } else { MatrixResult(Err(Error { @@ -905,16 +979,16 @@ pub fn invite_user_route( } } -#[post("/_matrix/client/r0/publicRooms", data = "")] +#[post("/_matrix/client/r0/publicRooms")] pub async fn get_public_rooms_filtered_route( - data: State<'_, Data>, - body: Ruma, + db: State<'_, Database>, ) -> MatrixResult { - let mut chunk = data - .rooms_all() + let mut chunk = db + .rooms + .all_rooms() .into_iter() .map(|room_id| { - let state = data.room_state(&room_id); + let state = db.rooms.room_state(&room_id).unwrap(); directory::PublicRoomsChunk { aliases: Vec::new(), canonical_alias: None, @@ -923,7 +997,7 @@ pub async fn get_public_rooms_filtered_route( .and_then(|s| s.content.get("name")) .and_then(|n| n.as_str()) .map(|n| n.to_owned()), - num_joined_members: data.room_users_joined(&room_id).into(), + num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, topic: None, world_readable: false, @@ -937,7 +1011,7 @@ pub async fn get_public_rooms_filtered_route( chunk.extend_from_slice( &server_server::send_request( - &data, + &db, "privacytools.io".to_owned(), ruma_federation_api::v1::get_public_rooms::Request { limit: Some(20_u32.into()), @@ -965,13 +1039,14 @@ pub async fn get_public_rooms_filtered_route( #[post("/_matrix/client/r0/user_directory/search", data = "")] pub fn search_users_route( - data: State, + db: State<'_, Database>, body: Ruma, ) -> MatrixResult { MatrixResult(Ok(search_users::Response { - results: data - .users_all() - .into_iter() + results: db + .users + .iter() + .map(Result::unwrap) .filter(|user_id| user_id.to_string().contains(&body.search_term)) 
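The public-rooms and sync code above counts members with db.rooms.room_members(&room_id).count(). Assuming the roomuserid_joined tree introduced in this patch keys entries as room_id + 0xff + user_id (the layout is inferred from the tree name, not stated in the diff), counting members is a single prefix scan:

use ruma_identifiers::RoomId;

pub fn room_members_count(roomuserid_joined: &sled::Tree, room_id: &RoomId) -> usize {
    // Assumed key layout: room_id bytes, 0xff delimiter, then the member's user_id.
    let mut prefix = room_id.to_string().into_bytes();
    prefix.push(0xff);
    roomuserid_joined.scan_prefix(prefix).count()
}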
.map(|user_id| search_users::User { user_id, @@ -983,18 +1058,16 @@ pub fn search_users_route( })) } -#[get("/_matrix/client/r0/rooms/<_room_id>/members", data = "")] +#[get("/_matrix/client/r0/rooms/<_room_id>/members")] pub fn get_member_events_route( - body: Ruma, _room_id: String, ) -> MatrixResult { // TODO MatrixResult(Ok(get_member_events::Response { chunk: Vec::new() })) } -#[get("/_matrix/client/r0/thirdparty/protocols", data = "")] +#[get("/_matrix/client/r0/thirdparty/protocols")] pub fn get_protocols_route( - body: Ruma, ) -> MatrixResult { // TODO MatrixResult(Ok(get_protocols::Response { @@ -1007,7 +1080,7 @@ pub fn get_protocols_route( data = "" )] pub fn create_message_event_route( - data: State, + db: State<'_, Database>, _room_id: String, _event_type: String, _txn_id: String, @@ -1018,14 +1091,16 @@ pub fn create_message_event_route( let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = data - .pdu_append( + let event_id = db + .rooms + .append_pdu( body.room_id.clone(), user_id.clone(), body.event_type.clone(), body.json_body.clone(), Some(unsigned), None, + &db.globals, ) .expect("message events are always okay"); @@ -1037,7 +1112,7 @@ pub fn create_message_event_route( data = "" )] pub fn create_state_event_for_key_route( - data: State, + db: State<'_, Database>, _room_id: String, _event_type: String, _state_key: String, @@ -1046,18 +1121,20 @@ pub fn create_state_event_for_key_route( let user_id = body.user_id.clone().expect("user is authenticated"); // Reponse of with/without key is the same - if let Some(event_id) = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body.clone(), - None, - Some(body.state_key.clone()), - ) { - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) - } else { - panic!("TODO: error missing permissions"); - } + let event_id = db + .rooms + .append_pdu( + body.room_id.clone(), + user_id, + body.event_type.clone(), + body.json_body.clone(), + None, + Some(body.state_key.clone()), + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) } #[put( @@ -1065,7 +1142,7 @@ pub fn create_state_event_for_key_route( data = "" )] pub fn create_state_event_for_empty_key_route( - data: State, + db: State<'_, Database>, _room_id: String, _event_type: String, body: Ruma, @@ -1073,39 +1150,46 @@ pub fn create_state_event_for_empty_key_route( let user_id = body.user_id.clone().expect("user is authenticated"); // Reponse of with/without key is the same - if let Some(event_id) = data.pdu_append( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - body.event_type.clone(), - body.json_body.clone(), - None, - Some("".to_owned()), - ) { - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) - } else { - panic!("TODO: error missing permissions"); - } + let event_id = db + .rooms + .append_pdu( + body.room_id.clone(), + user_id, + body.event_type.clone(), + body.json_body.clone(), + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) } #[get("/_matrix/client/r0/sync", data = "")] pub fn sync_route( - data: State, + db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - std::thread::sleep(Duration::from_millis(1500)); + std::thread::sleep(Duration::from_millis(100)); let user_id = 
body.user_id.clone().expect("user is authenticated"); - let next_batch = data.last_pdu_index().to_string(); + let next_batch = db.globals.current_count().unwrap().to_string(); let mut joined_rooms = BTreeMap::new(); - let joined_roomids = data.rooms_joined(&user_id); let since = body .since .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); - for room_id in joined_roomids { - let mut pdus = data.pdus_since(&room_id, since); + for room_id in db.rooms.rooms_joined(&user_id) { + let room_id = room_id.unwrap(); + + let mut pdus = db + .rooms + .pdus_since(&room_id, since).unwrap() + .map(|r| r.unwrap()) + .collect::>(); let mut send_member_count = false; let mut send_full_state = false; @@ -1119,8 +1203,13 @@ pub fn sync_route( } } - let notification_count = if let Some(last_read) = data.room_read_get(&room_id, &user_id) { - Some((data.pdus_since(&room_id, last_read).len() as u32).into()) + let notification_count = if let Some(last_read) = db + .rooms + .edus + .room_read_get(&room_id, &user_id) + .unwrap() + { + Some((db.rooms.pdus_since(&room_id, last_read).unwrap().count() as u32).into()) } else { None }; @@ -1135,7 +1224,7 @@ pub fn sync_route( let prev_batch = pdus .first() - .and_then(|e| data.pdu_get_count(&e.event_id)) + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) .map(|c| c.to_string()); let room_events = pdus @@ -1143,15 +1232,39 @@ pub fn sync_route( .map(|pdu| pdu.to_room_event()) .collect::>(); - let mut edus = data.roomlatests_since(&room_id, since); - edus.extend_from_slice(&data.roomactives_in(&room_id)); + let mut edus = db + .rooms + .edus + .roomactives_all(&room_id) + .map(|r| r.unwrap()) + .collect::>(); + + if edus.is_empty() { + edus.push( + EduEvent::Typing(ruma_events::typing::TypingEvent { + content: ruma_events::typing::TypingEventContent { + user_ids: Vec::new(), + }, + room_id: None, // None because it can be inferred + }) + .into(), + ); + } + + edus.extend( + db + .rooms + .edus + .roomlatests_since(&room_id, since).unwrap() + .map(|r| r.unwrap()), + ); joined_rooms.insert( room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { account_data: Some(sync_events::AccountData { - events: data - .room_userdata_since(Some(&room_id), &user_id, since) + events: db.account_data + .changes_since(Some(&room_id), &user_id, since).unwrap() .into_iter() .map(|(_, v)| v) .collect(), @@ -1159,12 +1272,12 @@ pub fn sync_route( summary: sync_events::RoomSummary { heroes: Vec::new(), joined_member_count: if send_member_count { - Some(data.room_users_joined(&room_id).into()) + Some((db.rooms.room_members(&room_id).count() as u32).into()) } else { None }, invited_member_count: if send_member_count { - Some(data.room_users_invited(&room_id).into()) + Some((db.rooms.room_members_invited(&room_id).count() as u32).into()) } else { None }, @@ -1181,7 +1294,10 @@ pub fn sync_route( // TODO: state before timeline state: sync_events::State { events: if send_full_state { - data.room_state(&room_id) + db + .rooms + .room_state(&room_id) + .unwrap() .into_iter() .map(|(_, pdu)| pdu.to_state_event()) .collect() @@ -1195,12 +1311,28 @@ pub fn sync_route( } let mut left_rooms = BTreeMap::new(); - let left_roomids = data.rooms_left(&user_id); - for room_id in left_roomids { - let pdus = data.pdus_since(&room_id, since); - let room_events = pdus.into_iter().map(|pdu| pdu.to_room_event()).collect(); - let mut edus = data.roomlatests_since(&room_id, since); - edus.extend_from_slice(&data.roomactives_in(&room_id)); + for room_id in db.rooms.rooms_left(&user_id) { + 
let room_id = room_id.unwrap(); + let pdus = db.rooms.pdus_since(&room_id, since).unwrap(); + let room_events = pdus + .into_iter() + .map(|pdu| pdu.unwrap().to_room_event()) + .collect(); + + let mut edus = db + .rooms + .edus + .roomlatests_since(&room_id, since).unwrap() + .map(|r| r.unwrap()) + .collect::>(); + + edus.extend( + db + .rooms + .edus + .roomactives_all(&room_id) + .map(|r| r.unwrap()), + ); left_rooms.insert( room_id.clone().try_into().unwrap(), @@ -1217,11 +1349,13 @@ pub fn sync_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in data.rooms_invited(&user_id) { - let events = data - .pdus_since(&room_id, since) + for room_id in db.rooms.rooms_invited(&user_id) { + let room_id = room_id.unwrap(); + let events = db + .rooms + .pdus_since(&room_id, since).unwrap() .into_iter() - .map(|pdu| pdu.to_stripped_state_event()) + .map(|pdu| pdu.unwrap().to_stripped_state_event()) .collect(); invited_rooms.insert( @@ -1241,8 +1375,8 @@ pub fn sync_route( }, presence: sync_events::Presence { events: Vec::new() }, account_data: sync_events::AccountData { - events: data - .room_userdata_since(None, &user_id, since) + events: db.account_data + .changes_since(None, &user_id, since).unwrap() .into_iter() .map(|(_, v)| v) .collect(), @@ -1255,7 +1389,7 @@ pub fn sync_route( #[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] pub fn get_message_events_route( - data: State, + db: State<'_, Database>, body: Ruma, _room_id: String, ) -> MatrixResult { @@ -1264,14 +1398,15 @@ pub fn get_message_events_route( } if let Ok(from) = body.from.clone().parse() { - let pdus = data.pdus_until( - &body.room_id, - from, - body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10), - ); + let pdus = db + .rooms + .pdus_until(&body.room_id, from) + .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) + .map(|r| r.unwrap()) + .collect::>(); let prev_batch = pdus .last() - .and_then(|e| data.pdu_get_count(&e.event_id)) + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) .map(|c| c.to_string()); let room_events = pdus .into_iter() @@ -1332,7 +1467,7 @@ pub fn get_media_config_route() -> MatrixResult { #[options("/<_segments..>")] pub fn options_route( - _segments: rocket::http::uri::Segments, + _segments: rocket::http::uri::Segments<'_>, ) -> MatrixResult { MatrixResult(Err(Error { kind: ErrorKind::NotFound, diff --git a/src/data.rs b/src/data.rs deleted file mode 100644 index 9b9c541..0000000 --- a/src/data.rs +++ /dev/null @@ -1,1018 +0,0 @@ -use crate::{database::COUNTER, utils, Database, PduEvent}; -use ruma_events::{ - collections::only::Event as EduEvent, room::power_levels::PowerLevelsEventContent, EventJson, - EventType, -}; -use ruma_federation_api::RoomV3Pdu; -use ruma_identifiers::{EventId, RoomId, UserId}; -use serde_json::json; -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - mem, -}; - -pub struct Data { - hostname: String, - reqwest_client: reqwest::Client, - db: Database, -} - -impl Data { - /// Load an existing database or create a new one. - pub fn load_or_create(hostname: &str) -> Self { - let db = Database::load_or_create(hostname); - Self { - hostname: hostname.to_owned(), - reqwest_client: reqwest::Client::new(), - db, - } - } - - /// Get the hostname of the server. - pub fn hostname(&self) -> &str { - &self.hostname - } - - /// Get the hostname of the server. 
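get_message_events_route above pages backwards: pdus_until yields PDUs with a count strictly below `from`, newest first, and the route takes `limit` of them. A sketch of the underlying key walk, following the old pdus_until implementation later in this patch (tree name and key layout taken from there):

pub fn pdu_ids_until(pduid_pdu: &sled::Tree, room_id: &str, until: u64) -> sled::Result<Vec<Vec<u8>>> {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);

    // Start just below room_prefix + until and walk towards older events.
    let mut current = prefix.clone();
    current.extend_from_slice(&until.to_be_bytes());

    let mut ids = Vec::new();
    while let Some((key, _value)) = pduid_pdu.get_lt(&current)? {
        if !key.starts_with(&prefix) {
            break; // left this room's key range
        }
        current = key.to_vec();
        ids.push(current.clone());
    }
    Ok(ids)
}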
- pub fn reqwest_client(&self) -> &reqwest::Client { - &self.reqwest_client - } - - pub fn keypair(&self) -> &ruma_signatures::Ed25519KeyPair { - &self.db.keypair - } - - /// Check if a user has an account by looking for an assigned password. - pub fn user_exists(&self, user_id: &UserId) -> bool { - self.db - .userid_password - .contains_key(user_id.to_string()) - .unwrap() - } - - /// Create a new user account by assigning them a password. - pub fn user_add(&self, user_id: &UserId, hash: &str) { - self.db - .userid_password - .insert(user_id.to_string(), hash) - .unwrap(); - } - - /// Find out which user an access token belongs to. - pub fn user_from_token(&self, token: &str) -> Option { - self.db - .token_userid - .get(token) - .unwrap() - .and_then(|bytes| (*utils::string_from_bytes(&bytes)).try_into().ok()) - } - - pub fn users_all(&self) -> Vec { - self.db - .userid_password - .iter() - .keys() - .map(|k| UserId::try_from(&*utils::string_from_bytes(&k.unwrap())).unwrap()) - .collect() - } - - /// Gets password hash for given user id. - pub fn password_hash_get(&self, user_id: &UserId) -> Option { - self.db - .userid_password - .get(user_id.to_string()) - .unwrap() - .map(|bytes| utils::string_from_bytes(&bytes)) - } - - /// Removes a displayname. - pub fn displayname_remove(&self, user_id: &UserId) { - self.db - .userid_displayname - .remove(user_id.to_string()) - .unwrap(); - } - - /// Set a new displayname. - pub fn displayname_set(&self, user_id: &UserId, displayname: String) { - self.db - .userid_displayname - .insert(user_id.to_string(), &*displayname) - .unwrap(); - for room_id in self.rooms_joined(user_id) { - self.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - json!({"membership": "join", "displayname": displayname}), - None, - Some(user_id.to_string()), - ); - } - } - - /// Get a the displayname of a user. - pub fn displayname_get(&self, user_id: &UserId) -> Option { - self.db - .userid_displayname - .get(user_id.to_string()) - .unwrap() - .map(|bytes| utils::string_from_bytes(&bytes)) - } - - /// Removes a avatar_url. - pub fn avatar_url_remove(&self, user_id: &UserId) { - self.db - .userid_avatarurl - .remove(user_id.to_string()) - .unwrap(); - } - - /// Set a new avatar_url. - pub fn avatar_url_set(&self, user_id: &UserId, avatar_url: String) { - self.db - .userid_avatarurl - .insert(user_id.to_string(), &*avatar_url) - .unwrap(); - } - - /// Get a the avatar_url of a user. - pub fn avatar_url_get(&self, user_id: &UserId) -> Option { - self.db - .userid_avatarurl - .get(user_id.to_string()) - .unwrap() - .map(|bytes| utils::string_from_bytes(&bytes)) - } - - /// Add a new device to a user. - pub fn device_add(&self, user_id: &UserId, device_id: &str) { - if self - .db - .userid_deviceids - .get_iter(&user_id.to_string().as_bytes()) - .filter_map(|item| item.ok()) - .map(|(_key, value)| value) - .all(|device| device != device_id) - { - self.db - .userid_deviceids - .add(user_id.to_string().as_bytes(), device_id.into()); - } - } - - /// Replace the access token of one device. - pub fn token_replace(&self, user_id: &UserId, device_id: &String, token: String) { - // Make sure the device id belongs to the user - debug_assert!(self - .db - .userid_deviceids - .get_iter(&user_id.to_string().as_bytes()) - .filter_map(|item| item.ok()) - .map(|(_key, value)| value) - .any(|device| device == device_id.as_bytes())); // Does the user have that device? 
- - // Remove old token - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - if let Some(old_token) = self.db.userdeviceid_token.get(&key).unwrap() { - self.db.token_userid.remove(old_token).unwrap(); - // It will be removed from deviceid_token by the insert later - } - - // Assign token to device_id - self.db.userdeviceid_token.insert(key, &*token).unwrap(); - - // Assign token to user - self.db - .token_userid - .insert(token, &*user_id.to_string()) - .unwrap(); - } - - pub fn room_join(&self, room_id: &RoomId, user_id: &UserId) -> bool { - if !self.room_exists(room_id) - && !self - .db - .userid_joinroomids - .get_iter(user_id.to_string().as_bytes()) - .values() - .any(|r| r.unwrap() == room_id.to_string().as_bytes()) - { - return false; - } - - self.db.userid_joinroomids.add( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - self.db.roomid_joinuserids.add( - room_id.to_string().as_bytes(), - user_id.to_string().as_bytes().into(), - ); - self.db.userid_inviteroomids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes(), - ); - self.db.roomid_inviteuserids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes(), - ); - self.db.userid_leftroomids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - - let mut content = json!({"membership": "join"}); - if let Some(displayname) = self.displayname_get(user_id) { - content - .as_object_mut() - .unwrap() - .insert("displayname".to_owned(), displayname.into()); - } - - self.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - content, - None, - Some(user_id.to_string()), - ); - - true - } - - pub fn rooms_joined(&self, user_id: &UserId) -> Vec { - self.db - .userid_joinroomids - .get_iter(user_id.to_string().as_bytes()) - .values() - .map(|room_id| { - RoomId::try_from(&*utils::string_from_bytes(&room_id.unwrap())) - .expect("user joined valid room ids") - }) - .collect() - } - - /// Check if a room exists by looking for PDUs in that room. 
- pub fn room_exists(&self, room_id: &RoomId) -> bool { - // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id - - if let Some((key, _)) = self.db.pduid_pdu.get_gt(&prefix).unwrap() { - if key.starts_with(&prefix) { - true - } else { - false - } - } else { - false - } - } - - pub fn rooms_all(&self) -> Vec { - let mut room_ids = self - .db - .roomid_pduleaves - .iter_all() - .keys() - .map(|key| { - RoomId::try_from(&*utils::string_from_bytes( - &key.unwrap() - .iter() - .skip(1) // skip "d" - .copied() - .take_while(|&x| x != 0xff) // until delimiter - .collect::>(), - )) - .unwrap() - }) - .collect::>(); - room_ids.dedup(); - room_ids - } - - pub fn room_users_joined(&self, room_id: &RoomId) -> u32 { - self.db - .roomid_joinuserids - .get_iter(room_id.to_string().as_bytes()) - .count() as u32 - } - - pub fn room_users_invited(&self, room_id: &RoomId) -> u32 { - self.db - .roomid_inviteuserids - .get_iter(room_id.to_string().as_bytes()) - .count() as u32 - } - - pub fn room_state(&self, room_id: &RoomId) -> HashMap<(EventType, String), PduEvent> { - let mut hashmap = HashMap::new(); - for pdu in self - .db - .roomstateid_pdu - .scan_prefix(&room_id.to_string().as_bytes()) - .values() - .map(|value| serde_json::from_slice::(&value.unwrap()).unwrap()) - { - hashmap.insert( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("state events have a state key"), - ), - pdu, - ); - } - hashmap - } - - pub fn room_leave(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { - self.pdu_append( - room_id.clone(), - sender.clone(), - EventType::RoomMember, - json!({"membership": "leave"}), - None, - Some(user_id.to_string()), - ); - self.db.userid_inviteroomids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - self.db.roomid_inviteuserids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - self.db.userid_joinroomids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - self.db.roomid_joinuserids.remove_value( - room_id.to_string().as_bytes(), - user_id.to_string().as_bytes().into(), - ); - self.db.userid_leftroomids.add( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - } - - pub fn room_forget(&self, room_id: &RoomId, user_id: &UserId) { - self.db.userid_leftroomids.remove_value( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - } - - pub fn room_invite(&self, sender: &UserId, room_id: &RoomId, user_id: &UserId) { - self.pdu_append( - room_id.clone(), - sender.clone(), - EventType::RoomMember, - json!({"membership": "invite"}), - None, - Some(user_id.to_string()), - ); - self.db.userid_inviteroomids.add( - user_id.to_string().as_bytes(), - room_id.to_string().as_bytes().into(), - ); - self.db.roomid_inviteuserids.add( - room_id.to_string().as_bytes(), - user_id.to_string().as_bytes().into(), - ); - } - - pub fn rooms_invited(&self, user_id: &UserId) -> Vec { - self.db - .userid_inviteroomids - .get_iter(&user_id.to_string().as_bytes()) - .values() - .map(|key| RoomId::try_from(&*utils::string_from_bytes(&key.unwrap())).unwrap()) - .collect() - } - - pub fn rooms_left(&self, user_id: &UserId) -> Vec { - self.db - .userid_leftroomids - .get_iter(&user_id.to_string().as_bytes()) - .values() - .map(|key| 
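The pdu_id scheme used by room_exists and friends above is room id bytes, a 0xff delimiter, then the big-endian global counter. A small sketch of building such a key and of the existence check, assuming the same pduid_pdu tree; scan_prefix is used here in place of the original get_gt loop:

fn pdu_id(room_id: &str, count: u64) -> Vec<u8> {
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(0xff); // delimiter so one room id is never a prefix of another room's keys
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

fn room_exists(pduid_pdu: &sled::Tree, room_id: &str) -> bool {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    // Any key under the prefix means at least one PDU was appended to the room.
    pduid_pdu.scan_prefix(prefix).next().is_some()
}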
RoomId::try_from(&*utils::string_from_bytes(&key.unwrap())).unwrap()) - .collect() - } - - pub fn pdu_get_count(&self, event_id: &EventId) -> Option { - self.db - .eventid_pduid - .get(event_id.to_string().as_bytes()) - .unwrap() - .map(|pdu_id| { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) - }) - } - - pub fn pdu_get(&self, event_id: &EventId) -> Option { - self.db - .eventid_pduid - .get(event_id.to_string().as_bytes()) - .unwrap() - .map(|pdu_id| { - serde_json::from_slice( - &self - .db - .pduid_pdu - .get(pdu_id) - .unwrap() - .expect("eventid_pduid in db is valid"), - ) - .expect("pdu is valid") - }) - } - - pub fn pdu_leaves_get(&self, room_id: &RoomId) -> Vec { - let event_ids = self - .db - .roomid_pduleaves - .get_iter(room_id.to_string().as_bytes()) - .values() - .map(|pdu_id| { - EventId::try_from(&*utils::string_from_bytes(&pdu_id.unwrap())) - .expect("pdu leaves are valid event ids") - }) - .collect(); - - event_ids - } - - pub fn pdu_leaves_replace(&self, room_id: &RoomId, event_id: &EventId) { - self.db - .roomid_pduleaves - .clear(room_id.to_string().as_bytes()); - - self.db.roomid_pduleaves.add( - &room_id.to_string().as_bytes(), - (*event_id.to_string()).into(), - ); - } - - /// Add a persisted data unit from this homeserver - pub fn pdu_append( - &self, - room_id: RoomId, - sender: UserId, - event_type: EventType, - content: serde_json::Value, - unsigned: Option>, - state_key: Option, - ) -> Option { - // Is the event authorized? - if state_key.is_some() { - if let Some(pdu) = self - .room_state(&room_id) - .get(&(EventType::RoomPowerLevels, "".to_owned())) - { - let power_levels = serde_json::from_value::>( - pdu.content.clone(), - ) - .unwrap() - .deserialize() - .unwrap(); - - match event_type { - EventType::RoomMember => { - // Member events are okay for now (TODO) - } - _ if power_levels - .users - .get(&sender) - .unwrap_or(&power_levels.users_default) - <= &0.into() => - { - // Not authorized - return None; - } - // User has sufficient power - _ => {} - } - } - } - - // prev_events are the leaves of the current graph. This method removes all leaves from the - // room and replaces them with our event - // TODO: Make sure this isn't called twice in parallel - let prev_events = self.pdu_leaves_get(&room_id); - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .map(|event_id| { - self.pdu_get(event_id) - .expect("pdu in prev_events is valid") - .depth - .into() - }) - .max() - .unwrap_or(0_u64) - + 1; - - let mut unsigned = unsigned.unwrap_or_default(); - // TODO: Optimize this to not load the whole room state? 
- if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self - .room_state(&room_id) - .get(&(event_type.clone(), state_key.clone())) - { - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - } - } - - let mut pdu = PduEvent { - event_id: EventId::try_from("$thiswillbefilledinlater").unwrap(), - room_id: room_id.clone(), - sender: sender.clone(), - origin: self.hostname.clone(), - origin_server_ts: utils::millis_since_unix_epoch().try_into().unwrap(), - kind: event_type, - content, - state_key, - prev_events, - depth: depth.try_into().unwrap(), - auth_events: Vec::new(), - redacts: None, - unsigned, - hashes: ruma_federation_api::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: HashMap::new(), - }; - - // Generate event id - pdu.event_id = EventId::try_from(&*format!( - "${}", - ruma_signatures::reference_hash(&serde_json::to_value(&pdu).unwrap()) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are correct"); - - let mut pdu_json = serde_json::to_value(&pdu).unwrap(); - ruma_signatures::hash_and_sign_event(self.hostname(), self.keypair(), &mut pdu_json) - .unwrap(); - - self.pdu_leaves_replace(&room_id, &pdu.event_id); - - // The count will go up regardless of the room_id - // This is also the next_batch/since value - // Increment the last index and use that - let index = utils::u64_from_bytes( - &self - .db - .global - .update_and_fetch(COUNTER, utils::increment) - .unwrap() - .unwrap(), - ); - - let mut pdu_id = room_id.to_string().as_bytes().to_vec(); - pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id - pdu_id.extend_from_slice(&index.to_be_bytes()); - - self.db - .pduid_pdu - .insert(&pdu_id, &*pdu_json.to_string()) - .unwrap(); - - self.db - .eventid_pduid - .insert(pdu.event_id.to_string(), pdu_id.clone()) - .unwrap(); - - if let Some(state_key) = pdu.state_key { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.kind.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(state_key.to_string().as_bytes()); - self.db - .roomstateid_pdu - .insert(key, &*pdu_json.to_string()) - .unwrap(); - } - - self.room_read_set(&room_id, &sender, &pdu.event_id); - - Some(pdu.event_id) - } - - /// Returns a vector of all PDUs in a room. - pub fn pdus_all(&self, room_id: &RoomId) -> Vec { - self.pdus_since(room_id, 0) - } - - pub fn last_pdu_index(&self) -> u64 { - utils::u64_from_bytes( - &self - .db - .global - .get(&COUNTER) - .unwrap() - .unwrap_or_else(|| (&0_u64.to_be_bytes()).into()), - ) - } - - /// Returns a vector of all events in a room that happened after the event with id `since`. - pub fn pdus_since(&self, room_id: &RoomId, since: u64) -> Vec { - // Create the first part of the full pdu id - let mut pdu_id = room_id.to_string().as_bytes().to_vec(); - pdu_id.push(0xff); // Add delimiter so we don't find rooms starting with the same id - pdu_id.extend_from_slice(&(since).to_be_bytes()); - - self.pdus_since_pduid(room_id, pdu_id) - } - - /// Returns a vector of all events in a room that happened after the event with id `since`. 
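The global counter bumped with update_and_fetch(COUNTER, utils::increment) above is what pdu ids, sync tokens and EDU ordering are built from. A sketch of the two helpers involved, assuming the counter is stored as a big-endian u64:

use std::convert::TryInto;

pub fn u64_from_bytes(bytes: &[u8]) -> u64 {
    u64::from_be_bytes(bytes.try_into().expect("exactly 8 bytes"))
}

// Merge function for sled's update_and_fetch: bump the counter by one,
// starting at 1 when the key does not exist yet.
pub fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old {
        Some(bytes) => u64_from_bytes(bytes) + 1,
        None => 1,
    };
    Some(number.to_be_bytes().to_vec())
}

With this, global.update_and_fetch(COUNTER, increment) returns the freshly incremented value, and u64_from_bytes turns it back into the index appended to the new pdu_id.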
- pub fn pdus_since_pduid(&self, room_id: &RoomId, pdu_id: Vec) -> Vec { - let mut pdus = Vec::new(); - - // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id - - let mut current = pdu_id; - - while let Some((key, value)) = self.db.pduid_pdu.get_gt(¤t).unwrap() { - if key.starts_with(&prefix) { - current = key.to_vec(); - pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); - } else { - break; - } - } - - pdus - } - - pub fn pdus_until(&self, room_id: &RoomId, until: u64, max: u32) -> Vec { - let mut pdus = Vec::new(); - - // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); // Add delimiter so we don't find rooms starting with the same id - - let mut current = prefix.clone(); - current.extend_from_slice(&until.to_be_bytes()); - - while let Some((key, value)) = self.db.pduid_pdu.get_lt(¤t).unwrap() { - if pdus.len() < max as usize && key.starts_with(&prefix) { - current = key.to_vec(); - pdus.push(serde_json::from_slice(&value).expect("pdu in db is valid")); - } else { - break; - } - } - - pdus - } - - pub fn roomlatest_update(&self, user_id: &UserId, room_id: &RoomId, event: EduEvent) { - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - // Start with last - if let Some(mut current) = self - .db - .roomlatestid_roomlatest - .scan_prefix(&prefix) - .keys() - .next_back() - .map(|c| c.unwrap()) - { - // Remove old marker (There should at most one) - loop { - if !current.starts_with(&prefix) { - // We're in another room - break; - } - if current.rsplitn(2, |&b| b == 0xff).next().unwrap() - == user_id.to_string().as_bytes() - { - // This is the old room_latest - self.db.roomlatestid_roomlatest.remove(current).unwrap(); - break; - } - // Else, try the event before that - if let Some((k, _)) = self.db.roomlatestid_roomlatest.get_lt(current).unwrap() { - current = k; - } else { - break; - } - } - } - - // Increment the last index and use that - let index = utils::u64_from_bytes( - &self - .db - .global - .update_and_fetch(COUNTER, utils::increment) - .unwrap() - .unwrap(), - ); - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&index.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); - - self.db - .roomlatestid_roomlatest - .insert(room_latest_id, &*serde_json::to_string(&event).unwrap()) - .unwrap(); - } - - /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. - pub fn roomlatests_since(&self, room_id: &RoomId, since: u64) -> Vec> { - let mut room_latests = Vec::new(); - - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - let mut current = prefix.clone(); - current.extend_from_slice(&(since + 1).to_be_bytes()); - - while let Some((key, value)) = self.db.roomlatestid_roomlatest.get_gt(¤t).unwrap() { - if key.starts_with(&prefix) { - current = key.to_vec(); - room_latests.push( - serde_json::from_slice::>(&value) - .expect("room_latest in db is valid"), - ); - } else { - break; - } - } - - room_latests - } - - /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. 
- pub fn roomlatests_all(&self, room_id: &RoomId) -> Vec> { - self.roomlatests_since(room_id, 0) - } - - pub fn roomactive_add(&self, event: EduEvent, room_id: &RoomId, timeout: u64) { - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - let mut current = prefix.clone(); - - while let Some((key, _)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { - if key.starts_with(&prefix) - && utils::u64_from_bytes(key.split(|&c| c == 0xff).nth(1).unwrap()) - > utils::millis_since_unix_epoch().try_into().unwrap() - { - current = key.to_vec(); - self.db.roomactiveid_roomactive.remove(¤t).unwrap(); - } else { - break; - } - } - - // Increment the last index and use that - let index = utils::u64_from_bytes( - &self - .db - .global - .update_and_fetch(COUNTER, utils::increment) - .unwrap() - .unwrap(), - ); - - let mut room_active_id = prefix; - room_active_id.extend_from_slice(&timeout.to_be_bytes()); - room_active_id.push(0xff); - room_active_id.extend_from_slice(&index.to_be_bytes()); - - self.db - .roomactiveid_roomactive - .insert(room_active_id, &*serde_json::to_string(&event).unwrap()) - .unwrap(); - } - - pub fn roomactive_remove(&self, event: EduEvent, room_id: &RoomId) { - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - let mut current = prefix.clone(); - - let json = serde_json::to_string(&event).unwrap(); - - while let Some((key, value)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { - if key.starts_with(&prefix) { - current = key.to_vec(); - if value == json.as_bytes() { - self.db.roomactiveid_roomactive.remove(¤t).unwrap(); - break; - } - } else { - break; - } - } - } - - /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. 
- pub fn roomactives_in(&self, room_id: &RoomId) -> Vec> { - let mut room_actives = Vec::new(); - - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - let mut current = prefix.clone(); - current.extend_from_slice(&utils::millis_since_unix_epoch().to_be_bytes()); - - while let Some((key, value)) = self.db.roomactiveid_roomactive.get_gt(¤t).unwrap() { - if key.starts_with(&prefix) { - current = key.to_vec(); - room_actives.push( - serde_json::from_slice::>(&value) - .expect("room_active in db is valid"), - ); - } else { - break; - } - } - - if room_actives.is_empty() { - return vec![EduEvent::Typing(ruma_events::typing::TypingEvent { - content: ruma_events::typing::TypingEventContent { - user_ids: Vec::new(), - }, - room_id: None, // None because it can be inferred - }) - .into()]; - } else { - room_actives - } - } - - pub fn room_userdata_update( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - event: EduEvent, - ) { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); - prefix.push(0xff); - - // Start with last - if let Some(mut current) = self - .db - .roomuserdataid_accountdata - .scan_prefix(&prefix) - .keys() - .next_back() - .map(|c| c.unwrap()) - { - // Remove old entry (there should be at most one) - loop { - if !current.starts_with(&prefix) { - // We're in another room or user - break; - } - if current.rsplit(|&b| b == 0xff).nth(2).unwrap() == user_id.to_string().as_bytes() - { - // This is the old room_latest - self.db.roomuserdataid_accountdata.remove(current).unwrap(); - break; - } - // Else, try the event before that - if let Some((k, _)) = self.db.roomuserdataid_accountdata.get_lt(current).unwrap() { - current = k; - } else { - break; - } - } - } - - // Increment the last index and use that - let index = utils::u64_from_bytes( - &self - .db - .global - .update_and_fetch(COUNTER, utils::increment) - .unwrap() - .unwrap(), - ); - - let mut key = prefix; - key.extend_from_slice(&index.to_be_bytes()); - - let json = serde_json::to_value(&event).unwrap(); - key.extend_from_slice(json["type"].as_str().unwrap().as_bytes()); - - self.db - .roomuserdataid_accountdata - .insert(key, &*json.to_string()) - .unwrap(); - } - - pub fn room_userdata_get( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - kind: &str, - ) -> Option> { - self.room_userdata_all(room_id, user_id).remove(kind) - } - - pub fn room_userdata_since( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - since: u64, - ) -> HashMap> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); - prefix.push(0xff); - - let mut current = prefix.clone(); - current.extend_from_slice(&(since + 1).to_be_bytes()); - - while let Some((key, value)) = self.db.roomuserdataid_accountdata.get_gt(¤t).unwrap() - { - if key.starts_with(&prefix) { - current = key.to_vec(); - let json = serde_json::from_slice::(&value).unwrap(); - userdata.insert( - json["type"].as_str().unwrap().to_owned(), - serde_json::from_value::>(json) - .expect("userdata in db is valid"), - ); - } else { - break; - } - } - - userdata - } - - pub fn room_userdata_all( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - ) -> HashMap> { - self.room_userdata_since(room_id, user_id, 0) - } - - pub fn room_read_set( - 
&self, - room_id: &RoomId, - user_id: &UserId, - event_id: &EventId, - ) -> Option<()> { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); - - self.db - .roomuserid_lastread - .insert(key, &self.pdu_get_count(event_id)?.to_be_bytes()) - .unwrap(); - - Some(()) - } - - pub fn room_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Option { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); - - self.db - .roomuserid_lastread - .get(key) - .unwrap() - .map(|v| utils::u64_from_bytes(&v)) - } - - pub fn debug(&self) { - self.db.debug(); - } -} diff --git a/src/database.rs b/src/database.rs index 4551bc0..47f0a56 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,92 +1,19 @@ -use crate::utils; +pub(self) mod account_data; +pub(self) mod globals; +pub(self) mod rooms; +pub(self) mod users; + use directories::ProjectDirs; -use sled::IVec; use std::fs::remove_dir_all; -pub struct MultiValue(sled::Tree); - -pub const COUNTER: &str = "c"; - -impl MultiValue { - /// Get an iterator over all values. - pub fn iter_all(&self) -> sled::Iter { - self.0.scan_prefix(b"d") - } - - /// Get an iterator over all values of this id. - pub fn get_iter(&self, id: &[u8]) -> sled::Iter { - // Data keys start with d - let mut key = vec![b'd']; - key.extend_from_slice(id.as_ref()); - key.push(0xff); // Add delimiter so we don't find keys starting with the same id - - self.0.scan_prefix(key) - } - - pub fn clear(&self, id: &[u8]) { - for key in self.get_iter(id).keys() { - self.0.remove(key.unwrap()).unwrap(); - } - } - - pub fn remove_value(&self, id: &[u8], value: &[u8]) { - if let Some(key) = self - .get_iter(id) - .find(|t| &t.as_ref().unwrap().1 == value) - .map(|t| t.unwrap().0) - { - self.0.remove(key).unwrap(); - } - } - - /// Add another value to the id. - pub fn add(&self, id: &[u8], value: IVec) { - // The new value will need a new index. 
We store the last used index in 'n' + id - let mut count_key: Vec = vec![b'n']; - count_key.extend_from_slice(id.as_ref()); - - // Increment the last index and use that - let index = self - .0 - .update_and_fetch(&count_key, utils::increment) - .unwrap() - .unwrap(); - - // Data keys start with d - let mut key = vec![b'd']; - key.extend_from_slice(id.as_ref()); - key.push(0xff); - key.extend_from_slice(&index); - - self.0.insert(key, value).unwrap(); - } -} - pub struct Database { - pub userid_password: sled::Tree, - pub userid_displayname: sled::Tree, - pub userid_avatarurl: sled::Tree, - pub userid_deviceids: MultiValue, - pub userdeviceid_token: sled::Tree, - pub token_userid: sled::Tree, - pub pduid_pdu: sled::Tree, // PduId = RoomId + Count - pub eventid_pduid: sled::Tree, - pub roomid_pduleaves: MultiValue, - pub roomstateid_pdu: sled::Tree, // Room + StateType + StateKey - pub roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + Type - pub roomuserid_lastread: sled::Tree, // RoomUserId = Room + User - pub roomid_joinuserids: MultiValue, - pub roomid_inviteuserids: MultiValue, - pub userid_joinroomids: MultiValue, - pub userid_inviteroomids: MultiValue, - pub userid_leftroomids: MultiValue, - // EDUs: - pub roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId TODO: Types - pub roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = TimeoutTime + Count - pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count - pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + Type + UserId - pub keypair: ruma_signatures::Ed25519KeyPair, - pub global: sled::Db, + pub globals: globals::Globals, + pub users: users::Users, + pub rooms: rooms::Rooms, + pub account_data: account_data::AccountData, + //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count + //pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + Type + UserId + pub _db: sled::Db, } impl Database { @@ -110,166 +37,38 @@ impl Database { let db = sled::open(&path).unwrap(); Self { - userid_password: db.open_tree("userid_password").unwrap(), - userid_deviceids: MultiValue(db.open_tree("userid_deviceids").unwrap()), - userid_displayname: db.open_tree("userid_displayname").unwrap(), - userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), - userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), - token_userid: db.open_tree("token_userid").unwrap(), - pduid_pdu: db.open_tree("pduid_pdu").unwrap(), - eventid_pduid: db.open_tree("eventid_pduid").unwrap(), - roomid_pduleaves: MultiValue(db.open_tree("roomid_pduleaves").unwrap()), - roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), - roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), - roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), - roomid_joinuserids: MultiValue(db.open_tree("roomid_joinuserids").unwrap()), - roomid_inviteuserids: MultiValue(db.open_tree("roomid_inviteuserids").unwrap()), - userid_joinroomids: MultiValue(db.open_tree("userid_joinroomids").unwrap()), - userid_inviteroomids: MultiValue(db.open_tree("userid_inviteroomids").unwrap()), - userid_leftroomids: MultiValue(db.open_tree("userid_leftroomids").unwrap()), - roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), - roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), - globalallid_globalall: 
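The MultiValue wrapper deleted here stored one entry per (id, index) under a "d" + id + 0xff + index key, with a separate "n" + id counter. The new schema instead encodes both sides of a relation directly in the key, as the tree names userroomid_joined and roomuserid_joined suggest. A sketch of the new-style write; the exact key layout is an assumption:

fn mark_joined(userroomid_joined: &sled::Tree, user_id: &str, room_id: &str) -> sled::Result<()> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    // The key itself encodes the membership; the value carries no information.
    userroomid_joined.insert(key, b"".to_vec())?;
    Ok(())
}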
db.open_tree("globalallid_globalall").unwrap(), - globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), - keypair: ruma_signatures::Ed25519KeyPair::new( - &*db.update_and_fetch("keypair", utils::generate_keypair) - .unwrap() - .unwrap(), - "key1".to_owned(), - ) - .unwrap(), - global: db, - } - } + globals: globals::Globals::load(db.open_tree("global").unwrap(), hostname.to_owned()), + users: users::Users { + userid_password: db.open_tree("userid_password").unwrap(), + userdeviceid: db.open_tree("userdeviceid").unwrap(), + userid_displayname: db.open_tree("userid_displayname").unwrap(), + userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), + userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), + token_userid: db.open_tree("token_userid").unwrap(), + }, + rooms: rooms::Rooms { + edus: rooms::RoomEdus { + roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), + roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), + roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), + }, + pduid_pdu: db.open_tree("pduid_pdu").unwrap(), + eventid_pduid: db.open_tree("eventid_pduid").unwrap(), + roomid_pduleaves: db.open_tree("roomid_pduleaves").unwrap(), + roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), - pub fn debug(&self) { - println!("# UserId -> Password:"); - for (k, v) in self.userid_password.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# UserId -> DeviceIds:"); - for (k, v) in self.userid_deviceids.iter_all().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# UserId -> Displayname:"); - for (k, v) in self.userid_displayname.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# UserId -> AvatarURL:"); - for (k, v) in self.userid_avatarurl.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# UserId+DeviceId -> Token:"); - for (k, v) in self.userdeviceid_token.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# Token -> UserId:"); - for (k, v) in self.token_userid.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# RoomId -> PDU leaves:"); - for (k, v) in self.roomid_pduleaves.iter_all().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# RoomStateId -> PDU:"); - for (k, v) in self.roomstateid_pdu.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# RoomId -> UserIds:"); - for (k, v) in self.roomid_joinuserids.iter_all().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# UserId -> RoomIds:"); - for (k, v) in self.userid_joinroomids.iter_all().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# PDU Id -> PDU:"); - for (k, v) in self.pduid_pdu.iter().map(|r| r.unwrap()) { - println!( - "{:?} 
-> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# EventId -> PDU Id:"); - for (k, v) in self.eventid_pduid.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# RoomLatestId -> RoomLatest:"); - for (k, v) in self.roomlatestid_roomlatest.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# RoomActiveId -> RoomActives:"); - for (k, v) in self.roomactiveid_roomactive.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# GlobalAllId -> GlobalAll:"); - for (k, v) in self.globalallid_globalall.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); - } - println!("\n# GlobalLatestId -> GlobalLatest:"); - for (k, v) in self.globallatestid_globallatest.iter().map(|r| r.unwrap()) { - println!( - "{:?} -> {:?}", - String::from_utf8_lossy(&k), - String::from_utf8_lossy(&v), - ); + userroomid_joined: db.open_tree("userroomid_joined").unwrap(), + roomuserid_joined: db.open_tree("roomuserid_joined").unwrap(), + userroomid_invited: db.open_tree("userroomid_invited").unwrap(), + roomuserid_invited: db.open_tree("roomuserid_invited").unwrap(), + userroomid_left: db.open_tree("userroomid_left").unwrap(), + }, + account_data: account_data::AccountData { + roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), + }, + //globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), + //globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), + _db: db, } } } diff --git a/src/database/account_data.rs b/src/database/account_data.rs new file mode 100644 index 0000000..1d48232 --- /dev/null +++ b/src/database/account_data.rs @@ -0,0 +1,120 @@ +use crate::Result; +use ruma_events::{collections::only::Event as EduEvent, EventJson}; +use ruma_identifiers::{RoomId, UserId}; +use std::collections::HashMap; + +pub struct AccountData { + pub(super) roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + Type +} + +impl AccountData { + /// Places one event in the account data of the user and removes the previous entry. 
+ pub fn update( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event: EduEvent, + globals: &super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.push(0xff); + + // Remove old entry + if let Some(old) = self + .roomuserdataid_accountdata + .scan_prefix(&prefix) + .keys() + .rev() + .filter_map(|r| r.ok()) + .take_while(|key| key.starts_with(&prefix)) + .filter(|key| { + key.split(|&b| b == 0xff) + .nth(1) + .filter(|&user| user == user_id.to_string().as_bytes()) + .is_some() + }) + .next() + { + // This is the old room_latest + self.roomuserdataid_accountdata.remove(old)?; + println!("removed old account data"); + } + + let mut key = prefix; + key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + key.push(0xff); + let json = serde_json::to_value(&event)?; + key.extend_from_slice(json["type"].as_str().unwrap().as_bytes()); + + self.roomuserdataid_accountdata + .insert(key, &*json.to_string()) + .unwrap(); + + Ok(()) + } + + // TODO: Optimize + /// Searches the account data for a specific kind. + pub fn get( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + kind: &str, + ) -> Result>> { + Ok(self.all(room_id, user_id)?.remove(kind)) + } + + /// Returns all changes to the account data that happened after `since`. + pub fn changes_since( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + since: u64, + ) -> Result>> { + let mut userdata = HashMap::new(); + + let mut prefix = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.push(0xff); + + // Skip the data that's exactly at since, because we sent that last time + let mut first_possible = prefix.clone(); + first_possible.extend_from_slice(&(since + 1).to_be_bytes()); + + for json in self + .roomuserdataid_accountdata + .range(&*first_possible..) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| serde_json::from_slice::(&v).unwrap()) + { + userdata.insert( + json["type"].as_str().unwrap().to_owned(), + serde_json::from_value::>(json) + .expect("userdata in db is valid"), + ); + } + + Ok(userdata) + } + + /// Returns all account data. + pub fn all( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + ) -> Result>> { + self.changes_since(room_id, user_id, 0) + } +} diff --git a/src/database/globals.rs b/src/database/globals.rs new file mode 100644 index 0000000..f9e9999 --- /dev/null +++ b/src/database/globals.rs @@ -0,0 +1,61 @@ +use crate::{utils, Result}; + +pub const COUNTER: &str = "c"; + +pub struct Globals { + pub(super) globals: sled::Tree, + hostname: String, + keypair: ruma_signatures::Ed25519KeyPair, + reqwest_client: reqwest::Client, +} + +impl Globals { + pub fn load(globals: sled::Tree, hostname: String) -> Self { + let keypair = ruma_signatures::Ed25519KeyPair::new( + &*globals + .update_and_fetch("keypair", utils::generate_keypair) + .unwrap() + .unwrap(), + "key1".to_owned(), + ) + .unwrap(); + + Self { + globals, + hostname, + keypair, + reqwest_client: reqwest::Client::new(), + } + } + + /// Returns the hostname of the server. + pub fn hostname(&self) -> &str { + &self.hostname + } + + /// Returns this server's keypair. 
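// Editor's sketch, not part of this patch: `utils::increment` is not shown here, but
// `next_count()` a few lines below relies on it behaving roughly like this — read the stored
// value as a big-endian u64, add one, and start at 1 when no value exists yet — so the global
// counter is strictly increasing across restarts.
fn increment_sketch(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old {
        Some(bytes) if bytes.len() == 8 => {
            let mut array = [0u8; 8];
            array.copy_from_slice(bytes);
            u64::from_be_bytes(array) + 1
        }
        _ => 1, // missing or malformed value: start counting at 1
    };
    Some(number.to_be_bytes().to_vec())
}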
+ pub fn keypair(&self) -> &ruma_signatures::Ed25519KeyPair { + &self.keypair + } + + /// Returns a reqwest client which can be used to send requests. + pub fn reqwest_client(&self) -> &reqwest::Client { + &self.reqwest_client + } + + pub fn next_count(&self) -> Result { + Ok(utils::u64_from_bytes( + &self + .globals + .update_and_fetch(COUNTER, utils::increment)? + .expect("utils::increment will always put in a value"), + )) + } + + pub fn current_count(&self) -> Result { + Ok(self + .globals + .get(COUNTER)? + .map_or(0_u64, |bytes| utils::u64_from_bytes(&bytes))) + } +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs new file mode 100644 index 0000000..f52888c --- /dev/null +++ b/src/database/rooms.rs @@ -0,0 +1,547 @@ +mod edus; + +pub use edus::RoomEdus; + +use crate::{utils, Error, PduEvent, Result}; +use ruma_events::{room::power_levels::PowerLevelsEventContent, EventJson, EventType}; +use ruma_identifiers::{EventId, RoomId, UserId}; +use serde_json::json; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + mem, +}; + +pub struct Rooms { + pub edus: edus::RoomEdus, + pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count + pub(super) eventid_pduid: sled::Tree, + pub(super) roomid_pduleaves: sled::Tree, + pub(super) roomstateid_pdu: sled::Tree, // Room + StateType + StateKey + + pub(super) userroomid_joined: sled::Tree, + pub(super) roomuserid_joined: sled::Tree, + pub(super) userroomid_invited: sled::Tree, + pub(super) roomuserid_invited: sled::Tree, + pub(super) userroomid_left: sled::Tree, +} + +impl Rooms { + /// Checks if a room exists. + pub fn exists(&self, room_id: &RoomId) -> Result { + // Look for PDUs in that room. + + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + Ok(self + .pduid_pdu + .get_gt(&prefix)? + .filter(|(k, _)| k.starts_with(&prefix)) + .is_some()) + } + + // TODO: Remove and replace with public room dir + /// Returns a vector over all rooms. + pub fn all_rooms(&self) -> Vec { + let mut room_ids = self + .roomid_pduleaves + .iter() + .keys() + .map(|key| { + RoomId::try_from( + &*utils::string_from_bytes( + &key.unwrap() + .iter() + .copied() + .take_while(|&x| x != 0xff) // until delimiter + .collect::>(), + ) + .unwrap(), + ) + .unwrap() + }) + .collect::>(); + room_ids.dedup(); + room_ids + } + + /// Returns the full room state. + pub fn room_state(&self, room_id: &RoomId) -> Result> { + let mut hashmap = HashMap::new(); + for pdu in self + .roomstateid_pdu + .scan_prefix(&room_id.to_string().as_bytes()) + .values() + .map(|value| Ok::<_, Error>(serde_json::from_slice::(&value?)?)) + { + let pdu = pdu?; + hashmap.insert( + ( + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("state events have a state key"), + ), + pdu, + ); + } + Ok(hashmap) + } + + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + Ok(self + .eventid_pduid + .get(event_id.to_string().as_bytes())? + .map(|pdu_id| { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) + })) + } + + /// Returns the json of a pdu. + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.to_string().as_bytes())? + .map_or(Ok(None), |pdu_id| { + Ok(serde_json::from_slice( + &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( + "eventid_pduid points to nonexistent pdu", + ))?, + )?) + .map(Some) + }) + } + + /// Returns the leaf pdus of a room. 
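// Editor's sketch, not part of this patch: a pdu id in `pduid_pdu` is
// `room_id ++ 0xff ++ count` with the count as 8 big-endian bytes; `get_pdu_count` above
// recovers the count from the tail of the key, roughly like this:
fn pdu_count_from_pdu_id(pdu_id: &[u8]) -> Option<u64> {
    let tail = pdu_id.get(pdu_id.len().checked_sub(8)?..)?; // last 8 bytes of the key
    let mut array = [0u8; 8];
    array.copy_from_slice(tail);
    Some(u64::from_be_bytes(array))
}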
+ pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut events = Vec::new(); + + for event in self + .roomid_pduleaves + .scan_prefix(prefix) + .values() + .map(|bytes| Ok::<_, Error>(EventId::try_from(&*utils::string_from_bytes(&bytes?)?)?)) + { + events.push(event?); + } + + Ok(events) + } + + /// Replace the leaves of a room with a new event. + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + prefix.extend_from_slice(event_id.to_string().as_bytes()); + self.roomid_pduleaves + .insert(&prefix, &*event_id.to_string())?; + + Ok(()) + } + + /// Creates a new persisted data unit and adds it to a room. + pub fn append_pdu( + &self, + room_id: RoomId, + sender: UserId, + event_type: EventType, + content: serde_json::Value, + unsigned: Option>, + state_key: Option, + globals: &super::globals::Globals, + ) -> Result { + // Is the event authorized? + if state_key.is_some() { + if let Some(pdu) = self + .room_state(&room_id)? + .get(&(EventType::RoomPowerLevels, "".to_owned())) + { + let power_levels = serde_json::from_value::>( + pdu.content.clone(), + )? + .deserialize()?; + + match event_type { + EventType::RoomMember => { + // Member events are okay for now (TODO) + } + _ if power_levels + .users + .get(&sender) + .unwrap_or(&power_levels.users_default) + <= &0.into() => + { + // Not authorized + return Err(Error::BadRequest("event not authorized")); + } + // User has sufficient power + _ => {} + } + } + } + + // prev_events are the leaves of the current graph. This method removes all leaves from the + // room and replaces them with our event + // TODO: Make sure this isn't called twice in parallel + let prev_events = self.get_pdu_leaves(&room_id)?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(self.get_pdu_json(event_id).ok()??.get("depth")?.as_u64()?)) + .max() + .unwrap_or(0_u64) + + 1; + + let mut unsigned = unsigned.unwrap_or_default(); + // TODO: Optimize this to not load the whole room state? + if let Some(state_key) = &state_key { + if let Some(prev_pdu) = self + .room_state(&room_id)? + .get(&(event_type.clone(), state_key.clone())) + { + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + } + } + + let mut pdu = PduEvent { + event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), + room_id: room_id.clone(), + sender: sender.clone(), + origin: globals.hostname().to_owned(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("this only fails many years in the future"), + kind: event_type, + content, + state_key, + prev_events, + depth: depth + .try_into() + .expect("depth can overflow and should be deprecated..."), + auth_events: Vec::new(), + redacts: None, + unsigned, + hashes: ruma_federation_api::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: HashMap::new(), + }; + + // Generate event id + pdu.event_id = EventId::try_from(&*format!( + "${}", + ruma_signatures::reference_hash(&serde_json::to_value(&pdu)?) 
+ .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are correct"); + + let mut pdu_json = serde_json::to_value(&pdu)?; + ruma_signatures::hash_and_sign_event(globals.hostname(), globals.keypair(), &mut pdu_json) + .expect("our new event can be hashed and signed"); + + self.replace_pdu_leaves(&room_id, &pdu.event_id)?; + + // Increment the last index and use that + // This is also the next_batch/since value + let index = globals.next_count()?; + + let mut pdu_id = room_id.to_string().as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&index.to_be_bytes()); + + self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; + + self.eventid_pduid + .insert(pdu.event_id.to_string(), pdu_id.clone())?; + + if let Some(state_key) = pdu.state_key { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.kind.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(state_key.to_string().as_bytes()); + self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; + } + + self.edus.room_read_set(&room_id, &sender, index)?; + + Ok(pdu.event_id) + } + + /// Returns an iterator over all PDUs in a room. + pub fn all_pdus(&self, room_id: &RoomId) -> Result>> { + self.pdus_since(room_id, 0) + } + + /// Returns an iterator over all events in a room that happened after the event with id `since`. + pub fn pdus_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result>> { + // Create the first part of the full pdu id + let mut pdu_id = room_id.to_string().as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&(since).to_be_bytes()); + + self.pdus_since_pduid(room_id, &pdu_id) + } + + /// Returns an iterator over all events in a room that happened after the event with id `since`. + pub fn pdus_since_pduid( + &self, + room_id: &RoomId, + pdu_id: &[u8], + ) -> Result>> { + // Create the first part of the full pdu id + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + Ok(self + .pduid_pdu + .range(pdu_id..) + // Skip the first pdu if it's exactly at since, because we sent that last time + .skip(if self.pduid_pdu.get(pdu_id)?.is_some() { + 1 + } else { + 0 + }) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + } + + /// Returns an iterator over all events in a room that happened before the event with id + /// `until` in reverse-chronological order. + pub fn pdus_until( + &self, + room_id: &RoomId, + until: u64, + ) -> impl Iterator> { + // Create the first part of the full pdu id + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut current = prefix.clone(); + current.extend_from_slice(&until.to_be_bytes()); + + let current: &[u8] = ¤t; + + self.pduid_pdu + .range(..current) + .rev() + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + } + + /// Makes a user join a room. + pub fn join( + &self, + room_id: &RoomId, + user_id: &UserId, + displayname: Option, + globals: &super::globals::Globals, + ) -> Result<()> { + if !self.exists(room_id)? 
{ + return Err(Error::BadRequest("room does not exist")); + } + + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); + + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invited.remove(&userroom_id)?; + self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_left.remove(&userroom_id)?; + + let mut content = json!({"membership": "join"}); + if let Some(displayname) = displayname { + content + .as_object_mut() + .unwrap() + .insert("displayname".to_owned(), displayname.into()); + } + + self.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + content, + None, + Some(user_id.to_string()), + globals, + )?; + + Ok(()) + } + + /// Makes a user leave a room. + pub fn leave( + &self, + sender: &UserId, + room_id: &RoomId, + user_id: &UserId, + globals: &super::globals::Globals, + ) -> Result<()> { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); + + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invited.remove(&userroom_id)?; + self.roomuserid_invited.remove(&userroom_id)?; + self.userroomid_left.insert(&userroom_id, &[])?; + + self.append_pdu( + room_id.clone(), + sender.clone(), + EventType::RoomMember, + json!({"membership": "leave"}), + None, + Some(user_id.to_string()), + globals, + )?; + + Ok(()) + } + + /// Makes a user forget a room. + pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + self.userroomid_left.remove(userroom_id)?; + + Ok(()) + } + + /// Makes a user invite another user into room. + pub fn invite( + &self, + sender: &UserId, + room_id: &RoomId, + user_id: &UserId, + globals: &super::globals::Globals, + ) -> Result<()> { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); + + self.userroomid_invited.insert(userroom_id, &[])?; + self.roomuserid_invited.insert(roomuser_id, &[])?; + + self.append_pdu( + room_id.clone(), + sender.clone(), + EventType::RoomMember, + json!({"membership": "invite"}), + None, + Some(user_id.to_string()), + globals, + )?; + + Ok(()) + } + + /// Returns an iterator over all rooms a user joined. + pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { + self.roomuserid_joined + .scan_prefix(room_id.to_string()) + .values() + .map(|key| { + Ok(UserId::try_from(&*utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userroomid is invalid"))?, + )?)?) + }) + } + + /// Returns an iterator over all rooms a user joined. 
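// Editor's sketch, not part of this patch: membership is tracked in both directions. The
// `userroomid_*` trees (user first) answer "which rooms is this user in?", the `roomuserid_*`
// trees (room first) answer "who is in this room?", and `join`/`leave`/`invite` above always
// update such a pair of keys:
fn membership_keys(user_id: &str, room_id: &str) -> (Vec<u8>, Vec<u8>) {
    let mut userroom_id = user_id.as_bytes().to_vec();
    userroom_id.push(0xff);
    userroom_id.extend_from_slice(room_id.as_bytes());

    let mut roomuser_id = room_id.as_bytes().to_vec();
    roomuser_id.push(0xff);
    roomuser_id.extend_from_slice(user_id.as_bytes());

    (userroom_id, roomuser_id)
}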
+ pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { + self.roomuserid_invited + .scan_prefix(room_id.to_string()) + .keys() + .map(|key| { + Ok(UserId::try_from(&*utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userroomid is invalid"))?, + )?)?) + }) + } + + /// Returns an iterator over all rooms a user joined. + pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { + self.userroomid_joined + .scan_prefix(user_id.to_string()) + .keys() + .map(|key| { + Ok(RoomId::try_from(&*utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userroomid is invalid"))?, + )?)?) + }) + } + + /// Returns an iterator over all rooms a user was invited to. + pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { + self.userroomid_invited + .scan_prefix(&user_id.to_string()) + .keys() + .map(|key| { + Ok(RoomId::try_from(&*utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userroomid is invalid"))?, + )?)?) + }) + } + + /// Returns an iterator over all rooms a user left. + pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { + self.userroomid_left + .scan_prefix(&user_id.to_string()) + .keys() + .map(|key| { + Ok(RoomId::try_from(&*utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userroomid is invalid"))?, + )?)?) + }) + } +} diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs new file mode 100644 index 0000000..f2db5a4 --- /dev/null +++ b/src/database/rooms/edus.rs @@ -0,0 +1,190 @@ +use crate::{utils, Result}; +use ruma_events::{collections::only::Event as EduEvent, EventJson}; +use ruma_identifiers::{RoomId, UserId}; + +pub struct RoomEdus { + pub(in super::super) roomuserid_lastread: sled::Tree, // RoomUserId = Room + User + pub(in super::super) roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId + pub(in super::super) roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count +} + +impl RoomEdus { + /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). + pub fn roomlatest_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: EduEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + // Remove old entry + if let Some(old) = self + .roomlatestid_roomlatest + .scan_prefix(&prefix) + .keys() + .rev() + .filter_map(|r| r.ok()) + .take_while(|key| key.starts_with(&prefix)) + .find(|key| { + key.rsplit(|&b| b == 0xff).next().unwrap() == user_id.to_string().as_bytes() + }) + { + // This is the old room_latest + self.roomlatestid_roomlatest.remove(old)?; + } + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xff); + room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + + self.roomlatestid_roomlatest + .insert(room_latest_id, &*serde_json::to_string(&event)?)?; + + Ok(()) + } + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. 
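// Editor's sketch, not part of this patch: `roomlatest_update` above writes read receipts
// under `room_id ++ 0xff ++ count ++ 0xff ++ user_id`, so a prefix scan per room returns them
// in send order, and the trailing user id lets the old receipt of the same user be found and
// removed. A key would be built roughly like this:
fn room_latest_key(room_id: &str, count: u64, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes()); // global counter, big-endian
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}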
+ pub fn roomlatests_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result>>> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&since.to_be_bytes()); + + Ok(self + .roomlatestid_roomlatest + .range(&*first_possible_edu..) + // Skip the first pdu if it's exactly at since, because we sent that last time + .skip( + if self + .roomlatestid_roomlatest + .get(first_possible_edu)? + .is_some() + { + 1 + } else { + 0 + }, + ) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + } + + /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. + pub fn roomlatests_all( + &self, + room_id: &RoomId, + ) -> Result>>> { + self.roomlatests_since(room_id, 0) + } + + /// Adds an event that will be saved until the `timeout` timestamp (e.g. typing notifications). + pub fn roomactive_add( + &self, + event: EduEvent, + room_id: &RoomId, + timeout: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + // Cleanup all outdated edus before inserting a new one + for outdated_edu in self + .roomactiveid_roomactive + .scan_prefix(&prefix) + .keys() + .filter_map(|r| r.ok()) + .take_while(|k| { + utils::u64_from_bytes( + k.split(|&c| c == 0xff) + .nth(1) + .expect("roomactive has valid timestamp and delimiters"), + ) < utils::millis_since_unix_epoch() + }) + { + // This is an outdated edu (time > timestamp) + self.roomlatestid_roomlatest.remove(outdated_edu)?; + } + + let mut room_active_id = prefix; + room_active_id.extend_from_slice(&timeout.to_be_bytes()); + room_active_id.push(0xff); + room_active_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + + self.roomactiveid_roomactive + .insert(room_active_id, &*serde_json::to_string(&event)?)?; + + Ok(()) + } + + /// Removes an active event manually (before the timeout is reached). + pub fn roomactive_remove(&self, event: EduEvent, room_id: &RoomId) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let json = serde_json::to_string(&event)?; + + // Remove outdated entries + for outdated_edu in self + .roomactiveid_roomactive + .scan_prefix(&prefix) + .filter_map(|r| r.ok()) + .filter(|(_, v)| v == json.as_bytes()) + { + self.roomactiveid_roomactive.remove(outdated_edu.0)?; + } + + Ok(()) + } + + /// Returns an iterator over all active events (e.g. typing notifications). + pub fn roomactives_all( + &self, + room_id: &RoomId, + ) -> impl Iterator>> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_active_edu = prefix.clone(); + first_active_edu.extend_from_slice(&utils::millis_since_unix_epoch().to_be_bytes()); + + self.roomactiveid_roomactive + .range(first_active_edu..) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + } + + /// Sets a private read marker at `count`. + pub fn room_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + self.roomuserid_lastread.insert(key, &count.to_be_bytes())?; + + Ok(()) + } + + /// Returns the private read marker. 
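// Editor's sketch, not part of this patch: typing notifications are keyed as
// `room_id ++ 0xff ++ timeout_ms ++ 0xff ++ count`. Because the expiry timestamp follows the
// room prefix directly, expired entries sort before "now", which is why `roomactive_add`
// can drop them in one forward scan and `roomactives_all` starts its range at the current
// time. A key would look roughly like this:
fn room_active_key(room_id: &str, timeout_ms: u64, count: u64) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&timeout_ms.to_be_bytes()); // expiry timestamp, big-endian
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes()); // global counter keeps the key unique
    key
}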
+ pub fn room_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + Ok(self + .roomuserid_lastread + .get(key)? + .map(|v| utils::u64_from_bytes(&v))) + } +} diff --git a/src/database/users.rs b/src/database/users.rs new file mode 100644 index 0000000..e3bf1d0 --- /dev/null +++ b/src/database/users.rs @@ -0,0 +1,144 @@ +use crate::{utils, Error, Result}; +use ruma_identifiers::UserId; +use std::convert::TryFrom; + +pub struct Users { + pub(super) userid_password: sled::Tree, + pub(super) userid_displayname: sled::Tree, + pub(super) userid_avatarurl: sled::Tree, + pub(super) userdeviceid: sled::Tree, + pub(super) userdeviceid_token: sled::Tree, + pub(super) token_userid: sled::Tree, +} + +impl Users { + /// Check if a user has an account on this homeserver. + pub fn exists(&self, user_id: &UserId) -> Result { + Ok(self.userid_password.contains_key(user_id.to_string())?) + } + + /// Create a new user account on this homeserver. + pub fn create(&self, user_id: &UserId, hash: &str) -> Result<()> { + self.userid_password.insert(user_id.to_string(), hash)?; + Ok(()) + } + + /// Find out which user an access token belongs to. + pub fn find_from_token(&self, token: &str) -> Result> { + self.token_userid.get(token)?.map_or(Ok(None), |bytes| { + utils::string_from_bytes(&bytes) + .and_then(|string| Ok(UserId::try_from(string)?)) + .map(Some) + }) + } + + /// Returns an iterator over all users on this homeserver. + pub fn iter(&self) -> impl Iterator> { + self.userid_password.iter().keys().map(|r| { + utils::string_from_bytes(&r?).and_then(|string| Ok(UserId::try_from(&*string)?)) + }) + } + + /// Returns the password hash for the given user. + pub fn password_hash(&self, user_id: &UserId) -> Result> { + self.userid_password + .get(user_id.to_string())? + .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + } + + /// Returns the displayname of a user on this homeserver. + pub fn displayname(&self, user_id: &UserId) -> Result> { + self.userid_displayname + .get(user_id.to_string())? + .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + } + + /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. + pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + if let Some(displayname) = displayname { + self.userid_displayname + .insert(user_id.to_string(), &*displayname)?; + } else { + self.userid_displayname.remove(user_id.to_string())?; + } + + Ok(()) + /* TODO: + for room_id in self.rooms_joined(user_id) { + self.pdu_append( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "join", "displayname": displayname}), + None, + Some(user_id.to_string()), + ); + } + */ + } + + /// Get a the avatar_url of a user. + pub fn avatar_url(&self, user_id: &UserId) -> Result> { + self.userid_avatarurl + .get(user_id.to_string())? + .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + if let Some(avatar_url) = avatar_url { + self.userid_avatarurl + .insert(user_id.to_string(), &*avatar_url)?; + } else { + self.userid_avatarurl.remove(user_id.to_string())?; + } + + Ok(()) + } + + /// Adds a new device to a user. 
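// Editor's sketch, not part of this patch, using in-memory maps instead of sled trees:
// `create_device`/`set_token` below keep two mappings in sync — `user_id ++ 0xff ++ device_id`
// to access token, and the reverse token-to-user index that `find_from_token` reads — and the
// reverse entry of a replaced token has to be dropped first, roughly like this:
fn set_token_sketch(
    userdeviceid_token: &mut std::collections::HashMap<Vec<u8>, String>,
    token_userid: &mut std::collections::HashMap<String, String>,
    user_id: &str,
    device_id: &str,
    token: &str,
) {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(device_id.as_bytes());

    // Inserting returns the previous token, whose reverse entry is now stale.
    if let Some(old_token) = userdeviceid_token.insert(key, token.to_owned()) {
        token_userid.remove(&old_token);
    }
    token_userid.insert(token.to_owned(), user_id.to_owned());
}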
+ pub fn create_device(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + if !self.exists(user_id)? { + return Err(Error::BadRequest( + "tried to create device for nonexistent user", + )); + } + + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + + self.userdeviceid.insert(key, &[])?; + + self.set_token(user_id, device_id, token)?; + + Ok(()) + } + + /// Replaces the access token of one device. + pub fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + + if self.userdeviceid.get(&key)?.is_none() { + return Err(Error::BadRequest( + "Tried to set token for nonexistent device", + )); + } + + // Remove old token + if let Some(old_token) = self.userdeviceid_token.get(&key)? { + self.token_userid.remove(old_token)?; + // It will be removed from userdeviceid_token by the insert later + } + + // Assign token to device_id + self.userdeviceid_token.insert(key, &*token)?; + + // Assign token to user + self.token_userid.insert(token, &*user_id.to_string())?; + + Ok(()) + } +} diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 0000000..71fd918 --- /dev/null +++ b/src/error.rs @@ -0,0 +1,36 @@ +use thiserror::Error; + +pub type Result = std::result::Result; + +#[derive(Error, Debug)] +pub enum Error { + #[error("problem with the database")] + SledError { + #[from] + source: sled::Error, + }, + #[error("tried to parse invalid string")] + StringFromBytesError { + #[from] + source: std::string::FromUtf8Error, + }, + #[error("tried to parse invalid identifier")] + SerdeJsonError { + #[from] + source: serde_json::Error, + }, + #[error("tried to parse invalid identifier")] + RumaIdentifierError { + #[from] + source: ruma_identifiers::Error, + }, + #[error("tried to parse invalid event")] + RumaEventError { + #[from] + source: ruma_events::InvalidEvent, + }, + #[error("bad request")] + BadRequest(&'static str), + #[error("problem in that database")] + BadDatabase(&'static str), +} diff --git a/src/main.rs b/src/main.rs index db97599..3452423 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,8 +1,9 @@ #![feature(proc_macro_hygiene, decl_macro)] +#![warn(rust_2018_idioms)] mod client_server; -mod data; mod database; +mod error; mod pdu; mod ruma_wrapper; mod server_server; @@ -11,8 +12,8 @@ mod utils; #[cfg(test)] mod test; -pub use data::Data; pub use database::Database; +pub use error::{Error, Result}; pub use pdu::PduEvent; pub use ruma_wrapper::{MatrixResult, Ruma}; @@ -75,7 +76,7 @@ fn setup_rocket() -> rocket::Rocket { ) .attach(AdHoc::on_attach("Config", |rocket| { let hostname = rocket.config().get_str("hostname").unwrap_or("localhost"); - let data = Data::load_or_create(&hostname); + let data = Database::load_or_create(&hostname); Ok(rocket.manage(data)) })) @@ -86,7 +87,6 @@ fn main() { if let Err(_) = std::env::var("RUST_LOG") { std::env::set_var("RUST_LOG", "warn"); } - pretty_env_logger::init(); setup_rocket().launch().unwrap(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 753edea..7568573 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -27,21 +27,21 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { type Borrowed = Self::Owned; fn transform<'r>( - _req: &'r Request, + _req: &'r Request<'_>, data: Data, ) -> TransformFuture<'r, Self::Owned, Self::Error> { Box::pin(async move { Transform::Owned(Success(data)) 
}) } fn from_data( - request: &'a Request, + request: &'a Request<'_>, outcome: Transformed<'a, Self>, ) -> FromDataFuture<'a, Self, Self::Error> { Box::pin(async move { let data = rocket::try_outcome!(outcome.owned()); let user_id = if T::METADATA.requires_authentication { - let data = request.guard::>().await.unwrap(); + let db = request.guard::>().await.unwrap(); // Get token from header or query value let token = match request @@ -56,7 +56,7 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { }; // Check if token is valid - match data.user_from_token(&token) { + match db.users.find_from_token(&token).unwrap() { // TODO: M_UNKNOWN_TOKEN None => return Failure((Status::Unauthorized, ())), Some(user_id) => Some(user_id), diff --git a/src/server_server.rs b/src/server_server.rs index 394757a..bb43957 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ -use crate::{Data, MatrixResult}; +use crate::{Database, MatrixResult}; use http::header::{HeaderValue, AUTHORIZATION}; use log::error; -use rocket::{get, post, put, response::content::Json, State}; +use rocket::{get, response::content::Json, State}; use ruma_api::Endpoint; use ruma_client_api::error::Error; use ruma_federation_api::{v1::get_server_version, v2::get_server_keys}; @@ -12,9 +12,9 @@ use std::{ time::{Duration, SystemTime}, }; -pub async fn request_well_known(data: &crate::Data, destination: &str) -> Option { +pub async fn request_well_known(db: &crate::Database, destination: &str) -> Option { let body: serde_json::Value = serde_json::from_str( - &data + &db.globals .reqwest_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -32,14 +32,14 @@ pub async fn request_well_known(data: &crate::Data, destination: &str) -> Option } pub async fn send_request( - data: &crate::Data, + db: &crate::Database, destination: String, request: T, ) -> Option { let mut http_request: http::Request<_> = request.try_into().unwrap(); let actual_destination = "https://".to_owned() - + &request_well_known(data, &destination) + + &request_well_known(db, &destination) .await .unwrap_or(destination.clone() + ":8448"); *http_request.uri_mut() = (actual_destination + T::METADATA.path).parse().unwrap(); @@ -55,11 +55,11 @@ pub async fn send_request( request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); request_map.insert("uri".to_owned(), T::METADATA.path.into()); - request_map.insert("origin".to_owned(), data.hostname().into()); + request_map.insert("origin".to_owned(), db.globals.hostname().into()); request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); - ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut request_json).unwrap(); + ruma_signatures::sign_json(db.globals.hostname(), db.globals.keypair(), &mut request_json).unwrap(); let signatures = request_json["signatures"] .as_object() @@ -77,7 +77,7 @@ pub async fn send_request( AUTHORIZATION, HeaderValue::from_str(&format!( "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - data.hostname(), + db.globals.hostname(), s.0, s.1 )) @@ -85,7 +85,7 @@ pub async fn send_request( ); } - let reqwest_response = data.reqwest_client().execute(http_request.into()).await; + let reqwest_response = db.globals.reqwest_client().execute(http_request.into()).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -120,7 +120,7 @@ pub async fn send_request( } #[get("/.well-known/matrix/server")] -pub fn well_known_server(data: State) -> Json { +pub fn 
well_known_server() -> Json { rocket::response::content::Json( json!({ "m.server": "matrixtesting.koesters.xyz:14004"}).to_string(), ) @@ -137,17 +137,17 @@ pub fn get_server_version() -> MatrixResult } #[get("/_matrix/key/v2/server")] -pub fn get_server_keys(data: State) -> Json { +pub fn get_server_keys(db: State<'_, Database>) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( - format!("ed25519:{}", data.keypair().version()), + format!("ed25519:{}", db.globals.keypair().version()), get_server_keys::VerifyKey { - key: base64::encode_config(data.keypair().public_key(), base64::STANDARD_NO_PAD), + key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, ); let mut response = serde_json::from_slice( http::Response::try_from(get_server_keys::Response { - server_name: data.hostname().to_owned(), + server_name: db.globals.hostname().to_owned(), verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), @@ -157,11 +157,11 @@ pub fn get_server_keys(data: State) -> Json { .body(), ) .unwrap(); - ruma_signatures::sign_json(data.hostname(), data.keypair(), &mut response).unwrap(); + ruma_signatures::sign_json(db.globals.hostname(), db.globals.keypair(), &mut response).unwrap(); Json(response.to_string()) } #[get("/_matrix/key/v2/server/<_key_id>")] -pub fn get_server_keys_deprecated(data: State, _key_id: String) -> Json { - get_server_keys(data) +pub fn get_server_keys_deprecated(db: State<'_, Database>, _key_id: String) -> Json { + get_server_keys(db) } diff --git a/src/test.rs b/src/test.rs index 9f56214..1df3a9d 100644 --- a/src/test.rs +++ b/src/test.rs @@ -1,8 +1,6 @@ use super::*; -use rocket::{http::Status, local::Client}; -use ruma_client_api::error::ErrorKind; +use rocket::local::Client; use serde_json::{json, Value}; -use std::time::Duration; fn setup_client() -> Client { Database::try_remove("localhost"); diff --git a/src/utils.rs b/src/utils.rs index a360036..45b9b08 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,3 +1,4 @@ +use crate::Result; use argon2::{Config, Variant}; use rand::prelude::*; use std::{ @@ -32,13 +33,15 @@ pub fn generate_keypair(old: Option<&[u8]>) -> Option> { ) } +/// Parses the bytes into an u64. pub fn u64_from_bytes(bytes: &[u8]) -> u64 { let array: [u8; 8] = bytes.try_into().expect("bytes are valid u64"); u64::from_be_bytes(array) } -pub fn string_from_bytes(bytes: &[u8]) -> String { - String::from_utf8(bytes.to_vec()).expect("bytes are valid utf8") +/// Parses the bytes into a string. +pub fn string_from_bytes(bytes: &[u8]) -> Result { + Ok(String::from_utf8(bytes.to_vec())?) 
} pub fn random_string(length: usize) -> String { @@ -49,7 +52,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -pub fn calculate_hash(password: &str) -> Result { +pub fn calculate_hash(password: &str) -> std::result::Result { let hashing_config = Config { variant: Variant::Argon2id, ..Default::default() From 551308e9a80b249fdc8d91a6df9d2c7d60040640 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 8 May 2020 21:13:52 +0200 Subject: [PATCH 0074/1727] Update dependencies and send displayname updates again --- Cargo.lock | 154 +++++++++++++++++++++++------------------- Cargo.toml | 12 ++-- src/client_server.rs | 40 +++++++---- src/database.rs | 6 +- src/database/rooms.rs | 5 +- src/database/users.rs | 18 +---- 6 files changed, 123 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 642e805..1f1dc94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,9 +24,9 @@ version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -69,9 +69,9 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "base64" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" +checksum = "53d1ccbaf7d9ec9537465a97bf19edc1a4e158ecb49fc16178202238c569cc42" [[package]] name = "bitflags" @@ -133,7 +133,7 @@ dependencies = [ name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.0", + "base64 0.12.1", "directories", "http", "js_int", @@ -143,6 +143,7 @@ dependencies = [ "rocket", "ruma-api", "ruma-client-api", + "ruma-common", "ruma-events", "ruma-federation-api", "ruma-identifiers", @@ -247,9 +248,9 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -391,9 +392,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -449,9 +450,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" dependencies = [ "bytes", "fnv", @@ -705,9 +706,9 @@ dependencies = [ [[package]] name = "mio-uds" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", @@ -746,9 +747,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +checksum = 
"2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", @@ -793,9 +794,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.55" +version = "0.9.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" +checksum = "f02309a7f127000ed50594f0b50ecc69e7c654e16d41b4e8156d1b3df8e0b52e" dependencies = [ "autocfg", "cc", @@ -864,29 +865,29 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f6a7f5eee6292c559c793430c55c00aea9d3b3d1905e855806ca4d7253426a2" +checksum = "82c3bfbfb5bb42f99498c7234bbd768c220eb0cea6818259d0d18a1aa3d2595d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" +checksum = "ccbf6449dcfb18562c015526b085b8df1aa3cdab180af8ec2ebd300a3bd28f63" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" +checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" [[package]] name = "pin-utils" @@ -929,9 +930,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" +checksum = "8872cf6f48eee44265156c111456a700ab3483686b3f96df4cf5481c89157319" dependencies = [ "unicode-xid 0.2.0", ] @@ -951,7 +952,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c1f4b0efa5fc5e8ceb705136bfee52cfdb6a4e3509f770b478cd6ed434232a7" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", ] [[package]] @@ -1132,55 +1133,68 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.16.0-rc.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c296a951625ccc8c04d5188f1791d1628503c8614073a05833af9fed18b029c1" +checksum = "f4d1f23ec408993a39acb852a311a5469422b0c087a6497efeb0ad9c04ad72db" dependencies = [ "http", "percent-encoding 2.1.0", "ruma-api-macros", "ruma-identifiers", + "ruma-serde", "serde", "serde_json", - "serde_urlencoded", "strum", ] [[package]] name = "ruma-api-macros" -version = "0.16.0-rc.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6b02a6a860a96e3c2081c8aea88b37b2918b53e539856b73aadde1908b65ad" +checksum = "8ae19a3485b607be10c07f0e6a1c672e8cc2693a50a29f25195f7034dbe7859c" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] name = "ruma-client-api" -version = "0.8.0-rc.5" -source = "git+https://github.com/ruma/ruma-client-api.git#06f83742506e06d6d2731667eb9fa081654455cf" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"84a57433fc6dded259aef2b12ceb91d78d3607b182278f648edd5c19c23d81cd" dependencies = [ "http", "js_int", "ruma-api", + "ruma-common", "ruma-events", "ruma-identifiers", "ruma-serde", "serde", "serde_json", "strum", - "url", +] + +[[package]] +name = "ruma-common" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235f2fed35114ef3fbff1b7d5350f22ac712cffff55cc7b7732d39ae4adf6966" +dependencies = [ + "ruma-serde", + "serde", + "serde_json", ] [[package]] name = "ruma-events" -version = "0.21.0-beta.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4802476bbe517f2ac6cb7b1cf4869d54586c10e86e2ddc00806cafa32a96553" +checksum = "ca4eff279e18bdb3daba46381eb08ac5b2d4ab471271b8c2f597073dcbadc415" dependencies = [ "js_int", + "ruma-common", "ruma-events-macros", "ruma-identifiers", "ruma-serde", @@ -1190,13 +1204,13 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.21.0-beta.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd3cfe96c9887fe2eebfa2e5e7d3a4afff02c374874d4e718f46dab5fd3320d" +checksum = "2e32130570495f21f922ffa0127438228d475e5f8dfcc201d77b8e717ce1c0f0" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1215,19 +1229,18 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee548c5dbb5a92a93bb435fd3b66484cd19b0f37450c9b93677338cbe9550d2" +checksum = "77c93b9d5f951a2fb57b19c048a05ac1dbdb280ff7617ec6b02f54bf14318ed8" dependencies = [ - "rand", "serde", ] [[package]] name = "ruma-serde" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce9a52acce7ed3809e1b47d9cc67ee93972a2b0fedaaa76d6e794456a79858b" +checksum = "e14edc0e2f5177c419e3b89060b1e94fb3af81b2f253783ac6967f14a7ec3911" dependencies = [ "dtoa", "itoa", @@ -1242,7 +1255,7 @@ name = "ruma-signatures" version = "0.6.0-dev.1" source = "git+https://github.com/ruma/ruma-signatures.git#1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" dependencies = [ - "base64 0.12.0", + "base64 0.12.1", "ring", "serde_json", "untrusted", @@ -1266,7 +1279,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.12.0", + "base64 0.12.1", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1319,9 +1332,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f331b9025654145cd425b9ded0caf8f5ae0df80d418b326e2dc1c3dc5eb0620" +checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" dependencies = [ "bitflags", "core-foundation", @@ -1355,9 +1368,9 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1449,9 +1462,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.10", + 
"proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1467,11 +1480,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410a7488c0a728c7ceb4ad59b9567eb4053d02e8cc7f5c0e0eeeb39518369213" +checksum = "e8e5aa70697bb26ee62214ae3288465ecec0000f05182f039b477001f08f5ae7" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", "unicode-xid 0.2.0", ] @@ -1505,9 +1518,9 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f34e0c1caaa462fd840ec6b768946ea1e7842620d94fe29d5b847138f521269" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1549,9 +1562,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1568,9 +1581,9 @@ dependencies = [ [[package]] name = "tokio-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bde02a3a5291395f59b06ec6945a3077602fac2b07eeeaf0dee2122f3619828" +checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", "tokio", @@ -1671,7 +1684,6 @@ dependencies = [ "idna", "matches", "percent-encoding 2.1.0", - "serde", ] [[package]] @@ -1729,9 +1741,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", "wasm-bindgen-shared", ] @@ -1763,9 +1775,9 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.4", - "syn 1.0.18", + "syn 1.0.19", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 4aa2f15..217f58e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ description = "A Matrix homeserver written in Rust" license = "AGPL-3.0" authors = ["timokoesters "] homepage = "https://conduit.rs" +repository = "https://git.koesters.xyz/timo/conduit" readme = "README.md" version = "0.1.0" edition = "2018" @@ -13,10 +14,10 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } -ruma-identifiers = { version = "0.16.0", features = ["rand"] } -ruma-api = "0.16.0-rc.3" -ruma-events = "0.21.0-beta.1" +ruma-client-api = "0.8.0" +ruma-identifiers = "0.16.1" +ruma-api = "0.16.0" +ruma-events = "0.21.0" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } log = "0.4.8" @@ -29,5 +30,6 @@ tokio = { version = "0.2.20", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" -base64 = "0.12.0" +base64 = "0.12.1" thiserror = "1.0.16" +ruma-common = "0.1.1" diff --git a/src/client_server.rs b/src/client_server.rs index 3d63ffd..09164db 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -12,7 +12,7 @@ use ruma_client_api::{ 
account::{get_username_availability, register}, alias::get_alias, capabilities::get_capabilities, - client_exchange::send_event_to_device, + to_device::send_event_to_device, config::{get_global_account_data, set_global_account_data}, directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, @@ -42,7 +42,7 @@ use ruma_client_api::{ }; use ruma_events::{collections::only::Event as EduEvent, EventType}; use ruma_identifiers::{RoomId, UserId}; -use serde_json::json; +use serde_json::{json, value::RawValue}; use crate::{server_server, utils, Database, MatrixResult, Ruma}; @@ -105,7 +105,7 @@ pub fn register_route( stages: vec!["m.login.dummy".to_owned()], }], completed: vec![], - params: json!({}), + params: RawValue::from_string("".to_owned()).unwrap(), session: Some(utils::random_string(SESSION_ID_LENGTH)), auth_error: None, }))); @@ -185,7 +185,7 @@ pub fn register_route( actions: vec![ ruma_events::push_rules::Action::Notify, ruma_events::push_rules::Action::SetTweak( - ruma_events::push_rules::Tweak::Highlight { value: false }, + ruma_common::push::Tweak::Highlight(false), ), ], default: true, @@ -318,10 +318,7 @@ pub fn get_pushrules_all_route() -> MatrixResult { vec![push::PushRule { actions: vec![ push::Action::Notify, - push::Action::SetTweak { - kind: push::TweakKind::Highlight, - value: Some(false.into()), - }, + push::Action::SetTweak(ruma_common::push::Tweak::Highlight(false)) ], default: true, enabled: true, @@ -363,7 +360,7 @@ pub fn set_pushrule_route( actions: vec![ ruma_events::push_rules::Action::Notify, ruma_events::push_rules::Action::SetTweak( - ruma_events::push_rules::Tweak::Highlight { value: false }, + ruma_common::push::Tweak::Highlight(false), ), ], default: true, @@ -467,8 +464,21 @@ pub fn set_displayname_route( .users .set_displayname(&user_id, Some(displayname.clone())) .unwrap(); - // TODO: send a new m.presence event with the updated displayname } + + // Send a new membership event into all joined rooms + for room_id in db.rooms.rooms_joined(&user_id) { + db.rooms.append_pdu( + room_id.unwrap(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "join", "displayname": displayname}), + None, + Some(user_id.to_string()), + &db.globals + ).unwrap(); + } + // TODO: send a new m.presence event } else { // Send error on None // Synapse returns a parsing error but the spec doesn't require this @@ -734,7 +744,7 @@ pub fn create_room_route( body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::new(db.globals.hostname()).expect("host is valid"); + let room_id = RoomId::try_from(db.globals.hostname()).expect("host is valid"); let user_id = body.user_id.clone().expect("user is authenticated"); db @@ -1104,7 +1114,7 @@ pub fn create_message_event_route( ) .expect("message events are always okay"); - MatrixResult(Ok(create_message_event::Response { event_id })) + MatrixResult(Ok(create_message_event::Response { event_id: Some(event_id) })) } #[put( @@ -1134,7 +1144,7 @@ pub fn create_state_event_for_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) + MatrixResult(Ok(create_state_event_for_key::Response { event_id: Some(event_id) })) } #[put( @@ -1163,7 +1173,7 @@ pub fn create_state_event_for_empty_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id: Some(event_id) })) } #[get("/_matrix/client/r0/sync", data = "")] @@ -1171,7 +1181,7 
@@ pub fn sync_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - std::thread::sleep(Duration::from_millis(100)); + std::thread::sleep(Duration::from_millis(1500)); let user_id = body.user_id.clone().expect("user is authenticated"); let next_batch = db.globals.current_count().unwrap().to_string(); diff --git a/src/database.rs b/src/database.rs index 47f0a56..76109c9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -40,7 +40,7 @@ impl Database { globals: globals::Globals::load(db.open_tree("global").unwrap(), hostname.to_owned()), users: users::Users { userid_password: db.open_tree("userid_password").unwrap(), - userdeviceid: db.open_tree("userdeviceid").unwrap(), + userdeviceids: db.open_tree("userdeviceids").unwrap(), userid_displayname: db.open_tree("userid_displayname").unwrap(), userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), @@ -49,8 +49,8 @@ impl Database { rooms: rooms::Rooms { edus: rooms::RoomEdus { roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), - roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), - roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), + roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), // Read receipts + roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), // Typing notifs }, pduid_pdu: db.open_tree("pduid_pdu").unwrap(), eventid_pduid: db.open_tree("eventid_pduid").unwrap(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f52888c..1130682 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -17,7 +17,7 @@ pub struct Rooms { pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count pub(super) eventid_pduid: sled::Tree, pub(super) roomid_pduleaves: sled::Tree, - pub(super) roomstateid_pdu: sled::Tree, // Room + StateType + StateKey + pub(super) roomstateid_pdu: sled::Tree, // RoomStateId = Room + StateType + StateKey pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, @@ -29,11 +29,10 @@ pub struct Rooms { impl Rooms { /// Checks if a room exists. pub fn exists(&self, room_id: &RoomId) -> Result { - // Look for PDUs in that room. - let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); + // Look for PDUs in that room. Ok(self .pduid_pdu .get_gt(&prefix)? diff --git a/src/database/users.rs b/src/database/users.rs index e3bf1d0..529da91 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -6,7 +6,7 @@ pub struct Users { pub(super) userid_password: sled::Tree, pub(super) userid_displayname: sled::Tree, pub(super) userid_avatarurl: sled::Tree, - pub(super) userdeviceid: sled::Tree, + pub(super) userdeviceids: sled::Tree, pub(super) userdeviceid_token: sled::Tree, pub(super) token_userid: sled::Tree, } @@ -63,18 +63,6 @@ impl Users { } Ok(()) - /* TODO: - for room_id in self.rooms_joined(user_id) { - self.pdu_append( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - json!({"membership": "join", "displayname": displayname}), - None, - Some(user_id.to_string()), - ); - } - */ } /// Get a the avatar_url of a user. 
@@ -108,7 +96,7 @@ impl Users { key.push(0xff); key.extend_from_slice(device_id.as_bytes()); - self.userdeviceid.insert(key, &[])?; + self.userdeviceids.insert(key, &[])?; self.set_token(user_id, device_id, token)?; @@ -121,7 +109,7 @@ impl Users { key.push(0xff); key.extend_from_slice(device_id.as_bytes()); - if self.userdeviceid.get(&key)?.is_none() { + if self.userdeviceids.get(&key)?.is_none() { return Err(Error::BadRequest( "Tried to set token for nonexistent device", )); From ee0d6940bdf1b0f47008ab3522fb432480d9f4b4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 9 May 2020 21:47:09 +0200 Subject: [PATCH 0075/1727] feat: presence updates --- src/client_server.rs | 387 +++++++++++++++++++----------------- src/database.rs | 10 +- src/database/global_edus.rs | 68 +++++++ src/database/rooms/edus.rs | 8 - src/server_server.rs | 13 +- 5 files changed, 293 insertions(+), 193 deletions(-) create mode 100644 src/database/global_edus.rs diff --git a/src/client_server.rs b/src/client_server.rs index 09164db..d740ab5 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -12,7 +12,6 @@ use ruma_client_api::{ account::{get_username_availability, register}, alias::get_alias, capabilities::get_capabilities, - to_device::send_event_to_device, config::{get_global_account_data, set_global_account_data}, directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, @@ -34,13 +33,14 @@ use ruma_client_api::{ state::{create_state_event_for_empty_key, create_state_event_for_key}, sync::sync_events, thirdparty::get_protocols, + to_device::send_event_to_device, typing::create_typing_event, uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, user_directory::search_users, }, unversioned::get_supported_versions, }; -use ruma_events::{collections::only::Event as EduEvent, EventType}; +use ruma_events::{collections::only::Event as EduEvent, EventJson, EventType}; use ruma_identifiers::{RoomId, UserId}; use serde_json::{json, value::RawValue}; @@ -165,45 +165,45 @@ pub fn register_route( let token = utils::random_string(TOKEN_LENGTH); // Add device - db - .users + db.users .create_device(&user_id, &device_id, &token) .unwrap(); // Initial data - db.account_data.update( - None, - &user_id, - EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { - content: ruma_events::push_rules::PushRulesEventContent { - global: ruma_events::push_rules::Ruleset { - content: vec![], - override_rules: vec![], - room: vec![], - sender: vec![], - underride: vec![ruma_events::push_rules::ConditionalPushRule { - actions: vec![ - ruma_events::push_rules::Action::Notify, - ruma_events::push_rules::Action::SetTweak( - ruma_common::push::Tweak::Highlight(false), - ), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( - ruma_events::push_rules::EventMatchCondition { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - )], - }], + db.account_data + .update( + None, + &user_id, + EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + content: ruma_events::push_rules::PushRulesEventContent { + global: ruma_events::push_rules::Ruleset { + content: vec![], + override_rules: vec![], + room: vec![], + sender: vec![], + underride: vec![ruma_events::push_rules::ConditionalPushRule { + actions: vec![ + ruma_events::push_rules::Action::Notify, + ruma_events::push_rules::Action::SetTweak( + ruma_common::push::Tweak::Highlight(false), + ), + ], + default: true, + enabled: true, + 
rule_id: ".m.rule.message".to_owned(), + conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( + ruma_events::push_rules::EventMatchCondition { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }, + )], + }], + }, }, - }, - }), - &db.globals, - ) - .unwrap(); + }), + &db.globals, + ) + .unwrap(); MatrixResult(Ok(register::Response { access_token: Some(token), @@ -220,7 +220,10 @@ pub fn get_login_route() -> MatrixResult { } #[post("/_matrix/client/r0/login", data = "")] -pub fn login_route(db: State<'_, Database>, body: Ruma) -> MatrixResult { +pub fn login_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { // Validate login method let user_id = if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = @@ -280,8 +283,7 @@ pub fn login_route(db: State<'_, Database>, body: Ruma) -> Matri let token = utils::random_string(TOKEN_LENGTH); // Add device - db - .users + db.users .create_device(&user_id, &device_id, &token) .unwrap(); @@ -318,7 +320,7 @@ pub fn get_pushrules_all_route() -> MatrixResult { vec![push::PushRule { actions: vec![ push::Action::Notify, - push::Action::SetTweak(ruma_common::push::Tweak::Highlight(false)) + push::Action::SetTweak(ruma_common::push::Tweak::Highlight(false)), ], default: true, enabled: true, @@ -346,39 +348,40 @@ pub fn set_pushrule_route( ) -> MatrixResult { // TODO let user_id = body.user_id.clone().expect("user is authenticated"); - db.account_data.update( - None, - &user_id, - EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { - content: ruma_events::push_rules::PushRulesEventContent { - global: ruma_events::push_rules::Ruleset { - content: vec![], - override_rules: vec![], - room: vec![], - sender: vec![], - underride: vec![ruma_events::push_rules::ConditionalPushRule { - actions: vec![ - ruma_events::push_rules::Action::Notify, - ruma_events::push_rules::Action::SetTweak( - ruma_common::push::Tweak::Highlight(false), - ), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( - ruma_events::push_rules::EventMatchCondition { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - )], - }], + db.account_data + .update( + None, + &user_id, + EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + content: ruma_events::push_rules::PushRulesEventContent { + global: ruma_events::push_rules::Ruleset { + content: vec![], + override_rules: vec![], + room: vec![], + sender: vec![], + underride: vec![ruma_events::push_rules::ConditionalPushRule { + actions: vec![ + ruma_events::push_rules::Action::Notify, + ruma_events::push_rules::Action::SetTweak( + ruma_common::push::Tweak::Highlight(false), + ), + ], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( + ruma_events::push_rules::EventMatchCondition { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }, + )], + }], + }, }, - }, - }), - &db.globals - ) - .unwrap(); + }), + &db.globals, + ) + .unwrap(); MatrixResult(Ok(set_pushrule::Response)) } @@ -393,9 +396,7 @@ pub fn set_pushrule_enabled_route( MatrixResult(Ok(set_pushrule_enabled::Response)) } -#[get( - "/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>", -)] +#[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] pub fn get_filter_route( _user_id: String, _filter_id: String, @@ -413,18 +414,14 @@ pub fn 
get_filter_route( } #[post("/_matrix/client/r0/user/<_user_id>/filter")] -pub fn create_filter_route( - _user_id: String, -) -> MatrixResult { +pub fn create_filter_route(_user_id: String) -> MatrixResult { // TODO MatrixResult(Ok(create_filter::Response { filter_id: utils::random_string(10), })) } -#[put( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", -)] +#[put("/_matrix/client/r0/user/<_user_id>/account_data/<_type>")] pub fn set_global_account_data_route( _user_id: String, _type: String, @@ -432,9 +429,7 @@ pub fn set_global_account_data_route( MatrixResult(Ok(set_global_account_data::Response)) } -#[get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", -)] +#[get("/_matrix/client/r0/user/<_user_id>/account_data/<_type>")] pub fn get_global_account_data_route( _user_id: String, _type: String, @@ -460,25 +455,44 @@ pub fn set_displayname_route( if displayname == "" { db.users.set_displayname(&user_id, None).unwrap(); } else { - db - .users + db.users .set_displayname(&user_id, Some(displayname.clone())) .unwrap(); } // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { - db.rooms.append_pdu( - room_id.unwrap(), - user_id.clone(), - EventType::RoomMember, - json!({"membership": "join", "displayname": displayname}), - None, - Some(user_id.to_string()), - &db.globals - ).unwrap(); + db.rooms + .append_pdu( + room_id.unwrap(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "join", "displayname": displayname}), + None, + Some(user_id.to_string()), + &db.globals, + ) + .unwrap(); } - // TODO: send a new m.presence event + + // Presence update + db.global_edus + .update_globallatest( + &user_id, + EduEvent::Presence(ruma_events::presence::PresenceEvent { + content: ruma_events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + currently_active: None, + displayname: db.users.displayname(&user_id).unwrap(), + last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + presence: ruma_events::presence::PresenceState::Online, + status_msg: None, + }, + sender: user_id.clone(), + }), + &db.globals, + ) + .unwrap(); } else { // Send error on None // Synapse returns a parsing error but the spec doesn't require this @@ -542,8 +556,7 @@ pub fn set_avatar_url_route( if body.avatar_url == "" { db.users.set_avatar_url(&user_id, None).unwrap(); } else { - db - .users + db.users .set_avatar_url(&user_id, Some(body.avatar_url.clone())) .unwrap(); // TODO send a new m.room.member join event with the updated avatar_url @@ -605,11 +618,32 @@ pub fn get_profile_route( })) } -#[put("/_matrix/client/r0/presence/<_user_id>/status")] +#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] pub fn set_presence_route( + db: State<'_, Database>, + body: Ruma, _user_id: String, ) -> MatrixResult { - // TODO + let user_id = body.user_id.clone().expect("user is authenticated"); + + db.global_edus + .update_globallatest( + &user_id, + EduEvent::Presence(ruma_events::presence::PresenceEvent { + content: ruma_events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + currently_active: None, + displayname: db.users.displayname(&user_id).unwrap(), + last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + presence: body.presence, + status_msg: body.status_msg.clone(), + }, + sender: user_id.clone(), + }), + &db.globals, + ) + .unwrap(); + MatrixResult(Ok(set_presence::Response)) } @@ -637,28 +671,27 @@ pub fn 
set_read_marker_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - db.account_data.update( - Some(&body.room_id), - &user_id, - EduEvent::FullyRead(ruma_events::fully_read::FullyReadEvent { - content: ruma_events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - room_id: Some(body.room_id.clone()), - }), - &db.globals - ) - .unwrap(); + db.account_data + .update( + Some(&body.room_id), + &user_id, + EduEvent::FullyRead(ruma_events::fully_read::FullyReadEvent { + content: ruma_events::fully_read::FullyReadEventContent { + event_id: body.fully_read.clone(), + }, + room_id: Some(body.room_id.clone()), + }), + &db.globals, + ) + .unwrap(); if let Some(event) = &body.read_receipt { - db - .rooms + db.rooms .edus .room_read_set( &body.room_id, &user_id, - db - .rooms + db.rooms .get_pdu_count(event) .unwrap() .expect("TODO: what if a client specifies an invalid event"), @@ -680,8 +713,7 @@ pub fn set_read_marker_route( }, ); - db - .rooms + db.rooms .edus .roomlatest_update( &user_id, @@ -716,8 +748,7 @@ pub fn create_typing_event_route( }); if body.typing { - db - .rooms + db.rooms .edus .roomactive_add( edu, @@ -728,11 +759,7 @@ pub fn create_typing_event_route( ) .unwrap(); } else { - db - .rooms - .edus - .roomactive_remove(edu, &body.room_id) - .unwrap(); + db.rooms.edus.roomactive_remove(edu, &body.room_id).unwrap(); } MatrixResult(Ok(create_typing_event::Response)) @@ -747,8 +774,7 @@ pub fn create_room_route( let room_id = RoomId::try_from(db.globals.hostname()).expect("host is valid"); let user_id = body.user_id.clone().expect("user is authenticated"); - db - .rooms + db.rooms .append_pdu( room_id.clone(), user_id.clone(), @@ -760,8 +786,7 @@ pub fn create_room_route( ) .unwrap(); - db - .rooms + db.rooms .join( &room_id, &user_id, @@ -770,8 +795,7 @@ pub fn create_room_route( ) .unwrap(); - db - .rooms + db.rooms .append_pdu( room_id.clone(), user_id.clone(), @@ -793,8 +817,7 @@ pub fn create_room_route( .unwrap(); if let Some(name) = &body.name { - db - .rooms + db.rooms .append_pdu( room_id.clone(), user_id.clone(), @@ -808,8 +831,7 @@ pub fn create_room_route( } if let Some(topic) = &body.topic { - db - .rooms + db.rooms .append_pdu( room_id.clone(), user_id.clone(), @@ -823,8 +845,7 @@ pub fn create_room_route( } for user in &body.invite { - db - .rooms + db.rooms .invite(&user_id, &room_id, user, &db.globals) .unwrap(); } @@ -945,8 +966,7 @@ pub fn leave_room_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.clone().expect("user is authenticated"); - db - .rooms + db.rooms .leave(&user_id, &body.room_id, &user_id, &db.globals) .unwrap(); MatrixResult(Ok(leave_room::Response)) @@ -970,8 +990,7 @@ pub fn invite_user_route( _room_id: String, ) -> MatrixResult { if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - db - .rooms + db.rooms .invite( &body.user_id.as_ref().expect("user is authenticated"), &body.room_id, @@ -1069,16 +1088,13 @@ pub fn search_users_route( } #[get("/_matrix/client/r0/rooms/<_room_id>/members")] -pub fn get_member_events_route( - _room_id: String, -) -> MatrixResult { +pub fn get_member_events_route(_room_id: String) -> MatrixResult { // TODO MatrixResult(Ok(get_member_events::Response { chunk: Vec::new() })) } #[get("/_matrix/client/r0/thirdparty/protocols")] -pub fn get_protocols_route( -) -> MatrixResult { +pub fn get_protocols_route() -> MatrixResult { // TODO MatrixResult(Ok(get_protocols::Response { protocols: 
BTreeMap::new(), @@ -1114,7 +1130,9 @@ pub fn create_message_event_route( ) .expect("message events are always okay"); - MatrixResult(Ok(create_message_event::Response { event_id: Some(event_id) })) + MatrixResult(Ok(create_message_event::Response { + event_id: Some(event_id), + })) } #[put( @@ -1144,7 +1162,9 @@ pub fn create_state_event_for_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_key::Response { event_id: Some(event_id) })) + MatrixResult(Ok(create_state_event_for_key::Response { + event_id: Some(event_id), + })) } #[put( @@ -1173,7 +1193,9 @@ pub fn create_state_event_for_empty_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id: Some(event_id) })) + MatrixResult(Ok(create_state_event_for_empty_key::Response { + event_id: Some(event_id), + })) } #[get("/_matrix/client/r0/sync", data = "")] @@ -1197,7 +1219,8 @@ pub fn sync_route( let mut pdus = db .rooms - .pdus_since(&room_id, since).unwrap() + .pdus_since(&room_id, since) + .unwrap() .map(|r| r.unwrap()) .collect::>(); @@ -1213,16 +1236,12 @@ pub fn sync_route( } } - let notification_count = if let Some(last_read) = db - .rooms - .edus - .room_read_get(&room_id, &user_id) - .unwrap() - { - Some((db.rooms.pdus_since(&room_id, last_read).unwrap().count() as u32).into()) - } else { - None - }; + let notification_count = + if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id).unwrap() { + Some((db.rooms.pdus_since(&room_id, last_read).unwrap().count() as u32).into()) + } else { + None + }; // They /sync response doesn't always return all messages, so we say the output is // limited unless there are enough events @@ -1262,10 +1281,10 @@ pub fn sync_route( } edus.extend( - db - .rooms + db.rooms .edus - .roomlatests_since(&room_id, since).unwrap() + .roomlatests_since(&room_id, since) + .unwrap() .map(|r| r.unwrap()), ); @@ -1273,8 +1292,10 @@ pub fn sync_route( room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { account_data: Some(sync_events::AccountData { - events: db.account_data - .changes_since(Some(&room_id), &user_id, since).unwrap() + events: db + .account_data + .changes_since(Some(&room_id), &user_id, since) + .unwrap() .into_iter() .map(|(_, v)| v) .collect(), @@ -1304,8 +1325,7 @@ pub fn sync_route( // TODO: state before timeline state: sync_events::State { events: if send_full_state { - db - .rooms + db.rooms .room_state(&room_id) .unwrap() .into_iter() @@ -1332,17 +1352,12 @@ pub fn sync_route( let mut edus = db .rooms .edus - .roomlatests_since(&room_id, since).unwrap() + .roomlatests_since(&room_id, since) + .unwrap() .map(|r| r.unwrap()) .collect::>(); - edus.extend( - db - .rooms - .edus - .roomactives_all(&room_id) - .map(|r| r.unwrap()), - ); + edus.extend(db.rooms.edus.roomactives_all(&room_id).map(|r| r.unwrap())); left_rooms.insert( room_id.clone().try_into().unwrap(), @@ -1363,7 +1378,8 @@ pub fn sync_route( let room_id = room_id.unwrap(); let events = db .rooms - .pdus_since(&room_id, since).unwrap() + .pdus_since(&room_id, since) + .unwrap() .into_iter() .map(|pdu| pdu.unwrap().to_stripped_state_event()) .collect(); @@ -1383,10 +1399,23 @@ pub fn sync_route( join: joined_rooms, invite: invited_rooms, }, - presence: sync_events::Presence { events: Vec::new() }, + presence: sync_events::Presence { + events: db + .global_edus + .globallatests_since(since) + .unwrap() + .map(|edu| { + EventJson::::from( + edu.unwrap().json().to_owned(), + ) + }) + .collect(), + }, account_data: sync_events::AccountData { - events: 
db.account_data - .changes_since(None, &user_id, since).unwrap() + events: db + .account_data + .changes_since(None, &user_id, since) + .unwrap() .into_iter() .map(|(_, v)| v) .collect(), diff --git a/src/database.rs b/src/database.rs index 76109c9..3b8f927 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,4 +1,5 @@ pub(self) mod account_data; +pub(self) mod global_edus; pub(self) mod globals; pub(self) mod rooms; pub(self) mod users; @@ -11,8 +12,7 @@ pub struct Database { pub users: users::Users, pub rooms: rooms::Rooms, pub account_data: account_data::AccountData, - //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count - //pub globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + Type + UserId + pub global_edus: global_edus::GlobalEdus, pub _db: sled::Db, } @@ -66,8 +66,10 @@ impl Database { account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), }, - //globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), - //globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), + global_edus: global_edus::GlobalEdus { + //globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), + globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), // Presence + }, _db: db, } } diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs new file mode 100644 index 0000000..db44674 --- /dev/null +++ b/src/database/global_edus.rs @@ -0,0 +1,68 @@ +use crate::Result; +use ruma_events::{collections::only::Event as EduEvent, EventJson}; +use ruma_identifiers::UserId; + +pub struct GlobalEdus { + pub(super) globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + UserId + //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count +} + +impl GlobalEdus { + /// Adds a global event which will be saved until a new event replaces it (e.g. presence updates). + pub fn update_globallatest( + &self, + user_id: &UserId, + event: EduEvent, + globals: &super::globals::Globals, + ) -> Result<()> { + // Remove old entry + if let Some(old) = self + .globallatestid_globallatest + .iter() + .keys() + .rev() + .filter_map(|r| r.ok()) + .find(|key| { + key.rsplit(|&b| b == 0xff).next().unwrap() == user_id.to_string().as_bytes() + }) + { + // This is the old global_latest + self.globallatestid_globallatest.remove(old)?; + } + + let mut global_latest_id = globals.next_count()?.to_be_bytes().to_vec(); + global_latest_id.push(0xff); + global_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + + self.globallatestid_globallatest + .insert(global_latest_id, &*serde_json::to_string(&event)?)?; + + Ok(()) + } + + /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. + pub fn globallatests_since( + &self, + since: u64, + ) -> Result>>> { + let first_possible_edu = since.to_be_bytes().to_vec(); + + Ok(self + .globallatestid_globallatest + .range(&*first_possible_edu..) + // Skip the first pdu if it's exactly at since, because we sent that last time + .skip( + if self + .globallatestid_globallatest + .get(first_possible_edu)? 
+ .is_some() + { + 1 + } else { + 0 + }, + ) + .filter_map(|r| r.ok()) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + } +} diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index f2db5a4..a2ade55 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -79,14 +79,6 @@ impl RoomEdus { .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) } - /// Returns a vector of the most recent read_receipts in a room that happened after the event with id `since`. - pub fn roomlatests_all( - &self, - room_id: &RoomId, - ) -> Result>>> { - self.roomlatests_since(room_id, 0) - } - /// Adds an event that will be saved until the `timeout` timestamp (e.g. typing notifications). pub fn roomactive_add( &self, diff --git a/src/server_server.rs b/src/server_server.rs index bb43957..f77699e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -59,7 +59,12 @@ pub async fn send_request( request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); - ruma_signatures::sign_json(db.globals.hostname(), db.globals.keypair(), &mut request_json).unwrap(); + ruma_signatures::sign_json( + db.globals.hostname(), + db.globals.keypair(), + &mut request_json, + ) + .unwrap(); let signatures = request_json["signatures"] .as_object() @@ -85,7 +90,11 @@ pub async fn send_request( ); } - let reqwest_response = db.globals.reqwest_client().execute(http_request.into()).await; + let reqwest_response = db + .globals + .reqwest_client() + .execute(http_request.into()) + .await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { From 1dbde0e1c17bbedd8631a210cad1b6f211acb7b2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 10 May 2020 18:30:12 +0200 Subject: [PATCH 0076/1727] improvement: add option to get device id from token --- src/client_server.rs | 2 +- src/database.rs | 4 ++-- src/database/users.rs | 45 ++++++++++++++++++++++++++----------------- src/ruma_wrapper.rs | 9 ++++++--- 4 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index d740ab5..183abbf 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -105,7 +105,7 @@ pub fn register_route( stages: vec!["m.login.dummy".to_owned()], }], completed: vec![], - params: RawValue::from_string("".to_owned()).unwrap(), + params: RawValue::from_string("{}".to_owned()).unwrap(), session: Some(utils::random_string(SESSION_ID_LENGTH)), auth_error: None, }))); diff --git a/src/database.rs b/src/database.rs index 3b8f927..4e6ac57 100644 --- a/src/database.rs +++ b/src/database.rs @@ -44,11 +44,11 @@ impl Database { userid_displayname: db.open_tree("userid_displayname").unwrap(), userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), - token_userid: db.open_tree("token_userid").unwrap(), + token_userdeviceid: db.open_tree("token_userdeviceid").unwrap(), }, rooms: rooms::Rooms { edus: rooms::RoomEdus { - roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), + roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), // "Private" read receipt roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), // Read receipts roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), // Typing notifs }, diff --git a/src/database/users.rs b/src/database/users.rs index 529da91..5a8b0aa 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ pub struct Users { 
pub(super) userid_avatarurl: sled::Tree, pub(super) userdeviceids: sled::Tree, pub(super) userdeviceid_token: sled::Tree, - pub(super) token_userid: sled::Tree, + pub(super) token_userdeviceid: sled::Tree, } impl Users { @@ -24,12 +24,23 @@ impl Users { } /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result> { - self.token_userid.get(token)?.map_or(Ok(None), |bytes| { - utils::string_from_bytes(&bytes) - .and_then(|string| Ok(UserId::try_from(string)?)) - .map(Some) - }) + pub fn find_from_token(&self, token: &str) -> Result> { + self.token_userdeviceid + .get(token)? + .map_or(Ok(None), |bytes| { + let mut parts = bytes.split(|&b| b == 0xff); + let user_bytes = parts + .next() + .ok_or(Error::BadDatabase("token_userdeviceid value invalid"))?; + let device_bytes = parts + .next() + .ok_or(Error::BadDatabase("token_userdeviceid value invalid"))?; + + Ok(Some(( + UserId::try_from(utils::string_from_bytes(&user_bytes)?)?, + utils::string_from_bytes(&device_bytes)?, + ))) + }) } /// Returns an iterator over all users on this homeserver. @@ -105,27 +116,25 @@ impl Users { /// Replaces the access token of one device. pub fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); - if self.userdeviceids.get(&key)?.is_none() { + if self.userdeviceids.get(&userdeviceid)?.is_none() { return Err(Error::BadRequest( "Tried to set token for nonexistent device", )); } // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&key)? { - self.token_userid.remove(old_token)?; + if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? 
{ + self.token_userdeviceid.remove(old_token)?; // It will be removed from userdeviceid_token by the insert later } - // Assign token to device_id - self.userdeviceid_token.insert(key, &*token)?; - - // Assign token to user - self.token_userid.insert(token, &*user_id.to_string())?; + // Assign token to user device combination + self.userdeviceid_token.insert(&userdeviceid, &*token)?; + self.token_userdeviceid.insert(token, userdeviceid)?; Ok(()) } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 7568573..7c904db 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -18,6 +18,7 @@ const MESSAGE_LIMIT: u64 = 65535; pub struct Ruma { body: T, pub user_id: Option, + pub device_id: Option, pub json_body: serde_json::Value, } @@ -40,7 +41,7 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { Box::pin(async move { let data = rocket::try_outcome!(outcome.owned()); - let user_id = if T::METADATA.requires_authentication { + let (user_id, device_id) = if T::METADATA.requires_authentication { let db = request.guard::>().await.unwrap(); // Get token from header or query value @@ -59,10 +60,11 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { match db.users.find_from_token(&token).unwrap() { // TODO: M_UNKNOWN_TOKEN None => return Failure((Status::Unauthorized, ())), - Some(user_id) => Some(user_id), + Some((user_id, device_id)) => (Some(user_id), Some(device_id)), } + } else { - None + (None, None) }; let mut http_request = http::Request::builder() @@ -83,6 +85,7 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { Ok(t) => Success(Ruma { body: t, user_id, + device_id, // TODO: Can we avoid parsing it again? json_body: if !body.is_empty() { serde_json::from_slice(&body).expect("Ruma already parsed it successfully") From 8e041f90dd524867d39455f22b8871d9e592824d Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 11 May 2020 12:33:25 +0200 Subject: [PATCH 0077/1727] refactor: make cargo clippy happier --- rust-toolchain | 2 +- src/client_server.rs | 42 ++++++++++++++++-------------------- src/database/account_data.rs | 3 +-- src/database/rooms.rs | 4 ++-- src/main.rs | 2 +- src/ruma_wrapper.rs | 1 - 6 files changed, 23 insertions(+), 31 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index bf867e0..e40c16e 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly +nightly-2020-05-09 diff --git a/src/client_server.rs b/src/client_server.rs index 183abbf..1a407f7 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -158,7 +158,6 @@ pub fn register_route( // Generate new device id if the user didn't specify one let device_id = body .device_id - .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Generate new token for the device @@ -276,7 +275,6 @@ pub fn login_route( // Generate new device id if the user didn't specify one let device_id = body .device_id - .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Generate a new token for the device @@ -347,7 +345,7 @@ pub fn set_pushrule_route( _rule_id: String, ) -> MatrixResult { // TODO - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.account_data .update( None, @@ -448,7 +446,7 @@ pub fn set_displayname_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); if let Some(displayname) = &body.displayname { // Some("") will clear the 
displayname @@ -539,7 +537,7 @@ pub fn set_avatar_url_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); if !body.avatar_url.starts_with("mxc://") { debug!("Request contains an invalid avatar_url."); @@ -624,7 +622,7 @@ pub fn set_presence_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.global_edus .update_globallatest( @@ -670,7 +668,7 @@ pub fn set_read_marker_route( body: Ruma, _room_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.account_data .update( Some(&body.room_id), @@ -739,7 +737,7 @@ pub fn create_typing_event_route( _room_id: String, _user_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); let edu = EduEvent::Typing(ruma_events::typing::TypingEvent { content: ruma_events::typing::TypingEventContent { user_ids: vec![user_id.clone()], @@ -772,7 +770,7 @@ pub fn create_room_route( ) -> MatrixResult { // TODO: check if room is unique let room_id = RoomId::try_from(db.globals.hostname()).expect("host is valid"); - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.rooms .append_pdu( @@ -890,7 +888,7 @@ pub fn join_room_by_id_route( body: Ruma, _room_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); if db .rooms @@ -920,7 +918,7 @@ pub fn join_room_by_id_or_alias_route( body: Ruma, _room_id_or_alias: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, @@ -965,7 +963,7 @@ pub fn leave_room_route( body: Ruma, _room_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.rooms .leave(&user_id, &body.room_id, &user_id, &db.globals) .unwrap(); @@ -978,7 +976,7 @@ pub fn forget_room_route( body: Ruma, _room_id: String, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, &user_id).unwrap(); MatrixResult(Ok(forget_room::Response)) } @@ -1112,7 +1110,7 @@ pub fn create_message_event_route( _txn_id: String, body: Ruma, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); @@ -1146,14 +1144,14 @@ pub fn create_state_event_for_key_route( _state_key: String, body: Ruma, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); // Reponse of with/without key is the 
same let event_id = db .rooms .append_pdu( body.room_id.clone(), - user_id, + user_id.clone(), body.event_type.clone(), body.json_body.clone(), None, @@ -1177,14 +1175,14 @@ pub fn create_state_event_for_empty_key_route( _event_type: String, body: Ruma, ) -> MatrixResult { - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); // Reponse of with/without key is the same let event_id = db .rooms .append_pdu( body.room_id.clone(), - user_id, + user_id.clone(), body.event_type.clone(), body.json_body.clone(), None, @@ -1204,7 +1202,7 @@ pub fn sync_route( body: Ruma, ) -> MatrixResult { std::thread::sleep(Duration::from_millis(1500)); - let user_id = body.user_id.clone().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); let next_batch = db.globals.current_count().unwrap().to_string(); let mut joined_rooms = BTreeMap::new(); @@ -1344,10 +1342,7 @@ pub fn sync_route( for room_id in db.rooms.rooms_left(&user_id) { let room_id = room_id.unwrap(); let pdus = db.rooms.pdus_since(&room_id, since).unwrap(); - let room_events = pdus - .into_iter() - .map(|pdu| pdu.unwrap().to_room_event()) - .collect(); + let room_events = pdus.map(|pdu| pdu.unwrap().to_room_event()).collect(); let mut edus = db .rooms @@ -1380,7 +1375,6 @@ pub fn sync_route( .rooms .pdus_since(&room_id, since) .unwrap() - .into_iter() .map(|pdu| pdu.unwrap().to_stripped_state_event()) .collect(); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 1d48232..7ade70c 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -33,13 +33,12 @@ impl AccountData { .rev() .filter_map(|r| r.ok()) .take_while(|key| key.starts_with(&prefix)) - .filter(|key| { + .find(|key| { key.split(|&b| b == 0xff) .nth(1) .filter(|&user| user == user_id.to_string().as_bytes()) .is_some() }) - .next() { // This is the old room_latest self.roomuserdataid_accountdata.remove(old)?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1130682..f741afa 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -261,14 +261,14 @@ impl Rooms { self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; self.eventid_pduid - .insert(pdu.event_id.to_string(), pdu_id.clone())?; + .insert(pdu.event_id.to_string(), pdu_id)?; if let Some(state_key) = pdu.state_key { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.kind.to_string().as_bytes()); key.push(0xff); - key.extend_from_slice(state_key.to_string().as_bytes()); + key.extend_from_slice(state_key.as_bytes()); self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } diff --git a/src/main.rs b/src/main.rs index 3452423..13b0b65 100644 --- a/src/main.rs +++ b/src/main.rs @@ -84,7 +84,7 @@ fn setup_rocket() -> rocket::Rocket { fn main() { // Log info by default - if let Err(_) = std::env::var("RUST_LOG") { + if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", "warn"); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 7c904db..21c5925 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -62,7 +62,6 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { None => return Failure((Status::Unauthorized, ())), Some((user_id, device_id)) => (Some(user_id), Some(device_id)), } - } else { (None, None) }; From 00a9424719436c88102a211a311252032848b639 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Wed, 6 May 2020 15:36:44 +0200 Subject: [PATCH 
0078/1727] feat: sytests --- Cargo.lock | 4 +- Cargo.toml | 4 +- src/client_server.rs | 14 +- src/database.rs | 32 +- src/database/globals.rs | 12 +- src/database/rooms.rs | 10 +- src/main.rs | 3 +- src/server_server.rs | 10 +- sytest/are-we-synapse-yet.list | 836 +++++++++++++++++++++++++++++ sytest/are-we-synapse-yet.py | 260 +++++++++ sytest/show-expected-fail-tests.sh | 105 ++++ sytest/sytest-blacklist | 0 sytest/sytest-whitelist | 85 +++ 13 files changed, 1339 insertions(+), 36 deletions(-) create mode 100644 sytest/are-we-synapse-yet.list create mode 100755 sytest/are-we-synapse-yet.py create mode 100755 sytest/show-expected-fail-tests.sh create mode 100644 sytest/sytest-blacklist create mode 100644 sytest/sytest-whitelist diff --git a/Cargo.lock b/Cargo.lock index 1f1dc94..f059aa4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1216,7 +1216,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma-federation-api.git#ccbf216f39bbbaa59131cc200eae5bd18aa1947c" +source = "git+https://github.com/ruma/ruma-federation-api.git?rev=ccbf216f39bbbaa59131cc200eae5bd18aa1947c#ccbf216f39bbbaa59131cc200eae5bd18aa1947c" dependencies = [ "js_int", "ruma-api", @@ -1253,7 +1253,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma-signatures.git#1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" +source = "git+https://github.com/ruma/ruma-signatures.git?rev=1ca545cba8dfd43e0fc8e3c18e1311fb73390a97#1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" dependencies = [ "base64 0.12.1", "ring", diff --git a/Cargo.toml b/Cargo.toml index 217f58e..e0a6f10 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ ruma-client-api = "0.8.0" ruma-identifiers = "0.16.1" ruma-api = "0.16.0" ruma-events = "0.21.0" -ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git" } -ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git" } +ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } +ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "ccbf216f39bbbaa59131cc200eae5bd18aa1947c" } log = "0.4.8" sled = "0.31.0" directories = "2.0.2" diff --git a/src/client_server.rs b/src/client_server.rs index 1a407f7..d32134d 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -66,7 +66,7 @@ pub fn get_register_available_route( ) -> MatrixResult { // Validate user id let user_id: UserId = - match (*format!("@{}:{}", body.username.clone(), db.globals.hostname())).try_into() { + match (*format!("@{}:{}", body.username.clone(), db.globals.server_name())).try_into() { Err(_) => { debug!("Username invalid"); return MatrixResult(Err(Error { @@ -117,7 +117,7 @@ pub fn register_route( body.username .clone() .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), - db.globals.hostname() + db.globals.server_name() )) .try_into() { @@ -229,7 +229,7 @@ pub fn login_route( (body.user.clone(), body.login_info.clone()) { if !username.contains(':') { - username = format!("@{}:{}", username, db.globals.hostname()); + username = format!("@{}:{}", username, db.globals.server_name()); } if let Ok(user_id) = (*username).try_into() { if let Some(hash) = db.users.password_hash(&user_id).unwrap() { @@ -288,7 +288,7 @@ pub fn login_route( MatrixResult(Ok(login::Response { user_id, access_token: token, - home_server: Some(db.globals.hostname().to_owned()), + home_server: 
Some(db.globals.server_name().to_owned()), device_id, well_known: None, })) @@ -769,7 +769,7 @@ pub fn create_room_route( body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::try_from(db.globals.hostname()).expect("host is valid"); + let room_id = RoomId::try_from(db.globals.server_name()).expect("host is valid"); let user_id = body.user_id.as_ref().expect("user is authenticated"); db.rooms @@ -858,7 +858,7 @@ pub fn get_alias_route( _room_alias: String, ) -> MatrixResult { // TODO - let room_id = if body.room_alias.server_name() == db.globals.hostname() { + let room_id = if body.room_alias.server_name() == db.globals.server_name() { match body.room_alias.alias() { "conduit" => "!lgOCCXQKtXOAPlAlG5:conduit.rs", _ => { @@ -923,7 +923,7 @@ pub fn join_room_by_id_or_alias_route( let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(room_alias) => { - if room_alias.server_name() == db.globals.hostname() { + if room_alias.server_name() == db.globals.server_name() { return MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room alias not found.".to_owned(), diff --git a/src/database.rs b/src/database.rs index 4e6ac57..0bd3aa0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -7,6 +7,8 @@ pub(self) mod users; use directories::ProjectDirs; use std::fs::remove_dir_all; +use rocket::Config; + pub struct Database { pub globals: globals::Globals, pub users: users::Users, @@ -18,26 +20,38 @@ pub struct Database { impl Database { /// Tries to remove the old database but ignores all errors. - pub fn try_remove(hostname: &str) { + pub fn try_remove(server_name: &str) { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") .unwrap() .data_dir() .to_path_buf(); - path.push(hostname); + path.push(server_name); let _ = remove_dir_all(path); } /// Load an existing database or create a new one. 
- pub fn load_or_create(hostname: &str) -> Self { - let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .unwrap() - .data_dir() - .to_path_buf(); - path.push(hostname); + pub fn load_or_create(config: &Config) -> Self { + let server_name = config.get_str("server_name").unwrap_or("localhost"); + + let path = config + .get_str("database_path") + .map(|x| x.to_owned()) + .unwrap_or_else(|_| { + let path = ProjectDirs::from("xyz", "koesters", "conduit") + .unwrap() + .data_dir() + .join(server_name); + path.to_str().unwrap().to_owned() + }); + let db = sled::open(&path).unwrap(); + log::info!("Opened sled database at {}", path); Self { - globals: globals::Globals::load(db.open_tree("global").unwrap(), hostname.to_owned()), + globals: globals::Globals::load( + db.open_tree("global").unwrap(), + server_name.to_owned(), + ), users: users::Users { userid_password: db.open_tree("userid_password").unwrap(), userdeviceids: db.open_tree("userdeviceids").unwrap(), diff --git a/src/database/globals.rs b/src/database/globals.rs index f9e9999..eb20e37 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,13 +4,13 @@ pub const COUNTER: &str = "c"; pub struct Globals { pub(super) globals: sled::Tree, - hostname: String, + server_name: String, keypair: ruma_signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, } impl Globals { - pub fn load(globals: sled::Tree, hostname: String) -> Self { + pub fn load(globals: sled::Tree, server_name: String) -> Self { let keypair = ruma_signatures::Ed25519KeyPair::new( &*globals .update_and_fetch("keypair", utils::generate_keypair) @@ -22,15 +22,15 @@ impl Globals { Self { globals, - hostname, + server_name, keypair, reqwest_client: reqwest::Client::new(), } } - /// Returns the hostname of the server. - pub fn hostname(&self) -> &str { - &self.hostname + /// Returns the server_name of the server. + pub fn server_name(&self) -> &str { + &self.server_name } /// Returns this server's keypair. 
diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f741afa..28b3560 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -216,7 +216,7 @@ impl Rooms { event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), room_id: room_id.clone(), sender: sender.clone(), - origin: globals.hostname().to_owned(), + origin: globals.server_name().to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("this only fails many years in the future"), @@ -245,8 +245,12 @@ impl Rooms { .expect("ruma's reference hashes are correct"); let mut pdu_json = serde_json::to_value(&pdu)?; - ruma_signatures::hash_and_sign_event(globals.hostname(), globals.keypair(), &mut pdu_json) - .expect("our new event can be hashed and signed"); + ruma_signatures::hash_and_sign_event( + globals.server_name(), + globals.keypair(), + &mut pdu_json, + ) + .expect("our new event can be hashed and signed"); self.replace_pdu_leaves(&room_id, &pdu.event_id)?; diff --git a/src/main.rs b/src/main.rs index 13b0b65..bef55ac 100644 --- a/src/main.rs +++ b/src/main.rs @@ -75,8 +75,7 @@ fn setup_rocket() -> rocket::Rocket { ], ) .attach(AdHoc::on_attach("Config", |rocket| { - let hostname = rocket.config().get_str("hostname").unwrap_or("localhost"); - let data = Database::load_or_create(&hostname); + let data = Database::load_or_create(&rocket.config()); Ok(rocket.manage(data)) })) diff --git a/src/server_server.rs b/src/server_server.rs index f77699e..2fcbe98 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -55,12 +55,12 @@ pub async fn send_request( request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); request_map.insert("uri".to_owned(), T::METADATA.path.into()); - request_map.insert("origin".to_owned(), db.globals.hostname().into()); + request_map.insert("origin".to_owned(), db.globals.server_name().into()); request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); ruma_signatures::sign_json( - db.globals.hostname(), + db.globals.server_name(), db.globals.keypair(), &mut request_json, ) @@ -82,7 +82,7 @@ pub async fn send_request( AUTHORIZATION, HeaderValue::from_str(&format!( "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - db.globals.hostname(), + db.globals.server_name(), s.0, s.1 )) @@ -156,7 +156,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { ); let mut response = serde_json::from_slice( http::Response::try_from(get_server_keys::Response { - server_name: db.globals.hostname().to_owned(), + server_name: db.globals.server_name().to_owned(), verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), @@ -166,7 +166,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { .body(), ) .unwrap(); - ruma_signatures::sign_json(db.globals.hostname(), db.globals.keypair(), &mut response).unwrap(); + ruma_signatures::sign_json(db.globals.server_name(), db.globals.keypair(), &mut response).unwrap(); Json(response.to_string()) } diff --git a/sytest/are-we-synapse-yet.list b/sytest/are-we-synapse-yet.list new file mode 100644 index 0000000..cdc280a --- /dev/null +++ b/sytest/are-we-synapse-yet.list @@ -0,0 +1,836 @@ +reg GET /register yields a set of flows +reg POST /register can create a user +reg POST /register downcases capitals in usernames +reg POST /register returns the same device_id as that in the request +reg POST /register rejects registration of usernames with '!' 
+reg POST /register rejects registration of usernames with '"' +reg POST /register rejects registration of usernames with ':' +reg POST /register rejects registration of usernames with '?' +reg POST /register rejects registration of usernames with '\' +reg POST /register rejects registration of usernames with '@' +reg POST /register rejects registration of usernames with '[' +reg POST /register rejects registration of usernames with ']' +reg POST /register rejects registration of usernames with '{' +reg POST /register rejects registration of usernames with '|' +reg POST /register rejects registration of usernames with '}' +reg POST /register rejects registration of usernames with '£' +reg POST /register rejects registration of usernames with 'é' +reg POST /register rejects registration of usernames with '\n' +reg POST /register rejects registration of usernames with ''' +reg POST /r0/admin/register with shared secret +reg POST /r0/admin/register admin with shared secret +reg POST /r0/admin/register with shared secret downcases capitals +reg POST /r0/admin/register with shared secret disallows symbols +reg POST rejects invalid utf-8 in JSON +log GET /login yields a set of flows +log POST /login can log in as a user +log POST /login returns the same device_id as that in the request +log POST /login can log in as a user with just the local part of the id +log POST /login as non-existing user is rejected +log POST /login wrong password is rejected +log Interactive authentication types include SSO +log Can perform interactive authentication with SSO +log The user must be consistent through an interactive authentication session with SSO +log The operation must be consistent through an interactive authentication session +v1s GET /events initially +v1s GET /initialSync initially +csa Version responds 200 OK with valid structure +pro PUT /profile/:user_id/displayname sets my name +pro GET /profile/:user_id/displayname publicly accessible +pro PUT /profile/:user_id/avatar_url sets my avatar +pro GET /profile/:user_id/avatar_url publicly accessible +dev GET /device/{deviceId} +dev GET /device/{deviceId} gives a 404 for unknown devices +dev GET /devices +dev PUT /device/{deviceId} updates device fields +dev PUT /device/{deviceId} gives a 404 for unknown devices +dev DELETE /device/{deviceId} +dev DELETE /device/{deviceId} requires UI auth user to match device owner +dev DELETE /device/{deviceId} with no body gives a 401 +dev The deleted device must be consistent through an interactive auth session +dev Users receive device_list updates for their own devices +pre GET /presence/:user_id/status fetches initial status +pre PUT /presence/:user_id/status updates my presence +crm POST /createRoom makes a public room +crm POST /createRoom makes a private room +crm POST /createRoom makes a private room with invites +crm POST /createRoom makes a room with a name +crm POST /createRoom makes a room with a topic +syn Can /sync newly created room +crm POST /createRoom creates a room with the given version +crm POST /createRoom rejects attempts to create rooms with numeric versions +crm POST /createRoom rejects attempts to create rooms with unknown versions +crm POST /createRoom ignores attempts to set the room version via creation_content +mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +mem GET /rooms/:room_id/joined_members 
fetches my membership +v1s GET /rooms/:room_id/initialSync fetches initial sync state +pub GET /publicRooms lists newly-created room +ali GET /directory/room/:room_alias yields room ID +mem GET /joined_rooms lists newly-created room +rst POST /rooms/:room_id/state/m.room.name sets name +rst GET /rooms/:room_id/state/m.room.name gets name +rst POST /rooms/:room_id/state/m.room.topic sets topic +rst GET /rooms/:room_id/state/m.room.topic gets topic +rst GET /rooms/:room_id/state fetches entire room state +crm POST /createRoom with creation content +ali PUT /directory/room/:room_alias creates alias +nsp GET /rooms/:room_id/aliases lists aliases +jon POST /rooms/:room_id/join can join a room +jon POST /join/:room_alias can join a room +jon POST /join/:room_id can join a room +jon POST /join/:room_id can join a room with custom content +jon POST /join/:room_alias can join a room with custom content +lev POST /rooms/:room_id/leave can leave a room +inv POST /rooms/:room_id/invite can send an invite +ban POST /rooms/:room_id/ban can ban a user +snd POST /rooms/:room_id/send/:event_type sends a message +snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message +snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +get GET /rooms/:room_id/messages returns a message +get GET /rooms/:room_id/messages lazy loads members correctly +typ PUT /rooms/:room_id/typing/:user_id sets typing notification +rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels +rst PUT /rooms/:room_id/state/m.room.power_levels can set levels +rst PUT power_levels should not explode if the old power levels were empty +rst Both GET and PUT work +rct POST /rooms/:room_id/receipt can create receipts +red POST /rooms/:room_id/read_markers can create read marker +med POST /media/v1/upload can create an upload +med GET /media/v1/download can fetch the value again +cap GET /capabilities is present and well formed for registered user +cap GET /r0/capabilities is not public +reg Register with a recaptcha +reg registration is idempotent, without username specified +reg registration is idempotent, with username specified +reg registration remembers parameters +reg registration accepts non-ascii passwords +reg registration with inhibit_login inhibits login +reg User signups are forbidden from starting with '_' +reg Can register using an email address +log Can login with 3pid and password using m.login.password +log login types include SSO +log /login/cas/redirect redirects if the old m.login.cas login type is listed +log Can login with new user via CAS +lox Can logout current device +lox Can logout all devices +lox Request to logout with invalid an access token is rejected +lox Request to logout without an access token is rejected +log After changing password, can't log in with old password +log After changing password, can log in with new password +log After changing password, existing session still works +log After changing password, a different session no longer works by default +log After changing password, different sessions can optionally be kept +psh Pushers created with a different access token are deleted on password change +psh Pushers created with a the same access token are not deleted on password change +acc Can deactivate account +acc Can't deactivate account with wrong password +acc After deactivating account, can't log in with password +acc After deactivating account, can't log in with an email +v1s initialSync sees my presence status +pre Presence change reports an event to myself 
+pre Friends presence changes reports events +crm Room creation reports m.room.create to myself +crm Room creation reports m.room.member to myself +rst Setting room topic reports m.room.topic to myself +v1s Global initialSync +v1s Global initialSync with limit=0 gives no messages +v1s Room initialSync +v1s Room initialSync with limit=0 gives no messages +rst Setting state twice is idempotent +jon Joining room twice is idempotent +syn New room members see their own join event +v1s New room members see existing users' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new members' presence +v1s All room members see all room members' presence in global initialSync +f,jon Remote users can join room by alias +syn New room members see their own join event +v1s New room members see existing members' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new member's presence +v1s New room members see first user's profile information in global initialSync +v1s New room members see first user's profile information in per-room initialSync +f,jon Remote users may not join unfederated rooms +syn Local room members see posted message events +v1s Fetching eventstream a second time doesn't yield the message again +syn Local non-members don't see posted message events +get Local room members can get room messages +f,syn Remote room members also see posted message events +f,get Remote room members can get room messages +get Message history can be paginated +f,get Message history can be paginated over federation +eph Ephemeral messages received from clients are correctly expired +ali Room aliases can contain Unicode +f,ali Remote room alias queries can handle Unicode +ali Canonical alias can be set +ali Canonical alias can include alt_aliases +ali Regular users can add and delete aliases in the default room configuration +ali Regular users can add and delete aliases when m.room.aliases is restricted +ali Deleting a non-existent alias should return a 404 +ali Users can't delete other's aliases +ali Users with sufficient power-level can delete other's aliases +ali Can delete canonical alias +ali Alias creators can delete alias with no ops +ali Alias creators can delete canonical alias with no ops +ali Only room members can list aliases of a room +inv Can invite users to invite-only rooms +inv Uninvited users cannot join the room +inv Invited user can reject invite +f,inv Invited user can reject invite over federation +f,inv Invited user can reject invite over federation several times +inv Invited user can reject invite for empty room +f,inv Invited user can reject invite over federation for empty room +inv Invited user can reject local invite after originator leaves +inv Invited user can see room metadata +f,inv Remote invited user can see room metadata +inv Users cannot invite themselves to a room +inv Users cannot invite a user that is already in the room +ban Banned user is kicked and may not rejoin until unbanned +f,ban Remote banned user is kicked and may not rejoin until unbanned +ban 'ban' event respects room powerlevel +plv setting 'm.room.name' respects room powerlevel +plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) +plv Unprivileged users can set m.room.topic if it only needs level 0 +plv Users cannot set ban powerlevel higher than their own (2 subtests) +plv Users cannot set kick powerlevel higher than their own (2 subtests) +plv Users cannot set redact powerlevel 
higher than their own (2 subtests) +v1s Check that event streams started after a client joined a room work (SYT-1) +v1s Event stream catches up fully after many messages +xxx POST /rooms/:room_id/redact/:event_id as power user redacts message +xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message +xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message +xxx POST /redact disallows redaction of event in different room +xxx Redaction of a redaction redacts the redaction reason +v1s A departed room is still included in /initialSync (SPEC-216) +v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) +rst Can get rooms/{roomId}/state for a departed room (SPEC-216) +mem Can get rooms/{roomId}/members for a departed room (SPEC-216) +get Can get rooms/{roomId}/messages for a departed room (SPEC-216) +rst Can get 'm.room.name' state for a departed room (SPEC-216) +syn Getting messages going forward is limited for a departed room (SPEC-216) +3pd Can invite existing 3pid +3pd Can invite existing 3pid with no ops into a private room +3pd Can invite existing 3pid in createRoom +3pd Can invite unbound 3pid +f,3pd Can invite unbound 3pid over federation +3pd Can invite unbound 3pid with no ops into a private room +f,3pd Can invite unbound 3pid over federation with no ops into a private room +f,3pd Can invite unbound 3pid over federation with users from both servers +3pd Can accept unbound 3pid invite after inviter leaves +3pd Can accept third party invite with /join +3pd 3pid invite join with wrong but valid signature are rejected +3pd 3pid invite join valid signature but revoked keys are rejected +3pd 3pid invite join valid signature but unreachable ID server are rejected +gst Guest user cannot call /events globally +gst Guest users can join guest_access rooms +gst Guest users can send messages to guest_access rooms if joined +gst Guest user calling /events doesn't tightloop +gst Guest users are kicked from guest_access rooms on revocation of guest_access +gst Guest user can set display names +gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation +gst Guest user can upgrade to fully featured user +gst Guest user cannot upgrade other users +pub GET /publicRooms lists rooms +pub GET /publicRooms includes avatar URLs +gst Guest users can accept invites to private rooms over federation +gst Guest users denied access over federation if guest access prohibited +mem Room members can override their displayname on a room-specific basis +mem Room members can join a room with an overridden displayname +mem Users cannot kick users from a room they are not in +mem Users cannot kick users who have already left a room +typ Typing notification sent to local room members +f,typ Typing notifications also sent to remote room members +typ Typing can be explicitly stopped +rct Read receipts are visible to /initialSync +rct Read receipts are sent as events +rct Receipts must be m.read +pro displayname updates affect room member events +pro avatar_url updates affect room member events +gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "default" allows/forbids appropriately for 
Guest users +gst Guest non-joined user cannot call /events on shared room +gst Guest non-joined user cannot call /events on invited room +gst Guest non-joined user cannot call /events on joined room +gst Guest non-joined user cannot call /events on default room +gst Guest non-joined user can call /events on world_readable room +gst Guest non-joined users can get state for world_readable rooms +gst Guest non-joined users can get individual state for world_readable rooms +gst Guest non-joined users cannot room initalSync for non-world_readable rooms +gst Guest non-joined users can room initialSync for world_readable rooms +gst Guest non-joined users can get individual state for world_readable rooms after leaving +gst Guest non-joined users cannot send messages to guest_access rooms if not joined +gst Guest users can sync from world_readable guest_access rooms if joined +gst Guest users can sync from shared guest_access rooms if joined +gst Guest users can sync from invited guest_access rooms if joined +gst Guest users can sync from joined guest_access rooms if joined +gst Guest users can sync from default guest_access rooms if joined +ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users +ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users +ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users +ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users +ath m.room.history_visibility == "default" allows/forbids appropriately for Real users +ath Real non-joined user cannot call /events on shared room +ath Real non-joined user cannot call /events on invited room +ath Real non-joined user cannot call /events on joined room +ath Real non-joined user cannot call /events on default room +ath Real non-joined user can call /events on world_readable room +ath Real non-joined users can get state for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms +ath Real non-joined users cannot room initalSync for non-world_readable rooms +ath Real non-joined users can room initialSync for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms after leaving +ath Real non-joined users cannot send messages to guest_access rooms if not joined +ath Real users can sync from world_readable guest_access rooms if joined +ath Real users can sync from shared guest_access rooms if joined +ath Real users can sync from invited guest_access rooms if joined +ath Real users can sync from joined guest_access rooms if joined +ath Real users can sync from default guest_access rooms if joined +ath Only see history_visibility changes on boundaries +f,ath Backfill works correctly with history visibility set to joined +fgt Forgotten room messages cannot be paginated +fgt Forgetting room does not show up in v2 /sync +fgt Can forget room you've been kicked from +fgt Can't forget room you're still in +mem Can re-join room if re-invited +ath Only original members of the room can see messages from erased users +mem /joined_rooms returns only joined rooms +mem /joined_members return joined members +ctx /context/ on joined room works +ctx /context/ on non world readable room does not work +ctx /context/ returns correct number of events +ctx /context/ with lazy_load_members filter works +get /event/ on joined room works +get /event/ on non world readable room does not work +get /event/ does not allow access to events before 
the user joined +mem Can get rooms/{roomId}/members +mem Can get rooms/{roomId}/members at a given point +mem Can filter rooms/{roomId}/members +upg /upgrade creates a new room +upg /upgrade should preserve room visibility for public rooms +upg /upgrade should preserve room visibility for private rooms +upg /upgrade copies >100 power levels to the new room +upg /upgrade copies the power levels to the new room +upg /upgrade preserves the power level of the upgrading user in old and new rooms +upg /upgrade copies important state to the new room +upg /upgrade copies ban events to the new room +upg local user has push rules copied to upgraded room +f,upg remote user has push rules copied to upgraded room +upg /upgrade moves aliases to the new room +upg /upgrade moves remote aliases to the new room +upg /upgrade preserves direct room state +upg /upgrade preserves room federation ability +upg /upgrade restricts power levels in the old room +upg /upgrade restricts power levels in the old room when the old PLs are unusual +upg /upgrade to an unknown version is rejected +upg /upgrade is rejected if the user can't send state events +upg /upgrade of a bogus room fails gracefully +upg Cannot send tombstone event that points to the same room +f,upg Local and remote users' homeservers remove a room from their public directory on upgrade +rst Name/topic keys are correct +f,pub Can get remote public room list +pub Can paginate public room list +pub Can search public room list +syn Can create filter +syn Can download filter +syn Can sync +syn Can sync a joined room +syn Full state sync includes joined rooms +syn Newly joined room is included in an incremental sync +syn Newly joined room has correct timeline in incremental sync +syn Newly joined room includes presence in incremental sync +syn Get presence for newly joined members in incremental sync +syn Can sync a room with a single message +syn Can sync a room with a message with a transaction id +syn A message sent after an initial sync appears in the timeline of an incremental sync. +syn A filtered timeline reaches its limit +syn Syncing a new room with a large timeline limit isn't limited +syn A full_state incremental update returns only recent timeline +syn A prev_batch token can be used in the v1 messages API +syn A next_batch token can be used in the v1 messages API +syn User sees their own presence in a sync +syn User is offline if they set_presence=offline in their sync +syn User sees updates to presence from other users in the incremental sync. 
+syn State is included in the timeline in the initial sync +f,syn State from remote users is included in the state in the initial sync +syn Changes to state are included in an incremental sync +syn Changes to state are included in an gapped incremental sync +f,syn State from remote users is included in the timeline in an incremental sync +syn A full_state incremental update returns all state +syn When user joins a room the state is included in the next sync +syn A change to displayname should not result in a full state sync +syn A change to displayname should appear in incremental /sync +syn When user joins a room the state is included in a gapped sync +syn When user joins and leaves a room in the same batch, the full state is still included in the next sync +syn Current state appears in timeline in private history +syn Current state appears in timeline in private history with many messages before +syn Current state appears in timeline in private history with many messages after +syn Rooms a user is invited to appear in an initial sync +syn Rooms a user is invited to appear in an incremental sync +syn Newly joined room is included in an incremental sync after invite +syn Sync can be polled for updates +syn Sync is woken up for leaves +syn Left rooms appear in the leave section of sync +syn Newly left rooms appear in the leave section of incremental sync +syn We should see our own leave event, even if history_visibility is restricted (SYN-662) +syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) +syn Newly left rooms appear in the leave section of gapped sync +syn Previously left rooms don't appear in the leave section of sync +syn Left rooms appear in the leave section of full state sync +syn Archived rooms only contain history from before the user left +syn Banned rooms appear in the leave section of sync +syn Newly banned rooms appear in the leave section of incremental sync +syn Newly banned rooms appear in the leave section of incremental sync +syn Typing events appear in initial sync +syn Typing events appear in incremental sync +syn Typing events appear in gapped sync +syn Read receipts appear in initial v2 /sync +syn New read receipts appear in incremental v2 /sync +syn Can pass a JSON filter as a query parameter +syn Can request federation format via the filter +syn Read markers appear in incremental v2 /sync +syn Read markers appear in initial v2 /sync +syn Read markers can be updated +syn Lazy loading parameters in the filter are strictly boolean +syn The only membership state included in an initial sync is for all the senders in the timeline +syn The only membership state included in an incremental sync is for senders in the timeline +syn The only membership state included in a gapped incremental sync is for senders in the timeline +syn Gapped incremental syncs include all state changes +syn Old leaves are present in gapped incremental syncs +syn Leaves are present in non-gapped incremental syncs +syn Old members are included in gappy incr LL sync if they start speaking +syn Members from the gap are included in gappy incr LL sync +syn We don't send redundant membership state across incremental syncs by default +syn We do send redundant membership state across incremental syncs if asked +syn Unnamed room comes with a name summary +syn Named room comes with just joined member count summary +syn Room summary only has 5 heroes +syn Room summary counts change when membership changes +rmv User can create and 
send/receive messages in a room with version 1 +rmv User can create and send/receive messages in a room with version 1 (2 subtests) +rmv local user can join room with version 1 +rmv User can invite local user to room with version 1 +rmv remote user can join room with version 1 +rmv User can invite remote user to room with version 1 +rmv Remote user can backfill in a room with version 1 +rmv Can reject invites over federation for rooms with version 1 +rmv Can receive redactions from regular users over federation in room version 1 +rmv User can create and send/receive messages in a room with version 2 +rmv User can create and send/receive messages in a room with version 2 (2 subtests) +rmv local user can join room with version 2 +rmv User can invite local user to room with version 2 +rmv remote user can join room with version 2 +rmv User can invite remote user to room with version 2 +rmv Remote user can backfill in a room with version 2 +rmv Can reject invites over federation for rooms with version 2 +rmv Can receive redactions from regular users over federation in room version 2 +rmv User can create and send/receive messages in a room with version 3 +rmv User can create and send/receive messages in a room with version 3 (2 subtests) +rmv local user can join room with version 3 +rmv User can invite local user to room with version 3 +rmv remote user can join room with version 3 +rmv User can invite remote user to room with version 3 +rmv Remote user can backfill in a room with version 3 +rmv Can reject invites over federation for rooms with version 3 +rmv Can receive redactions from regular users over federation in room version 3 +rmv User can create and send/receive messages in a room with version 4 +rmv User can create and send/receive messages in a room with version 4 (2 subtests) +rmv local user can join room with version 4 +rmv User can invite local user to room with version 4 +rmv remote user can join room with version 4 +rmv User can invite remote user to room with version 4 +rmv Remote user can backfill in a room with version 4 +rmv Can reject invites over federation for rooms with version 4 +rmv Can receive redactions from regular users over federation in room version 4 +rmv User can create and send/receive messages in a room with version 5 +rmv User can create and send/receive messages in a room with version 5 (2 subtests) +rmv local user can join room with version 5 +rmv User can invite local user to room with version 5 +rmv remote user can join room with version 5 +rmv User can invite remote user to room with version 5 +rmv Remote user can backfill in a room with version 5 +rmv Can reject invites over federation for rooms with version 5 +rmv Can receive redactions from regular users over federation in room version 5 +pre Presence changes are reported to local room members +f,pre Presence changes are also reported to remote room members +pre Presence changes to UNAVAILABLE are reported to local room members +f,pre Presence changes to UNAVAILABLE are reported to remote room members +v1s Newly created users see their own presence in /initialSync (SYT-34) +dvk Can upload device keys +dvk Should reject keys claiming to belong to a different user +dvk Can query device keys using POST +dvk Can query specific device keys using POST +dvk query for user with no keys returns empty key dict +dvk Can claim one time key using POST +f,dvk Can query remote device keys using POST +f,dvk Can claim remote one time key using POST +dvk Local device key changes appear in v2 /sync +dvk Local new device 
changes appear in v2 /sync +dvk Local delete device changes appear in v2 /sync +dvk Local update device changes appear in v2 /sync +dvk Can query remote device keys using POST after notification +f,dev Device deletion propagates over federation +f,dev If remote user leaves room, changes device and rejoins we see update in sync +f,dev If remote user leaves room we no longer receive device updates +dvk Local device key changes appear in /keys/changes +dvk New users appear in /keys/changes +f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes +dvk Get left notifs in sync and /keys/changes when other user leaves +dvk Get left notifs for other users in sync and /keys/changes when user leaves +f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes +dvk Can create backup version +dvk Can update backup version +dvk Responds correctly when backup is empty +dvk Can backup keys +dvk Can update keys with better versions +dvk Will not update keys with worse versions +dvk Will not back up to an old backup version +dvk Can delete backup +dvk Deleted & recreated backups are empty +dvk Can create more than 10 backup versions +dvk Can upload self-signing keys +dvk Fails to upload self-signing keys with no auth +dvk Fails to upload self-signing key without master key +dvk Changing master key notifies local users +dvk Changing user-signing key notifies local users +f,dvk can fetch self-signing keys over federation +f,dvk uploading self-signing key notifies over federation +f,dvk uploading signed devices gets propagated over federation +tag Can add tag +tag Can remove tag +tag Can list tags for a room +v1s Tags appear in the v1 /events stream +v1s Tags appear in the v1 /initalSync +v1s Tags appear in the v1 room initial sync +tag Tags appear in an initial v2 /sync +tag Newly updated tags appear in an incremental v2 /sync +tag Deleted tags appear in an incremental v2 /sync +tag local user has tags copied to the new room +f,tag remote user has tags copied to the new room +sch Can search for an event by body +sch Can get context around search results +sch Can back-paginate search results +sch Search works across an upgraded room and its predecessor +sch Search results with rank ordering do not include redacted events +sch Search results with recent ordering do not include redacted events +acc Can add account data +acc Can add account data to room +acc Can get account data without syncing +acc Can get room account data without syncing +v1s Latest account data comes down in /initialSync +v1s Latest account data comes down in room initialSync +v1s Account data appears in v1 /events stream +v1s Room account data appears in v1 /events stream +acc Latest account data appears in v2 /sync +acc New account data appears in incremental v2 /sync +oid Can generate a openid access_token that can be exchanged for information about a user +oid Invalid openid access tokens are rejected +oid Requests to userinfo without access tokens are rejected +std Can send a message directly to a device using PUT /sendToDevice +std Can recv a device message using /sync +std Can recv device messages until they are acknowledged +std Device messages with the same txn_id are deduplicated +std Device messages wake up /sync +std Can recv device messages over federation +std Device messages over federation wake up /sync +std Can send messages with a wildcard device id +std Can send messages with a wildcard device id to two devices +std Wildcard device messages 
wake up /sync +std Wildcard device messages over federation wake up /sync +adm /whois +nsp /purge_history +nsp /purge_history by ts +nsp Can backfill purged history +nsp Shutdown room +ign Ignore user in existing room +ign Ignore invite in full sync +ign Ignore invite in incremental sync +fky Checking local federation server +fky Federation key API allows unsigned requests for keys +fky Federation key API can act as a notary server via a GET request +fky Federation key API can act as a notary server via a POST request +fky Key notary server should return an expired key if it can't find any others +fky Key notary server must not overwrite a valid key with a spurious result from the origin server +fqu Non-numeric ports in server names are rejected +fqu Outbound federation can query profile data +fqu Inbound federation can query profile data +fqu Outbound federation can query room alias directory +fqu Inbound federation can query room alias directory +fsj Outbound federation can query v1 /send_join +fsj Outbound federation can query v2 /send_join +fmj Outbound federation passes make_join failures through to the client +fsj Inbound federation can receive v1 /send_join +fsj Inbound federation can receive v2 /send_join +fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms +fsj Inbound /v1/send_join rejects incorrectly-signed joins +fsj Inbound /v1/send_join rejects joins from other servers +fau Inbound federation rejects remote attempts to kick local users to rooms +frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support +frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support +frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +frv Inbound federation accepts attempts to join v2 rooms from servers with support +frv Outbound federation correctly handles unsupported room versions +frv A pair of servers can establish a join in a v2 room +fsj Outbound federation rejects send_join responses with no m.room.create event +frv Outbound federation rejects m.room.create events with an unknown room version +fsj Event with an invalid signature in the send_join response should not cause room join to fail +fed Outbound federation can send events +fed Inbound federation can receive events +fed Inbound federation can receive redacted events +fed Ephemeral messages received from servers are correctly expired +fed Events whose auth_events are in the wrong room do not mess up the room state +fed Inbound federation can return events +fed Inbound federation redacts events from erased users +fme Outbound federation can request missing events +fme Inbound federation can return missing events for world_readable visibility +fme Inbound federation can return missing events for shared visibility +fme Inbound federation can return missing events for invite visibility +fme Inbound federation can return missing events for joined visibility +fme outliers whose auth_events are in a different room are correctly rejected +fbk Outbound federation can backfill events +fbk Inbound federation can backfill events +fbk Backfill checks the events requested belong to the room +fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +fiv Outbound federation can send invites via v1 API +fiv Outbound federation can send invites via v2 API +fiv Inbound federation can receive invites via v1 API +fiv Inbound federation can receive invites via v2 API +fiv Inbound 
federation can receive invite and reject when remote replies with a 403 +fiv Inbound federation can receive invite and reject when remote replies with a 500 +fiv Inbound federation can receive invite and reject when remote is unreachable +fiv Inbound federation rejects invites which are not signed by the sender +fiv Inbound federation can receive invite rejections +fiv Inbound federation rejects incorrectly-signed invite rejections +fsl Inbound /v1/send_leave rejects leaves from other servers +fst Inbound federation can get state for a room +fst Inbound federation of state requires event_id as a mandatory paramater +fst Inbound federation can get state_ids for a room +fst Inbound federation of state_ids requires event_id as a mandatory paramater +fst Federation rejects inbound events where the prev_events cannot be found +fst Room state at a rejected message event is the same as its predecessor +fst Room state at a rejected state event is the same as its predecessor +fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state +fst Federation handles empty auth_events in state_ids sanely +fst Getting state checks the events requested belong to the room +fst Getting state IDs checks the events requested belong to the room +fst Should not be able to take over the room by pretending there is no PL event +fpb Inbound federation can get public room list +fed Outbound federation sends receipts +fed Inbound federation rejects receipts from wrong remote +fed Inbound federation ignores redactions from invalid servers room > v3 +fed An event which redacts an event in a different room should be ignored +fed An event which redacts itself should be ignored +fed A pair of events which redact each other should be ignored +fdk Local device key changes get to remote servers +fdk Server correctly handles incoming m.device_list_update +fdk Server correctly resyncs when client query keys and there is no remote cache +fdk Server correctly resyncs when server leaves and rejoins a room +fdk Local device key changes get to remote servers with correct prev_id +fdk Device list doesn't change if remote server is down +fdk If a device list update goes missing, the server resyncs on the next one +fst Name/topic keys are correct +fau Remote servers cannot set power levels in rooms without existing powerlevels +fau Remote servers should reject attempts by non-creators to set the power levels +fau Inbound federation rejects typing notifications from wrong remote +fed Forward extremities remain so even after the next events are populated as outliers +fau Banned servers cannot send events +fau Banned servers cannot /make_join +fau Banned servers cannot /send_join +fau Banned servers cannot /make_leave +fau Banned servers cannot /send_leave +fau Banned servers cannot /invite +fau Banned servers cannot get room state +fau Banned servers cannot get room state ids +fau Banned servers cannot backfill +fau Banned servers cannot /event_auth +fau Banned servers cannot get missing events +fau Server correctly handles transactions that break edu limits +fau Inbound federation correctly soft fails events +fau Inbound federation accepts a second soft-failed event +fau Inbound federation correctly handles soft failed events as extremities +med Can upload with Unicode file name +med Can download with Unicode file name locally +f,med Can download with Unicode file name over federation +med Alternative server names do not cause a routing loop +med Can download specifying a different Unicode file 
name +med Can upload without a file name +med Can download without a file name locally +f,med Can download without a file name over federation +med Can upload with ASCII file name +med Can download file 'ascii' +med Can download file 'name with spaces' +med Can download file 'name;with;semicolons' +med Can download specifying a different ASCII file name +med Can send image in room message +med Can fetch images in room +med POSTed media can be thumbnailed +f,med Remote media can be thumbnailed +med Test URL preview +med Can read configuration endpoint +nsp Can quarantine media in rooms +udr User appears in user directory +udr User in private room doesn't appear in user directory +udr User joining then leaving public room appears and dissappears from directory +udr Users appear/disappear from directory when join_rules are changed +udr Users appear/disappear from directory when history_visibility are changed +udr Users stay in directory when join_rules are changed but history_visibility is world_readable +f,udr User in remote room doesn't appear in user directory after server left room +udr User directory correctly update on display name change +udr User in shared private room does appear in user directory +udr User in shared private room does appear in user directory until leave +udr User in dir while user still shares private rooms +nsp Create group +nsp Add group rooms +nsp Remove group rooms +nsp Get local group profile +nsp Get local group users +nsp Add/remove local group rooms +nsp Get local group summary +nsp Get remote group profile +nsp Get remote group users +nsp Add/remove remote group rooms +nsp Get remote group summary +nsp Add local group users +nsp Remove self from local group +nsp Remove other from local group +nsp Add remote group users +nsp Remove self from remote group +nsp Listing invited users of a remote group when not a member returns a 403 +nsp Add group category +nsp Remove group category +nsp Get group categories +nsp Add group role +nsp Remove group role +nsp Get group roles +nsp Add room to group summary +nsp Adding room to group summary keeps room_id when fetching rooms in group +nsp Adding multiple rooms to group summary have correct order +nsp Remove room from group summary +nsp Add room to group summary with category +nsp Remove room from group summary with category +nsp Add user to group summary +nsp Adding multiple users to group summary have correct order +nsp Remove user from group summary +nsp Add user to group summary with role +nsp Remove user from group summary with role +nsp Local group invites come down sync +nsp Group creator sees group in sync +nsp Group creator sees group in initial sync +nsp Get/set local group publicity +nsp Bulk get group publicity +nsp Joinability comes down summary +nsp Set group joinable and join it +nsp Group is not joinable by default +nsp Group is joinable over federation +nsp Room is transitioned on local and remote groups upon room upgrade +3pd Can bind 3PID via home server +3pd Can bind and unbind 3PID via homeserver +3pd Can unbind 3PID via homeserver when bound out of band +3pd 3PIDs are unbound after account deactivation +3pd Can bind and unbind 3PID via /unbind by specifying the identity server +3pd Can bind and unbind 3PID via /unbind without specifying the identity server +app AS can create a user +app AS can create a user with an underscore +app AS can create a user with inhibit_login +app AS cannot create users outside its own namespace +app Regular users cannot register within the AS namespace +app AS can make 
room aliases +app Regular users cannot create room aliases within the AS namespace +app AS-ghosted users can use rooms via AS +app AS-ghosted users can use rooms themselves +app Ghost user must register before joining room +app AS can set avatar for ghosted users +app AS can set displayname for ghosted users +app AS can't set displayname for random users +app Inviting an AS-hosted user asks the AS server +app Accesing an AS-hosted room alias asks the AS server +app Events in rooms with AS-hosted room aliases are sent to AS server +app AS user (not ghost) can join room without registering +app AS user (not ghost) can join room without registering, with user_id query param +app HS provides query metadata +app HS can provide query metadata on a single protocol +app HS will proxy request for 3PU mapping +app HS will proxy request for 3PL mapping +app AS can publish rooms in their own list +app AS and main public room lists are separate +app AS can deactivate a user +psh Test that a message is pushed +psh Invites are pushed +psh Rooms with names are correctly named in pushed +psh Rooms with canonical alias are correctly named in pushed +psh Rooms with many users are correctly pushed +psh Don't get pushed for rooms you've muted +psh Rejected events are not pushed +psh Can add global push rule for room +psh Can add global push rule for sender +psh Can add global push rule for content +psh Can add global push rule for override +psh Can add global push rule for underride +psh Can add global push rule for content +psh New rules appear before old rules by default +psh Can add global push rule before an existing rule +psh Can add global push rule after an existing rule +psh Can delete a push rule +psh Can disable a push rule +psh Adding the same push rule twice is idempotent +psh Messages that notify from another user increment unread notification count +psh Messages that highlight from another user increment unread highlight count +psh Can change the actions of default rules +psh Changing the actions of an unknown default rule fails with 404 +psh Can change the actions of a user specified rule +psh Changing the actions of an unknown rule fails with 404 +psh Can fetch a user's pushers +psh Push rules come down in an initial /sync +psh Adding a push rule wakes up an incremental /sync +psh Disabling a push rule wakes up an incremental /sync +psh Enabling a push rule wakes up an incremental /sync +psh Setting actions for a push rule wakes up an incremental /sync +psh Can enable/disable default rules +psh Enabling an unknown default rule fails with 404 +psh Test that rejected pushers are removed. 
+psh Notifications can be viewed with GET /notifications +psh Trying to add push rule with no scope fails with 400 +psh Trying to add push rule with invalid scope fails with 400 +psh Trying to add push rule with missing template fails with 400 +psh Trying to add push rule with missing rule_id fails with 400 +psh Trying to add push rule with empty rule_id fails with 400 +psh Trying to add push rule with invalid template fails with 400 +psh Trying to add push rule with rule_id with slashes fails with 400 +psh Trying to add push rule with override rule without conditions fails with 400 +psh Trying to add push rule with underride rule without conditions fails with 400 +psh Trying to add push rule with condition without kind fails with 400 +psh Trying to add push rule with content rule without pattern fails with 400 +psh Trying to add push rule with no actions fails with 400 +psh Trying to add push rule with invalid action fails with 400 +psh Trying to add push rule with invalid attr fails with 400 +psh Trying to add push rule with invalid value for enabled fails with 400 +psh Trying to get push rules with no trailing slash fails with 400 +psh Trying to get push rules with scope without trailing slash fails with 400 +psh Trying to get push rules with template without tailing slash fails with 400 +psh Trying to get push rules with unknown scope fails with 400 +psh Trying to get push rules with unknown template fails with 400 +psh Trying to get push rules with unknown attribute fails with 400 +psh Trying to get push rules with unknown rule_id fails with 404 +v1s GET /initialSync with non-numeric 'limit' +v1s GET /events with non-numeric 'limit' +v1s GET /events with negative 'limit' +v1s GET /events with non-numeric 'timeout' +ath Event size limits +syn Check creating invalid filters returns 4xx +f,pre New federated private chats get full presence information (SYN-115) +pre Left room members do not cause problems for presence +crm Rooms can be created with an initial invite list (SYN-205) +typ Typing notifications don't leak +ban Non-present room members cannot ban others +psh Getting push rules doesn't corrupt the cache SYN-390 +inv Test that we can be reinvited to a room we created +syn Multiple calls to /sync should not cause 500 errors +gst Guest user can call /events on another world_readable room (SYN-606) +gst Real user can call /events on another world_readable room (SYN-606) +gst Events come down the correct room +pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list +std Can send a to-device message to two users which both receive it using /sync diff --git a/sytest/are-we-synapse-yet.py b/sytest/are-we-synapse-yet.py new file mode 100755 index 0000000..0b334ba --- /dev/null +++ b/sytest/are-we-synapse-yet.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 + +from __future__ import division +import argparse +import re +import sys + +# Usage: $ ./are-we-synapse-yet.py [-v] results.tap +# This script scans a results.tap file from Dendrite's CI process and spits out +# a rating of how close we are to Synapse parity, based purely on SyTests. +# The main complexity is grouping tests sensibly into features like 'Registration' +# and 'Federation'. Then it just checks the ones which are passing and calculates +# percentages for each group. Produces results like: +# +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# Login : 7% (1/15 tests) +# V1 CS APIs : 10% (3/30 tests) +# ... 
+# +# or in verbose mode: +# +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# ✓ GET /register yields a set of flows +# ✓ POST /register can create a user +# ✓ POST /register downcases capitals in usernames +# ... +# +# You can also tack `-v` on to see exactly which tests each category falls under. + +test_mappings = { + "nsp": "Non-Spec API", + "f": "Federation", # flag to mark test involves federation + + "federation_apis": { + "fky": "Key API", + "fsj": "send_join API", + "fmj": "make_join API", + "fsl": "send_leave API", + "fiv": "Invite API", + "fqu": "Query API", + "frv": "room versions", + "fau": "Auth", + "fbk": "Backfill API", + "fme": "get_missing_events API", + "fst": "State APIs", + "fpb": "Public Room API", + "fdk": "Device Key APIs", + "fed": "Federation API", + }, + + "client_apis": { + "reg": "Registration", + "log": "Login", + "lox": "Logout", + "v1s": "V1 CS APIs", + "csa": "Misc CS APIs", + "pro": "Profile", + "dev": "Devices", + "dvk": "Device Keys", + "pre": "Presence", + "crm": "Create Room", + "syn": "Sync API", + "rmv": "Room Versions", + "rst": "Room State APIs", + "pub": "Public Room APIs", + "mem": "Room Membership", + "ali": "Room Aliases", + "jon": "Joining Rooms", + "lev": "Leaving Rooms", + "inv": "Inviting users to Rooms", + "ban": "Banning users", + "snd": "Sending events", + "get": "Getting events for Rooms", + "rct": "Receipts", + "red": "Read markers", + "med": "Media APIs", + "cap": "Capabilities API", + "typ": "Typing API", + "psh": "Push APIs", + "acc": "Account APIs", + "eph": "Ephemeral Events", + "plv": "Power Levels", + "xxx": "Redaction", + "3pd": "Third-Party ID APIs", + "gst": "Guest APIs", + "ath": "Room Auth", + "fgt": "Forget APIs", + "ctx": "Context APIs", + "upg": "Room Upgrade APIs", + "tag": "Tagging APIs", + "sch": "Search APIs", + "oid": "OpenID API", + "std": "Send-to-Device APIs", + "adm": "Server Admin API", + "ign": "Ignore Users", + "udr": "User Directory APIs", + "app": "Application Services API", + }, +} + +# optional 'not ' with test number then anything but '#' +re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") + +# Parses lines like the following: +# +# SUCCESS: ok 3 POST /register downcases capitals in usernames +# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version +# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts +# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail +# +# Only SUCCESS lines are treated as success, the rest are not implemented. +# +# Returns a dict like: +# { name: "...", ok: True } +def parse_test_line(line): + if not line.startswith("ok ") and not line.startswith("not ok "): + return + re_match = re_testname.match(line) + test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() + test_pass = False + if line.startswith("ok ") and not "# skip " in line: + test_pass = True + return { + "name": test_name, + "ok": test_pass, + } + +# Prints the stats for a complete section. +# header_name => "Client-Server APIs" +# gid_to_tests => { gid: { : True|False }} +# gid_to_name => { gid: "Group Name" } +# verbose => True|False +# Produces: +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# Login : 7% (1/15 tests) +# V1 CS APIs : 10% (3/30 tests) +# ... 
+# or in verbose mode: +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# ✓ GET /register yields a set of flows +# ✓ POST /register can create a user +# ✓ POST /register downcases capitals in usernames +# ... +def print_stats(header_name, gid_to_tests, gid_to_name, verbose): + subsections = [] # Registration: 100% (13/13 tests) + subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] + total_passing = 0 + total_tests = 0 + for gid, tests in gid_to_tests.items(): + group_total = len(tests) + group_passing = 0 + test_names_and_marks = [] + for name, passing in tests.items(): + if passing: + group_passing += 1 + test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") + + total_tests += group_total + total_passing += group_passing + pct = "{0:.0f}%".format(group_passing/group_total * 100) + line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) + subsections.append(line) + subsection_test_names[line] = test_names_and_marks + + pct = "{0:.0f}%".format(total_passing/total_tests * 100) + print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) + print("-" * (len(header_name)+1)) + for line in subsections: + print(" %s" % (line,)) + if verbose: + for test_name_and_pass_mark in subsection_test_names[line]: + print(" %s" % (test_name_and_pass_mark,)) + print("") + print("") + +def main(results_tap_path, verbose): + # Load up test mappings + test_name_to_group_id = {} + fed_tests = set() + client_tests = set() + groupless_tests = set() + with open("./are-we-synapse-yet.list", "r") as f: + for line in f.readlines(): + test_name = " ".join(line.split(" ")[1:]).strip() + groups = line.split(" ")[0].split(",") + for gid in groups: + if gid == "f" or gid in test_mappings["federation_apis"]: + fed_tests.add(test_name) + else: + client_tests.add(test_name) + if gid == "f": + continue # we expect another group ID + test_name_to_group_id[test_name] = gid + + # parse results.tap + summary = { + "client": { + # gid: { + # test_name: OK + # } + }, + "federation": { + # gid: { + # test_name: OK + # } + }, + "nonspec": { + "nsp": {} + }, + } + with open(results_tap_path, "r") as f: + for line in f.readlines(): + test_result = parse_test_line(line) + if not test_result: + continue + name = test_result["name"] + group_id = test_name_to_group_id.get(name) + if not group_id: + groupless_tests.add(name) + # raise Exception("The test '%s' doesn't have a group" % (name,)) + if group_id == "nsp": + summary["nonspec"]["nsp"][name] = test_result["ok"] + elif group_id in test_mappings["federation_apis"]: + group = summary["federation"].get(group_id, {}) + group[name] = test_result["ok"] + summary["federation"][group_id] = group + elif group_id in test_mappings["client_apis"]: + group = summary["client"].get(group_id, {}) + group[name] = test_result["ok"] + summary["client"][group_id] = group + + print("Are We Synapse Yet?") + print("===================") + print("") + print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) + print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) + print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) + if verbose: + print("The following tests don't have a group:") + for name in groupless_tests: + print(" %s" % (name,)) + else: + print("%d tests don't have a group" % len(groupless_tests)) + + + +if __name__ == '__main__': + 
parser = argparse.ArgumentParser() + parser.add_argument("tap_file", help="path to results.tap") + parser.add_argument("-v", action="store_true", help="show individual test names in output") + args = parser.parse_args() + main(args.tap_file, args.v) diff --git a/sytest/show-expected-fail-tests.sh b/sytest/show-expected-fail-tests.sh new file mode 100755 index 0000000..320d4eb --- /dev/null +++ b/sytest/show-expected-fail-tests.sh @@ -0,0 +1,105 @@ +#! /bin/bash +# +# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) +# and checks whether a test name that exists in the whitelist (that should pass), failed or not. +# +# An optional blacklist file can be added, also containing test names, where if a test name is +# present, the script will not error even if the test is in the whitelist file and failed +# +# For each of these files, lines starting with '#' are ignored. +# +# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] + +results_file=$1 +whitelist_file=$2 +blacklist_file=$3 + +fail_build=0 + +if [ $# -lt 2 ]; then + echo "Usage: $0 results.tap whitelist [blacklist]" + exit 1 +fi + +if [ ! -f "$results_file" ]; then + echo "ERROR: Specified results file '${results_file}' doesn't exist." + fail_build=1 +fi + +if [ ! -f "$whitelist_file" ]; then + echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." + fail_build=1 +fi + +blacklisted_tests=() + +# Check if a blacklist file was provided +if [ $# -eq 3 ]; then + # Read test blacklist file + if [ ! -f "$blacklist_file" ]; then + echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." + fail_build=1 + fi + + # Read each line, ignoring those that start with '#' + blacklisted_tests="" + search_non_comments=$(grep -v '^#' ${blacklist_file}) + while read -r line ; do + # Record the blacklisted test name + blacklisted_tests+=("${line}") + done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop +fi + +[ "$fail_build" = 0 ] || exit 1 + +passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') +tests_to_add="" +already_in_whitelist="" + +while read -r test_name; do + # Ignore empty lines + [ "${test_name}" = "" ] && continue + + grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 + if [ "$?" != "0" ]; then + # Check if this test name is blacklisted + if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then + # Don't notify about this test + continue + fi + + # Append this test_name to the existing list + tests_to_add="${tests_to_add}${test_name}\n" + fail_build=1 + else + already_in_whitelist="${already_in_whitelist}${test_name}\n" + fi +done <<< "${passed_but_expected_fail}" + +# TODO: Check that the same test doesn't exist in both the whitelist and blacklist +# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist + +# Trim test output strings +tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") +already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") + +# Format output with markdown for buildkite annotation rendering purposes +if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then + echo "### 📜 SyTest Whitelist Maintenance" +fi + +if [ -n "${tests_to_add}" ]; then + echo "**ERROR**: The following tests passed but are not present in \`$2\`. 
Please append them to the file:" + echo "\`\`\`" + echo -e "${tests_to_add}" + echo "\`\`\`" +fi + +if [ -n "${already_in_whitelist}" ]; then + echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" + echo "\`\`\`" + echo -e "${already_in_whitelist}" + echo "\`\`\`" +fi + +exit ${fail_build} diff --git a/sytest/sytest-blacklist b/sytest/sytest-blacklist new file mode 100644 index 0000000..e69de29 diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist new file mode 100644 index 0000000..2c118c2 --- /dev/null +++ b/sytest/sytest-whitelist @@ -0,0 +1,85 @@ +# Register endpoints implemented +GET /register yields a set of flows +POST /register can create a user +POST /register downcases capitals in usernames +POST /register rejects registration of usernames with '!' +POST /register rejects registration of usernames with '"' +POST /register rejects registration of usernames with ':' +POST /register rejects registration of usernames with '?' +POST /register rejects registration of usernames with '\' +POST /register rejects registration of usernames with '@' +POST /register rejects registration of usernames with '[' +POST /register rejects registration of usernames with ']' +POST /register rejects registration of usernames with '{' +POST /register rejects registration of usernames with '|' +POST /register rejects registration of usernames with '}' +POST /register rejects registration of usernames with '£' +POST /register rejects registration of usernames with 'é' +POST /register rejects registration of usernames with '\n' +POST /register rejects registration of usernames with ''' +# Login endpoints implemented +GET /login yields a set of flows +POST /login can log in as a user +POST /login returns the same device_id as that in the request +POST /login can log in as a user with just the local part of the id +POST /login as non-existing user is rejected +POST /login wrong password is rejected +# Room creation endpoints implemented +POST /createRoom makes a public room +POST /createRoom makes a private room +POST /createRoom makes a private room with invites +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +Can /sync newly created room +GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +POST /join/:room_alias can join a room +POST /join/:room_id can join a room +POST /join/:room_id can join a room with custom content +POST /join/:room_alias can join a room with custom content +POST /rooms/:room_id/join can join a room +POST /rooms/:room_id/leave can leave a room +POST /rooms/:room_id/invite can send an invite +POST /rooms/:room_id/ban can ban a user +POST /rooms/:room_id/send/:event_type sends a message +PUT /rooms/:room_id/send/:event_type/:txn_id sends a message +PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +GET /rooms/:room_id/state/m.room.power_levels can fetch levels +PUT /rooms/:room_id/state/m.room.power_levels can set levels +PUT power_levels should not explode if the old power levels were empty +Both GET and PUT work +POST /rooms/:room_id/read_markers can create read marker +User signups are forbidden from starting with '_' +Request to logout with invalid an access token is rejected +Request to logout without an access token is rejected +Room creation reports m.room.create to myself +Room creation reports m.room.member to myself +Version responds 200 OK with valid structure +PUT /profile/:user_id/displayname 
sets my name +GET /profile/:user_id/displayname publicly accessible +GET /device/{deviceId} gives a 404 for unknown devices +PUT /device/{deviceId} gives a 404 for unknown devices +After deactivating account, can't log in with an email +Can create filter +Should reject keys claiming to belong to a different user +Can add account data +Checking local federation server +Alternative server names do not cause a routing loop +Can read configuration endpoint +AS cannot create users outside its own namespace +Changing the actions of an unknown default rule fails with 404 +Changing the actions of an unknown rule fails with 404 +Trying to add push rule with invalid scope fails with 400 +Trying to add push rule with invalid template fails with 400 +Trying to add push rule with rule_id with slashes fails with 400 +Trying to add push rule with override rule without conditions fails with 400 +Trying to add push rule with underride rule without conditions fails with 400 +Trying to add push rule with condition without kind fails with 400 +Trying to add push rule with content rule without pattern fails with 400 +Trying to add push rule with no actions fails with 400 +Trying to add push rule with invalid action fails with 400 +Trying to get push rules with unknown rule_id fails with 404 +GET /events with non-numeric 'limit' +GET /events with negative 'limit' +GET /events with non-numeric 'timeout' +Getting push rules doesn't corrupt the cache SYN-390 \ No newline at end of file From d08f91d1c393256e696a78aff6f272b8f6f51f81 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 13 May 2020 10:41:51 +0200 Subject: [PATCH 0079/1727] fix: only allow valid usernames in /register --- .gitignore | 2 ++ Rocket.toml | 10 +++++----- src/client_server.rs | 25 ++++++++++++++----------- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index 53eaa21..ee48b11 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ /target **/*.rs.bk + +Rocket.toml diff --git a/Rocket.toml b/Rocket.toml index 4a7d79a..fb9f6d6 100644 --- a/Rocket.toml +++ b/Rocket.toml @@ -1,8 +1,8 @@ [global] -hostname = "matrixtesting.koesters.xyz:59003" -port = 59003 +server_name = "your.server.name" +port = 8448 address = "0.0.0.0" -[global.tls] -certs = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/fullchain.pem" -key = "/etc/letsencrypt/live/matrixtesting.koesters.xyz/privkey.pem" +#[global.tls] +#certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" +#key = "/etc/letsencrypt/live/your.server.name/privkey.pem" diff --git a/src/client_server.rs b/src/client_server.rs index d32134d..e2d4040 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -65,9 +65,12 @@ pub fn get_register_available_route( body: Ruma, ) -> MatrixResult { // Validate user id - let user_id: UserId = - match (*format!("@{}:{}", body.username.clone(), db.globals.server_name())).try_into() { - Err(_) => { + let user_id = + match UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) + .ok() + .filter(|user_id| !user_id.is_historical()) + { + None => { debug!("Username invalid"); return MatrixResult(Err(Error { kind: ErrorKind::InvalidUsername, @@ -75,7 +78,7 @@ pub fn get_register_available_route( status_code: http::StatusCode::BAD_REQUEST, })); } - Ok(user_id) => user_id, + Some(user_id) => user_id, }; // Check if username is creative enough @@ -112,16 +115,16 @@ pub fn register_route( } // Validate user id - let user_id: UserId = match (*format!( - "@{}:{}", + let user_id = match 
UserId::parse_with_server_name( body.username .clone() .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), - db.globals.server_name() - )) - .try_into() + db.globals.server_name(), + ) + .ok() + .filter(|user_id| !user_id.is_historical()) { - Err(_) => { + None => { debug!("Username invalid"); return MatrixResult(Err(UiaaResponse::MatrixError(Error { kind: ErrorKind::InvalidUsername, @@ -129,7 +132,7 @@ pub fn register_route( status_code: http::StatusCode::BAD_REQUEST, }))); } - Ok(user_id) => user_id, + Some(user_id) => user_id, }; // Check if username is creative enough From 4fb79ebb4cdfab4f3265fd6481abfadcd7a10002 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 14 May 2020 09:10:15 +0200 Subject: [PATCH 0080/1727] chore: update dependencies --- Cargo.lock | 201 ++++++++++++++++------------- Cargo.toml | 14 +- Rocket.toml => Rocket-example.toml | 0 src/client_server.rs | 4 +- src/main.rs | 4 +- 5 files changed, 122 insertions(+), 101 deletions(-) rename Rocket.toml => Rocket-example.toml (100%) diff --git a/Cargo.lock b/Cargo.lock index f059aa4..1734df4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,13 +20,13 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d" +checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -92,9 +92,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" +checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" [[package]] name = "byteorder" @@ -239,7 +239,7 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "devise_core", - "quote 1.0.4", + "quote 1.0.5", ] [[package]] @@ -249,8 +249,8 @@ source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9 dependencies = [ "bitflags", "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -283,9 +283,9 @@ checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" [[package]] name = "encoding_rs" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" +checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" dependencies = [ "cfg-if", ] @@ -339,9 +339,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" +checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" dependencies = [ "futures-channel", "futures-core", @@ -354,9 +354,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" dependencies = [ "futures-core", "futures-sink", @@ -364,15 +364,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" +checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" [[package]] name = "futures-executor" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" +checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" dependencies = [ "futures-core", "futures-task", @@ -381,39 +381,42 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" [[package]] name = "futures-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] name = "futures-sink" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" [[package]] name = "futures-task" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] [[package]] name = "futures-util" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" dependencies = [ "futures-channel", "futures-core", @@ -422,6 +425,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", + "pin-project", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -620,9 +624,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" +checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f" [[package]] name = "lock_api" @@ -768,9 +772,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" +checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" [[package]] name = "openssl" @@ -865,22 +869,22 @@ 
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.13" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c3bfbfb5bb42f99498c7234bbd768c220eb0cea6818259d0d18a1aa3d2595d" +checksum = "81d480cb4e89522ccda96d0eed9af94180b7a5f93fb28f66e1fd7d68431663d1" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.13" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccbf6449dcfb18562c015526b085b8df1aa3cdab180af8ec2ebd300a3bd28f63" +checksum = "a82996f11efccb19b685b14b5df818de31c1edcee3daa256ab5775dd98e72feb" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -948,9 +952,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c1f4b0efa5fc5e8ceb705136bfee52cfdb6a4e3509f770b478cd6ed434232a7" +checksum = "42934bc9c8ab0d3b273a16d8551c8f0fcff46be73276ca083ec2414c15c4ba5e" dependencies = [ "proc-macro2 1.0.12", ] @@ -1104,7 +1108,7 @@ source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8cc dependencies = [ "devise", "indexmap", - "quote 1.0.4", + "quote 1.0.5", "rocket_http", "version_check 0.9.1", "yansi 0.5.0", @@ -1133,15 +1137,15 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4d1f23ec408993a39acb852a311a5469422b0c087a6497efeb0ad9c04ad72db" +checksum = "82ffdb7fb9cf6af2b1d0d8254d922560ecb70081d7e70931c9b996b6b4839db5" dependencies = [ "http", "percent-encoding 2.1.0", "ruma-api-macros", "ruma-identifiers", - "ruma-serde", + "ruma-serde 0.2.0", "serde", "serde_json", "strum", @@ -1149,13 +1153,13 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae19a3485b607be10c07f0e6a1c672e8cc2693a50a29f25195f7034dbe7859c" +checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -1170,7 +1174,7 @@ dependencies = [ "ruma-common", "ruma-events", "ruma-identifiers", - "ruma-serde", + "ruma-serde 0.1.3", "serde", "serde_json", "strum", @@ -1182,35 +1186,35 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "235f2fed35114ef3fbff1b7d5350f22ac712cffff55cc7b7732d39ae4adf6966" dependencies = [ - "ruma-serde", + "ruma-serde 0.1.3", "serde", "serde_json", ] [[package]] name = "ruma-events" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca4eff279e18bdb3daba46381eb08ac5b2d4ab471271b8c2f597073dcbadc415" +checksum = "e952b80bddd83666a84db842d3e4d2ea43f8b4df8134864835c1807bd843fe21" dependencies = [ "js_int", "ruma-common", "ruma-events-macros", "ruma-identifiers", - "ruma-serde", + "ruma-serde 0.1.3", "serde", "serde_json", ] [[package]] name = "ruma-events-macros" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e32130570495f21f922ffa0127438228d475e5f8dfcc201d77b8e717ce1c0f0" +checksum = 
"60256afd4cb1bcdbcd9676e88cbbd09afc2541bd7b0858a559ce9ad7c6cfbb2a" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -1222,7 +1226,7 @@ dependencies = [ "ruma-api", "ruma-events", "ruma-identifiers", - "ruma-serde", + "ruma-serde 0.1.3", "serde", "serde_json", ] @@ -1233,6 +1237,7 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c93b9d5f951a2fb57b19c048a05ac1dbdb280ff7617ec6b02f54bf14318ed8" dependencies = [ + "rand", "serde", ] @@ -1250,6 +1255,20 @@ dependencies = [ "url", ] +[[package]] +name = "ruma-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb00252245980b8f8d9833632e611aba8e1657c9fbefefe2b35c2817fc2b58a4" +dependencies = [ + "dtoa", + "itoa", + "js_int", + "serde", + "serde_json", + "url", +] + [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" @@ -1306,9 +1325,9 @@ checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" [[package]] name = "schannel" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", "winapi 0.3.8", @@ -1355,29 +1374,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.106" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" +checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.106" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" +checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] name = "serde_json" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7894c8ed05b7a3a279aeb79025fdec1d3158080b75b98a08faf2806bb799edd" +checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2" dependencies = [ "itoa", "ryu", @@ -1463,8 +1482,8 @@ checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -1480,12 +1499,12 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.19" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e5aa70697bb26ee62214ae3288465ecec0000f05182f039b477001f08f5ae7" +checksum = "4696caa4048ac7ce2bcd2e484b3cef88c1004e41b8e945a277e2c25dc0b72060" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", + "quote 1.0.5", "unicode-xid 0.2.0", ] @@ -1505,22 +1524,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d12a1dae4add0f0d568eebc7bf142f145ba1aa2544cafb195c76f0f409091b60" +checksum = "467e5ff447618a916519a4e0d62772ab14f434897f3d63f05d8700ef1e9b22c1" dependencies = [ "thiserror-impl", ] [[package]] name = 
"thiserror-impl" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f34e0c1caaa462fd840ec6b768946ea1e7842620d94fe29d5b847138f521269" +checksum = "e63c1091225b9834089b429bc4a2e01223470e3183e891582909e9d1c4cb55d9" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -1535,9 +1554,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c1d570eb1a36f0345a5ce9c6c6e665b70b73d11236912c0b477616aeec47b1" +checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" dependencies = [ "bytes", "fnv", @@ -1563,8 +1582,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", ] [[package]] @@ -1742,8 +1761,8 @@ dependencies = [ "lazy_static", "log", "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", "wasm-bindgen-shared", ] @@ -1765,7 +1784,7 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ - "quote 1.0.4", + "quote 1.0.5", "wasm-bindgen-macro-support", ] @@ -1776,8 +1795,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "quote 1.0.5", + "syn 1.0.21", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index e0a6f10..4d1b4b1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,21 +15,21 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" ruma-client-api = "0.8.0" -ruma-identifiers = "0.16.1" -ruma-api = "0.16.0" -ruma-events = "0.21.0" +ruma-identifiers = { version = "0.16.1", features = ["rand"] } +ruma-api = "0.16.1" +ruma-events = "0.21.1" ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "ccbf216f39bbbaa59131cc200eae5bd18aa1947c" } log = "0.4.8" sled = "0.31.0" directories = "2.0.2" js_int = "0.1.5" -serde_json = "1.0.52" -serde = "1.0.106" -tokio = { version = "0.2.20", features = ["macros"] } +serde_json = "1.0.53" +serde = "1.0.110" +tokio = { version = "0.2.21", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" base64 = "0.12.1" -thiserror = "1.0.16" +thiserror = "1.0.17" ruma-common = "0.1.1" diff --git a/Rocket.toml b/Rocket-example.toml similarity index 100% rename from Rocket.toml rename to Rocket-example.toml diff --git a/src/client_server.rs b/src/client_server.rs index e2d4040..b66993a 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -772,7 +772,7 @@ pub fn create_room_route( body: Ruma, ) -> MatrixResult { // TODO: check if room is unique - let room_id = RoomId::try_from(db.globals.server_name()).expect("host is valid"); + let room_id = RoomId::new(db.globals.server_name()).expect("host is valid"); let user_id = body.user_id.as_ref().expect("user is authenticated"); db.rooms @@ 
-1039,6 +1039,7 @@ pub async fn get_public_rooms_filtered_route( chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + /* chunk.extend_from_slice( &server_server::send_request( &db, @@ -1056,6 +1057,7 @@ pub async fn get_public_rooms_filtered_route( .map(|c| serde_json::from_str(&serde_json::to_string(&c).unwrap()).unwrap()) .collect::>(), ); + */ let total_room_count_estimate = (chunk.len() as u32).into(); diff --git a/src/main.rs b/src/main.rs index bef55ac..514a8dd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -83,8 +83,8 @@ fn setup_rocket() -> rocket::Rocket { fn main() { // Log info by default - if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", "warn"); + if std::env::var("ROCKET_LOG").is_err() { + std::env::set_var("ROCKET_LOG", "critical"); } setup_rocket().launch().unwrap(); From 7fc71b3968948bc609a28a7a130ee47c81f23083 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 17 May 2020 19:56:40 +0200 Subject: [PATCH 0081/1727] feat: end to end encryption --- src/client_server.rs | 246 +++++++++++++++++++++++++++++------ src/database.rs | 3 + src/database/account_data.rs | 43 +++--- src/database/global_edus.rs | 2 +- src/database/users.rs | 222 ++++++++++++++++++++++++++++++- src/main.rs | 3 +- src/server_server.rs | 7 +- 7 files changed, 461 insertions(+), 65 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index b66993a..8627dcb 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -4,7 +4,7 @@ use std::{ time::{Duration, SystemTime}, }; -use log::debug; +use log::{debug, warn}; use rocket::{get, options, post, put, State}; use ruma_client_api::{ error::{Error, ErrorKind}, @@ -15,7 +15,7 @@ use ruma_client_api::{ config::{get_global_account_data, set_global_account_data}, directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, - keys::{get_keys, upload_keys}, + keys::{claim_keys, get_keys, upload_keys}, media::get_media_config, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, @@ -33,7 +33,7 @@ use ruma_client_api::{ state::{create_state_event_for_empty_key, create_state_event_for_key}, sync::sync_events, thirdparty::get_protocols, - to_device::send_event_to_device, + to_device::{self, send_event_to_device}, typing::create_typing_event, uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, user_directory::search_users, @@ -176,7 +176,8 @@ pub fn register_route( .update( None, &user_id, - EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + &EventType::PushRules, + serde_json::to_value(ruma_events::push_rules::PushRulesEvent { content: ruma_events::push_rules::PushRulesEventContent { global: ruma_events::push_rules::Ruleset { content: vec![], @@ -202,7 +203,8 @@ pub fn register_route( }], }, }, - }), + }) + .unwrap(), &db.globals, ) .unwrap(); @@ -353,7 +355,8 @@ pub fn set_pushrule_route( .update( None, &user_id, - EduEvent::PushRules(ruma_events::push_rules::PushRulesEvent { + &EventType::PushRules, + serde_json::to_value(ruma_events::push_rules::PushRulesEvent { content: ruma_events::push_rules::PushRulesEventContent { global: ruma_events::push_rules::Ruleset { content: vec![], @@ -379,7 +382,8 @@ pub fn set_pushrule_route( }], }, }, - }), + }) + .unwrap(), &db.globals, ) .unwrap(); @@ -422,25 +426,56 @@ pub fn create_filter_route(_user_id: String) -> MatrixResult/account_data/<_type>")] +#[put( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] pub fn set_global_account_data_route( + db: State<'_, 
Database>, + body: Ruma, _user_id: String, _type: String, ) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + db.account_data + .update( + None, + user_id, + &EventType::try_from(&body.event_type).unwrap(), + serde_json::from_str(body.data.get()).unwrap(), + &db.globals, + ) + .unwrap(); + MatrixResult(Ok(set_global_account_data::Response)) } -#[get("/_matrix/client/r0/user/<_user_id>/account_data/<_type>")] +#[get( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] pub fn get_global_account_data_route( + db: State<'_, Database>, + body: Ruma, _user_id: String, _type: String, ) -> MatrixResult { - // TODO - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Data not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if let Some(data) = db + .account_data + .get(None, user_id, &EventType::try_from(&body.event_type).unwrap()) + .unwrap() + { + MatrixResult(Ok(get_global_account_data::Response { account_data: data })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Data not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } } #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] @@ -648,20 +683,93 @@ pub fn set_presence_route( MatrixResult(Ok(set_presence::Response)) } -#[post("/_matrix/client/r0/keys/query")] -pub fn get_keys_route() -> MatrixResult { - // TODO - MatrixResult(Ok(get_keys::Response { - failures: BTreeMap::new(), - device_keys: BTreeMap::new(), +#[post("/_matrix/client/r0/keys/upload", data = "")] +pub fn upload_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + if let Some(one_time_keys) = &body.one_time_keys { + for (key_key, key_value) in one_time_keys { + db.users + .add_one_time_key(user_id, device_id, key_key, key_value) + .unwrap(); + } + } + + if let Some(device_keys) = &body.device_keys { + db.users + .add_device_keys(user_id, device_id, device_keys) + .unwrap(); + } + + MatrixResult(Ok(upload_keys::Response { + one_time_key_counts: db.users.count_one_time_keys(user_id, device_id).unwrap(), })) } -#[post("/_matrix/client/r0/keys/upload")] -pub fn upload_keys_route() -> MatrixResult { - // TODO - MatrixResult(Ok(upload_keys::Response { - one_time_key_counts: BTreeMap::new(), +#[post("/_matrix/client/r0/keys/query", data = "")] +pub fn get_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let mut device_keys = BTreeMap::new(); + + for (user_id, device_ids) in &body.device_keys { + if device_ids.is_empty() { + let mut container = BTreeMap::new(); + for (device_id, keys) in db + .users + .all_device_keys(&user_id.clone()) + .map(|r| r.unwrap()) + { + container.insert(device_id, keys); + } + device_keys.insert(user_id.clone(), container); + } else { + for device_id in device_ids { + let mut container = BTreeMap::new(); + for keys in db.users.get_device_keys(&user_id.clone(), &device_id) { + container.insert(device_id.clone(), keys.unwrap()); + } + device_keys.insert(user_id.clone(), container); + } + } + } + + MatrixResult(Ok(get_keys::Response { + failures: BTreeMap::new(), + device_keys, + })) +} + +#[post("/_matrix/client/r0/keys/claim", data = "")] +pub fn claim_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> 
MatrixResult { + let mut one_time_keys = BTreeMap::new(); + for (user_id, map) in &body.one_time_keys { + let mut container = BTreeMap::new(); + for (device_id, key_algorithm) in map { + if let Some(one_time_keys) = db + .users + .take_one_time_key(user_id, device_id, key_algorithm) + .unwrap() + { + let mut c = BTreeMap::new(); + c.insert(one_time_keys.0, one_time_keys.1); + container.insert(device_id.clone(), c); + } + } + one_time_keys.insert(user_id.clone(), container); + } + + MatrixResult(Ok(claim_keys::Response { + failures: BTreeMap::new(), + one_time_keys, })) } @@ -672,16 +780,19 @@ pub fn set_read_marker_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + db.account_data .update( Some(&body.room_id), &user_id, - EduEvent::FullyRead(ruma_events::fully_read::FullyReadEvent { + &EventType::FullyRead, + serde_json::to_value(ruma_events::fully_read::FullyReadEvent { content: ruma_events::fully_read::FullyReadEventContent { event_id: body.fully_read.clone(), }, room_id: Some(body.room_id.clone()), - }), + }) + .unwrap(), &db.globals, ) .unwrap(); @@ -745,7 +856,7 @@ pub fn create_typing_event_route( content: ruma_events::typing::TypingEventContent { user_ids: vec![user_id.clone()], }, - room_id: None, // None because it can be inferred + room_id: Some(body.room_id.clone()), // TODO: Can be None because it can be inferred }); if body.typing { @@ -860,7 +971,7 @@ pub fn get_alias_route( body: Ruma, _room_alias: String, ) -> MatrixResult { - // TODO + warn!("TODO: get_alias_route"); let room_id = if body.room_alias.server_name() == db.globals.server_name() { match body.room_alias.alias() { "conduit" => "!lgOCCXQKtXOAPlAlG5:conduit.rs", @@ -1092,13 +1203,13 @@ pub fn search_users_route( #[get("/_matrix/client/r0/rooms/<_room_id>/members")] pub fn get_member_events_route(_room_id: String) -> MatrixResult { - // TODO + warn!("TODO: get_member_events_route"); MatrixResult(Ok(get_member_events::Response { chunk: Vec::new() })) } #[get("/_matrix/client/r0/thirdparty/protocols")] pub fn get_protocols_route() -> MatrixResult { - // TODO + warn!("TODO: get_protocols_route"); MatrixResult(Ok(get_protocols::Response { protocols: BTreeMap::new(), })) @@ -1208,6 +1319,8 @@ pub fn sync_route( ) -> MatrixResult { std::thread::sleep(Duration::from_millis(1500)); let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + let next_batch = db.globals.current_count().unwrap().to_string(); let mut joined_rooms = BTreeMap::new(); @@ -1277,7 +1390,7 @@ pub fn sync_route( content: ruma_events::typing::TypingEventContent { user_ids: Vec::new(), }, - room_id: None, // None because it can be inferred + room_id: Some(room_id.clone()), // None because it can be inferred }) .into(), ); @@ -1403,10 +1516,22 @@ pub fn sync_route( .global_edus .globallatests_since(since) .unwrap() - .map(|edu| { - EventJson::::from( - edu.unwrap().json().to_owned(), + .filter_map(|edu| { + // Only look for presence events + if let Ok(mut edu) = EventJson::::from( + edu.unwrap().into_json(), ) + .deserialize() + { + let timestamp = edu.content.last_active_ago.unwrap(); + edu.content.last_active_ago = Some( + js_int::UInt::try_from(utils::millis_since_unix_epoch()).unwrap() + - timestamp + ); + Some(edu.into()) + } else { + None + } }) .collect(), }, @@ -1421,7 +1546,12 @@ pub fn sync_route( }, device_lists: Default::default(), device_one_time_keys_count: Default::default(), - to_device: 
sync_events::ToDevice { events: Vec::new() }, + to_device: sync_events::ToDevice { + events: db + .users + .take_to_device_events(user_id, device_id, 100) + .unwrap(), + }, })) } @@ -1468,7 +1598,7 @@ pub fn get_message_events_route( #[get("/_matrix/client/r0/voip/turnServer")] pub fn turn_server_route() -> MatrixResult { - // TODO + warn!("TODO: turn_server_route"); MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "There is no turn server yet.".to_owned(), @@ -1478,7 +1608,7 @@ pub fn turn_server_route() -> MatrixResult { #[post("/_matrix/client/r0/publicised_groups")] pub fn publicised_groups_route() -> MatrixResult { - // TODO + warn!("TODO: publicised_groups_route"); MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "There are no publicised groups yet.".to_owned(), @@ -1486,18 +1616,48 @@ pub fn publicised_groups_route() -> MatrixResult })) } -#[put("/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>")] +#[put( + "/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>", + data = "" +)] pub fn send_event_to_device_route( + db: State<'_, Database>, + body: Ruma, _event_type: String, _txn_id: String, ) -> MatrixResult { - // TODO + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + for (target_user_id, map) in &body.messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => db + .users + .add_to_device_event( + user_id, + &target_user_id, + &target_device_id, + &body.event_type, + serde_json::from_str(event.get()).unwrap(), + &db.globals, + ) + .unwrap(), + + to_device::DeviceIdOrAllDevices::AllDevices => { + for target_device_id in db.users.all_device_ids(&target_user_id) { + target_device_id.unwrap(); + } + } + } + } + } + MatrixResult(Ok(send_event_to_device::Response)) } #[get("/_matrix/media/r0/config")] pub fn get_media_config_route() -> MatrixResult { - // TODO + warn!("TODO: get_media_config_route"); MatrixResult(Ok(get_media_config::Response { upload_size: 0_u32.into(), })) @@ -1509,7 +1669,7 @@ pub fn options_route( ) -> MatrixResult { MatrixResult(Err(Error { kind: ErrorKind::NotFound, - message: "This is the options route.".to_owned(), + message: "".to_owned(), status_code: http::StatusCode::OK, })) } diff --git a/src/database.rs b/src/database.rs index 0bd3aa0..4cdad87 100644 --- a/src/database.rs +++ b/src/database.rs @@ -59,6 +59,9 @@ impl Database { userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), token_userdeviceid: db.open_tree("token_userdeviceid").unwrap(), + onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys").unwrap(), + userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys").unwrap(), + todeviceid_events: db.open_tree("todeviceid_events").unwrap(), }, rooms: rooms::Rooms { edus: rooms::RoomEdus { diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 7ade70c..659d3e5 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,7 +1,7 @@ -use crate::Result; -use ruma_events::{collections::only::Event as EduEvent, EventJson}; +use crate::{utils, Error, Result}; +use ruma_events::{collections::only::Event as EduEvent, EventJson, EventType}; use ruma_identifiers::{RoomId, UserId}; -use std::collections::HashMap; +use std::{collections::HashMap, convert::TryFrom}; pub struct AccountData { pub(super) roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + 
Type @@ -13,7 +13,8 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - event: EduEvent, + kind: &EventType, + data: serde_json::Value, globals: &super::globals::Globals, ) -> Result<()> { let mut prefix = room_id @@ -48,11 +49,10 @@ impl AccountData { let mut key = prefix; key.extend_from_slice(&globals.next_count()?.to_be_bytes()); key.push(0xff); - let json = serde_json::to_value(&event)?; - key.extend_from_slice(json["type"].as_str().unwrap().as_bytes()); + key.extend_from_slice(kind.to_string().as_bytes()); self.roomuserdataid_accountdata - .insert(key, &*json.to_string()) + .insert(key, &*data.to_string()) .unwrap(); Ok(()) @@ -64,7 +64,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: &str, + kind: &EventType, ) -> Result>> { Ok(self.all(room_id, user_id)?.remove(kind)) } @@ -75,7 +75,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -91,17 +91,30 @@ impl AccountData { let mut first_possible = prefix.clone(); first_possible.extend_from_slice(&(since + 1).to_be_bytes()); - for json in self + for r in self .roomuserdataid_accountdata .range(&*first_possible..) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| serde_json::from_slice::(&v).unwrap()) + .map(|(k, v)| { + Ok::<_, Error>(( + EventType::try_from(utils::string_from_bytes( + k.rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("roomuserdataid is invalid"))?, + )?) + .map_err(|_| Error::BadDatabase("roomuserdataid is invalid"))?, + serde_json::from_slice::(&v).unwrap(), + )) + }) { + let (kind, content) = r.unwrap(); + let mut json = serde_json::Map::new(); + json.insert("content".to_owned(), content); + json.insert("type".to_owned(), kind.to_string().into()); userdata.insert( - json["type"].as_str().unwrap().to_owned(), - serde_json::from_value::>(json) - .expect("userdata in db is valid"), + kind, + serde_json::from_value::>(json.into())?, ); } @@ -113,7 +126,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - ) -> Result>> { + ) -> Result>> { self.changes_since(room_id, user_id, 0) } } diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index db44674..f665260 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -3,8 +3,8 @@ use ruma_events::{collections::only::Event as EduEvent, EventJson}; use ruma_identifiers::UserId; pub struct GlobalEdus { + //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count pub(super) globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + UserId - //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count } impl GlobalEdus { diff --git a/src/database/users.rs b/src/database/users.rs index 5a8b0aa..ced5fe1 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,6 +1,9 @@ use crate::{utils, Error, Result}; -use ruma_identifiers::UserId; -use std::convert::TryFrom; +use js_int::UInt; +use ruma_client_api::r0::keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}; +use ruma_events::{to_device::AnyToDeviceEvent, EventJson, EventType}; +use ruma_identifiers::{DeviceId, UserId}; +use std::{collections::BTreeMap, convert::TryFrom}; pub struct Users { pub(super) userid_password: sled::Tree, @@ -9,6 +12,11 @@ pub struct Users { pub(super) userdeviceids: sled::Tree, pub(super) userdeviceid_token: sled::Tree, 
pub(super) token_userdeviceid: sled::Tree, + + pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId + pub(super) userdeviceid_devicekeys: sled::Tree, + + pub(super) todeviceid_events: sled::Tree, // ToDeviceId = UserId + DeviceId + Count } impl Users { @@ -96,7 +104,7 @@ impl Users { } /// Adds a new device to a user. - pub fn create_device(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + pub fn create_device(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { if !self.exists(user_id)? { return Err(Error::BadRequest( "tried to create device for nonexistent user", @@ -114,8 +122,22 @@ impl Users { Ok(()) } + /// Returns an iterator over all device ids of this user. + pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + self.userdeviceids.scan_prefix(prefix).keys().map(|bytes| { + Ok(utils::string_from_bytes( + &*bytes? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, + )?) + }) + } + /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -138,4 +160,196 @@ impl Users { Ok(()) } + + pub fn add_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + one_time_key_key: &AlgorithmAndDeviceId, + one_time_key_value: &OneTimeKey, + ) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + + if self.userdeviceids.get(&key)?.is_none() { + return Err(Error::BadRequest( + "Tried to set token for nonexistent device", + )); + } + + key.push(0xff); + // TODO: Use AlgorithmAndDeviceId::to_string when it's available (and update everything, + // because there are no wrapping quotation marks anymore) + key.extend_from_slice(&serde_json::to_string(one_time_key_key)?.as_bytes()); + + self.onetimekeyid_onetimekeys + .insert(&key, &*serde_json::to_string(&one_time_key_value)?)?; + + Ok(()) + } + + pub fn take_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + key_algorithm: &KeyAlgorithm, + ) -> Result> { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.push(b'"'); // Annoying quotation mark + prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); + prefix.push(b':'); + + self.onetimekeyid_onetimekeys + .scan_prefix(&prefix) + .next() + .map(|r| { + let (key, value) = r?; + Ok(( + serde_json::from_slice( + &*key + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("onetimekeyid is invalid"))?, + )?, + serde_json::from_slice(&*value)?, + )) + }) + .transpose() + } + + pub fn count_one_time_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + let mut counts = BTreeMap::new(); + + for algorithm in self + .onetimekeyid_onetimekeys + .scan_prefix(&userdeviceid) + .keys() + .map(|bytes| { + Ok::<_, Error>( + serde_json::from_slice::( + &*bytes? 
+ .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("onetimekeyid is invalid"))?, + )? + .0, + ) + }) + { + *counts.entry(algorithm?).or_default() += UInt::from(1_u32); + } + + Ok(counts) + } + + pub fn add_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + device_keys: &DeviceKeys, + ) -> Result<()> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.userdeviceid_devicekeys + .insert(&userdeviceid, &*serde_json::to_string(&device_keys)?)?; + + Ok(()) + } + + pub fn get_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> impl Iterator> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + + self.userdeviceid_devicekeys + .scan_prefix(key) + .values() + .map(|bytes| Ok(serde_json::from_slice(&bytes?)?)) + } + + pub fn all_device_keys( + &self, + user_id: &UserId, + ) -> impl Iterator> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + + self.userdeviceid_devicekeys.scan_prefix(key).map(|r| { + let (key, value) = r?; + Ok(( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, + )?, + serde_json::from_slice(&*value)?, + )) + }) + } + + pub fn add_to_device_event( + &self, + sender: &UserId, + target_user_id: &UserId, + target_device_id: &DeviceId, + event_type: &EventType, + content: serde_json::Value, + globals: &super::globals::Globals, + ) -> Result<()> { + let mut key = target_user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(target_device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + + let mut json = serde_json::Map::new(); + json.insert("type".to_owned(), event_type.to_string().into()); + json.insert("sender".to_owned(), sender.to_string().into()); + json.insert("content".to_owned(), content); + + self.todeviceid_events + .insert(&key, &*serde_json::to_string(&json)?)?; + + Ok(()) + } + + pub fn take_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + max: usize, + ) -> Result>> { + let mut events = Vec::new(); + + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + + for result in self.todeviceid_events.scan_prefix(&prefix).take(max) { + let (key, value) = result?; + events.push(serde_json::from_slice(&*value)?); + self.todeviceid_events.remove(key)?; + } + + Ok(events) + } } diff --git a/src/main.rs b/src/main.rs index 514a8dd..717ef96 100644 --- a/src/main.rs +++ b/src/main.rs @@ -43,8 +43,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_avatar_url_route, client_server::get_profile_route, client_server::set_presence_route, - client_server::get_keys_route, client_server::upload_keys_route, + client_server::get_keys_route, + client_server::claim_keys_route, client_server::set_read_marker_route, client_server::create_typing_event_route, client_server::create_room_route, diff --git a/src/server_server.rs b/src/server_server.rs index 2fcbe98..84ca5cc 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -166,7 +166,12 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { .body(), ) .unwrap(); - ruma_signatures::sign_json(db.globals.server_name(), db.globals.keypair(), &mut response).unwrap(); + ruma_signatures::sign_json( + 
db.globals.server_name(), + db.globals.keypair(), + &mut response, + ) + .unwrap(); Json(response.to_string()) } From 6215218c3c7488d5e55a6c0a83f52230dcd60aaa Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 17 May 2020 21:28:36 +0200 Subject: [PATCH 0082/1727] fix: account data json --- src/client_server.rs | 20 ++++++++++++++++---- src/database/account_data.rs | 23 +++++++++++------------ 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 8627dcb..d7e129c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -204,6 +204,8 @@ pub fn register_route( }, }, }) + .unwrap() + .as_object_mut() .unwrap(), &db.globals, ) @@ -383,6 +385,8 @@ pub fn set_pushrule_route( }, }, }) + .unwrap() + .as_object_mut() .unwrap(), &db.globals, ) @@ -443,7 +447,9 @@ pub fn set_global_account_data_route( None, user_id, &EventType::try_from(&body.event_type).unwrap(), - serde_json::from_str(body.data.get()).unwrap(), + json!({"content": serde_json::from_str::(body.data.get()).unwrap()}) + .as_object_mut() + .unwrap(), &db.globals, ) .unwrap(); @@ -465,7 +471,11 @@ pub fn get_global_account_data_route( if let Some(data) = db .account_data - .get(None, user_id, &EventType::try_from(&body.event_type).unwrap()) + .get( + None, + user_id, + &EventType::try_from(&body.event_type).unwrap(), + ) .unwrap() { MatrixResult(Ok(get_global_account_data::Response { account_data: data })) @@ -792,6 +802,8 @@ pub fn set_read_marker_route( }, room_id: Some(body.room_id.clone()), }) + .unwrap() + .as_object_mut() .unwrap(), &db.globals, ) @@ -1317,7 +1329,7 @@ pub fn sync_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - std::thread::sleep(Duration::from_millis(1500)); + std::thread::sleep(Duration::from_millis(1000)); let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -1526,7 +1538,7 @@ pub fn sync_route( let timestamp = edu.content.last_active_ago.unwrap(); edu.content.last_active_ago = Some( js_int::UInt::try_from(utils::millis_since_unix_epoch()).unwrap() - - timestamp + - timestamp, ); Some(edu.into()) } else { diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 659d3e5..21982de 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -14,9 +14,14 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, kind: &EventType, - data: serde_json::Value, + json: &mut serde_json::Map, globals: &super::globals::Globals, ) -> Result<()> { + if json.get("content").is_none() { + return Err(Error::BadRequest("json needs to have a content field")); + } + json.insert("type".to_owned(), kind.to_string().into()); + let mut prefix = room_id .map(|r| r.to_string()) .unwrap_or_default() @@ -43,7 +48,6 @@ impl AccountData { { // This is the old room_latest self.roomuserdataid_accountdata.remove(old)?; - println!("removed old account data"); } let mut key = prefix; @@ -52,7 +56,7 @@ impl AccountData { key.extend_from_slice(kind.to_string().as_bytes()); self.roomuserdataid_accountdata - .insert(key, &*data.to_string()) + .insert(key, &*serde_json::to_string(&json)?) .unwrap(); Ok(()) @@ -104,18 +108,13 @@ impl AccountData { .ok_or(Error::BadDatabase("roomuserdataid is invalid"))?, )?) 
.map_err(|_| Error::BadDatabase("roomuserdataid is invalid"))?, - serde_json::from_slice::(&v).unwrap(), + serde_json::from_slice::>(&v).unwrap(), )) }) { - let (kind, content) = r.unwrap(); - let mut json = serde_json::Map::new(); - json.insert("content".to_owned(), content); - json.insert("type".to_owned(), kind.to_string().into()); - userdata.insert( - kind, - serde_json::from_value::>(json.into())?, - ); + let (kind, data) = r.unwrap(); + &data.deserialize(); + userdata.insert(kind, data); } Ok(userdata) From 56dd6cb16ff8bd290d15595381b45087f790ee60 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 18 May 2020 09:22:07 +0200 Subject: [PATCH 0083/1727] improvement: get state --- Cargo.toml | 2 +- src/client_server.rs | 85 +++++++++++++++++++++++++++++++++++++++++--- src/main.rs | 3 ++ 3 files changed, 85 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4d1b4b1..e6ed01e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ log = "0.4.8" sled = "0.31.0" directories = "2.0.2" js_int = "0.1.5" -serde_json = "1.0.53" +serde_json = { version = "1.0.53", features = ["raw_value"] } serde = "1.0.110" tokio = { version = "0.2.21", features = ["macros"] } rand = "0.7.3" diff --git a/src/client_server.rs b/src/client_server.rs index d7e129c..6030952 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -30,7 +30,10 @@ use ruma_client_api::{ read_marker::set_read_marker, room::create_room, session::{get_login_types, login}, - state::{create_state_event_for_empty_key, create_state_event_for_key}, + state::{ + create_state_event_for_empty_key, create_state_event_for_key, get_state_events, + get_state_events_for_empty_key, get_state_events_for_key, + }, sync::sync_events, thirdparty::get_protocols, to_device::{self, send_event_to_device}, @@ -1233,10 +1236,10 @@ pub fn get_protocols_route() -> MatrixResult { )] pub fn create_message_event_route( db: State<'_, Database>, + body: Ruma, _room_id: String, _event_type: String, _txn_id: String, - body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); @@ -1267,10 +1270,10 @@ pub fn create_message_event_route( )] pub fn create_state_event_for_key_route( db: State<'_, Database>, + body: Ruma, _room_id: String, _event_type: String, _state_key: String, - body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); @@ -1299,9 +1302,9 @@ pub fn create_state_event_for_key_route( )] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, + body: Ruma, _room_id: String, _event_type: String, - body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); @@ -1324,6 +1327,80 @@ pub fn create_state_event_for_empty_key_route( })) } +#[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] +pub fn get_state_events_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + MatrixResult(Ok(get_state_events::Response { + room_state: db + .rooms + .room_state(&body.room_id) + .unwrap() + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + })) +} + +#[get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" +)] +pub fn get_state_events_for_key_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, + _event_type: String, + _state_key: String, +) -> MatrixResult { + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + .get(&(body.event_type.clone(), body.state_key.clone())) + { + 
MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(event).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } +} + +#[get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" +)] +pub fn get_state_events_for_empty_key_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, + _event_type: String, +) -> MatrixResult { + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + .get(&(body.event_type.clone(), "".to_owned())) + { + MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(event).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } +} + #[get("/_matrix/client/r0/sync", data = "")] pub fn sync_route( db: State<'_, Database>, diff --git a/src/main.rs b/src/main.rs index 717ef96..7581da1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -62,6 +62,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::create_message_event_route, client_server::create_state_event_for_key_route, client_server::create_state_event_for_empty_key_route, + client_server::get_state_events_route, + client_server::get_state_events_for_key_route, + client_server::get_state_events_for_empty_key_route, client_server::sync_route, client_server::get_message_events_route, client_server::turn_server_route, From f0aed35ecf78e029f174c0d93524a4ffc6ad3ce8 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 18 May 2020 10:29:45 +0200 Subject: [PATCH 0084/1727] improvement: user dir search also searches in displaynames --- src/client_server.rs | 15 +++++++++++---- src/database/account_data.rs | 1 - 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6030952..054a0bc 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1205,11 +1205,18 @@ pub fn search_users_route( .users .iter() .map(Result::unwrap) - .filter(|user_id| user_id.to_string().contains(&body.search_term)) .map(|user_id| search_users::User { - user_id, - display_name: None, - avatar_url: None, + user_id: user_id.clone(), + display_name: db.users.displayname(&user_id).unwrap(), + avatar_url: db.users.avatar_url(&user_id).unwrap(), + }) + .filter(|user| { + user.user_id.to_string().contains(&body.search_term) + || user + .display_name + .as_ref() + .filter(|name| name.contains(&body.search_term)) + .is_some() }) .collect(), limited: false, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 21982de..e09ef2c 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -113,7 +113,6 @@ impl AccountData { }) { let (kind, data) = r.unwrap(); - &data.deserialize(); userdata.insert(kind, data); } From 42ae433b25e8d651120a346d24440da55f35fe07 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 18 May 2020 11:36:32 +0200 Subject: [PATCH 0085/1727] fix: send devicekeyupdate users in /sync --- src/client_server.rs | 23 +++++++++++++++-------- src/database.rs | 1 + src/database/users.rs | 12 ++++++++++++ 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 054a0bc..ab70b31 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -714,7 +714,7 @@ pub fn upload_keys_route( if let 
Some(device_keys) = &body.device_keys { db.users - .add_device_keys(user_id, device_id, device_keys) + .add_device_keys(user_id, device_id, device_keys, &db.globals) .unwrap(); } @@ -1640,7 +1640,18 @@ pub fn sync_route( .map(|(_, v)| v) .collect(), }, - device_lists: Default::default(), + device_lists: if since != 0 { + Some(sync_events::DeviceLists { + changed: db + .users + .device_keys_changed(since) + .map(|u| u.unwrap().to_string()) + .collect(), // TODO: use userids when ruma changes + left: Vec::new(), // TODO + }) + } else { + None // TODO: left + }, device_one_time_keys_count: Default::default(), to_device: sync_events::ToDevice { events: db @@ -1762,10 +1773,6 @@ pub fn get_media_config_route() -> MatrixResult { #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, -) -> MatrixResult { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "".to_owned(), - status_code: http::StatusCode::OK, - })) +) -> MatrixResult { + MatrixResult(Ok(send_event_to_device::Response)) } diff --git a/src/database.rs b/src/database.rs index 4cdad87..77ea2f9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -61,6 +61,7 @@ impl Database { token_userdeviceid: db.open_tree("token_userdeviceid").unwrap(), onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys").unwrap(), userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys").unwrap(), + devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid").unwrap(), todeviceid_events: db.open_tree("todeviceid_events").unwrap(), }, rooms: rooms::Rooms { diff --git a/src/database/users.rs b/src/database/users.rs index ced5fe1..1b56066 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -15,6 +15,7 @@ pub struct Users { pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId pub(super) userdeviceid_devicekeys: sled::Tree, + pub(super) devicekeychangeid_userid: sled::Tree, // DeviceKeyChangeId = Count pub(super) todeviceid_events: sled::Tree, // ToDeviceId = UserId + DeviceId + Count } @@ -259,6 +260,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, device_keys: &DeviceKeys, + globals: &super::globals::Globals, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); @@ -267,6 +269,9 @@ impl Users { self.userdeviceid_devicekeys .insert(&userdeviceid, &*serde_json::to_string(&device_keys)?)?; + self.devicekeychangeid_userid + .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; + Ok(()) } @@ -285,6 +290,13 @@ impl Users { .map(|bytes| Ok(serde_json::from_slice(&bytes?)?)) } + pub fn device_keys_changed(&self, since: u64) -> impl Iterator> { + self.devicekeychangeid_userid + .range(since.to_be_bytes()..) 
+ .values() + .map(|bytes| Ok(UserId::try_from(utils::string_from_bytes(&bytes?)?)?)) + } + pub fn all_device_keys( &self, user_id: &UserId, From 821c608c6a9bfafd11a6f4654852f3778f713049 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 18 May 2020 17:53:34 +0200 Subject: [PATCH 0086/1727] feat: media --- src/client_server.rs | 69 +++++++++++++++++++++++++++++++++++++++---- src/database.rs | 5 ++++ src/database/media.rs | 56 +++++++++++++++++++++++++++++++++++ src/main.rs | 3 ++ src/ruma_wrapper.rs | 12 +++----- 5 files changed, 132 insertions(+), 13 deletions(-) create mode 100644 src/database/media.rs diff --git a/src/client_server.rs b/src/client_server.rs index ab70b31..30b409c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -16,7 +16,7 @@ use ruma_client_api::{ directory::{self, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{claim_keys, get_keys, upload_keys}, - media::get_media_config, + media::{create_content, get_content_thumbnail, get_content, get_media_config}, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, leave_room, @@ -53,6 +53,7 @@ const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; const SESSION_ID_LENGTH: usize = 256; const TOKEN_LENGTH: usize = 256; +const MXC_LENGTH: usize = 256; #[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> MatrixResult { @@ -1259,7 +1260,7 @@ pub fn create_message_event_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone(), + body.json_body.clone().unwrap(), Some(unsigned), None, &db.globals, @@ -1291,7 +1292,7 @@ pub fn create_state_event_for_key_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone(), + body.json_body.clone().unwrap(), None, Some(body.state_key.clone()), &db.globals, @@ -1322,7 +1323,7 @@ pub fn create_state_event_for_empty_key_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone(), + body.json_body.clone().unwrap(), None, Some("".to_owned()), &db.globals, @@ -1766,10 +1767,68 @@ pub fn send_event_to_device_route( pub fn get_media_config_route() -> MatrixResult { warn!("TODO: get_media_config_route"); MatrixResult(Ok(get_media_config::Response { - upload_size: 0_u32.into(), + upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB })) } +#[post("/_matrix/media/r0/upload", data = "")] +pub fn create_content_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let mxc = format!("mxc://{}/{}", db.globals.server_name(), utils::random_string(MXC_LENGTH)); + db.media + .create(mxc.clone(), body.filename.as_ref(), &body.content_type, &body.file) + .unwrap(); + + MatrixResult(Ok(create_content::Response { + content_uri: mxc, + })) +} + +#[get("/_matrix/media/r0/download/<_server_name>/<_media_id>", data = "")] +pub fn get_content_route( + db: State<'_, Database>, + body: Ruma, + _server_name: String, + _media_id: String, +) -> MatrixResult { + if let Some((filename, content_type, file)) = db.media.get(format!("mxc://{}/{}", body.server_name, body.media_id)).unwrap() { + MatrixResult(Ok(get_content::Response { + file, + content_type, + content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Media not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } +} + 
+#[get("/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", data = "")] +pub fn get_content_thumbnail_route( + db: State<'_, Database>, + body: Ruma, + _server_name: String, + _media_id: String, +) -> MatrixResult { + if let Some((_, content_type, file)) = db.media.get(format!("mxc://{}/{}", body.server_name, body.media_id)).unwrap() { + MatrixResult(Ok(get_content_thumbnail::Response { + file, + content_type, + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Media not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, + })) + } +} + #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, diff --git a/src/database.rs b/src/database.rs index 77ea2f9..9c08a22 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,6 +1,7 @@ pub(self) mod account_data; pub(self) mod global_edus; pub(self) mod globals; +pub(self) mod media; pub(self) mod rooms; pub(self) mod users; @@ -15,6 +16,7 @@ pub struct Database { pub rooms: rooms::Rooms, pub account_data: account_data::AccountData, pub global_edus: global_edus::GlobalEdus, + pub media: media::Media, pub _db: sled::Db, } @@ -88,6 +90,9 @@ impl Database { //globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), // Presence }, + media: media::Media { + mediaid_file: db.open_tree("mediaid_file").unwrap(), + }, _db: db, } } diff --git a/src/database/media.rs b/src/database/media.rs new file mode 100644 index 0000000..36d9410 --- /dev/null +++ b/src/database/media.rs @@ -0,0 +1,56 @@ +use crate::{utils, Error, Result}; + +pub struct Media { + pub(super) mediaid_file: sled::Tree, // MediaId = MXC + Filename + ContentType +} + +impl Media { + /// Uploads or replaces a file. + pub fn create( + &self, + mxc: String, + filename: Option<&String>, + content_type: &str, + file: &[u8], + ) -> Result<()> { + let mut key = mxc.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filename.map(|f| f.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(content_type.as_bytes()); + + self.mediaid_file.insert(key, file)?; + + Ok(()) + } + + /// Downloads a file. + pub fn get(&self, mxc: String) -> Result, String, Vec)>> { + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xff); + + if let Some(r) = self.mediaid_file.scan_prefix(&prefix).next() { + let (key, file) = r?; + let mut parts = key.split(|&b| b == 0xff).skip(1); + + let filename_bytes = parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?; + let filename = if filename_bytes.is_empty() { + None + } else { + Some(utils::string_from_bytes(filename_bytes)?) 
+ }; + + let content_type = utils::string_from_bytes( + parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?, + )?; + + Ok(Some((filename, content_type, file.to_vec()))) + } else { + Ok(None) + } + } +} diff --git a/src/main.rs b/src/main.rs index 7581da1..043f757 100644 --- a/src/main.rs +++ b/src/main.rs @@ -71,6 +71,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::publicised_groups_route, client_server::send_event_to_device_route, client_server::get_media_config_route, + client_server::create_content_route, + client_server::get_content_route, + client_server::get_content_thumbnail_route, client_server::options_route, server_server::well_known_server, server_server::get_server_version, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 21c5925..d6f6cfe 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -11,7 +11,7 @@ use ruma_identifiers::UserId; use std::{convert::TryInto, io::Cursor, ops::Deref}; use tokio::io::AsyncReadExt; -const MESSAGE_LIMIT: u64 = 65535; +const MESSAGE_LIMIT: u64 = 20 * 1024 * 1024; // 20 MB /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. @@ -19,7 +19,7 @@ pub struct Ruma { body: T, pub user_id: Option, pub device_id: Option, - pub json_body: serde_json::Value, + pub json_body: Option, // This is None if parsing failed (for raw byte bodies) } impl<'a, T: Endpoint> FromData<'a> for Ruma { @@ -85,12 +85,8 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { body: t, user_id, device_id, - // TODO: Can we avoid parsing it again? - json_body: if !body.is_empty() { - serde_json::from_slice(&body).expect("Ruma already parsed it successfully") - } else { - serde_json::Value::default() - }, + // TODO: Can we avoid parsing it again? (We only need this for append_pdu) + json_body: serde_json::from_slice(&body).ok() }), Err(e) => { warn!("{:?}", e); From 61f4f2c71637e2db3b8713410b24f318e5c850b9 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 19 May 2020 16:28:03 +0200 Subject: [PATCH 0087/1727] improvement: implement GET publicRooms --- src/client_server.rs | 42 +++++++++++++++++++++++++++++++++++++++++- src/main.rs | 1 + 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 30b409c..e5a6f9b 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -13,7 +13,7 @@ use ruma_client_api::{ alias::get_alias, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, - directory::{self, get_public_rooms_filtered}, + directory::{self, get_public_rooms, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{claim_keys, get_keys, upload_keys}, media::{create_content, get_content_thumbnail, get_content, get_media_config}, @@ -1136,6 +1136,46 @@ pub fn invite_user_route( } } +#[get("/_matrix/client/r0/publicRooms")] +pub async fn get_public_rooms_route( + db: State<'_, Database>, +) -> MatrixResult { + let mut chunk = db + .rooms + .all_rooms() + .into_iter() + .map(|room_id| { + let state = db.rooms.room_state(&room_id).unwrap(); + directory::PublicRoomsChunk { + aliases: Vec::new(), + canonical_alias: None, + name: state + .get(&(EventType::RoomName, "".to_owned())) + .and_then(|s| s.content.get("name")) + .and_then(|n| n.as_str()) + .map(|n| n.to_owned()), + num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), + room_id, + topic: None, + world_readable: false, + guest_can_join: true, + avatar_url: None, + } + }) + .collect::>(); + + 
chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + + let total_room_count_estimate = (chunk.len() as u32).into(); + + MatrixResult(Ok(get_public_rooms::Response { + chunk, + prev_batch: None, + next_batch: None, + total_room_count_estimate: Some(total_room_count_estimate), + })) +} + #[post("/_matrix/client/r0/publicRooms")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, diff --git a/src/main.rs b/src/main.rs index 043f757..bf33104 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,6 +55,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::leave_room_route, client_server::forget_room_route, client_server::invite_user_route, + client_server::get_public_rooms_route, client_server::get_public_rooms_filtered_route, client_server::search_users_route, client_server::get_member_events_route, From d544d28b6e3065a00c167d1b80518fa64407dbea Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 19 May 2020 18:31:34 +0200 Subject: [PATCH 0088/1727] feat: image thumbnails --- Cargo.lock | 294 ++++++++++++++++++++++++++++++------------ Cargo.toml | 9 +- README.md | 3 +- src/client_server.rs | 63 +++++---- src/database/media.rs | 103 ++++++++++++++- src/error.rs | 5 + 6 files changed, 365 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1734df4..dbe8bc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,11 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +[[package]] +name = "adler32" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" + [[package]] name = "arc-swap" version = "0.4.6" @@ -24,9 +30,9 @@ version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -96,6 +102,12 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" +[[package]] +name = "bytemuck" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431" + [[package]] name = "byteorder" version = "1.3.4" @@ -110,9 +122,9 @@ checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" [[package]] name = "cc" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d87b23d6a92cd03af510a5ade527033f6aa6fa92161e2d5863a907d4c5e31d" +checksum = "404b1fe4f65288577753b17e3b36a04596ee784493ec249bf81c7f2d2acd751c" [[package]] name = "cfg-if" @@ -129,6 +141,12 @@ dependencies = [ "bitflags", ] +[[package]] +name = "color_quant" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" + [[package]] name = "conduit" version = "0.1.0" @@ -136,6 +154,7 @@ dependencies = [ "base64 0.12.1", "directories", "http", + "image", "js_int", "log", "rand", @@ -224,6 +243,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "deflate" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e7e5d2a2273fed52a7f947ee55b092c4057025d7a3e04e5ecdbd25d6c3fb1bd7" +dependencies = [ + "adler32", + "byteorder", +] + [[package]] name = "devise" version = "0.3.0" @@ -239,7 +268,7 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "devise_core", - "quote 1.0.5", + "quote 1.0.6", ] [[package]] @@ -248,9 +277,9 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -292,9 +321,9 @@ dependencies = [ [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" @@ -392,9 +421,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -452,6 +481,16 @@ dependencies = [ "wasi", ] +[[package]] +name = "gif" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "471d90201b3b223f3451cd4ad53e34295f16a1df17b1edf3736d47761c3981af" +dependencies = [ + "color_quant", + "lzw", +] + [[package]] name = "h2" version = "0.2.5" @@ -482,9 +521,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61565ff7aaace3525556587bd2dc31d4a07071957be715e63ce7b1eccf51a8f4" +checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71" dependencies = [ "libc", ] @@ -564,6 +603,22 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "image" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f4167a8f21fa2bb3f17a652a760acd7572645281c98e3b612a26242c96ee" +dependencies = [ + "bytemuck", + "byteorder", + "gif", + "jpeg-decoder", + "num-iter", + "num-rational", + "num-traits", + "png", +] + [[package]] name = "indexmap" version = "1.3.2" @@ -573,6 +628,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "inflate" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cdb29978cc5797bd8dcc8e5bf7de604891df2a8dc576973d71a281e916db2ff" +dependencies = [ + "adler32", +] + [[package]] name = "iovec" version = "0.1.4" @@ -588,6 +652,15 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +[[package]] +name = "jpeg-decoder" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b47b4c4e017b01abdc5bcc126d2d1002e5a75bbe3ce73f9f4f311a916363704" +dependencies = [ + "byteorder", +] + [[package]] name = "js-sys" version = "0.3.39" @@ -646,6 +719,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "lzw" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084" + [[package]] name = "matches" version = "0.1.8" @@ -760,6 +839,47 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "num-integer" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb0800a0291891dd9f4fe7bd9c19384f98f7fbe0cd0f39a2c6b88b9868bbc00" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +dependencies = [ + "autocfg", +] + [[package]] name = "num_cpus" version = "1.13.0" @@ -869,22 +989,22 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d480cb4e89522ccda96d0eed9af94180b7a5f93fb28f66e1fd7d68431663d1" +checksum = "edc93aeee735e60ecb40cf740eb319ff23eab1c5748abfdb5c180e4ce49f7791" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82996f11efccb19b685b14b5df818de31c1edcee3daa256ab5775dd98e72feb" +checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -906,10 +1026,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" [[package]] -name = "ppv-lite86" -version = "0.2.6" +name = "png" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +checksum = "2c68a431ed29933a4eb5709aca9800989758c97759345860fa5db3cfced0b65d" +dependencies = [ + "bitflags", + "crc32fast", + "deflate", + "inflate", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro-hack" @@ -934,9 +1066,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8872cf6f48eee44265156c111456a700ab3483686b3f96df4cf5481c89157319" +checksum = "53f5ffe53a6b28e37c9c1ce74893477864d64f74778a93a4beb43c8fa167f639" dependencies = [ "unicode-xid 0.2.0", ] @@ -952,11 +1084,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42934bc9c8ab0d3b273a16d8551c8f0fcff46be73276ca083ec2414c15c4ba5e" +checksum = 
"54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" dependencies = [ - "proc-macro2 1.0.12", + "proc-macro2 1.0.13", ] [[package]] @@ -1108,7 +1240,7 @@ source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8cc dependencies = [ "devise", "indexmap", - "quote 1.0.5", + "quote 1.0.6", "rocket_http", "version_check 0.9.1", "yansi 0.5.0", @@ -1145,7 +1277,7 @@ dependencies = [ "percent-encoding 2.1.0", "ruma-api-macros", "ruma-identifiers", - "ruma-serde 0.2.0", + "ruma-serde 0.2.1", "serde", "serde_json", "strum", @@ -1157,16 +1289,15 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] name = "ruma-client-api" version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a57433fc6dded259aef2b12ceb91d78d3607b182278f648edd5c19c23d81cd" +source = "git+https://github.com/ruma/ruma-client-api.git#3a3ccabbf22c34da5c9de7cac54d9fbd3e571dcf" dependencies = [ "http", "js_int", @@ -1174,7 +1305,7 @@ dependencies = [ "ruma-common", "ruma-events", "ruma-identifiers", - "ruma-serde 0.1.3", + "ruma-serde 0.2.1", "serde", "serde_json", "strum", @@ -1182,39 +1313,40 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235f2fed35114ef3fbff1b7d5350f22ac712cffff55cc7b7732d39ae4adf6966" +checksum = "253416d67b4bde281f2781424232a58a946a4f1c451d5f857a8d0705d58eaf2a" dependencies = [ - "ruma-serde 0.1.3", + "matches", + "ruma-serde 0.2.1", "serde", "serde_json", ] [[package]] name = "ruma-events" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e952b80bddd83666a84db842d3e4d2ea43f8b4df8134864835c1807bd843fe21" +checksum = "0afc6d4da07d10213478d32dc42b6222df0a1ea319e9ced9f8a341617952d909" dependencies = [ "js_int", "ruma-common", "ruma-events-macros", "ruma-identifiers", - "ruma-serde 0.1.3", + "ruma-serde 0.2.1", "serde", "serde_json", ] [[package]] name = "ruma-events-macros" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60256afd4cb1bcdbcd9676e88cbbd09afc2541bd7b0858a559ce9ad7c6cfbb2a" +checksum = "fc706c4a53cc54c3a198cfbcd7dfff20448599d84f90e636d96034d0df5a9ac9" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -1257,9 +1389,9 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb00252245980b8f8d9833632e611aba8e1657c9fbefefe2b35c2817fc2b58a4" +checksum = "0dd3d04c6755bae23101dec7426d044b773ef517932f23d5a6254c4caa1cfce3" dependencies = [ "dtoa", "itoa", @@ -1387,9 +1519,9 @@ version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -1481,9 +1613,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -1499,12 +1631,12 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4696caa4048ac7ce2bcd2e484b3cef88c1004e41b8e945a277e2c25dc0b72060" +checksum = "1425de3c33b0941002740a420b1a906a350b88d08b82b2c8a01035a3f9447bac" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", + "proc-macro2 1.0.13", + "quote 1.0.6", "unicode-xid 0.2.0", ] @@ -1524,22 +1656,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467e5ff447618a916519a4e0d62772ab14f434897f3d63f05d8700ef1e9b22c1" +checksum = "5976891d6950b4f68477850b5b9e5aa64d955961466f9e174363f573e54e8ca7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e63c1091225b9834089b429bc4a2e01223470e3183e891582909e9d1c4cb55d9" +checksum = "ab81dbd1cd69cd2ce22ecfbdd3bdb73334ba25350649408cc6c085f46d89573d" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] @@ -1581,16 +1713,16 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", ] [[package]] name = "tokio-rustls" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141afec0978abae6573065a48882c6bae44c5cc61db9b511ac4abf6a09bfd9cc" +checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" dependencies = [ "futures-core", "rustls", @@ -1760,9 +1892,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", "wasm-bindgen-shared", ] @@ -1784,7 +1916,7 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ - "quote 1.0.5", + "quote 1.0.6", "wasm-bindgen-macro-support", ] @@ -1794,9 +1926,9 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.5", - "syn 1.0.21", + "proc-macro2 1.0.13", + "quote 1.0.6", + "syn 1.0.22", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index e6ed01e..6edb8e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,10 +14,10 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = "0.8.0" +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } ruma-identifiers = { version = "0.16.1", features = ["rand"] } ruma-api = "0.16.1" -ruma-events = "0.21.1" +ruma-events = "0.21.2" ruma-signatures = { git = 
"https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "ccbf216f39bbbaa59131cc200eae5bd18aa1947c" } log = "0.4.8" @@ -31,5 +31,6 @@ rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" base64 = "0.12.1" -thiserror = "1.0.17" -ruma-common = "0.1.1" +thiserror = "1.0.18" +ruma-common = "0.1.2" +image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } diff --git a/README.md b/README.md index 4c9baf5..5aabeeb 100644 --- a/README.md +++ b/README.md @@ -26,13 +26,14 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [x] Typing indications - [x] Invites, user search - [x] Password hashing +- [x] Riot E2EE +- [x] Media - [ ] Basic federation - [ ] State resolution - [ ] Permission system - [ ] Notifications (push rules) - [ ] Riot presence - [ ] Proper room creation -- [ ] Riot E2EE #### How can I contribute? diff --git a/src/client_server.rs b/src/client_server.rs index e5a6f9b..c21afc3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -16,7 +16,7 @@ use ruma_client_api::{ directory::{self, get_public_rooms, get_public_rooms_filtered}, filter::{self, create_filter, get_filter}, keys::{claim_keys, get_keys, upload_keys}, - media::{create_content, get_content_thumbnail, get_content, get_media_config}, + media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, leave_room, @@ -1307,9 +1307,7 @@ pub fn create_message_event_route( ) .expect("message events are always okay"); - MatrixResult(Ok(create_message_event::Response { - event_id: Some(event_id), - })) + MatrixResult(Ok(create_message_event::Response { event_id })) } #[put( @@ -1339,9 +1337,7 @@ pub fn create_state_event_for_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_key::Response { - event_id: Some(event_id), - })) + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) } #[put( @@ -1370,9 +1366,7 @@ pub fn create_state_event_for_empty_key_route( ) .unwrap(); - MatrixResult(Ok(create_state_event_for_empty_key::Response { - event_id: Some(event_id), - })) + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) } #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] @@ -1805,7 +1799,6 @@ pub fn send_event_to_device_route( #[get("/_matrix/media/r0/config")] pub fn get_media_config_route() -> MatrixResult { - warn!("TODO: get_media_config_route"); MatrixResult(Ok(get_media_config::Response { upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB })) @@ -1816,24 +1809,38 @@ pub fn create_content_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - let mxc = format!("mxc://{}/{}", db.globals.server_name(), utils::random_string(MXC_LENGTH)); + let mxc = format!( + "mxc://{}/{}", + db.globals.server_name(), + utils::random_string(MXC_LENGTH) + ); db.media - .create(mxc.clone(), body.filename.as_ref(), &body.content_type, &body.file) + .create( + mxc.clone(), + body.filename.as_ref(), + &body.content_type, + &body.file, + ) .unwrap(); - MatrixResult(Ok(create_content::Response { - content_uri: mxc, - })) + MatrixResult(Ok(create_content::Response { content_uri: mxc })) } -#[get("/_matrix/media/r0/download/<_server_name>/<_media_id>", data = "")] +#[get( + "/_matrix/media/r0/download/<_server_name>/<_media_id>", + data = "" +)] pub fn 
get_content_route( db: State<'_, Database>, body: Ruma, _server_name: String, _media_id: String, ) -> MatrixResult { - if let Some((filename, content_type, file)) = db.media.get(format!("mxc://{}/{}", body.server_name, body.media_id)).unwrap() { + if let Some((filename, content_type, file)) = db + .media + .get(format!("mxc://{}/{}", body.server_name, body.media_id)) + .unwrap() + { MatrixResult(Ok(get_content::Response { file, content_type, @@ -1848,18 +1855,26 @@ pub fn get_content_route( } } -#[get("/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", data = "")] +#[get( + "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", + data = "" +)] pub fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma, _server_name: String, _media_id: String, ) -> MatrixResult { - if let Some((_, content_type, file)) = db.media.get(format!("mxc://{}/{}", body.server_name, body.media_id)).unwrap() { - MatrixResult(Ok(get_content_thumbnail::Response { - file, - content_type, - })) + if let Some((_, content_type, file)) = db + .media + .get_thumbnail( + format!("mxc://{}/{}", body.server_name, body.media_id), + body.width.try_into().unwrap(), + body.height.try_into().unwrap(), + ) + .unwrap() + { + MatrixResult(Ok(get_content_thumbnail::Response { file, content_type })) } else { MatrixResult(Err(Error { kind: ErrorKind::NotFound, diff --git a/src/database/media.rs b/src/database/media.rs index 36d9410..96b95fe 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,7 +1,8 @@ use crate::{utils, Error, Result}; +use std::mem; pub struct Media { - pub(super) mediaid_file: sled::Tree, // MediaId = MXC + Filename + ContentType + pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + Filename + ContentType } impl Media { @@ -15,6 +16,9 @@ impl Media { ) -> Result<()> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); + key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail + key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail + key.push(0xff); key.extend_from_slice(filename.map(|f| f.as_bytes()).unwrap_or_default()); key.push(0xff); key.extend_from_slice(content_type.as_bytes()); @@ -28,10 +32,19 @@ impl Media { pub fn get(&self, mxc: String) -> Result, String, Vec)>> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); + prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail + prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail + prefix.push(0xff); if let Some(r) = self.mediaid_file.scan_prefix(&prefix).next() { let (key, file) = r?; - let mut parts = key.split(|&b| b == 0xff).skip(1); + let mut parts = key.rsplit(|&b| b == 0xff); + + let content_type = utils::string_from_bytes( + parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?, + )?; let filename_bytes = parts .next() @@ -42,13 +55,99 @@ impl Media { Some(utils::string_from_bytes(filename_bytes)?) }; + Ok(Some((filename, content_type, file.to_vec()))) + } else { + Ok(None) + } + } + + /// Downloads a file's thumbnail. 
+ pub fn get_thumbnail( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result, String, Vec)>> { + let mut main_prefix = mxc.as_bytes().to_vec(); + main_prefix.push(0xff); + + let mut thumbnail_prefix = main_prefix.clone(); + thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); + thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); + thumbnail_prefix.push(0xff); + + let mut original_prefix = main_prefix; + original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail + original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail + original_prefix.push(0xff); + + if let Some(r) = self.mediaid_file.scan_prefix(&thumbnail_prefix).next() { + // Using saved thumbnail + let (key, file) = r?; + let mut parts = key.rsplit(|&b| b == 0xff); + let content_type = utils::string_from_bytes( parts .next() .ok_or(Error::BadDatabase("mediaid is invalid"))?, )?; + let filename_bytes = parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?; + let filename = if filename_bytes.is_empty() { + None + } else { + Some(utils::string_from_bytes(filename_bytes)?) + }; + Ok(Some((filename, content_type, file.to_vec()))) + } else if let Some(r) = self.mediaid_file.scan_prefix(&original_prefix).next() { + // Generate a thumbnail + let (key, file) = r?; + let mut parts = key.rsplit(|&b| b == 0xff); + + let content_type = utils::string_from_bytes( + parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?, + )?; + + let filename_bytes = parts + .next() + .ok_or(Error::BadDatabase("mediaid is invalid"))?; + let filename = if filename_bytes.is_empty() { + None + } else { + Some(utils::string_from_bytes(filename_bytes)?) + }; + + if let Ok(image) = image::load_from_memory(&file) { + let thumbnail = image.thumbnail(width, height); + let mut thumbnail_bytes = Vec::new(); + thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Jpeg(75))?; + + // Save thumbnail in database so we don't have to generate it again next time + let mut thumbnail_key = key.to_vec(); + let width_index = thumbnail_key + .iter() + .position(|&b| b == 0xff) + .ok_or(Error::BadDatabase("mediaid is invalid"))? 
+ + 1; + let mut widthheight = width.to_be_bytes().to_vec(); + widthheight.extend_from_slice(&height.to_be_bytes()); + + thumbnail_key.splice( + width_index..width_index + 2 * mem::size_of::(), + widthheight, + ); + + self.mediaid_file.insert(thumbnail_key, &*thumbnail_bytes)?; + + Ok(Some((filename, content_type, thumbnail_bytes))) + } else { + Ok(None) + } } else { Ok(None) } diff --git a/src/error.rs b/src/error.rs index 71fd918..a91d409 100644 --- a/src/error.rs +++ b/src/error.rs @@ -29,6 +29,11 @@ pub enum Error { #[from] source: ruma_events::InvalidEvent, }, + #[error("could not generate image")] + ImageError { + #[from] + source: image::error::ImageError, + }, #[error("bad request")] BadRequest(&'static str), #[error("problem in that database")] From eb4af8576544a54bb113006921439bb26045fc23 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 19 May 2020 22:56:28 +0200 Subject: [PATCH 0089/1727] fix: riot sometimes freaks out when refreshing --- src/database/account_data.rs | 13 +++++++++---- src/ruma_wrapper.rs | 7 ++++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/database/account_data.rs b/src/database/account_data.rs index e09ef2c..d1114ec 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -22,13 +22,16 @@ impl AccountData { } json.insert("type".to_owned(), kind.to_string().into()); + let user_id_string = user_id.to_string(); + let kind_string = kind.to_string(); + let mut prefix = room_id .map(|r| r.to_string()) .unwrap_or_default() .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id_string.as_bytes()); prefix.push(0xff); // Remove old entry @@ -40,10 +43,12 @@ impl AccountData { .filter_map(|r| r.ok()) .take_while(|key| key.starts_with(&prefix)) .find(|key| { - key.split(|&b| b == 0xff) - .nth(1) - .filter(|&user| user == user_id.to_string().as_bytes()) + let user = key.split(|&b| b == 0xff).nth(1); + let k = key.rsplit(|&b| b == 0xff).next(); + + user.filter(|&user| user == user_id_string.as_bytes()) .is_some() + && k.filter(|&k| k == kind_string.as_bytes()).is_some() }) { // This is the old room_latest diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index d6f6cfe..28e6fbb 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -135,9 +135,6 @@ where match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); - response - .sized_body(Cursor::new(http_response.body().clone())) - .await; let status = http_response.status(); response.raw_status(status.into(), ""); @@ -147,6 +144,10 @@ where .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); } + response + .sized_body(Cursor::new(http_response.into_body())) + .await; + response.raw_header("Access-Control-Allow-Origin", "*"); response.raw_header( "Access-Control-Allow-Methods", From ada260bf42af45d845f1a16d00c3848200fa2fb4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 19 May 2020 23:50:20 +0200 Subject: [PATCH 0090/1727] fix: use png for thumbnails --- src/database/media.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/media.rs b/src/database/media.rs index 96b95fe..c64fd0b 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -125,7 +125,7 @@ impl Media { if let Ok(image) = image::load_from_memory(&file) { let thumbnail = image.thumbnail(width, height); let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to(&mut thumbnail_bytes, 
image::ImageOutputFormat::Jpeg(75))?; + thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time let mut thumbnail_key = key.to_vec(); From e0fccffde054fbe84b1e4bd110cfa9068b761b29 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 19 May 2020 23:47:30 +0200 Subject: [PATCH 0091/1727] fix: changing avatar url now sends room events --- src/client_server.rs | 53 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index c21afc3..e521624 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -510,6 +510,13 @@ pub fn set_displayname_route( .unwrap(); } + let mut json = serde_json::Map::new(); + json.insert("membership".to_owned(), "join".into()); + json.insert("displayname".to_owned(), (**displayname).into()); + if let Some(avatar_url) = db.users.avatar_url(&user_id).unwrap() { + json.insert("avatar_url".to_owned(), avatar_url.into()); + } + // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { db.rooms @@ -517,7 +524,7 @@ pub fn set_displayname_route( room_id.unwrap(), user_id.clone(), EventType::RoomMember, - json!({"membership": "join", "displayname": displayname}), + json.clone().into(), None, Some(user_id.to_string()), &db.globals, @@ -609,8 +616,48 @@ pub fn set_avatar_url_route( db.users .set_avatar_url(&user_id, Some(body.avatar_url.clone())) .unwrap(); - // TODO send a new m.room.member join event with the updated avatar_url - // TODO send a new m.presence event with the updated avatar_url + + + let mut json = serde_json::Map::new(); + json.insert("membership".to_owned(), "join".into()); + json.insert("avatar_url".to_owned(), (*body.avatar_url).into()); + if let Some(displayname) = db.users.displayname(&user_id).unwrap() { + json.insert("displayname".to_owned(), displayname.into()); + } + + // Send a new membership event into all joined rooms + for room_id in db.rooms.rooms_joined(&user_id) { + db.rooms + .append_pdu( + room_id.unwrap(), + user_id.clone(), + EventType::RoomMember, + json.clone().into(), + None, + Some(user_id.to_string()), + &db.globals, + ) + .unwrap(); + } + + // Presence update + db.global_edus + .update_globallatest( + &user_id, + EduEvent::Presence(ruma_events::presence::PresenceEvent { + content: ruma_events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + currently_active: None, + displayname: db.users.displayname(&user_id).unwrap(), + last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + presence: ruma_events::presence::PresenceState::Online, + status_msg: None, + }, + sender: user_id.clone(), + }), + &db.globals, + ) + .unwrap(); } MatrixResult(Ok(set_avatar_url::Response)) From 8df2a1a072e75d4c56a625e0c55bae6e5646f1cd Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 20 May 2020 00:12:07 +0200 Subject: [PATCH 0092/1727] improvement: add room topic to public room dir --- src/client_server.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index e521624..bc0af79 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1203,7 +1203,11 @@ pub async fn get_public_rooms_route( .map(|n| n.to_owned()), num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, - topic: None, + topic: state + .get(&(EventType::RoomTopic, "".to_owned())) + 
.and_then(|s| s.content.get("topic")) + .and_then(|n| n.as_str()) + .map(|n| n.to_owned()), world_readable: false, guest_can_join: true, avatar_url: None, @@ -1243,7 +1247,11 @@ pub async fn get_public_rooms_filtered_route( .map(|n| n.to_owned()), num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, - topic: None, + topic: state + .get(&(EventType::RoomTopic, "".to_owned())) + .and_then(|s| s.content.get("topic")) + .and_then(|n| n.as_str()) + .map(|n| n.to_owned()), world_readable: false, guest_can_join: true, avatar_url: None, From 43478a5870f1344b6ba802be7b075047ba68e702 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 20 May 2020 09:24:44 +0200 Subject: [PATCH 0093/1727] fix: send correct membership events when joining / creating rooms --- src/client_server.rs | 6 +++--- src/database/rooms.rs | 19 +++++++++++-------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index bc0af79..778dff7 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -965,7 +965,7 @@ pub fn create_room_route( .join( &room_id, &user_id, - db.users.displayname(&user_id).unwrap(), + &db.users, &db.globals, ) .unwrap(); @@ -1072,7 +1072,7 @@ pub fn join_room_by_id_route( .join( &body.room_id, &user_id, - db.users.displayname(&user_id).unwrap(), + &db.users, &db.globals, ) .is_ok() @@ -1119,7 +1119,7 @@ pub fn join_room_by_id_or_alias_route( .join( &room_id, &user_id, - db.users.displayname(&user_id).unwrap(), + &db.users, &db.globals, ) .is_ok() diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 28b3560..312390b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -353,7 +353,7 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - displayname: Option, + users: &super::users::Users, globals: &super::globals::Globals, ) -> Result<()> { if !self.exists(room_id)? 
{ @@ -374,19 +374,22 @@ impl Rooms { self.roomuserid_invited.remove(&roomuser_id)?; self.userroomid_left.remove(&userroom_id)?; - let mut content = json!({"membership": "join"}); - if let Some(displayname) = displayname { - content - .as_object_mut() - .unwrap() - .insert("displayname".to_owned(), displayname.into()); + let mut json = serde_json::Map::new(); + json.insert("membership".to_owned(), "join".into()); + + if let Some(displayname) = users.displayname(&user_id).unwrap() { + json.insert("displayname".to_owned(), displayname.into()); + } + + if let Some(avatar_url) = users.avatar_url(&user_id).unwrap() { + json.insert("avatar_url".to_owned(), avatar_url.into()); } self.append_pdu( room_id.clone(), user_id.clone(), EventType::RoomMember, - content, + json.into(), None, Some(user_id.to_string()), globals, From d95e8b4880f10a6f409b8f9302baf35354703d57 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 21 May 2020 13:52:25 +0200 Subject: [PATCH 0094/1727] fix: problem with to-device for all devices --- Cargo.lock | 32 ++++++++++++++++---------------- src/client_server.rs | 17 ++++++++++++++--- 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbe8bc3..234372b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -32,7 +32,7 @@ checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -122,9 +122,9 @@ checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" [[package]] name = "cc" -version = "1.0.53" +version = "1.0.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "404b1fe4f65288577753b17e3b36a04596ee784493ec249bf81c7f2d2acd751c" +checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311" [[package]] name = "cfg-if" @@ -279,7 +279,7 @@ dependencies = [ "bitflags", "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1004,7 +1004,7 @@ checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1291,7 +1291,7 @@ checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1346,7 +1346,7 @@ checksum = "fc706c4a53cc54c3a198cfbcd7dfff20448599d84f90e636d96034d0df5a9ac9" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1521,7 +1521,7 @@ checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1615,7 +1615,7 @@ dependencies = [ "heck", "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1631,9 +1631,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1425de3c33b0941002740a420b1a906a350b88d08b82b2c8a01035a3f9447bac" +checksum = "95b5f192649e48a5302a13f2feb224df883b98933222369e4b3b0fe2a5447269" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", @@ -1671,7 +1671,7 @@ checksum = 
"ab81dbd1cd69cd2ce22ecfbdd3bdb73334ba25350649408cc6c085f46d89573d" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1715,7 +1715,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", ] [[package]] @@ -1894,7 +1894,7 @@ dependencies = [ "log", "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", "wasm-bindgen-shared", ] @@ -1928,7 +1928,7 @@ checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ "proc-macro2 1.0.13", "quote 1.0.6", - "syn 1.0.22", + "syn 1.0.23", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/src/client_server.rs b/src/client_server.rs index 778dff7..a47dfae 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -919,7 +919,7 @@ pub fn create_typing_event_route( content: ruma_events::typing::TypingEventContent { user_ids: vec![user_id.clone()], }, - room_id: Some(body.room_id.clone()), // TODO: Can be None because it can be inferred + room_id: None, // None because it can be inferred }); if body.typing { @@ -1545,6 +1545,7 @@ pub fn sync_route( None }; + // They /sync response doesn't always return all messages, so we say the output is // limited unless there are enough events let mut limited = true; @@ -1576,7 +1577,7 @@ pub fn sync_route( content: ruma_events::typing::TypingEventContent { user_ids: Vec::new(), }, - room_id: Some(room_id.clone()), // None because it can be inferred + room_id: None, // None because it can be inferred }) .into(), ); @@ -1842,7 +1843,17 @@ pub fn send_event_to_device_route( to_device::DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { - target_device_id.unwrap(); + db + .users + .add_to_device_event( + user_id, + &target_user_id, + &target_device_id.unwrap(), + &body.event_type, + serde_json::from_str(event.get()).unwrap(), + &db.globals, + ) + .unwrap(); } } } From 5d6542a8a617ff58f4aedf32886c99fb0eb234af Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 21 May 2020 17:36:07 +0200 Subject: [PATCH 0095/1727] improvement: video link in readme --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 5aabeeb..87b1352 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ [![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) [![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) +#### Is it fast? + +See it in action: + #### What is the goal A fast Matrix homeserver that's optimized for smaller, personal servers, instead of one server that has high scalability. 
From 1014388a9c0f917dd92ce85fb300c8d9abb50d5a Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 23 May 2020 19:17:08 +0200 Subject: [PATCH 0096/1727] fix: one time keys are never removed --- Cargo.lock | 5 +- Cargo.toml | 2 +- src/client_server.rs | 145 ++++++++++++++---------------------------- src/database/users.rs | 2 + src/utils.rs | 2 +- 5 files changed, 54 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 234372b..dc47816 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1296,8 +1296,9 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma-client-api.git#3a3ccabbf22c34da5c9de7cac54d9fbd3e571dcf" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082913ad135ca55ee06a55d295bea954982f2ac5e0150adc09024f5cbb8cb6cf" dependencies = [ "http", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 6edb8e3..5a068f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } +ruma-client-api = "0.9.0" ruma-identifiers = { version = "0.16.1", features = ["rand"] } ruma-api = "0.16.1" ruma-events = "0.21.2" diff --git a/src/client_server.rs b/src/client_server.rs index a47dfae..a2045d8 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -185,14 +185,20 @@ pub fn register_route( content: ruma_events::push_rules::PushRulesEventContent { global: ruma_events::push_rules::Ruleset { content: vec![], - override_rules: vec![], + override_rules: vec![ruma_events::push_rules::ConditionalPushRule { + actions: vec![ruma_events::push_rules::Action::DontNotify], + default: true, + enabled: false, + rule_id: ".m.rule.master".to_owned(), + conditions: vec![], + }], room: vec![], sender: vec![], underride: vec![ruma_events::push_rules::ConditionalPushRule { actions: vec![ ruma_events::push_rules::Action::Notify, ruma_events::push_rules::Action::SetTweak( - ruma_common::push::Tweak::Highlight(false), + ruma_common::push::Tweak::Sound("default".to_owned()), ), ], default: true, @@ -320,28 +326,27 @@ pub fn get_capabilities_route() -> MatrixResult { })) } -#[get("/_matrix/client/r0/pushrules")] -pub fn get_pushrules_all_route() -> MatrixResult { - // TODO - let mut global = BTreeMap::new(); - global.insert( - push::RuleKind::Underride, - vec![push::PushRule { - actions: vec![ - push::Action::Notify, - push::Action::SetTweak(ruma_common::push::Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: Some(vec![push::PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }]), - pattern: None, - }], - ); - MatrixResult(Ok(get_pushrules_all::Response { global })) +#[get("/_matrix/client/r0/pushrules", data = "")] +pub fn get_pushrules_all_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + if let Some(EduEvent::PushRules(pushrules)) = db + .account_data + .get(None, &user_id, &EventType::PushRules) + .unwrap().map(|edu| edu.deserialize().expect("PushRules event in db is valid")) + { + MatrixResult(Ok(get_pushrules_all::Response { + global: BTreeMap::new(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "PushRules event not 
found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } #[put( @@ -356,46 +361,7 @@ pub fn set_pushrule_route( _rule_id: String, ) -> MatrixResult { // TODO - let user_id = body.user_id.as_ref().expect("user is authenticated"); - db.account_data - .update( - None, - &user_id, - &EventType::PushRules, - serde_json::to_value(ruma_events::push_rules::PushRulesEvent { - content: ruma_events::push_rules::PushRulesEventContent { - global: ruma_events::push_rules::Ruleset { - content: vec![], - override_rules: vec![], - room: vec![], - sender: vec![], - underride: vec![ruma_events::push_rules::ConditionalPushRule { - actions: vec![ - ruma_events::push_rules::Action::Notify, - ruma_events::push_rules::Action::SetTweak( - ruma_common::push::Tweak::Highlight(false), - ), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( - ruma_events::push_rules::EventMatchCondition { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - )], - }], - }, - }, - }) - .unwrap() - .as_object_mut() - .unwrap(), - &db.globals, - ) - .unwrap(); - + warn!("TODO: set_pushrule_route"); MatrixResult(Ok(set_pushrule::Response)) } @@ -406,6 +372,7 @@ pub fn set_pushrule_enabled_route( _rule_id: String, ) -> MatrixResult { // TODO + warn!("TODO: set_pushrule_enabled_route"); MatrixResult(Ok(set_pushrule_enabled::Response)) } @@ -617,7 +584,6 @@ pub fn set_avatar_url_route( .set_avatar_url(&user_id, Some(body.avatar_url.clone())) .unwrap(); - let mut json = serde_json::Map::new(); json.insert("membership".to_owned(), "join".into()); json.insert("avatar_url".to_owned(), (*body.avatar_url).into()); @@ -962,12 +928,7 @@ pub fn create_room_route( .unwrap(); db.rooms - .join( - &room_id, - &user_id, - &db.users, - &db.globals, - ) + .join(&room_id, &user_id, &db.users, &db.globals) .unwrap(); db.rooms @@ -1069,12 +1030,7 @@ pub fn join_room_by_id_route( if db .rooms - .join( - &body.room_id, - &user_id, - &db.users, - &db.globals, - ) + .join(&body.room_id, &user_id, &db.users, &db.globals) .is_ok() { MatrixResult(Ok(join_room_by_id::Response { @@ -1116,12 +1072,7 @@ pub fn join_room_by_id_or_alias_route( if db .rooms - .join( - &room_id, - &user_id, - &db.users, - &db.globals, - ) + .join(&room_id, &user_id, &db.users, &db.globals) .is_ok() { MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) @@ -1545,7 +1496,6 @@ pub fn sync_route( None }; - // They /sync response doesn't always return all messages, so we say the output is // limited unless there are enough events let mut limited = true; @@ -1617,7 +1567,7 @@ pub fn sync_route( }, }, unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, + highlight_count: notification_count, notification_count, }, timeline: sync_events::Timeline { @@ -1736,8 +1686,8 @@ pub fn sync_route( changed: db .users .device_keys_changed(since) - .map(|u| u.unwrap().to_string()) - .collect(), // TODO: use userids when ruma changes + .map(|u| u.unwrap()) + .collect(), left: Vec::new(), // TODO }) } else { @@ -1843,17 +1793,16 @@ pub fn send_event_to_device_route( to_device::DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { - db - .users - .add_to_device_event( - user_id, - &target_user_id, - &target_device_id.unwrap(), - &body.event_type, - serde_json::from_str(event.get()).unwrap(), - &db.globals, - ) - .unwrap(); + db.users + .add_to_device_event( + user_id, + 
&target_user_id, + &target_device_id.unwrap(), + &body.event_type, + serde_json::from_str(event.get()).unwrap(), + &db.globals, + ) + .unwrap(); } } } diff --git a/src/database/users.rs b/src/database/users.rs index 1b56066..bf1f214 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -209,6 +209,8 @@ impl Users { .next() .map(|r| { let (key, value) = r?; + self.onetimekeyid_onetimekeys.remove(&key)?; + Ok(( serde_json::from_slice( &*key diff --git a/src/utils.rs b/src/utils.rs index 45b9b08..5b41bd4 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -20,7 +20,7 @@ pub fn increment(old: Option<&[u8]>) -> Option> { let number = u64::from_be_bytes(array); number + 1 } - None => 0, + None => 1, // Start at one. since 0 should return the first event in the db }; Some(number.to_be_bytes().to_vec()) From b106d1393bcbe53c665a28c25836fa9fbad27f1c Mon Sep 17 00:00:00 2001 From: josias Date: Sun, 24 May 2020 22:10:09 +0200 Subject: [PATCH 0097/1727] Add logout route and database methods (#21) Condense keys Move remove methods to remove_device Code cleanup Add method for removing todevice events Remove unnecessary existence checks Add logout route and database methods Co-authored-by: Josias --- src/client_server.rs | 15 ++++++++++++++- src/database/users.rs | 31 +++++++++++++++++++++++++++++++ src/main.rs | 1 + 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index a2045d8..6748082 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -29,7 +29,7 @@ use ruma_client_api::{ push::{self, get_pushrules_all, set_pushrule, set_pushrule_enabled}, read_marker::set_read_marker, room::create_room, - session::{get_login_types, login}, + session::{get_login_types, login, logout}, state::{ create_state_event_for_empty_key, create_state_event_for_key, get_state_events, get_state_events_for_empty_key, get_state_events_for_key, @@ -311,6 +311,19 @@ pub fn login_route( })) } +#[post("/_matrix/client/r0/logout", data = "")] +pub fn logout_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + db.users.remove_device(&user_id, &device_id).unwrap(); + + MatrixResult(Ok(logout::Response)) +} + #[get("/_matrix/client/r0/capabilities")] pub fn get_capabilities_route() -> MatrixResult { // TODO diff --git a/src/database/users.rs b/src/database/users.rs index bf1f214..81a7bb8 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -123,6 +123,37 @@ impl Users { Ok(()) } + /// Removes a device from a user + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + // Remove device keys + self.userdeviceid_devicekeys.remove(&userdeviceid)?; + + // Remove tokens + if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? { + self.token_userdeviceid.remove(&old_token)?; + } + + // Remove todevice events + let mut prefix = userdeviceid.clone(); + prefix.push(0xff); + + for result in self.todeviceid_events.scan_prefix(&prefix) { + let (key, value) = result?; + self.todeviceid_events.remove(key)?; + } + + // TODO: Remove onetimekeys + + // Remove the device + self.userdeviceids.remove(userdeviceid)?; + + Ok(()) + } + /// Returns an iterator over all device ids of this user. 
pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.to_string().as_bytes().to_vec(); diff --git a/src/main.rs b/src/main.rs index bf33104..3e34ded 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,6 +29,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::register_route, client_server::get_login_route, client_server::login_route, + client_server::logout_route, client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, From c8ba9dce0150a5d2775ffe1bff0ee1c31b6f7006 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 23 May 2020 11:20:00 +0200 Subject: [PATCH 0098/1727] feat: proper room creation --- src/client_server.rs | 392 +++++++++++++++++++++++++++++++------------ src/ruma_wrapper.rs | 9 +- 2 files changed, 292 insertions(+), 109 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6748082..52f60ad 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -26,9 +26,9 @@ use ruma_client_api::{ profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, - push::{self, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + push::{get_pushrules_all, set_pushrule, set_pushrule_enabled}, read_marker::set_read_marker, - room::create_room, + room::{self, create_room}, session::{get_login_types, login, logout}, state::{ create_state_event_for_empty_key, create_state_event_for_key, get_state_events, @@ -43,8 +43,12 @@ use ruma_client_api::{ }, unversioned::get_supported_versions, }; -use ruma_events::{collections::only::Event as EduEvent, EventJson, EventType}; -use ruma_identifiers::{RoomId, UserId}; +use ruma_events::{ + collections::only::Event as EduEvent, + room::{guest_access, history_visibility, join_rules}, + EventJson, EventType, +}; +use ruma_identifiers::{RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; use crate::{server_server, utils, Database, MatrixResult, Ruma}; @@ -348,7 +352,8 @@ pub fn get_pushrules_all_route( if let Some(EduEvent::PushRules(pushrules)) = db .account_data .get(None, &user_id, &EventType::PushRules) - .unwrap().map(|edu| edu.deserialize().expect("PushRules event in db is valid")) + .unwrap() + .map(|edu| edu.deserialize().expect("PushRules event in db is valid")) { MatrixResult(Ok(get_pushrules_all::Response { global: BTreeMap::new(), @@ -490,21 +495,30 @@ pub fn set_displayname_route( .unwrap(); } - let mut json = serde_json::Map::new(); - json.insert("membership".to_owned(), "join".into()); - json.insert("displayname".to_owned(), (**displayname).into()); - if let Some(avatar_url) = db.users.avatar_url(&user_id).unwrap() { - json.insert("avatar_url".to_owned(), avatar_url.into()); - } - // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { + let room_id = room_id.unwrap(); db.rooms .append_pdu( - room_id.unwrap(), + room_id.clone(), user_id.clone(), EventType::RoomMember, - json.clone().into(), + serde_json::to_value(ruma_events::room::member::MemberEventContent { + displayname: Some(displayname.clone()), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id) + .unwrap() + .get(&(EventType::RoomMember, user_id.to_string())) + .expect("user should be part of the room") + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap() + }) + .unwrap(), None, Some(user_id.to_string()), &db.globals, @@ -597,21 +611,30 @@ pub fn set_avatar_url_route( .set_avatar_url(&user_id, Some(body.avatar_url.clone())) 
.unwrap(); - let mut json = serde_json::Map::new(); - json.insert("membership".to_owned(), "join".into()); - json.insert("avatar_url".to_owned(), (*body.avatar_url).into()); - if let Some(displayname) = db.users.displayname(&user_id).unwrap() { - json.insert("displayname".to_owned(), displayname.into()); - } - // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { + let room_id = room_id.unwrap(); db.rooms .append_pdu( - room_id.unwrap(), + room_id.clone(), user_id.clone(), EventType::RoomMember, - json.clone().into(), + serde_json::to_value(ruma_events::room::member::MemberEventContent { + avatar_url: Some(body.avatar_url.clone()), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id) + .unwrap() + .get(&(EventType::RoomMember, user_id.to_string())) + .expect("user should be part of the room") + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap() + }) + .unwrap(), None, Some(user_id.to_string()), &db.globals, @@ -933,7 +956,16 @@ pub fn create_room_route( room_id.clone(), user_id.clone(), EventType::RoomCreate, - json!({ "creator": user_id }), + serde_json::to_value(ruma_events::room::create::CreateEventContent { + creator: user_id.clone(), + federate: body + .creation_content + .and_then(|c| c.federate) + .unwrap_or(true), + predecessor: None, // TODO: Check creation_content.predecessor once ruma has that + room_version: RoomVersionId::version_5(), + }) + .unwrap(), None, Some("".to_owned()), &db.globals, @@ -944,34 +976,148 @@ pub fn create_room_route( .join(&room_id, &user_id, &db.users, &db.globals) .unwrap(); + // Figure out preset. We need it for power levels and preset specific events + let visibility = body.visibility.unwrap_or(room::Visibility::Private); + let preset = body.preset.unwrap_or_else(|| match visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + }); + + // 0. Power levels + let mut users = BTreeMap::new(); + users.insert(user_id.clone(), 100.into()); + for invite_user_id in &body.invite { + users.insert(invite_user_id.clone(), 100.into()); + } + + let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { + serde_json::from_str(power_levels.json().get()) + .expect("TODO: handle. we hope the client sends a valid power levels json") + } else { + serde_json::to_value(ruma_events::room::power_levels::PowerLevelsEventContent { + ban: 50.into(), + events: BTreeMap::new(), + events_default: 0.into(), + invite: 50.into(), + kick: 50.into(), + redact: 50.into(), + state_default: 50.into(), + users, + users_default: 0.into(), + notifications: ruma_events::room::power_levels::NotificationPowerLevels { + room: 50.into(), + }, + }) + .unwrap() + }; db.rooms .append_pdu( room_id.clone(), user_id.clone(), EventType::RoomPowerLevels, - json!({ - "ban": 50, - "events_default": 0, - "invite": 50, - "kick": 50, - "redact": 50, - "state_default": 50, - "users": { user_id.to_string(): 100 }, - "users_default": 0 - }), + power_levels_content, None, Some("".to_owned()), &db.globals, ) .unwrap(); + // 1. 
Events set by preset + // 1.1 Join Rules + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomJoinRules, + match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(join_rules::JoinRulesEventContent { + join_rule: join_rules::JoinRule::Public, + }) + .unwrap() + } + _ => serde_json::to_value(join_rules::JoinRulesEventContent { + join_rule: join_rules::JoinRule::Invite, + }) + .unwrap(), + }, + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); + + // 1.2 History Visibility + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomHistoryVisibility, + serde_json::to_value(history_visibility::HistoryVisibilityEventContent { + history_visibility: history_visibility::HistoryVisibility::Shared, + }) + .unwrap(), + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); + + // 1.3 Guest Access + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomGuestAccess, + match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent { + guest_access: guest_access::GuestAccess::Forbidden, + }) + .unwrap() + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent { + guest_access: guest_access::GuestAccess::CanJoin, + }) + .unwrap(), + }, + None, + Some("".to_owned()), + &db.globals, + ) + .unwrap(); + + // 2. Events listed in initial_state + for create_room::InitialStateEvent { + event_type, + state_key, + content, + } in &body.initial_state + { + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::from(event_type), + serde_json::from_str(content.get()).unwrap(), + None, + state_key.clone(), + &db.globals, + ) + .unwrap(); + } + + // 3. Events implied by name and topic if let Some(name) = &body.name { db.rooms .append_pdu( room_id.clone(), user_id.clone(), EventType::RoomName, - json!({ "name": name }), + serde_json::to_value( + ruma_events::room::name::NameEventContent::new(name.clone()).unwrap(), + ) + .unwrap(), None, Some("".to_owned()), &db.globals, @@ -985,7 +1131,10 @@ pub fn create_room_route( room_id.clone(), user_id.clone(), EventType::RoomTopic, - json!({ "topic": topic }), + serde_json::to_value(ruma_events::room::topic::TopicEventContent { + topic: topic.clone(), + }) + .unwrap(), None, Some("".to_owned()), &db.globals, @@ -993,6 +1142,7 @@ pub fn create_room_route( .unwrap(); } + // 4. Events implied by invite (and TODO: invite_3pid) for user in &body.invite { db.rooms .invite(&user_id, &room_id, user, &db.globals) @@ -1050,6 +1200,8 @@ pub fn join_room_by_id_route( room_id: body.room_id.clone(), })) } else { + // We don't have this room. 
Let's ask a remote server + // TODO MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room not found.".to_owned(), @@ -1064,8 +1216,6 @@ pub fn join_room_by_id_or_alias_route( body: Ruma, _room_id_or_alias: String, ) -> MatrixResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); - let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(room_alias) => { @@ -1083,19 +1233,21 @@ pub fn join_room_by_id_or_alias_route( } }; - if db - .rooms - .join(&room_id, &user_id, &db.users, &db.globals) - .is_ok() - { - MatrixResult(Ok(join_room_by_id_or_alias::Response { room_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) - } + let body = Ruma { + user_id: body.user_id.clone(), + device_id: body.device_id.clone(), + json_body: None, + body: join_room_by_id::Request { + room_id, + third_party_signed: body.third_party_signed.clone(), + }, + }; + MatrixResult(match join_room_by_id_route(db, body, "".to_owned()).0 { + Ok(response) => Ok(join_room_by_id_or_alias::Response { + room_id: response.room_id, + }), + Err(e) => Err(e), + }) } #[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] @@ -1147,53 +1299,60 @@ pub fn invite_user_route( } } -#[get("/_matrix/client/r0/publicRooms")] +#[get("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_route( db: State<'_, Database>, + body: Ruma, ) -> MatrixResult { - let mut chunk = db - .rooms - .all_rooms() - .into_iter() - .map(|room_id| { - let state = db.rooms.room_state(&room_id).unwrap(); - directory::PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: None, - name: state - .get(&(EventType::RoomName, "".to_owned())) - .and_then(|s| s.content.get("name")) - .and_then(|n| n.as_str()) - .map(|n| n.to_owned()), - num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), - room_id, - topic: state - .get(&(EventType::RoomTopic, "".to_owned())) - .and_then(|s| s.content.get("topic")) - .and_then(|n| n.as_str()) - .map(|n| n.to_owned()), - world_readable: false, - guest_can_join: true, - avatar_url: None, - } - }) - .collect::>(); + let Ruma { + body: + get_public_rooms::Request { + limit, + server, + since, + }, + user_id, + device_id, + json_body, + } = body; - chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + let response = get_public_rooms_filtered_route( + db, + Ruma { + body: get_public_rooms_filtered::Request { + filter: None, + limit, + room_network: get_public_rooms_filtered::RoomNetwork::Matrix, + server, + since, + }, + user_id, + device_id, + json_body, + }, + ) + .await; - let total_room_count_estimate = (chunk.len() as u32).into(); - - MatrixResult(Ok(get_public_rooms::Response { - chunk, - prev_batch: None, - next_batch: None, - total_room_count_estimate: Some(total_room_count_estimate), - })) + MatrixResult(match response.0 { + Ok(get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + }) => Ok(get_public_rooms::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + }), + Err(e) => Err(e), + }) } -#[post("/_matrix/client/r0/publicRooms")] +#[post("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, + body: Ruma, ) -> MatrixResult { let mut chunk = db .rooms @@ -1201,21 +1360,32 @@ pub async fn get_public_rooms_filtered_route( .into_iter() 
.map(|room_id| { let state = db.rooms.room_state(&room_id).unwrap(); + directory::PublicRoomsChunk { aliases: Vec::new(), canonical_alias: None, - name: state - .get(&(EventType::RoomName, "".to_owned())) - .and_then(|s| s.content.get("name")) - .and_then(|n| n.as_str()) - .map(|n| n.to_owned()), + name: state.get(&(EventType::RoomName, "".to_owned())).map(|s| { + serde_json::from_value::>( + s.content.clone(), + ) + .unwrap() + .deserialize() + .unwrap() + .name() + .unwrap() + .to_owned() + }), num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, - topic: state - .get(&(EventType::RoomTopic, "".to_owned())) - .and_then(|s| s.content.get("topic")) - .and_then(|n| n.as_str()) - .map(|n| n.to_owned()), + topic: state.get(&(EventType::RoomTopic, "".to_owned())).map(|s| { + serde_json::from_value::< + EventJson, + >(s.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .topic + }), world_readable: false, guest_can_join: true, avatar_url: None, @@ -1319,7 +1489,7 @@ pub fn create_message_event_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone().unwrap(), + serde_json::from_str(body.json_body.unwrap().get()).unwrap(), Some(unsigned), None, &db.globals, @@ -1349,7 +1519,7 @@ pub fn create_state_event_for_key_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone().unwrap(), + serde_json::from_str(body.json_body.clone().unwrap().get()).unwrap(), None, Some(body.state_key.clone()), &db.globals, @@ -1378,7 +1548,7 @@ pub fn create_state_event_for_empty_key_route( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - body.json_body.clone().unwrap(), + serde_json::from_str(body.json_body.unwrap().get()).unwrap(), None, Some("".to_owned()), &db.globals, @@ -1494,11 +1664,21 @@ pub fn sync_route( let mut send_full_state = false; for pdu in &pdus { if pdu.kind == EventType::RoomMember { - if pdu.state_key == Some(user_id.to_string()) && pdu.content["membership"] == "join" - { - send_full_state = true; - } send_member_count = true; + if !send_full_state && pdu.state_key == Some(user_id.to_string()) { + let content = serde_json::from_value::< + EventJson, + >(pdu.content.clone()) + .unwrap() + .deserialize() + .unwrap(); + if content.membership == ruma_events::room::member::MembershipState::Join { + send_full_state = true; + // Both send_member_count and send_full_state are set. There's nothing more + // to do + break; + } + } } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 28e6fbb..eff3a86 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,3 +1,4 @@ +use crate::utils; use log::warn; use rocket::{ data::{Data, FromData, FromDataFuture, Transform, TransformFuture, Transformed}, @@ -16,10 +17,10 @@ const MESSAGE_LIMIT: u64 = 20 * 1024 * 1024; // 20 MB /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. pub struct Ruma { - body: T, + pub body: T, pub user_id: Option, pub device_id: Option, - pub json_body: Option, // This is None if parsing failed (for raw byte bodies) + pub json_body: Option>, // This is None when body is not a valid string } impl<'a, T: Endpoint> FromData<'a> for Ruma { @@ -86,7 +87,9 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { user_id, device_id, // TODO: Can we avoid parsing it again? 
(We only need this for append_pdu) - json_body: serde_json::from_slice(&body).ok() + json_body: utils::string_from_bytes(&body) + .ok() + .and_then(|s| serde_json::value::RawValue::from_string(s).ok()), }), Err(e) => { warn!("{:?}", e); From b6c0e9bfb27b2bcf6e66b6b748653d1fd1ad321d Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 24 May 2020 18:25:52 +0200 Subject: [PATCH 0099/1727] feat: access control --- src/client_server.rs | 634 ++++++++++++++++++++++++------------------ src/database/rooms.rs | 413 ++++++++++++++++++--------- 2 files changed, 647 insertions(+), 400 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 52f60ad..5641f01 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -45,7 +45,7 @@ use ruma_client_api::{ }; use ruma_events::{ collections::only::Event as EduEvent, - room::{guest_access, history_visibility, join_rules}, + room::{guest_access, history_visibility, join_rules, member}, EventJson, EventType, }; use ruma_identifiers::{RoomId, RoomVersionId, UserId}; @@ -246,13 +246,11 @@ pub fn login_route( ) -> MatrixResult { // Validate login method let user_id = - if let (login::UserInfo::MatrixId(mut username), login::LoginInfo::Password { password }) = + // TODO: Other login methods + if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = (body.user.clone(), body.login_info.clone()) { - if !username.contains(':') { - username = format!("@{}:{}", username, db.globals.server_name()); - } - if let Ok(user_id) = (*username).try_into() { + if let Ok(user_id) = UserId::parse_with_server_name(username, db.globals.server_name()) { if let Some(hash) = db.users.password_hash(&user_id).unwrap() { let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); @@ -349,6 +347,8 @@ pub fn get_pushrules_all_route( body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + warn!("TODO: get_pushrules_all_route"); + if let Some(EduEvent::PushRules(pushrules)) = db .account_data .get(None, &user_id, &EventType::PushRules) @@ -472,7 +472,7 @@ pub fn get_global_account_data_route( MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Data not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })) } } @@ -485,76 +485,60 @@ pub fn set_displayname_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let Some(displayname) = &body.displayname { - // Some("") will clear the displayname - if displayname == "" { - db.users.set_displayname(&user_id, None).unwrap(); - } else { - db.users - .set_displayname(&user_id, Some(displayname.clone())) - .unwrap(); - } + db.users + .set_displayname(&user_id, body.displayname.clone()) + .unwrap(); - // Send a new membership event into all joined rooms - for room_id in db.rooms.rooms_joined(&user_id) { - let room_id = room_id.unwrap(); - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma_events::room::member::MemberEventContent { - displayname: Some(displayname.clone()), - ..serde_json::from_value::>( - db.rooms - .room_state(&room_id) - .unwrap() - .get(&(EventType::RoomMember, user_id.to_string())) - .expect("user should be part of the room") - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap() - }) - .unwrap(), - None, - Some(user_id.to_string()), - &db.globals, - ) - .unwrap(); - } - - // Presence update - db.global_edus - 
.update_globallatest( - &user_id, - EduEvent::Presence(ruma_events::presence::PresenceEvent { - content: ruma_events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - currently_active: None, - displayname: db.users.displayname(&user_id).unwrap(), - last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma_events::presence::PresenceState::Online, - status_msg: None, - }, - sender: user_id.clone(), - }), + // Send a new membership event into all joined rooms + for room_id in db.rooms.rooms_joined(&user_id) { + let room_id = room_id.unwrap(); + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(ruma_events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id) + .unwrap() + .get(&(EventType::RoomMember, user_id.to_string())) + .expect("user is part of the room") + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap() + }) + .unwrap(), + None, + Some(user_id.to_string()), &db.globals, ) .unwrap(); - } else { - // Send error on None - // Synapse returns a parsing error but the spec doesn't require this - debug!("Request was missing the displayname payload."); - return MatrixResult(Err(Error { - kind: ErrorKind::MissingParam, - message: "Missing displayname.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); } + // Presence update + db.global_edus + .update_globallatest( + &user_id, + EduEvent::Presence(ruma_events::presence::PresenceEvent { + content: ruma_events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + currently_active: None, + displayname: db.users.displayname(&user_id).unwrap(), + last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + presence: ruma_events::presence::PresenceState::Online, + status_msg: None, + }, + sender: user_id.clone(), + }), + &db.globals, + ) + .unwrap(); + MatrixResult(Ok(set_display_name::Response)) } @@ -565,23 +549,9 @@ pub fn get_displayname_route( _user_id: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); - if !db.users.exists(&user_id).unwrap() { - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - if let Some(displayname) = db.users.displayname(&user_id).unwrap() { - return MatrixResult(Ok(get_display_name::Response { - displayname: Some(displayname), - })); - } - - // The user has no displayname - MatrixResult(Ok(get_display_name::Response { displayname: None })) + MatrixResult(Ok(get_display_name::Response { + displayname: db.users.displayname(&user_id).unwrap(), + })) } #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] @@ -592,76 +562,74 @@ pub fn set_avatar_url_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !body.avatar_url.starts_with("mxc://") { - debug!("Request contains an invalid avatar_url."); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidParam, - message: "avatar_url has to start with mxc://.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - - // TODO in the future when we can handle media uploads make sure that this url is our own server - // TODO also make sure this is valid mxc:// format (not only 
starting with it) - - if body.avatar_url == "" { - db.users.set_avatar_url(&user_id, None).unwrap(); - } else { - db.users - .set_avatar_url(&user_id, Some(body.avatar_url.clone())) - .unwrap(); - - // Send a new membership event into all joined rooms - for room_id in db.rooms.rooms_joined(&user_id) { - let room_id = room_id.unwrap(); - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma_events::room::member::MemberEventContent { - avatar_url: Some(body.avatar_url.clone()), - ..serde_json::from_value::>( - db.rooms - .room_state(&room_id) - .unwrap() - .get(&(EventType::RoomMember, user_id.to_string())) - .expect("user should be part of the room") - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap() - }) - .unwrap(), - None, - Some(user_id.to_string()), - &db.globals, - ) - .unwrap(); + if let avatar_url = &body.avatar_url { + if !avatar_url.starts_with("mxc://") { + debug!("Request contains an invalid avatar_url."); + return MatrixResult(Err(Error { + kind: ErrorKind::InvalidParam, + message: "avatar_url has to start with mxc://.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); } - // Presence update - db.global_edus - .update_globallatest( - &user_id, - EduEvent::Presence(ruma_events::presence::PresenceEvent { - content: ruma_events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - currently_active: None, - displayname: db.users.displayname(&user_id).unwrap(), - last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma_events::presence::PresenceState::Online, - status_msg: None, - }, - sender: user_id.clone(), - }), + // TODO in the future when we can handle media uploads make sure that this url is our own server + // TODO also make sure this is valid mxc:// format (not only starting with it) + } + + db.users + .set_avatar_url(&user_id, Some(body.avatar_url.clone())) + .unwrap(); + + // Send a new membership event into all joined rooms + for room_id in db.rooms.rooms_joined(&user_id) { + let room_id = room_id.unwrap(); + db.rooms + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(ruma_events::room::member::MemberEventContent { + avatar_url: Some(body.avatar_url.clone()), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id) + .unwrap() + .get(&(EventType::RoomMember, user_id.to_string())) + .expect("user should be part of the room") + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap() + }) + .unwrap(), + None, + Some(user_id.to_string()), &db.globals, ) .unwrap(); } + // Presence update + db.global_edus + .update_globallatest( + &user_id, + EduEvent::Presence(ruma_events::presence::PresenceEvent { + content: ruma_events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + currently_active: None, + displayname: db.users.displayname(&user_id).unwrap(), + last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), + presence: ruma_events::presence::PresenceState::Online, + status_msg: None, + }, + sender: user_id.clone(), + }), + &db.globals, + ) + .unwrap(); + MatrixResult(Ok(set_avatar_url::Response)) } @@ -672,23 +640,9 @@ pub fn get_avatar_url_route( _user_id: String, ) -> MatrixResult { let user_id = (*body).user_id.clone(); - if !db.users.exists(&user_id).unwrap() { - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - return MatrixResult(Err(Error { 
- kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })); - } - if let Some(avatar_url) = db.users.avatar_url(&user_id).unwrap() { - return MatrixResult(Ok(get_avatar_url::Response { - avatar_url: Some(avatar_url), - })); - } - - // The user has no avatar - MatrixResult(Ok(get_avatar_url::Response { avatar_url: None })) + MatrixResult(Ok(get_avatar_url::Response { + avatar_url: db.users.avatar_url(&user_id).unwrap(), + })) } #[get("/_matrix/client/r0/profile/<_user_id>", data = "")] @@ -713,7 +667,7 @@ pub fn get_profile_route( MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })) } @@ -947,10 +901,11 @@ pub fn create_room_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - // TODO: check if room is unique let room_id = RoomId::new(db.globals.server_name()).expect("host is valid"); let user_id = body.user_id.as_ref().expect("user is authenticated"); + // TODO: Create alias and check if it already exists + db.rooms .append_pdu( room_id.clone(), @@ -972,8 +927,24 @@ pub fn create_room_route( ) .unwrap(); + // Join room db.rooms - .join(&room_id, &user_id, &db.users, &db.globals) + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&user_id).unwrap(), + avatar_url: db.users.avatar_url(&user_id).unwrap(), + is_direct: body.is_direct, + third_party_invite: None, + }) + .unwrap(), + None, + Some(user_id.to_string()), + &db.globals, + ) .unwrap(); // Figure out preset. We need it for power levels and preset specific events @@ -1145,7 +1116,22 @@ pub fn create_room_route( // 4. Events implied by invite (and TODO: invite_3pid) for user in &body.invite { db.rooms - .invite(&user_id, &room_id, user, &db.globals) + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user).unwrap(), + avatar_url: db.users.avatar_url(&user).unwrap(), + is_direct: body.is_direct, + third_party_invite: None, + }) + .unwrap(), + None, + Some(user.to_string()), + &db.globals, + ) .unwrap(); } @@ -1167,7 +1153,7 @@ pub fn get_alias_route( return MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })); } } @@ -1191,23 +1177,54 @@ pub fn join_room_by_id_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if db + // TODO: Ask a remote server if we don't have this room + + let event = db .rooms - .join(&body.room_id, &user_id, &db.users, &db.globals) - .is_ok() - { - MatrixResult(Ok(join_room_by_id::Response { - room_id: body.room_id.clone(), - })) - } else { - // We don't have this room. 
Let's ask a remote server - // TODO - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) - } + .room_state(&body.room_id) + .unwrap() + .get(&(EventType::RoomMember, user_id.to_string())) + .map_or_else( + || { + // There was no existing membership event + member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&user_id).unwrap(), + avatar_url: db.users.avatar_url(&user_id).unwrap(), + is_direct: None, + third_party_invite: None, + } + }, + |pdu| { + // We change the existing membership event + let mut event = serde_json::from_value::>( + pdu.content.clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + event.membership = member::MembershipState::Join; + event.displayname = db.users.displayname(&user_id).unwrap(); + event.avatar_url = db.users.avatar_url(&user_id).unwrap(); + event + }, + ); + + db.rooms + .append_pdu( + body.room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(event).unwrap(), + None, + Some(user_id.to_string()), + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(join_room_by_id::Response { + room_id: body.room_id.clone(), + })) } #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] @@ -1223,7 +1240,7 @@ pub fn join_room_by_id_or_alias_route( return MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room alias not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })); } else { // Ask creator server of the room to join TODO ask someone else when not available @@ -1257,9 +1274,19 @@ pub fn leave_room_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + db.rooms - .leave(&user_id, &body.room_id, &user_id, &db.globals) + .append_pdu( + body.room_id.clone(), + user_id.clone(), + EventType::RoomMember, + json!({"membership": "leave"}), + None, + Some(user_id.to_string()), + &db.globals, + ) .unwrap(); + MatrixResult(Ok(leave_room::Response)) } @@ -1270,7 +1297,9 @@ pub fn forget_room_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + db.rooms.forget(&body.room_id, &user_id).unwrap(); + MatrixResult(Ok(forget_room::Response)) } @@ -1281,20 +1310,32 @@ pub fn invite_user_route( _room_id: String, ) -> MatrixResult { if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { + let event = member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id).unwrap(), + avatar_url: db.users.avatar_url(&user_id).unwrap(), + is_direct: None, + third_party_invite: None, + }; + db.rooms - .invite( - &body.user_id.as_ref().expect("user is authenticated"), - &body.room_id, - &user_id, + .append_pdu( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + EventType::RoomMember, + serde_json::to_value(event).unwrap(), + None, + Some(user_id.to_string()), &db.globals, ) .unwrap(); + MatrixResult(Ok(invite_user::Response)) } else { MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "User not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })) } } @@ -1483,20 +1524,23 @@ pub fn create_message_event_route( let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = 
db - .rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), - body.event_type.clone(), - serde_json::from_str(body.json_body.unwrap().get()).unwrap(), - Some(unsigned), - None, - &db.globals, - ) - .expect("message events are always okay"); - - MatrixResult(Ok(create_message_event::Response { event_id })) + if let Ok(event_id) = db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + body.event_type.clone(), + serde_json::from_str(body.json_body.unwrap().get()).unwrap(), + Some(unsigned), + None, + &db.globals, + ) { + MatrixResult(Ok(create_message_event::Response { event_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Failed to send message.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } #[put( @@ -1513,20 +1557,23 @@ pub fn create_state_event_for_key_route( let user_id = body.user_id.as_ref().expect("user is authenticated"); // Reponse of with/without key is the same - let event_id = db - .rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), - body.event_type.clone(), - serde_json::from_str(body.json_body.clone().unwrap().get()).unwrap(), - None, - Some(body.state_key.clone()), - &db.globals, - ) - .unwrap(); - - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) + if let Ok(event_id) = db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + body.event_type.clone(), + serde_json::from_str(body.json_body.clone().unwrap().get()).unwrap(), + None, + Some(body.state_key.clone()), + &db.globals, + ) { + MatrixResult(Ok(create_state_event_for_key::Response { event_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Failed to send event.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } #[put( @@ -1542,20 +1589,23 @@ pub fn create_state_event_for_empty_key_route( let user_id = body.user_id.as_ref().expect("user is authenticated"); // Reponse of with/without key is the same - let event_id = db - .rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), - body.event_type.clone(), - serde_json::from_str(body.json_body.unwrap().get()).unwrap(), - None, - Some("".to_owned()), - &db.globals, - ) - .unwrap(); - - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) + if let Ok(event_id) = db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + body.event_type.clone(), + serde_json::from_str(body.json_body.unwrap().get()).unwrap(), + None, + Some("".to_owned()), + &db.globals, + ) { + MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Failed to send event.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] @@ -1564,15 +1614,25 @@ pub fn get_state_events_route( body: Ruma, _room_id: String, ) -> MatrixResult { - MatrixResult(Ok(get_state_events::Response { - room_state: db - .rooms - .room_state(&body.room_id) - .unwrap() - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), - })) + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if db.rooms.is_joined(user_id, &body.room_id).unwrap() { + MatrixResult(Ok(get_state_events::Response { + room_state: db + .rooms + .room_state(&body.room_id) + .unwrap() + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "You don't have permission to view the room 
state.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } #[get( @@ -1586,19 +1646,29 @@ pub fn get_state_events_for_key_route( _event_type: String, _state_key: String, ) -> MatrixResult { - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() - .get(&(body.event_type.clone(), body.state_key.clone())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(event).unwrap(), - })) + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if db.rooms.is_joined(user_id, &body.room_id).unwrap() { + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + .get(&(body.event_type.clone(), body.state_key.clone())) + { + MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(event).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } else { MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), + kind: ErrorKind::Forbidden, + message: "You don't have permission to view the room state.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })) } @@ -1614,19 +1684,29 @@ pub fn get_state_events_for_empty_key_route( _room_id: String, _event_type: String, ) -> MatrixResult { - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() - .get(&(body.event_type.clone(), "".to_owned())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(event).unwrap(), - })) + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if db.rooms.is_joined(user_id, &body.room_id).unwrap() { + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + .get(&(body.event_type.clone(), "".to_owned())) + { + MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(event).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } } else { MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), + kind: ErrorKind::Forbidden, + message: "You don't have permission to view the room state.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })) } @@ -1902,6 +1982,16 @@ pub fn get_message_events_route( body: Ruma, _room_id: String, ) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "You don't have permission to view this room.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + if let get_message_events::Direction::Forward = body.dir { todo!(); } @@ -1930,7 +2020,7 @@ pub fn get_message_events_route( })) } else { MatrixResult(Err(Error { - kind: ErrorKind::NotFound, + kind: ErrorKind::Unknown, message: "Invalid from.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })) @@ -2058,7 +2148,7 @@ pub fn get_content_route( MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Media not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })) } } @@ -2087,7 +2177,7 @@ pub fn get_content_thumbnail_route( MatrixResult(Err(Error { 
kind: ErrorKind::NotFound, message: "Media not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, + status_code: http::StatusCode::BAD_REQUEST, })) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 312390b..87f3429 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,11 +3,18 @@ mod edus; pub use edus::RoomEdus; use crate::{utils, Error, PduEvent, Result}; -use ruma_events::{room::power_levels::PowerLevelsEventContent, EventJson, EventType}; +use log::error; +use ruma_events::{ + room::{ + join_rules, member, + power_levels::{self, PowerLevelsEventContent}, + }, + EventJson, EventType, +}; use ruma_identifiers::{EventId, RoomId, UserId}; -use serde_json::json; + use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, mem, }; @@ -112,6 +119,20 @@ impl Rooms { }) } + /// Returns the pdu. + pub fn get_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.to_string().as_bytes())? + .map_or(Ok(None), |pdu_id| { + Ok(serde_json::from_slice( + &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( + "eventid_pduid points to nonexistent pdu", + ))?, + )?) + .map(Some) + }) + } + /// Returns the leaf pdus of a room. pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.to_string().as_bytes().to_vec(); @@ -158,41 +179,227 @@ impl Rooms { state_key: Option, globals: &super::globals::Globals, ) -> Result { + // TODO: Make sure this isn't called twice in parallel + + let prev_events = self.get_pdu_leaves(&room_id)?; + // Is the event authorized? - if state_key.is_some() { - if let Some(pdu) = self + if let Some(state_key) = &state_key { + let power_levels = self .room_state(&room_id)? .get(&(EventType::RoomPowerLevels, "".to_owned())) + .map_or_else( + || { + Ok::<_, Error>(power_levels::PowerLevelsEventContent { + ban: 50.into(), + events: BTreeMap::new(), + events_default: 0.into(), + invite: 50.into(), + kick: 50.into(), + redact: 50.into(), + state_default: 0.into(), + users: BTreeMap::new(), + users_default: 0.into(), + notifications: + ruma_events::room::power_levels::NotificationPowerLevels { + room: 50.into(), + }, + }) + }, + |power_levels| { + Ok( + serde_json::from_value::>( + power_levels.content.clone(), + )? + .deserialize()?, + ) + }, + )?; { - let power_levels = serde_json::from_value::>( - pdu.content.clone(), - )? - .deserialize()?; + let sender_membership = self + .room_state(&room_id)? + .get(&(EventType::RoomMember, sender.to_string())) + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok( + serde_json::from_value::>( + pdu.content.clone(), + )? + .deserialize()? + .membership, + ) + })?; - match event_type { + let sender_power = power_levels.users.get(&sender).map_or_else( + || { + if sender_membership != member::MembershipState::Join { + None + } else { + Some(&power_levels.users_default) + } + }, + // If it's okay, wrap with Some(_) + Some, + ); + + if !match event_type { EventType::RoomMember => { - // Member events are okay for now (TODO) + let target_user_id = UserId::try_from(&**state_key)?; + + let current_membership = self + .room_state(&room_id)? + .get(&(EventType::RoomMember, target_user_id.to_string())) + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok(serde_json::from_value::< + EventJson, + >(pdu.content.clone())? + .deserialize()? + .membership) + })?; + + let target_membership = serde_json::from_value::< + EventJson, + >(content.clone())? + .deserialize()? 
+ .membership; + + let target_power = power_levels.users.get(&target_user_id).map_or_else( + || { + if target_membership != member::MembershipState::Join { + None + } else { + Some(&power_levels.users_default) + } + }, + // If it's okay, wrap with Some(_) + Some, + ); + + let join_rules = self + .room_state(&room_id)? + .get(&(EventType::RoomJoinRules, "".to_owned())) + .map_or(join_rules::JoinRule::Public, |pdu| { + serde_json::from_value::< + EventJson, + >(pdu.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .join_rule + }); + + if target_membership == member::MembershipState::Join { + let mut prev_events = prev_events.iter(); + let prev_event = self + .get_pdu(prev_events.next().ok_or(Error::BadRequest( + "membership can't be the first event", + ))?)? + .ok_or(Error::BadDatabase("pdu leave points to valid event"))?; + if prev_event.kind == EventType::RoomCreate + && prev_event.prev_events.is_empty() + { + true + } else if sender != target_user_id { + false + } else if let member::MembershipState::Ban = current_membership { + false + } else if join_rules == join_rules::JoinRule::Invite + && (current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Invite) + { + true + } else if join_rules == join_rules::JoinRule::Public { + true + } else { + false + } + } else if target_membership == member::MembershipState::Invite { + if let Some(third_party_invite_json) = content.get("third_party_invite") + { + if current_membership == member::MembershipState::Ban { + false + } else { + let _third_party_invite = + serde_json::from_value::( + third_party_invite_json.clone(), + )?; + todo!("handle third party invites"); + } + } else if sender_membership != member::MembershipState::Join { + false + } else if current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Ban + { + false + } else if sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some() + { + true + } else { + false + } + } else if target_membership == member::MembershipState::Leave { + if sender == target_user_id { + current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Invite + } else if sender_membership != member::MembershipState::Join { + false + } else if current_membership == member::MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { + false + } else if sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power + { + true + } else { + false + } + } else if target_membership == member::MembershipState::Ban { + if sender_membership != member::MembershipState::Join { + false + } else if sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power + { + true + } else { + false + } + } else { + false + } } - _ if power_levels - .users - .get(&sender) - .unwrap_or(&power_levels.users_default) - <= &0.into() => - { - // Not authorized - return Err(Error::BadRequest("event not authorized")); + EventType::RoomCreate => prev_events.is_empty(), + _ if sender_membership == member::MembershipState::Join => { + // TODO + sender_power.unwrap_or(&power_levels.users_default) + >= &power_levels.state_default } - // User has sufficient power - _ => {} + + _ => false, + } { + error!("Unauthorized"); + // Not authorized + return Err(Error::BadRequest("event not authorized")); + } + if event_type == EventType::RoomMember { + // TODO: Don't get this twice + let target_user_id = 
UserId::try_from(&**state_key)?; + self.update_membership( + &room_id, + &target_user_id, + &serde_json::from_value::>( + content.clone(), + )? + .deserialize()? + .membership, + )?; } } + } else if !self.is_joined(&sender, &room_id)? { + return Err(Error::BadRequest("event not authorized")); } - // prev_events are the leaves of the current graph. This method removes all leaves from the - // room and replaces them with our event - // TODO: Make sure this isn't called twice in parallel - let prev_events = self.get_pdu_leaves(&room_id)?; - // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() @@ -206,7 +413,7 @@ impl Rooms { if let Some(state_key) = &state_key { if let Some(prev_pdu) = self .room_state(&room_id)? - .get(&(event_type.clone(), state_key.clone())) + .get(&(event_type.clone(), state_key.to_owned())) { unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); } @@ -348,63 +555,12 @@ impl Rooms { .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) } - /// Makes a user join a room. - pub fn join( + /// Makes a user join a room. Only call this if the membership is Join already + fn update_membership( &self, room_id: &RoomId, user_id: &UserId, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<()> { - if !self.exists(room_id)? { - return Err(Error::BadRequest("room does not exist")); - } - - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); - - let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); - - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&roomuser_id)?; - self.userroomid_left.remove(&userroom_id)?; - - let mut json = serde_json::Map::new(); - json.insert("membership".to_owned(), "join".into()); - - if let Some(displayname) = users.displayname(&user_id).unwrap() { - json.insert("displayname".to_owned(), displayname.into()); - } - - if let Some(avatar_url) = users.avatar_url(&user_id).unwrap() { - json.insert("avatar_url".to_owned(), avatar_url.into()); - } - - self.append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - json.into(), - None, - Some(user_id.to_string()), - globals, - )?; - - Ok(()) - } - - /// Makes a user leave a room. 
- pub fn leave( - &self, - sender: &UserId, - room_id: &RoomId, - user_id: &UserId, - globals: &super::globals::Globals, + membership: &member::MembershipState, ) -> Result<()> { let mut userroom_id = user_id.to_string().as_bytes().to_vec(); userroom_id.push(0xff); @@ -414,21 +570,30 @@ impl Rooms { roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&userroom_id)?; - self.userroomid_left.insert(&userroom_id, &[])?; - - self.append_pdu( - room_id.clone(), - sender.clone(), - EventType::RoomMember, - json!({"membership": "leave"}), - None, - Some(user_id.to_string()), - globals, - )?; + match &membership { + member::MembershipState::Join => { + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invited.remove(&userroom_id)?; + self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_left.remove(&userroom_id)?; + } + member::MembershipState::Invite => { + self.userroomid_invited.insert(&userroom_id, &[])?; + self.roomuserid_invited.insert(&roomuser_id, &[])?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_left.remove(&userroom_id)?; + } + member::MembershipState::Leave | member::MembershipState::Ban => { + self.userroomid_left.insert(&userroom_id, &[])?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invited.remove(&userroom_id)?; + self.roomuserid_invited.remove(&roomuser_id)?; + } + _ => {} + } Ok(()) } @@ -444,38 +609,6 @@ impl Rooms { Ok(()) } - /// Makes a user invite another user into room. - pub fn invite( - &self, - sender: &UserId, - room_id: &RoomId, - user_id: &UserId, - globals: &super::globals::Globals, - ) -> Result<()> { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); - - let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); - - self.userroomid_invited.insert(userroom_id, &[])?; - self.roomuserid_invited.insert(roomuser_id, &[])?; - - self.append_pdu( - room_id.clone(), - sender.clone(), - EventType::RoomMember, - json!({"membership": "invite"}), - None, - Some(user_id.to_string()), - globals, - )?; - - Ok(()) - } - /// Returns an iterator over all rooms a user joined. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_joined @@ -550,4 +683,28 @@ impl Rooms { )?)?) 
}) } + + pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + Ok(self.userroomid_joined.get(userroom_id)?.is_some()) + } + + pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + Ok(self.userroomid_invited.get(userroom_id)?.is_some()) + } + + pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + Ok(self.userroomid_left.get(userroom_id)?.is_some()) + } } From 63c62a4afaef35b0a1e8dc0384182f671f6e027b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 25 May 2020 17:36:54 +0200 Subject: [PATCH 0100/1727] Update 'README.md' --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 87b1352..067d9ca 100644 --- a/README.md +++ b/README.md @@ -32,12 +32,11 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [x] Password hashing - [x] Riot E2EE - [x] Media +- [x] Permission system - [ ] Basic federation - [ ] State resolution -- [ ] Permission system - [ ] Notifications (push rules) - [ ] Riot presence -- [ ] Proper room creation #### How can I contribute? From 58683585cc494b2d2f596b44d76c0df1fc323c61 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 25 May 2020 20:10:46 +0200 Subject: [PATCH 0101/1727] improvement: show more info in public rooms list --- src/client_server.rs | 40 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 5641f01..cf858b4 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1404,7 +1404,15 @@ pub async fn get_public_rooms_filtered_route( directory::PublicRoomsChunk { aliases: Vec::new(), - canonical_alias: None, + canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).and_then(|s| { + serde_json::from_value::< + EventJson, + >(s.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .alias + }).map(|a| a.to_string()), name: state.get(&(EventType::RoomName, "".to_owned())).map(|s| { serde_json::from_value::>( s.content.clone(), @@ -1427,9 +1435,33 @@ pub async fn get_public_rooms_filtered_route( .unwrap() .topic }), - world_readable: false, - guest_can_join: true, - avatar_url: None, + world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(false, |s| { + serde_json::from_value::< + EventJson, + >(s.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .history_visibility == history_visibility::HistoryVisibility::WorldReadable + }), + guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(false, |s| { + serde_json::from_value::< + EventJson, + >(s.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .guest_access == guest_access::GuestAccess::CanJoin + }), + avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map(|s| { + serde_json::from_value::< + EventJson, + >(s.content.clone()) + .unwrap() + .deserialize() + .unwrap() + .url + }), } }) .collect::>(); From 43ed80c6c24d6462c305d9468150e02e73bf35bf Mon Sep 17 00:00:00 2001 
From: Jean-Baptiste Petit Date: Tue, 26 May 2020 16:01:38 +0200 Subject: [PATCH 0102/1727] Move create room test into blacklist - fixes #23 --- sytest/sytest-blacklist | 2 ++ sytest/sytest-whitelist | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/sytest/sytest-blacklist b/sytest/sytest-blacklist index e69de29..b45e075 100644 --- a/sytest/sytest-blacklist +++ b/sytest/sytest-blacklist @@ -0,0 +1,2 @@ +# This test checks for a room-alias key in the response which is not in the spec +POST /createRoom makes a public room \ No newline at end of file diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 2c118c2..a30543f 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -25,7 +25,6 @@ POST /login can log in as a user with just the local part of the id POST /login as non-existing user is rejected POST /login wrong password is rejected # Room creation endpoints implemented -POST /createRoom makes a public room POST /createRoom makes a private room POST /createRoom makes a private room with invites POST /createRoom makes a room with a name From 4b3b562347154a85ae3dd50118b18be5aa7a7506 Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Tue, 26 May 2020 21:06:54 +0200 Subject: [PATCH 0103/1727] Get device_id from body instead of auth data The device_id was retrieved from the auth data instead of the login request's body, which caused a new device to be created on every login. This is probably why some sytests are failing (for example, "POST /login returns the same device_id as that in the request"). --- src/client_server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client_server.rs b/src/client_server.rs index cf858b4..1cbb839 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -293,6 +293,7 @@ pub fn login_route( // Generate new device id if the user didn't specify one let device_id = body + .body .device_id .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); From ca77c792222af5c8da50f36b9f4e06526bcb1548 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 27 May 2020 07:09:23 +0200 Subject: [PATCH 0104/1727] fix: /register lowercases user ids fixes #24 --- src/client_server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 1cbb839..6812fdd 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -126,7 +126,8 @@ pub fn register_route( let user_id = match UserId::parse_with_server_name( body.username .clone() - .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)), + .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)) + .to_lowercase(), db.globals.server_name(), ) .ok() From 55842a0e57e215c3246660eae33cba2b844ab623 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 29 May 2020 09:54:30 +0200 Subject: [PATCH 0105/1727] Update 'README.md' --- README.md | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 067d9ca..947063b 100644 --- a/README.md +++ b/README.md @@ -4,13 +4,22 @@ [![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) [![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) +#### What is the goal + +A fast Matrix homeserver that's easy to set up and just works.
You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. + #### Is it fast? See it in action: -#### What is the goal +#### Can I try it out? -A fast Matrix homeserver that's optimized for smaller, personal servers, instead of one server that has high scalability. +Yes! Just open and register on the default homeserver. You can also use and set the homeserver url to to connect. (BUG: Registration currently doesn't work on riot.im. [Fix](https://github.com/matrix-org/matrix-js-sdk/pull/1304) will come soon) + +#### How can I deploy my own? + +You just have to clone the repo, build it with `cargo build --release` and call the binary (target/release/conduit) from somewhere like a systemd script. +It's explained in more detail [here](https://git.koesters.xyz/timo/conduit/wiki/Deploy). #### What is it build on? @@ -18,29 +27,15 @@ A fast Matrix homeserver that's optimized for smaller, personal servers, instead - [Sled](https://github.com/spacejam/sled): A simple (key, value) database with good performance - [Rocket](https://rocket.rs): A flexible web framework -#### What are the next steps? +#### What are the biggest things still missing? -- [x] Register, login, authentication tokens -- [x] Create room messages -- [x] Sync room messages -- [x] Join rooms, lookup room ids -- [x] Riot web support -- [x] Room discovery -- [x] Read receipts -- [x] Typing indications -- [x] Invites, user search -- [x] Password hashing -- [x] Riot E2EE -- [x] Media -- [x] Permission system -- [ ] Basic federation -- [ ] State resolution -- [ ] Notifications (push rules) -- [ ] Riot presence +- Federation (Make Conduit talk to other Matrix servers) +- Notifications (Make Matrix client notify the user when new messages or pings arrive) +- Lots of testing #### How can I contribute? -The best way to find something to work on is by joining the #conduit:koesters.xyz Matrix room and asking. +If you want to help, you may be able to find something in the issue tracker. If you do, comment on the issue, so others know. You can also join #conduit:matrix.org and ask there. #### Donate From 16538a6c168b95d470c4415429e8a60e61aae7b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 29 May 2020 10:04:19 +0200 Subject: [PATCH 0106/1727] Update 'README.md' --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 947063b..94061a8 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,8 @@ It's explained in more detail [here](https://git.koesters.xyz/timo/conduit/wiki/ - Notifications (Make Matrix client notify the user when new messages or pings arrive) - Lots of testing +Also check out the [milestones](https://git.koesters.xyz/timo/conduit/milestones). + #### How can I contribute? If you want to help, you may be able to find something in the issue tracker. If you do, comment on the issue, so others know. You can also join #conduit:matrix.org and ask there. 
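The membership bookkeeping in the patches above (update_membership, is_joined, is_invited, is_left) leans on one recurring key layout: every userroomid_* tree is keyed by the user ID bytes, a single 0xff separator byte, and then the room ID bytes, with an empty value, so a membership check is a point lookup and "all rooms for this user" is a prefix scan; the roomuserid_* trees mirror the same layout with the two IDs swapped. The sketch below is not part of any patch and only illustrates that layout under stated assumptions: the helper name userroom_key and the example user/room IDs are made up, and a plain BTreeMap stands in for a sled::Tree (both keep keys in sorted byte order, which is what makes the prefix scan work).

```rust
use std::collections::BTreeMap;

/// Build the composite key used by the `userroomid_*` trees:
/// user ID bytes, a 0xff separator byte, then room ID bytes.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn main() {
    // Stand-in for the sled tree `userroomid_joined`: values are empty,
    // only the presence of a key matters.
    let mut userroomid_joined: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();

    // "Join" a room by inserting the composite key.
    userroomid_joined.insert(
        userroom_key("@alice:example.com", "!abc:example.com"),
        Vec::new(),
    );

    // `is_joined` is then a single point lookup.
    assert!(userroomid_joined
        .contains_key(&userroom_key("@alice:example.com", "!abc:example.com")));

    // "All rooms this user joined" is a scan over the prefix `user_id + 0xff`.
    let mut prefix = b"@alice:example.com".to_vec();
    prefix.push(0xff);
    let rooms: Vec<String> = userroomid_joined
        .range(prefix.clone()..)
        .take_while(|(key, _)| key.starts_with(&prefix))
        .map(|(key, _)| String::from_utf8_lossy(&key[prefix.len()..]).into_owned())
        .collect();
    assert_eq!(rooms, vec!["!abc:example.com".to_owned()]);
}
```

With this layout, a leave or ban (as in the update_membership match above) is just moving the same composite key between the _joined, _invited and _left trees, so every membership transition stays a handful of cheap key operations.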
From 18bf67748c14c2ded3bd7112e38317153d93a577 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 26 May 2020 10:27:51 +0200 Subject: [PATCH 0107/1727] feat: redaction --- Cargo.lock | 126 ++++++++++++++++++------------------------ Cargo.toml | 11 ++-- src/client_server.rs | 82 ++++++++++++++++++++------- src/database/rooms.rs | 55 +++++++++++++++--- src/pdu.rs | 33 ++++++++++- src/server_server.rs | 4 +- 6 files changed, 204 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc47816..fc9840f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -277,9 +277,9 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -421,9 +421,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -918,9 +918,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.56" +version = "0.9.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f02309a7f127000ed50594f0b50ecc69e7c654e16d41b4e8156d1b3df8e0b52e" +checksum = "7410fef80af8ac071d4f63755c0ab89ac3df0fd1ea91f1d1f37cf5cec4395990" dependencies = [ "autocfg", "cc", @@ -1002,9 +1002,9 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -1045,9 +1045,9 @@ checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro-hack" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" [[package]] name = "proc-macro-nested" @@ -1066,9 +1066,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.13" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f5ffe53a6b28e37c9c1ce74893477864d64f74778a93a4beb43c8fa167f639" +checksum = "1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101" dependencies = [ "unicode-xid 0.2.0", ] @@ -1088,7 +1088,7 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", ] [[package]] @@ -1229,7 +1229,7 @@ dependencies = [ "time", "tokio", "toml", - "version_check 0.9.1", + "version_check 0.9.2", "yansi 0.5.0", ] @@ -1242,7 +1242,7 @@ dependencies = [ "indexmap", "quote 1.0.6", "rocket_http", - "version_check 0.9.1", + "version_check 0.9.2", "yansi 
0.5.0", ] @@ -1277,7 +1277,7 @@ dependencies = [ "percent-encoding 2.1.0", "ruma-api-macros", "ruma-identifiers", - "ruma-serde 0.2.1", + "ruma-serde", "serde", "serde_json", "strum", @@ -1289,16 +1289,15 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] name = "ruma-client-api" version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "082913ad135ca55ee06a55d295bea954982f2ac5e0150adc09024f5cbb8cb6cf" +source = "git+https://github.com/ruma/ruma-client-api.git#b064daf23dbf970933e83ce3b84a2563c5e646e7" dependencies = [ "http", "js_int", @@ -1306,7 +1305,7 @@ dependencies = [ "ruma-common", "ruma-events", "ruma-identifiers", - "ruma-serde 0.2.1", + "ruma-serde", "serde", "serde_json", "strum", @@ -1319,7 +1318,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "253416d67b4bde281f2781424232a58a946a4f1c451d5f857a8d0705d58eaf2a" dependencies = [ "matches", - "ruma-serde 0.2.1", + "ruma-serde", "serde", "serde_json", ] @@ -1327,14 +1326,13 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0afc6d4da07d10213478d32dc42b6222df0a1ea319e9ced9f8a341617952d909" +source = "git+https://github.com/ruma/ruma-events.git#a17714669da1db4aa7bf10948463bb964cf5058a" dependencies = [ "js_int", "ruma-common", "ruma-events-macros", "ruma-identifiers", - "ruma-serde 0.2.1", + "ruma-serde", "serde", "serde_json", ] @@ -1342,24 +1340,24 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc706c4a53cc54c3a198cfbcd7dfff20448599d84f90e636d96034d0df5a9ac9" +source = "git+https://github.com/ruma/ruma-events.git#a17714669da1db4aa7bf10948463bb964cf5058a" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] name = "ruma-federation-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma-federation-api.git?rev=ccbf216f39bbbaa59131cc200eae5bd18aa1947c#ccbf216f39bbbaa59131cc200eae5bd18aa1947c" +source = "git+https://github.com/ruma/ruma-federation-api.git?rev=4cf4aa6ef74b25ad8c14d99d7774129f023df163#4cf4aa6ef74b25ad8c14d99d7774129f023df163" dependencies = [ "js_int", + "matches", "ruma-api", "ruma-events", "ruma-identifiers", - "ruma-serde 0.1.3", + "ruma-serde", "serde", "serde_json", ] @@ -1376,23 +1374,9 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.1.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14edc0e2f5177c419e3b89060b1e94fb3af81b2f253783ac6967f14a7ec3911" -dependencies = [ - "dtoa", - "itoa", - "js_int", - "serde", - "serde_json", - "url", -] - -[[package]] -name = "ruma-serde" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd3d04c6755bae23101dec7426d044b773ef517932f23d5a6254c4caa1cfce3" +checksum = "8c71dabb8e2709ca4f59201cb72d7fe8d590e7e3f55feb348e851c18354938af" dependencies = [ "dtoa", "itoa", @@ -1520,9 +1504,9 @@ version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ - "proc-macro2 
1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -1614,9 +1598,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -1632,11 +1616,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b5f192649e48a5302a13f2feb224df883b98933222369e4b3b0fe2a5447269" +checksum = "f14a640819f79b72a710c0be059dce779f9339ae046c8bef12c361d56702146f" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", "unicode-xid 0.2.0", ] @@ -1657,22 +1641,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5976891d6950b4f68477850b5b9e5aa64d955961466f9e174363f573e54e8ca7" +checksum = "b13f926965ad00595dd129fa12823b04bbf866e9085ab0a5f2b05b850fbfc344" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab81dbd1cd69cd2ce22ecfbdd3bdb73334ba25350649408cc6c085f46d89573d" +checksum = "893582086c2f98cde18f906265a65b5030a074b1046c674ae898be6519a7f479" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -1714,9 +1698,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", ] [[package]] @@ -1782,7 +1766,7 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.1", + "version_check 0.9.2", ] [[package]] @@ -1852,9 +1836,9 @@ checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" [[package]] name = "version_check" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" [[package]] name = "want" @@ -1893,9 +1877,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", "wasm-bindgen-shared", ] @@ -1927,9 +1911,9 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ - "proc-macro2 1.0.13", + "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.23", + "syn 1.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 5a068f5..c864508 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,12 +14,12 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = "0.9.0" +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } ruma-identifiers = { 
version = "0.16.1", features = ["rand"] } ruma-api = "0.16.1" -ruma-events = "0.21.2" +ruma-events = { git = "https://github.com/ruma/ruma-events.git" } ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } -ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "ccbf216f39bbbaa59131cc200eae5bd18aa1947c" } +ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "4cf4aa6ef74b25ad8c14d99d7774129f023df163" } log = "0.4.8" sled = "0.31.0" directories = "2.0.2" @@ -31,6 +31,9 @@ rand = "0.7.3" rust-argon2 = "0.8.2" reqwest = "0.10.4" base64 = "0.12.1" -thiserror = "1.0.18" +thiserror = "1.0.19" ruma-common = "0.1.2" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } + +[patch.crates-io] +ruma-events = { git = "https://github.com/ruma/ruma-events.git" } diff --git a/src/client_server.rs b/src/client_server.rs index 6812fdd..0701be1 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -39,13 +39,13 @@ use ruma_client_api::{ to_device::{self, send_event_to_device}, typing::create_typing_event, uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, - user_directory::search_users, + user_directory::search_users, redact::redact_event, }, unversioned::get_supported_versions, }; use ruma_events::{ collections::only::Event as EduEvent, - room::{guest_access, history_visibility, join_rules, member}, + room::{guest_access, history_visibility, join_rules, member, redaction}, EventJson, EventType, }; use ruma_identifiers::{RoomId, RoomVersionId, UserId}; @@ -517,6 +517,7 @@ pub fn set_displayname_route( .unwrap(), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -564,7 +565,7 @@ pub fn set_avatar_url_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let avatar_url = &body.avatar_url { + if let Some(avatar_url) = &body.avatar_url { if !avatar_url.starts_with("mxc://") { debug!("Request contains an invalid avatar_url."); return MatrixResult(Err(Error { @@ -579,7 +580,7 @@ pub fn set_avatar_url_route( } db.users - .set_avatar_url(&user_id, Some(body.avatar_url.clone())) + .set_avatar_url(&user_id, body.avatar_url.clone()) .unwrap(); // Send a new membership event into all joined rooms @@ -591,7 +592,7 @@ pub fn set_avatar_url_route( user_id.clone(), EventType::RoomMember, serde_json::to_value(ruma_events::room::member::MemberEventContent { - avatar_url: Some(body.avatar_url.clone()), + avatar_url: body.avatar_url.clone(), ..serde_json::from_value::>( db.rooms .room_state(&room_id) @@ -608,6 +609,7 @@ pub fn set_avatar_url_route( .unwrap(), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -917,14 +919,15 @@ pub fn create_room_route( creator: user_id.clone(), federate: body .creation_content - .and_then(|c| c.federate) - .unwrap_or(true), - predecessor: None, // TODO: Check creation_content.predecessor once ruma has that + .as_ref() + .map_or(true, |c| c.federate), + predecessor: body.creation_content.as_ref().and_then(|c| c.predecessor.clone()), room_version: RoomVersionId::version_5(), }) .unwrap(), None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -945,6 +948,7 @@ pub fn create_room_route( .unwrap(), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -968,18 +972,8 @@ pub fn create_room_route( .expect("TODO: handle. 
we hope the client sends a valid power levels json") } else { serde_json::to_value(ruma_events::room::power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 50.into(), users, - users_default: 0.into(), - notifications: ruma_events::room::power_levels::NotificationPowerLevels { - room: 50.into(), - }, + ..Default::default() }) .unwrap() }; @@ -991,6 +985,7 @@ pub fn create_room_route( power_levels_content, None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1016,6 +1011,7 @@ pub fn create_room_route( }, None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1032,6 +1028,7 @@ pub fn create_room_route( .unwrap(), None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1056,6 +1053,7 @@ pub fn create_room_route( }, None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1071,10 +1069,11 @@ pub fn create_room_route( .append_pdu( room_id.clone(), user_id.clone(), - EventType::from(event_type), + event_type.clone(), serde_json::from_str(content.get()).unwrap(), None, state_key.clone(), + None, &db.globals, ) .unwrap(); @@ -1093,6 +1092,7 @@ pub fn create_room_route( .unwrap(), None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1110,6 +1110,7 @@ pub fn create_room_route( .unwrap(), None, Some("".to_owned()), + None, &db.globals, ) .unwrap(); @@ -1132,6 +1133,7 @@ pub fn create_room_route( .unwrap(), None, Some(user.to_string()), + None, &db.globals, ) .unwrap(); @@ -1140,6 +1142,38 @@ pub fn create_room_route( MatrixResult(Ok(create_room::Response { room_id })) } +#[put("/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", data = "")] +pub fn redact_event_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, + _event_id: String, + _txn_id: String, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if let Ok(event_id) = db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + EventType::RoomRedaction, + serde_json::to_value(redaction::RedactionEventContent { + reason: body.reason.clone(), + }).unwrap(), + None, + None, + Some(body.event_id.clone()), + &db.globals, + ) { + MatrixResult(Ok(redact_event::Response { event_id })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Failed to redact event.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } +} + #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn get_alias_route( db: State<'_, Database>, @@ -1220,6 +1254,7 @@ pub fn join_room_by_id_route( serde_json::to_value(event).unwrap(), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -1285,6 +1320,7 @@ pub fn leave_room_route( json!({"membership": "leave"}), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -1328,6 +1364,7 @@ pub fn invite_user_route( serde_json::to_value(event).unwrap(), None, Some(user_id.to_string()), + None, &db.globals, ) .unwrap(); @@ -1414,7 +1451,7 @@ pub async fn get_public_rooms_filtered_route( .deserialize() .unwrap() .alias - }).map(|a| a.to_string()), + }), name: state.get(&(EventType::RoomName, "".to_owned())).map(|s| { serde_json::from_value::>( s.content.clone(), @@ -1565,6 +1602,7 @@ pub fn create_message_event_route( serde_json::from_str(body.json_body.unwrap().get()).unwrap(), Some(unsigned), None, + None, &db.globals, ) { MatrixResult(Ok(create_message_event::Response { event_id })) @@ 
-1598,6 +1636,7 @@ pub fn create_state_event_for_key_route( serde_json::from_str(body.json_body.clone().unwrap().get()).unwrap(), None, Some(body.state_key.clone()), + None, &db.globals, ) { MatrixResult(Ok(create_state_event_for_key::Response { event_id })) @@ -1630,6 +1669,7 @@ pub fn create_state_event_for_empty_key_route( serde_json::from_str(body.json_body.unwrap().get()).unwrap(), None, Some("".to_owned()), + None, &db.globals, ) { MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 87f3429..8e626cb 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -12,7 +12,7 @@ use ruma_events::{ EventJson, EventType, }; use ruma_identifiers::{EventId, RoomId, UserId}; - +use sled::IVec; use std::{ collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, @@ -110,28 +110,50 @@ impl Rooms { self.eventid_pduid .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { - Ok(serde_json::from_slice( + Ok(Some(serde_json::from_slice( &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( "eventid_pduid points to nonexistent pdu", ))?, - )?) - .map(Some) + )?)) }) } + /// Returns the pdu's id. + pub fn get_pdu_id(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.to_string().as_bytes())? + .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) + } + /// Returns the pdu. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { - Ok(serde_json::from_slice( + Ok(Some(serde_json::from_slice( &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( "eventid_pduid points to nonexistent pdu", ))?, - )?) - .map(Some) + )?)) }) } + /// Returns the pdu. + pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { + self.pduid_pdu + .get(pdu_id)? + .map_or(Ok(None), |pdu| Ok(Some(serde_json::from_slice(&pdu)?))) + } + + /// Returns the pdu. + pub fn replace_pdu(&self, pdu_id: &IVec, pdu: &PduEvent) -> Result<()> { + if self.pduid_pdu.get(&pdu_id)?.is_some() { + self.pduid_pdu + .insert(&pdu_id, &*serde_json::to_string(pdu)?)?; + Ok(()) + } else { + Err(Error::BadRequest("pdu does not exist")) + } + } /// Returns the leaf pdus of a room. pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { @@ -177,6 +199,7 @@ impl Rooms { content: serde_json::Value, unsigned: Option>, state_key: Option, + redacts: Option, globals: &super::globals::Globals, ) -> Result { // TODO: Make sure this isn't called twice in parallel @@ -435,7 +458,7 @@ impl Rooms { .try_into() .expect("depth can overflow and should be deprecated..."), auth_events: Vec::new(), - redacts: None, + redacts, unsigned, hashes: ruma_federation_api::EventHash { sha256: "aaa".to_owned(), @@ -555,7 +578,21 @@ impl Rooms { .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) } - /// Makes a user join a room. Only call this if the membership is Join already + /// Replace a PDU with the redacted form. + pub fn redact_pdu(&self, event_id: &EventId) -> Result<()> { + if let Some(pdu_id) = self.get_pdu_id(event_id)? { + let mut pdu = self + .get_pdu_from_id(&pdu_id)? + .ok_or(Error::BadDatabase("pduid points to invalid pdu"))?; + pdu.redact(); + self.replace_pdu(&pdu_id, &pdu)?; + Ok(()) + } else { + Err(Error::BadRequest("eventid does not exist")) + } + } + + /// Update current membership data. 
fn update_membership( &self, room_id: &RoomId, diff --git a/src/pdu.rs b/src/pdu.rs index 0e1b3de..3e1ac0a 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -33,6 +33,38 @@ pub struct PduEvent { } impl PduEvent { + pub fn redact(&mut self) { + self.unsigned.clear(); + let allowed = match self.kind { + EventType::RoomMember => vec!["membership"], + EventType::RoomCreate => vec!["creator"], + EventType::RoomJoinRules => vec!["join_rule"], + EventType::RoomPowerLevels => vec![ + "ban", + "events", + "events_default", + "kick", + "redact", + "state_default", + "users", + "users_default", + ], + EventType::RoomHistoryVisibility => vec!["history_visibility"], + _ => vec![], + }; + + let old_content = self.content.as_object_mut().unwrap(); // TODO error + let mut new_content = serde_json::Map::new(); + + for key in allowed { + if let Some(value) = old_content.remove(key) { + new_content.insert(key.to_owned(), value); + } + } + + self.content = new_content.into(); + } + pub fn to_room_event(&self) -> EventJson { // Can only fail in rare circumstances that won't ever happen here, see // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html @@ -40,7 +72,6 @@ impl PduEvent { // EventJson's deserialize implementation always returns `Ok(...)` serde_json::from_str::>(&json).unwrap() } - pub fn to_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).unwrap(); serde_json::from_str::>(&json).unwrap() diff --git a/src/server_server.rs b/src/server_server.rs index 84ca5cc..6aa1e99 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,7 +4,9 @@ use log::error; use rocket::{get, response::content::Json, State}; use ruma_api::Endpoint; use ruma_client_api::error::Error; -use ruma_federation_api::{v1::get_server_version, v2::get_server_keys}; +use ruma_federation_api::discovery::{ + get_server_keys::v2 as get_server_keys, get_server_version::v1 as get_server_version, +}; use serde_json::json; use std::{ collections::BTreeMap, From 9c26e22ad7d1d821feba6253703c074bcb3c1bf6 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 24 May 2020 08:30:57 +0200 Subject: [PATCH 0108/1727] improvement: load aliases from database --- src/client_server.rs | 12 ++++++------ src/database/rooms.rs | 12 ++++++++++++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 0701be1..36cdb38 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1272,17 +1272,17 @@ pub fn join_room_by_id_or_alias_route( ) -> MatrixResult { let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, - Err(room_alias) => { - if room_alias.server_name() == db.globals.server_name() { + Err(_) => { + if let Some(room_id) = db.rooms.id_from_alias(body.room_id_or_alias.as_ref()).unwrap() { + room_id + } else { + // Ask creator server of the room to join TODO ask someone else when not available + //server_server::send_request(data, destination, request) return MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room alias not found.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })); - } else { - // Ask creator server of the room to join TODO ask someone else when not available - //server_server::send_request(data, destination, request) - todo!(); } } }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8e626cb..8969da1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -26,6 +26,8 @@ pub struct Rooms { pub(super) roomid_pduleaves: sled::Tree, pub(super) roomstateid_pdu: 
sled::Tree, // RoomStateId = Room + StateType + StateKey + pub(super) alias_roomid: sled::Tree, + pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) userroomid_invited: sled::Tree, @@ -646,6 +648,16 @@ impl Rooms { Ok(()) } + pub fn id_from_alias(&self, alias: &str) -> Result> { + if !alias.starts_with('#') { + return Err(Error::BadRequest("room alias does not start with #")); + } + + self.alias_roomid.get(alias)?.map_or(Ok(None), |bytes| { + Ok(Some(RoomId::try_from(utils::string_from_bytes(&bytes)?)?)) + }) + } + /// Returns an iterator over all rooms a user joined. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_joined From 4e507ef70624fcaf3386bf3ee71c9bfb65801754 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 25 May 2020 23:24:13 +0200 Subject: [PATCH 0109/1727] feat: room visibility and aliases --- Cargo.lock | 109 +++++++------- Cargo.toml | 10 +- README.md | 2 +- Rocket-example.toml | 3 + src/client_server.rs | 321 ++++++++++++++++++++++++++++++++---------- src/database.rs | 4 + src/database/rooms.rs | 98 +++++++++---- src/database/users.rs | 5 +- src/main.rs | 5 + 9 files changed, 399 insertions(+), 158 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc9840f..6b41ec3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -32,7 +32,7 @@ checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -279,7 +279,7 @@ dependencies = [ "bitflags", "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -557,9 +557,9 @@ checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] name = "hyper" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" +checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" dependencies = [ "bytes", "futures-channel", @@ -571,8 +571,8 @@ dependencies = [ "httparse", "itoa", "log", - "net2", "pin-project", + "socket2", "time", "tokio", "tower-service", @@ -663,9 +663,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.39" +version = "0.3.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" +checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177" dependencies = [ "wasm-bindgen", ] @@ -697,9 +697,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f" +checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" [[package]] name = "lock_api" @@ -1004,7 +1004,7 @@ checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -1195,9 +1195,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.13" +version = "0.16.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"703516ae74571f24b465b4a1431e81e2ad51336cb0ded733a55a1aa3eccac196" +checksum = "06b3fefa4f12272808f809a0af618501fdaba41a58963c5fb72238ab0be09603" dependencies = [ "cc", "libc", @@ -1291,13 +1291,13 @@ checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma-client-api.git#b064daf23dbf970933e83ce3b84a2563c5e646e7" +source = "git+https://github.com/ruma/ruma-client-api.git?rev=c725288cd099690c1d13f1a9b9e57228bc860a62#c725288cd099690c1d13f1a9b9e57228bc860a62" dependencies = [ "http", "js_int", @@ -1325,8 +1325,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.21.2" -source = "git+https://github.com/ruma/ruma-events.git#a17714669da1db4aa7bf10948463bb964cf5058a" +version = "0.21.3" +source = "git+https://github.com/ruma/ruma-events.git?rev=4d09416cd1663d63c22153705c9e1fd77910797f#4d09416cd1663d63c22153705c9e1fd77910797f" dependencies = [ "js_int", "ruma-common", @@ -1335,16 +1335,17 @@ dependencies = [ "ruma-serde", "serde", "serde_json", + "strum", ] [[package]] name = "ruma-events-macros" -version = "0.21.2" -source = "git+https://github.com/ruma/ruma-events.git#a17714669da1db4aa7bf10948463bb964cf5058a" +version = "0.21.3" +source = "git+https://github.com/ruma/ruma-events.git?rev=4d09416cd1663d63c22153705c9e1fd77910797f#4d09416cd1663d63c22153705c9e1fd77910797f" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -1491,22 +1492,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.110" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" +checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.110" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" +checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -1570,6 +1571,18 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" +[[package]] +name = "socket2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi 0.3.8", +] + [[package]] name = "spin" version = "0.5.2" @@ -1600,7 +1613,7 @@ dependencies = [ "heck", "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -1616,9 +1629,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.25" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14a640819f79b72a710c0be059dce779f9339ae046c8bef12c361d56702146f" +checksum = "bb37da98a55b1d08529362d9cbb863be17556873df2585904ab9d2bc951291d0" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", @@ -1656,7 +1669,7 @@ checksum = "893582086c2f98cde18f906265a65b5030a074b1046c674ae898be6519a7f479" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 
1.0.25", + "syn 1.0.29", ] [[package]] @@ -1700,7 +1713,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", ] [[package]] @@ -1858,9 +1871,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.62" +version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" +checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0" dependencies = [ "cfg-if", "serde", @@ -1870,24 +1883,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.62" +version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" +checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" +checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6" dependencies = [ "cfg-if", "js-sys", @@ -1897,9 +1910,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.62" +version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" +checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" dependencies = [ "quote 1.0.6", "wasm-bindgen-macro-support", @@ -1907,28 +1920,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.62" +version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" +checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" dependencies = [ "proc-macro2 1.0.17", "quote 1.0.6", - "syn 1.0.25", + "syn 1.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.62" +version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" +checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd" [[package]] name = "web-sys" -version = "0.3.39" +version = "0.3.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" +checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" dependencies = [ "js-sys", "wasm-bindgen", @@ -1936,9 +1949,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" dependencies = [ "ring", "untrusted", diff --git a/Cargo.toml b/Cargo.toml index c864508..198521e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,10 +14,10 @@ edition = "2018" [dependencies] 
rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git" } +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "c725288cd099690c1d13f1a9b9e57228bc860a62" } ruma-identifiers = { version = "0.16.1", features = ["rand"] } ruma-api = "0.16.1" -ruma-events = { git = "https://github.com/ruma/ruma-events.git" } +ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "4d09416cd1663d63c22153705c9e1fd77910797f" } ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "4cf4aa6ef74b25ad8c14d99d7774129f023df163" } log = "0.4.8" @@ -25,15 +25,15 @@ sled = "0.31.0" directories = "2.0.2" js_int = "0.1.5" serde_json = { version = "1.0.53", features = ["raw_value"] } -serde = "1.0.110" +serde = "1.0.111" tokio = { version = "0.2.21", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" -reqwest = "0.10.4" +reqwest = "=0.10.4" base64 = "0.12.1" thiserror = "1.0.19" ruma-common = "0.1.2" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [patch.crates-io] -ruma-events = { git = "https://github.com/ruma/ruma-events.git" } +ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "4d09416cd1663d63c22153705c9e1fd77910797f" } diff --git a/README.md b/README.md index 94061a8..772fb96 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ Also check out the [milestones](https://git.koesters.xyz/timo/conduit/milestones #### How can I contribute? -If you want to help, you may be able to find something in the issue tracker. If you do, comment on the issue, so others know. You can also join #conduit:matrix.org and ask there. +If you want to help, you may be able to find something in the issue tracker. If you do, comment on the issue, so others know. You can also join #conduit:matrix.org and ask there. 
#### Donate diff --git a/Rocket-example.toml b/Rocket-example.toml index fb9f6d6..924b540 100644 --- a/Rocket-example.toml +++ b/Rocket-example.toml @@ -3,6 +3,9 @@ server_name = "your.server.name" port = 8448 address = "0.0.0.0" +# Default path is in this user's data +#database_path = "/home/timo/MyConduitServer" + #[global.tls] #certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" #key = "/etc/letsencrypt/live/your.server.name/privkey.pem" diff --git a/src/client_server.rs b/src/client_server.rs index 36cdb38..150555c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -5,15 +5,18 @@ use std::{ }; use log::{debug, warn}; -use rocket::{get, options, post, put, State}; +use rocket::{delete, get, options, post, put, State}; use ruma_client_api::{ error::{Error, ErrorKind}, r0::{ account::{get_username_availability, register}, - alias::get_alias, + alias::{create_alias, delete_alias, get_alias}, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, - directory::{self, get_public_rooms, get_public_rooms_filtered}, + directory::{ + self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, filter::{self, create_filter, get_filter}, keys::{claim_keys, get_keys, upload_keys}, media::{create_content, get_content, get_content_thumbnail, get_media_config}, @@ -28,6 +31,7 @@ use ruma_client_api::{ }, push::{get_pushrules_all, set_pushrule, set_pushrule_enabled}, read_marker::set_read_marker, + redact::redact_event, room::{self, create_room}, session::{get_login_types, login, logout}, state::{ @@ -39,16 +43,16 @@ use ruma_client_api::{ to_device::{self, send_event_to_device}, typing::create_typing_event, uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, - user_directory::search_users, redact::redact_event, + user_directory::search_users, }, unversioned::get_supported_versions, }; use ruma_events::{ collections::only::Event as EduEvent, - room::{guest_access, history_visibility, join_rules, member, redaction}, + room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, EventJson, EventType, }; -use ruma_identifiers::{RoomId, RoomVersionId, UserId}; +use ruma_identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; use crate::{server_server, utils, Database, MatrixResult, Ruma}; @@ -671,7 +675,7 @@ pub fn get_profile_route( MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, + status_code: http::StatusCode::NOT_FOUND, })) } @@ -908,8 +912,32 @@ pub fn create_room_route( let room_id = RoomId::new(db.globals.server_name()).expect("host is valid"); let user_id = body.user_id.as_ref().expect("user is authenticated"); - // TODO: Create alias and check if it already exists + let alias = if let Some(localpart) = &body.room_alias_name { + // TODO: Check for invalid characters and maximum length + if let Ok(alias) = + RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) + { + if db.rooms.id_from_alias(&alias).unwrap().is_some() { + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Alias already exists.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + Some(alias) + } else { + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Invalid alias.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + } else { + None + }; + + // 1. 
The room create event db.rooms .append_pdu( room_id.clone(), @@ -917,11 +945,11 @@ pub fn create_room_route( EventType::RoomCreate, serde_json::to_value(ruma_events::room::create::CreateEventContent { creator: user_id.clone(), - federate: body + federate: body.creation_content.as_ref().map_or(true, |c| c.federate), + predecessor: body .creation_content .as_ref() - .map_or(true, |c| c.federate), - predecessor: body.creation_content.as_ref().and_then(|c| c.predecessor.clone()), + .and_then(|c| c.predecessor.clone()), room_version: RoomVersionId::version_5(), }) .unwrap(), @@ -932,7 +960,7 @@ pub fn create_room_route( ) .unwrap(); - // Join room + // 2. Let the room creator join db.rooms .append_pdu( room_id.clone(), @@ -960,7 +988,7 @@ pub fn create_room_route( room::Visibility::Public => create_room::RoomPreset::PublicChat, }); - // 0. Power levels + // 3. Power levels let mut users = BTreeMap::new(); users.insert(user_id.clone(), 100.into()); for invite_user_id in &body.invite { @@ -972,8 +1000,18 @@ pub fn create_room_route( .expect("TODO: handle. we hope the client sends a valid power levels json") } else { serde_json::to_value(ruma_events::room::power_levels::PowerLevelsEventContent { + ban: 50.into(), + events: BTreeMap::new(), + events_default: 0.into(), + invite: 50.into(), + kick: 50.into(), + redact: 50.into(), + state_default: 50.into(), users, - ..Default::default() + users_default: 0.into(), + notifications: ruma_events::room::power_levels::NotificationPowerLevels { + room: 50.into(), + }, }) .unwrap() }; @@ -990,8 +1028,8 @@ pub fn create_room_route( ) .unwrap(); - // 1. Events set by preset - // 1.1 Join Rules + // 4. Events set by preset + // 4.1 Join Rules db.rooms .append_pdu( room_id.clone(), @@ -1016,7 +1054,7 @@ pub fn create_room_route( ) .unwrap(); - // 1.2 History Visibility + // 4.2 History Visibility db.rooms .append_pdu( room_id.clone(), @@ -1033,7 +1071,7 @@ pub fn create_room_route( ) .unwrap(); - // 1.3 Guest Access + // 4.3 Guest Access db.rooms .append_pdu( room_id.clone(), @@ -1058,7 +1096,7 @@ pub fn create_room_route( ) .unwrap(); - // 2. Events listed in initial_state + // 5. Events listed in initial_state for create_room::InitialStateEvent { event_type, state_key, @@ -1079,7 +1117,7 @@ pub fn create_room_route( .unwrap(); } - // 3. Events implied by name and topic + // 6. Events implied by name and topic if let Some(name) = &body.name { db.rooms .append_pdu( @@ -1116,7 +1154,7 @@ pub fn create_room_route( .unwrap(); } - // 4. Events implied by invite (and TODO: invite_3pid) + // 7. 
Events implied by invite (and TODO: invite_3pid) for user in &body.invite { db.rooms .append_pdu( @@ -1139,10 +1177,24 @@ pub fn create_room_route( .unwrap(); } + // Homeserver specific stuff + if let Some(alias) = alias { + db.rooms + .set_alias(&alias, Some(&room_id), &db.globals) + .unwrap(); + } + + if let Some(room::Visibility::Public) = body.visibility { + db.rooms.set_public(&room_id, true).unwrap(); + } + MatrixResult(Ok(create_room::Response { room_id })) } -#[put("/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", data = "")] +#[put( + "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", + data = "" +)] pub fn redact_event_route( db: State<'_, Database>, body: Ruma, @@ -1158,7 +1210,8 @@ pub fn redact_event_route( EventType::RoomRedaction, serde_json::to_value(redaction::RedactionEventContent { reason: body.reason.clone(), - }).unwrap(), + }) + .unwrap(), None, None, Some(body.event_id.clone()), @@ -1174,35 +1227,63 @@ pub fn redact_event_route( } } +#[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +pub fn create_alias_route( + db: State<'_, Database>, + body: Ruma, + _room_alias: String, +) -> MatrixResult { + if db.rooms.id_from_alias(&body.room_alias).unwrap().is_some() { + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Alias already exists".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + db.rooms + .set_alias(&body.room_alias, Some(&body.room_id), &db.globals) + .unwrap(); + + MatrixResult(Ok(create_alias::Response)) +} + +#[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +pub fn delete_alias_route( + db: State<'_, Database>, + body: Ruma, + _room_alias: String, +) -> MatrixResult { + db.rooms + .set_alias(&body.room_alias, None, &db.globals) + .unwrap(); + + MatrixResult(Ok(delete_alias::Response)) +} + #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn get_alias_route( db: State<'_, Database>, body: Ruma, _room_alias: String, ) -> MatrixResult { - warn!("TODO: get_alias_route"); - let room_id = if body.room_alias.server_name() == db.globals.server_name() { - match body.room_alias.alias() { - "conduit" => "!lgOCCXQKtXOAPlAlG5:conduit.rs", - _ => { - debug!("Room alias not found."); - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } + if body.room_alias.server_name() == db.globals.server_name() { + if let Some(room_id) = db.rooms.id_from_alias(&body.room_alias).unwrap() { + MatrixResult(Ok(get_alias::Response { + room_id, + servers: vec![db.globals.server_name().to_owned()], + })) + } else { + debug!("Room alias not found."); + return MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Room with alias not found.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); } } else { todo!("ask remote server"); } - .try_into() - .unwrap(); - - MatrixResult(Ok(get_alias::Response { - room_id, - servers: vec!["conduit.rs".to_owned()], - })) } #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] @@ -1273,7 +1354,11 @@ pub fn join_room_by_id_or_alias_route( let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(_) => { - if let Some(room_id) = db.rooms.id_from_alias(body.room_id_or_alias.as_ref()).unwrap() { + if let Some(room_id) = db + .rooms + .id_from_alias(&body.room_id_or_alias.clone().try_into().unwrap()) + .unwrap() + { room_id } else { // Ask 
creator server of the room to join TODO ask someone else when not available @@ -1379,6 +1464,35 @@ pub fn invite_user_route( } } +#[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +pub async fn set_room_visibility_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + match body.visibility { + room::Visibility::Public => db.rooms.set_public(&body.room_id, true).unwrap(), + room::Visibility::Private => db.rooms.set_public(&body.room_id, false).unwrap(), + } + + MatrixResult(Ok(set_room_visibility::Response)) +} + +#[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +pub async fn get_room_visibility_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + MatrixResult(Ok(get_room_visibility::Response { + visibility: if db.rooms.is_public_room(&body.room_id).unwrap() { + room::Visibility::Public + } else { + room::Visibility::Private + }, + })) +} + #[get("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_route( db: State<'_, Database>, @@ -1436,9 +1550,10 @@ pub async fn get_public_rooms_filtered_route( ) -> MatrixResult { let mut chunk = db .rooms - .all_rooms() - .into_iter() + .public_rooms() .map(|room_id| { + let room_id = room_id.unwrap(); + let state = db.rooms.room_state(&room_id).unwrap(); directory::PublicRoomsChunk { @@ -1628,12 +1743,46 @@ pub fn create_state_event_for_key_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - // Reponse of with/without key is the same + let content = + serde_json::from_str::(body.json_body.clone().unwrap().get()).unwrap(); + + if body.event_type == EventType::RoomCanonicalAlias { + let canonical_alias = serde_json::from_value::< + EventJson, + >(content.clone()) + .unwrap() + .deserialize() + .unwrap(); + + let mut aliases = canonical_alias.alt_aliases; + + if let Some(alias) = canonical_alias.alias { + aliases.push(alias); + } + + for alias in aliases { + if alias.server_name() != db.globals.server_name() + || db + .rooms + .id_from_alias(&alias) + .unwrap() + .filter(|room| room == &body.room_id) // Make sure it's the right room + .is_none() + { + return MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "You are only allowed to send canonical_alias events when it's aliases already exists".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + } + } + if let Ok(event_id) = db.rooms.append_pdu( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - serde_json::from_str(body.json_body.clone().unwrap().get()).unwrap(), + content, None, Some(body.state_key.clone()), None, @@ -1659,27 +1808,43 @@ pub fn create_state_event_for_empty_key_route( _room_id: String, _event_type: String, ) -> MatrixResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + // This just calls create_state_event_for_key_route + let Ruma { + body: + create_state_event_for_empty_key::Request { + room_id, + event_type, + data, + }, + user_id, + device_id, + json_body, + } = body; - // Reponse of with/without key is the same - if let Ok(event_id) = db.rooms.append_pdu( - body.room_id.clone(), - user_id.clone(), - body.event_type.clone(), - serde_json::from_str(body.json_body.unwrap().get()).unwrap(), - None, - Some("".to_owned()), - None, - &db.globals, - ) { - MatrixResult(Ok(create_state_event_for_empty_key::Response { event_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Failed to send 
event.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } + let response = create_state_event_for_key_route( + db, + Ruma { + body: create_state_event_for_key::Request { + room_id, + event_type, + data, + state_key: "".to_owned(), + }, + user_id, + device_id, + json_body, + }, + _room_id, + _event_type, + "".to_owned(), + ); + + MatrixResult(match response.0 { + Ok(create_state_event_for_key::Response { event_id }) => { + Ok(create_state_event_for_empty_key::Response { event_id }) + } + Err(e) => Err(e), + }) } #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] @@ -1973,17 +2138,19 @@ pub fn sync_route( let mut invited_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_invited(&user_id) { let room_id = room_id.unwrap(); - let events = db - .rooms - .pdus_since(&room_id, since) - .unwrap() - .map(|pdu| pdu.unwrap().to_stripped_state_event()) - .collect(); invited_rooms.insert( - room_id, + room_id.clone(), sync_events::InvitedRoom { - invite_state: sync_events::InviteState { events }, + invite_state: sync_events::InviteState { + events: db + .rooms + .room_state(&room_id) + .unwrap() + .into_iter() + .map(|(_, pdu)| pdu.to_stripped_state_event()) + .collect(), + }, }, ); } @@ -2086,12 +2253,12 @@ pub fn get_message_events_route( .map(|pdu| pdu.to_room_event()) .collect::>(); - MatrixResult(Ok(get_message_events::Response { + MatrixResult(Ok(dbg!(get_message_events::Response { start: Some(body.from.clone()), end: prev_batch, chunk: room_events, state: Vec::new(), - })) + }))) } else { MatrixResult(Err(Error { kind: ErrorKind::Unknown, diff --git a/src/database.rs b/src/database.rs index 9c08a22..de14805 100644 --- a/src/database.rs +++ b/src/database.rs @@ -77,6 +77,10 @@ impl Database { roomid_pduleaves: db.open_tree("roomid_pduleaves").unwrap(), roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), + alias_roomid: db.open_tree("alias_roomid").unwrap(), + aliasid_alias: db.open_tree("alias_roomid").unwrap(), + publicroomids: db.open_tree("publicroomids").unwrap(), + userroomid_joined: db.open_tree("userroomid_joined").unwrap(), roomuserid_joined: db.open_tree("roomuserid_joined").unwrap(), userroomid_invited: db.open_tree("userroomid_invited").unwrap(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8969da1..1c25c25 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -11,7 +11,7 @@ use ruma_events::{ }, EventJson, EventType, }; -use ruma_identifiers::{EventId, RoomId, UserId}; +use ruma_identifiers::{EventId, RoomAliasId, RoomId, UserId}; use sled::IVec; use std::{ collections::{BTreeMap, HashMap}, @@ -27,6 +27,8 @@ pub struct Rooms { pub(super) roomstateid_pdu: sled::Tree, // RoomStateId = Room + StateType + StateKey pub(super) alias_roomid: sled::Tree, + pub(super) aliasid_alias: sled::Tree, // AliasId = RoomId + Count + pub(super) publicroomids: sled::Tree, pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, @@ -312,7 +314,7 @@ impl Rooms { .join_rule }); - if target_membership == member::MembershipState::Join { + let authorized = if target_membership == member::MembershipState::Join { let mut prev_events = prev_events.iter(); let prev_event = self .get_pdu(prev_events.next().ok_or(Error::BadRequest( @@ -392,34 +394,30 @@ impl Rooms { } } else { false + }; + + if authorized { + // Update our membership info + self.update_membership(&room_id, &target_user_id, &target_membership)?; } + + authorized } EventType::RoomCreate => prev_events.is_empty(), - _ if sender_membership == 
member::MembershipState::Join => { + + // Not allow any of the following events if the sender is not joined. + _ if sender_membership != member::MembershipState::Join => false, + + _ => { // TODO sender_power.unwrap_or(&power_levels.users_default) >= &power_levels.state_default } - - _ => false, } { error!("Unauthorized"); // Not authorized return Err(Error::BadRequest("event not authorized")); } - if event_type == EventType::RoomMember { - // TODO: Don't get this twice - let target_user_id = UserId::try_from(&**state_key)?; - self.update_membership( - &room_id, - &target_user_id, - &serde_json::from_value::>( - content.clone(), - )? - .deserialize()? - .membership, - )?; - } } } else if !self.is_joined(&sender, &room_id)? { return Err(Error::BadRequest("event not authorized")); @@ -648,14 +646,66 @@ impl Rooms { Ok(()) } - pub fn id_from_alias(&self, alias: &str) -> Result> { - if !alias.starts_with('#') { - return Err(Error::BadRequest("room alias does not start with #")); + pub fn set_alias( + &self, + alias: &RoomAliasId, + room_id: Option<&RoomId>, + globals: &super::globals::Globals, + ) -> Result<()> { + if let Some(room_id) = room_id { + self.alias_roomid + .insert(alias.alias(), &*room_id.to_string())?; + let mut aliasid = room_id.to_string().as_bytes().to_vec(); + aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(aliasid, &*alias.alias())?; + } else { + if let Some(room_id) = self.alias_roomid.remove(alias.alias())? { + for key in self.aliasid_alias.scan_prefix(room_id).keys() { + self.aliasid_alias.remove(key?)?; + } + } } - self.alias_roomid.get(alias)?.map_or(Ok(None), |bytes| { - Ok(Some(RoomId::try_from(utils::string_from_bytes(&bytes)?)?)) - }) + Ok(()) + } + + pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { + self.alias_roomid + .get(alias.alias())? + .map_or(Ok(None), |bytes| { + Ok(Some(RoomId::try_from(utils::string_from_bytes(&bytes)?)?)) + }) + } + + pub fn room_aliases(&self, room_id: &RoomId) -> impl Iterator> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + self.aliasid_alias + .scan_prefix(prefix) + .values() + .map(|bytes| Ok(RoomAliasId::try_from(utils::string_from_bytes(&bytes?)?)?)) + } + + pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { + if public { + self.publicroomids.insert(room_id.to_string(), &[])?; + } else { + self.publicroomids.remove(room_id.to_string())?; + } + + Ok(()) + } + + pub fn is_public_room(&self, room_id: &RoomId) -> Result { + Ok(self.publicroomids.contains_key(room_id.to_string())?) + } + + pub fn public_rooms(&self) -> impl Iterator> { + self.publicroomids + .iter() + .keys() + .map(|bytes| Ok(RoomId::try_from(utils::string_from_bytes(&bytes?)?)?)) } /// Returns an iterator over all rooms a user joined. 
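As a quick illustration of the storage scheme the rooms.rs changes above rely on, here is a minimal std-only sketch of the composite-key and prefix-scan pattern, with a BTreeMap standing in for the sled trees. The key layout follows the "AliasId = RoomId + Count" comment and the 0xff separator used elsewhere in the tree keys; the helper names and example values are illustrative and not part of Conduit.

use std::collections::BTreeMap;

/// Build a composite key: room id, a 0xff separator, then a big-endian count,
/// mirroring the layout of the new aliasid_alias tree.
fn alias_key(room_id: &str, count: u64) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key
}

/// Iterate over everything stored under a room id prefix, the same idea as
/// sled's Tree::scan_prefix, but on a plain BTreeMap so the sketch is std-only.
fn scan_prefix<'a>(
    tree: &'a BTreeMap<Vec<u8>, String>,
    room_id: &str,
) -> impl Iterator<Item = (&'a Vec<u8>, &'a String)> + 'a {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    tree.range(prefix.clone()..)
        .take_while(move |(key, _)| key.starts_with(&prefix))
}

fn main() {
    let mut aliasid_alias = BTreeMap::new();
    aliasid_alias.insert(alias_key("!roomA:conduit.rs", 1), "#first:conduit.rs".to_owned());
    aliasid_alias.insert(alias_key("!roomA:conduit.rs", 2), "#second:conduit.rs".to_owned());
    aliasid_alias.insert(alias_key("!roomB:conduit.rs", 3), "#other:conduit.rs".to_owned());

    // The prefix scan only sees the two aliases that belong to !roomA.
    let aliases: Vec<String> = scan_prefix(&aliasid_alias, "!roomA:conduit.rs")
        .map(|(_, alias)| alias.clone())
        .collect();
    assert_eq!(aliases, vec!["#first:conduit.rs", "#second:conduit.rs"]);
}

Because the count comes from next_count() and is encoded big-endian, entries under one room id prefix come back in insertion order, which is what makes the append-then-scan approach in set_alias and room_aliases work.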
diff --git a/src/database/users.rs b/src/database/users.rs index 81a7bb8..6540a70 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -141,9 +141,8 @@ impl Users { let mut prefix = userdeviceid.clone(); prefix.push(0xff); - for result in self.todeviceid_events.scan_prefix(&prefix) { - let (key, value) = result?; - self.todeviceid_events.remove(key)?; + for key in self.todeviceid_events.scan_prefix(&prefix).keys() { + self.todeviceid_events.remove(key?)?; } // TODO: Remove onetimekeys diff --git a/src/main.rs b/src/main.rs index 3e34ded..4146282 100644 --- a/src/main.rs +++ b/src/main.rs @@ -50,12 +50,17 @@ fn setup_rocket() -> rocket::Rocket { client_server::set_read_marker_route, client_server::create_typing_event_route, client_server::create_room_route, + client_server::redact_event_route, + client_server::create_alias_route, + client_server::delete_alias_route, client_server::get_alias_route, client_server::join_room_by_id_route, client_server::join_room_by_id_or_alias_route, client_server::leave_room_route, client_server::forget_room_route, client_server::invite_user_route, + client_server::set_room_visibility_route, + client_server::get_room_visibility_route, client_server::get_public_rooms_route, client_server::get_public_rooms_filtered_route, client_server::search_users_route, From b519bc696257d082c7e84b95a31ce03063d13261 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 31 May 2020 19:40:01 +0200 Subject: [PATCH 0110/1727] fix: only show notifications for messages --- src/client_server.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 150555c..3635bf3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2003,7 +2003,14 @@ pub fn sync_route( let notification_count = if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id).unwrap() { - Some((db.rooms.pdus_since(&room_id, last_read).unwrap().count() as u32).into()) + Some( + (db.rooms + .pdus_since(&room_id, last_read) + .unwrap() + .filter(|pdu| matches!(pdu.as_ref().unwrap().kind.clone(), EventType::RoomMessage | EventType::RoomEncrypted)) + .count() as u32) + .into(), + ) } else { None }; @@ -2079,7 +2086,7 @@ pub fn sync_route( }, }, unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: notification_count, + highlight_count: None, notification_count, }, timeline: sync_events::Timeline { From 5a47c754273e3d7c7c7a984d9966a88f9f8196ac Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 1 Jun 2020 20:58:49 +0200 Subject: [PATCH 0111/1727] fix: make redactions permanent --- src/client_server.rs | 4 ++-- src/database/rooms.rs | 25 ++++++++++++++++++++----- src/pdu.rs | 6 ++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 3635bf3..e56a7c5 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2260,12 +2260,12 @@ pub fn get_message_events_route( .map(|pdu| pdu.to_room_event()) .collect::>(); - MatrixResult(Ok(dbg!(get_message_events::Response { + MatrixResult(Ok(get_message_events::Response { start: Some(body.from.clone()), end: prev_batch, chunk: room_events, state: Vec::new(), - }))) + })) } else { MatrixResult(Err(Error { kind: ErrorKind::Unknown, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1c25c25..5b9f1e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -8,6 +8,7 @@ use ruma_events::{ room::{ join_rules, member, power_levels::{self, PowerLevelsEventContent}, + 
redaction, }, EventJson, EventType, }; @@ -207,7 +208,6 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result { // TODO: Make sure this isn't called twice in parallel - let prev_events = self.get_pdu_leaves(&room_id)?; // Is the event authorized? @@ -404,7 +404,6 @@ impl Rooms { authorized } EventType::RoomCreate => prev_events.is_empty(), - // Not allow any of the following events if the sender is not joined. _ if sender_membership != member::MembershipState::Join => false, @@ -450,15 +449,15 @@ impl Rooms { origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("this only fails many years in the future"), - kind: event_type, - content, + kind: event_type.clone(), + content: content.clone(), state_key, prev_events, depth: depth .try_into() .expect("depth can overflow and should be deprecated..."), auth_events: Vec::new(), - redacts, + redacts: redacts.clone(), unsigned, hashes: ruma_federation_api::EventHash { sha256: "aaa".to_owned(), @@ -506,6 +505,22 @@ impl Rooms { self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } + match event_type { + EventType::RoomRedaction => { + if let Some(redact_id) = &redacts { + // TODO: Reason + let _reason = serde_json::from_value::< + EventJson, + >(content)? + .deserialize()? + .reason; + + self.redact_pdu(&redact_id)?; + } + } + _ => {} + } + self.edus.room_read_set(&room_id, &sender, index)?; Ok(pdu.event_id) diff --git a/src/pdu.rs b/src/pdu.rs index 3e1ac0a..1249642 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -7,6 +7,7 @@ use ruma_events::{ use ruma_federation_api::EventHash; use ruma_identifiers::{EventId, RoomId, UserId}; use serde::{Deserialize, Serialize}; +use serde_json::json; use std::collections::HashMap; #[derive(Deserialize, Serialize)] @@ -62,6 +63,11 @@ impl PduEvent { } } + self.unsigned.insert( + "redacted_because".to_owned(), + json!({"content": {}, "type": "m.room.redaction"}), + ); + self.content = new_content.into(); } From 75e75c3b2f21fe65ccc3f558fa3c03d75818981e Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Petit Date: Tue, 2 Jun 2020 13:57:41 +0200 Subject: [PATCH 0112/1727] Black list temporaly some room creation tests. 
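A note on the redaction handling introduced in the previous patch ("fix: make redactions permanent"): when an m.room.redaction PDU is appended, the target PDU's content is stripped and a redacted_because entry is recorded in unsigned. The self-contained serde_json sketch below shows the general shape of that transformation; the per-type allow-list is an abbreviated subset of the Matrix spec rules and the helper is illustrative rather than Conduit's actual PduEvent::redact.

use serde_json::{json, Map, Value};

/// Strip an event's content down to the keys that survive redaction.
/// Only a few event types keep anything at all after a redaction.
fn redact_content(kind: &str, content: &Value) -> Value {
    let kept_keys: &[&str] = match kind {
        "m.room.member" => &["membership"],
        "m.room.create" => &["creator"],
        "m.room.power_levels" => &["ban", "kick", "redact", "users", "users_default"],
        _ => &[],
    };

    let mut new_content = Map::new();
    if let Some(old) = content.as_object() {
        for key in kept_keys {
            if let Some(value) = old.get(*key) {
                new_content.insert((*key).to_owned(), value.clone());
            }
        }
    }
    Value::Object(new_content)
}

fn main() {
    let content = json!({ "membership": "join", "displayname": "Timo" });
    let redacted = redact_content("m.room.member", &content);

    // Only the protected key survives; everything else is gone.
    assert_eq!(redacted, json!({ "membership": "join" }));

    // The redacting event is referenced from unsigned, mirroring the
    // redacted_because entry the patch inserts.
    let event = json!({
        "type": "m.room.member",
        "content": redacted,
        "unsigned": { "redacted_because": { "type": "m.room.redaction", "content": {} } },
    });
    assert_eq!(event["unsigned"]["redacted_because"]["type"], "m.room.redaction");
}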
--- sytest/sytest-blacklist | 9 +++++++-- sytest/sytest-whitelist | 3 --- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/sytest/sytest-blacklist b/sytest/sytest-blacklist index b45e075..009de22 100644 --- a/sytest/sytest-blacklist +++ b/sytest/sytest-blacklist @@ -1,2 +1,7 @@ -# This test checks for a room-alias key in the response which is not in the spec -POST /createRoom makes a public room \ No newline at end of file +# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged +POST /createRoom makes a public room +# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +Can /sync newly created room +POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index a30543f..1b09fa7 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -27,9 +27,6 @@ POST /login wrong password is rejected # Room creation endpoints implemented POST /createRoom makes a private room POST /createRoom makes a private room with invites -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels POST /join/:room_alias can join a room From 1222289c7504b8d5fd183f05211ae98b2cca6a73 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Petit Date: Wed, 3 Jun 2020 09:30:34 +0200 Subject: [PATCH 0113/1727] get_state_events_for_key_route return the content of the requested event instead of the all event. --- src/client_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index e56a7c5..2cb7a5f 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1895,7 +1895,7 @@ pub fn get_state_events_for_key_route( .get(&(body.event_type.clone(), body.state_key.clone())) { MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(event).unwrap(), + content: serde_json::value::to_raw_value(&event.content).unwrap(), })) } else { MatrixResult(Err(Error { From ed9b544ace12159f8a9267e73f401ca949b5792d Mon Sep 17 00:00:00 2001 From: gnieto Date: Wed, 3 Jun 2020 13:41:30 +0200 Subject: [PATCH 0114/1727] Implement devices API (#20) small improvements Cargo fmt Simplify insert and update methods Review feedback Remove has_device method calls Load all devices with a single db call Remove device as in logout Put all metadata on the same tree Create userdevice key fucntion Implement devices API Implement all the devices endpoints. There's a couple of pending tasks: - Integrate the "logout" logic once it lands to master (this should remove the given device from the database). - Track and store last seen timestamp and IP. 
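As a sketch of the key layout this commit settles on when it puts all device metadata on one tree: each device is stored under a userdeviceid key made of the user id, a 0xff separator, and the device id, with the Device struct serialized to JSON as the value. The helpers below are std-only and illustrative; the names and example identifiers are not Conduit's.

/// Build the userdeviceid key used by the new userdeviceid_metadata tree:
/// the user id, a 0xff separator, then the device id.
fn userdeviceid(user_id: &str, device_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(device_id.as_bytes());
    key
}

/// Recover the device id from such a key, the same rsplit-on-0xff trick the
/// patch uses when listing all device ids for a user.
fn device_id_from_key(key: &[u8]) -> Option<String> {
    key.rsplit(|&b| b == 0xff)
        .next()
        .map(|bytes| String::from_utf8_lossy(bytes).into_owned())
}

fn main() {
    let key = userdeviceid("@alice:conduit.rs", "GHTYAJCE");
    assert_eq!(device_id_from_key(&key).as_deref(), Some("GHTYAJCE"));
}

Splitting on the last 0xff is also what lets a single prefix scan over the user id return every device with its metadata in one database pass, which is what "Load all devices with a single db call" above refers to.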
Co-authored-by: timokoesters Co-authored-by: Guillem Nieto --- src/client_server.rs | 99 +++++++++++++++++++++++++++++++++++-- src/database.rs | 2 +- src/database/users.rs | 110 ++++++++++++++++++++++++++++++++++-------- src/main.rs | 5 ++ 4 files changed, 190 insertions(+), 26 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 2cb7a5f..5f5070e 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -13,6 +13,9 @@ use ruma_client_api::{ alias::{create_alias, delete_alias, get_alias}, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, + device::{ + self, delete_device, delete_devices, get_device, get_devices, update_device, + }, directory::{ self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, @@ -52,7 +55,7 @@ use ruma_events::{ room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, EventJson, EventType, }; -use ruma_identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}; +use ruma_identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; use crate::{server_server, utils, Database, MatrixResult, Ruma}; @@ -173,7 +176,7 @@ pub fn register_route( // Generate new device id if the user didn't specify one let device_id = body - .device_id + .device_id.clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Generate new token for the device @@ -181,7 +184,7 @@ pub fn register_route( // Add device db.users - .create_device(&user_id, &device_id, &token) + .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone()) .unwrap(); // Initial data @@ -300,6 +303,7 @@ pub fn login_route( let device_id = body .body .device_id + .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Generate a new token for the device @@ -307,7 +311,7 @@ pub fn login_route( // Add device db.users - .create_device(&user_id, &device_id, &token) + .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone()) .unwrap(); MatrixResult(Ok(login::Response { @@ -2430,6 +2434,93 @@ pub fn get_content_thumbnail_route( } } +#[get("/_matrix/client/r0/devices", data = "")] +pub fn get_devices_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + let devices = db + .users + .all_devices_metadata(user_id) + .map(|r| r.unwrap()) + .collect::>(); + + MatrixResult(Ok(get_devices::Response { devices })) +} + +#[get("/_matrix/client/r0/devices/", data = "")] +pub fn get_device_route( + db: State<'_, Database>, + body: Ruma, + device_id: DeviceId, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); + + match device { + None => MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Device not found".to_string(), + status_code: http::StatusCode::NOT_FOUND, + })), + Some(device) => MatrixResult(Ok(get_device::Response { device })), + } +} + +#[put("/_matrix/client/r0/devices/", data = "")] +pub fn update_device_route( + db: State<'_, Database>, + body: Ruma, + device_id: DeviceId, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); + + match device { + None => MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "Device not 
found".to_string(), + status_code: http::StatusCode::NOT_FOUND, + })), + Some(mut device) => { + device.display_name = body.display_name.clone(); + + db.users + .update_device_metadata(&user_id, &device_id, &device) + .unwrap(); + + MatrixResult(Ok(update_device::Response)) + } + } +} + +#[delete("/_matrix/client/r0/devices/", data = "")] +pub fn delete_device_route( + db: State<'_, Database>, + body: Ruma, + device_id: DeviceId, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + db.users.remove_device(&user_id, &device_id).unwrap(); + + MatrixResult(Ok(delete_device::Response)) +} + +#[post("/_matrix/client/r0/delete_devices", data = "")] +pub fn delete_devices_route( + db: State<'_, Database>, + body: Ruma, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + for device_id in &body.devices { + db.users.remove_device(&user_id, &device_id).unwrap() + } + + MatrixResult(Ok(delete_devices::Response)) +} + #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, diff --git a/src/database.rs b/src/database.rs index de14805..d4927a7 100644 --- a/src/database.rs +++ b/src/database.rs @@ -56,10 +56,10 @@ impl Database { ), users: users::Users { userid_password: db.open_tree("userid_password").unwrap(), - userdeviceids: db.open_tree("userdeviceids").unwrap(), userid_displayname: db.open_tree("userid_displayname").unwrap(), userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), + userdeviceid_metadata: db.open_tree("userdeviceid_metadata").unwrap(), token_userdeviceid: db.open_tree("token_userdeviceid").unwrap(), onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys").unwrap(), userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys").unwrap(), diff --git a/src/database/users.rs b/src/database/users.rs index 6540a70..e216301 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,16 +1,19 @@ use crate::{utils, Error, Result}; use js_int::UInt; -use ruma_client_api::r0::keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}; +use ruma_client_api::r0::{ + device::Device, + keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, +}; use ruma_events::{to_device::AnyToDeviceEvent, EventJson, EventType}; use ruma_identifiers::{DeviceId, UserId}; -use std::{collections::BTreeMap, convert::TryFrom}; +use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; pub struct Users { pub(super) userid_password: sled::Tree, pub(super) userid_displayname: sled::Tree, pub(super) userid_avatarurl: sled::Tree, - pub(super) userdeviceids: sled::Tree, pub(super) userdeviceid_token: sled::Tree, + pub(super) userdeviceid_metadata: sled::Tree, // This is also used to check if a device exists pub(super) token_userdeviceid: sled::Tree, pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId @@ -105,25 +108,40 @@ impl Users { } /// Adds a new device to a user. - pub fn create_device(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + pub fn create_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + initial_device_display_name: Option, + ) -> Result<()> { if !self.exists(user_id)? 
{ return Err(Error::BadRequest( "tried to create device for nonexistent user", )); } - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); - self.userdeviceids.insert(key, &[])?; + self.userdeviceid_metadata.insert( + userdeviceid, + serde_json::to_string(&Device { + device_id: device_id.clone(), + display_name: initial_device_display_name, + last_seen_ip: None, // TODO + last_seen_ts: Some(SystemTime::now()), + })? + .as_bytes(), + )?; self.set_token(user_id, device_id, token)?; Ok(()) } - /// Removes a device from a user + /// Removes a device from a user. pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); @@ -147,8 +165,7 @@ impl Users { // TODO: Remove onetimekeys - // Remove the device - self.userdeviceids.remove(userdeviceid)?; + self.userdeviceid_metadata.remove(&userdeviceid)?; Ok(()) } @@ -157,14 +174,18 @@ impl Users { pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - self.userdeviceids.scan_prefix(prefix).keys().map(|bytes| { - Ok(utils::string_from_bytes( - &*bytes? - .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, - )?) - }) + // All devices have metadata + self.userdeviceid_metadata + .scan_prefix(prefix) + .keys() + .map(|bytes| { + Ok(utils::string_from_bytes( + &*bytes? + .rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, + )?) + }) } /// Replaces the access token of one device. @@ -173,7 +194,8 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - if self.userdeviceids.get(&userdeviceid)?.is_none() { + // All devices have metadata + if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { return Err(Error::BadRequest( "Tried to set token for nonexistent device", )); @@ -203,7 +225,8 @@ impl Users { key.push(0xff); key.extend_from_slice(device_id.as_bytes()); - if self.userdeviceids.get(&key)?.is_none() { + // All devices have metadata + if self.userdeviceid_metadata.get(&key)?.is_none() { return Err(Error::BadRequest( "Tried to set token for nonexistent device", )); @@ -396,4 +419,49 @@ impl Users { Ok(events) } + + pub fn update_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + device: &Device, + ) -> Result<()> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + if self.userdeviceid_metadata.get(userdeviceid)?.is_none() { + return Err(Error::BadRequest("device does not exist")); + } + + self.userdeviceid_metadata + .insert(userdeviceid, serde_json::to_string(device)?.as_bytes())?; + + Ok(()) + } + + /// Get device metadata. + pub fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.userdeviceid_metadata + .get(&userdeviceid)? 
+ .map_or(Ok(None), |bytes| Ok(Some(serde_json::from_slice(&bytes)?))) + } + + pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + + self.userdeviceid_metadata + .scan_prefix(key) + .values() + .map(|bytes| Ok(serde_json::from_slice::(&bytes?)?)) + } } diff --git a/src/main.rs b/src/main.rs index 4146282..c4a630b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -81,6 +81,11 @@ fn setup_rocket() -> rocket::Rocket { client_server::create_content_route, client_server::get_content_route, client_server::get_content_thumbnail_route, + client_server::get_devices_route, + client_server::get_device_route, + client_server::update_device_route, + client_server::delete_device_route, + client_server::delete_devices_route, client_server::options_route, server_server::well_known_server, server_server::get_server_version, From 0883c446595a53a53eca46b3b0e08f7878ddbb7a Mon Sep 17 00:00:00 2001 From: gnieto Date: Wed, 3 Jun 2020 13:41:30 +0200 Subject: [PATCH 0115/1727] Implement devices API (#20) small improvements Cargo fmt Simplify insert and update methods Review feedback Remove has_device method calls Load all devices with a single db call Remove device as in logout Put all metadata on the same tree Create userdevice key fucntion Implement devices API Implement all the devices endpoints. There's a couple of pending tasks: - Integrate the "logout" logic once it lands to master (this should remove the given device from the database). - Track and store last seen timestamp and IP. Co-authored-by: timokoesters Co-authored-by: Guillem Nieto --- src/database/users.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/users.rs b/src/database/users.rs index e216301..3a437d3 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -430,7 +430,7 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - if self.userdeviceid_metadata.get(userdeviceid)?.is_none() { + if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { return Err(Error::BadRequest("device does not exist")); } From 19c9e4b916226f02b8a122e7ca08e1478ce204f2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 2 Jun 2020 19:32:18 +0200 Subject: [PATCH 0116/1727] fix: room version 6 --- Cargo.lock | 88 ++++++++++++++++++++++---------------------- src/client_server.rs | 30 +++++++++++---- 2 files changed, 66 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b41ec3..ea9c232 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" @@ -277,9 +277,9 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ 
-421,9 +421,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -621,9 +621,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" dependencies = [ "autocfg", ] @@ -1002,16 +1002,16 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] name = "pin-project-lite" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" +checksum = "9df32da11d84f3a7d70205549562966279adb900e080fad3dccd8e64afccf0ad" [[package]] name = "pin-utils" @@ -1027,9 +1027,9 @@ checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" [[package]] name = "png" -version = "0.16.3" +version = "0.16.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c68a431ed29933a4eb5709aca9800989758c97759345860fa5db3cfced0b65d" +checksum = "12faa637ed9ae3d3c881332e54b5ae2dba81cda9fc4bbce0faa1ba53abcead50" dependencies = [ "bitflags", "crc32fast", @@ -1066,9 +1066,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ "unicode-xid 0.2.0", ] @@ -1088,7 +1088,7 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", ] [[package]] @@ -1289,9 +1289,9 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1343,9 +1343,9 @@ name = "ruma-events-macros" version = "0.21.3" source = "git+https://github.com/ruma/ruma-events.git?rev=4d09416cd1663d63c22153705c9e1fd77910797f#4d09416cd1663d63c22153705c9e1fd77910797f" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1365,9 +1365,9 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c93b9d5f951a2fb57b19c048a05ac1dbdb280ff7617ec6b02f54bf14318ed8" +checksum = "6316cb248e3e0323a5a269b8eaed571404fb4f65c81848549e9ba99fd9b8e9de" dependencies = [ "rand", "serde", @@ -1437,9 +1437,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "schannel" @@ -1505,9 +1505,9 @@ version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1611,9 +1611,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1629,11 +1629,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb37da98a55b1d08529362d9cbb863be17556873df2585904ab9d2bc951291d0" +checksum = "93a56fabc59dce20fe48b6c832cc249c713e7ed88fa28b0ee0a3bfcaae5fe4e2" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", "unicode-xid 0.2.0", ] @@ -1667,9 +1667,9 @@ version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "893582086c2f98cde18f906265a65b5030a074b1046c674ae898be6519a7f479" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1711,9 +1711,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", ] [[package]] @@ -1837,9 +1837,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +checksum = "55d1e41d56121e07f1e223db0a4def204e45c85425f6a16d462fd07c8d10d74c" [[package]] name = "version_check" @@ -1890,9 +1890,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", "wasm-bindgen-shared", ] @@ -1924,9 +1924,9 @@ version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" dependencies = [ - "proc-macro2 1.0.17", + "proc-macro2 1.0.18", "quote 1.0.6", - "syn 1.0.29", + "syn 1.0.30", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/src/client_server.rs b/src/client_server.rs index 5f5070e..753a811 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -338,14 +338,23 @@ pub fn logout_route( #[get("/_matrix/client/r0/capabilities")] pub fn get_capabilities_route() -> MatrixResult { - // TODO - //let mut available = BTreeMap::new(); - //available.insert("5".to_owned(), get_capabilities::RoomVersionStability::Unstable); + let mut available = BTreeMap::new(); + available.insert( + "5".to_owned(), + get_capabilities::RoomVersionStability::Stable, + ); + available.insert( + "6".to_owned(), + get_capabilities::RoomVersionStability::Stable, + ); MatrixResult(Ok(get_capabilities::Response { capabilities: get_capabilities::Capabilities { - change_password: None, - room_versions: None, 
//Some(get_capabilities::RoomVersionsCapability { default: "5".to_owned(), available }), + change_password: None, // None means it is possible + room_versions: Some(get_capabilities::RoomVersionsCapability { + default: "6".to_owned(), + available, + }), custom_capabilities: BTreeMap::new(), }, })) @@ -954,7 +963,7 @@ pub fn create_room_route( .creation_content .as_ref() .and_then(|c| c.predecessor.clone()), - room_version: RoomVersionId::version_5(), + room_version: RoomVersionId::version_6(), }) .unwrap(), None, @@ -2011,7 +2020,12 @@ pub fn sync_route( (db.rooms .pdus_since(&room_id, last_read) .unwrap() - .filter(|pdu| matches!(pdu.as_ref().unwrap().kind.clone(), EventType::RoomMessage | EventType::RoomEncrypted)) + .filter(|pdu| { + matches!( + pdu.as_ref().unwrap().kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) .count() as u32) .into(), ) @@ -2218,7 +2232,7 @@ pub fn sync_route( } else { None // TODO: left }, - device_one_time_keys_count: Default::default(), + device_one_time_keys_count: Default::default(), // TODO to_device: sync_events::ToDevice { events: db .users From 325e373684079769175d6b5dc59ae1f52e10fed4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 3 Jun 2020 16:24:00 +0200 Subject: [PATCH 0117/1727] refactor: small changes --- src/client_server.rs | 106 +++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 753a811..fcf3bcc 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1868,23 +1868,23 @@ pub fn get_state_events_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if db.rooms.is_joined(user_id, &body.room_id).unwrap() { - MatrixResult(Ok(get_state_events::Response { - room_state: db - .rooms - .room_state(&body.room_id) - .unwrap() - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), - })) - } else { - MatrixResult(Err(Error { + if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { + return MatrixResult(Err(Error { kind: ErrorKind::Forbidden, message: "You don't have permission to view the room state.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + status_code: http::StatusCode::FORBIDDEN, + })); } + + MatrixResult(Ok(get_state_events::Response { + room_state: db + .rooms + .room_state(&body.room_id) + .unwrap() + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + })) } #[get( @@ -1900,28 +1900,28 @@ pub fn get_state_events_for_key_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if db.rooms.is_joined(user_id, &body.room_id).unwrap() { - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() - .get(&(body.event_type.clone(), body.state_key.clone())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(&event.content).unwrap(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } - } else { - MatrixResult(Err(Error { + if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { + return MatrixResult(Err(Error { kind: ErrorKind::Forbidden, message: "You don't have permission to view the room state.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, + status_code: http::StatusCode::FORBIDDEN, + })); + } + + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + 
.get(&(body.event_type.clone(), body.state_key.clone())) + { + MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(&event.content).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), + status_code: http::StatusCode::NOT_FOUND, })) } } @@ -1938,27 +1938,27 @@ pub fn get_state_events_for_empty_key_route( ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if db.rooms.is_joined(user_id, &body.room_id).unwrap() { - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() - .get(&(body.event_type.clone(), "".to_owned())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(event).unwrap(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } - } else { - MatrixResult(Err(Error { + if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { + return MatrixResult(Err(Error { kind: ErrorKind::Forbidden, message: "You don't have permission to view the room state.".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + })); + } + + if let Some(event) = db + .rooms + .room_state(&body.room_id) + .unwrap() + .get(&(body.event_type.clone(), "".to_owned())) + { + MatrixResult(Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(event).unwrap(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::NotFound, + message: "State event not found.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, })) } From b192fddf2fcd6807b77ecd538d35c0ced88f5e51 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 3 Jun 2020 20:55:11 +0200 Subject: [PATCH 0118/1727] fix: show device display names --- Cargo.lock | 4 +-- Cargo.toml | 2 +- src/client_server.rs | 52 +++++++++++++++++++++++++++++-------- src/database/rooms.rs | 60 ++++++++++++++++++------------------------- src/database/users.rs | 14 +++++----- 5 files changed, 75 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea9c232..9f69ddd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,9 +26,9 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" +checksum = "0eb7f9ad01405feb3c1dac82463038945cf88eea4569acaf3ad662233496dd96" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", diff --git a/Cargo.toml b/Cargo.toml index 198521e..53aacc7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "c725288cd099690c1d13f1a9b9e57228bc860a62" } -ruma-identifiers = { version = "0.16.1", features = ["rand"] } +ruma-identifiers = { version = "0.16.2", features = ["rand"] } ruma-api = "0.16.1" ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "4d09416cd1663d63c22153705c9e1fd77910797f" } ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } diff --git a/src/client_server.rs b/src/client_server.rs index fcf3bcc..244f91c 100644 
--- a/src/client_server.rs +++ b/src/client_server.rs @@ -13,15 +13,13 @@ use ruma_client_api::{ alias::{create_alias, delete_alias, get_alias}, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, - device::{ - self, delete_device, delete_devices, get_device, get_devices, update_device, - }, + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, directory::{ self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, filter::{self, create_filter, get_filter}, - keys::{claim_keys, get_keys, upload_keys}, + keys::{self, claim_keys, get_keys, upload_keys}, media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, @@ -176,7 +174,8 @@ pub fn register_route( // Generate new device id if the user didn't specify one let device_id = body - .device_id.clone() + .device_id + .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); // Generate new token for the device @@ -184,7 +183,12 @@ pub fn register_route( // Add device db.users - .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone()) + .create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + ) .unwrap(); // Initial data @@ -311,7 +315,12 @@ pub fn login_route( // Add device db.users - .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone()) + .create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + ) .unwrap(); MatrixResult(Ok(login::Response { @@ -758,11 +767,21 @@ pub fn get_keys_route( for (user_id, device_ids) in &body.device_keys { if device_ids.is_empty() { let mut container = BTreeMap::new(); - for (device_id, keys) in db + for (device_id, mut keys) in db .users .all_device_keys(&user_id.clone()) .map(|r| r.unwrap()) { + let metadata = db + .users + .get_device_metadata(user_id, &device_id) + .unwrap() + .expect("this device should exist"); + + keys.unsigned = Some(keys::UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }); + container.insert(device_id, keys); } device_keys.insert(user_id.clone(), container); @@ -770,7 +789,18 @@ pub fn get_keys_route( for device_id in device_ids { let mut container = BTreeMap::new(); for keys in db.users.get_device_keys(&user_id.clone(), &device_id) { - container.insert(device_id.clone(), keys.unwrap()); + let mut keys = keys.unwrap(); + let metadata = db + .users + .get_device_metadata(user_id, &device_id) + .unwrap() + .expect("this device should exist"); + + keys.unsigned = Some(keys::UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }); + + container.insert(device_id.clone(), keys); } device_keys.insert(user_id.clone(), container); } @@ -1288,11 +1318,11 @@ pub fn get_alias_route( })) } else { debug!("Room alias not found."); - return MatrixResult(Err(Error { + MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "Room with alias not found.".to_owned(), status_code: http::StatusCode::BAD_REQUEST, - })); + })) } } else { todo!("ask remote server"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5b9f1e2..ea124be 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -329,15 +329,11 @@ impl Rooms { false } else if let member::MembershipState::Ban = current_membership { false - } else if join_rules == join_rules::JoinRule::Invite - && 
(current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Invite) - { - true - } else if join_rules == join_rules::JoinRule::Public { - true } else { - false + join_rules == join_rules::JoinRule::Invite + && (current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Invite) + || join_rules == join_rules::JoinRule::Public } } else if target_membership == member::MembershipState::Invite { if let Some(third_party_invite_json) = content.get("third_party_invite") @@ -351,46 +347,35 @@ impl Rooms { )?; todo!("handle third party invites"); } - } else if sender_membership != member::MembershipState::Join { - false - } else if current_membership == member::MembershipState::Join + } else if sender_membership != member::MembershipState::Join + || current_membership == member::MembershipState::Join || current_membership == member::MembershipState::Ban { false - } else if sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some() - { - true } else { - false + sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some() } } else if target_membership == member::MembershipState::Leave { if sender == target_user_id { current_membership == member::MembershipState::Join || current_membership == member::MembershipState::Invite - } else if sender_membership != member::MembershipState::Join { - false - } else if current_membership == member::MembershipState::Ban - && sender_power.filter(|&p| p < &power_levels.ban).is_some() + } else if sender_membership != member::MembershipState::Join + || current_membership == member::MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() { false - } else if sender_power.filter(|&p| p >= &power_levels.kick).is_some() - && target_power < sender_power - { - true } else { - false + sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power } } else if target_membership == member::MembershipState::Ban { if sender_membership != member::MembershipState::Join { false - } else if sender_power.filter(|&p| p >= &power_levels.ban).is_some() - && target_power < sender_power - { - true } else { - false + sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power } } else { false @@ -668,16 +653,21 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result<()> { if let Some(room_id) = room_id { + // New alias self.alias_roomid .insert(alias.alias(), &*room_id.to_string())?; let mut aliasid = room_id.to_string().as_bytes().to_vec(); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(aliasid, &*alias.alias())?; } else { - if let Some(room_id) = self.alias_roomid.remove(alias.alias())? { - for key in self.aliasid_alias.scan_prefix(room_id).keys() { - self.aliasid_alias.remove(key?)?; - } + // room_id=None means remove alias + let room_id = self + .alias_roomid + .remove(alias.alias())? 
+ .ok_or(Error::BadRequest("Alias does not exist"))?; + + for key in self.aliasid_alias.scan_prefix(room_id).keys() { + self.aliasid_alias.remove(key?)?; } } diff --git a/src/database/users.rs b/src/database/users.rs index 3a437d3..efd420a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -361,14 +361,12 @@ impl Users { self.userdeviceid_devicekeys.scan_prefix(key).map(|r| { let (key, value) = r?; - Ok(( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, - )?, - serde_json::from_slice(&*value)?, - )) + let userdeviceid = utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, + )?; + Ok((userdeviceid, serde_json::from_slice(&*value)?)) }) } From 1c85b0fd05f6906531222014426323325afb470c Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 3 Jun 2020 21:29:02 +0200 Subject: [PATCH 0119/1727] remove tests in favor of sytests --- src/test.rs | 124 ---------------------------------------------------- 1 file changed, 124 deletions(-) delete mode 100644 src/test.rs diff --git a/src/test.rs b/src/test.rs deleted file mode 100644 index 1df3a9d..0000000 --- a/src/test.rs +++ /dev/null @@ -1,124 +0,0 @@ -use super::*; -use rocket::local::Client; -use serde_json::{json, Value}; - -fn setup_client() -> Client { - Database::try_remove("localhost"); - let rocket = setup_rocket(); - Client::new(rocket).expect("valid rocket instance") -} - -#[tokio::test] -async fn register_login() { - let client = setup_client(); - let mut response = client - .post("/_matrix/client/r0/register?kind=user") - .body(registration_init()) - .dispatch() - .await; - let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); - - assert_eq!(response.status().code, 401); - assert!(dbg!(&body["flows"]).as_array().unwrap().len() > 0); - assert!(body["session"].as_str().unwrap().len() > 0); -} - -#[tokio::test] -async fn login_after_register_correct_password() { - let client = setup_client(); - let mut response = client - .post("/_matrix/client/r0/register?kind=user") - .body(registration_init()) - .dispatch() - .await; - let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); - let session = body["session"].clone(); - - let response = client - .post("/_matrix/client/r0/register?kind=user") - .body(registration(session.as_str().unwrap())) - .dispatch() - .await; - assert_eq!(response.status().code, 200); - - let login_response = client - .post("/_matrix/client/r0/login") - .body(login_with_password("ilovebananas")) - .dispatch() - .await; - assert_eq!(login_response.status().code, 200); -} - -#[tokio::test] -async fn login_after_register_incorrect_password() { - let client = setup_client(); - let mut response = client - .post("/_matrix/client/r0/register?kind=user") - .body(registration_init()) - .dispatch() - .await; - let body = serde_json::from_str::(&response.body_string().await.unwrap()).unwrap(); - let session = body["session"].clone(); - - let response = client - .post("/_matrix/client/r0/register?kind=user") - .body(registration(session.as_str().unwrap())) - .dispatch() - .await; - assert_eq!(response.status().code, 200); - - let mut login_response = client - .post("/_matrix/client/r0/login") - .body(login_with_password("idontlovebananas")) - .dispatch() - .await; - let body = serde_json::from_str::(&login_response.body_string().await.unwrap()).unwrap(); - assert_eq!( - body.as_object() - .unwrap() - .get("errcode") - 
.unwrap() - .as_str() - .unwrap(), - "M_FORBIDDEN" - ); - assert_eq!(login_response.status().code, 403); -} - -fn registration_init() -> &'static str { - r#"{ - "username": "cheeky_monkey", - "password": "ilovebananas", - "device_id": "GHTYAJCE", - "initial_device_display_name": "Jungle Phone", - "inhibit_login": false - }"# -} - -fn registration(session: &str) -> String { - json!({ - "auth": { - "session": session, - "type": "m.login.dummy" - }, - "username": "cheeky_monkey", - "password": "ilovebananas", - "device_id": "GHTYAJCE", - "initial_device_display_name": "Jungle Phone", - "inhibit_login": false - }) - .to_string() -} - -fn login_with_password(password: &str) -> String { - json!({ - "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "cheeky_monkey" - }, - "password": password, - "initial_device_display_name": "Jungle Phone" - }) - .to_string() -} From 8328eeb5ac1ff6a483355f8569e1d7d70150dedf Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 3 Jun 2020 14:16:56 +0200 Subject: [PATCH 0120/1727] Make sytest not fail --- src/client_server.rs | 3 +-- src/main.rs | 13 +++++-------- sytest/sytest-whitelist | 28 +++++++++++++--------------- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 244f91c..9dfe2d7 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -55,8 +55,7 @@ use ruma_events::{ }; use ruma_identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; - -use crate::{server_server, utils, Database, MatrixResult, Ruma}; +use crate::{utils, Database, MatrixResult, Ruma}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; diff --git a/src/main.rs b/src/main.rs index c4a630b..27493d1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,12 +6,9 @@ mod database; mod error; mod pdu; mod ruma_wrapper; -mod server_server; +//mod server_server; mod utils; -#[cfg(test)] -mod test; - pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; @@ -87,10 +84,10 @@ fn setup_rocket() -> rocket::Rocket { client_server::delete_device_route, client_server::delete_devices_route, client_server::options_route, - server_server::well_known_server, - server_server::get_server_version, - server_server::get_server_keys, - server_server::get_server_keys_deprecated, + //server_server::well_known_server, + //server_server::get_server_version, + //server_server::get_server_keys, + //server_server::get_server_keys_deprecated, ], ) .attach(AdHoc::on_attach("Config", |rocket| { diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 1b09fa7..140fad8 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -28,26 +28,13 @@ POST /login wrong password is rejected POST /createRoom makes a private room POST /createRoom makes a private room with invites GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -POST /join/:room_alias can join a room POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /join/:room_alias can join a room with custom content POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/send/:event_type sends a message -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT 
/rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -GET /rooms/:room_id/state/m.room.power_levels can fetch levels PUT /rooms/:room_id/state/m.room.power_levels can set levels PUT power_levels should not explode if the old power levels were empty Both GET and PUT work -POST /rooms/:room_id/read_markers can create read marker -User signups are forbidden from starting with '_' -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected Room creation reports m.room.create to myself Room creation reports m.room.member to myself Version responds 200 OK with valid structure @@ -60,7 +47,6 @@ Can create filter Should reject keys claiming to belong to a different user Can add account data Checking local federation server -Alternative server names do not cause a routing loop Can read configuration endpoint AS cannot create users outside its own namespace Changing the actions of an unknown default rule fails with 404 @@ -78,4 +64,16 @@ Trying to get push rules with unknown rule_id fails with 404 GET /events with non-numeric 'limit' GET /events with negative 'limit' GET /events with non-numeric 'timeout' -Getting push rules doesn't corrupt the cache SYN-390 \ No newline at end of file +Getting push rules doesn't corrupt the cache SYN-390 +GET /publicRooms lists newly-created room +PUT /directory/room/:room_alias creates alias +3pid invite join with wrong but valid signature are rejected +3pid invite join valid signature but revoked keys are rejected +3pid invite join valid signature but unreachable ID server are rejected +query for user with no keys returns empty key dict +Can upload without a file name +Can upload with ASCII file name +User appears in user directory +User directory correctly update on display name change +User in shared private room does appear in user directory +User in dir while user still shares private rooms From 168f2281fd87ccb0d1090668627b95ed03d36077 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 4 Jun 2020 11:17:36 +0200 Subject: [PATCH 0121/1727] improvement: bundle typing events and only send on changes Fixes #67 and #49 --- src/client_server.rs | 59 +++++++-------- src/database.rs | 5 +- src/database/rooms.rs | 25 ------- src/database/rooms/edus.rs | 150 ++++++++++++++++++++++++++----------- 4 files changed, 141 insertions(+), 98 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 9dfe2d7..6d99a82 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -4,6 +4,7 @@ use std::{ time::{Duration, SystemTime}, }; +use crate::{utils, Database, MatrixResult, Ruma}; use log::{debug, warn}; use rocket::{delete, get, options, post, put, State}; use ruma_client_api::{ @@ -55,7 +56,6 @@ use ruma_events::{ }; use ruma_identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; -use crate::{utils, Database, MatrixResult, Ruma}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -921,18 +921,12 @@ pub fn create_typing_event_route( _user_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let edu = EduEvent::Typing(ruma_events::typing::TypingEvent { - content: ruma_events::typing::TypingEventContent { - user_ids: vec![user_id.clone()], - }, - room_id: None, // None because it can be inferred - }); if body.typing { db.rooms .edus .roomactive_add( - edu, + &user_id, &body.room_id, body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + 
utils::millis_since_unix_epoch().try_into().unwrap_or(0), @@ -940,7 +934,10 @@ pub fn create_typing_event_route( ) .unwrap(); } else { - db.rooms.edus.roomactive_remove(edu, &body.room_id).unwrap(); + db.rooms + .edus + .roomactive_remove(&user_id, &body.room_id, &db.globals) + .unwrap(); } MatrixResult(Ok(create_typing_event::Response)) @@ -2083,30 +2080,23 @@ pub fn sync_route( let mut edus = db .rooms .edus - .roomactives_all(&room_id) + .roomlatests_since(&room_id, since) + .unwrap() .map(|r| r.unwrap()) .collect::>(); - if edus.is_empty() { - edus.push( - EduEvent::Typing(ruma_events::typing::TypingEvent { - content: ruma_events::typing::TypingEventContent { - user_ids: Vec::new(), - }, - room_id: None, // None because it can be inferred - }) - .into(), - ); + if db + .rooms + .edus + .last_roomactive_update(&room_id, &db.globals) + .unwrap() + > since + { + edus.push(serde_json::from_str(&serde_json::to_string( + &EduEvent::Typing(db.rooms.edus.roomactives_all(&room_id).unwrap()), + ).unwrap()).unwrap()); } - edus.extend( - db.rooms - .edus - .roomlatests_since(&room_id, since) - .unwrap() - .map(|r| r.unwrap()), - ); - joined_rooms.insert( room_id.clone().try_into().unwrap(), sync_events::JoinedRoom { @@ -2173,7 +2163,17 @@ pub fn sync_route( .map(|r| r.unwrap()) .collect::>(); - edus.extend(db.rooms.edus.roomactives_all(&room_id).map(|r| r.unwrap())); + if db + .rooms + .edus + .last_roomactive_update(&room_id, &db.globals) + .unwrap() + > since + { + edus.push(serde_json::from_str(&serde_json::to_string( + &EduEvent::Typing(db.rooms.edus.roomactives_all(&room_id).unwrap()), + ).unwrap()).unwrap()); + } left_rooms.insert( room_id.clone().try_into().unwrap(), @@ -2324,7 +2324,6 @@ pub fn get_message_events_route( #[get("/_matrix/client/r0/voip/turnServer")] pub fn turn_server_route() -> MatrixResult { - warn!("TODO: turn_server_route"); MatrixResult(Err(Error { kind: ErrorKind::NotFound, message: "There is no turn server yet.".to_owned(), diff --git a/src/database.rs b/src/database.rs index d4927a7..7be0dc7 100644 --- a/src/database.rs +++ b/src/database.rs @@ -70,7 +70,10 @@ impl Database { edus: rooms::RoomEdus { roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), // "Private" read receipt roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), // Read receipts - roomactiveid_roomactive: db.open_tree("roomactiveid_roomactive").unwrap(), // Typing notifs + roomactiveid_userid: db.open_tree("roomactiveid_userid").unwrap(), // Typing notifs + roomid_lastroomactiveupdate: db + .open_tree("roomid_lastroomactiveupdate") + .unwrap(), }, pduid_pdu: db.open_tree("pduid_pdu").unwrap(), eventid_pduid: db.open_tree("eventid_pduid").unwrap(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ea124be..a9a9306 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -52,31 +52,6 @@ impl Rooms { .is_some()) } - // TODO: Remove and replace with public room dir - /// Returns a vector over all rooms. - pub fn all_rooms(&self) -> Vec { - let mut room_ids = self - .roomid_pduleaves - .iter() - .keys() - .map(|key| { - RoomId::try_from( - &*utils::string_from_bytes( - &key.unwrap() - .iter() - .copied() - .take_while(|&x| x != 0xff) // until delimiter - .collect::>(), - ) - .unwrap(), - ) - .unwrap() - }) - .collect::>(); - room_ids.dedup(); - room_ids - } - /// Returns the full room state. 
pub fn room_state(&self, room_id: &RoomId) -> Result> { let mut hashmap = HashMap::new(); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index a2ade55..0519b43 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,11 +1,13 @@ -use crate::{utils, Result}; +use crate::{utils, Error, Result}; use ruma_events::{collections::only::Event as EduEvent, EventJson}; use ruma_identifiers::{RoomId, UserId}; +use std::convert::TryFrom; pub struct RoomEdus { pub(in super::super) roomuserid_lastread: sled::Tree, // RoomUserId = Room + User pub(in super::super) roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId - pub(in super::super) roomactiveid_roomactive: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count + pub(in super::super) roomactiveid_userid: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count + pub(in super::super) roomid_lastroomactiveupdate: sled::Tree, // LastRoomActiveUpdate = Count } impl RoomEdus { @@ -79,10 +81,11 @@ impl RoomEdus { .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) } - /// Adds an event that will be saved until the `timeout` timestamp (e.g. typing notifications). + /// Sets a user as typing until the timeout timestamp is reached or roomactive_remove is + /// called. pub fn roomactive_add( &self, - event: EduEvent, + user_id: &UserId, room_id: &RoomId, timeout: u64, globals: &super::super::globals::Globals, @@ -90,9 +93,73 @@ impl RoomEdus { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - // Cleanup all outdated edus before inserting a new one + let count = globals.next_count()?.to_be_bytes(); + + let mut room_active_id = prefix; + room_active_id.extend_from_slice(&timeout.to_be_bytes()); + room_active_id.push(0xff); + room_active_id.extend_from_slice(&count); + + self.roomactiveid_userid + .insert(&room_active_id, &*user_id.to_string().as_bytes())?; + + self.roomid_lastroomactiveupdate + .insert(&room_id.to_string().as_bytes(), &count)?; + + Ok(()) + } + + /// Removes a user from typing before the timeout is reached. + pub fn roomactive_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let user_id = user_id.to_string(); + + let mut found_outdated = false; + + // Maybe there are multiple ones from calling roomactive_add multiple times for outdated_edu in self - .roomactiveid_roomactive + .roomactiveid_userid + .scan_prefix(&prefix) + .filter_map(|r| r.ok()) + .filter(|(_, v)| v == user_id.as_bytes()) + { + self.roomactiveid_userid.remove(outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lastroomactiveupdate.insert( + &room_id.to_string().as_bytes(), + &globals.next_count()?.to_be_bytes(), + )?; + } + + Ok(()) + } + + /// Makes sure that typing events with old timestamps get removed. 
+ fn roomactives_maintain( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let current_timestamp = utils::millis_since_unix_epoch(); + + let mut found_outdated = false; + + // Find all outdated edus before inserting a new one + for outdated_edu in self + .roomactiveid_userid .scan_prefix(&prefix) .keys() .filter_map(|r| r.ok()) @@ -101,60 +168,59 @@ impl RoomEdus { k.split(|&c| c == 0xff) .nth(1) .expect("roomactive has valid timestamp and delimiters"), - ) < utils::millis_since_unix_epoch() + ) < current_timestamp }) { // This is an outdated edu (time > timestamp) self.roomlatestid_roomlatest.remove(outdated_edu)?; + found_outdated = true; } - let mut room_active_id = prefix; - room_active_id.extend_from_slice(&timeout.to_be_bytes()); - room_active_id.push(0xff); - room_active_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - - self.roomactiveid_roomactive - .insert(room_active_id, &*serde_json::to_string(&event)?)?; - - Ok(()) - } - - /// Removes an active event manually (before the timeout is reached). - pub fn roomactive_remove(&self, event: EduEvent, room_id: &RoomId) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); - prefix.push(0xff); - - let json = serde_json::to_string(&event)?; - - // Remove outdated entries - for outdated_edu in self - .roomactiveid_roomactive - .scan_prefix(&prefix) - .filter_map(|r| r.ok()) - .filter(|(_, v)| v == json.as_bytes()) - { - self.roomactiveid_roomactive.remove(outdated_edu.0)?; + if found_outdated { + self.roomid_lastroomactiveupdate.insert( + &room_id.to_string().as_bytes(), + &globals.next_count()?.to_be_bytes(), + )?; } Ok(()) } /// Returns an iterator over all active events (e.g. typing notifications). - pub fn roomactives_all( + pub fn last_roomactive_update( &self, room_id: &RoomId, - ) -> impl Iterator>> { + globals: &super::super::globals::Globals, + ) -> Result { + self.roomactives_maintain(room_id, globals)?; + + Ok(self + .roomid_lastroomactiveupdate + .get(&room_id.to_string().as_bytes())? + .map(|bytes| utils::u64_from_bytes(&bytes)) + .unwrap_or(0)) + } + + /// Returns an iterator over all active events (e.g. typing notifications). + pub fn roomactives_all(&self, room_id: &RoomId) -> Result { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - let mut first_active_edu = prefix.clone(); - first_active_edu.extend_from_slice(&utils::millis_since_unix_epoch().to_be_bytes()); + let mut user_ids = Vec::new(); - self.roomactiveid_roomactive - .range(first_active_edu..) - .filter_map(|r| r.ok()) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + for user_id in self + .roomactiveid_userid + .scan_prefix(prefix) + .values() + .map(|user_id| Ok::<_, Error>(UserId::try_from(utils::string_from_bytes(&user_id?)?)?)) + { + user_ids.push(user_id?); + } + + Ok(ruma_events::typing::TypingEvent { + content: ruma_events::typing::TypingEventContent { user_ids }, + room_id: None, // Can be inferred + }) } /// Sets a private read marker at `count`. 
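
The patch above replaces per-event typing EDUs with a bundled representation: who is typing in a room is stored per user with an expiry timestamp, every change bumps a per-room counter, and /sync emits at most one m.typing event per room, and only when that counter has advanced past the client's `since` token. Below is a minimal, self-contained sketch of that idea using plain in-memory maps instead of sled trees; all type and method names (TypingState, add, remove, maintain, changed_since, currently_typing) are illustrative stand-ins, not Conduit's actual API.

use std::collections::{BTreeMap, HashMap, HashSet};

struct TypingState {
    // room_id -> (user_id -> expiry timestamp in milliseconds)
    typing: HashMap<String, BTreeMap<String, u64>>,
    // room_id -> counter value of the last typing change in that room
    last_update: HashMap<String, u64>,
    // globally increasing counter, stands in for globals.next_count()
    counter: u64,
}

impl TypingState {
    fn bump(&mut self, room_id: &str) {
        self.counter += 1;
        self.last_update.insert(room_id.to_owned(), self.counter);
    }

    /// Like roomactive_add: mark `user_id` as typing until `timeout`.
    fn add(&mut self, room_id: &str, user_id: &str, timeout: u64) {
        self.typing
            .entry(room_id.to_owned())
            .or_default()
            .insert(user_id.to_owned(), timeout);
        self.bump(room_id);
    }

    /// Like roomactive_remove: stop typing before the timeout is reached.
    fn remove(&mut self, room_id: &str, user_id: &str) {
        let removed = self
            .typing
            .get_mut(room_id)
            .map(|room| room.remove(user_id).is_some())
            .unwrap_or(false);
        if removed {
            self.bump(room_id);
        }
    }

    /// Like roomactives_maintain: drop expired entries, bump only if something changed.
    fn maintain(&mut self, room_id: &str, now: u64) {
        let changed = match self.typing.get_mut(room_id) {
            Some(room) => {
                let before = room.len();
                room.retain(|_, expiry| *expiry > now);
                room.len() != before
            }
            None => false,
        };
        if changed {
            self.bump(room_id);
        }
    }

    /// Like last_roomactive_update: has anything changed since the client's `since` token?
    fn changed_since(&mut self, room_id: &str, since: u64, now: u64) -> bool {
        self.maintain(room_id, now);
        self.last_update.get(room_id).copied().unwrap_or(0) > since
    }

    /// Like roomactives_all: everyone currently typing, bundled into one m.typing payload.
    fn currently_typing(&self, room_id: &str) -> HashSet<String> {
        self.typing
            .get(room_id)
            .map(|room| room.keys().cloned().collect())
            .unwrap_or_default()
    }
}

fn main() {
    let mut state = TypingState {
        typing: HashMap::new(),
        last_update: HashMap::new(),
        counter: 0,
    };

    // @alice starts typing; the entry expires at t = 1_030_000 ms.
    state.add("!room:example.org", "@alice:example.org", 1_030_000);

    // A /sync with since = 0 sees the change and bundles one m.typing event.
    if state.changed_since("!room:example.org", 0, 1_000_000) {
        println!("typing: {:?}", state.currently_typing("!room:example.org"));
    }

    // Later the entry has expired; the counter is bumped once for the removal and then stays put.
    assert!(state.changed_since("!room:example.org", 0, 2_000_000));
    let since = state.counter;
    assert!(!state.changed_since("!room:example.org", since, 2_000_000));
}

In the actual patch the sled tree is keyed as room_id + timeout + count, so within a room's prefix the entries are ordered by expiry and roomactives_maintain can drop outdated ones with a cheap prefix scan before the last-update counter is consulted.
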
From a8df1acdfdef93fcae3e1fea5c0057cdb5fa1bb9 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 4 Jun 2020 13:58:55 +0200 Subject: [PATCH 0122/1727] feat: load replies, forward pagination --- Cargo.lock | 40 ++++++-- Cargo.toml | 7 +- src/client_server.rs | 229 +++++++++++++++++++++++++++++++++--------- src/database/rooms.rs | 23 +++++ src/main.rs | 1 + 5 files changed, 240 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f69ddd..365781d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,7 +163,7 @@ dependencies = [ "ruma-api", "ruma-client-api", "ruma-common", - "ruma-events", + "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", "ruma-federation-api", "ruma-identifiers", "ruma-signatures", @@ -1297,13 +1297,13 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma-client-api.git?rev=c725288cd099690c1d13f1a9b9e57228bc860a62#c725288cd099690c1d13f1a9b9e57228bc860a62" +source = "git+https://github.com/ruma/ruma-client-api.git?rev=c2c5a3cea01b0544e5adb40f7ddae828627afd2c#c2c5a3cea01b0544e5adb40f7ddae828627afd2c" dependencies = [ "http", "js_int", "ruma-api", "ruma-common", - "ruma-events", + "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", "ruma-identifiers", "ruma-serde", "serde", @@ -1326,11 +1326,11 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events.git?rev=4d09416cd1663d63c22153705c9e1fd77910797f#4d09416cd1663d63c22153705c9e1fd77910797f" +source = "git+https://github.com/ruma/ruma-events?rev=7395f94#7395f940a7cf70c1598223570fb2b731a6a41707" dependencies = [ "js_int", "ruma-common", - "ruma-events-macros", + "ruma-events-macros 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", "ruma-identifiers", "ruma-serde", "serde", @@ -1338,10 +1338,36 @@ dependencies = [ "strum", ] +[[package]] +name = "ruma-events" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ddf82c2231e4c53443424df34e868e4b09c20de7a76780d47a133a3b3f8ad9c" +dependencies = [ + "js_int", + "ruma-common", + "ruma-events-macros 0.21.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events.git?rev=4d09416cd1663d63c22153705c9e1fd77910797f#4d09416cd1663d63c22153705c9e1fd77910797f" +source = "git+https://github.com/ruma/ruma-events?rev=7395f94#7395f940a7cf70c1598223570fb2b731a6a41707" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.6", + "syn 1.0.30", +] + +[[package]] +name = "ruma-events-macros" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88e5c5b242fe4ee0cc56879057353621196d0988dd359579cad8f43471e483b7" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -1356,7 +1382,7 @@ dependencies = [ "js_int", "matches", "ruma-api", - "ruma-events", + "ruma-events 0.21.3 (registry+https://github.com/rust-lang/crates.io-index)", "ruma-identifiers", "ruma-serde", "serde", diff --git a/Cargo.toml b/Cargo.toml index 53aacc7..7f7ba5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,10 +14,10 @@ edition = "2018" [dependencies] rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = 
"c725288cd099690c1d13f1a9b9e57228bc860a62" } +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "c2c5a3cea01b0544e5adb40f7ddae828627afd2c" } ruma-identifiers = { version = "0.16.2", features = ["rand"] } ruma-api = "0.16.1" -ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "4d09416cd1663d63c22153705c9e1fd77910797f" } +ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "7395f94" } ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "4cf4aa6ef74b25ad8c14d99d7774129f023df163" } log = "0.4.8" @@ -34,6 +34,3 @@ base64 = "0.12.1" thiserror = "1.0.19" ruma-common = "0.1.2" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } - -[patch.crates-io] -ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "4d09416cd1663d63c22153705c9e1fd77910797f" } diff --git a/src/client_server.rs b/src/client_server.rs index 6d99a82..2ef6d02 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -14,6 +14,7 @@ use ruma_client_api::{ alias::{create_alias, delete_alias, get_alias}, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, + context::get_context, device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, directory::{ self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, @@ -200,7 +201,7 @@ pub fn register_route( content: ruma_events::push_rules::PushRulesEventContent { global: ruma_events::push_rules::Ruleset { content: vec![], - override_rules: vec![ruma_events::push_rules::ConditionalPushRule { + override_: vec![ruma_events::push_rules::ConditionalPushRule { actions: vec![ruma_events::push_rules::Action::DontNotify], default: true, enabled: false, @@ -219,12 +220,10 @@ pub fn register_route( default: true, enabled: true, rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma_events::push_rules::PushCondition::EventMatch( - ruma_events::push_rules::EventMatchCondition { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - )], + conditions: vec![ruma_events::push_rules::PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }], }], }, }, @@ -348,11 +347,11 @@ pub fn logout_route( pub fn get_capabilities_route() -> MatrixResult { let mut available = BTreeMap::new(); available.insert( - "5".to_owned(), + RoomVersionId::version_5(), get_capabilities::RoomVersionStability::Stable, ); available.insert( - "6".to_owned(), + RoomVersionId::version_6(), get_capabilities::RoomVersionStability::Stable, ); @@ -374,7 +373,6 @@ pub fn get_pushrules_all_route( body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - warn!("TODO: get_pushrules_all_route"); if let Some(EduEvent::PushRules(pushrules)) = db .account_data @@ -383,7 +381,7 @@ pub fn get_pushrules_all_route( .map(|edu| edu.deserialize().expect("PushRules event in db is valid")) { MatrixResult(Ok(get_pushrules_all::Response { - global: BTreeMap::new(), + global: pushrules.content.global })) } else { MatrixResult(Err(Error { @@ -2092,9 +2090,15 @@ pub fn sync_route( .unwrap() > since { - edus.push(serde_json::from_str(&serde_json::to_string( - &EduEvent::Typing(db.rooms.edus.roomactives_all(&room_id).unwrap()), - ).unwrap()).unwrap()); + edus.push( + serde_json::from_str( + 
&serde_json::to_string(&EduEvent::Typing( + db.rooms.edus.roomactives_all(&room_id).unwrap(), + )) + .unwrap(), + ) + .unwrap(), + ); } joined_rooms.insert( @@ -2170,9 +2174,15 @@ pub fn sync_route( .unwrap() > since { - edus.push(serde_json::from_str(&serde_json::to_string( - &EduEvent::Typing(db.rooms.edus.roomactives_all(&room_id).unwrap()), - ).unwrap()).unwrap()); + edus.push( + serde_json::from_str( + &serde_json::to_string(&EduEvent::Typing( + db.rooms.edus.roomactives_all(&room_id).unwrap(), + )) + .unwrap(), + ) + .unwrap(), + ); } left_rooms.insert( @@ -2271,6 +2281,93 @@ pub fn sync_route( })) } +#[get( + "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", + data = "" +)] +pub fn get_context_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, + _event_id: String, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { + return MatrixResult(Err(Error { + kind: ErrorKind::Forbidden, + message: "You don't have permission to view this room.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })); + } + + if let Some(base_event) = db.rooms.get_pdu(&body.event_id).unwrap() { + let base_event = base_event + .to_room_event(); + + let base_token = db + .rooms + .get_pdu_count(&body.event_id) + .unwrap() + .expect("event exists, so count should exist too"); + + let events_before = db + .rooms + .pdus_until(&body.room_id, base_token) + .take(u32::try_from(body.limit).unwrap() as usize / 2) + .map(|r| r.unwrap()) + .collect::>(); + + let start_token = events_before + .last() + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) + .map(|c| c.to_string()); + + let events_before = events_before + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + let events_after = db + .rooms + .pdus_after(&body.room_id, base_token) + .take(u32::try_from(body.limit).unwrap() as usize / 2) + .map(|r| r.unwrap()) + .collect::>(); + + let end_token = events_after + .last() + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) + .map(|c| c.to_string()); + + let events_after = events_after + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + MatrixResult(Ok(get_context::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state: db // TODO: State at event + .rooms + .room_state(&body.room_id) + .unwrap() + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Invalid base event.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } +} + #[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] pub fn get_message_events_route( db: State<'_, Database>, @@ -2287,39 +2384,75 @@ pub fn get_message_events_route( })); } - if let get_message_events::Direction::Forward = body.dir { - todo!(); + match body.dir { + get_message_events::Direction::Forward => { + if let Ok(from) = body.from.clone().parse() { + let events_after = db + .rooms + .pdus_after(&body.room_id, from) + .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) + .map(|r| r.unwrap()) + .collect::>(); + + let end_token = events_after + .last() + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) + .map(|c| c.to_string()); + + let events_after = events_after + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + MatrixResult(Ok(get_message_events::Response { + start: 
Some(body.from.clone()), + end: end_token, + chunk: events_after, + state: Vec::new(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Invalid from.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } + } + get_message_events::Direction::Backward => { + if let Ok(from) = body.from.clone().parse() { + let events_before = db + .rooms + .pdus_until(&body.room_id, from) + .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) + .map(|r| r.unwrap()) + .collect::>(); + + let start_token = events_before + .last() + .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) + .map(|c| c.to_string()); + + let events_before = events_before + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + MatrixResult(Ok(get_message_events::Response { + start: Some(body.from.clone()), + end: start_token, + chunk: events_before, + state: Vec::new(), + })) + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Invalid from.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) + } + } } - if let Ok(from) = body.from.clone().parse() { - let pdus = db - .rooms - .pdus_until(&body.room_id, from) - .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) - .map(|r| r.unwrap()) - .collect::>(); - let prev_batch = pdus - .last() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); - let room_events = pdus - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect::>(); - - MatrixResult(Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: prev_batch, - chunk: room_events, - state: Vec::new(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid from.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } } #[get("/_matrix/client/r0/voip/turnServer")] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a9a9306..44cd202 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -553,6 +553,29 @@ impl Rooms { .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) } + /// Returns an iterator over all events in a room that happened after the event with id + /// `from` in chronological order. + pub fn pdus_after( + &self, + room_id: &RoomId, + from: u64, + ) -> impl Iterator> { + // Create the first part of the full pdu id + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut current = prefix.clone(); + current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event + + let current: &[u8] = ¤t; + + self.pduid_pdu + .range(current..) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + } + /// Replace a PDU with the redacted form. pub fn redact_pdu(&self, event_id: &EventId) -> Result<()> { if let Some(pdu_id) = self.get_pdu_id(event_id)? 
{ diff --git a/src/main.rs b/src/main.rs index 27493d1..12a5195 100644 --- a/src/main.rs +++ b/src/main.rs @@ -70,6 +70,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_state_events_for_key_route, client_server::get_state_events_for_empty_key_route, client_server::sync_route, + client_server::get_context_route, client_server::get_message_events_route, client_server::turn_server_route, client_server::publicised_groups_route, From b7f7a39973e41101b3220d4f4ad898262743799e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 4 Jun 2020 15:02:27 +0200 Subject: [PATCH 0123/1727] feat: kick,ban,unban user route --- src/client_server.rs | 191 +++++++++++++++++++++++++++++++++------- src/main.rs | 3 + sytest/sytest-whitelist | 1 + 3 files changed, 161 insertions(+), 34 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 2ef6d02..07f1911 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -25,7 +25,7 @@ use ruma_client_api::{ media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, - leave_room, + kick_user, leave_room, ban_user, unban_user, }, message::{create_message_event, get_message_events}, presence::set_presence, @@ -381,7 +381,7 @@ pub fn get_pushrules_all_route( .map(|edu| edu.deserialize().expect("PushRules event in db is valid")) { MatrixResult(Ok(get_pushrules_all::Response { - global: pushrules.content.global + global: pushrules.content.global, })) } else { MatrixResult(Err(Error { @@ -1433,13 +1433,28 @@ pub fn leave_room_route( _room_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let state = db.rooms.room_state(&body.room_id).unwrap(); + + let mut event = + serde_json::from_value::>( + state + .get(&(EventType::RoomMember, user_id.to_string())) + .unwrap() // TODO: error handling + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + event.membership = ruma_events::room::member::MembershipState::Leave; db.rooms .append_pdu( body.room_id.clone(), user_id.clone(), EventType::RoomMember, - json!({"membership": "leave"}), + serde_json::to_value(event).unwrap(), None, Some(user_id.to_string()), None, @@ -1450,6 +1465,125 @@ pub fn leave_room_route( MatrixResult(Ok(leave_room::Response)) } +#[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] +pub fn kick_user_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let state = db.rooms.room_state(&body.room_id).unwrap(); + + let mut event = + serde_json::from_value::>( + state + .get(&(EventType::RoomMember, user_id.to_string())) + .unwrap() // TODO: error handling + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + event.membership = ruma_events::room::member::MembershipState::Leave; + // TODO: reason + + db.rooms + .append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).unwrap(), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(kick_user::Response)) +} + +#[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] +pub fn ban_user_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let state = 
db.rooms.room_state(&body.room_id).unwrap(); + + let mut event = + serde_json::from_value::>( + state + .get(&(EventType::RoomMember, user_id.to_string())) + .unwrap() // TODO: error handling + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + event.membership = ruma_events::room::member::MembershipState::Ban; + // TODO: reason + + db.rooms + .append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).unwrap(), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(ban_user::Response)) +} + +#[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] +pub fn unban_user_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> MatrixResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let state = db.rooms.room_state(&body.room_id).unwrap(); + + let mut event = + serde_json::from_value::>( + state + .get(&(EventType::RoomMember, user_id.to_string())) + .unwrap() // TODO: error handling + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + event.membership = ruma_events::room::member::MembershipState::Leave; + + db.rooms + .append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).unwrap(), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + ) + .unwrap(); + + MatrixResult(Ok(unban_user::Response)) +} + #[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] pub fn forget_room_route( db: State<'_, Database>, @@ -1470,20 +1604,19 @@ pub fn invite_user_route( _room_id: String, ) -> MatrixResult { if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - let event = member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id).unwrap(), - avatar_url: db.users.avatar_url(&user_id).unwrap(), - is_direct: None, - third_party_invite: None, - }; - db.rooms .append_pdu( body.room_id.clone(), body.user_id.clone().expect("user is authenticated"), EventType::RoomMember, - serde_json::to_value(event).unwrap(), + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id).unwrap(), + avatar_url: db.users.avatar_url(&user_id).unwrap(), + is_direct: None, + third_party_invite: None, + }) + .unwrap(), None, Some(user_id.to_string()), None, @@ -2302,8 +2435,7 @@ pub fn get_context_route( } if let Some(base_event) = db.rooms.get_pdu(&body.event_id).unwrap() { - let base_event = base_event - .to_room_event(); + let base_event = base_event.to_room_event(); let base_token = db .rooms @@ -2358,7 +2490,7 @@ pub fn get_context_route( .values() .map(|pdu| pdu.to_state_event()) .collect(), - })) + })) } else { MatrixResult(Err(Error { kind: ErrorKind::Unknown, @@ -2384,9 +2516,9 @@ pub fn get_message_events_route( })); } - match body.dir { - get_message_events::Direction::Forward => { - if let Ok(from) = body.from.clone().parse() { + if let Ok(from) = body.from.clone().parse() { + match body.dir { + get_message_events::Direction::Forward => { let events_after = db .rooms .pdus_after(&body.room_id, from) @@ -2410,16 +2542,8 @@ pub fn get_message_events_route( chunk: events_after, state: Vec::new(), })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid from.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) } - } - 
get_message_events::Direction::Backward => { - if let Ok(from) = body.from.clone().parse() { + get_message_events::Direction::Backward => { let events_before = db .rooms .pdus_until(&body.room_id, from) @@ -2443,16 +2567,15 @@ pub fn get_message_events_route( chunk: events_before, state: Vec::new(), })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid from.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) } } + } else { + MatrixResult(Err(Error { + kind: ErrorKind::Unknown, + message: "Invalid from.".to_owned(), + status_code: http::StatusCode::BAD_REQUEST, + })) } - } #[get("/_matrix/client/r0/voip/turnServer")] diff --git a/src/main.rs b/src/main.rs index 12a5195..ad9aeda 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,6 +55,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::join_room_by_id_or_alias_route, client_server::leave_room_route, client_server::forget_room_route, + client_server::kick_user_route, + client_server::ban_user_route, + client_server::unban_user_route, client_server::invite_user_route, client_server::set_room_visibility_route, client_server::get_room_visibility_route, diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 140fad8..bf9059c 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -77,3 +77,4 @@ User appears in user directory User directory correctly update on display name change User in shared private room does appear in user directory User in dir while user still shares private rooms +POST /rooms/:room_id/ban can ban a user From 32da76b9a21ca41a39f521419d3f29d6e672e5c4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 4 Jun 2020 18:27:57 +0200 Subject: [PATCH 0124/1727] feat: heroes, don't send notifications every time --- src/client_server.rs | 109 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 91 insertions(+), 18 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 07f1911..b20e7af 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -24,8 +24,8 @@ use ruma_client_api::{ keys::{self, claim_keys, get_keys, upload_keys}, media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ - forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, - kick_user, leave_room, ban_user, unban_user, + ban_user, forget_room, get_member_events, invite_user, join_room_by_id, + join_room_by_id_or_alias, kick_user, leave_room, unban_user, }, message::{create_message_event, get_message_events}, presence::set_presence, @@ -2151,7 +2151,9 @@ pub fn sync_route( let mut send_member_count = false; let mut send_full_state = false; + let mut send_notification_counts = false; for pdu in &pdus { + send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; if !send_full_state && pdu.state_key == Some(user_id.to_string()) { @@ -2171,7 +2173,85 @@ pub fn sync_route( } } - let notification_count = + let state = db.rooms.room_state(&room_id).unwrap(); + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + let joined_member_count = db.rooms.room_members(&room_id).count(); + let invited_member_count = db.rooms.room_members_invited(&room_id).count(); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + 
for hero in db + .rooms + .all_pdus(&room_id) + .unwrap() + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|pdu| pdu.kind == EventType::RoomMember) + .filter_map(|pdu| { + let content = serde_json::from_value::< + EventJson, + >(pdu.content.clone()) + .unwrap() + .deserialize() + .unwrap(); + + let current_content = serde_json::from_value::< + EventJson, + >( + state + .get(&( + EventType::RoomMember, + pdu.state_key.clone().expect( + "TODO: error handling. Is it really a state event?", + ), + )) + .expect("a user that joined once will always have a member event") + .content + .clone(), + ) + .unwrap() + .deserialize() + .unwrap(); + + // The membership was and still is invite or join + if matches!( + content.membership, + ruma_events::room::member::MembershipState::Join + | ruma_events::room::member::MembershipState::Invite + ) && matches!( + current_content.membership, + ruma_events::room::member::MembershipState::Join + | ruma_events::room::member::MembershipState::Invite + ) { + Some(pdu.state_key.unwrap()) + } else { + None + } + }) + { + if heroes.contains(&hero) || hero == user_id.to_string() { + continue; + } + + heroes.push(hero); + } + } + + ( + Some(joined_member_count), + Some(invited_member_count), + heroes, + ) + } else { + (None, None, Vec::new()) + }; + + let notification_count = if send_notification_counts { if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id).unwrap() { Some( (db.rooms @@ -2188,7 +2268,10 @@ pub fn sync_route( ) } else { None - }; + } + } else { + None + }; // They /sync response doesn't always return all messages, so we say the output is // limited unless there are enough events @@ -2247,17 +2330,9 @@ pub fn sync_route( .collect(), }), summary: sync_events::RoomSummary { - heroes: Vec::new(), - joined_member_count: if send_member_count { - Some((db.rooms.room_members(&room_id).count() as u32).into()) - } else { - None - }, - invited_member_count: if send_member_count { - Some((db.rooms.room_members_invited(&room_id).count() as u32).into()) - } else { - None - }, + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), }, unread_notifications: sync_events::UnreadNotificationsCount { highlight_count: None, @@ -2271,9 +2346,7 @@ pub fn sync_route( // TODO: state before timeline state: sync_events::State { events: if send_full_state { - db.rooms - .room_state(&room_id) - .unwrap() + state .into_iter() .map(|(_, pdu)| pdu.to_state_event()) .collect() From d404f902bf3bbd190a55c9ec2523cda3149b5e59 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 4 Jun 2020 22:36:48 +0200 Subject: [PATCH 0125/1727] fix: send presence too often --- src/client_server.rs | 47 ++++++++++++++----------------------- src/database.rs | 3 +-- src/database/global_edus.rs | 46 +++++++++++++----------------------- src/database/rooms/edus.rs | 14 +---------- 4 files changed, 36 insertions(+), 74 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index b20e7af..a7f8093 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -548,9 +548,8 @@ pub fn set_displayname_route( // Presence update db.global_edus - .update_globallatest( - &user_id, - EduEvent::Presence(ruma_events::presence::PresenceEvent { + .update_presence( + ruma_events::presence::PresenceEvent { content: ruma_events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, @@ -560,7 +559,7 @@ pub fn 
set_displayname_route( status_msg: None, }, sender: user_id.clone(), - }), + }, &db.globals, ) .unwrap(); @@ -640,9 +639,8 @@ pub fn set_avatar_url_route( // Presence update db.global_edus - .update_globallatest( - &user_id, - EduEvent::Presence(ruma_events::presence::PresenceEvent { + .update_presence( + ruma_events::presence::PresenceEvent { content: ruma_events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, @@ -652,7 +650,7 @@ pub fn set_avatar_url_route( status_msg: None, }, sender: user_id.clone(), - }), + }, &db.globals, ) .unwrap(); @@ -707,9 +705,8 @@ pub fn set_presence_route( let user_id = body.user_id.as_ref().expect("user is authenticated"); db.global_edus - .update_globallatest( - &user_id, - EduEvent::Presence(ruma_events::presence::PresenceEvent { + .update_presence( + ruma_events::presence::PresenceEvent { content: ruma_events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, @@ -719,7 +716,7 @@ pub fn set_presence_route( status_msg: body.status_msg.clone(), }, sender: user_id.clone(), - }), + }, &db.globals, ) .unwrap(); @@ -2435,24 +2432,16 @@ pub fn sync_route( presence: sync_events::Presence { events: db .global_edus - .globallatests_since(since) + .presence_since(since) .unwrap() - .filter_map(|edu| { - // Only look for presence events - if let Ok(mut edu) = EventJson::::from( - edu.unwrap().into_json(), - ) - .deserialize() - { - let timestamp = edu.content.last_active_ago.unwrap(); - edu.content.last_active_ago = Some( - js_int::UInt::try_from(utils::millis_since_unix_epoch()).unwrap() - - timestamp, - ); - Some(edu.into()) - } else { - None - } + .map(|edu| { + let mut edu = edu.unwrap().deserialize().unwrap(); + let timestamp = edu.content.last_active_ago.unwrap(); + let last_active_ago = js_int::UInt::try_from(utils::millis_since_unix_epoch()) + .unwrap() + - timestamp; + edu.content.last_active_ago = Some(last_active_ago); + edu.into() }) .collect(), }, diff --git a/src/database.rs b/src/database.rs index 7be0dc7..dc78ba9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -94,8 +94,7 @@ impl Database { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), }, global_edus: global_edus::GlobalEdus { - //globalallid_globalall: db.open_tree("globalallid_globalall").unwrap(), - globallatestid_globallatest: db.open_tree("globallatestid_globallatest").unwrap(), // Presence + presenceid_presence: db.open_tree("presenceid_presence").unwrap(), // Presence }, media: media::Media { mediaid_file: db.open_tree("mediaid_file").unwrap(), diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index f665260..5f7491b 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -1,67 +1,53 @@ use crate::Result; -use ruma_events::{collections::only::Event as EduEvent, EventJson}; -use ruma_identifiers::UserId; +use ruma_events::EventJson; pub struct GlobalEdus { //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count - pub(super) globallatestid_globallatest: sled::Tree, // Presence, GlobalLatestId = Count + UserId + pub(super) presenceid_presence: sled::Tree, // Presence, PresenceId = Count + UserId } impl GlobalEdus { /// Adds a global event which will be saved until a new event replaces it (e.g. presence updates). 
- pub fn update_globallatest( + pub fn update_presence( &self, - user_id: &UserId, - event: EduEvent, + presence: ruma_events::presence::PresenceEvent, globals: &super::globals::Globals, ) -> Result<()> { // Remove old entry if let Some(old) = self - .globallatestid_globallatest + .presenceid_presence .iter() .keys() .rev() .filter_map(|r| r.ok()) .find(|key| { - key.rsplit(|&b| b == 0xff).next().unwrap() == user_id.to_string().as_bytes() + key.rsplit(|&b| b == 0xff).next().unwrap() == presence.sender.to_string().as_bytes() }) { // This is the old global_latest - self.globallatestid_globallatest.remove(old)?; + self.presenceid_presence.remove(old)?; } - let mut global_latest_id = globals.next_count()?.to_be_bytes().to_vec(); - global_latest_id.push(0xff); - global_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + let mut presence_id = globals.next_count()?.to_be_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); - self.globallatestid_globallatest - .insert(global_latest_id, &*serde_json::to_string(&event)?)?; + self.presenceid_presence + .insert(presence_id, &*serde_json::to_string(&presence)?)?; Ok(()) } /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. - pub fn globallatests_since( + pub fn presence_since( &self, since: u64, - ) -> Result>>> { - let first_possible_edu = since.to_be_bytes().to_vec(); + ) -> Result>>> { + let first_possible_edu = (since + 1).to_be_bytes().to_vec(); // +1 so we don't send the event at since Ok(self - .globallatestid_globallatest + .presenceid_presence .range(&*first_possible_edu..) - // Skip the first pdu if it's exactly at since, because we sent that last time - .skip( - if self - .globallatestid_globallatest - .get(first_possible_edu)? - .is_some() - { - 1 - } else { - 0 - }, - ) .filter_map(|r| r.ok()) .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 0519b43..a3fa2bc 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -59,23 +59,11 @@ impl RoomEdus { prefix.push(0xff); let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&since.to_be_bytes()); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since Ok(self .roomlatestid_roomlatest .range(&*first_possible_edu..) - // Skip the first pdu if it's exactly at since, because we sent that last time - .skip( - if self - .roomlatestid_roomlatest - .get(first_possible_edu)? - .is_some() - { - 1 - } else { - 0 - }, - ) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) From 0a49b243228cfe30559e3f9897c1e6f72ed447e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Jun 2020 22:51:19 +0200 Subject: [PATCH 0126/1727] Update 'README.md' --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 772fb96..46aa298 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ See it in action: #### Can I try it out? -Yes! Just open and register on the default homeserver. You can also use and set the homeserver url to to connect. (BUG: Registration currently doesn't work on riot.im. [Fix](https://github.com/matrix-org/matrix-js-sdk/pull/1304) will come soon) +Yes! Just open Riot () and register on the `https://conduit.koesters.xyz:14004` homeserver. 
#### How can I deploy my own? From 08d19c3e0bcdb4ca4cd6972501fac92a5fdca398 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 5 Jun 2020 09:04:51 +0200 Subject: [PATCH 0127/1727] fix: broken dependencies --- Cargo.lock | 44 ++++++++++++++++++++++---------------------- Cargo.toml | 13 +++++++------ 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 365781d..bd8af81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,9 +26,9 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eb7f9ad01405feb3c1dac82463038945cf88eea4569acaf3ad662233496dd96" +checksum = "8f1c13101a3224fb178860ae372a031ce350bbd92d39968518f016744dde0bf7" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -163,7 +163,7 @@ dependencies = [ "ruma-api", "ruma-client-api", "ruma-common", - "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", + "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", "ruma-federation-api", "ruma-identifiers", "ruma-signatures", @@ -989,18 +989,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc93aeee735e60ecb40cf740eb319ff23eab1c5748abfdb5c180e4ce49f7791" +checksum = "ba3a1acf4a3e70849f8a673497ef984f043f95d2d8252dcdf74d54e6a1e47e8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" +checksum = "194e88048b71a3e02eb4ee36a6995fed9b8236c11a7bb9f7247a9d9835b3f265" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -1009,9 +1009,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9df32da11d84f3a7d70205549562966279adb900e080fad3dccd8e64afccf0ad" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" [[package]] name = "pin-utils" @@ -1160,11 +1160,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" +checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" dependencies = [ - "base64 0.11.0", + "base64 0.12.1", "bytes", "encoding_rs", "futures-core", @@ -1183,7 +1183,6 @@ dependencies = [ "pin-project-lite", "serde", "serde_urlencoded", - "time", "tokio", "tokio-tls", "url", @@ -1297,13 +1296,13 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma-client-api.git?rev=c2c5a3cea01b0544e5adb40f7ddae828627afd2c#c2c5a3cea01b0544e5adb40f7ddae828627afd2c" +source = "git+https://github.com/ruma/ruma-client-api.git?rev=632eb9d520028816c5fb7224bd0aca8d1e3793f1#632eb9d520028816c5fb7224bd0aca8d1e3793f1" dependencies = [ "http", "js_int", "ruma-api", "ruma-common", - "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", + "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", "ruma-identifiers", 
"ruma-serde", "serde", @@ -1313,24 +1312,25 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "253416d67b4bde281f2781424232a58a946a4f1c451d5f857a8d0705d58eaf2a" +checksum = "6cb49e83277e82c69cc258cedc7e68b3d72ba378f1cb6105cbfcc8831e422b4d" dependencies = [ "matches", "ruma-serde", "serde", "serde_json", + "strum", ] [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events?rev=7395f94#7395f940a7cf70c1598223570fb2b731a6a41707" +source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" dependencies = [ "js_int", "ruma-common", - "ruma-events-macros 0.21.3 (git+https://github.com/ruma/ruma-events?rev=7395f94)", + "ruma-events-macros 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", "ruma-identifiers", "ruma-serde", "serde", @@ -1356,7 +1356,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events?rev=7395f94#7395f940a7cf70c1598223570fb2b731a6a41707" +source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -2019,9 +2019,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winreg" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ "winapi 0.3.8", ] diff --git a/Cargo.toml b/Cargo.toml index 7f7ba5b..1bca0a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,14 +12,16 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } -http = "0.2.1" -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "c2c5a3cea01b0544e5adb40f7ddae828627afd2c" } +ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "632eb9d520028816c5fb7224bd0aca8d1e3793f1" } ruma-identifiers = { version = "0.16.2", features = ["rand"] } ruma-api = "0.16.1" -ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "7395f94" } +ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "c1ee72d" } ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "4cf4aa6ef74b25ad8c14d99d7774129f023df163" } +ruma-common = "0.1.3" + +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } +http = "0.2.1" log = "0.4.8" sled = "0.31.0" directories = "2.0.2" @@ -29,8 +31,7 @@ serde = "1.0.111" tokio = { version = "0.2.21", features = ["macros"] } rand = "0.7.3" rust-argon2 = "0.8.2" -reqwest = "=0.10.4" +reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" -ruma-common = "0.1.2" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } From 7526fd86026bf364f2e6556642d03a1009f1f88c Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 5 Jun 2020 18:19:26 +0200 Subject: [PATCH 0128/1727] Switch to the 
ruma meta-crate --- Cargo.lock | 75 +++++-------- Cargo.toml | 20 ++-- src/client_server.rs | 202 ++++++++++++++++++----------------- src/database/account_data.rs | 6 +- src/database/global_edus.rs | 7 +- src/database/globals.rs | 6 +- src/database/rooms.rs | 24 +++-- src/database/rooms/edus.rs | 12 ++- src/database/users.rs | 12 ++- src/error.rs | 4 +- src/pdu.rs | 14 +-- src/ruma_wrapper.rs | 5 +- src/server_server.rs | 10 +- src/utils.rs | 2 +- 14 files changed, 197 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd8af81..8527dba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -160,13 +160,7 @@ dependencies = [ "rand", "reqwest", "rocket", - "ruma-api", - "ruma-client-api", - "ruma-common", - "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", - "ruma-federation-api", - "ruma-identifiers", - "ruma-signatures", + "ruma", "rust-argon2 0.8.2", "serde", "serde_json", @@ -1266,11 +1260,24 @@ dependencies = [ "unicode-xid 0.2.0", ] +[[package]] +name = "ruma" +version = "0.1.0" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +dependencies = [ + "ruma-api", + "ruma-client-api", + "ruma-common", + "ruma-events", + "ruma-federation-api", + "ruma-identifiers", + "ruma-signatures", +] + [[package]] name = "ruma-api" version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ffdb7fb9cf6af2b1d0d8254d922560ecb70081d7e70931c9b996b6b4839db5" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1285,8 +1292,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52b82b4567b9af9b40a86f7778821c016ea961f55e4fee255f8f24bb28ee7452" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -1296,13 +1302,13 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma-client-api.git?rev=632eb9d520028816c5fb7224bd0aca8d1e3793f1#632eb9d520028816c5fb7224bd0aca8d1e3793f1" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "http", "js_int", "ruma-api", "ruma-common", - "ruma-events 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", + "ruma-events", "ruma-identifiers", "ruma-serde", "serde", @@ -1313,8 +1319,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb49e83277e82c69cc258cedc7e68b3d72ba378f1cb6105cbfcc8831e422b4d" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "matches", "ruma-serde", @@ -1330,7 +1335,7 @@ source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f dependencies = [ "js_int", "ruma-common", - "ruma-events-macros 0.21.3 (git+https://github.com/ruma/ruma-events?rev=c1ee72d)", + "ruma-events-macros", "ruma-identifiers", "ruma-serde", "serde", @@ -1338,21 +1343,6 @@ dependencies = [ "strum", ] -[[package]] -name = "ruma-events" -version = "0.21.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ddf82c2231e4c53443424df34e868e4b09c20de7a76780d47a133a3b3f8ad9c" -dependencies = [ - "js_int", - "ruma-common", - "ruma-events-macros 0.21.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - [[package]] name = "ruma-events-macros" version = "0.21.3" @@ -1363,26 +1353,16 @@ dependencies = [ "syn 1.0.30", ] -[[package]] -name = "ruma-events-macros" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88e5c5b242fe4ee0cc56879057353621196d0988dd359579cad8f43471e483b7" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", -] - [[package]] name = "ruma-federation-api" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma-federation-api.git?rev=4cf4aa6ef74b25ad8c14d99d7774129f023df163#4cf4aa6ef74b25ad8c14d99d7774129f023df163" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff655a4cb7d43b60b18e07a601889836c1c12854bb16f4c083826b664fdc55aa" dependencies = [ "js_int", "matches", "ruma-api", - "ruma-events 0.21.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-events", "ruma-identifiers", "ruma-serde", "serde", @@ -1402,8 +1382,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c71dabb8e2709ca4f59201cb72d7fe8d590e7e3f55feb348e851c18354938af" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "dtoa", "itoa", @@ -1416,7 +1395,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma-signatures.git?rev=1ca545cba8dfd43e0fc8e3c18e1311fb73390a97#1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" +source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" dependencies = [ "base64 0.12.1", "ring", diff --git a/Cargo.toml b/Cargo.toml index 1bca0a8..3c5c9fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,14 +12,6 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ruma-client-api = { git = "https://github.com/ruma/ruma-client-api.git", rev = "632eb9d520028816c5fb7224bd0aca8d1e3793f1" } -ruma-identifiers = { version = "0.16.2", features = ["rand"] } -ruma-api = "0.16.1" -ruma-events = { git = "https://github.com/ruma/ruma-events.git", rev = "c1ee72d" } -ruma-signatures = { git = "https://github.com/ruma/ruma-signatures.git", rev = "1ca545cba8dfd43e0fc8e3c18e1311fb73390a97" } -ruma-federation-api = { git = "https://github.com/ruma/ruma-federation-api.git", rev = "4cf4aa6ef74b25ad8c14d99d7774129f023df163" } -ruma-common = "0.1.3" - rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } http = "0.2.1" log = "0.4.8" @@ -35,3 +27,15 @@ reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } + +[dependencies.ruma] +git = "https://github.com/ruma/ruma" +rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +features = ["rand", "client-api", "federation-api"] + +# These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma +[patch.crates-io] +ruma-api = { git = 
"https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } +ruma-common = { git = "https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } +ruma-events = { git = "https://github.com/ruma/ruma-events", rev = "c1ee72d" } +ruma-serde = { git = "https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } diff --git a/src/client_server.rs b/src/client_server.rs index a7f8093..d7fe641 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -7,55 +7,57 @@ use std::{ use crate::{utils, Database, MatrixResult, Ruma}; use log::{debug, warn}; use rocket::{delete, get, options, post, put, State}; -use ruma_client_api::{ - error::{Error, ErrorKind}, - r0::{ - account::{get_username_availability, register}, - alias::{create_alias, delete_alias, get_alias}, - capabilities::get_capabilities, - config::{get_global_account_data, set_global_account_data}, - context::get_context, - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - directory::{ - self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, +use ruma::{ + api::client::{ + error::{Error, ErrorKind}, + r0::{ + account::{get_username_availability, register}, + alias::{create_alias, delete_alias, get_alias}, + capabilities::get_capabilities, + config::{get_global_account_data, set_global_account_data}, + context::get_context, + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, + directory::{ + self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, + filter::{self, create_filter, get_filter}, + keys::{self, claim_keys, get_keys, upload_keys}, + media::{create_content, get_content, get_content_thumbnail, get_media_config}, + membership::{ + ban_user, forget_room, get_member_events, invite_user, join_room_by_id, + join_room_by_id_or_alias, kick_user, leave_room, unban_user, + }, + message::{create_message_event, get_message_events}, + presence::set_presence, + profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, + push::{get_pushrules_all, set_pushrule, set_pushrule_enabled}, + read_marker::set_read_marker, + redact::redact_event, + room::{self, create_room}, + session::{get_login_types, login, logout}, + state::{ + create_state_event_for_empty_key, create_state_event_for_key, get_state_events, + get_state_events_for_empty_key, get_state_events_for_key, + }, + sync::sync_events, + thirdparty::get_protocols, + to_device::{self, send_event_to_device}, + typing::create_typing_event, + uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, + user_directory::search_users, }, - filter::{self, create_filter, get_filter}, - keys::{self, claim_keys, get_keys, upload_keys}, - media::{create_content, get_content, get_content_thumbnail, get_media_config}, - membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, kick_user, leave_room, unban_user, - }, - message::{create_message_event, get_message_events}, - presence::set_presence, - profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, - }, - push::{get_pushrules_all, set_pushrule, set_pushrule_enabled}, - read_marker::set_read_marker, - redact::redact_event, - room::{self, create_room}, - session::{get_login_types, login, logout}, - state::{ - create_state_event_for_empty_key, create_state_event_for_key, get_state_events, - 
get_state_events_for_empty_key, get_state_events_for_key, - }, - sync::sync_events, - thirdparty::get_protocols, - to_device::{self, send_event_to_device}, - typing::create_typing_event, - uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, - user_directory::search_users, + unversioned::get_supported_versions, }, - unversioned::get_supported_versions, + events::{ + collections::only::Event as EduEvent, + room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, + EventJson, EventType, + }, + identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}, }; -use ruma_events::{ - collections::only::Event as EduEvent, - room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, - EventJson, EventType, -}; -use ruma_identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}; use serde_json::{json, value::RawValue}; const GUEST_NAME_LENGTH: usize = 10; @@ -197,12 +199,12 @@ pub fn register_route( None, &user_id, &EventType::PushRules, - serde_json::to_value(ruma_events::push_rules::PushRulesEvent { - content: ruma_events::push_rules::PushRulesEventContent { - global: ruma_events::push_rules::Ruleset { + serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::events::push_rules::Ruleset { content: vec![], - override_: vec![ruma_events::push_rules::ConditionalPushRule { - actions: vec![ruma_events::push_rules::Action::DontNotify], + override_: vec![ruma::events::push_rules::ConditionalPushRule { + actions: vec![ruma::events::push_rules::Action::DontNotify], default: true, enabled: false, rule_id: ".m.rule.master".to_owned(), @@ -210,17 +212,17 @@ pub fn register_route( }], room: vec![], sender: vec![], - underride: vec![ruma_events::push_rules::ConditionalPushRule { + underride: vec![ruma::events::push_rules::ConditionalPushRule { actions: vec![ - ruma_events::push_rules::Action::Notify, - ruma_events::push_rules::Action::SetTweak( - ruma_common::push::Tweak::Sound("default".to_owned()), + ruma::events::push_rules::Action::Notify, + ruma::events::push_rules::Action::SetTweak( + ruma::push::Tweak::Sound("default".to_owned()), ), ], default: true, enabled: true, rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma_events::push_rules::PushCondition::EventMatch { + conditions: vec![ruma::events::push_rules::PushCondition::EventMatch { key: "type".to_owned(), pattern: "m.room.message".to_owned(), }], @@ -522,7 +524,7 @@ pub fn set_displayname_route( room_id.clone(), user_id.clone(), EventType::RoomMember, - serde_json::to_value(ruma_events::room::member::MemberEventContent { + serde_json::to_value(ruma::events::room::member::MemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_value::>( db.rooms @@ -549,13 +551,13 @@ pub fn set_displayname_route( // Presence update db.global_edus .update_presence( - ruma_events::presence::PresenceEvent { - content: ruma_events::presence::PresenceEventContent { + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, displayname: db.users.displayname(&user_id).unwrap(), last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma_events::presence::PresenceState::Online, + presence: ruma::events::presence::PresenceState::Online, status_msg: None, }, sender: user_id.clone(), @@ -613,7 +615,7 @@ pub fn set_avatar_url_route( 
room_id.clone(), user_id.clone(), EventType::RoomMember, - serde_json::to_value(ruma_events::room::member::MemberEventContent { + serde_json::to_value(ruma::events::room::member::MemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_value::>( db.rooms @@ -640,13 +642,13 @@ pub fn set_avatar_url_route( // Presence update db.global_edus .update_presence( - ruma_events::presence::PresenceEvent { - content: ruma_events::presence::PresenceEventContent { + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, displayname: db.users.displayname(&user_id).unwrap(), last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma_events::presence::PresenceState::Online, + presence: ruma::events::presence::PresenceState::Online, status_msg: None, }, sender: user_id.clone(), @@ -706,8 +708,8 @@ pub fn set_presence_route( db.global_edus .update_presence( - ruma_events::presence::PresenceEvent { - content: ruma_events::presence::PresenceEventContent { + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { avatar_url: db.users.avatar_url(&user_id).unwrap(), currently_active: None, displayname: db.users.displayname(&user_id).unwrap(), @@ -848,8 +850,8 @@ pub fn set_read_marker_route( Some(&body.room_id), &user_id, &EventType::FullyRead, - serde_json::to_value(ruma_events::fully_read::FullyReadEvent { - content: ruma_events::fully_read::FullyReadEventContent { + serde_json::to_value(ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { event_id: body.fully_read.clone(), }, room_id: Some(body.room_id.clone()), @@ -877,14 +879,14 @@ pub fn set_read_marker_route( let mut user_receipts = BTreeMap::new(); user_receipts.insert( user_id.clone(), - ruma_events::receipt::Receipt { + ruma::events::receipt::Receipt { ts: Some(SystemTime::now()), }, ); let mut receipt_content = BTreeMap::new(); receipt_content.insert( event.clone(), - ruma_events::receipt::Receipts { + ruma::events::receipt::Receipts { read: Some(user_receipts), }, ); @@ -894,7 +896,7 @@ pub fn set_read_marker_route( .roomlatest_update( &user_id, &body.room_id, - EduEvent::Receipt(ruma_events::receipt::ReceiptEvent { + EduEvent::Receipt(ruma::events::receipt::ReceiptEvent { content: receipt_content, room_id: None, // None because it can be inferred }), @@ -977,7 +979,7 @@ pub fn create_room_route( room_id.clone(), user_id.clone(), EventType::RoomCreate, - serde_json::to_value(ruma_events::room::create::CreateEventContent { + serde_json::to_value(ruma::events::room::create::CreateEventContent { creator: user_id.clone(), federate: body.creation_content.as_ref().map_or(true, |c| c.federate), predecessor: body @@ -1033,7 +1035,7 @@ pub fn create_room_route( serde_json::from_str(power_levels.json().get()) .expect("TODO: handle. 
we hope the client sends a valid power levels json") } else { - serde_json::to_value(ruma_events::room::power_levels::PowerLevelsEventContent { + serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { ban: 50.into(), events: BTreeMap::new(), events_default: 0.into(), @@ -1043,7 +1045,7 @@ pub fn create_room_route( state_default: 50.into(), users, users_default: 0.into(), - notifications: ruma_events::room::power_levels::NotificationPowerLevels { + notifications: ruma::events::room::power_levels::NotificationPowerLevels { room: 50.into(), }, }) @@ -1159,7 +1161,7 @@ pub fn create_room_route( user_id.clone(), EventType::RoomName, serde_json::to_value( - ruma_events::room::name::NameEventContent::new(name.clone()).unwrap(), + ruma::events::room::name::NameEventContent::new(name.clone()).unwrap(), ) .unwrap(), None, @@ -1176,7 +1178,7 @@ pub fn create_room_route( room_id.clone(), user_id.clone(), EventType::RoomTopic, - serde_json::to_value(ruma_events::room::topic::TopicEventContent { + serde_json::to_value(ruma::events::room::topic::TopicEventContent { topic: topic.clone(), }) .unwrap(), @@ -1433,7 +1435,7 @@ pub fn leave_room_route( let state = db.rooms.room_state(&body.room_id).unwrap(); let mut event = - serde_json::from_value::>( + serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) .unwrap() // TODO: error handling @@ -1444,7 +1446,7 @@ pub fn leave_room_route( .deserialize() .unwrap(); - event.membership = ruma_events::room::member::MembershipState::Leave; + event.membership = ruma::events::room::member::MembershipState::Leave; db.rooms .append_pdu( @@ -1472,7 +1474,7 @@ pub fn kick_user_route( let state = db.rooms.room_state(&body.room_id).unwrap(); let mut event = - serde_json::from_value::>( + serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) .unwrap() // TODO: error handling @@ -1483,7 +1485,7 @@ pub fn kick_user_route( .deserialize() .unwrap(); - event.membership = ruma_events::room::member::MembershipState::Leave; + event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason db.rooms @@ -1512,7 +1514,7 @@ pub fn ban_user_route( let state = db.rooms.room_state(&body.room_id).unwrap(); let mut event = - serde_json::from_value::>( + serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) .unwrap() // TODO: error handling @@ -1523,7 +1525,7 @@ pub fn ban_user_route( .deserialize() .unwrap(); - event.membership = ruma_events::room::member::MembershipState::Ban; + event.membership = ruma::events::room::member::MembershipState::Ban; // TODO: reason db.rooms @@ -1552,7 +1554,7 @@ pub fn unban_user_route( let state = db.rooms.room_state(&body.room_id).unwrap(); let mut event = - serde_json::from_value::>( + serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) .unwrap() // TODO: error handling @@ -1563,7 +1565,7 @@ pub fn unban_user_route( .deserialize() .unwrap(); - event.membership = ruma_events::room::member::MembershipState::Leave; + event.membership = ruma::events::room::member::MembershipState::Leave; db.rooms .append_pdu( @@ -1727,7 +1729,7 @@ pub async fn get_public_rooms_filtered_route( aliases: Vec::new(), canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).and_then(|s| { serde_json::from_value::< - EventJson, + EventJson, >(s.content.clone()) .unwrap() .deserialize() @@ -1735,7 +1737,7 @@ pub async fn get_public_rooms_filtered_route( .alias }), name: 
state.get(&(EventType::RoomName, "".to_owned())).map(|s| { - serde_json::from_value::>( + serde_json::from_value::>( s.content.clone(), ) .unwrap() @@ -1749,7 +1751,7 @@ pub async fn get_public_rooms_filtered_route( room_id, topic: state.get(&(EventType::RoomTopic, "".to_owned())).map(|s| { serde_json::from_value::< - EventJson, + EventJson, >(s.content.clone()) .unwrap() .deserialize() @@ -1758,7 +1760,7 @@ pub async fn get_public_rooms_filtered_route( }), world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(false, |s| { serde_json::from_value::< - EventJson, + EventJson, >(s.content.clone()) .unwrap() .deserialize() @@ -1767,7 +1769,7 @@ pub async fn get_public_rooms_filtered_route( }), guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(false, |s| { serde_json::from_value::< - EventJson, + EventJson, >(s.content.clone()) .unwrap() .deserialize() @@ -1776,7 +1778,7 @@ pub async fn get_public_rooms_filtered_route( }), avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map(|s| { serde_json::from_value::< - EventJson, + EventJson, >(s.content.clone()) .unwrap() .deserialize() @@ -1794,10 +1796,10 @@ pub async fn get_public_rooms_filtered_route( &server_server::send_request( &db, "privacytools.io".to_owned(), - ruma_federation_api::v1::get_public_rooms::Request { + ruma::api::federation::v1::get_public_rooms::Request { limit: Some(20_u32.into()), since: None, - room_network: ruma_federation_api::v1::get_public_rooms::RoomNetwork::Matrix, + room_network: ruma::api::federation::v1::get_public_rooms::RoomNetwork::Matrix, }, ) .await @@ -2155,12 +2157,12 @@ pub fn sync_route( send_member_count = true; if !send_full_state && pdu.state_key == Some(user_id.to_string()) { let content = serde_json::from_value::< - EventJson, + EventJson, >(pdu.content.clone()) .unwrap() .deserialize() .unwrap(); - if content.membership == ruma_events::room::member::MembershipState::Join { + if content.membership == ruma::events::room::member::MembershipState::Join { send_full_state = true; // Both send_member_count and send_full_state are set. 
There's nothing more // to do @@ -2191,14 +2193,14 @@ pub fn sync_route( .filter(|pdu| pdu.kind == EventType::RoomMember) .filter_map(|pdu| { let content = serde_json::from_value::< - EventJson, + EventJson, >(pdu.content.clone()) .unwrap() .deserialize() .unwrap(); let current_content = serde_json::from_value::< - EventJson, + EventJson, >( state .get(&( @@ -2218,12 +2220,12 @@ pub fn sync_route( // The membership was and still is invite or join if matches!( content.membership, - ruma_events::room::member::MembershipState::Join - | ruma_events::room::member::MembershipState::Invite + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite ) && matches!( current_content.membership, - ruma_events::room::member::MembershipState::Join - | ruma_events::room::member::MembershipState::Invite + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite ) { Some(pdu.state_key.unwrap()) } else { diff --git a/src/database/account_data.rs b/src/database/account_data.rs index d1114ec..f09b4c5 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use ruma_events::{collections::only::Event as EduEvent, EventJson, EventType}; -use ruma_identifiers::{RoomId, UserId}; +use ruma::{ + events::{collections::only::Event as EduEvent, EventJson, EventType}, + identifiers::{RoomId, UserId}, +}; use std::{collections::HashMap, convert::TryFrom}; pub struct AccountData { diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index 5f7491b..e9c6d23 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma_events::EventJson; +use ruma::events::EventJson; pub struct GlobalEdus { //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count @@ -10,7 +10,7 @@ impl GlobalEdus { /// Adds a global event which will be saved until a new event replaces it (e.g. presence updates). pub fn update_presence( &self, - presence: ruma_events::presence::PresenceEvent, + presence: ruma::events::presence::PresenceEvent, globals: &super::globals::Globals, ) -> Result<()> { // Remove old entry @@ -42,7 +42,8 @@ impl GlobalEdus { pub fn presence_since( &self, since: u64, - ) -> Result>>> { + ) -> Result>>> + { let first_possible_edu = (since + 1).to_be_bytes().to_vec(); // +1 so we don't send the event at since Ok(self diff --git a/src/database/globals.rs b/src/database/globals.rs index eb20e37..93d5794 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -5,13 +5,13 @@ pub const COUNTER: &str = "c"; pub struct Globals { pub(super) globals: sled::Tree, server_name: String, - keypair: ruma_signatures::Ed25519KeyPair, + keypair: ruma::signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, } impl Globals { pub fn load(globals: sled::Tree, server_name: String) -> Self { - let keypair = ruma_signatures::Ed25519KeyPair::new( + let keypair = ruma::signatures::Ed25519KeyPair::new( &*globals .update_and_fetch("keypair", utils::generate_keypair) .unwrap() @@ -34,7 +34,7 @@ impl Globals { } /// Returns this server's keypair. 
- pub fn keypair(&self) -> &ruma_signatures::Ed25519KeyPair { + pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { &self.keypair } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 44cd202..5d9da48 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -4,15 +4,17 @@ pub use edus::RoomEdus; use crate::{utils, Error, PduEvent, Result}; use log::error; -use ruma_events::{ - room::{ - join_rules, member, - power_levels::{self, PowerLevelsEventContent}, - redaction, +use ruma::{ + events::{ + room::{ + join_rules, member, + power_levels::{self, PowerLevelsEventContent}, + redaction, + }, + EventJson, EventType, }, - EventJson, EventType, + identifiers::{EventId, RoomAliasId, RoomId, UserId}, }; -use ruma_identifiers::{EventId, RoomAliasId, RoomId, UserId}; use sled::IVec; use std::{ collections::{BTreeMap, HashMap}, @@ -203,7 +205,7 @@ impl Rooms { users: BTreeMap::new(), users_default: 0.into(), notifications: - ruma_events::room::power_levels::NotificationPowerLevels { + ruma::events::room::power_levels::NotificationPowerLevels { room: 50.into(), }, }) @@ -419,7 +421,7 @@ impl Rooms { auth_events: Vec::new(), redacts: redacts.clone(), unsigned, - hashes: ruma_federation_api::EventHash { + hashes: ruma::api::federation::EventHash { sha256: "aaa".to_owned(), }, signatures: HashMap::new(), @@ -428,13 +430,13 @@ impl Rooms { // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma_signatures::reference_hash(&serde_json::to_value(&pdu)?) + ruma::signatures::reference_hash(&serde_json::to_value(&pdu)?) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are correct"); let mut pdu_json = serde_json::to_value(&pdu)?; - ruma_signatures::hash_and_sign_event( + ruma::signatures::hash_and_sign_event( globals.server_name(), globals.keypair(), &mut pdu_json, diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index a3fa2bc..385ed7a 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use ruma_events::{collections::only::Event as EduEvent, EventJson}; -use ruma_identifiers::{RoomId, UserId}; +use ruma::{ + events::{collections::only::Event as EduEvent, EventJson}, + identifiers::{RoomId, UserId}, +}; use std::convert::TryFrom; pub struct RoomEdus { @@ -190,7 +192,7 @@ impl RoomEdus { } /// Returns an iterator over all active events (e.g. typing notifications). 
- pub fn roomactives_all(&self, room_id: &RoomId) -> Result { + pub fn roomactives_all(&self, room_id: &RoomId) -> Result { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -205,8 +207,8 @@ impl RoomEdus { user_ids.push(user_id?); } - Ok(ruma_events::typing::TypingEvent { - content: ruma_events::typing::TypingEventContent { user_ids }, + Ok(ruma::events::typing::TypingEvent { + content: ruma::events::typing::TypingEventContent { user_ids }, room_id: None, // Can be inferred }) } diff --git a/src/database/users.rs b/src/database/users.rs index efd420a..8893b10 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,11 +1,13 @@ use crate::{utils, Error, Result}; use js_int::UInt; -use ruma_client_api::r0::{ - device::Device, - keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, +use ruma::{ + api::client::r0::{ + device::Device, + keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, + }, + events::{to_device::AnyToDeviceEvent, EventJson, EventType}, + identifiers::{DeviceId, UserId}, }; -use ruma_events::{to_device::AnyToDeviceEvent, EventJson, EventType}; -use ruma_identifiers::{DeviceId, UserId}; use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; pub struct Users { diff --git a/src/error.rs b/src/error.rs index a91d409..3561d9e 100644 --- a/src/error.rs +++ b/src/error.rs @@ -22,12 +22,12 @@ pub enum Error { #[error("tried to parse invalid identifier")] RumaIdentifierError { #[from] - source: ruma_identifiers::Error, + source: ruma::identifiers::Error, }, #[error("tried to parse invalid event")] RumaEventError { #[from] - source: ruma_events::InvalidEvent, + source: ruma::events::InvalidEvent, }, #[error("could not generate image")] ImageError { diff --git a/src/pdu.rs b/src/pdu.rs index 1249642..6ee0fd5 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,11 +1,13 @@ use js_int::UInt; -use ruma_events::{ - collections::all::{RoomEvent, StateEvent}, - stripped::AnyStrippedStateEvent, - EventJson, EventType, +use ruma::{ + api::federation::EventHash, + events::{ + collections::all::{RoomEvent, StateEvent}, + stripped::AnyStrippedStateEvent, + EventJson, EventType, + }, + identifiers::{EventId, RoomId, UserId}, }; -use ruma_federation_api::EventHash; -use ruma_identifiers::{EventId, RoomId, UserId}; use serde::{Deserialize, Serialize}; use serde_json::json; use std::collections::HashMap; diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index eff3a86..47c8967 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -7,8 +7,7 @@ use rocket::{ Outcome::*, Request, State, }; -use ruma_api::Endpoint; -use ruma_identifiers::UserId; +use ruma::{api::Endpoint, identifiers::UserId}; use std::{convert::TryInto, io::Cursor, ops::Deref}; use tokio::io::AsyncReadExt; @@ -109,7 +108,7 @@ impl Deref for Ruma { } /// This struct converts ruma responses into rocket http responses. 
-pub struct MatrixResult(pub std::result::Result); +pub struct MatrixResult(pub std::result::Result); impl TryInto>> for MatrixResult where diff --git a/src/server_server.rs b/src/server_server.rs index 6aa1e99..99d75c4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,9 +2,9 @@ use crate::{Database, MatrixResult}; use http::header::{HeaderValue, AUTHORIZATION}; use log::error; use rocket::{get, response::content::Json, State}; -use ruma_api::Endpoint; -use ruma_client_api::error::Error; -use ruma_federation_api::discovery::{ +use ruma::api::Endpoint; +use ruma::api::client::error::Error; +use ruma::api::federation::discovery::{ get_server_keys::v2 as get_server_keys, get_server_version::v1 as get_server_version, }; use serde_json::json; @@ -61,7 +61,7 @@ pub async fn send_request( request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); - ruma_signatures::sign_json( + ruma::signatures::sign_json( db.globals.server_name(), db.globals.keypair(), &mut request_json, @@ -168,7 +168,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { .body(), ) .unwrap(); - ruma_signatures::sign_json( + ruma::signatures::sign_json( db.globals.server_name(), db.globals.keypair(), &mut response, diff --git a/src/utils.rs b/src/utils.rs index 5b41bd4..8f3b4ad 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,7 +29,7 @@ pub fn increment(old: Option<&[u8]>) -> Option> { pub fn generate_keypair(old: Option<&[u8]>) -> Option> { Some( old.map(|s| s.to_vec()) - .unwrap_or_else(|| ruma_signatures::Ed25519KeyPair::generate().unwrap()), + .unwrap_or_else(|| ruma::signatures::Ed25519KeyPair::generate().unwrap()), ) } From 8eedc1256719c97904b4d0dea5b56a6c543923a3 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 6 Jun 2020 15:23:37 +0200 Subject: [PATCH 0129/1727] fix: edus will not be removed after timeout --- src/database/rooms/edus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 385ed7a..5d04639 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -162,7 +162,7 @@ impl RoomEdus { }) { // This is an outdated edu (time > timestamp) - self.roomlatestid_roomlatest.remove(outdated_edu)?; + self.roomactiveid_userid.remove(outdated_edu)?; found_outdated = true; } From 9269f009dbfcd167efb237df4d7dc3d9c8fd090e Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Sat, 6 Jun 2020 17:23:14 +0200 Subject: [PATCH 0130/1727] Allow client to load history on newly joined rooms On /sync, check if a room is a new join between `since` parameter and now. If it's a newly joined room, set the limited flag to true, which will force the client to load room messages via the `/messages` endpoint. On `master`, I could not reproduce the messages not showing to others when joining after being invited. 
Fixes #39 --- src/client_server.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index d7fe641..84e3cc8 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2149,13 +2149,13 @@ pub fn sync_route( .collect::>(); let mut send_member_count = false; - let mut send_full_state = false; + let mut joined_since_last_sync = false; let mut send_notification_counts = false; for pdu in &pdus { send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; - if !send_full_state && pdu.state_key == Some(user_id.to_string()) { + if !joined_since_last_sync && pdu.state_key == Some(user_id.to_string()) { let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) @@ -2163,8 +2163,8 @@ pub fn sync_route( .deserialize() .unwrap(); if content.membership == ruma::events::room::member::MembershipState::Join { - send_full_state = true; - // Both send_member_count and send_full_state are set. There's nothing more + joined_since_last_sync = true; + // Both send_member_count and joined_since_last_sync are set. There's nothing more // to do break; } @@ -2338,13 +2338,13 @@ pub fn sync_route( notification_count, }, timeline: sync_events::Timeline { - limited: if limited { Some(limited) } else { None }, + limited: if limited || joined_since_last_sync { Some(true) } else { None }, prev_batch, events: room_events, }, // TODO: state before timeline state: sync_events::State { - events: if send_full_state { + events: if joined_since_last_sync { state .into_iter() .map(|(_, pdu)| pdu.to_state_event()) From cc383ac932f4f3fbbd42b4c46bb007d02989f4c6 Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Sat, 6 Jun 2020 19:48:01 +0200 Subject: [PATCH 0131/1727] Cargo fmt --- src/client_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 84e3cc8..057b473 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2338,7 +2338,11 @@ pub fn sync_route( notification_count, }, timeline: sync_events::Timeline { - limited: if limited || joined_since_last_sync { Some(true) } else { None }, + limited: if limited || joined_since_last_sync { + Some(true) + } else { + None + }, prev_batch, events: room_events, }, From c85d363d71c7f578917e61f2ab3c635b46f69a7e Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 6 Jun 2020 18:44:50 +0200 Subject: [PATCH 0132/1727] feat: user interactive authentication --- src/client_server.rs | 98 ++++++++++++++++++++++---- src/database.rs | 5 ++ src/database/uiaa.rs | 161 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 249 insertions(+), 15 deletions(-) create mode 100644 src/database/uiaa.rs diff --git a/src/client_server.rs b/src/client_server.rs index 057b473..03be7bf 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -62,9 +62,9 @@ use serde_json::{json, value::RawValue}; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; -const SESSION_ID_LENGTH: usize = 256; const TOKEN_LENGTH: usize = 256; const MXC_LENGTH: usize = 256; +const SESSION_ID_LENGTH: usize = 256; #[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> MatrixResult { @@ -117,18 +117,6 @@ pub fn register_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { - if body.auth.is_none() { - return MatrixResult(Err(UiaaResponse::AuthResponse(UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.dummy".to_owned()], - }], - completed: vec![], - 
params: RawValue::from_string("{}".to_owned()).unwrap(), - session: Some(utils::random_string(SESSION_ID_LENGTH)), - auth_error: None, - }))); - } - // Validate user id let user_id = match UserId::parse_with_server_name( body.username @@ -161,6 +149,32 @@ pub fn register_route( }))); } + // UIAA + let uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.dummy".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db + .uiaa + .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .unwrap(); + if !worked { + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + // Success! + } else { + db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); + + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + let password = body.password.clone().unwrap_or_default(); if let Ok(hash) = utils::calculate_hash(&password) { @@ -2867,8 +2881,35 @@ pub fn delete_device_route( db: State<'_, Database>, body: Ruma, device_id: DeviceId, -) -> MatrixResult { +) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + + // UIAA + let uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db + .uiaa + .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .unwrap(); + if !worked { + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + // Success! + } else { + db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); + + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + db.users.remove_device(&user_id, &device_id).unwrap(); MatrixResult(Ok(delete_device::Response)) @@ -2878,8 +2919,35 @@ pub fn delete_device_route( pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + + // UIAA + let uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: Some(utils::random_string(SESSION_ID_LENGTH)), + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db + .uiaa + .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .unwrap(); + if !worked { + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + // Success! 
+ } else { + db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); + + return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + } + for device_id in &body.devices { db.users.remove_device(&user_id, &device_id).unwrap() } diff --git a/src/database.rs b/src/database.rs index dc78ba9..492f880 100644 --- a/src/database.rs +++ b/src/database.rs @@ -3,6 +3,7 @@ pub(self) mod global_edus; pub(self) mod globals; pub(self) mod media; pub(self) mod rooms; +pub(self) mod uiaa; pub(self) mod users; use directories::ProjectDirs; @@ -13,6 +14,7 @@ use rocket::Config; pub struct Database { pub globals: globals::Globals, pub users: users::Users, + pub uiaa: uiaa::Uiaa, pub rooms: rooms::Rooms, pub account_data: account_data::AccountData, pub global_edus: global_edus::GlobalEdus, @@ -66,6 +68,9 @@ impl Database { devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid").unwrap(), todeviceid_events: db.open_tree("todeviceid_events").unwrap(), }, + uiaa: uiaa::Uiaa { + userdeviceid_uiaainfo: db.open_tree("userdeviceid_uiaainfo").unwrap(), + }, rooms: rooms::Rooms { edus: rooms::RoomEdus { roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), // "Private" read receipt diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs new file mode 100644 index 0000000..f1de476 --- /dev/null +++ b/src/database/uiaa.rs @@ -0,0 +1,161 @@ +use crate::{utils, Error, Result}; +use js_int::UInt; +use log::debug; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::{ + device::Device, + keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, + uiaa::{AuthData, AuthFlow, UiaaInfo, UiaaResponse}, + }, + }, + events::{to_device::AnyToDeviceEvent, EventJson, EventType}, + identifiers::{DeviceId, UserId}, +}; +use serde_json::value::RawValue; +use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; + +pub struct Uiaa { + pub(super) userdeviceid_uiaainfo: sled::Tree, // User-interactive authentication +} + +impl Uiaa { + /// Creates a new Uiaa session. Make sure the session token is unique. + pub fn create(&self, user_id: &UserId, device_id: &str, uiaainfo: &UiaaInfo) -> Result<()> { + self.update_uiaa_session(user_id, device_id, Some(uiaainfo)) + } + + pub fn try_auth( + &self, + user_id: &UserId, + device_id: &DeviceId, + auth: &AuthData, + uiaainfo: &UiaaInfo, + users: &super::users::Users, + globals: &super::globals::Globals, + ) -> Result<(bool, UiaaInfo)> { + if let AuthData::DirectRequest { + kind, + session, + auth_parameters, + } = &auth + { + let mut uiaainfo = session + .as_ref() + .map(|session| { + Ok::<_, Error>(self.get_uiaa_session(&user_id, &"".to_owned(), session)?) + }) + .unwrap_or(Ok(uiaainfo.clone()))?; + + // Find out what the user completed + match &**kind { + "m.login.password" => { + if auth_parameters["identifier"]["type"] != "m.id.user" { + panic!("identifier not supported"); + } + + let user_id = UserId::parse_with_server_name( + auth_parameters["identifier"]["user"].as_str().unwrap(), + globals.server_name(), + )?; + let password = auth_parameters["password"].as_str().unwrap(); + + // Check if password is correct + if let Some(hash) = users.password_hash(&user_id)? { + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + + if !hash_matches { + debug!("Invalid password."); + uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { + kind: ErrorKind::Forbidden, + message: "Invalid username or password.".to_owned(), + }); + return Ok((false, uiaainfo)); + } + } + + // Password was correct! 
Let's add it to `completed` + uiaainfo.completed.push("m.login.password".to_owned()); + } + "m.login.dummy" => { + uiaainfo.completed.push("m.login.dummy".to_owned()); + } + k => panic!("type not supported: {}", k), + } + + // Check if a flow now succeeds + let mut completed = false; + 'flows: for flow in &mut uiaainfo.flows { + for stage in &flow.stages { + if !uiaainfo.completed.contains(stage) { + continue 'flows; + } + } + // We didn't break, so this flow succeeded! + completed = true; + } + + if !completed { + self.update_uiaa_session(user_id, device_id, Some(&uiaainfo))?; + return Ok((false, uiaainfo)); + } + + // UIAA was successful! Remove this session and return true + self.update_uiaa_session(user_id, device_id, None)?; + return Ok((true, uiaainfo)); + } else { + panic!("FallbackAcknowledgement is not supported yet"); + } + } + + fn update_uiaa_session( + &self, + user_id: &UserId, + device_id: &str, + uiaainfo: Option<&UiaaInfo>, + ) -> Result<()> { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + if let Some(uiaainfo) = uiaainfo { + self.userdeviceid_uiaainfo + .insert(&userdeviceid, &*serde_json::to_string(&uiaainfo)?)?; + } else { + self.userdeviceid_uiaainfo.remove(&userdeviceid)?; + } + + Ok(()) + } + + fn get_uiaa_session( + &self, + user_id: &UserId, + device_id: &str, + session: &str, + ) -> Result { + let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + userdeviceid.push(0xff); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + let uiaainfo = serde_json::from_slice::( + &self + .userdeviceid_uiaainfo + .get(&userdeviceid)? + .ok_or(Error::BadRequest("session does not exist"))?, + )?; + + if uiaainfo + .session + .as_ref() + .filter(|&s| s == session) + .is_none() + { + return Err(Error::BadRequest("wrong session token")); + } + + Ok(uiaainfo) + } +} From 0067f49d525b4b99ce1c783d313fb09cb0b071c9 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 6 Jun 2020 19:02:31 +0200 Subject: [PATCH 0133/1727] feat: close registration with ROCKET_REGISTRATION_DISABLED=true --- src/client_server.rs | 8 ++++++++ src/database.rs | 8 +++----- src/database/globals.rs | 24 ++++++++++++++++-------- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 03be7bf..c190ef7 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -117,6 +117,14 @@ pub fn register_route( db: State<'_, Database>, body: Ruma, ) -> MatrixResult { + if db.globals.registration_disabled() { + return MatrixResult(Err(UiaaResponse::MatrixError(Error { + kind: ErrorKind::Unknown, + message: "Registration has been disabled.".to_owned(), + status_code: http::StatusCode::FORBIDDEN, + }))); + } + // Validate user id let user_id = match UserId::parse_with_server_name( body.username diff --git a/src/database.rs b/src/database.rs index 492f880..34af8fc 100644 --- a/src/database.rs +++ b/src/database.rs @@ -7,6 +7,7 @@ pub(self) mod uiaa; pub(self) mod users; use directories::ProjectDirs; +use log::info; use std::fs::remove_dir_all; use rocket::Config; @@ -49,13 +50,10 @@ impl Database { }); let db = sled::open(&path).unwrap(); - log::info!("Opened sled database at {}", path); + info!("Opened sled database at {}", path); Self { - globals: globals::Globals::load( - db.open_tree("global").unwrap(), - server_name.to_owned(), - ), + globals: globals::Globals::load(db.open_tree("global").unwrap(), config), users: users::Users { 
userid_password: db.open_tree("userid_password").unwrap(), userid_displayname: db.open_tree("userid_displayname").unwrap(), diff --git a/src/database/globals.rs b/src/database/globals.rs index 93d5794..08ab411 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,13 +4,14 @@ pub const COUNTER: &str = "c"; pub struct Globals { pub(super) globals: sled::Tree, - server_name: String, keypair: ruma::signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, + server_name: String, + registration_disabled: bool, } impl Globals { - pub fn load(globals: sled::Tree, server_name: String) -> Self { + pub fn load(globals: sled::Tree, config: &rocket::Config) -> Self { let keypair = ruma::signatures::Ed25519KeyPair::new( &*globals .update_and_fetch("keypair", utils::generate_keypair) @@ -22,17 +23,16 @@ impl Globals { Self { globals, - server_name, keypair, reqwest_client: reqwest::Client::new(), + server_name: config + .get_str("server_name") + .unwrap_or("localhost") + .to_owned(), + registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), } } - /// Returns the server_name of the server. - pub fn server_name(&self) -> &str { - &self.server_name - } - /// Returns this server's keypair. pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { &self.keypair @@ -58,4 +58,12 @@ impl Globals { .get(COUNTER)? .map_or(0_u64, |bytes| utils::u64_from_bytes(&bytes))) } + + pub fn server_name(&self) -> &str { + &self.server_name + } + + pub fn registration_disabled(&self) -> bool { + self.registration_disabled + } } From b4d65ab67d95485f9762363492ce259a24aa1450 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sat, 6 Jun 2020 22:34:08 +0200 Subject: [PATCH 0134/1727] improvement: optimize /sync response --- Cargo.lock | 17 +++-- Cargo.toml | 11 +-- src/client_server.rs | 155 +++++++++++++++++++++--------------------- src/database/rooms.rs | 2 +- src/pdu.rs | 2 +- 5 files changed, 95 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8527dba..fe80fb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1263,7 +1263,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "ruma-api", "ruma-client-api", @@ -1277,7 +1277,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1292,7 +1292,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -1302,7 +1302,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = 
"git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "http", "js_int", @@ -1319,7 +1319,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "matches", "ruma-serde", @@ -1356,8 +1356,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff655a4cb7d43b60b18e07a601889836c1c12854bb16f4c083826b664fdc55aa" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "js_int", "matches", @@ -1382,7 +1381,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "dtoa", "itoa", @@ -1395,7 +1394,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=f6fb971329a4a5a7faeebf7ea47a86cd19e580f4#f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" dependencies = [ "base64 0.12.1", "ring", diff --git a/Cargo.toml b/Cargo.toml index 3c5c9fa..38c7530 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,12 +30,13 @@ image = { version = "0.23.4", default-features = false, features = ["jpeg", "png [dependencies.ruma] git = "https://github.com/ruma/ruma" -rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" +rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +#path = "../ruma/ruma" features = ["rand", "client-api", "federation-api"] # These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma [patch.crates-io] -ruma-api = { git = "https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } -ruma-common = { git = "https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } -ruma-events = { git = "https://github.com/ruma/ruma-events", rev = "c1ee72d" } -ruma-serde = { git = "https://github.com/ruma/ruma", rev = "f6fb971329a4a5a7faeebf7ea47a86cd19e580f4" } +ruma-common = { git = "https://github.com/ruma/ruma", rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" } +ruma-serde = { git = "https://github.com/ruma/ruma", rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" } +#ruma-common = { path = "../ruma/ruma-common" } +#ruma-serde = { path = "../ruma/ruma-serde" } diff --git a/src/client_server.rs b/src/client_server.rs index c190ef7..01ec7a3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2338,50 +2338,51 @@ pub fn sync_route( ); } - joined_rooms.insert( - room_id.clone().try_into().unwrap(), - sync_events::JoinedRoom { - account_data: Some(sync_events::AccountData { - events: db - .account_data - .changes_since(Some(&room_id), &user_id, since) - .unwrap() - .into_iter() - .map(|(_, v)| v) - .collect(), - }), - summary: sync_events::RoomSummary { - heroes, - joined_member_count: 
joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, - notification_count, - }, - timeline: sync_events::Timeline { - limited: if limited || joined_since_last_sync { - Some(true) - } else { - None - }, - prev_batch, - events: room_events, - }, - // TODO: state before timeline - state: sync_events::State { - events: if joined_since_last_sync { - state - .into_iter() - .map(|(_, pdu)| pdu.to_state_event()) - .collect() - } else { - Vec::new() - }, - }, - ephemeral: sync_events::Ephemeral { events: edus }, + let joined_room = sync_events::JoinedRoom { + account_data: sync_events::AccountData { + events: db + .account_data + .changes_since(Some(&room_id), &user_id, since) + .unwrap() + .into_iter() + .map(|(_, v)| v) + .collect(), }, - ); + summary: sync_events::RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: sync_events::UnreadNotificationsCount { + highlight_count: None, + notification_count, + }, + timeline: sync_events::Timeline { + limited: if limited || joined_since_last_sync { + Some(true) + } else { + None + }, + prev_batch, + events: room_events, + }, + // TODO: state before timeline + state: sync_events::State { + events: if joined_since_last_sync { + state + .into_iter() + .map(|(_, pdu)| pdu.to_state_event()) + .collect() + } else { + Vec::new() + }, + }, + ephemeral: sync_events::Ephemeral { events: edus }, + }; + + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone().try_into().unwrap(), joined_room); + } } let mut left_rooms = BTreeMap::new(); @@ -2390,6 +2391,7 @@ pub fn sync_route( let pdus = db.rooms.pdus_since(&room_id, since).unwrap(); let room_events = pdus.map(|pdu| pdu.unwrap().to_room_event()).collect(); + // TODO: Only until leave point let mut edus = db .rooms .edus @@ -2416,38 +2418,40 @@ pub fn sync_route( ); } - left_rooms.insert( - room_id.clone().try_into().unwrap(), - sync_events::LeftRoom { - account_data: Some(sync_events::AccountData { events: Vec::new() }), - timeline: sync_events::Timeline { - limited: Some(false), - prev_batch: Some(next_batch.clone()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, + let left_room = sync_events::LeftRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + timeline: sync_events::Timeline { + limited: Some(false), + prev_batch: Some(next_batch.clone()), + events: room_events, }, - ); + state: sync_events::State { events: Vec::new() }, + }; + + if !left_room.is_empty() { + left_rooms.insert(room_id.clone().try_into().unwrap(), left_room); + } } let mut invited_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_invited(&user_id) { let room_id = room_id.unwrap(); - invited_rooms.insert( - room_id.clone(), - sync_events::InvitedRoom { - invite_state: sync_events::InviteState { - events: db - .rooms - .room_state(&room_id) - .unwrap() - .into_iter() - .map(|(_, pdu)| pdu.to_stripped_state_event()) - .collect(), - }, + let invited_room = sync_events::InvitedRoom { + invite_state: sync_events::InviteState { + events: db + .rooms + .room_state(&room_id) + .unwrap() + .into_iter() + .map(|(_, pdu)| pdu.to_stripped_state_event()) + .collect(), }, - ); + }; + + if !invited_room.is_empty() { + invited_rooms.insert(room_id.clone(), invited_room); + } } 
MatrixResult(Ok(sync_events::Response { @@ -2482,17 +2486,16 @@ pub fn sync_route( .map(|(_, v)| v) .collect(), }, - device_lists: if since != 0 { - Some(sync_events::DeviceLists { - changed: db - .users + device_lists: sync_events::DeviceLists { + changed: if since != 0 { + db.users .device_keys_changed(since) .map(|u| u.unwrap()) - .collect(), - left: Vec::new(), // TODO - }) - } else { - None // TODO: left + .collect() + } else { + Vec::new() + }, + left: Vec::new(), // TODO }, device_one_time_keys_count: Default::default(), // TODO to_device: sync_events::ToDevice { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5d9da48..fa422de 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -421,7 +421,7 @@ impl Rooms { auth_events: Vec::new(), redacts: redacts.clone(), unsigned, - hashes: ruma::api::federation::EventHash { + hashes: ruma::api::federation::pdu::EventHash { sha256: "aaa".to_owned(), }, signatures: HashMap::new(), diff --git a/src/pdu.rs b/src/pdu.rs index 6ee0fd5..454d27f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,6 +1,6 @@ use js_int::UInt; use ruma::{ - api::federation::EventHash, + api::federation::pdu::EventHash, events::{ collections::all::{RoomEvent, StateEvent}, stripped::AnyStrippedStateEvent, From 588049678b5340311f34a958d2cc2ac4b3f7ef53 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 7 Jun 2020 18:38:00 +0200 Subject: [PATCH 0135/1727] refactor: replace DeviceId with str or String --- src/client_server.rs | 19 ++++++++++++------- src/database/uiaa.rs | 4 ++-- src/database/users.rs | 34 +++++++++++++++------------------- sytest/sytest-whitelist | 1 - 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 01ec7a3..3620a00 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -56,7 +56,7 @@ use ruma::{ room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, EventJson, EventType, }, - identifiers::{DeviceId, RoomAliasId, RoomId, RoomVersionId, UserId}, + identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, }; use serde_json::{json, value::RawValue}; @@ -2841,13 +2841,15 @@ pub fn get_devices_route( MatrixResult(Ok(get_devices::Response { devices })) } -#[get("/_matrix/client/r0/devices/", data = "")] +#[get("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn get_device_route( db: State<'_, Database>, body: Ruma, - device_id: DeviceId, + _device_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); match device { @@ -2860,13 +2862,15 @@ pub fn get_device_route( } } -#[put("/_matrix/client/r0/devices/", data = "")] +#[put("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn update_device_route( db: State<'_, Database>, body: Ruma, - device_id: DeviceId, + _device_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); match device { @@ -2887,13 +2891,14 @@ pub fn update_device_route( } } -#[delete("/_matrix/client/r0/devices/", data = "")] +#[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn delete_device_route( db: State<'_, Database>, body: Ruma, - device_id: DeviceId, + _device_id: String, ) -> 
MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA let uiaainfo = UiaaInfo { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index f1de476..6cd25b9 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -11,7 +11,7 @@ use ruma::{ }, }, events::{to_device::AnyToDeviceEvent, EventJson, EventType}, - identifiers::{DeviceId, UserId}, + identifiers::UserId, }; use serde_json::value::RawValue; use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; @@ -29,7 +29,7 @@ impl Uiaa { pub fn try_auth( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, auth: &AuthData, uiaainfo: &UiaaInfo, users: &super::users::Users, diff --git a/src/database/users.rs b/src/database/users.rs index 8893b10..5c47455 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -6,7 +6,7 @@ use ruma::{ keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, }, events::{to_device::AnyToDeviceEvent, EventJson, EventType}, - identifiers::{DeviceId, UserId}, + identifiers::UserId, }; use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; @@ -113,7 +113,7 @@ impl Users { pub fn create_device( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, token: &str, initial_device_display_name: Option, ) -> Result<()> { @@ -130,7 +130,7 @@ impl Users { self.userdeviceid_metadata.insert( userdeviceid, serde_json::to_string(&Device { - device_id: device_id.clone(), + device_id: device_id.to_owned(), display_name: initial_device_display_name, last_seen_ip: None, // TODO last_seen_ts: Some(SystemTime::now()), @@ -144,7 +144,7 @@ impl Users { } /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + pub fn remove_device(&self, user_id: &UserId, device_id: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -173,7 +173,7 @@ impl Users { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { + pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -191,7 +191,7 @@ impl Users { } /// Replaces the access token of one device. 
- pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + pub fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -219,7 +219,7 @@ impl Users { pub fn add_one_time_key( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, one_time_key_key: &AlgorithmAndDeviceId, one_time_key_value: &OneTimeKey, ) -> Result<()> { @@ -248,7 +248,7 @@ impl Users { pub fn take_one_time_key( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, key_algorithm: &KeyAlgorithm, ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); @@ -282,7 +282,7 @@ impl Users { pub fn count_one_time_keys( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); @@ -315,7 +315,7 @@ impl Users { pub fn add_device_keys( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, device_keys: &DeviceKeys, globals: &super::globals::Globals, ) -> Result<()> { @@ -335,7 +335,7 @@ impl Users { pub fn get_device_keys( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, ) -> impl Iterator> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); @@ -376,7 +376,7 @@ impl Users { &self, sender: &UserId, target_user_id: &UserId, - target_device_id: &DeviceId, + target_device_id: &str, event_type: &EventType, content: serde_json::Value, globals: &super::globals::Globals, @@ -401,7 +401,7 @@ impl Users { pub fn take_to_device_events( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, max: usize, ) -> Result>> { let mut events = Vec::new(); @@ -423,7 +423,7 @@ impl Users { pub fn update_device_metadata( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: &str, device: &Device, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); @@ -441,11 +441,7 @@ impl Users { } /// Get device metadata. 
- pub fn get_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result> { + pub fn get_device_metadata(&self, user_id: &UserId, device_id: &str) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index bf9059c..a13d30f 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -43,7 +43,6 @@ GET /profile/:user_id/displayname publicly accessible GET /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} gives a 404 for unknown devices After deactivating account, can't log in with an email -Can create filter Should reject keys claiming to belong to a different user Can add account data Checking local federation server From bfe5b89ba43c5fb325bbd599a67e2d48694a24b7 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 8 Jun 2020 11:45:22 +0200 Subject: [PATCH 0136/1727] style: cargo fmt --- src/client_server.rs | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 3620a00..481a40a 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -171,12 +171,19 @@ pub fn register_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db .uiaa - .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .try_auth( + &user_id, + &"".to_owned(), + auth, + &uiaainfo, + &db.users, + &db.globals, + ) .unwrap(); if !worked { return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } - // Success! + // Success! } else { db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); @@ -2914,12 +2921,19 @@ pub fn delete_device_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db .uiaa - .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .try_auth( + &user_id, + &"".to_owned(), + auth, + &uiaainfo, + &db.users, + &db.globals, + ) .unwrap(); if !worked { return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } - // Success! + // Success! } else { db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); @@ -2952,12 +2966,19 @@ pub fn delete_devices_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db .uiaa - .try_auth(&user_id, &"".to_owned(), auth, &uiaainfo, &db.users, &db.globals) + .try_auth( + &user_id, + &"".to_owned(), + auth, + &uiaainfo, + &db.users, + &db.globals, + ) .unwrap(); if !worked { return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } - // Success! + // Success! 
} else { db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); From 176bd114a09e0299f048daa4bb48bcca70b84de4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 8 Jun 2020 12:28:30 +0200 Subject: [PATCH 0137/1727] fix: use correct device in GET /devices --- src/client_server.rs | 47 +++++++++++++++++++---------------------- sytest/sytest-whitelist | 1 + 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 481a40a..6e0c40a 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -171,22 +171,14 @@ pub fn register_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db .uiaa - .try_auth( - &user_id, - &"".to_owned(), - auth, - &uiaainfo, - &db.users, - &db.globals, - ) + .try_auth(&user_id, "", auth, &uiaainfo, &db.users, &db.globals) .unwrap(); if !worked { return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } // Success! } else { - db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); - + db.uiaa.create(&user_id, "", &uiaainfo).unwrap(); return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } @@ -604,7 +596,7 @@ pub fn get_displayname_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = (*body).user_id.clone(); + let user_id = body.body.user_id.clone(); MatrixResult(Ok(get_display_name::Response { displayname: db.users.displayname(&user_id).unwrap(), })) @@ -695,7 +687,7 @@ pub fn get_avatar_url_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = (*body).user_id.clone(); + let user_id = body.body.user_id.clone(); MatrixResult(Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&user_id).unwrap(), })) @@ -707,7 +699,7 @@ pub fn get_profile_route( body: Ruma, _user_id: String, ) -> MatrixResult { - let user_id = (*body).user_id.clone(); + let user_id = body.body.user_id.clone(); let avatar_url = db.users.avatar_url(&user_id).unwrap(); let displayname = db.users.displayname(&user_id).unwrap(); @@ -2855,9 +2847,11 @@ pub fn get_device_route( _device_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); + let device = db + .users + .get_device_metadata(&user_id, &body.body.device_id) + .unwrap(); match device { None => MatrixResult(Err(Error { @@ -2876,9 +2870,11 @@ pub fn update_device_route( _device_id: String, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - let device = db.users.get_device_metadata(&user_id, &device_id).unwrap(); + let device = db + .users + .get_device_metadata(&user_id, &body.body.device_id) + .unwrap(); match device { None => MatrixResult(Err(Error { @@ -2890,7 +2886,7 @@ pub fn update_device_route( device.display_name = body.display_name.clone(); db.users - .update_device_metadata(&user_id, &device_id, &device) + .update_device_metadata(&user_id, &body.body.device_id, &device) .unwrap(); MatrixResult(Ok(update_device::Response)) @@ -2923,7 +2919,7 @@ pub fn delete_device_route( .uiaa .try_auth( &user_id, - &"".to_owned(), + &device_id, auth, &uiaainfo, &db.users, @@ -2935,12 +2931,13 @@ pub fn delete_device_route( } // Success! 
} else { - db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); - + db.uiaa.create(&user_id, &device_id, &uiaainfo).unwrap(); return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } - db.users.remove_device(&user_id, &device_id).unwrap(); + db.users + .remove_device(&user_id, &body.body.device_id) + .unwrap(); MatrixResult(Ok(delete_device::Response)) } @@ -2951,6 +2948,7 @@ pub fn delete_devices_route( body: Ruma, ) -> MatrixResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA let uiaainfo = UiaaInfo { @@ -2968,7 +2966,7 @@ pub fn delete_devices_route( .uiaa .try_auth( &user_id, - &"".to_owned(), + &device_id, auth, &uiaainfo, &db.users, @@ -2980,8 +2978,7 @@ pub fn delete_devices_route( } // Success! } else { - db.uiaa.create(&user_id, &"".to_owned(), &uiaainfo).unwrap(); - + db.uiaa.create(&user_id, &device_id, &uiaainfo).unwrap(); return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index a13d30f..bf9059c 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -43,6 +43,7 @@ GET /profile/:user_id/displayname publicly accessible GET /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} gives a 404 for unknown devices After deactivating account, can't log in with an email +Can create filter Should reject keys claiming to belong to a different user Can add account data Checking local federation server From 95047272e85b2f3fcce03e1bd75d93bb18cca256 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 8 Jun 2020 14:29:44 +0200 Subject: [PATCH 0138/1727] fix: let example config show how to use registration_disabled --- Rocket-example.toml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Rocket-example.toml b/Rocket-example.toml index 924b540..30a3c3a 100644 --- a/Rocket-example.toml +++ b/Rocket-example.toml @@ -1,11 +1,13 @@ [global] server_name = "your.server.name" -port = 8448 -address = "0.0.0.0" +#registration_disabled = true # Default path is in this user's data #database_path = "/home/timo/MyConduitServer" +port = 14004 +address = "0.0.0.0" + #[global.tls] #certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" #key = "/etc/letsencrypt/live/your.server.name/privkey.pem" From f2a5d466284d20a0c815bffe22fc7e2634ed055a Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 8 Jun 2020 15:17:58 +0200 Subject: [PATCH 0139/1727] fix --- src/client_server.rs | 7 ++++--- src/database/uiaa.rs | 14 +++----------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6e0c40a..b054204 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -58,7 +58,7 @@ use ruma::{ }, identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, }; -use serde_json::{json, value::RawValue}; +use serde_json::json; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -2904,13 +2904,13 @@ pub fn delete_device_route( let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA - let uiaainfo = UiaaInfo { + let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec!["m.login.password".to_owned()], }], completed: Vec::new(), params: Default::default(), - session: Some(utils::random_string(SESSION_ID_LENGTH)), + session: None, auth_error: None, }; @@ -2931,6 +2931,7 @@ pub fn delete_device_route( } // Success! 
} else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa.create(&user_id, &device_id, &uiaainfo).unwrap(); return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 6cd25b9..9851e84 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,20 +1,12 @@ -use crate::{utils, Error, Result}; -use js_int::UInt; +use crate::{Error, Result}; use log::debug; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - device::Device, - keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, - uiaa::{AuthData, AuthFlow, UiaaInfo, UiaaResponse}, - }, + r0::uiaa::{AuthData, UiaaInfo}, }, - events::{to_device::AnyToDeviceEvent, EventJson, EventType}, identifiers::UserId, }; -use serde_json::value::RawValue; -use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; pub struct Uiaa { pub(super) userdeviceid_uiaainfo: sled::Tree, // User-interactive authentication @@ -44,7 +36,7 @@ impl Uiaa { let mut uiaainfo = session .as_ref() .map(|session| { - Ok::<_, Error>(self.get_uiaa_session(&user_id, &"".to_owned(), session)?) + Ok::<_, Error>(self.get_uiaa_session(&user_id, &device_id, session)?) }) .unwrap_or(Ok(uiaainfo.clone()))?; From 2368a905843427d3897142e062eb59a147840d39 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 9 Jun 2020 15:13:17 +0200 Subject: [PATCH 0140/1727] refactor: better error handling --- src/client_server.rs | 2763 ++++++++++++++++------------------ src/database.rs | 95 +- src/database/account_data.rs | 34 +- src/database/global_edus.rs | 18 +- src/database/globals.rs | 26 +- src/database/media.rs | 39 +- src/database/rooms.rs | 517 ++++--- src/database/rooms/edus.rs | 57 +- src/database/uiaa.rs | 70 +- src/database/users.rs | 165 +- src/error.rs | 88 +- src/main.rs | 4 +- src/pdu.rs | 29 +- src/ruma_wrapper.rs | 34 +- src/utils.rs | 32 +- 15 files changed, 2062 insertions(+), 1909 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index b054204..06748af 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -4,12 +4,12 @@ use std::{ time::{Duration, SystemTime}, }; -use crate::{utils, Database, MatrixResult, Ruma}; +use crate::{utils, ConduitResult, Database, Error, Ruma}; use log::{debug, warn}; use rocket::{delete, get, options, post, put, State}; use ruma::{ api::client::{ - error::{Error, ErrorKind}, + error::ErrorKind, r0::{ account::{get_username_availability, register}, alias::{create_alias, delete_alias, get_alias}, @@ -53,7 +53,10 @@ use ruma::{ }, events::{ collections::only::Event as EduEvent, - room::{canonical_alias, guest_access, history_visibility, join_rules, member, redaction}, + room::{ + canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, + topic, + }, EventJson, EventType, }, identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, @@ -67,66 +70,58 @@ const MXC_LENGTH: usize = 256; const SESSION_ID_LENGTH: usize = 256; #[get("/_matrix/client/versions")] -pub fn get_supported_versions_route() -> MatrixResult { - MatrixResult(Ok(get_supported_versions::Response { +pub fn get_supported_versions_route() -> ConduitResult { + Ok(get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], unstable_features: BTreeMap::new(), - })) + } + .into()) } #[get("/_matrix/client/r0/register/available", data = "")] pub fn get_register_available_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { // Validate user id - 
let user_id = - match UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) - .ok() - .filter(|user_id| !user_id.is_historical()) - { - None => { - debug!("Username invalid"); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidUsername, - message: "Username was invalid.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - Some(user_id) => user_id, - }; + let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough - if db.users.exists(&user_id).unwrap() { - debug!("ID already taken"); - return MatrixResult(Err(Error { - kind: ErrorKind::UserInUse, - message: "Desired user ID is already taken.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + if db.users.exists(&user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); } // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. - MatrixResult(Ok(get_username_availability::Response { available: true })) + Ok(get_username_availability::Response { available: true }.into()) } #[post("/_matrix/client/r0/register", data = "")] pub fn register_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { if db.globals.registration_disabled() { - return MatrixResult(Err(UiaaResponse::MatrixError(Error { - kind: ErrorKind::Unknown, - message: "Registration has been disabled.".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - }))); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Registration has been disabled.", + )); } // Validate user id - let user_id = match UserId::parse_with_server_name( + let user_id = UserId::parse_with_server_name( body.username .clone() .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)) @@ -134,65 +129,55 @@ pub fn register_route( db.globals.server_name(), ) .ok() - .filter(|user_id| !user_id.is_historical()) - { - None => { - debug!("Username invalid"); - return MatrixResult(Err(UiaaResponse::MatrixError(Error { - kind: ErrorKind::InvalidUsername, - message: "Username was invalid.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }))); - } - Some(user_id) => user_id, - }; + .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough - if db.users.exists(&user_id).unwrap() { - debug!("ID already taken"); - return MatrixResult(Err(UiaaResponse::MatrixError(Error { - kind: ErrorKind::UserInUse, - message: "Desired user ID is already taken.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }))); + if db.users.exists(&user_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); } // UIAA - let uiaainfo = UiaaInfo { + let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec!["m.login.dummy".to_owned()], }], completed: Vec::new(), params: Default::default(), - session: Some(utils::random_string(SESSION_ID_LENGTH)), + session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db - .uiaa - .try_auth(&user_id, "", auth, &uiaainfo, &db.users, &db.globals) - .unwrap(); + let (worked, uiaainfo) = + db.uiaa + .try_auth(&user_id, "", auth, &uiaainfo, &db.users, &db.globals)?; if !worked { - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + return Err(Error::Uiaa(uiaainfo)); } // Success! } else { - db.uiaa.create(&user_id, "", &uiaainfo).unwrap(); - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, "", &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); } let password = body.password.clone().unwrap_or_default(); if let Ok(hash) = utils::calculate_hash(&password) { // Create user - db.users.create(&user_id, &hash).unwrap(); + db.users.create(&user_id, &hash)?; } else { - return MatrixResult(Err(UiaaResponse::MatrixError(Error { - kind: ErrorKind::InvalidParam, - message: "Password did not met requirements".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - }))); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Password does not meet the requirements.", + )); } // Generate new device id if the user didn't specify one @@ -205,123 +190,95 @@ pub fn register_route( let token = utils::random_string(TOKEN_LENGTH); // Add device - db.users - .create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - ) - .unwrap(); + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; // Initial data - db.account_data - .update( - None, - &user_id, - &EventType::PushRules, - serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::events::push_rules::Ruleset { - content: vec![], - override_: vec![ruma::events::push_rules::ConditionalPushRule { - actions: vec![ruma::events::push_rules::Action::DontNotify], - default: true, - enabled: false, - rule_id: ".m.rule.master".to_owned(), - conditions: vec![], + db.account_data.update( + None, + &user_id, + &EventType::PushRules, + serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::events::push_rules::Ruleset { + content: vec![], + override_: vec![ruma::events::push_rules::ConditionalPushRule { + actions: vec![ruma::events::push_rules::Action::DontNotify], + default: true, + enabled: false, + rule_id: ".m.rule.master".to_owned(), + conditions: vec![], + }], + room: vec![], + sender: vec![], + underride: vec![ruma::events::push_rules::ConditionalPushRule { + actions: vec![ + ruma::events::push_rules::Action::Notify, + ruma::events::push_rules::Action::SetTweak(ruma::push::Tweak::Sound( + "default".to_owned(), + )), + ], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: vec![ruma::events::push_rules::PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), }], - room: vec![], - sender: vec![], - underride: 
vec![ruma::events::push_rules::ConditionalPushRule { - actions: vec![ - ruma::events::push_rules::Action::Notify, - ruma::events::push_rules::Action::SetTweak( - ruma::push::Tweak::Sound("default".to_owned()), - ), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma::events::push_rules::PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }], - }], - }, + }], }, - }) - .unwrap() - .as_object_mut() - .unwrap(), - &db.globals, - ) - .unwrap(); + }, + }) + .expect("data is valid, we just created it") + .as_object_mut() + .expect("data is valid, we just created it"), + &db.globals, + )?; - MatrixResult(Ok(register::Response { + Ok(register::Response { access_token: Some(token), user_id, device_id: Some(device_id), - })) + } + .into()) } #[get("/_matrix/client/r0/login")] -pub fn get_login_route() -> MatrixResult { - MatrixResult(Ok(get_login_types::Response { +pub fn get_login_route() -> ConduitResult { + Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], - })) + } + .into()) } #[post("/_matrix/client/r0/login", data = "")] pub fn login_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { // Validate login method let user_id = // TODO: Other login methods if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = (body.user.clone(), body.login_info.clone()) { - if let Ok(user_id) = UserId::parse_with_server_name(username, db.globals.server_name()) { - if let Some(hash) = db.users.password_hash(&user_id).unwrap() { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + let user_id = UserId::parse_with_server_name(username, db.globals.server_name()).map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password."))?; - if hash_matches { - // Success! - user_id - } else { - debug!("Invalid password."); - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); - } - } else { - debug!("UserId does not exist (has no assigned password). 
Can't log in."); - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); - } - } else { - debug!("Invalid UserId."); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidUsername, - message: "Bad user id.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + + if !hash_matches { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password.")); } + + user_id } else { - debug!("Bad login type"); - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Bad login type.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; // Generate new device id if the user didn't specify one @@ -335,39 +292,38 @@ pub fn login_route( let token = utils::random_string(TOKEN_LENGTH); // Add device - db.users - .create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - ) - .unwrap(); + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; - MatrixResult(Ok(login::Response { + Ok(login::Response { user_id, access_token: token, home_server: Some(db.globals.server_name().to_owned()), device_id, well_known: None, - })) + } + .into()) } #[post("/_matrix/client/r0/logout", data = "")] pub fn logout_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); - db.users.remove_device(&user_id, &device_id).unwrap(); + db.users.remove_device(&user_id, &device_id)?; - MatrixResult(Ok(logout::Response)) + Ok(logout::Response.into()) } #[get("/_matrix/client/r0/capabilities")] -pub fn get_capabilities_route() -> MatrixResult { +pub fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( RoomVersionId::version_5(), @@ -378,7 +334,7 @@ pub fn get_capabilities_route() -> MatrixResult { get_capabilities::RoomVersionStability::Stable, ); - MatrixResult(Ok(get_capabilities::Response { + Ok(get_capabilities::Response { capabilities: get_capabilities::Capabilities { change_password: None, // None means it is possible room_versions: Some(get_capabilities::RoomVersionsCapability { @@ -387,31 +343,36 @@ pub fn get_capabilities_route() -> MatrixResult { }), custom_capabilities: BTreeMap::new(), }, - })) + } + .into()) } #[get("/_matrix/client/r0/pushrules", data = "")] pub fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let Some(EduEvent::PushRules(pushrules)) = db + if let EduEvent::PushRules(pushrules) = db .account_data - .get(None, &user_id, &EventType::PushRules) - .unwrap() - .map(|edu| edu.deserialize().expect("PushRules event in db is valid")) + .get(None, &user_id, &EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))? + .deserialize() + .map_err(|_| Error::BadRequest( + ErrorKind::NotFound, + "PushRules event in db is invalid.", + ))? 
{ - MatrixResult(Ok(get_pushrules_all::Response { + Ok(get_pushrules_all::Response { global: pushrules.content.global, - })) + } + .into()) } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "PushRules event not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + Err(Error::BadDatabase("Pushrules event has wrong content.")) } } @@ -425,10 +386,10 @@ pub fn set_pushrule_route( _scope: String, _kind: String, _rule_id: String, -) -> MatrixResult { +) -> ConduitResult { // TODO warn!("TODO: set_pushrule_route"); - MatrixResult(Ok(set_pushrule::Response)) + Ok(set_pushrule::Response.into()) } #[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] @@ -436,19 +397,19 @@ pub fn set_pushrule_enabled_route( _scope: String, _kind: String, _rule_id: String, -) -> MatrixResult { +) -> ConduitResult { // TODO warn!("TODO: set_pushrule_enabled_route"); - MatrixResult(Ok(set_pushrule_enabled::Response)) + Ok(set_pushrule_enabled::Response.into()) } #[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] pub fn get_filter_route( _user_id: String, _filter_id: String, -) -> MatrixResult { +) -> ConduitResult { // TODO - MatrixResult(Ok(get_filter::Response { + Ok(get_filter::Response { filter: filter::FilterDefinition { event_fields: None, event_format: None, @@ -456,15 +417,17 @@ pub fn get_filter_route( room: None, presence: None, }, - })) + } + .into()) } #[post("/_matrix/client/r0/user/<_user_id>/filter")] -pub fn create_filter_route(_user_id: String) -> MatrixResult { +pub fn create_filter_route(_user_id: String) -> ConduitResult { // TODO - MatrixResult(Ok(create_filter::Response { + Ok(create_filter::Response { filter_id: utils::random_string(10), - })) + } + .into()) } #[put( @@ -476,22 +439,24 @@ pub fn set_global_account_data_route( body: Ruma, _user_id: String, _type: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - db.account_data - .update( - None, - user_id, - &EventType::try_from(&body.event_type).unwrap(), - json!({"content": serde_json::from_str::(body.data.get()).unwrap()}) - .as_object_mut() - .unwrap(), - &db.globals, + db.account_data.update( + None, + user_id, + &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), + json!( + {"content": serde_json::from_str::(body.data.get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))? + } ) - .unwrap(); + .as_object_mut() + .expect("we just created a valid object"), + &db.globals, + )?; - MatrixResult(Ok(set_global_account_data::Response)) + Ok(set_global_account_data::Response.into()) } #[get( @@ -503,26 +468,19 @@ pub fn get_global_account_data_route( body: Ruma, _user_id: String, _type: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let Some(data) = db + let data = db .account_data .get( None, user_id, - &EventType::try_from(&body.event_type).unwrap(), - ) - .unwrap() - { - MatrixResult(Ok(get_global_account_data::Response { account_data: data })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Data not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } + &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), + )? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + Ok(get_global_account_data::Response { account_data: data }.into()) } #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] @@ -530,64 +488,64 @@ pub fn set_displayname_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); db.users - .set_displayname(&user_id, body.displayname.clone()) - .unwrap(); + .set_displayname(&user_id, body.displayname.clone())?; // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { - let room_id = room_id.unwrap(); - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state(&room_id) - .unwrap() - .get(&(EventType::RoomMember, user_id.to_string())) - .expect("user is part of the room") - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap() - }) - .unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + let room_id = room_id?; + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id)? + .get(&(EventType::RoomMember, user_id.to_string())) + .ok_or(Error::BadDatabase( + "Tried to send displayname update for user not in the room.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .deserialize() + .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? 
+ }) + .expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; } // Presence update - db.global_edus - .update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - currently_active: None, - displayname: db.users.displayname(&user_id).unwrap(), - last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma::events::presence::PresenceState::Online, - status_msg: None, - }, - sender: user_id.clone(), + db.global_edus.update_presence( + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id)?, + currently_active: None, + displayname: db.users.displayname(&user_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::events::presence::PresenceState::Online, + status_msg: None, }, - &db.globals, - ) - .unwrap(); + sender: user_id.clone(), + }, + &db.globals, + )?; - MatrixResult(Ok(set_display_name::Response)) + Ok(set_display_name::Response.into()) } #[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] @@ -595,11 +553,12 @@ pub fn get_displayname_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.body.user_id.clone(); - MatrixResult(Ok(get_display_name::Response { - displayname: db.users.displayname(&user_id).unwrap(), - })) + Ok(get_display_name::Response { + displayname: db.users.displayname(&user_id)?, + } + .into()) } #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] @@ -607,78 +566,75 @@ pub fn set_avatar_url_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); if let Some(avatar_url) = &body.avatar_url { if !avatar_url.starts_with("mxc://") { - debug!("Request contains an invalid avatar_url."); - return MatrixResult(Err(Error { - kind: ErrorKind::InvalidParam, - message: "avatar_url has to start with mxc://.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "avatar_url has to start with mxc://.", + )); } // TODO in the future when we can handle media uploads make sure that this url is our own server // TODO also make sure this is valid mxc:// format (not only starting with it) } - db.users - .set_avatar_url(&user_id, body.avatar_url.clone()) - .unwrap(); + db.users.set_avatar_url(&user_id, body.avatar_url.clone())?; // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&user_id) { - let room_id = room_id.unwrap(); - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state(&room_id) - .unwrap() - .get(&(EventType::RoomMember, user_id.to_string())) - .expect("user should be part of the room") - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap() - }) - .unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + let room_id = room_id?; + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + 
serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state(&room_id)? + .get(&(EventType::RoomMember, user_id.to_string())) + .ok_or(Error::BadDatabase( + "Tried to send avatar url update for user not in the room.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .deserialize() + .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; } // Presence update - db.global_edus - .update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - currently_active: None, - displayname: db.users.displayname(&user_id).unwrap(), - last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: ruma::events::presence::PresenceState::Online, - status_msg: None, - }, - sender: user_id.clone(), + db.global_edus.update_presence( + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id)?, + currently_active: None, + displayname: db.users.displayname(&user_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::events::presence::PresenceState::Online, + status_msg: None, }, - &db.globals, - ) - .unwrap(); + sender: user_id.clone(), + }, + &db.globals, + )?; - MatrixResult(Ok(set_avatar_url::Response)) + Ok(set_avatar_url::Response.into()) } #[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] @@ -686,11 +642,12 @@ pub fn get_avatar_url_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.body.user_id.clone(); - MatrixResult(Ok(get_avatar_url::Response { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - })) + Ok(get_avatar_url::Response { + avatar_url: db.users.avatar_url(&user_id)?, + } + .into()) } #[get("/_matrix/client/r0/profile/<_user_id>", data = "")] @@ -698,25 +655,24 @@ pub fn get_profile_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.body.user_id.clone(); - let avatar_url = db.users.avatar_url(&user_id).unwrap(); - let displayname = db.users.displayname(&user_id).unwrap(); + let avatar_url = db.users.avatar_url(&user_id)?; + let displayname = db.users.displayname(&user_id)?; - if avatar_url.is_some() || displayname.is_some() { - return MatrixResult(Ok(get_profile::Response { - avatar_url, - displayname, - })); + if avatar_url.is_none() && displayname.is_none() { + // Return 404 if we don't have a profile for this id + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Profile was not found.", + )); } - // Return 404 if we don't have a profile for this id - debug!("Profile was not found."); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Profile was not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) + Ok(get_profile::Response { + avatar_url, + displayname, + } + .into()) } #[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] @@ -724,76 +680,73 @@ pub fn set_presence_route( db: State<'_, Database>, body: Ruma, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = 
body.user_id.as_ref().expect("user is authenticated"); - db.global_edus - .update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id).unwrap(), - currently_active: None, - displayname: db.users.displayname(&user_id).unwrap(), - last_active_ago: Some(utils::millis_since_unix_epoch().try_into().unwrap()), - presence: body.presence, - status_msg: body.status_msg.clone(), - }, - sender: user_id.clone(), + db.global_edus.update_presence( + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&user_id)?, + currently_active: None, + displayname: db.users.displayname(&user_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: body.presence, + status_msg: body.status_msg.clone(), }, - &db.globals, - ) - .unwrap(); + sender: user_id.clone(), + }, + &db.globals, + )?; - MatrixResult(Ok(set_presence::Response)) + Ok(set_presence::Response.into()) } #[post("/_matrix/client/r0/keys/upload", data = "")] pub fn upload_keys_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); if let Some(one_time_keys) = &body.one_time_keys { for (key_key, key_value) in one_time_keys { db.users - .add_one_time_key(user_id, device_id, key_key, key_value) - .unwrap(); + .add_one_time_key(user_id, device_id, key_key, key_value)?; } } if let Some(device_keys) = &body.device_keys { db.users - .add_device_keys(user_id, device_id, device_keys, &db.globals) - .unwrap(); + .add_device_keys(user_id, device_id, device_keys, &db.globals)?; } - MatrixResult(Ok(upload_keys::Response { - one_time_key_counts: db.users.count_one_time_keys(user_id, device_id).unwrap(), - })) + Ok(upload_keys::Response { + one_time_key_counts: db.users.count_one_time_keys(user_id, device_id)?, + } + .into()) } #[post("/_matrix/client/r0/keys/query", data = "")] pub fn get_keys_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let mut device_keys = BTreeMap::new(); for (user_id, device_ids) in &body.device_keys { if device_ids.is_empty() { let mut container = BTreeMap::new(); - for (device_id, mut keys) in db - .users - .all_device_keys(&user_id.clone()) - .map(|r| r.unwrap()) - { - let metadata = db - .users - .get_device_metadata(user_id, &device_id) - .unwrap() - .expect("this device should exist"); + for result in db.users.all_device_keys(&user_id.clone()) { + let (device_id, mut keys) = result?; + + let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( + Error::BadDatabase("all_device_keys contained nonexistent device."), + )?; keys.unsigned = Some(keys::UnsignedDeviceInfo { device_display_name: metadata.display_name, @@ -806,12 +759,13 @@ pub fn get_keys_route( for device_id in device_ids { let mut container = BTreeMap::new(); for keys in db.users.get_device_keys(&user_id.clone(), &device_id) { - let mut keys = keys.unwrap(); - let metadata = db - .users - .get_device_metadata(user_id, &device_id) - .unwrap() - .expect("this device should exist"); + let mut keys = keys?; + let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( + Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to get keys for nonexistent device.", + ), + )?; keys.unsigned = 
Some(keys::UnsignedDeviceInfo { device_display_name: metadata.display_name, @@ -824,25 +778,25 @@ pub fn get_keys_route( } } - MatrixResult(Ok(get_keys::Response { + Ok(get_keys::Response { failures: BTreeMap::new(), device_keys, - })) + } + .into()) } #[post("/_matrix/client/r0/keys/claim", data = "")] pub fn claim_keys_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let mut one_time_keys = BTreeMap::new(); for (user_id, map) in &body.one_time_keys { let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { - if let Some(one_time_keys) = db - .users - .take_one_time_key(user_id, device_id, key_algorithm) - .unwrap() + if let Some(one_time_keys) = + db.users + .take_one_time_key(user_id, device_id, key_algorithm)? { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); @@ -852,10 +806,11 @@ pub fn claim_keys_route( one_time_keys.insert(user_id.clone(), container); } - MatrixResult(Ok(claim_keys::Response { + Ok(claim_keys::Response { failures: BTreeMap::new(), one_time_keys, - })) + } + .into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] @@ -863,39 +818,34 @@ pub fn set_read_marker_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - db.account_data - .update( - Some(&body.room_id), - &user_id, - &EventType::FullyRead, - serde_json::to_value(ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - room_id: Some(body.room_id.clone()), - }) - .unwrap() - .as_object_mut() - .unwrap(), - &db.globals, - ) - .unwrap(); + db.account_data.update( + Some(&body.room_id), + &user_id, + &EventType::FullyRead, + serde_json::to_value(ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.fully_read.clone(), + }, + room_id: Some(body.room_id.clone()), + }) + .expect("we just created a valid event") + .as_object_mut() + .expect("we just created a valid event"), + &db.globals, + )?; if let Some(event) = &body.read_receipt { - db.rooms - .edus - .room_read_set( - &body.room_id, - &user_id, - db.rooms - .get_pdu_count(event) - .unwrap() - .expect("TODO: what if a client specifies an invalid event"), - ) - .unwrap(); + db.rooms.edus.room_read_set( + &body.room_id, + &user_id, + db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -912,20 +862,17 @@ pub fn set_read_marker_route( }, ); - db.rooms - .edus - .roomlatest_update( - &user_id, - &body.room_id, - EduEvent::Receipt(ruma::events::receipt::ReceiptEvent { - content: receipt_content, - room_id: None, // None because it can be inferred - }), - &db.globals, - ) - .unwrap(); + db.rooms.edus.roomlatest_update( + &user_id, + &body.room_id, + EduEvent::Receipt(ruma::events::receipt::ReceiptEvent { + content: receipt_content, + room_id: None, // None because it can be inferred + }), + &db.globals, + )?; } - MatrixResult(Ok(set_read_marker::Response)) + Ok(set_read_marker::Response.into()) } #[put( @@ -937,106 +884,94 @@ pub fn create_typing_event_route( body: Ruma, _room_id: String, _user_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); if body.typing { - db.rooms - .edus - 
.roomactive_add( - &user_id, - &body.room_id, - body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) - + utils::millis_since_unix_epoch().try_into().unwrap_or(0), - &db.globals, - ) - .unwrap(); + db.rooms.edus.roomactive_add( + &user_id, + &body.room_id, + body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + + utils::millis_since_unix_epoch(), + &db.globals, + )?; } else { db.rooms .edus - .roomactive_remove(&user_id, &body.room_id, &db.globals) - .unwrap(); + .roomactive_remove(&user_id, &body.room_id, &db.globals)?; } - MatrixResult(Ok(create_typing_event::Response)) + Ok(create_typing_event::Response.into()) } #[post("/_matrix/client/r0/createRoom", data = "")] pub fn create_room_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { - let room_id = RoomId::new(db.globals.server_name()).expect("host is valid"); +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let alias = if let Some(localpart) = &body.room_alias_name { - // TODO: Check for invalid characters and maximum length - if let Ok(alias) = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - { - if db.rooms.id_from_alias(&alias).unwrap().is_some() { - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Alias already exists.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } + let room_id = RoomId::new(db.globals.server_name()) + .map_err(|_| Error::BadDatabase("Server name is invalid."))?; - Some(alias) - } else { - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid alias.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - } else { - None - }; + let alias = body + .room_alias_name + .as_ref() + .map_or(Ok(None), |localpart| { + // TODO: Check for invalid characters and maximum length + let alias = + RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + + if db.rooms.id_from_alias(&alias)?.is_some() { + Err(Error::BadRequest( + ErrorKind::RoomInUse, + "Room alias already exists.", + )) + } else { + Ok(Some(alias)) + } + })?; // 1. The room create event - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomCreate, - serde_json::to_value(ruma::events::room::create::CreateEventContent { - creator: user_id.clone(), - federate: body.creation_content.as_ref().map_or(true, |c| c.federate), - predecessor: body - .creation_content - .as_ref() - .and_then(|c| c.predecessor.clone()), - room_version: RoomVersionId::version_6(), - }) - .unwrap(), - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomCreate, + serde_json::to_value(ruma::events::room::create::CreateEventContent { + creator: user_id.clone(), + federate: body.creation_content.as_ref().map_or(true, |c| c.federate), + predecessor: body + .creation_content + .as_ref() + .and_then(|c| c.predecessor.clone()), + room_version: RoomVersionId::version_6(), + }) + .expect("event is valid, we just created it"), + None, + Some("".to_owned()), + None, + &db.globals, + )?; // 2. 
Let the room creator join - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&user_id).unwrap(), - avatar_url: db.users.avatar_url(&user_id).unwrap(), - is_direct: body.is_direct, - third_party_invite: None, - }) - .unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; // Figure out preset. We need it for power levels and preset specific events let visibility = body.visibility.unwrap_or(room::Visibility::Private); @@ -1053,8 +988,9 @@ pub fn create_room_route( } let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { - serde_json::from_str(power_levels.json().get()) - .expect("TODO: handle. we hope the client sends a valid power levels json") + serde_json::from_str(power_levels.json().get()).map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") + })? } else { serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { ban: 50.into(), @@ -1070,88 +1006,80 @@ pub fn create_room_route( room: 50.into(), }, }) - .unwrap() + .expect("event is valid, we just created it") }; - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomPowerLevels, - power_levels_content, - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomPowerLevels, + power_levels_content, + None, + Some("".to_owned()), + None, + &db.globals, + )?; // 4. 
Events set by preset // 4.1 Join Rules - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomJoinRules, - match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(join_rules::JoinRulesEventContent { - join_rule: join_rules::JoinRule::Public, - }) - .unwrap() - } - _ => serde_json::to_value(join_rules::JoinRulesEventContent { - join_rule: join_rules::JoinRule::Invite, + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomJoinRules, + match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(join_rules::JoinRulesEventContent { + join_rule: join_rules::JoinRule::Public, }) - .unwrap(), - }, - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(join_rules::JoinRulesEventContent { + join_rule: join_rules::JoinRule::Invite, + }) + .expect("event is valid, we just created it"), + }, + None, + Some("".to_owned()), + None, + &db.globals, + )?; // 4.2 History Visibility - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomHistoryVisibility, - serde_json::to_value(history_visibility::HistoryVisibilityEventContent { - history_visibility: history_visibility::HistoryVisibility::Shared, - }) - .unwrap(), - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomHistoryVisibility, + serde_json::to_value(history_visibility::HistoryVisibilityEventContent { + history_visibility: history_visibility::HistoryVisibility::Shared, + }) + .expect("event is valid, we just created it"), + None, + Some("".to_owned()), + None, + &db.globals, + )?; // 4.3 Guest Access - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomGuestAccess, - match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent { - guest_access: guest_access::GuestAccess::Forbidden, - }) - .unwrap() - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent { - guest_access: guest_access::GuestAccess::CanJoin, + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomGuestAccess, + match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent { + guest_access: guest_access::GuestAccess::Forbidden, }) - .unwrap(), - }, - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent { + guest_access: guest_access::GuestAccess::CanJoin, + }) + .expect("event is valid, we just created it"), + }, + None, + Some("".to_owned()), + None, + &db.globals, + )?; // 5. Events listed in initial_state for create_room::InitialStateEvent { @@ -1160,92 +1088,85 @@ pub fn create_room_route( content, } in &body.initial_state { - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - event_type.clone(), - serde_json::from_str(content.get()).unwrap(), - None, - state_key.clone(), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + event_type.clone(), + serde_json::from_str(content.get()).map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") + })?, + None, + state_key.clone(), + None, + &db.globals, + )?; } // 6. 
Events implied by name and topic if let Some(name) = &body.name { - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomName, - serde_json::to_value( - ruma::events::room::name::NameEventContent::new(name.clone()).unwrap(), - ) - .unwrap(), - None, - Some("".to_owned()), - None, - &db.globals, + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomName, + serde_json::to_value( + name::NameEventContent::new(name.clone()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid."))?, ) - .unwrap(); + .expect("event is valid, we just created it"), + None, + Some("".to_owned()), + None, + &db.globals, + )?; } if let Some(topic) = &body.topic { - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomTopic, - serde_json::to_value(ruma::events::room::topic::TopicEventContent { - topic: topic.clone(), - }) - .unwrap(), - None, - Some("".to_owned()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomTopic, + serde_json::to_value(topic::TopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + None, + Some("".to_owned()), + None, + &db.globals, + )?; } // 7. Events implied by invite (and TODO: invite_3pid) for user in &body.invite { - db.rooms - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user).unwrap(), - avatar_url: db.users.avatar_url(&user).unwrap(), - is_direct: body.is_direct, - third_party_invite: None, - }) - .unwrap(), - None, - Some(user.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user)?, + avatar_url: db.users.avatar_url(&user)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + None, + Some(user.to_string()), + None, + &db.globals, + )?; } // Homeserver specific stuff if let Some(alias) = alias { - db.rooms - .set_alias(&alias, Some(&room_id), &db.globals) - .unwrap(); + db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; } if let Some(room::Visibility::Public) = body.visibility { - db.rooms.set_public(&room_id, true).unwrap(); + db.rooms.set_public(&room_id, true)?; } - MatrixResult(Ok(create_room::Response { room_id })) + Ok(create_room::Response { room_id }.into()) } #[put( @@ -1258,30 +1179,24 @@ pub fn redact_event_route( _room_id: String, _event_id: String, _txn_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let Ok(event_id) = db.rooms.append_pdu( + let event_id = db.rooms.append_pdu( body.room_id.clone(), user_id.clone(), EventType::RoomRedaction, serde_json::to_value(redaction::RedactionEventContent { reason: body.reason.clone(), }) - .unwrap(), + .expect("event is valid, we just created it"), None, None, Some(body.event_id.clone()), &db.globals, - ) { - MatrixResult(Ok(redact_event::Response { event_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Failed to redact event.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } + )?; + + Ok(redact_event::Response { event_id }.into()) } 
#[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] @@ -1289,20 +1204,15 @@ pub fn create_alias_route( db: State<'_, Database>, body: Ruma, _room_alias: String, -) -> MatrixResult { - if db.rooms.id_from_alias(&body.room_alias).unwrap().is_some() { - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Alias already exists".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); +) -> ConduitResult { + if db.rooms.id_from_alias(&body.room_alias)?.is_some() { + return Err(Error::Conflict("Alias already exists.")); } db.rooms - .set_alias(&body.room_alias, Some(&body.room_id), &db.globals) - .unwrap(); + .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - MatrixResult(Ok(create_alias::Response)) + Ok(create_alias::Response.into()) } #[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] @@ -1310,12 +1220,10 @@ pub fn delete_alias_route( db: State<'_, Database>, body: Ruma, _room_alias: String, -) -> MatrixResult { - db.rooms - .set_alias(&body.room_alias, None, &db.globals) - .unwrap(); +) -> ConduitResult { + db.rooms.set_alias(&body.room_alias, None, &db.globals)?; - MatrixResult(Ok(delete_alias::Response)) + Ok(delete_alias::Response.into()) } #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] @@ -1323,24 +1231,24 @@ pub fn get_alias_route( db: State<'_, Database>, body: Ruma, _room_alias: String, -) -> MatrixResult { - if body.room_alias.server_name() == db.globals.server_name() { - if let Some(room_id) = db.rooms.id_from_alias(&body.room_alias).unwrap() { - MatrixResult(Ok(get_alias::Response { - room_id, - servers: vec![db.globals.server_name().to_owned()], - })) - } else { - debug!("Room alias not found."); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room with alias not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } - } else { +) -> ConduitResult { + if body.room_alias.server_name() != db.globals.server_name() { todo!("ask remote server"); } + + let room_id = db + .rooms + .id_from_alias(&body.room_alias)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room with alias not found.", + ))?; + + Ok(get_alias::Response { + room_id, + servers: vec![db.globals.server_name().to_owned()], + } + .into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] @@ -1348,58 +1256,56 @@ pub fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); // TODO: Ask a remote server if we don't have this room let event = db .rooms - .room_state(&body.room_id) - .unwrap() + .room_state(&body.room_id)? .get(&(EventType::RoomMember, user_id.to_string())) .map_or_else( || { // There was no existing membership event - member::MemberEventContent { + Ok::<_, Error>(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&user_id).unwrap(), - avatar_url: db.users.avatar_url(&user_id).unwrap(), + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, is_direct: None, third_party_invite: None, - } + }) }, |pdu| { // We change the existing membership event let mut event = serde_json::from_value::>( pdu.content.clone(), ) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid member event in db."))? 
.deserialize() - .unwrap(); + .map_err(|_| Error::BadDatabase("Invalid member event in db."))?; event.membership = member::MembershipState::Join; - event.displayname = db.users.displayname(&user_id).unwrap(); - event.avatar_url = db.users.avatar_url(&user_id).unwrap(); - event + event.displayname = db.users.displayname(&user_id)?; + event.avatar_url = db.users.avatar_url(&user_id)?; + Ok(event) }, - ); + )?; - db.rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; - MatrixResult(Ok(join_room_by_id::Response { + Ok(join_room_by_id::Response { room_id: body.room_id.clone(), - })) + } + .into()) } #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] @@ -1407,27 +1313,13 @@ pub fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma, _room_id_or_alias: String, -) -> MatrixResult { - let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { - Ok(room_id) => room_id, - Err(_) => { - if let Some(room_id) = db - .rooms - .id_from_alias(&body.room_id_or_alias.clone().try_into().unwrap()) - .unwrap() - { - room_id - } else { - // Ask creator server of the room to join TODO ask someone else when not available - //server_server::send_request(data, destination, request) - return MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Room alias not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); - } - } - }; +) -> ConduitResult { + let room_id = RoomId::try_from(body.room_id_or_alias.clone()).or_else(|alias| { + Ok::<_, Error>(db.rooms.id_from_alias(&alias)?.ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room not found (TODO: Federation).", + ))?) + })?; let body = Ruma { user_id: body.user_id.clone(), @@ -1438,12 +1330,11 @@ pub fn join_room_by_id_or_alias_route( third_party_signed: body.third_party_signed.clone(), }, }; - MatrixResult(match join_room_by_id_route(db, body, "".to_owned()).0 { - Ok(response) => Ok(join_room_by_id_or_alias::Response { - room_id: response.room_id, - }), - Err(e) => Err(e), - }) + + Ok(join_room_by_id_or_alias::Response { + room_id: join_room_by_id_route(db, body, "".to_owned())?.0.room_id, + } + .into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] @@ -1451,38 +1342,38 @@ pub fn leave_room_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id).unwrap(); + let state = db.rooms.room_state(&body.room_id)?; - let mut event = - serde_json::from_value::>( - state - .get(&(EventType::RoomMember, user_id.to_string())) - .unwrap() // TODO: error handling - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap(); + let mut event = serde_json::from_value::>( + state + .get(&(EventType::RoomMember, user_id.to_string())) + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::BadDatabase("Invalid member event in database."))? 
+ .deserialize() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; - event.membership = ruma::events::room::member::MembershipState::Leave; + event.membership = member::MembershipState::Leave; - db.rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; - MatrixResult(Ok(leave_room::Response)) + Ok(leave_room::Response.into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] @@ -1490,39 +1381,40 @@ pub fn kick_user_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id).unwrap(); + let state = db.rooms.room_state(&body.room_id)?; let mut event = serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) - .unwrap() // TODO: error handling + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot kick member that's not in the room.", + ))? .content .clone(), ) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))? .deserialize() - .unwrap(); + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason - db.rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), // Sender - EventType::RoomMember, - serde_json::to_value(event).unwrap(), - None, - Some(body.body.user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + )?; - MatrixResult(Ok(kick_user::Response)) + Ok(kick_user::Response.into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] @@ -1530,39 +1422,46 @@ pub fn ban_user_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id).unwrap(); + let state = db.rooms.room_state(&body.room_id)?; - let mut event = - serde_json::from_value::>( - state - .get(&(EventType::RoomMember, user_id.to_string())) - .unwrap() // TODO: error handling - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap(); - - event.membership = ruma::events::room::member::MembershipState::Ban; // TODO: reason - db.rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), // Sender - EventType::RoomMember, - serde_json::to_value(event).unwrap(), - None, - Some(body.body.user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + let event = state + .get(&(EventType::RoomMember, user_id.to_string())) + .map_or( + Ok::<_, Error>(member::MemberEventContent { + membership: member::MembershipState::Ban, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }), + |event| { + let mut event = serde_json::from_value::>( + event.content.clone(), + ) + 
.map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + event.membership = ruma::events::room::member::MembershipState::Ban; + Ok(event) + }, + )?; - MatrixResult(Ok(ban_user::Response)) + db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + )?; + + Ok(ban_user::Response.into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] @@ -1570,38 +1469,39 @@ pub fn unban_user_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id).unwrap(); + let state = db.rooms.room_state(&body.room_id)?; let mut event = serde_json::from_value::>( state .get(&(EventType::RoomMember, user_id.to_string())) - .unwrap() // TODO: error handling + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot unban a user who is not banned.", + ))? .content .clone(), ) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))? .deserialize() - .unwrap(); + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; - db.rooms - .append_pdu( - body.room_id.clone(), - user_id.clone(), // Sender - EventType::RoomMember, - serde_json::to_value(event).unwrap(), - None, - Some(body.body.user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + body.room_id.clone(), + user_id.clone(), // Sender + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(body.body.user_id.to_string()), + None, + &db.globals, + )?; - MatrixResult(Ok(unban_user::Response)) + Ok(unban_user::Response.into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] @@ -1609,12 +1509,12 @@ pub fn forget_room_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, &user_id).unwrap(); + db.rooms.forget(&body.room_id, &user_id)?; - MatrixResult(Ok(forget_room::Response)) + Ok(forget_room::Response.into()) } #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] @@ -1622,35 +1522,29 @@ pub fn invite_user_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - db.rooms - .append_pdu( - body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id).unwrap(), - avatar_url: db.users.avatar_url(&user_id).unwrap(), - is_direct: None, - third_party_invite: None, - }) - .unwrap(), - None, - Some(user_id.to_string()), - None, - &db.globals, - ) - .unwrap(); + db.rooms.append_pdu( + body.room_id.clone(), + body.user_id.clone().expect("user is authenticated"), + EventType::RoomMember, + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: 
db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; - MatrixResult(Ok(invite_user::Response)) + Ok(invite_user::Response.into()) } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "User not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } } @@ -1659,13 +1553,13 @@ pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { match body.visibility { - room::Visibility::Public => db.rooms.set_public(&body.room_id, true).unwrap(), - room::Visibility::Private => db.rooms.set_public(&body.room_id, false).unwrap(), + room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, + room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, } - MatrixResult(Ok(set_room_visibility::Response)) + Ok(set_room_visibility::Response.into()) } #[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] @@ -1673,21 +1567,22 @@ pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { - MatrixResult(Ok(get_room_visibility::Response { - visibility: if db.rooms.is_public_room(&body.room_id).unwrap() { +) -> ConduitResult { + Ok(get_room_visibility::Response { + visibility: if db.rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private }, - })) + } + .into()) } #[get("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let Ruma { body: get_public_rooms::Request { @@ -1700,7 +1595,12 @@ pub async fn get_public_rooms_route( json_body, } = body; - let response = get_public_rooms_filtered_route( + let get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } = get_public_rooms_filtered_route( db, Ruma { body: get_public_rooms_filtered::Request { @@ -1715,99 +1615,94 @@ pub async fn get_public_rooms_route( json_body, }, ) - .await; + .await? 
+ .0; - MatrixResult(match response.0 { - Ok(get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - }) => Ok(get_public_rooms::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - }), - Err(e) => Err(e), - }) + Ok(get_public_rooms::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } + .into()) } #[post("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let mut chunk = db .rooms .public_rooms() .map(|room_id| { - let room_id = room_id.unwrap(); + let room_id = room_id?; - let state = db.rooms.room_state(&room_id).unwrap(); + let state = db.rooms.room_state(&room_id)?; - directory::PublicRoomsChunk { + let chunk = directory::PublicRoomsChunk { aliases: Vec::new(), - canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).and_then(|s| { - serde_json::from_value::< + canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid canonical alias event in database."))? .deserialize() - .unwrap() - .alias - }), - name: state.get(&(EventType::RoomName, "".to_owned())).map(|s| { - serde_json::from_value::>( + .map_err(|_| Error::BadDatabase("Invalid canonical alias event in database."))? + .alias) + })?, + name: state.get(&(EventType::RoomName, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::>( s.content.clone(), ) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room name event in database."))? .deserialize() - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room name event in database."))? .name() - .unwrap() - .to_owned() - }), + .map(|n| n.to_owned())) + })?, num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, - topic: state.get(&(EventType::RoomTopic, "".to_owned())).map(|s| { - serde_json::from_value::< + topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { + Ok(Some(serde_json::from_value::< EventJson, >(s.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room topic event in database."))? .deserialize() - .unwrap() - .topic - }), - world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(false, |s| { - serde_json::from_value::< + .map_err(|_| Error::BadDatabase("Invalid room topic event in database."))? + .topic)) + })?, + world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room history visibility event in database."))? .deserialize() - .unwrap() - .history_visibility == history_visibility::HistoryVisibility::WorldReadable - }), - guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(false, |s| { - serde_json::from_value::< + .map_err(|_| Error::BadDatabase("Invalid room history visibility event in database."))? 
+ .history_visibility == history_visibility::HistoryVisibility::WorldReadable) + })?, + guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room guest access event in database."))? .deserialize() - .unwrap() - .guest_access == guest_access::GuestAccess::CanJoin - }), - avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map(|s| { - serde_json::from_value::< + .map_err(|_| Error::BadDatabase("Invalid room guest access event in database."))? + .guest_access == guest_access::GuestAccess::CanJoin) + })?, + avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( Ok::<_, Error>(None),|s| { + Ok(Some(serde_json::from_value::< EventJson, >(s.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid room avatar event in database."))? .deserialize() - .unwrap() - .url - }), - } + .map_err(|_| Error::BadDatabase("Invalid room avatar event in database."))? + .url)) + })?, + }; + Ok::<_, Error>(chunk) }) + .filter_map(|r| r.ok()) // Filter out buggy rooms .collect::>(); chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); @@ -1824,38 +1719,42 @@ pub async fn get_public_rooms_filtered_route( }, ) .await - .unwrap() + ? .chunk .into_iter() - .map(|c| serde_json::from_str(&serde_json::to_string(&c).unwrap()).unwrap()) + .map(|c| serde_json::from_str(&serde_json::to_string(&c)?)?) .collect::>(), ); */ let total_room_count_estimate = (chunk.len() as u32).into(); - MatrixResult(Ok(get_public_rooms_filtered::Response { + Ok(get_public_rooms_filtered::Response { chunk, prev_batch: None, next_batch: None, total_room_count_estimate: Some(total_room_count_estimate), - })) + } + .into()) } #[post("/_matrix/client/r0/user_directory/search", data = "")] pub fn search_users_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { - MatrixResult(Ok(search_users::Response { +) -> ConduitResult { + Ok(search_users::Response { results: db .users .iter() - .map(Result::unwrap) - .map(|user_id| search_users::User { - user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).unwrap(), - avatar_url: db.users.avatar_url(&user_id).unwrap(), + .filter_map(|user_id| { + // Filter out buggy users (they should not exist, but you never know...) 
+ let user_id = user_id.ok()?; + Some(search_users::User { + user_id: user_id.clone(), + display_name: db.users.displayname(&user_id).ok()?, + avatar_url: db.users.avatar_url(&user_id).ok()?, + }) }) .filter(|user| { user.user_id.to_string().contains(&body.search_term) @@ -1867,21 +1766,23 @@ pub fn search_users_route( }) .collect(), limited: false, - })) + } + .into()) } #[get("/_matrix/client/r0/rooms/<_room_id>/members")] -pub fn get_member_events_route(_room_id: String) -> MatrixResult { +pub fn get_member_events_route(_room_id: String) -> ConduitResult { warn!("TODO: get_member_events_route"); - MatrixResult(Ok(get_member_events::Response { chunk: Vec::new() })) + Ok(get_member_events::Response { chunk: Vec::new() }.into()) } #[get("/_matrix/client/r0/thirdparty/protocols")] -pub fn get_protocols_route() -> MatrixResult { +pub fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); - MatrixResult(Ok(get_protocols::Response { + Ok(get_protocols::Response { protocols: BTreeMap::new(), - })) + } + .into()) } #[put( @@ -1894,30 +1795,29 @@ pub fn create_message_event_route( _room_id: String, _event_type: String, _txn_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - if let Ok(event_id) = db.rooms.append_pdu( + let event_id = db.rooms.append_pdu( body.room_id.clone(), user_id.clone(), body.event_type.clone(), - serde_json::from_str(body.json_body.unwrap().get()).unwrap(), + serde_json::from_str( + body.json_body + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? + .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, Some(unsigned), None, None, &db.globals, - ) { - MatrixResult(Ok(create_message_event::Response { event_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Failed to send message.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } + )?; + + Ok(create_message_event::Response { event_id }.into()) } #[put( @@ -1930,19 +1830,24 @@ pub fn create_state_event_for_key_route( _room_id: String, _event_type: String, _state_key: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let content = - serde_json::from_str::(body.json_body.clone().unwrap().get()).unwrap(); + let content = serde_json::from_str::( + body.json_body + .as_ref() + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? + .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; if body.event_type == EventType::RoomCanonicalAlias { let canonical_alias = serde_json::from_value::< EventJson, >(content.clone()) - .unwrap() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))? .deserialize() - .unwrap(); + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))?; let mut aliases = canonical_alias.alt_aliases; @@ -1954,21 +1859,16 @@ pub fn create_state_event_for_key_route( if alias.server_name() != db.globals.server_name() || db .rooms - .id_from_alias(&alias) - .unwrap() + .id_from_alias(&alias)? 
.filter(|room| room == &body.room_id) // Make sure it's the right room .is_none() { - return MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "You are only allowed to send canonical_alias events when it's aliases already exists".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + return Err(Error::BadRequest(ErrorKind::Forbidden, "You are only allowed to send canonical_alias events when it's aliases already exists")); } } } - if let Ok(event_id) = db.rooms.append_pdu( + let event_id = db.rooms.append_pdu( body.room_id.clone(), user_id.clone(), body.event_type.clone(), @@ -1977,15 +1877,9 @@ pub fn create_state_event_for_key_route( Some(body.state_key.clone()), None, &db.globals, - ) { - MatrixResult(Ok(create_state_event_for_key::Response { event_id })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Failed to send event.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) - } + )?; + + Ok(create_state_event_for_key::Response { event_id }.into()) } #[put( @@ -1997,7 +1891,7 @@ pub fn create_state_event_for_empty_key_route( body: Ruma, _room_id: String, _event_type: String, -) -> MatrixResult { +) -> ConduitResult { // This just calls create_state_event_for_key_route let Ruma { body: @@ -2011,30 +1905,28 @@ pub fn create_state_event_for_empty_key_route( json_body, } = body; - let response = create_state_event_for_key_route( - db, - Ruma { - body: create_state_event_for_key::Request { - room_id, - event_type, - data, - state_key: "".to_owned(), + Ok(create_state_event_for_empty_key::Response { + event_id: create_state_event_for_key_route( + db, + Ruma { + body: create_state_event_for_key::Request { + room_id, + event_type, + data, + state_key: "".to_owned(), + }, + user_id, + device_id, + json_body, }, - user_id, - device_id, - json_body, - }, - _room_id, - _event_type, - "".to_owned(), - ); - - MatrixResult(match response.0 { - Ok(create_state_event_for_key::Response { event_id }) => { - Ok(create_state_event_for_empty_key::Response { event_id }) - } - Err(e) => Err(e), - }) + _room_id, + _event_type, + "".to_owned(), + )? + .0 + .event_id, + } + .into()) } #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] @@ -2042,26 +1934,25 @@ pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "You don't have permission to view the room state.".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); + if !db.rooms.is_joined(user_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } - MatrixResult(Ok(get_state_events::Response { + Ok(get_state_events::Response { room_state: db .rooms - .room_state(&body.room_id) - .unwrap() + .room_state(&body.room_id)? 
.values() .map(|pdu| pdu.to_state_event()) .collect(), - })) + } + .into()) } #[get( @@ -2074,33 +1965,30 @@ pub fn get_state_events_for_key_route( _room_id: String, _event_type: String, _state_key: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "You don't have permission to view the room state.".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); + if !db.rooms.is_joined(user_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() + let state = db.rooms.room_state(&body.room_id)?; + + let event = state .get(&(body.event_type.clone(), body.state_key.clone())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(&event.content).unwrap(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; + + Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(&event.content) + .map_err(|_| Error::BadDatabase("Invalid event content in database"))?, } + .into()) } #[get( @@ -2112,45 +2000,42 @@ pub fn get_state_events_for_empty_key_route( body: Ruma, _room_id: String, _event_type: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "You don't have permission to view the room state.".to_owned(), - status_code: http::StatusCode::FORBIDDEN, - })); + if !db.rooms.is_joined(user_id, &body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } - if let Some(event) = db - .rooms - .room_state(&body.room_id) - .unwrap() + let state = db.rooms.room_state(&body.room_id)?; + + let event = state .get(&(body.event_type.clone(), "".to_owned())) - { - MatrixResult(Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(event).unwrap(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "State event not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; + + Ok(get_state_events_for_empty_key::Response { + content: serde_json::value::to_raw_value(event) + .map_err(|_| Error::BadDatabase("Invalid event content in database"))?, } + .into()) } #[get("/_matrix/client/r0/sync", data = "")] pub fn sync_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { std::thread::sleep(Duration::from_millis(1000)); let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); - let next_batch = db.globals.current_count().unwrap().to_string(); + let next_batch = db.globals.current_count()?.to_string(); let mut joined_rooms = BTreeMap::new(); let since = body @@ -2160,13 +2045,12 @@ pub fn sync_route( .unwrap_or(0); for room_id in db.rooms.rooms_joined(&user_id) { - let room_id = room_id.unwrap(); + let room_id = room_id?; let mut pdus = db .rooms - .pdus_since(&room_id, since) - .unwrap() - .map(|r| r.unwrap()) + .pdus_since(&room_id, since)? + .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); let mut send_member_count = false; @@ -2180,9 +2064,9 @@ pub fn sync_route( let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid PDU in database."))? .deserialize() - .unwrap(); + .map_err(|_| Error::BadDatabase("Invalid PDU in database."))?; if content.membership == ruma::events::room::member::MembershipState::Join { joined_since_last_sync = true; // Both send_member_count and joined_since_last_sync are set. There's nothing more @@ -2193,7 +2077,7 @@ pub fn sync_route( } } - let state = db.rooms.room_state(&room_id).unwrap(); + let state = db.rooms.room_state(&room_id)?; let (joined_member_count, invited_member_count, heroes) = if send_member_count { let joined_member_count = db.rooms.room_members(&room_id).count(); @@ -2208,51 +2092,54 @@ pub fn sync_route( for hero in db .rooms - .all_pdus(&room_id) - .unwrap() + .all_pdus(&room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|pdu| pdu.kind == EventType::RoomMember) - .filter_map(|pdu| { + .map(|pdu| { let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) - .unwrap() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))? .deserialize() - .unwrap(); + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; - let current_content = serde_json::from_value::< - EventJson, - >( - state - .get(&( - EventType::RoomMember, - pdu.state_key.clone().expect( - "TODO: error handling. 
Is it really a state event?", - ), - )) - .expect("a user that joined once will always have a member event") - .content - .clone(), - ) - .unwrap() - .deserialize() - .unwrap(); + if let Some(state_key) = &pdu.state_key { + let current_content = serde_json::from_value::< + EventJson, + >( + state + .get(&(EventType::RoomMember, state_key.clone())) + .ok_or(Error::BadDatabase( + "A user that joined once has no member event anymore.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; - // The membership was and still is invite or join - if matches!( - content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) && matches!( - current_content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) { - Some(pdu.state_key.unwrap()) + // The membership was and still is invite or join + if matches!( + content.membership, + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite + ) && matches!( + current_content.membership, + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite + ) { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } } else { - None + Ok(None) } }) + .filter_map(|u| u.ok()) // Filter out buggy users + // Filter for possible heroes + .filter_map(|u| u) { if heroes.contains(&hero) || hero == user_id.to_string() { continue; @@ -2272,14 +2159,14 @@ pub fn sync_route( }; let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id).unwrap() { + if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id)? { Some( (db.rooms - .pdus_since(&room_id, last_read) - .unwrap() + .pdus_since(&room_id, last_read)? + .filter_map(|pdu| pdu.ok()) // Filter out buggy events .filter(|pdu| { matches!( - pdu.as_ref().unwrap().kind.clone(), + pdu.kind.clone(), EventType::RoomMessage | EventType::RoomEncrypted ) }) @@ -2301,10 +2188,14 @@ pub fn sync_route( 0 })); - let prev_batch = pdus - .first() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); + let prev_batch = pdus.first().map_or(Ok::<_, Error>(None), |e| { + Ok(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .to_string(), + )) + })?; let room_events = pdus .into_iter() @@ -2314,26 +2205,24 @@ pub fn sync_route( let mut edus = db .rooms .edus - .roomlatests_since(&room_id, since) - .unwrap() - .map(|r| r.unwrap()) + .roomlatests_since(&room_id, since)? + .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); if db .rooms .edus - .last_roomactive_update(&room_id, &db.globals) - .unwrap() + .last_roomactive_update(&room_id, &db.globals)? 
> since { edus.push( serde_json::from_str( &serde_json::to_string(&EduEvent::Typing( - db.rooms.edus.roomactives_all(&room_id).unwrap(), + db.rooms.edus.roomactives_all(&room_id)?, )) - .unwrap(), + .expect("event is valid, we just created it"), ) - .unwrap(), + .expect("event is valid, we just created it"), ); } @@ -2341,8 +2230,7 @@ pub fn sync_route( account_data: sync_events::AccountData { events: db .account_data - .changes_since(Some(&room_id), &user_id, since) - .unwrap() + .changes_since(Some(&room_id), &user_id, since)? .into_iter() .map(|(_, v)| v) .collect(), @@ -2380,40 +2268,41 @@ pub fn sync_route( }; if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone().try_into().unwrap(), joined_room); + joined_rooms.insert(room_id.clone(), joined_room); } } let mut left_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_left(&user_id) { - let room_id = room_id.unwrap(); - let pdus = db.rooms.pdus_since(&room_id, since).unwrap(); - let room_events = pdus.map(|pdu| pdu.unwrap().to_room_event()).collect(); + let room_id = room_id?; + let pdus = db.rooms.pdus_since(&room_id, since)?; + let room_events = pdus + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .map(|pdu| pdu.to_room_event()) + .collect(); // TODO: Only until leave point let mut edus = db .rooms .edus - .roomlatests_since(&room_id, since) - .unwrap() - .map(|r| r.unwrap()) + .roomlatests_since(&room_id, since)? + .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); if db .rooms .edus - .last_roomactive_update(&room_id, &db.globals) - .unwrap() + .last_roomactive_update(&room_id, &db.globals)? > since { edus.push( serde_json::from_str( &serde_json::to_string(&EduEvent::Typing( - db.rooms.edus.roomactives_all(&room_id).unwrap(), + db.rooms.edus.roomactives_all(&room_id)?, )) - .unwrap(), + .expect("event is valid, we just created it"), ) - .unwrap(), + .expect("event is valid, we just created it"), ); } @@ -2428,20 +2317,19 @@ pub fn sync_route( }; if !left_room.is_empty() { - left_rooms.insert(room_id.clone().try_into().unwrap(), left_room); + left_rooms.insert(room_id.clone(), left_room); } } let mut invited_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_invited(&user_id) { - let room_id = room_id.unwrap(); + let room_id = room_id?; let invited_room = sync_events::InvitedRoom { invite_state: sync_events::InviteState { events: db .rooms - .room_state(&room_id) - .unwrap() + .room_state(&room_id)? .into_iter() .map(|(_, pdu)| pdu.to_stripped_state_event()) .collect(), @@ -2453,7 +2341,7 @@ pub fn sync_route( } } - MatrixResult(Ok(sync_events::Response { + Ok(sync_events::Response { next_batch, rooms: sync_events::Rooms { leave: left_rooms, @@ -2463,24 +2351,27 @@ pub fn sync_route( presence: sync_events::Presence { events: db .global_edus - .presence_since(since) - .unwrap() + .presence_since(since)? .map(|edu| { - let mut edu = edu.unwrap().deserialize().unwrap(); - let timestamp = edu.content.last_active_ago.unwrap(); - let last_active_ago = js_int::UInt::try_from(utils::millis_since_unix_epoch()) - .unwrap() - - timestamp; - edu.content.last_active_ago = Some(last_active_ago); - edu.into() + let mut edu = edu? 
+ .deserialize() + .map_err(|_| Error::BadDatabase("EDU in database is invalid."))?; + if let Some(timestamp) = edu.content.last_active_ago { + let last_active_ago = + js_int::UInt::try_from(utils::millis_since_unix_epoch()) + .expect("time is valid") + - timestamp; + edu.content.last_active_ago = Some(last_active_ago); + } + Ok::<_, Error>(edu.into()) }) + .filter_map(|edu| edu.ok()) // Filter out buggy events .collect(), }, account_data: sync_events::AccountData { events: db .account_data - .changes_since(None, &user_id, since) - .unwrap() + .changes_since(None, &user_id, since)? .into_iter() .map(|(_, v)| v) .collect(), @@ -2489,8 +2380,8 @@ pub fn sync_route( changed: if since != 0 { db.users .device_keys_changed(since) - .map(|u| u.unwrap()) - .collect() + .filter_map(|u| u.ok()) + .collect() // Filter out buggy events } else { Vec::new() }, @@ -2498,12 +2389,10 @@ pub fn sync_route( }, device_one_time_keys_count: Default::default(), // TODO to_device: sync_events::ToDevice { - events: db - .users - .take_to_device_events(user_id, device_id, 100) - .unwrap(), + events: db.users.take_to_device_events(user_id, device_id, 100)?, }, - })) + } + .into()) } #[get( @@ -2515,81 +2404,96 @@ pub fn get_context_route( body: Ruma, _room_id: String, _event_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "You don't have permission to view this room.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + if !db.rooms.is_joined(user_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); } - if let Some(base_event) = db.rooms.get_pdu(&body.event_id).unwrap() { - let base_event = base_event.to_room_event(); + let base_event = db + .rooms + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event not found.", + ))? + .to_room_event(); - let base_token = db + let base_token = db + .rooms + .get_pdu_count(&body.event_id)? + .expect("event still exists"); + + let events_before = db + .rooms + .pdus_until(&body.room_id, base_token) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect::>(); + + let start_token = events_before.last().map_or(Ok(None), |e| { + Ok::<_, Error>(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .to_string(), + )) + })?; + + let events_before = events_before + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + let events_after = db + .rooms + .pdus_after(&body.room_id, base_token) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect::>(); + + let end_token = events_after.last().map_or(Ok(None), |e| { + Ok::<_, Error>(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or(Error::BadDatabase("Can't find count from event in db."))? 
+ .to_string(), + )) + })?; + + let events_after = events_after + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + Ok(get_context::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state: db // TODO: State at event .rooms - .get_pdu_count(&body.event_id) - .unwrap() - .expect("event exists, so count should exist too"); - - let events_before = db - .rooms - .pdus_until(&body.room_id, base_token) - .take(u32::try_from(body.limit).unwrap() as usize / 2) - .map(|r| r.unwrap()) - .collect::>(); - - let start_token = events_before - .last() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); - - let events_before = events_before - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect::>(); - - let events_after = db - .rooms - .pdus_after(&body.room_id, base_token) - .take(u32::try_from(body.limit).unwrap() as usize / 2) - .map(|r| r.unwrap()) - .collect::>(); - - let end_token = events_after - .last() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); - - let events_after = events_after - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect::>(); - - MatrixResult(Ok(get_context::Response { - start: start_token, - end: end_token, - events_before, - event: Some(base_event), - events_after, - state: db // TODO: State at event - .rooms - .room_state(&body.room_id) - .unwrap() - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), - })) - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid base event.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + .room_state(&body.room_id)? + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), } + .into()) } #[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] @@ -2597,96 +2501,109 @@ pub fn get_message_events_route( db: State<'_, Database>, body: Ruma, _room_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id).unwrap() { - return MatrixResult(Err(Error { - kind: ErrorKind::Forbidden, - message: "You don't have permission to view this room.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })); + if !db.rooms.is_joined(user_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); } - if let Ok(from) = body.from.clone().parse() { - match body.dir { - get_message_events::Direction::Forward => { - let events_after = db - .rooms - .pdus_after(&body.room_id, from) - .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) - .map(|r| r.unwrap()) - .collect::>(); + let from = body + .from + .clone() + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + match body.dir { + get_message_events::Direction::Forward => { + let events_after = db + .rooms + .pdus_after(&body.room_id, from) + // Use limit or else 10 + .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { + Ok(u32::try_from(l).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize) + })?) 
+ .filter_map(|r| r.ok()) // Filter out buggy events + .collect::>(); - let end_token = events_after - .last() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); + let end_token = events_after.last().map_or(Ok::<_, Error>(None), |e| { + Ok(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .to_string(), + )) + })?; - let events_after = events_after - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect::>(); + let events_after = events_after + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); - MatrixResult(Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: end_token, - chunk: events_after, - state: Vec::new(), - })) - } - get_message_events::Direction::Backward => { - let events_before = db - .rooms - .pdus_until(&body.room_id, from) - .take(body.limit.map(|l| l.try_into().unwrap()).unwrap_or(10_u32) as usize) - .map(|r| r.unwrap()) - .collect::>(); - - let start_token = events_before - .last() - .and_then(|e| db.rooms.get_pdu_count(&e.event_id).unwrap()) - .map(|c| c.to_string()); - - let events_before = events_before - .into_iter() - .map(|pdu| pdu.to_room_event()) - .collect::>(); - - MatrixResult(Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: start_token, - chunk: events_before, - state: Vec::new(), - })) + Ok(get_message_events::Response { + start: Some(body.from.clone()), + end: end_token, + chunk: events_after, + state: Vec::new(), } + .into()) + } + get_message_events::Direction::Backward => { + let events_before = db + .rooms + .pdus_until(&body.room_id, from) + // Use limit or else 10 + .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { + Ok(u32::try_from(l).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize) + })?) + .filter_map(|r| r.ok()) // Filter out buggy events + .collect::>(); + + let start_token = events_before.last().map_or(Ok::<_, Error>(None), |e| { + Ok(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or(Error::BadDatabase("Can't find count from event in db."))? 
+ .to_string(), + )) + })?; + + let events_before = events_before + .into_iter() + .map(|pdu| pdu.to_room_event()) + .collect::>(); + + Ok(get_message_events::Response { + start: Some(body.from.clone()), + end: start_token, + chunk: events_before, + state: Vec::new(), + } + .into()) } - } else { - MatrixResult(Err(Error { - kind: ErrorKind::Unknown, - message: "Invalid from.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) } } #[get("/_matrix/client/r0/voip/turnServer")] -pub fn turn_server_route() -> MatrixResult { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "There is no turn server yet.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) +pub fn turn_server_route() -> ConduitResult { + Err(Error::BadRequest( + ErrorKind::NotFound, + "There is no turn server yet.", + )) } #[post("/_matrix/client/r0/publicised_groups")] -pub fn publicised_groups_route() -> MatrixResult { - warn!("TODO: publicised_groups_route"); - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "There are no publicised groups yet.".to_owned(), - status_code: http::StatusCode::NOT_FOUND, - })) +pub fn publicised_groups_route() -> ConduitResult { + Err(Error::BadRequest( + ErrorKind::NotFound, + "There are not publicised groups yet.", + )) } #[put( @@ -2698,72 +2615,72 @@ pub fn send_event_to_device_route( body: Ruma, _event_type: String, _txn_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { match target_device_id_maybe { - to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => db - .users - .add_to_device_event( + to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { + db.users.add_to_device_event( user_id, &target_user_id, &target_device_id, &body.event_type, - serde_json::from_str(event.get()).unwrap(), + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, &db.globals, - ) - .unwrap(), + )? 
+ } to_device::DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { - db.users - .add_to_device_event( - user_id, - &target_user_id, - &target_device_id.unwrap(), - &body.event_type, - serde_json::from_str(event.get()).unwrap(), - &db.globals, - ) - .unwrap(); + db.users.add_to_device_event( + user_id, + &target_user_id, + &target_device_id?, + &body.event_type, + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + &db.globals, + )?; } } } } } - MatrixResult(Ok(send_event_to_device::Response)) + Ok(send_event_to_device::Response.into()) } #[get("/_matrix/media/r0/config")] -pub fn get_media_config_route() -> MatrixResult { - MatrixResult(Ok(get_media_config::Response { +pub fn get_media_config_route() -> ConduitResult { + Ok(get_media_config::Response { upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB - })) + } + .into()) } #[post("/_matrix/media/r0/upload", data = "")] pub fn create_content_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let mxc = format!( "mxc://{}/{}", db.globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media - .create( - mxc.clone(), - body.filename.as_ref(), - &body.content_type, - &body.file, - ) - .unwrap(); + db.media.create( + mxc.clone(), + body.filename.as_ref(), + &body.content_type, + &body.file, + )?; - MatrixResult(Ok(create_content::Response { content_uri: mxc })) + Ok(create_content::Response { content_uri: mxc }.into()) } #[get( @@ -2775,23 +2692,19 @@ pub fn get_content_route( body: Ruma, _server_name: String, _media_id: String, -) -> MatrixResult { +) -> ConduitResult { if let Some((filename, content_type, file)) = db .media - .get(format!("mxc://{}/{}", body.server_name, body.media_id)) - .unwrap() + .get(format!("mxc://{}/{}", body.server_name, body.media_id))? { - MatrixResult(Ok(get_content::Response { + Ok(get_content::Response { file, content_type, content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional - })) + } + .into()) } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Media not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } } @@ -2804,23 +2717,19 @@ pub fn get_content_thumbnail_route( body: Ruma, _server_name: String, _media_id: String, -) -> MatrixResult { - if let Some((_, content_type, file)) = db - .media - .get_thumbnail( - format!("mxc://{}/{}", body.server_name, body.media_id), - body.width.try_into().unwrap(), - body.height.try_into().unwrap(), - ) - .unwrap() - { - MatrixResult(Ok(get_content_thumbnail::Response { file, content_type })) +) -> ConduitResult { + if let Some((_, content_type, file)) = db.media.get_thumbnail( + format!("mxc://{}/{}", body.server_name, body.media_id), + body.width + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + body.height + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + )? 
{ + Ok(get_content_thumbnail::Response { file, content_type }.into()) } else { - MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Media not found.".to_owned(), - status_code: http::StatusCode::BAD_REQUEST, - })) + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } } @@ -2828,16 +2737,16 @@ pub fn get_content_thumbnail_route( pub fn get_devices_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let devices = db .users .all_devices_metadata(user_id) - .map(|r| r.unwrap()) + .filter_map(|r| r.ok()) // Filter out buggy devices .collect::>(); - MatrixResult(Ok(get_devices::Response { devices })) + Ok(get_devices::Response { devices }.into()) } #[get("/_matrix/client/r0/devices/<_device_id>", data = "")] @@ -2845,22 +2754,15 @@ pub fn get_device_route( db: State<'_, Database>, body: Ruma, _device_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device = db .users - .get_device_metadata(&user_id, &body.body.device_id) - .unwrap(); + .get_device_metadata(&user_id, &body.body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - match device { - None => MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Device not found".to_string(), - status_code: http::StatusCode::NOT_FOUND, - })), - Some(device) => MatrixResult(Ok(get_device::Response { device })), - } + Ok(get_device::Response { device }.into()) } #[put("/_matrix/client/r0/devices/<_device_id>", data = "")] @@ -2868,30 +2770,20 @@ pub fn update_device_route( db: State<'_, Database>, body: Ruma, _device_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let device = db + let mut device = db .users - .get_device_metadata(&user_id, &body.body.device_id) - .unwrap(); + .get_device_metadata(&user_id, &body.body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - match device { - None => MatrixResult(Err(Error { - kind: ErrorKind::NotFound, - message: "Device not found".to_string(), - status_code: http::StatusCode::NOT_FOUND, - })), - Some(mut device) => { - device.display_name = body.display_name.clone(); + device.display_name = body.display_name.clone(); - db.users - .update_device_metadata(&user_id, &body.body.device_id, &device) - .unwrap(); + db.users + .update_device_metadata(&user_id, &body.body.device_id, &device)?; - MatrixResult(Ok(update_device::Response)) - } - } + Ok(update_device::Response.into()) } #[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] @@ -2899,7 +2791,7 @@ pub fn delete_device_route( db: State<'_, Database>, body: Ruma, _device_id: String, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -2915,84 +2807,77 @@ pub fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db - .uiaa - .try_auth( - &user_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - ) - .unwrap(); + let (worked, uiaainfo) = db.uiaa.try_auth( + &user_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; if !worked { - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, &device_id, &uiaainfo).unwrap(); - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); } - db.users - .remove_device(&user_id, &body.body.device_id) - .unwrap(); + db.users.remove_device(&user_id, &body.body.device_id)?; - MatrixResult(Ok(delete_device::Response)) + Ok(delete_device::Response.into()) } #[post("/_matrix/client/r0/delete_devices", data = "")] pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, -) -> MatrixResult { +) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA - let uiaainfo = UiaaInfo { + let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec!["m.login.password".to_owned()], }], completed: Vec::new(), params: Default::default(), - session: Some(utils::random_string(SESSION_ID_LENGTH)), + session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db - .uiaa - .try_auth( - &user_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - ) - .unwrap(); + let (worked, uiaainfo) = db.uiaa.try_auth( + &user_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; if !worked { - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + return Err(Error::Uiaa(uiaainfo)); } // Success! } else { - db.uiaa.create(&user_id, &device_id, &uiaainfo).unwrap(); - return MatrixResult(Err(UiaaResponse::AuthResponse(uiaainfo))); + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); } for device_id in &body.devices { - db.users.remove_device(&user_id, &device_id).unwrap() + db.users.remove_device(&user_id, &device_id)? } - MatrixResult(Ok(delete_devices::Response)) + Ok(delete_devices::Response.into()) } #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, -) -> MatrixResult { - MatrixResult(Ok(send_event_to_device::Response)) +) -> ConduitResult { + Ok(send_event_to_device::Response.into()) } diff --git a/src/database.rs b/src/database.rs index 34af8fc..2cc01ea 100644 --- a/src/database.rs +++ b/src/database.rs @@ -6,6 +6,7 @@ pub(self) mod rooms; pub(self) mod uiaa; pub(self) mod users; +use crate::{Error, Result}; use directories::ProjectDirs; use log::info; use std::fs::remove_dir_all; @@ -25,84 +26,92 @@ pub struct Database { impl Database { /// Tries to remove the old database but ignores all errors. - pub fn try_remove(server_name: &str) { + pub fn try_remove(server_name: &str) -> Result<()> { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .unwrap() + .ok_or(Error::BadConfig( + "The OS didn't return a valid home directory path.", + ))? .data_dir() .to_path_buf(); path.push(server_name); let _ = remove_dir_all(path); + + Ok(()) } /// Load an existing database or create a new one. 
- pub fn load_or_create(config: &Config) -> Self { + pub fn load_or_create(config: &Config) -> Result { let server_name = config.get_str("server_name").unwrap_or("localhost"); let path = config .get_str("database_path") - .map(|x| x.to_owned()) + .map(|x| Ok::<_, Error>(x.to_owned())) .unwrap_or_else(|_| { let path = ProjectDirs::from("xyz", "koesters", "conduit") - .unwrap() + .ok_or(Error::BadConfig( + "The OS didn't return a valid home directory path.", + ))? .data_dir() .join(server_name); - path.to_str().unwrap().to_owned() - }); - let db = sled::open(&path).unwrap(); + Ok(path + .to_str() + .ok_or(Error::BadConfig("Database path contains invalid unicode."))? + .to_owned()) + })?; + + let db = sled::open(&path)?; info!("Opened sled database at {}", path); - Self { - globals: globals::Globals::load(db.open_tree("global").unwrap(), config), + Ok(Self { + globals: globals::Globals::load(db.open_tree("global")?, config)?, users: users::Users { - userid_password: db.open_tree("userid_password").unwrap(), - userid_displayname: db.open_tree("userid_displayname").unwrap(), - userid_avatarurl: db.open_tree("userid_avatarurl").unwrap(), - userdeviceid_token: db.open_tree("userdeviceid_token").unwrap(), - userdeviceid_metadata: db.open_tree("userdeviceid_metadata").unwrap(), - token_userdeviceid: db.open_tree("token_userdeviceid").unwrap(), - onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys").unwrap(), - userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys").unwrap(), - devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid").unwrap(), - todeviceid_events: db.open_tree("todeviceid_events").unwrap(), + userid_password: db.open_tree("userid_password")?, + userid_displayname: db.open_tree("userid_displayname")?, + userid_avatarurl: db.open_tree("userid_avatarurl")?, + userdeviceid_token: db.open_tree("userdeviceid_token")?, + userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?, + token_userdeviceid: db.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, + userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys")?, + devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid")?, + todeviceid_events: db.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { - userdeviceid_uiaainfo: db.open_tree("userdeviceid_uiaainfo").unwrap(), + userdeviceid_uiaainfo: db.open_tree("userdeviceid_uiaainfo")?, }, rooms: rooms::Rooms { edus: rooms::RoomEdus { - roomuserid_lastread: db.open_tree("roomuserid_lastread").unwrap(), // "Private" read receipt - roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest").unwrap(), // Read receipts - roomactiveid_userid: db.open_tree("roomactiveid_userid").unwrap(), // Typing notifs - roomid_lastroomactiveupdate: db - .open_tree("roomid_lastroomactiveupdate") - .unwrap(), + roomuserid_lastread: db.open_tree("roomuserid_lastread")?, // "Private" read receipt + roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest")?, // Read receipts + roomactiveid_userid: db.open_tree("roomactiveid_userid")?, // Typing notifs + roomid_lastroomactiveupdate: db.open_tree("roomid_lastroomactiveupdate")?, }, - pduid_pdu: db.open_tree("pduid_pdu").unwrap(), - eventid_pduid: db.open_tree("eventid_pduid").unwrap(), - roomid_pduleaves: db.open_tree("roomid_pduleaves").unwrap(), - roomstateid_pdu: db.open_tree("roomstateid_pdu").unwrap(), + pduid_pdu: db.open_tree("pduid_pdu")?, + eventid_pduid: db.open_tree("eventid_pduid")?, + roomid_pduleaves: 
db.open_tree("roomid_pduleaves")?, + roomstateid_pdu: db.open_tree("roomstateid_pdu")?, - alias_roomid: db.open_tree("alias_roomid").unwrap(), - aliasid_alias: db.open_tree("alias_roomid").unwrap(), - publicroomids: db.open_tree("publicroomids").unwrap(), + alias_roomid: db.open_tree("alias_roomid")?, + aliasid_alias: db.open_tree("alias_roomid")?, + publicroomids: db.open_tree("publicroomids")?, - userroomid_joined: db.open_tree("userroomid_joined").unwrap(), - roomuserid_joined: db.open_tree("roomuserid_joined").unwrap(), - userroomid_invited: db.open_tree("userroomid_invited").unwrap(), - roomuserid_invited: db.open_tree("roomuserid_invited").unwrap(), - userroomid_left: db.open_tree("userroomid_left").unwrap(), + userroomid_joined: db.open_tree("userroomid_joined")?, + roomuserid_joined: db.open_tree("roomuserid_joined")?, + userroomid_invited: db.open_tree("userroomid_invited")?, + roomuserid_invited: db.open_tree("roomuserid_invited")?, + userroomid_left: db.open_tree("userroomid_left")?, }, account_data: account_data::AccountData { - roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata").unwrap(), + roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, }, global_edus: global_edus::GlobalEdus { - presenceid_presence: db.open_tree("presenceid_presence").unwrap(), // Presence + presenceid_presence: db.open_tree("presenceid_presence")?, // Presence }, media: media::Media { - mediaid_file: db.open_tree("mediaid_file").unwrap(), + mediaid_file: db.open_tree("mediaid_file")?, }, _db: db, - } + }) } } diff --git a/src/database/account_data.rs b/src/database/account_data.rs index f09b4c5..f7c564d 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,5 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ + api::client::error::ErrorKind, events::{collections::only::Event as EduEvent, EventJson, EventType}, identifiers::{RoomId, UserId}, }; @@ -20,7 +21,10 @@ impl AccountData { globals: &super::globals::Globals, ) -> Result<()> { if json.get("content").is_none() { - return Err(Error::BadRequest("json needs to have a content field")); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Json needs to have a content field.", + )); } json.insert("type".to_owned(), kind.to_string().into()); @@ -62,9 +66,10 @@ impl AccountData { key.push(0xff); key.extend_from_slice(kind.to_string().as_bytes()); - self.roomuserdataid_accountdata - .insert(key, &*serde_json::to_string(&json)?) - .unwrap(); + self.roomuserdataid_accountdata.insert( + key, + &*serde_json::to_string(&json).expect("Map::to_string always works"), + )?; Ok(()) } @@ -109,17 +114,22 @@ impl AccountData { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - EventType::try_from(utils::string_from_bytes( - k.rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("roomuserdataid is invalid"))?, - )?) 
- .map_err(|_| Error::BadDatabase("roomuserdataid is invalid"))?, - serde_json::from_slice::>(&v).unwrap(), + EventType::try_from( + utils::string_from_bytes( + k.rsplit(|&b| b == 0xff) + .next() + .ok_or(Error::BadDatabase("RoomUserData ID in db is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("RoomUserData ID in db is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("RoomUserData ID in db is invalid."))?, + serde_json::from_slice::>(&v).map_err(|_| { + Error::BadDatabase("Database contains invalid account data.") + })?, )) }) { - let (kind, data) = r.unwrap(); + let (kind, data) = r?; userdata.insert(kind, data); } diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index e9c6d23..7d1ac20 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -1,4 +1,4 @@ -use crate::Result; +use crate::{Error, Result}; use ruma::events::EventJson; pub struct GlobalEdus { @@ -21,7 +21,10 @@ impl GlobalEdus { .rev() .filter_map(|r| r.ok()) .find(|key| { - key.rsplit(|&b| b == 0xff).next().unwrap() == presence.sender.to_string().as_bytes() + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element") + == presence.sender.to_string().as_bytes() }) { // This is the old global_latest @@ -32,8 +35,10 @@ impl GlobalEdus { presence_id.push(0xff); presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); - self.presenceid_presence - .insert(presence_id, &*serde_json::to_string(&presence)?)?; + self.presenceid_presence.insert( + presence_id, + &*serde_json::to_string(&presence).expect("PresenceEvent can be serialized"), + )?; Ok(()) } @@ -50,6 +55,9 @@ impl GlobalEdus { .presenceid_presence .range(&*first_possible_edu..) .filter_map(|r| r.ok()) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + .map(|(_, v)| { + Ok(serde_json::from_slice(&v) + .map_err(|_| Error::BadDatabase("Invalid presence event in db."))?) + })) } } diff --git a/src/database/globals.rs b/src/database/globals.rs index 08ab411..32cddd8 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,4 +1,4 @@ -use crate::{utils, Result}; +use crate::{utils, Error, Result}; pub const COUNTER: &str = "c"; @@ -11,17 +11,16 @@ pub struct Globals { } impl Globals { - pub fn load(globals: sled::Tree, config: &rocket::Config) -> Self { + pub fn load(globals: sled::Tree, config: &rocket::Config) -> Result { let keypair = ruma::signatures::Ed25519KeyPair::new( &*globals - .update_and_fetch("keypair", utils::generate_keypair) - .unwrap() - .unwrap(), + .update_and_fetch("keypair", utils::generate_keypair)? + .expect("utils::generate_keypair always returns Some"), "key1".to_owned(), ) - .unwrap(); + .map_err(|_| Error::BadDatabase("Private or public keys are invalid."))?; - Self { + Ok(Self { globals, keypair, reqwest_client: reqwest::Client::new(), @@ -30,7 +29,7 @@ impl Globals { .unwrap_or("localhost") .to_owned(), registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), - } + }) } /// Returns this server's keypair. @@ -49,14 +48,15 @@ impl Globals { .globals .update_and_fetch(COUNTER, utils::increment)? .expect("utils::increment will always put in a value"), - )) + ) + .map_err(|_| Error::BadDatabase("Count has invalid bytes."))?) } pub fn current_count(&self) -> Result { - Ok(self - .globals - .get(COUNTER)? - .map_or(0_u64, |bytes| utils::u64_from_bytes(&bytes))) + self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { + Ok(utils::u64_from_bytes(&bytes) + .map_err(|_| Error::BadDatabase("Count has invalid bytes."))?) 
+ }) } pub fn server_name(&self) -> &str { diff --git a/src/database/media.rs b/src/database/media.rs index c64fd0b..f70e924 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -43,16 +43,21 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?, - )?; + .ok_or(Error::BadDatabase("Invalid Media ID in db"))?, + ) + .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?; + .ok_or(Error::BadDatabase("Media ID in db is invalid."))?; + let filename = if filename_bytes.is_empty() { None } else { - Some(utils::string_from_bytes(filename_bytes)?) + Some( + utils::string_from_bytes(filename_bytes) + .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, + ) }; Ok(Some((filename, content_type, file.to_vec()))) @@ -89,16 +94,21 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?, - )?; + .ok_or(Error::BadDatabase("Invalid Media ID in db"))?, + ) + .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?; + .ok_or(Error::BadDatabase("Media ID in db is invalid."))?; + let filename = if filename_bytes.is_empty() { None } else { - Some(utils::string_from_bytes(filename_bytes)?) + Some( + utils::string_from_bytes(filename_bytes) + .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, + ) }; Ok(Some((filename, content_type, file.to_vec()))) @@ -110,16 +120,21 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?, - )?; + .ok_or(Error::BadDatabase("Media ID in db is invalid"))?, + ) + .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("mediaid is invalid"))?; + .ok_or(Error::BadDatabase("Media ID in db is invalid"))?; + let filename = if filename_bytes.is_empty() { None } else { - Some(utils::string_from_bytes(filename_bytes)?) + Some( + utils::string_from_bytes(filename_bytes) + .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, + ) }; if let Ok(image) = image::load_from_memory(&file) { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fa422de..799a7cb 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -5,6 +5,7 @@ pub use edus::RoomEdus; use crate::{utils, Error, PduEvent, Result}; use log::error; use ruma::{ + api::client::error::ErrorKind, events::{ room::{ join_rules, member, @@ -61,30 +62,34 @@ impl Rooms { .roomstateid_pdu .scan_prefix(&room_id.to_string().as_bytes()) .values() - .map(|value| Ok::<_, Error>(serde_json::from_slice::(&value?)?)) + .map(|value| { + Ok::<_, Error>( + serde_json::from_slice::(&value?) + .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + ) + }) { let pdu = pdu?; - hashmap.insert( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("state events have a state key"), - ), - pdu, - ); + let state_key = pdu.state_key.clone().ok_or(Error::BadDatabase( + "Room state contains event without state_key.", + ))?; + hashmap.insert((pdu.kind.clone(), state_key), pdu); } Ok(hashmap) } /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - Ok(self - .eventid_pduid + self.eventid_pduid .get(event_id.to_string().as_bytes())? 
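// The recurring shape in this patch for reading an optional value out of a tree: a missing
// key stays Ok(None), while a present-but-corrupt value now becomes a database error instead
// of a panic. A minimal stand-alone illustration using plain std types; `DbError` is a
// stand-in for the crate's Error type and the Option argument stands in for a sled `get` result.
use std::convert::TryInto;

#[derive(Debug)]
pub enum DbError {
    BadDatabase(&'static str),
}

pub fn read_count(raw: Option<Vec<u8>>) -> Result<Option<u64>, DbError> {
    raw.map_or(Ok(None), |bytes| {
        let array: [u8; 8] = bytes
            .as_slice()
            .try_into()
            .map_err(|_| DbError::BadDatabase("Count has invalid bytes."))?;
        Ok(Some(u64::from_be_bytes(array)))
    })
}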
- .map(|pdu_id| { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) - })) + .map_or(Ok(None), |pdu_id| { + Ok(Some( + utils::u64_from_bytes( + &pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()], + ) + .map_err(|_| Error::BadDatabase("PDU has invalid count bytes."))?, + )) + }) } /// Returns the json of a pdu. @@ -92,11 +97,12 @@ impl Rooms { self.eventid_pduid .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { - Ok(Some(serde_json::from_slice( - &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( - "eventid_pduid points to nonexistent pdu", - ))?, - )?)) + Ok(Some( + serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or( + Error::BadDatabase("eventid_pduid points to nonexistent pdu."), + )?) + .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + )) }) } @@ -112,28 +118,37 @@ impl Rooms { self.eventid_pduid .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { - Ok(Some(serde_json::from_slice( - &self.pduid_pdu.get(pdu_id)?.ok_or(Error::BadDatabase( - "eventid_pduid points to nonexistent pdu", - ))?, - )?)) + Ok(Some( + serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or( + Error::BadDatabase("eventid_pduid points to nonexistent pdu."), + )?) + .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + )) }) } /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { - self.pduid_pdu - .get(pdu_id)? - .map_or(Ok(None), |pdu| Ok(Some(serde_json::from_slice(&pdu)?))) + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + )) + }) } - /// Returns the pdu. - pub fn replace_pdu(&self, pdu_id: &IVec, pdu: &PduEvent) -> Result<()> { + /// Removes a pdu and creates a new one with the same id. + fn replace_pdu(&self, pdu_id: &IVec, pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(&pdu_id)?.is_some() { - self.pduid_pdu - .insert(&pdu_id, &*serde_json::to_string(pdu)?)?; + self.pduid_pdu.insert( + &pdu_id, + &*serde_json::to_string(pdu).expect("PduEvent::to_string always works"), + )?; Ok(()) } else { - Err(Error::BadRequest("pdu does not exist")) + Err(Error::BadRequest( + ErrorKind::NotFound, + "PDU does not exist.", + )) } } @@ -148,7 +163,12 @@ impl Rooms { .roomid_pduleaves .scan_prefix(prefix) .values() - .map(|bytes| Ok::<_, Error>(EventId::try_from(&*utils::string_from_bytes(&bytes?)?)?)) + .map(|bytes| { + Ok::<_, Error>( + serde_json::from_slice(&bytes?) + .map_err(|_| Error::BadDatabase("Invalid EventID in roomid_pduleaves."))?, + ) + }) { events.push(event?); } @@ -214,174 +234,203 @@ impl Rooms { Ok( serde_json::from_value::>( power_levels.content.clone(), - )? - .deserialize()?, + ) + .expect("EventJson::from_value always works.") + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid PowerLevels event in db."))?, ) }, )?; - { - let sender_membership = self - .room_state(&room_id)? - .get(&(EventType::RoomMember, sender.to_string())) - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>( - pdu.content.clone(), - )? - .deserialize()? - .membership, + let sender_membership = self + .room_state(&room_id)? + .get(&(EventType::RoomMember, sender.to_string())) + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok( + serde_json::from_value::>( + pdu.content.clone(), + ) + .expect("EventJson::from_value always works.") + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? 
+ .membership, + ) + })?; + + let sender_power = power_levels.users.get(&sender).map_or_else( + || { + if sender_membership != member::MembershipState::Join { + None + } else { + Some(&power_levels.users_default) + } + }, + // If it's okay, wrap with Some(_) + Some, + ); + + if !match event_type { + EventType::RoomMember => { + let target_user_id = UserId::try_from(&**state_key).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "State key of member event does not contain user id.", ) })?; - let sender_power = power_levels.users.get(&sender).map_or_else( - || { - if sender_membership != member::MembershipState::Join { - None - } else { - Some(&power_levels.users_default) - } - }, - // If it's okay, wrap with Some(_) - Some, - ); + let current_membership = self + .room_state(&room_id)? + .get(&(EventType::RoomMember, target_user_id.to_string())) + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok( + serde_json::from_value::>( + pdu.content.clone(), + ) + .expect("EventJson::from_value always works.") + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? + .membership, + ) + })?; - if !match event_type { - EventType::RoomMember => { - let target_user_id = UserId::try_from(&**state_key)?; + let target_membership = serde_json::from_value::< + EventJson, + >(content.clone()) + .expect("EventJson::from_value always works.") + .deserialize() + .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? + .membership; - let current_membership = self - .room_state(&room_id)? - .get(&(EventType::RoomMember, target_user_id.to_string())) - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok(serde_json::from_value::< - EventJson, - >(pdu.content.clone())? - .deserialize()? - .membership) - })?; + let target_power = power_levels.users.get(&target_user_id).map_or_else( + || { + if target_membership != member::MembershipState::Join { + None + } else { + Some(&power_levels.users_default) + } + }, + // If it's okay, wrap with Some(_) + Some, + ); - let target_membership = serde_json::from_value::< - EventJson, - >(content.clone())? - .deserialize()? - .membership; - - let target_power = power_levels.users.get(&target_user_id).map_or_else( - || { - if target_membership != member::MembershipState::Join { - None - } else { - Some(&power_levels.users_default) - } - }, - // If it's okay, wrap with Some(_) - Some, - ); - - let join_rules = self - .room_state(&room_id)? + let join_rules = + self.room_state(&room_id)? .get(&(EventType::RoomJoinRules, "".to_owned())) - .map_or(join_rules::JoinRule::Public, |pdu| { - serde_json::from_value::< + .map_or(Ok::<_, Error>(join_rules::JoinRule::Public), |pdu| { + Ok(serde_json::from_value::< EventJson, >(pdu.content.clone()) - .unwrap() + .expect("EventJson::from_value always works.") .deserialize() - .unwrap() - .join_rule - }); + .map_err(|_| { + Error::BadDatabase("Database contains invalid JoinRules event") + })? + .join_rule) + })?; - let authorized = if target_membership == member::MembershipState::Join { - let mut prev_events = prev_events.iter(); - let prev_event = self - .get_pdu(prev_events.next().ok_or(Error::BadRequest( - "membership can't be the first event", - ))?)? 
- .ok_or(Error::BadDatabase("pdu leave points to valid event"))?; - if prev_event.kind == EventType::RoomCreate - && prev_event.prev_events.is_empty() - { - true - } else if sender != target_user_id { - false - } else if let member::MembershipState::Ban = current_membership { - false - } else { - join_rules == join_rules::JoinRule::Invite - && (current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Invite) - || join_rules == join_rules::JoinRule::Public - } - } else if target_membership == member::MembershipState::Invite { - if let Some(third_party_invite_json) = content.get("third_party_invite") - { - if current_membership == member::MembershipState::Ban { - false - } else { - let _third_party_invite = - serde_json::from_value::( - third_party_invite_json.clone(), - )?; - todo!("handle third party invites"); - } - } else if sender_membership != member::MembershipState::Join - || current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Ban - { - false - } else { - sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some() - } - } else if target_membership == member::MembershipState::Leave { - if sender == target_user_id { - current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Invite - } else if sender_membership != member::MembershipState::Join - || current_membership == member::MembershipState::Ban - && sender_power.filter(|&p| p < &power_levels.ban).is_some() - { - false - } else { - sender_power.filter(|&p| p >= &power_levels.kick).is_some() - && target_power < sender_power - } - } else if target_membership == member::MembershipState::Ban { - if sender_membership != member::MembershipState::Join { - false - } else { - sender_power.filter(|&p| p >= &power_levels.ban).is_some() - && target_power < sender_power - } - } else { + let authorized = if target_membership == member::MembershipState::Join { + let mut prev_events = prev_events.iter(); + let prev_event = self + .get_pdu(prev_events.next().ok_or(Error::BadRequest( + ErrorKind::Unknown, + "Membership can't be the first event", + ))?)? 
+ .ok_or(Error::BadDatabase("PDU leaf points to invalid event!"))?; + if prev_event.kind == EventType::RoomCreate + && prev_event.prev_events.is_empty() + { + true + } else if sender != target_user_id { false - }; - - if authorized { - // Update our membership info - self.update_membership(&room_id, &target_user_id, &target_membership)?; + } else if let member::MembershipState::Ban = current_membership { + false + } else { + join_rules == join_rules::JoinRule::Invite + && (current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Invite) + || join_rules == join_rules::JoinRule::Public } + } else if target_membership == member::MembershipState::Invite { + if let Some(third_party_invite_json) = content.get("third_party_invite") { + if current_membership == member::MembershipState::Ban { + false + } else { + let _third_party_invite = + serde_json::from_value::( + third_party_invite_json.clone(), + ) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "ThirdPartyInvite is invalid", + ) + })?; + todo!("handle third party invites"); + } + } else if sender_membership != member::MembershipState::Join + || current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Ban + { + false + } else { + sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some() + } + } else if target_membership == member::MembershipState::Leave { + if sender == target_user_id { + current_membership == member::MembershipState::Join + || current_membership == member::MembershipState::Invite + } else if sender_membership != member::MembershipState::Join + || current_membership == member::MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { + false + } else { + sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power + } + } else if target_membership == member::MembershipState::Ban { + if sender_membership != member::MembershipState::Join { + false + } else { + sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power + } + } else { + false + }; - authorized + if authorized { + // Update our membership info + self.update_membership(&room_id, &target_user_id, &target_membership)?; } - EventType::RoomCreate => prev_events.is_empty(), - // Not allow any of the following events if the sender is not joined. - _ if sender_membership != member::MembershipState::Join => false, - _ => { - // TODO - sender_power.unwrap_or(&power_levels.users_default) - >= &power_levels.state_default - } - } { - error!("Unauthorized"); - // Not authorized - return Err(Error::BadRequest("event not authorized")); + authorized } + EventType::RoomCreate => prev_events.is_empty(), + // Not allow any of the following events if the sender is not joined. + _ if sender_membership != member::MembershipState::Join => false, + + _ => { + // TODO + sender_power.unwrap_or(&power_levels.users_default) + >= &power_levels.state_default + } + } { + error!("Unauthorized"); + // Not authorized + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized", + )); } } else if !self.is_joined(&sender, &room_id)? 
{ - return Err(Error::BadRequest("event not authorized")); + // TODO: auth rules apply to all events, not only those with a state key + error!("Unauthorized"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized", + )); } // Our depth is the maximum depth of prev_events + 1 @@ -410,14 +459,14 @@ impl Rooms { origin: globals.server_name().to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() - .expect("this only fails many years in the future"), + .expect("time is valid"), kind: event_type.clone(), content: content.clone(), state_key, prev_events, depth: depth .try_into() - .expect("depth can overflow and should be deprecated..."), + .map_err(|_| Error::BadDatabase("Depth is invalid"))?, auth_events: Vec::new(), redacts: redacts.clone(), unsigned, @@ -430,18 +479,20 @@ impl Rooms { // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&serde_json::to_value(&pdu)?) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash( + &serde_json::to_value(&pdu).expect("event is valid, we just created it") + ) + .expect("ruma can calculate reference hashes") )) - .expect("ruma's reference hashes are correct"); + .expect("ruma's reference hashes are valid event ids"); - let mut pdu_json = serde_json::to_value(&pdu)?; + let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); ruma::signatures::hash_and_sign_event( globals.server_name(), globals.keypair(), &mut pdu_json, ) - .expect("our new event can be hashed and signed"); + .expect("event is valid, we just created it"); self.replace_pdu_leaves(&room_id, &pdu.event_id)?; @@ -473,8 +524,15 @@ impl Rooms { // TODO: Reason let _reason = serde_json::from_value::< EventJson, - >(content)? - .deserialize()? + >(content) + .expect("EventJson::from_value always works.") + .deserialize() + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })? .reason; self.redact_pdu(&redact_id)?; @@ -528,7 +586,10 @@ impl Rooms { }) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + .map(|(_, v)| { + Ok(serde_json::from_slice(&v) + .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + })) } /// Returns an iterator over all events in a room that happened before the event with id @@ -552,7 +613,10 @@ impl Rooms { .rev() .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + .map(|(_, v)| { + Ok(serde_json::from_slice(&v) + .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + }) } /// Returns an iterator over all events in a room that happened after the event with id @@ -575,7 +639,10 @@ impl Rooms { .range(current..) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?)) + .map(|(_, v)| { + Ok(serde_json::from_slice(&v) + .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + }) } /// Replace a PDU with the redacted form. @@ -583,12 +650,15 @@ impl Rooms { if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? 
- .ok_or(Error::BadDatabase("pduid points to invalid pdu"))?; - pdu.redact(); + .ok_or(Error::BadDatabase("PDU ID points to invalid PDU."))?; + pdu.redact()?; self.replace_pdu(&pdu_id, &pdu)?; Ok(()) } else { - Err(Error::BadRequest("eventid does not exist")) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Event ID does not exist.", + )) } } @@ -664,7 +734,10 @@ impl Rooms { let room_id = self .alias_roomid .remove(alias.alias())? - .ok_or(Error::BadRequest("Alias does not exist"))?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + ))?; for key in self.aliasid_alias.scan_prefix(room_id).keys() { self.aliasid_alias.remove(key?)?; @@ -678,7 +751,9 @@ impl Rooms { self.alias_roomid .get(alias.alias())? .map_or(Ok(None), |bytes| { - Ok(Some(RoomId::try_from(utils::string_from_bytes(&bytes)?)?)) + Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { + Error::BadDatabase("Room ID in alias_roomid is invalid.") + })?)) }) } @@ -689,7 +764,10 @@ impl Rooms { self.aliasid_alias .scan_prefix(prefix) .values() - .map(|bytes| Ok(RoomAliasId::try_from(utils::string_from_bytes(&bytes?)?)?)) + .map(|bytes| { + Ok(serde_json::from_slice(&bytes?) + .map_err(|_| Error::BadDatabase("Alias in aliasid_alias is invalid."))?) + }) } pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { @@ -707,10 +785,10 @@ impl Rooms { } pub fn public_rooms(&self) -> impl Iterator> { - self.publicroomids - .iter() - .keys() - .map(|bytes| Ok(RoomId::try_from(utils::string_from_bytes(&bytes?)?)?)) + self.publicroomids.iter().keys().map(|bytes| { + Ok(serde_json::from_slice(&bytes?) + .map_err(|_| Error::BadDatabase("Room ID in publicroomids is invalid."))?) + }) } /// Returns an iterator over all rooms a user joined. @@ -719,12 +797,13 @@ impl Rooms { .scan_prefix(room_id.to_string()) .values() .map(|key| { - Ok(UserId::try_from(&*utils::string_from_bytes( + Ok(serde_json::from_slice( &key? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userroomid is invalid"))?, - )?)?) + .ok_or(Error::BadDatabase("RoomUser ID is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("Invalid User ID in db."))?) }) } @@ -734,12 +813,13 @@ impl Rooms { .scan_prefix(room_id.to_string()) .keys() .map(|key| { - Ok(UserId::try_from(&*utils::string_from_bytes( + Ok(serde_json::from_slice( &key? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userroomid is invalid"))?, - )?)?) + .ok_or(Error::BadDatabase("RoomUser ID is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("Invalid User ID in db."))?) }) } @@ -749,12 +829,13 @@ impl Rooms { .scan_prefix(user_id.to_string()) .keys() .map(|key| { - Ok(RoomId::try_from(&*utils::string_from_bytes( + Ok(serde_json::from_slice( &key? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userroomid is invalid"))?, - )?)?) + .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) }) } @@ -764,12 +845,13 @@ impl Rooms { .scan_prefix(&user_id.to_string()) .keys() .map(|key| { - Ok(RoomId::try_from(&*utils::string_from_bytes( + Ok(serde_json::from_slice( &key? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userroomid is invalid"))?, - )?)?) + .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) }) } @@ -779,12 +861,13 @@ impl Rooms { .scan_prefix(&user_id.to_string()) .keys() .map(|key| { - Ok(RoomId::try_from(&*utils::string_from_bytes( + Ok(serde_json::from_slice( &key? 
.rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userroomid is invalid"))?, - )?)?) + .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) }) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 5d04639..c8b03aa 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -33,7 +33,10 @@ impl RoomEdus { .filter_map(|r| r.ok()) .take_while(|key| key.starts_with(&prefix)) .find(|key| { - key.rsplit(|&b| b == 0xff).next().unwrap() == user_id.to_string().as_bytes() + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element") + == user_id.to_string().as_bytes() }) { // This is the old room_latest @@ -45,8 +48,10 @@ impl RoomEdus { room_latest_id.push(0xff); room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); - self.roomlatestid_roomlatest - .insert(room_latest_id, &*serde_json::to_string(&event)?)?; + self.roomlatestid_roomlatest.insert( + room_latest_id, + &*serde_json::to_string(&event).expect("EduEvent::to_string always works"), + )?; Ok(()) } @@ -68,7 +73,10 @@ impl RoomEdus { .range(&*first_possible_edu..) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| Ok(serde_json::from_slice(&v)?))) + .map(|(_, v)| { + Ok(serde_json::from_slice(&v) + .map_err(|_| Error::BadDatabase("Read receipt in db is invalid."))?) + })) } /// Sets a user as typing until the timeout timestamp is reached or roomactive_remove is @@ -152,17 +160,21 @@ impl RoomEdus { .roomactiveid_userid .scan_prefix(&prefix) .keys() - .filter_map(|r| r.ok()) - .take_while(|k| { - utils::u64_from_bytes( - k.split(|&c| c == 0xff) - .nth(1) - .expect("roomactive has valid timestamp and delimiters"), - ) < current_timestamp + .map(|key| { + let key = key?; + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or( + Error::BadDatabase("RoomActive has invalid timestamp or delimiters."), + )?) + .map_err(|_| Error::BadDatabase("RoomActive has invalid timestamp bytes."))?, + )) }) + .filter_map(|r| r.ok()) + .take_while(|&(_, timestamp)| timestamp < current_timestamp) { // This is an outdated edu (time > timestamp) - self.roomactiveid_userid.remove(outdated_edu)?; + self.roomactiveid_userid.remove(outdated_edu.0)?; found_outdated = true; } @@ -187,7 +199,11 @@ impl RoomEdus { Ok(self .roomid_lastroomactiveupdate .get(&room_id.to_string().as_bytes())? - .map(|bytes| utils::u64_from_bytes(&bytes)) + .map_or(Ok::<_, Error>(None), |bytes| { + Ok(Some( + utils::u64_from_bytes(&bytes).map_err(|_| Error::BadDatabase(""))?, + )) + })? .unwrap_or(0)) } @@ -202,7 +218,11 @@ impl RoomEdus { .roomactiveid_userid .scan_prefix(prefix) .values() - .map(|user_id| Ok::<_, Error>(UserId::try_from(utils::string_from_bytes(&user_id?)?)?)) + .map(|user_id| { + Ok::<_, Error>(serde_json::from_slice(&user_id?).map_err(|_| { + Error::BadDatabase("User ID in roomactiveid_userid is invalid.") + })?) + }) { user_ids.push(user_id?); } @@ -230,9 +250,10 @@ impl RoomEdus { key.push(0xff); key.extend_from_slice(&user_id.to_string().as_bytes()); - Ok(self - .roomuserid_lastread - .get(key)? 
- .map(|v| utils::u64_from_bytes(&v))) + self.roomuserid_lastread.get(key)?.map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::BadDatabase("Invalid private read marker bytes") + })?)) + }) } } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 9851e84..0ae2ea4 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -43,15 +43,51 @@ impl Uiaa { // Find out what the user completed match &**kind { "m.login.password" => { - if auth_parameters["identifier"]["type"] != "m.id.user" { - panic!("identifier not supported"); + let identifier = auth_parameters.get("identifier").ok_or(Error::BadRequest( + ErrorKind::MissingParam, + "m.login.password needs identifier.", + ))?; + + let identifier_type = identifier.get("type").ok_or(Error::BadRequest( + ErrorKind::MissingParam, + "Identifier needs a type.", + ))?; + + if identifier_type != "m.id.user" { + return Err(Error::BadRequest( + ErrorKind::Unrecognized, + "Identifier type not recognized.", + )); } - let user_id = UserId::parse_with_server_name( - auth_parameters["identifier"]["user"].as_str().unwrap(), - globals.server_name(), - )?; - let password = auth_parameters["password"].as_str().unwrap(); + let username = identifier + .get("user") + .ok_or(Error::BadRequest( + ErrorKind::MissingParam, + "Identifier needs user field.", + ))? + .as_str() + .ok_or(Error::BadRequest( + ErrorKind::BadJson, + "User is not a string.", + ))?; + + let user_id = UserId::parse_with_server_name(username, globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") + })?; + + let password = auth_parameters + .get("password") + .ok_or(Error::BadRequest( + ErrorKind::MissingParam, + "Password is missing.", + ))? + .as_str() + .ok_or(Error::BadRequest( + ErrorKind::BadJson, + "Password is not a string.", + ))?; // Check if password is correct if let Some(hash) = users.password_hash(&user_id)? { @@ -59,7 +95,6 @@ impl Uiaa { argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if !hash_matches { - debug!("Invalid password."); uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { kind: ErrorKind::Forbidden, message: "Invalid username or password.".to_owned(), @@ -113,8 +148,10 @@ impl Uiaa { userdeviceid.extend_from_slice(device_id.as_bytes()); if let Some(uiaainfo) = uiaainfo { - self.userdeviceid_uiaainfo - .insert(&userdeviceid, &*serde_json::to_string(&uiaainfo)?)?; + self.userdeviceid_uiaainfo.insert( + &userdeviceid, + &*serde_json::to_string(&uiaainfo).expect("UiaaInfo::to_string always works"), + )?; } else { self.userdeviceid_uiaainfo.remove(&userdeviceid)?; } @@ -136,8 +173,12 @@ impl Uiaa { &self .userdeviceid_uiaainfo .get(&userdeviceid)? - .ok_or(Error::BadRequest("session does not exist"))?, - )?; + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "UIAA session does not exist.", + ))?, + ) + .map_err(|_| Error::BadDatabase("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; if uiaainfo .session @@ -145,7 +186,10 @@ impl Uiaa { .filter(|&s| s == session) .is_none() { - return Err(Error::BadRequest("wrong session token")); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "UIAA session token invalid.", + )); } Ok(uiaainfo) diff --git a/src/database/users.rs b/src/database/users.rs index 5c47455..b70c2de 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -43,24 +43,29 @@ impl Users { .get(token)? 
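// Shape of the composite keys and values used throughout these trees: two fields are packed
// into one byte string with 0xff as the separator (user id + 0xff + device id here), and
// reading them back means splitting on that separator and validating both halves. 0xff can
// never occur inside valid UTF-8, which is what makes it safe as a separator. A tiny
// stand-alone illustration, not the actual Conduit helpers.
pub fn pack_user_device(user_id: &str, device_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(device_id.as_bytes());
    key
}

pub fn unpack_user_device(bytes: &[u8]) -> Option<(String, String)> {
    let mut parts = bytes.splitn(2, |&b| b == 0xff);
    let user = String::from_utf8(parts.next()?.to_vec()).ok()?;
    let device = String::from_utf8(parts.next()?.to_vec()).ok()?;
    Some((user, device))
}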
.map_or(Ok(None), |bytes| { let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts - .next() - .ok_or(Error::BadDatabase("token_userdeviceid value invalid"))?; - let device_bytes = parts - .next() - .ok_or(Error::BadDatabase("token_userdeviceid value invalid"))?; + let user_bytes = parts.next().ok_or(Error::BadDatabase( + "token_userdeviceid value in db is invalid.", + ))?; + let device_bytes = parts.next().ok_or(Error::BadDatabase( + "token_userdeviceid value in db is invalid.", + ))?; Ok(Some(( - UserId::try_from(utils::string_from_bytes(&user_bytes)?)?, - utils::string_from_bytes(&device_bytes)?, + serde_json::from_slice(&user_bytes).map_err(|_| { + Error::BadDatabase("User ID in token_userdeviceid is invalid.") + })?, + utils::string_from_bytes(&device_bytes).map_err(|_| { + Error::BadDatabase("Device ID in token_userdeviceid is invalid.") + })?, ))) }) } /// Returns an iterator over all users on this homeserver. pub fn iter(&self) -> impl Iterator> { - self.userid_password.iter().keys().map(|r| { - utils::string_from_bytes(&r?).and_then(|string| Ok(UserId::try_from(&*string)?)) + self.userid_password.iter().keys().map(|bytes| { + Ok(serde_json::from_slice(&bytes?) + .map_err(|_| Error::BadDatabase("User ID bytes in db are invalid."))?) }) } @@ -68,14 +73,22 @@ impl Users { pub fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.to_string())? - .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + .map_or(Ok(None), |bytes| { + Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { + Error::BadDatabase("Password hash in db is not valid string.") + })?)) + }) } /// Returns the displayname of a user on this homeserver. pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.to_string())? - .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + .map_or(Ok(None), |bytes| { + Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { + Error::BadDatabase("Displayname in db is invalid.") + })?)) + }) } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. @@ -94,7 +107,11 @@ impl Users { pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.to_string())? - .map_or(Ok(None), |bytes| utils::string_from_bytes(&bytes).map(Some)) + .map_or(Ok(None), |bytes| { + Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { + Error::BadDatabase("Avatar URL in db is invalid.") + })?)) + }) } /// Sets a new avatar_url or removes it if avatar_url is None. @@ -117,11 +134,8 @@ impl Users { token: &str, initial_device_display_name: Option, ) -> Result<()> { - if !self.exists(user_id)? { - return Err(Error::BadRequest( - "tried to create device for nonexistent user", - )); - } + // This method should never be called for nonexistent users. + assert!(self.exists(user_id)?); let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); @@ -134,7 +148,8 @@ impl Users { display_name: initial_device_display_name, last_seen_ip: None, // TODO last_seen_ts: Some(SystemTime::now()), - })? + }) + .expect("Device::to_string never fails.") .as_bytes(), )?; @@ -185,23 +200,22 @@ impl Users { &*bytes? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, - )?) + .ok_or(Error::BadDatabase("UserDevice ID in db is invalid."))?, + ) + .map_err(|_| { + Error::BadDatabase("Device ID in userdeviceid_metadata is invalid.") + })?) 
}) } /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); // All devices have metadata - if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { - return Err(Error::BadRequest( - "Tried to set token for nonexistent device", - )); - } + assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); // Remove old token if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { @@ -228,19 +242,23 @@ impl Users { key.extend_from_slice(device_id.as_bytes()); // All devices have metadata - if self.userdeviceid_metadata.get(&key)?.is_none() { - return Err(Error::BadRequest( - "Tried to set token for nonexistent device", - )); - } + // Only existing devices should be able to call this. + assert!(self.userdeviceid_metadata.get(&key)?.is_some()); key.push(0xff); // TODO: Use AlgorithmAndDeviceId::to_string when it's available (and update everything, // because there are no wrapping quotation marks anymore) - key.extend_from_slice(&serde_json::to_string(one_time_key_key)?.as_bytes()); + key.extend_from_slice( + &serde_json::to_string(one_time_key_key) + .expect("AlgorithmAndDeviceId::to_string always works") + .as_bytes(), + ); - self.onetimekeyid_onetimekeys - .insert(&key, &*serde_json::to_string(&one_time_key_value)?)?; + self.onetimekeyid_onetimekeys.insert( + &key, + &*serde_json::to_string(&one_time_key_value) + .expect("OneTimeKey::to_string always works"), + )?; Ok(()) } @@ -271,9 +289,11 @@ impl Users { &*key .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("onetimekeyid is invalid"))?, - )?, - serde_json::from_slice(&*value)?, + .ok_or(Error::BadDatabase("OneTimeKeyId in db is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("OneTimeKeyId in db is invalid."))?, + serde_json::from_slice(&*value) + .map_err(|_| Error::BadDatabase("OneTimeKeys in db are invalid."))?, )) }) .transpose() @@ -300,8 +320,9 @@ impl Users { &*bytes? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("onetimekeyid is invalid"))?, - )? + .ok_or(Error::BadDatabase("OneTimeKey ID in db is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("AlgorithmAndDeviceID in db is invalid."))? .0, ) }) @@ -323,8 +344,10 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - self.userdeviceid_devicekeys - .insert(&userdeviceid, &*serde_json::to_string(&device_keys)?)?; + self.userdeviceid_devicekeys.insert( + &userdeviceid, + &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"), + )?; self.devicekeychangeid_userid .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; @@ -344,14 +367,21 @@ impl Users { self.userdeviceid_devicekeys .scan_prefix(key) .values() - .map(|bytes| Ok(serde_json::from_slice(&bytes?)?)) + .map(|bytes| { + Ok(serde_json::from_slice(&bytes?) + .map_err(|_| Error::BadDatabase("DeviceKeys in db are invalid."))?) + }) } pub fn device_keys_changed(&self, since: u64) -> impl Iterator> { self.devicekeychangeid_userid .range(since.to_be_bytes()..) .values() - .map(|bytes| Ok(UserId::try_from(utils::string_from_bytes(&bytes?)?)?)) + .map(|bytes| { + Ok(serde_json::from_slice(&bytes?).map_err(|_| { + Error::BadDatabase("User ID in devicekeychangeid_userid is invalid.") + })?) 
+ }) } pub fn all_device_keys( @@ -366,9 +396,14 @@ impl Users { let userdeviceid = utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("userdeviceid is invalid"))?, - )?; - Ok((userdeviceid, serde_json::from_slice(&*value)?)) + .ok_or(Error::BadDatabase("UserDeviceID in db is invalid."))?, + ) + .map_err(|_| Error::BadDatabase("UserDeviceId in db is invalid."))?; + Ok(( + userdeviceid, + serde_json::from_slice(&*value) + .map_err(|_| Error::BadDatabase("DeviceKeys in db are invalid."))?, + )) }) } @@ -392,8 +427,10 @@ impl Users { json.insert("sender".to_owned(), sender.to_string().into()); json.insert("content".to_owned(), content); - self.todeviceid_events - .insert(&key, &*serde_json::to_string(&json)?)?; + self.todeviceid_events.insert( + &key, + &*serde_json::to_string(&json).expect("Map::to_string always works"), + )?; Ok(()) } @@ -413,7 +450,10 @@ impl Users { for result in self.todeviceid_events.scan_prefix(&prefix).take(max) { let (key, value) = result?; - events.push(serde_json::from_slice(&*value)?); + events.push( + serde_json::from_slice(&*value) + .map_err(|_| Error::BadDatabase("Event in todeviceid_events is invalid."))?, + ); self.todeviceid_events.remove(key)?; } @@ -430,12 +470,15 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { - return Err(Error::BadRequest("device does not exist")); - } + // Only existing devices should be able to call this. + assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - self.userdeviceid_metadata - .insert(userdeviceid, serde_json::to_string(device)?.as_bytes())?; + self.userdeviceid_metadata.insert( + userdeviceid, + serde_json::to_string(device) + .expect("Device::to_string always works") + .as_bytes(), + )?; Ok(()) } @@ -448,7 +491,11 @@ impl Users { self.userdeviceid_metadata .get(&userdeviceid)? - .map_or(Ok(None), |bytes| Ok(Some(serde_json::from_slice(&bytes)?))) + .map_or(Ok(None), |bytes| { + Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { + Error::BadDatabase("Metadata in userdeviceid_metadata is invalid.") + })?)) + }) } pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { @@ -458,6 +505,10 @@ impl Users { self.userdeviceid_metadata .scan_prefix(key) .values() - .map(|bytes| Ok(serde_json::from_slice::(&bytes?)?)) + .map(|bytes| { + Ok(serde_json::from_slice::(&bytes?).map_err(|_| { + Error::BadDatabase("Device in userdeviceid_metadata is invalid.") + })?) 
+ }) } } diff --git a/src/error.rs b/src/error.rs index 3561d9e..3652f0a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,41 +1,79 @@ +use crate::RumaResponse; +use http::StatusCode; +use rocket::{ + response::{self, Responder}, + Request, +}; +use ruma::api::client::{ + error::{Error as RumaError, ErrorKind}, + r0::uiaa::{UiaaInfo, UiaaResponse}, +}; use thiserror::Error; pub type Result = std::result::Result; #[derive(Error, Debug)] pub enum Error { - #[error("problem with the database")] + #[error("There was a problem with the connection to the database.")] SledError { #[from] source: sled::Error, }, - #[error("tried to parse invalid string")] - StringFromBytesError { - #[from] - source: std::string::FromUtf8Error, - }, - #[error("tried to parse invalid identifier")] - SerdeJsonError { - #[from] - source: serde_json::Error, - }, - #[error("tried to parse invalid identifier")] - RumaIdentifierError { - #[from] - source: ruma::identifiers::Error, - }, - #[error("tried to parse invalid event")] - RumaEventError { - #[from] - source: ruma::events::InvalidEvent, - }, - #[error("could not generate image")] + #[error("Could not generate an image.")] ImageError { #[from] source: image::error::ImageError, }, - #[error("bad request")] - BadRequest(&'static str), - #[error("problem in that database")] + #[error("{0}")] + BadConfig(&'static str), + #[error("{0}")] BadDatabase(&'static str), + #[error("uiaa")] + Uiaa(UiaaInfo), + + #[error("{0}: {1}")] + BadRequest(ErrorKind, &'static str), + #[error("{0}")] + Conflict(&'static str), // This is only needed for when a room alias already exists +} + +#[rocket::async_trait] +impl<'r> Responder<'r> for Error { + async fn respond_to(self, r: &'r Request<'_>) -> response::Result<'r> { + if let Self::Uiaa(uiaainfo) = &self { + return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo.clone())) + .respond_to(r) + .await; + } + + let message = format!("{}", self); + + use ErrorKind::*; + let (kind, status_code) = match self { + Self::BadRequest(kind, _) => ( + kind, + match kind { + Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => { + StatusCode::FORBIDDEN + } + Unauthorized | UnknownToken | MissingToken => StatusCode::UNAUTHORIZED, + NotFound => StatusCode::NOT_FOUND, + LimitExceeded => StatusCode::TOO_MANY_REQUESTS, + UserDeactivated => StatusCode::FORBIDDEN, + TooLarge => StatusCode::PAYLOAD_TOO_LARGE, + _ => StatusCode::BAD_REQUEST, + }, + ), + Self::Conflict(_) => (Unknown, StatusCode::CONFLICT), + _ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR), + }; + + RumaResponse::from(RumaError { + kind, + message, + status_code, + }) + .respond_to(r) + .await + } } diff --git a/src/main.rs b/src/main.rs index ad9aeda..bd0f8b9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,7 +12,7 @@ mod utils; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use ruma_wrapper::{MatrixResult, Ruma}; +pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use rocket::{fairing::AdHoc, routes}; @@ -95,7 +95,7 @@ fn setup_rocket() -> rocket::Rocket { ], ) .attach(AdHoc::on_attach("Config", |rocket| { - let data = Database::load_or_create(&rocket.config()); + let data = Database::load_or_create(&rocket.config()).expect("valid config"); Ok(rocket.manage(data)) })) diff --git a/src/pdu.rs b/src/pdu.rs index 454d27f..8c8423a 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,3 +1,4 @@ +use crate::{Error, Result}; use js_int::UInt; use ruma::{ api::federation::pdu::EventHash, @@ -36,7 +37,7 @@ pub struct PduEvent { } 
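// A minimal, self-contained sketch of the content filtering that PduEvent::redact below
// performs: for a redacted event, only the top-level content keys allowed for its event
// type survive, everything else is dropped. The helper name is hypothetical; only the
// serde_json types are real.
use serde_json::{Map, Value};

pub fn redact_content(content: &Map<String, Value>, allowed: &[&str]) -> Map<String, Value> {
    let mut new_content = Map::new();
    for key in allowed {
        if let Some(value) = content.get(*key) {
            // Copy the permitted field over; anything not in `allowed` is discarded.
            new_content.insert((*key).to_string(), value.clone());
        }
    }
    new_content
}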
impl PduEvent { - pub fn redact(&mut self) { + pub fn redact(&mut self) -> Result<()> { self.unsigned.clear(); let allowed = match self.kind { EventType::RoomMember => vec!["membership"], @@ -56,7 +57,11 @@ impl PduEvent { _ => vec![], }; - let old_content = self.content.as_object_mut().unwrap(); // TODO error + let old_content = self + .content + .as_object_mut() + .ok_or(Error::BadDatabase("PDU has invalid content"))?; + let mut new_content = serde_json::Map::new(); for key in allowed { @@ -71,21 +76,23 @@ impl PduEvent { ); self.content = new_content.into(); + + Ok(()) } pub fn to_room_event(&self) -> EventJson { - // Can only fail in rare circumstances that won't ever happen here, see - // https://docs.rs/serde_json/1.0.50/serde_json/fn.to_string.html - let json = serde_json::to_string(&self).unwrap(); - // EventJson's deserialize implementation always returns `Ok(...)` - serde_json::from_str::>(&json).unwrap() + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") } pub fn to_state_event(&self) -> EventJson { - let json = serde_json::to_string(&self).unwrap(); - serde_json::from_str::>(&json).unwrap() + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") } pub fn to_stripped_state_event(&self) -> EventJson { - let json = serde_json::to_string(&self).unwrap(); - serde_json::from_str::>(&json).unwrap() + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 47c8967..8be5c47 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,4 +1,4 @@ -use crate::utils; +use crate::{utils, Error}; use log::warn; use rocket::{ data::{Data, FromData, FromDataFuture, Transform, TransformFuture, Transformed}, @@ -42,7 +42,7 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { let data = rocket::try_outcome!(outcome.owned()); let (user_id, device_id) = if T::METADATA.requires_authentication { - let db = request.guard::>().await.unwrap(); + let db = request.guard::>().await.expect("database was loaded"); // Get token from header or query value let token = match request @@ -108,32 +108,24 @@ impl Deref for Ruma { } /// This struct converts ruma responses into rocket http responses. 
-pub struct MatrixResult(pub std::result::Result); +pub type ConduitResult = std::result::Result, Error>; -impl TryInto>> for MatrixResult -where - T: TryInto>>, - E: Into>>, -{ - type Error = T::Error; +pub struct RumaResponse>>>(pub T); - fn try_into(self) -> Result>, T::Error> { - match self.0 { - Ok(t) => t.try_into(), - Err(e) => Ok(e.into()), - } +impl>>> From for RumaResponse { + fn from(t: T) -> Self { + Self(t) } } #[rocket::async_trait] -impl<'r, T, E> Responder<'r> for MatrixResult +impl<'r, T> Responder<'r> for RumaResponse where T: Send + TryInto>>, T::Error: Send, - E: Into>> + Send, { async fn respond_to(self, _: &'r Request<'_>) -> response::Result<'r> { - let http_response: Result, _> = self.try_into(); + let http_response: Result, _> = self.0.try_into(); match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); @@ -165,11 +157,3 @@ where } } } - -impl Deref for MatrixResult { - type Target = Result; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} diff --git a/src/utils.rs b/src/utils.rs index 8f3b4ad..0ab3bfa 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,4 +1,3 @@ -use crate::Result; use argon2::{Config, Variant}; use rand::prelude::*; use std::{ @@ -9,39 +8,38 @@ use std::{ pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) - .unwrap() + .expect("time is valid") .as_millis() as u64 } pub fn increment(old: Option<&[u8]>) -> Option> { - let number = match old { - Some(bytes) => { - let array: [u8; 8] = bytes.try_into().unwrap(); - let number = u64::from_be_bytes(array); + let number = match old.map(|bytes| bytes.try_into()) { + Some(Ok(bytes)) => { + let number = u64::from_be_bytes(bytes); number + 1 } - None => 1, // Start at one. since 0 should return the first event in the db + _ => 1, // Start at one. since 0 should return the first event in the db }; Some(number.to_be_bytes().to_vec()) } pub fn generate_keypair(old: Option<&[u8]>) -> Option> { - Some( - old.map(|s| s.to_vec()) - .unwrap_or_else(|| ruma::signatures::Ed25519KeyPair::generate().unwrap()), - ) + Some(old.map(|s| s.to_vec()).unwrap_or_else(|| { + ruma::signatures::Ed25519KeyPair::generate() + .expect("Ed25519KeyPair generation always works (?)") + })) } /// Parses the bytes into an u64. -pub fn u64_from_bytes(bytes: &[u8]) -> u64 { - let array: [u8; 8] = bytes.try_into().expect("bytes are valid u64"); - u64::from_be_bytes(array) +pub fn u64_from_bytes(bytes: &[u8]) -> Result { + let array: [u8; 8] = bytes.try_into()?; + Ok(u64::from_be_bytes(array)) } /// Parses the bytes into a string. -pub fn string_from_bytes(bytes: &[u8]) -> Result { - Ok(String::from_utf8(bytes.to_vec())?) 
+pub fn string_from_bytes(bytes: &[u8]) -> Result { + String::from_utf8(bytes.to_vec()) } pub fn random_string(length: usize) -> String { @@ -52,7 +50,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -pub fn calculate_hash(password: &str) -> std::result::Result { +pub fn calculate_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, ..Default::default() From 56d4742201dec585483ff73177163dcb83ee2412 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 11 Jun 2020 10:03:08 +0200 Subject: [PATCH 0141/1727] improvement: log bad database errors automatically --- src/client_server.rs | 124 +++++++++++++------------ src/database/account_data.rs | 14 ++- src/database/global_edus.rs | 2 +- src/database/globals.rs | 6 +- src/database/media.rs | 36 ++++---- src/database/rooms.rs | 170 +++++++++++++++++++++-------------- src/database/rooms/edus.rs | 32 ++++--- src/database/uiaa.rs | 2 +- src/database/users.rs | 81 ++++++++++------- src/error.rs | 9 ++ src/pdu.rs | 2 +- src/ruma_wrapper.rs | 5 +- sytest/sytest-whitelist | 3 +- 13 files changed, 278 insertions(+), 208 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 06748af..3a2e771 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -362,17 +362,14 @@ pub fn get_pushrules_all_route( "PushRules event not found.", ))? .deserialize() - .map_err(|_| Error::BadRequest( - ErrorKind::NotFound, - "PushRules event in db is invalid.", - ))? + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event in db is invalid."))? { Ok(get_pushrules_all::Response { global: pushrules.content.global, } .into()) } else { - Err(Error::BadDatabase("Pushrules event has wrong content.")) + Err(Error::bad_database("Pushrules event has wrong content.")) } } @@ -507,15 +504,17 @@ pub fn set_displayname_route( db.rooms .room_state(&room_id)? .get(&(EventType::RoomMember, user_id.to_string())) - .ok_or(Error::BadDatabase( - "Tried to send displayname update for user not in the room.", - ))? + .ok_or_else(|| { + Error::bad_database( + "Tried to send displayname update for user not in the room.", + ) + })? .content .clone(), ) - .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? .deserialize() - .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? }) .expect("event is valid, we just created it"), None, @@ -596,15 +595,17 @@ pub fn set_avatar_url_route( db.rooms .room_state(&room_id)? .get(&(EventType::RoomMember, user_id.to_string())) - .ok_or(Error::BadDatabase( - "Tried to send avatar url update for user not in the room.", - ))? + .ok_or_else(|| { + Error::bad_database( + "Tried to send avatar url update for user not in the room.", + ) + })? .content .clone(), ) - .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? .deserialize() - .map_err(|_| Error::BadDatabase("Database contains invalid PDU."))? + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
}) .expect("event is valid, we just created it"), None, @@ -744,9 +745,12 @@ pub fn get_keys_route( for result in db.users.all_device_keys(&user_id.clone()) { let (device_id, mut keys) = result?; - let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( - Error::BadDatabase("all_device_keys contained nonexistent device."), - )?; + let metadata = db + .users + .get_device_metadata(user_id, &device_id)? + .ok_or_else(|| { + Error::bad_database("all_device_keys contained nonexistent device.") + })?; keys.unsigned = Some(keys::UnsignedDeviceInfo { device_display_name: metadata.display_name, @@ -912,7 +916,7 @@ pub fn create_room_route( let user_id = body.user_id.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()) - .map_err(|_| Error::BadDatabase("Server name is invalid."))?; + .map_err(|_| Error::bad_database("Server name is invalid."))?; let alias = body .room_alias_name @@ -1281,9 +1285,9 @@ pub fn join_room_by_id_route( let mut event = serde_json::from_value::>( pdu.content.clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in db."))? + .map_err(|_| Error::bad_database("Invalid member event in db."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in db."))?; + .map_err(|_| Error::bad_database("Invalid member event in db."))?; event.membership = member::MembershipState::Join; event.displayname = db.users.displayname(&user_id)?; event.avatar_url = db.users.avatar_url(&user_id)?; @@ -1356,9 +1360,9 @@ pub fn leave_room_route( .content .clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = member::MembershipState::Leave; @@ -1396,9 +1400,9 @@ pub fn kick_user_route( .content .clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason @@ -1442,9 +1446,9 @@ pub fn ban_user_route( let mut event = serde_json::from_value::>( event.content.clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Ban; Ok(event) }, @@ -1484,9 +1488,9 @@ pub fn unban_user_route( .content .clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? 
.deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; @@ -1646,18 +1650,18 @@ pub async fn get_public_rooms_filtered_route( Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid canonical alias event in database."))? + .map_err(|_| Error::bad_database("Invalid canonical alias event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid canonical alias event in database."))? + .map_err(|_| Error::bad_database("Invalid canonical alias event in database."))? .alias) })?, name: state.get(&(EventType::RoomName, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { Ok(serde_json::from_value::>( s.content.clone(), ) - .map_err(|_| Error::BadDatabase("Invalid room name event in database."))? + .map_err(|_| Error::bad_database("Invalid room name event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid room name event in database."))? + .map_err(|_| Error::bad_database("Invalid room name event in database."))? .name() .map(|n| n.to_owned())) })?, @@ -1667,36 +1671,36 @@ pub async fn get_public_rooms_filtered_route( Ok(Some(serde_json::from_value::< EventJson, >(s.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid room topic event in database."))? + .map_err(|_| Error::bad_database("Invalid room topic event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid room topic event in database."))? + .map_err(|_| Error::bad_database("Invalid room topic event in database."))? .topic)) })?, world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid room history visibility event in database."))? + .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid room history visibility event in database."))? + .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))? .history_visibility == history_visibility::HistoryVisibility::WorldReadable) })?, guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { Ok(serde_json::from_value::< EventJson, >(s.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid room guest access event in database."))? + .map_err(|_| Error::bad_database("Invalid room guest access event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid room guest access event in database."))? + .map_err(|_| Error::bad_database("Invalid room guest access event in database."))? .guest_access == guest_access::GuestAccess::CanJoin) })?, avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( Ok::<_, Error>(None),|s| { Ok(Some(serde_json::from_value::< EventJson, >(s.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid room avatar event in database."))? + .map_err(|_| Error::bad_database("Invalid room avatar event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid room avatar event in database."))? + .map_err(|_| Error::bad_database("Invalid room avatar event in database."))? 
.url)) })?, }; @@ -1986,7 +1990,7 @@ pub fn get_state_events_for_key_route( Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) - .map_err(|_| Error::BadDatabase("Invalid event content in database"))?, + .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) } @@ -2021,7 +2025,7 @@ pub fn get_state_events_for_empty_key_route( Ok(get_state_events_for_empty_key::Response { content: serde_json::value::to_raw_value(event) - .map_err(|_| Error::BadDatabase("Invalid event content in database"))?, + .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) } @@ -2064,9 +2068,9 @@ pub fn sync_route( let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid PDU in database."))? + .map_err(|_| Error::bad_database("Invalid PDU in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid PDU in database."))?; + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; if content.membership == ruma::events::room::member::MembershipState::Join { joined_since_last_sync = true; // Both send_member_count and joined_since_last_sync are set. There's nothing more @@ -2099,9 +2103,9 @@ pub fn sync_route( let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| Error::bad_database("Invalid member event in database."))?; if let Some(state_key) = &pdu.state_key { let current_content = serde_json::from_value::< @@ -2109,15 +2113,19 @@ pub fn sync_route( >( state .get(&(EventType::RoomMember, state_key.clone())) - .ok_or(Error::BadDatabase( - "A user that joined once has no member event anymore.", - ))? + .ok_or_else(|| { + Error::bad_database( + "A user that joined once has no member event anymore.", + ) + })? .content .clone(), ) - .map_err(|_| Error::BadDatabase("Invalid member event in database."))? + .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() - .map_err(|_| Error::BadDatabase("Invalid member event in database."))?; + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; // The membership was and still is invite or join if matches!( @@ -2192,7 +2200,7 @@ pub fn sync_route( Ok(Some( db.rooms .get_pdu_count(&e.event_id)? - .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? .to_string(), )) })?; @@ -2355,7 +2363,7 @@ pub fn sync_route( .map(|edu| { let mut edu = edu? .deserialize() - .map_err(|_| Error::BadDatabase("EDU in database is invalid."))?; + .map_err(|_| Error::bad_database("EDU in database is invalid."))?; if let Some(timestamp) = edu.content.last_active_ago { let last_active_ago = js_int::UInt::try_from(utils::millis_since_unix_epoch()) @@ -2444,7 +2452,7 @@ pub fn get_context_route( Ok::<_, Error>(Some( db.rooms .get_pdu_count(&e.event_id)? - .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? .to_string(), )) })?; @@ -2470,7 +2478,7 @@ pub fn get_context_route( Ok::<_, Error>(Some( db.rooms .get_pdu_count(&e.event_id)? - .ok_or(Error::BadDatabase("Can't find count from event in db."))? 
+ .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? .to_string(), )) })?; @@ -2534,7 +2542,7 @@ pub fn get_message_events_route( Ok(Some( db.rooms .get_pdu_count(&e.event_id)? - .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? .to_string(), )) })?; @@ -2569,7 +2577,7 @@ pub fn get_message_events_route( Ok(Some( db.rooms .get_pdu_count(&e.event_id)? - .ok_or(Error::BadDatabase("Can't find count from event in db."))? + .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? .to_string(), )) })?; diff --git a/src/database/account_data.rs b/src/database/account_data.rs index f7c564d..befd937 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -115,16 +115,14 @@ impl AccountData { .map(|(k, v)| { Ok::<_, Error>(( EventType::try_from( - utils::string_from_bytes( - k.rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::BadDatabase("RoomUserData ID in db is invalid."))?, + utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( + || Error::bad_database("RoomUserData ID in db is invalid."), + )?) + .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, ) - .map_err(|_| Error::BadDatabase("RoomUserData ID in db is invalid."))?, + .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, serde_json::from_slice::>(&v).map_err(|_| { - Error::BadDatabase("Database contains invalid account data.") + Error::bad_database("Database contains invalid account data.") })?, )) }) diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index 7d1ac20..f58c7d6 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -57,7 +57,7 @@ impl GlobalEdus { .filter_map(|r| r.ok()) .map(|(_, v)| { Ok(serde_json::from_slice(&v) - .map_err(|_| Error::BadDatabase("Invalid presence event in db."))?) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?) })) } } diff --git a/src/database/globals.rs b/src/database/globals.rs index 32cddd8..a767d8a 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -18,7 +18,7 @@ impl Globals { .expect("utils::generate_keypair always returns Some"), "key1".to_owned(), ) - .map_err(|_| Error::BadDatabase("Private or public keys are invalid."))?; + .map_err(|_| Error::bad_database("Private or public keys are invalid."))?; Ok(Self { globals, @@ -49,13 +49,13 @@ impl Globals { .update_and_fetch(COUNTER, utils::increment)? .expect("utils::increment will always put in a value"), ) - .map_err(|_| Error::BadDatabase("Count has invalid bytes."))?) + .map_err(|_| Error::bad_database("Count has invalid bytes."))?) } pub fn current_count(&self) -> Result { self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { Ok(utils::u64_from_bytes(&bytes) - .map_err(|_| Error::BadDatabase("Count has invalid bytes."))?) + .map_err(|_| Error::bad_database("Count has invalid bytes."))?) 
}) } diff --git a/src/database/media.rs b/src/database/media.rs index f70e924..0d0820d 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -43,21 +43,20 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("Invalid Media ID in db"))?, + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?, ) - .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; + .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("Media ID in db is invalid."))?; + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; let filename = if filename_bytes.is_empty() { None } else { - Some( - utils::string_from_bytes(filename_bytes) - .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, - ) + Some(utils::string_from_bytes(filename_bytes).map_err(|_| { + Error::bad_database("Filename in mediaid_file is invalid unicode.") + })?) }; Ok(Some((filename, content_type, file.to_vec()))) @@ -94,20 +93,20 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("Invalid Media ID in db"))?, + .ok_or_else(|| Error::bad_database("Invalid Media ID in db"))?, ) - .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; + .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("Media ID in db is invalid."))?; + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; let filename = if filename_bytes.is_empty() { None } else { Some( utils::string_from_bytes(filename_bytes) - .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, + .map_err(|_| Error::bad_database("Filename in db is invalid."))?, ) }; @@ -120,21 +119,20 @@ impl Media { let content_type = utils::string_from_bytes( parts .next() - .ok_or(Error::BadDatabase("Media ID in db is invalid"))?, + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?, ) - .map_err(|_| Error::BadDatabase("Invalid content type in db."))?; + .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; let filename_bytes = parts .next() - .ok_or(Error::BadDatabase("Media ID in db is invalid"))?; + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; let filename = if filename_bytes.is_empty() { None } else { - Some( - utils::string_from_bytes(filename_bytes) - .map_err(|_| Error::BadDatabase("Filename in db is invalid."))?, - ) + Some(utils::string_from_bytes(filename_bytes).map_err(|_| { + Error::bad_database("Filename in mediaid_file is invalid unicode.") + })?) }; if let Ok(image) = image::load_from_memory(&file) { @@ -147,7 +145,7 @@ impl Media { let width_index = thumbnail_key .iter() .position(|&b| b == 0xff) - .ok_or(Error::BadDatabase("mediaid is invalid"))? + .ok_or_else(|| Error::bad_database("Media in db is invalid."))? + 1; let mut widthheight = width.to_be_bytes().to_vec(); widthheight.extend_from_slice(&height.to_be_bytes()); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 799a7cb..6b3b9c5 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,14 +65,14 @@ impl Rooms { .map(|value| { Ok::<_, Error>( serde_json::from_slice::(&value?) 
- .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, ) }) { let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or(Error::BadDatabase( - "Room state contains event without state_key.", - ))?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Room state contains event without state_key.") + })?; hashmap.insert((pdu.kind.clone(), state_key), pdu); } Ok(hashmap) @@ -87,7 +87,7 @@ impl Rooms { utils::u64_from_bytes( &pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()], ) - .map_err(|_| Error::BadDatabase("PDU has invalid count bytes."))?, + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?, )) }) } @@ -98,10 +98,10 @@ impl Rooms { .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or( - Error::BadDatabase("eventid_pduid points to nonexistent pdu."), - )?) - .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("eventid_pduid points to nonexistent pdu.") + })?) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } @@ -119,10 +119,10 @@ impl Rooms { .get(event_id.to_string().as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or( - Error::BadDatabase("eventid_pduid points to nonexistent pdu."), - )?) - .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("eventid_pduid points to nonexistent pdu.") + })?) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } @@ -131,7 +131,7 @@ impl Rooms { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) - .map_err(|_| Error::BadDatabase("Invalid PDU in db."))?, + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } @@ -165,8 +165,10 @@ impl Rooms { .values() .map(|bytes| { Ok::<_, Error>( - serde_json::from_slice(&bytes?) - .map_err(|_| Error::BadDatabase("Invalid EventID in roomid_pduleaves."))?, + EventId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?, ) }) { @@ -237,7 +239,7 @@ impl Rooms { ) .expect("EventJson::from_value always works.") .deserialize() - .map_err(|_| Error::BadDatabase("Invalid PowerLevels event in db."))?, + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, ) }, )?; @@ -251,7 +253,7 @@ impl Rooms { ) .expect("EventJson::from_value always works.") .deserialize() - .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? + .map_err(|_| Error::bad_database("Invalid Member event in db."))? .membership, ) })?; @@ -287,7 +289,7 @@ impl Rooms { ) .expect("EventJson::from_value always works.") .deserialize() - .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? + .map_err(|_| Error::bad_database("Invalid Member event in db."))? .membership, ) })?; @@ -297,7 +299,7 @@ impl Rooms { >(content.clone()) .expect("EventJson::from_value always works.") .deserialize() - .map_err(|_| Error::BadDatabase("Invalid Member event in db."))? + .map_err(|_| Error::bad_database("Invalid Member event in db."))? 
.membership; let target_power = power_levels.users.get(&target_user_id).map_or_else( @@ -322,7 +324,7 @@ impl Rooms { .expect("EventJson::from_value always works.") .deserialize() .map_err(|_| { - Error::BadDatabase("Database contains invalid JoinRules event") + Error::bad_database("Database contains invalid JoinRules event") })? .join_rule) })?; @@ -334,7 +336,9 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? - .ok_or(Error::BadDatabase("PDU leaf points to invalid event!"))?; + .ok_or_else(|| { + Error::bad_database("PDU leaf points to invalid event!") + })?; if prev_event.kind == EventType::RoomCreate && prev_event.prev_events.is_empty() { @@ -466,7 +470,7 @@ impl Rooms { prev_events, depth: depth .try_into() - .map_err(|_| Error::BadDatabase("Depth is invalid"))?, + .map_err(|_| Error::bad_database("Depth is invalid"))?, auth_events: Vec::new(), redacts: redacts.clone(), unsigned, @@ -588,7 +592,7 @@ impl Rooms { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| { Ok(serde_json::from_slice(&v) - .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?) })) } @@ -615,7 +619,7 @@ impl Rooms { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| { Ok(serde_json::from_slice(&v) - .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?) }) } @@ -641,7 +645,7 @@ impl Rooms { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| { Ok(serde_json::from_slice(&v) - .map_err(|_| Error::BadDatabase("PDU in db is invalid."))?) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?) }) } @@ -650,7 +654,7 @@ impl Rooms { if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? - .ok_or(Error::BadDatabase("PDU ID points to invalid PDU."))?; + .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; pdu.redact()?; self.replace_pdu(&pdu_id, &pdu)?; Ok(()) @@ -751,9 +755,12 @@ impl Rooms { self.alias_roomid .get(alias.alias())? .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::BadDatabase("Room ID in alias_roomid is invalid.") - })?)) + Ok(Some( + RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))?, + )) }) } @@ -766,7 +773,7 @@ impl Rooms { .values() .map(|bytes| { Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::BadDatabase("Alias in aliasid_alias is invalid."))?) + .map_err(|_| Error::bad_database("Alias in aliasid_alias is invalid."))?) }) } @@ -786,56 +793,75 @@ impl Rooms { pub fn public_rooms(&self) -> impl Iterator> { self.publicroomids.iter().keys().map(|bytes| { - Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::BadDatabase("Room ID in publicroomids is invalid."))?) + Ok( + RoomId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))?, + ) }) } - /// Returns an iterator over all rooms a user joined. + /// Returns an iterator over all joined members of a room. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_joined .scan_prefix(room_id.to_string()) - .values() + .keys() .map(|key| { - Ok(serde_json::from_slice( - &key? 
- .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("RoomUser ID is invalid."))?, + Ok(UserId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + })?, ) - .map_err(|_| Error::BadDatabase("Invalid User ID in db."))?) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))?) }) } - /// Returns an iterator over all rooms a user joined. + /// Returns an iterator over all invited members of a room. pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_invited .scan_prefix(room_id.to_string()) .keys() .map(|key| { - Ok(serde_json::from_slice( - &key? - .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("RoomUser ID is invalid."))?, + Ok(UserId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, ) - .map_err(|_| Error::BadDatabase("Invalid User ID in db."))?) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid."))?) }) } - /// Returns an iterator over all rooms a user joined. + /// Returns an iterator over all left members of a room. pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_joined .scan_prefix(user_id.to_string()) .keys() .map(|key| { - Ok(serde_json::from_slice( - &key? - .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + Ok(RoomId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, ) - .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid."))?) }) } @@ -845,13 +871,18 @@ impl Rooms { .scan_prefix(&user_id.to_string()) .keys() .map(|key| { - Ok(serde_json::from_slice( - &key? - .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + Ok(RoomId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) - .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?) }) } @@ -861,13 +892,18 @@ impl Rooms { .scan_prefix(&user_id.to_string()) .keys() .map(|key| { - Ok(serde_json::from_slice( - &key? - .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("UserRoom ID is invalid."))?, + Ok(RoomId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_left is invalid unicode.") + })?, ) - .map_err(|_| Error::BadDatabase("Invalid Room ID in db."))?) + .map_err(|_| Error::bad_database("Room ID in userroomid_left is invalid."))?) 
}) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index c8b03aa..645ccb0 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -74,8 +74,9 @@ impl RoomEdus { .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| { - Ok(serde_json::from_slice(&v) - .map_err(|_| Error::BadDatabase("Read receipt in db is invalid."))?) + Ok(serde_json::from_slice(&v).map_err(|_| { + Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid.") + })?) })) } @@ -164,10 +165,10 @@ impl RoomEdus { let key = key?; Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or( - Error::BadDatabase("RoomActive has invalid timestamp or delimiters."), - )?) - .map_err(|_| Error::BadDatabase("RoomActive has invalid timestamp bytes."))?, + utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomActive has invalid timestamp or delimiters.") + })?) + .map_err(|_| Error::bad_database("RoomActive has invalid timestamp bytes."))?, )) }) .filter_map(|r| r.ok()) @@ -200,9 +201,9 @@ impl RoomEdus { .roomid_lastroomactiveupdate .get(&room_id.to_string().as_bytes())? .map_or(Ok::<_, Error>(None), |bytes| { - Ok(Some( - utils::u64_from_bytes(&bytes).map_err(|_| Error::BadDatabase(""))?, - )) + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") + })?)) })? .unwrap_or(0)) } @@ -219,9 +220,14 @@ impl RoomEdus { .scan_prefix(prefix) .values() .map(|user_id| { - Ok::<_, Error>(serde_json::from_slice(&user_id?).map_err(|_| { - Error::BadDatabase("User ID in roomactiveid_userid is invalid.") - })?) + Ok::<_, Error>( + UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| { + Error::bad_database("User ID in roomactiveid_userid is invalid unicode.") + })?) + .map_err(|_| { + Error::bad_database("User ID in roomactiveid_userid is invalid.") + })?, + ) }) { user_ids.push(user_id?); @@ -252,7 +258,7 @@ impl RoomEdus { self.roomuserid_lastread.get(key)?.map_or(Ok(None), |v| { Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::BadDatabase("Invalid private read marker bytes") + Error::bad_database("Invalid private read marker bytes") })?)) }) } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 0ae2ea4..fa4fc6e 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -178,7 +178,7 @@ impl Uiaa { "UIAA session does not exist.", ))?, ) - .map_err(|_| Error::BadDatabase("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; + .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; if uiaainfo .session diff --git a/src/database/users.rs b/src/database/users.rs index b70c2de..07c6912 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -43,19 +43,22 @@ impl Users { .get(token)? 
.map_or(Ok(None), |bytes| { let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts.next().ok_or(Error::BadDatabase( - "token_userdeviceid value in db is invalid.", - ))?; - let device_bytes = parts.next().ok_or(Error::BadDatabase( - "token_userdeviceid value in db is invalid.", - ))?; + let user_bytes = parts.next().ok_or_else(|| { + Error::bad_database("User ID in token_userdeviceid is invalid.") + })?; + let device_bytes = parts.next().ok_or_else(|| { + Error::bad_database("Device ID in token_userdeviceid is invalid.") + })?; Ok(Some(( - serde_json::from_slice(&user_bytes).map_err(|_| { - Error::BadDatabase("User ID in token_userdeviceid is invalid.") + UserId::try_from(utils::string_from_bytes(&user_bytes).map_err(|_| { + Error::bad_database("User ID in token_userdeviceid is invalid unicode.") + })?) + .map_err(|_| { + Error::bad_database("User ID in token_userdeviceid is invalid.") })?, utils::string_from_bytes(&device_bytes).map_err(|_| { - Error::BadDatabase("Device ID in token_userdeviceid is invalid.") + Error::bad_database("Device ID in token_userdeviceid is invalid.") })?, ))) }) @@ -64,8 +67,12 @@ impl Users { /// Returns an iterator over all users on this homeserver. pub fn iter(&self) -> impl Iterator> { self.userid_password.iter().keys().map(|bytes| { - Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::BadDatabase("User ID bytes in db are invalid."))?) + Ok( + UserId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + Error::bad_database("User ID in userid_password is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))?, + ) }) } @@ -75,7 +82,7 @@ impl Users { .get(user_id.to_string())? .map_or(Ok(None), |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::BadDatabase("Password hash in db is not valid string.") + Error::bad_database("Password hash in db is not valid string.") })?)) }) } @@ -86,7 +93,7 @@ impl Users { .get(user_id.to_string())? .map_or(Ok(None), |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::BadDatabase("Displayname in db is invalid.") + Error::bad_database("Displayname in db is invalid.") })?)) }) } @@ -109,7 +116,7 @@ impl Users { .get(user_id.to_string())? .map_or(Ok(None), |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::BadDatabase("Avatar URL in db is invalid.") + Error::bad_database("Avatar URL in db is invalid.") })?)) }) } @@ -200,10 +207,10 @@ impl Users { &*bytes? .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("UserDevice ID in db is invalid."))?, + .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, ) .map_err(|_| { - Error::BadDatabase("Device ID in userdeviceid_metadata is invalid.") + Error::bad_database("Device ID in userdeviceid_metadata is invalid.") })?) }) } @@ -289,11 +296,11 @@ impl Users { &*key .rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("OneTimeKeyId in db is invalid."))?, + .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, ) - .map_err(|_| Error::BadDatabase("OneTimeKeyId in db is invalid."))?, + .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, serde_json::from_slice(&*value) - .map_err(|_| Error::BadDatabase("OneTimeKeys in db are invalid."))?, + .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, )) }) .transpose() @@ -317,12 +324,11 @@ impl Users { .map(|bytes| { Ok::<_, Error>( serde_json::from_slice::( - &*bytes? 
- .rsplit(|&b| b == 0xff) - .next() - .ok_or(Error::BadDatabase("OneTimeKey ID in db is invalid."))?, + &*bytes?.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + Error::bad_database("OneTimeKey ID in db is invalid.") + })?, ) - .map_err(|_| Error::BadDatabase("AlgorithmAndDeviceID in db is invalid."))? + .map_err(|_| Error::bad_database("AlgorithmAndDeviceID in db is invalid."))? .0, ) }) @@ -369,7 +375,7 @@ impl Users { .values() .map(|bytes| { Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::BadDatabase("DeviceKeys in db are invalid."))?) + .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?) }) } @@ -378,9 +384,16 @@ impl Users { .range(since.to_be_bytes()..) .values() .map(|bytes| { - Ok(serde_json::from_slice(&bytes?).map_err(|_| { - Error::BadDatabase("User ID in devicekeychangeid_userid is invalid.") - })?) + Ok( + UserId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + Error::bad_database( + "User ID in devicekeychangeid_userid is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid.") + })?, + ) }) } @@ -396,13 +409,13 @@ impl Users { let userdeviceid = utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() - .ok_or(Error::BadDatabase("UserDeviceID in db is invalid."))?, + .ok_or_else(|| Error::bad_database("UserDeviceID in db is invalid."))?, ) - .map_err(|_| Error::BadDatabase("UserDeviceId in db is invalid."))?; + .map_err(|_| Error::bad_database("UserDeviceId in db is invalid."))?; Ok(( userdeviceid, serde_json::from_slice(&*value) - .map_err(|_| Error::BadDatabase("DeviceKeys in db are invalid."))?, + .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?, )) }) } @@ -452,7 +465,7 @@ impl Users { let (key, value) = result?; events.push( serde_json::from_slice(&*value) - .map_err(|_| Error::BadDatabase("Event in todeviceid_events is invalid."))?, + .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, ); self.todeviceid_events.remove(key)?; } @@ -493,7 +506,7 @@ impl Users { .get(&userdeviceid)? .map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::BadDatabase("Metadata in userdeviceid_metadata is invalid.") + Error::bad_database("Metadata in userdeviceid_metadata is invalid.") })?)) }) } @@ -507,7 +520,7 @@ impl Users { .values() .map(|bytes| { Ok(serde_json::from_slice::(&bytes?).map_err(|_| { - Error::BadDatabase("Device in userdeviceid_metadata is invalid.") + Error::bad_database("Device in userdeviceid_metadata is invalid.") })?) }) } diff --git a/src/error.rs b/src/error.rs index 3652f0a..24bb39b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,6 @@ use crate::RumaResponse; use http::StatusCode; +use log::error; use rocket::{ response::{self, Responder}, Request, @@ -27,6 +28,7 @@ pub enum Error { #[error("{0}")] BadConfig(&'static str), #[error("{0}")] + /// Don't create this directly. Use Error::bad_database instead. 
BadDatabase(&'static str), #[error("uiaa")] Uiaa(UiaaInfo), @@ -37,6 +39,13 @@ pub enum Error { Conflict(&'static str), // This is only needed for when a room alias already exists } +impl Error { + pub fn bad_database(message: &'static str) -> Self { + error!("BadDatabase: {}", message); + Self::BadDatabase(message) + } +} + #[rocket::async_trait] impl<'r> Responder<'r> for Error { async fn respond_to(self, r: &'r Request<'_>) -> response::Result<'r> { diff --git a/src/pdu.rs b/src/pdu.rs index 8c8423a..5cb5fb1 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -60,7 +60,7 @@ impl PduEvent { let old_content = self .content .as_object_mut() - .ok_or(Error::BadDatabase("PDU has invalid content"))?; + .ok_or_else(|| Error::bad_database("PDU in db has invalid content."))?; let mut new_content = serde_json::Map::new(); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8be5c47..406357a 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -42,7 +42,10 @@ impl<'a, T: Endpoint> FromData<'a> for Ruma { let data = rocket::try_outcome!(outcome.owned()); let (user_id, device_id) = if T::METADATA.requires_authentication { - let db = request.guard::>().await.expect("database was loaded"); + let db = request + .guard::>() + .await + .expect("database was loaded"); // Get token from header or query value let token = match request diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index bf9059c..a77c546 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -35,8 +35,6 @@ POST /rooms/:room_id/invite can send an invite PUT /rooms/:room_id/state/m.room.power_levels can set levels PUT power_levels should not explode if the old power levels were empty Both GET and PUT work -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself Version responds 200 OK with valid structure PUT /profile/:user_id/displayname sets my name GET /profile/:user_id/displayname publicly accessible @@ -78,3 +76,4 @@ User directory correctly update on display name change User in shared private room does appear in user directory User in dir while user still shares private rooms POST /rooms/:room_id/ban can ban a user +Alternative server names do not cause a routing loop From fa7612f06912de17dbcb8bf0edd8de0571ad00ad Mon Sep 17 00:00:00 2001 From: PublicByte Date: Thu, 11 Jun 2020 21:06:43 +0200 Subject: [PATCH 0142/1727] refactor: remove unused imports --- src/client_server.rs | 4 ++-- src/database/uiaa.rs | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 3a2e771..e49bfb4 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -5,7 +5,7 @@ use std::{ }; use crate::{utils, ConduitResult, Database, Error, Ruma}; -use log::{debug, warn}; +use log::warn; use rocket::{delete, get, options, post, put, State}; use ruma::{ api::client::{ @@ -46,7 +46,7 @@ use ruma::{ thirdparty::get_protocols, to_device::{self, send_event_to_device}, typing::create_typing_event, - uiaa::{AuthFlow, UiaaInfo, UiaaResponse}, + uiaa::{AuthFlow, UiaaInfo}, user_directory::search_users, }, unversioned::get_supported_versions, diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index fa4fc6e..1272d5f 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,5 +1,4 @@ use crate::{Error, Result}; -use log::debug; use ruma::{ api::client::{ error::ErrorKind, From c6194b27a3fb1e12f2fd9a1192b6de507eba62e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 16 Jun 2020 08:21:06 +0200 Subject: 
[PATCH 0143/1727] Update 'README.md' --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 46aa298..06738ff 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ See it in action: #### Can I try it out? -Yes! Just open Riot () and register on the `https://conduit.koesters.xyz:14004` homeserver. +Yes! Just open Riot ( or RiotX) and register on the `https://conduit.koesters.xyz:14004` homeserver. #### How can I deploy my own? @@ -42,3 +42,4 @@ If you want to help, you may be able to find something in the issue tracker. If #### Donate Liberapay: +Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` From 853ccd6fbf4639a0d2b005eb294a45c35539b73c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 16 Jun 2020 08:43:46 +0200 Subject: [PATCH 0144/1727] Update 'README.md' --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 06738ff..ded4844 100644 --- a/README.md +++ b/README.md @@ -41,5 +41,5 @@ If you want to help, you may be able to find something in the issue tracker. If #### Donate -Liberapay: +Liberapay: \ Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` From 13d474f6ee210e6e747ad5ec29d871eb1c8b462f Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 16 Jun 2020 12:19:58 +0200 Subject: [PATCH 0145/1727] fix: update ruma version --- Cargo.lock | 20 ++++++++++---------- Cargo.toml | 7 ++++--- src/database/rooms.rs | 2 +- src/pdu.rs | 2 +- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fe80fb6..ae62715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1263,7 +1263,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "ruma-api", "ruma-client-api", @@ -1277,7 +1277,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1292,7 +1292,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.6", @@ -1302,7 +1302,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "http", "js_int", @@ -1319,7 +1319,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = 
"git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "matches", "ruma-serde", @@ -1356,7 +1356,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "js_int", "matches", @@ -1371,17 +1371,17 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6316cb248e3e0323a5a269b8eaed571404fb4f65c81848549e9ba99fd9b8e9de" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "rand", "serde", + "strum", ] [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "dtoa", "itoa", @@ -1394,7 +1394,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=12388c3fbc8ba2a685cbf0fe810c633c827f5b2c#12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "base64 0.12.1", "ring", diff --git a/Cargo.toml b/Cargo.toml index 38c7530..cc8c595 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,13 +30,14 @@ image = { version = "0.23.4", default-features = false, features = ["jpeg", "png [dependencies.ruma] git = "https://github.com/ruma/ruma" -rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" +rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" #path = "../ruma/ruma" features = ["rand", "client-api", "federation-api"] # These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma [patch.crates-io] -ruma-common = { git = "https://github.com/ruma/ruma", rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" } -ruma-serde = { git = "https://github.com/ruma/ruma", rev = "12388c3fbc8ba2a685cbf0fe810c633c827f5b2c" } +ruma-common = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } +ruma-serde = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } +ruma-identifiers = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } #ruma-common = { path = "../ruma/ruma-common" } #ruma-serde = { path = "../ruma/ruma-serde" } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6b3b9c5..ee14fc8 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -474,7 +474,7 @@ impl Rooms { auth_events: Vec::new(), redacts: redacts.clone(), unsigned, - hashes: ruma::api::federation::pdu::EventHash { + hashes: ruma::api::federation::EventHash { sha256: "aaa".to_owned(), }, signatures: HashMap::new(), diff --git a/src/pdu.rs b/src/pdu.rs index 5cb5fb1..f496933 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,7 +1,7 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ - api::federation::pdu::EventHash, + api::federation::EventHash, events::{ 
collections::all::{RoomEvent, StateEvent}, stripped::AnyStrippedStateEvent, From db685e0cf77a810e0286292abef394863cf65772 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 16 Jun 2020 13:53:15 +0200 Subject: [PATCH 0146/1727] fix: update rocket version --- Cargo.lock | 222 ++++++++++++++++++++++++---------------------------- Cargo.toml | 2 +- src/main.rs | 11 +-- 3 files changed, 111 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae62715..258af13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,15 @@ # It is not intended for manual editing. [[package]] name = "adler32" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" [[package]] name = "arc-swap" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] name = "arrayref" @@ -26,13 +26,13 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.33" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1c13101a3224fb178860ae372a031ce350bbd92d39968518f016744dde0bf7" +checksum = "89cb5d814ab2a47fd66d3266e9efccb53ca4c740b7451043b8ffcf9a6208f3f8" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -75,9 +75,9 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "base64" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d1ccbaf7d9ec9537465a97bf19edc1a4e158ecb49fc16178202238c569cc42" +checksum = "e223af0dc48c96d4f8342ec01a4974f139df863896b316681efd36742f22cc67" [[package]] name = "bitflags" @@ -151,7 +151,7 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.1", + "base64 0.12.2", "directories", "http", "image", @@ -262,7 +262,7 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "devise_core", - "quote 1.0.6", + "quote 1.0.7", ] [[package]] @@ -272,8 +272,8 @@ source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9 dependencies = [ "bitflags", "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -288,11 +288,10 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ - "cfg-if", "libc", "redox_users", "winapi 0.3.8", @@ -416,8 +415,8 @@ checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -515,9 +514,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.13" +version = "0.1.14" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71" +checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909" dependencies = [ "libc", ] @@ -599,9 +598,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.4" +version = "0.23.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f4167a8f21fa2bb3f17a652a760acd7572645281c98e3b612a26242c96ee" +checksum = "d534e95ad8b9d5aa614322d02352b4f1bf962254adcf02ac6f2def8be18498e8" dependencies = [ "bytemuck", "byteorder", @@ -622,15 +621,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "inflate" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cdb29978cc5797bd8dcc8e5bf7de604891df2a8dc576973d71a281e916db2ff" -dependencies = [ - "adler32", -] - [[package]] name = "iovec" version = "0.1.4" @@ -762,6 +752,15 @@ dependencies = [ "unicase", ] +[[package]] +name = "miniz_oxide" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +dependencies = [ + "adler32", +] + [[package]] name = "mio" version = "0.6.22" @@ -835,9 +834,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" dependencies = [ "autocfg", "num-traits", @@ -845,9 +844,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb0800a0291891dd9f4fe7bd9c19384f98f7fbe0cd0f39a2c6b88b9868bbc00" +checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f" dependencies = [ "autocfg", "num-integer", @@ -867,9 +866,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" dependencies = [ "autocfg", ] @@ -912,9 +911,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.57" +version = "0.9.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7410fef80af8ac071d4f63755c0ab89ac3df0fd1ea91f1d1f37cf5cec4395990" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" dependencies = [ "autocfg", "cc", @@ -949,24 +948,24 @@ dependencies = [ [[package]] name = "pear" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c26d2b92e47063ffce70d3e3b1bd097af121a9e0db07ca38a6cc1cf0cc85ff25" +checksum = "5320f212db967792b67cfe12bd469d08afd6318a249bd917d5c19bc92200ab8a" dependencies = [ "pear_codegen", ] [[package]] name = "pear_codegen" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336db4a192cc7f54efeb0c4e11a9245394824cc3bcbd37ba3ff51240c35d7a6e" +checksum = "bfc1c836fdc3d1ef87c348b237b5b5c4dff922156fb2d968f57734f9669768ca" dependencies = [ "proc-macro2 0.4.30", "quote 0.6.13", "syn 0.15.44", - 
"version_check 0.1.5", - "yansi 0.4.0", + "version_check", + "yansi", ] [[package]] @@ -983,22 +982,22 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a1acf4a3e70849f8a673497ef984f043f95d2d8252dcdf74d54e6a1e47e8a" +checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194e88048b71a3e02eb4ee36a6995fed9b8236c11a7bb9f7247a9d9835b3f265" +checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1021,14 +1020,14 @@ checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" [[package]] name = "png" -version = "0.16.4" +version = "0.16.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12faa637ed9ae3d3c881332e54b5ae2dba81cda9fc4bbce0faa1ba53abcead50" +checksum = "34ccdd66f6fe4b2433b07e4728e9a013e43233120427046e93ceb709c3a439bf" dependencies = [ "bitflags", "crc32fast", "deflate", - "inflate", + "miniz_oxide", ] [[package]] @@ -1045,9 +1044,9 @@ checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" [[package]] name = "proc-macro-nested" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" +checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" @@ -1078,9 +1077,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ "proc-macro2 1.0.18", ] @@ -1145,9 +1144,9 @@ dependencies = [ [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ "winapi 0.3.8", ] @@ -1158,7 +1157,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" dependencies = [ - "base64 0.12.1", + "base64 0.12.2", "bytes", "encoding_rs", "futures-core", @@ -1204,14 +1203,13 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" dependencies = [ "async-trait", "atty", "base16", "base64 0.11.0", "futures", - "futures-util", "log", "memchr", "num_cpus", @@ -1222,27 +1220,27 @@ dependencies = [ "time", "tokio", "toml", - "version_check 0.9.2", - "yansi 0.5.0", + "version_check", + "yansi", ] [[package]] name = "rocket_codegen" version = 
"0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" dependencies = [ "devise", "indexmap", - "quote 1.0.6", + "quote 1.0.7", "rocket_http", - "version_check 0.9.2", - "yansi 0.5.0", + "version_check", + "yansi", ] [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?branch=async#78c8ac8ccdbe85abb9508fb9657e70eb2b8d08c0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" dependencies = [ "cookie", "http", @@ -1295,8 +1293,8 @@ version = "0.16.1" source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1349,8 +1347,8 @@ version = "0.21.3" source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1396,7 +1394,7 @@ name = "ruma-signatures" version = "0.6.0-dev.1" source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" dependencies = [ - "base64 0.12.1", + "base64 0.12.2", "ring", "serde_json", "untrusted", @@ -1420,7 +1418,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.12.1", + "base64 0.12.2", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1496,29 +1494,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.111" +version = "1.0.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d" +checksum = "736aac72d1eafe8e5962d1d1c3d99b0df526015ba40915cb3c49d042e92ec243" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.111" +version = "1.0.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250" +checksum = "bf0343ce212ac0d3d6afd9391ac8e9c9efe06b533c8d33f660f6390cc4093f57" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] name = "serde_json" -version = "1.0.53" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2" +checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226" dependencies = [ "itoa", "ryu", @@ -1616,8 +1614,8 @@ checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1633,12 +1631,12 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93a56fabc59dce20fe48b6c832cc249c713e7ed88fa28b0ee0a3bfcaae5fe4e2" +checksum = 
"b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", + "quote 1.0.7", "unicode-xid 0.2.0", ] @@ -1658,22 +1656,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13f926965ad00595dd129fa12823b04bbf866e9085ab0a5f2b05b850fbfc344" +checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "893582086c2f98cde18f906265a65b5030a074b1046c674ae898be6519a7f479" +checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1716,8 +1714,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", ] [[package]] @@ -1783,7 +1781,7 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.2", + "version_check", ] [[package]] @@ -1841,15 +1839,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55d1e41d56121e07f1e223db0a4def204e45c85425f6a16d462fd07c8d10d74c" - -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "version_check" @@ -1895,8 +1887,8 @@ dependencies = [ "lazy_static", "log", "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", "wasm-bindgen-shared", ] @@ -1918,7 +1910,7 @@ version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" dependencies = [ - "quote 1.0.6", + "quote 1.0.7", "wasm-bindgen-macro-support", ] @@ -1929,8 +1921,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" dependencies = [ "proc-macro2 1.0.18", - "quote 1.0.6", - "syn 1.0.30", + "quote 1.0.7", + "syn 1.0.31", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2014,12 +2006,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "yansi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d60c3b48c9cdec42fb06b3b84b5b087405e1fa1c644a1af3930e4dfafe93de48" - [[package]] name = "yansi" version = "0.5.0" diff --git a/Cargo.toml b/Cargo.toml index cc8c595..62d7b7a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", branch = "async", features = ["tls"] } +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = 
"4928e35ec5c4b9242f50d644282d9896d0160a10", features = ["tls"] } http = "0.2.1" log = "0.4.8" sled = "0.31.0" diff --git a/src/main.rs b/src/main.rs index bd0f8b9..8b68e36 100644 --- a/src/main.rs +++ b/src/main.rs @@ -94,18 +94,19 @@ fn setup_rocket() -> rocket::Rocket { //server_server::get_server_keys_deprecated, ], ) - .attach(AdHoc::on_attach("Config", |rocket| { - let data = Database::load_or_create(&rocket.config()).expect("valid config"); + .attach(AdHoc::on_attach("Config", |mut rocket| async { + let data = Database::load_or_create(rocket.config().await).expect("valid config"); Ok(rocket.manage(data)) })) } -fn main() { - // Log info by default +#[rocket::main] +async fn main() { + // Default log level if std::env::var("ROCKET_LOG").is_err() { std::env::set_var("ROCKET_LOG", "critical"); } - setup_rocket().launch().unwrap(); + setup_rocket().launch().await.unwrap(); } From 02fe030b2aec3e264b47fd1e7b7db847fa5f94f4 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 12 Jun 2020 13:18:25 +0200 Subject: [PATCH 0147/1727] improvement: better default push rules --- src/client_server.rs | 120 ++++++++++------------ src/database/rooms.rs | 79 +++++++++++--- src/main.rs | 2 + src/pdu.rs | 6 ++ src/push_rules.rs | 232 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 359 insertions(+), 80 deletions(-) create mode 100644 src/push_rules.rs diff --git a/src/client_server.rs b/src/client_server.rs index e49bfb4..0676abc 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -204,33 +204,7 @@ pub fn register_route( &EventType::PushRules, serde_json::to_value(ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::events::push_rules::Ruleset { - content: vec![], - override_: vec![ruma::events::push_rules::ConditionalPushRule { - actions: vec![ruma::events::push_rules::Action::DontNotify], - default: true, - enabled: false, - rule_id: ".m.rule.master".to_owned(), - conditions: vec![], - }], - room: vec![], - sender: vec![], - underride: vec![ruma::events::push_rules::ConditionalPushRule { - actions: vec![ - ruma::events::push_rules::Action::Notify, - ruma::events::push_rules::Action::SetTweak(ruma::push::Tweak::Sound( - "default".to_owned(), - )), - ], - default: true, - enabled: true, - rule_id: ".m.rule.message".to_owned(), - conditions: vec![ruma::events::push_rules::PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }], - }], - }, + global: crate::push_rules::default_pushrules(&user_id), }, }) .expect("data is valid, we just created it") @@ -502,8 +476,7 @@ pub fn set_displayname_route( displayname: body.displayname.clone(), ..serde_json::from_value::>( db.rooms - .room_state(&room_id)? - .get(&(EventType::RoomMember, user_id.to_string())) + .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or_else(|| { Error::bad_database( "Tried to send displayname update for user not in the room.", @@ -593,8 +566,7 @@ pub fn set_avatar_url_route( avatar_url: body.avatar_url.clone(), ..serde_json::from_value::>( db.rooms - .room_state(&room_id)? - .get(&(EventType::RoomMember, user_id.to_string())) + .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or_else(|| { Error::bad_database( "Tried to send avatar url update for user not in the room.", @@ -1267,8 +1239,7 @@ pub fn join_room_by_id_route( let event = db .rooms - .room_state(&body.room_id)? 
- .get(&(EventType::RoomMember, user_id.to_string())) + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .map_or_else( || { // There was no existing membership event @@ -1348,11 +1319,10 @@ pub fn leave_room_route( _room_id: String, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id)?; let mut event = serde_json::from_value::>( - state - .get(&(EventType::RoomMember, user_id.to_string())) + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1387,12 +1357,11 @@ pub fn kick_user_route( _room_id: String, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id)?; let mut event = serde_json::from_value::>( - state - .get(&(EventType::RoomMember, user_id.to_string())) + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot kick member that's not in the room.", @@ -1428,12 +1397,12 @@ pub fn ban_user_route( _room_id: String, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id)?; // TODO: reason - let event = state - .get(&(EventType::RoomMember, user_id.to_string())) + let event = db + .rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .map_or( Ok::<_, Error>(member::MemberEventContent { membership: member::MembershipState::Ban, @@ -1475,12 +1444,11 @@ pub fn unban_user_route( _room_id: String, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let state = db.rooms.room_state(&body.room_id)?; let mut event = serde_json::from_value::>( - state - .get(&(EventType::RoomMember, user_id.to_string())) + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot unban a user who is not banned.", @@ -1642,7 +1610,8 @@ pub async fn get_public_rooms_filtered_route( .map(|room_id| { let room_id = room_id?; - let state = db.rooms.room_state(&room_id)?; + // TODO: Do not load full state? + let state = db.rooms.room_state_full(&room_id)?; let chunk = directory::PublicRoomsChunk { aliases: Vec::new(), @@ -1775,9 +1744,29 @@ pub fn search_users_route( } #[get("/_matrix/client/r0/rooms/<_room_id>/members")] -pub fn get_member_events_route(_room_id: String) -> ConduitResult { - warn!("TODO: get_member_events_route"); - Ok(get_member_events::Response { chunk: Vec::new() }.into()) +pub fn get_member_events_route( + db: State<'_, Database>, + //body: Ruma, + _room_id: String, +) -> ConduitResult { + //let user_id = body.user_id.as_ref().expect("user is authenticated"); + + //if !db.rooms.is_joined(user_id, &body.room_id)? { + // return Err(Error::BadRequest( + // ErrorKind::Forbidden, + // "You don't have permission to view this room.", + // )); + //} + + Ok(get_member_events::Response { + chunk: Vec::new(),/*db + .rooms + .room_state_type(&body.room_id, &EventType::RoomMember)? + .values() + .map(|pdu| pdu.to_member_event()) + .collect(),*/ + } + .into()) } #[get("/_matrix/client/r0/thirdparty/protocols")] @@ -1951,7 +1940,7 @@ pub fn get_state_events_route( Ok(get_state_events::Response { room_state: db .rooms - .room_state(&body.room_id)? 
+ .room_state_full(&body.room_id)? .values() .map(|pdu| pdu.to_state_event()) .collect(), @@ -1979,10 +1968,9 @@ pub fn get_state_events_for_key_route( )); } - let state = db.rooms.room_state(&body.room_id)?; - - let event = state - .get(&(body.event_type.clone(), body.state_key.clone())) + let event = db + .rooms + .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -2014,17 +2002,16 @@ pub fn get_state_events_for_empty_key_route( )); } - let state = db.rooms.room_state(&body.room_id)?; - - let event = state - .get(&(body.event_type.clone(), "".to_owned())) + let event = db + .rooms + .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", ))?; Ok(get_state_events_for_empty_key::Response { - content: serde_json::value::to_raw_value(event) + content: serde_json::value::to_raw_value(&event) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -2068,7 +2055,7 @@ pub fn sync_route( let content = serde_json::from_value::< EventJson, >(pdu.content.clone()) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .expect("EventJson::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database."))?; if content.membership == ruma::events::room::member::MembershipState::Join { @@ -2081,7 +2068,7 @@ pub fn sync_route( } } - let state = db.rooms.room_state(&room_id)?; + let members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; let (joined_member_count, invited_member_count, heroes) = if send_member_count { let joined_member_count = db.rooms.room_members(&room_id).count(); @@ -2111,8 +2098,8 @@ pub fn sync_route( let current_content = serde_json::from_value::< EventJson, >( - state - .get(&(EventType::RoomMember, state_key.clone())) + members + .get(state_key) .ok_or_else(|| { Error::bad_database( "A user that joined once has no member event anymore.", @@ -2264,7 +2251,8 @@ pub fn sync_route( // TODO: state before timeline state: sync_events::State { events: if joined_since_last_sync { - state + db.rooms + .room_state_full(&room_id)? .into_iter() .map(|(_, pdu)| pdu.to_state_event()) .collect() @@ -2337,7 +2325,7 @@ pub fn sync_route( invite_state: sync_events::InviteState { events: db .rooms - .room_state(&room_id)? + .room_state_full(&room_id)? .into_iter() .map(|(_, pdu)| pdu.to_stripped_state_event()) .collect(), @@ -2496,7 +2484,7 @@ pub fn get_context_route( events_after, state: db // TODO: State at event .rooms - .room_state(&body.room_id)? + .room_state_full(&body.room_id)? .values() .map(|pdu| pdu.to_state_event()) .collect(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ee14fc8..604b5aa 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -56,7 +56,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { let mut hashmap = HashMap::new(); for pdu in self .roomstateid_pdu @@ -78,6 +81,58 @@ impl Rooms { Ok(hashmap) } + /// Returns the full room state. 
+ pub fn room_state_type( + &self, + room_id: &RoomId, + event_type: &EventType, + ) -> Result> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&event_type.to_string().as_bytes()); + + let mut hashmap = HashMap::new(); + for pdu in self + .roomstateid_pdu + .scan_prefix(&prefix) + .values() + .map(|value| { + Ok::<_, Error>( + serde_json::from_slice::(&value?) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + ) + }) + { + let pdu = pdu?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Room state contains event without state_key.") + })?; + hashmap.insert(state_key, pdu); + } + Ok(hashmap) + } + + /// Returns the full room state. + pub fn room_state_get( + &self, + room_id: &RoomId, + event_type: &EventType, + state_key: &str, + ) -> Result> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&event_type.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(&state_key.as_bytes()); + + self.roomstateid_pdu.get(&key)?.map_or(Ok(None), |value| { + Ok::<_, Error>(Some( + serde_json::from_slice::(&value) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -212,8 +267,7 @@ impl Rooms { // Is the event authorized? if let Some(state_key) = &state_key { let power_levels = self - .room_state(&room_id)? - .get(&(EventType::RoomPowerLevels, "".to_owned())) + .room_state_get(&room_id, &EventType::RoomPowerLevels, "")? .map_or_else( || { Ok::<_, Error>(power_levels::PowerLevelsEventContent { @@ -244,8 +298,7 @@ impl Rooms { }, )?; let sender_membership = self - .room_state(&room_id)? - .get(&(EventType::RoomMember, sender.to_string())) + .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { Ok( serde_json::from_value::>( @@ -280,8 +333,11 @@ impl Rooms { })?; let current_membership = self - .room_state(&room_id)? - .get(&(EventType::RoomMember, target_user_id.to_string())) + .room_state_get( + &room_id, + &EventType::RoomMember, + &target_user_id.to_string(), + )? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { Ok( serde_json::from_value::>( @@ -315,8 +371,7 @@ impl Rooms { ); let join_rules = - self.room_state(&room_id)? - .get(&(EventType::RoomJoinRules, "".to_owned())) + self.room_state_get(&room_id, &EventType::RoomJoinRules, "")? .map_or(Ok::<_, Error>(join_rules::JoinRule::Public), |pdu| { Ok(serde_json::from_value::< EventJson, @@ -446,12 +501,8 @@ impl Rooms { + 1; let mut unsigned = unsigned.unwrap_or_default(); - // TODO: Optimize this to not load the whole room state? if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self - .room_state(&room_id)? - .get(&(event_type.clone(), state_key.to_owned())) - { + if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? 
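The roomstateid_pdu lookups added above all hinge on one key layout: room id, event type and state key joined by 0xff separators, so the full key addresses a single state event while its prefix covers every event of that type in the room. What follows is a minimal sketch of that layout in plain Rust, leaving out sled and ruma; the helper names state_event_key and state_type_prefix are made up for illustration and are not part of the codebase.

// Illustrative only: mirrors the key layout used by room_state_get and
// room_state_type above; the helper names are hypothetical.
fn state_event_key(room_id: &str, event_type: &str, state_key: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(event_type.as_bytes());
    key.push(0xff);
    key.extend_from_slice(state_key.as_bytes());
    key
}

fn state_type_prefix(room_id: &str, event_type: &str) -> Vec<u8> {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(event_type.as_bytes());
    prefix
}

fn main() {
    let key = state_event_key("!abc:example.com", "m.room.member", "@alice:example.com");
    let prefix = state_type_prefix("!abc:example.com", "m.room.member");

    // room_state_get does a point lookup with the full key, while
    // room_state_type scans everything stored under the shorter prefix.
    assert!(key.starts_with(&prefix));
}

The point of the two helpers is only to show why a single sled tree can serve both the per-event lookup and the per-type scan without loading the whole room state.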
{ unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); } } diff --git a/src/main.rs b/src/main.rs index 8b68e36..5c51ae0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,8 @@ #![feature(proc_macro_hygiene, decl_macro)] #![warn(rust_2018_idioms)] +pub mod push_rules; + mod client_server; mod database; mod error; diff --git a/src/pdu.rs b/src/pdu.rs index f496933..8a5858e 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,6 +4,7 @@ use ruma::{ api::federation::EventHash, events::{ collections::all::{RoomEvent, StateEvent}, + room::member::MemberEvent, stripped::AnyStrippedStateEvent, EventJson, EventType, }, @@ -95,4 +96,9 @@ impl PduEvent { serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } + pub fn to_member_event(&self) -> EventJson { + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") + } } diff --git a/src/push_rules.rs b/src/push_rules.rs new file mode 100644 index 0000000..44c9363 --- /dev/null +++ b/src/push_rules.rs @@ -0,0 +1,232 @@ +use ruma::{ + events::push_rules::{ + ConditionalPushRule, PatternedPushRule, PushCondition, PushRule, Ruleset, + }, + identifiers::UserId, + push::{Action, Tweak}, +}; + +pub fn default_pushrules(user_id: &UserId) -> Ruleset { + Ruleset { + content: vec![contains_user_name_rule(&user_id)], + override_: vec![ + master_rule(), + suppress_notices_rule(), + invite_for_me_rule(), + member_event_rule(), + contains_display_name_rule(), + tombstone_rule(), + roomnotif_rule(), + ], + room: vec![], + sender: vec![], + underride: vec![ + call_rule(), + encrypted_room_one_to_one_rule(), + room_one_to_one_rule(), + message_rule(), + encrypted_rule(), + ], + } +} + +pub fn master_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::DontNotify], + default: true, + enabled: false, + rule_id: ".m.rule.master".to_owned(), + conditions: vec![], + } +} + +pub fn suppress_notices_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::DontNotify], + default: true, + enabled: true, + rule_id: ".m.rule.suppress_notices".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "content.msgtype".to_owned(), + pattern: "m.notice".to_owned(), + }], + } +} + +pub fn invite_for_me_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("default".to_owned())), + Action::SetTweak(Tweak::Highlight(false)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.invite_for_me".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "content.membership".to_owned(), + pattern: "m.invite".to_owned(), + }], + } +} + +pub fn member_event_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::DontNotify], + default: true, + enabled: true, + rule_id: ".m.rule.member_event".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "content.membership".to_owned(), + pattern: "type".to_owned(), + }], + } +} + +pub fn contains_display_name_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("default".to_owned())), + Action::SetTweak(Tweak::Highlight(true)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.contains_display_name".to_owned(), + conditions: vec![PushCondition::ContainsDisplayName], + } +} + +pub fn tombstone_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: 
vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], + default: true, + enabled: true, + rule_id: ".m.rule.tombstone".to_owned(), + conditions: vec![ + PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.tombstone".to_owned(), + }, + PushCondition::EventMatch { + key: "state_key".to_owned(), + pattern: "".to_owned(), + }, + ], + } +} + +pub fn roomnotif_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], + default: true, + enabled: true, + rule_id: ".m.rule.roomnotif".to_owned(), + conditions: vec![ + PushCondition::EventMatch { + key: "content.body".to_owned(), + pattern: "@room".to_owned(), + }, + PushCondition::SenderNotificationPermission { + key: "room".to_owned(), + }, + ], + } +} + +pub fn contains_user_name_rule(user_id: &UserId) -> PatternedPushRule { + PatternedPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("default".to_owned())), + Action::SetTweak(Tweak::Highlight(true)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.contains_user_name".to_owned(), + pattern: user_id.localpart().to_owned(), + } +} + +pub fn call_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("ring".to_owned())), + Action::SetTweak(Tweak::Highlight(false)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.call".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.call.invite".to_owned(), + }], + } +} + +pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("default".to_owned())), + Action::SetTweak(Tweak::Highlight(false)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.encrypted_room_one_to_one".to_owned(), + conditions: vec![ + PushCondition::RoomMemberCount { is: "2".to_owned() }, + PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.encrypted".to_owned(), + }, + ], + } +} + +pub fn room_one_to_one_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![ + Action::Notify, + Action::SetTweak(Tweak::Sound("default".to_owned())), + Action::SetTweak(Tweak::Highlight(false)), + ], + default: true, + enabled: true, + rule_id: ".m.rule.room_one_to_one".to_owned(), + conditions: vec![ + PushCondition::RoomMemberCount { is: "2".to_owned() }, + PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }, + ], + } +} + +pub fn message_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], + default: true, + enabled: true, + rule_id: ".m.rule.message".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.message".to_owned(), + }], + } +} + +pub fn encrypted_rule() -> ConditionalPushRule { + ConditionalPushRule { + actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], + default: true, + enabled: true, + rule_id: ".m.rule.encrypted".to_owned(), + conditions: vec![PushCondition::EventMatch { + key: "type".to_owned(), + pattern: "m.room.encrypted".to_owned(), + }], + } +} From 7031240af3d92712b2594679e7a9ebcd0a2e16fc Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 14 Jun 2020 08:57:47 +0200 Subject: [PATCH 0148/1727] improvement: /members route --- src/client_server.rs | 22 +++++++++++----------- 1 file 
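The conditional rules defined in push_rules.rs above lean almost entirely on EventMatch conditions, where the key (type, content.msgtype, content.membership, ...) is a dotted path into the event JSON and the pattern is compared against the value found there. The sketch below shows that idea in isolation; the patches above only define and store the rules, so this is not Conduit's rule evaluator, it substitutes plain string equality for the spec's glob matching, and the helper name event_match is made up.

// Illustrative only: a simplified EventMatch check, assuming exact string
// comparison instead of glob patterns. Uses serde_json, which the project
// already depends on.
use serde_json::{json, Value};

fn event_match(event: &Value, key: &str, pattern: &str) -> bool {
    key.split('.')
        .try_fold(event, |value, part| value.get(part))
        .and_then(Value::as_str)
        == Some(pattern)
}

fn main() {
    let notice = json!({
        "type": "m.room.message",
        "content": { "msgtype": "m.notice", "body": "bot output" }
    });
    let text = json!({
        "type": "m.room.message",
        "content": { "msgtype": "m.text", "body": "hello" }
    });

    // .m.rule.suppress_notices (actions: DontNotify) would apply to the
    // first event but not to the second.
    assert!(event_match(&notice, "content.msgtype", "m.notice"));
    assert!(!event_match(&text, "content.msgtype", "m.notice"));
}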
changed, 11 insertions(+), 11 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 0676abc..c1bb693 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1743,28 +1743,28 @@ pub fn search_users_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/members")] +#[get("/_matrix/client/r0/rooms/<_room_id>/members", data = "")] pub fn get_member_events_route( db: State<'_, Database>, - //body: Ruma, + body: Ruma, _room_id: String, ) -> ConduitResult { - //let user_id = body.user_id.as_ref().expect("user is authenticated"); + let user_id = body.user_id.as_ref().expect("user is authenticated"); - //if !db.rooms.is_joined(user_id, &body.room_id)? { - // return Err(Error::BadRequest( - // ErrorKind::Forbidden, - // "You don't have permission to view this room.", - // )); - //} + if !db.rooms.is_joined(user_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } Ok(get_member_events::Response { - chunk: Vec::new(),/*db + chunk: db .rooms .room_state_type(&body.room_id, &EventType::RoomMember)? .values() .map(|pdu| pdu.to_member_event()) - .collect(),*/ + .collect(), } .into()) } From a49a572d76a96f262bf2dbe1a22b428fb6626ead Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 16 Jun 2020 12:11:38 +0200 Subject: [PATCH 0149/1727] feat: cross-signing --- Cargo.lock | 38 ++-- Cargo.toml | 11 +- src/client_server.rs | 334 ++++++++++++++++++++++++++++++++---- src/database.rs | 14 +- src/database/key_backups.rs | 207 ++++++++++++++++++++++ src/database/rooms.rs | 47 +++-- src/database/users.rs | 275 ++++++++++++++++++++++++----- src/main.rs | 10 ++ src/push_rules.rs | 4 +- 9 files changed, 827 insertions(+), 113 deletions(-) create mode 100644 src/database/key_backups.rs diff --git a/Cargo.lock b/Cargo.lock index 258af13..5d8f46f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -299,9 +299,9 @@ dependencies = [ [[package]] name = "dtoa" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" +checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" [[package]] name = "encoding_rs" @@ -632,9 +632,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "jpeg-decoder" @@ -1261,7 +1261,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "ruma-api", "ruma-client-api", @@ -1275,7 +1275,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1290,7 +1290,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = 
"git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", @@ -1300,7 +1300,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "http", "js_int", @@ -1317,7 +1317,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "matches", "ruma-serde", @@ -1354,7 +1354,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "js_int", "matches", @@ -1369,7 +1369,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "rand", "serde", @@ -1379,7 +1379,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "dtoa", "itoa", @@ -1392,7 +1392,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" dependencies = [ "base64 0.12.2", "ring", @@ -1684,6 +1684,12 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + [[package]] name = "tokio" version = "0.2.21" @@ -1795,11 +1801,11 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" dependencies = [ - "smallvec", + "tinyvec", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 62d7b7a..4bdb929 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,15 +29,16 @@ thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [dependencies.ruma] -git = "https://github.com/ruma/ruma" -rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +git = "https://github.com/timokoesters/ruma" +#rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" 
#path = "../ruma/ruma" features = ["rand", "client-api", "federation-api"] # These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma [patch.crates-io] -ruma-common = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } -ruma-serde = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } -ruma-identifiers = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } +ruma-common = { git = "https://github.com/timokoesters/ruma" } +ruma-serde = { git = "https://github.com/timokoesters/ruma" } +ruma-identifiers = { git = "https://github.com/timokoesters/ruma" } #ruma-common = { path = "../ruma/ruma-common" } #ruma-serde = { path = "../ruma/ruma-serde" } +#ruma-identifiers = { path = "../ruma/ruma-identifiers" } diff --git a/src/client_server.rs b/src/client_server.rs index c1bb693..fac0286 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -5,6 +5,7 @@ use std::{ }; use crate::{utils, ConduitResult, Database, Error, Ruma}; +use keys::{upload_signatures, upload_signing_keys}; use log::warn; use rocket::{delete, get, options, post, put, State}; use ruma::{ @@ -13,6 +14,10 @@ use ruma::{ r0::{ account::{get_username_availability, register}, alias::{create_alias, delete_alias, get_alias}, + backup::{ + add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, + update_backup, + }, capabilities::get_capabilities, config::{get_global_account_data, set_global_account_data}, context::get_context, @@ -33,7 +38,7 @@ use ruma::{ profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, - push::{get_pushrules_all, set_pushrule, set_pushrule_enabled}, + push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, read_marker::set_read_marker, redact::redact_event, room::{self, create_room}, @@ -71,9 +76,13 @@ const SESSION_ID_LENGTH: usize = 256; #[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> ConduitResult { + let mut unstable_features = BTreeMap::new(); + + unstable_features.insert("org.matrix.e2e_cross_signing".to_owned(), true); + Ok(get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], - unstable_features: BTreeMap::new(), + unstable_features, } .into()) } @@ -349,11 +358,11 @@ pub fn get_pushrules_all_route( #[put( "/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>", - data = "" + //data = "" )] pub fn set_pushrule_route( - db: State<'_, Database>, - body: Ruma, + //db: State<'_, Database>, + //body: Ruma, _scope: String, _kind: String, _rule_id: String, @@ -694,8 +703,13 @@ pub fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { - db.users - .add_device_keys(user_id, device_id, device_keys, &db.globals)?; + // This check is needed to assure that signatures are kept + if db.users.get_device_keys(user_id, device_id)?.is_none() { + db.users + .add_device_keys(user_id, device_id, device_keys, &db.globals)?; + } else { + println!("Key from {} was skipped: {:?}", user_id, device_keys); + } } Ok(upload_keys::Response { @@ -709,33 +723,38 @@ pub fn get_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { + let sender_id = body.user_id.as_ref().expect("user is authenticated"); + + let mut master_keys = BTreeMap::new(); + let mut self_signing_keys = BTreeMap::new(); + let mut user_signing_keys = BTreeMap::new(); let mut device_keys = BTreeMap::new(); for (user_id, device_ids) 
in &body.device_keys { if device_ids.is_empty() { let mut container = BTreeMap::new(); - for result in db.users.all_device_keys(&user_id.clone()) { - let (device_id, mut keys) = result?; + for device_id in db.users.all_device_ids(user_id) { + let device_id = device_id?; + if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { + let metadata = db + .users + .get_device_metadata(user_id, &device_id)? + .ok_or_else(|| { + Error::bad_database("all_device_keys contained nonexistent device.") + })?; - let metadata = db - .users - .get_device_metadata(user_id, &device_id)? - .ok_or_else(|| { - Error::bad_database("all_device_keys contained nonexistent device.") - })?; + keys.unsigned = Some(keys::UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }); - keys.unsigned = Some(keys::UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }); - - container.insert(device_id, keys); + container.insert(device_id.to_owned(), keys); + } } device_keys.insert(user_id.clone(), container); } else { for device_id in device_ids { let mut container = BTreeMap::new(); - for keys in db.users.get_device_keys(&user_id.clone(), &device_id) { - let mut keys = keys?; + if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, @@ -752,11 +771,26 @@ pub fn get_keys_route( device_keys.insert(user_id.clone(), container); } } + + if let Some(master_key) = db.users.get_master_key(user_id, sender_id)? { + master_keys.insert(user_id.clone(), master_key); + } + if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_id)? { + self_signing_keys.insert(user_id.clone(), self_signing_key); + } + if user_id == sender_id { + if let Some(user_signing_key) = db.users.get_user_signing_key(sender_id)? { + user_signing_keys.insert(user_id.clone(), user_signing_key); + } + } } Ok(get_keys::Response { - failures: BTreeMap::new(), + master_keys, + self_signing_keys, + user_signing_keys, device_keys, + failures: BTreeMap::new(), } .into()) } @@ -789,6 +823,125 @@ pub fn claim_keys_route( .into()) } +#[post("/_matrix/client/unstable/room_keys/version", data = "")] +pub fn create_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let version = db + .key_backups + .create_backup(&user_id, &body.algorithm, &db.globals)?; + + Ok(create_backup::Response { version }.into()) +} + +#[put( + "/_matrix/client/unstable/room_keys/version/<_version>", + data = "" +)] +pub fn update_backup_route( + db: State<'_, Database>, + body: Ruma, + _version: String, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + db.key_backups + .update_backup(&user_id, &body.version, &body.algorithm, &db.globals)?; + + Ok(update_backup::Response.into()) +} + +#[get("/_matrix/client/unstable/room_keys/version", data = "")] +pub fn get_latest_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + let (version, algorithm) = + db.key_backups + .get_latest_backup(&user_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_latest_backup::Response { + algorithm, + count: (db.key_backups.count_keys(user_id, &version)? 
as u32).into(), + etag: db.key_backups.get_etag(user_id, &version)?, + version, + } + .into()) +} + +#[get( + "/_matrix/client/unstable/room_keys/version/<_version>", + data = "" +)] +pub fn get_backup_route( + db: State<'_, Database>, + body: Ruma, + _version: String, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let algorithm = + db.key_backups + .get_backup(&user_id, &body.version)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_backup::Response { + algorithm, + count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(user_id, &body.version)?, + version: body.version.clone(), + } + .into()) +} + +#[put("/_matrix/client/unstable/room_keys/keys", data = "")] +pub fn add_backup_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + for (room_id, room) in &body.rooms { + for (session_id, key_data) in &room.sessions { + db.key_backups.add_key( + &user_id, + &body.version, + &room_id, + &session_id, + &key_data, + &db.globals, + )? + } + } + + Ok(add_backup_keys::Response { + count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(user_id, &body.version)?, + } + .into()) +} + +#[get("/_matrix/client/unstable/room_keys/keys", data = "")] +pub fn get_backup_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + let rooms = db.key_backups.get_all(&user_id, &body.version)?; + + Ok(get_backup_keys::Response { rooms }.into()) +} + #[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] pub fn set_read_marker_route( db: State<'_, Database>, @@ -2040,7 +2193,7 @@ pub fn sync_route( let mut pdus = db .rooms - .pdus_since(&room_id, since)? + .pdus_since(&user_id, &room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); @@ -2083,7 +2236,7 @@ pub fn sync_route( for hero in db .rooms - .all_pdus(&room_id)? + .all_pdus(&user_id, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|pdu| pdu.kind == EventType::RoomMember) .map(|pdu| { @@ -2157,7 +2310,7 @@ pub fn sync_route( if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id)? { Some( (db.rooms - .pdus_since(&room_id, last_read)? + .pdus_since(&user_id, &room_id, last_read)? 
.filter_map(|pdu| pdu.ok()) // Filter out buggy events .filter(|pdu| { matches!( @@ -2271,7 +2424,7 @@ pub fn sync_route( let mut left_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_left(&user_id) { let room_id = room_id?; - let pdus = db.rooms.pdus_since(&room_id, since)?; + let pdus = db.rooms.pdus_since(&user_id, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events .map(|pdu| pdu.to_room_event()) @@ -2375,7 +2528,7 @@ pub fn sync_route( device_lists: sync_events::DeviceLists { changed: if since != 0 { db.users - .device_keys_changed(since) + .keys_changed(since) .filter_map(|u| u.ok()) .collect() // Filter out buggy events } else { @@ -2426,7 +2579,7 @@ pub fn get_context_route( let events_before = db .rooms - .pdus_until(&body.room_id, base_token) + .pdus_until(&user_id, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -2452,7 +2605,7 @@ pub fn get_context_route( let events_after = db .rooms - .pdus_after(&body.room_id, base_token) + .pdus_after(&user_id, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -2516,7 +2669,7 @@ pub fn get_message_events_route( get_message_events::Direction::Forward => { let events_after = db .rooms - .pdus_after(&body.room_id, from) + .pdus_after(&user_id, &body.room_id, from) // Use limit or else 10 .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { Ok(u32::try_from(l).map_err(|_| { @@ -2551,7 +2704,7 @@ pub fn get_message_events_route( get_message_events::Direction::Backward => { let events_before = db .rooms - .pdus_until(&body.room_id, from) + .pdus_until(&user_id, &body.room_id, from) // Use limit or else 10 .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { Ok(u32::try_from(l).map_err(|_| { @@ -2871,9 +3024,126 @@ pub fn delete_devices_route( Ok(delete_devices::Response.into()) } +#[post("/_matrix/client/unstable/keys/device_signing/upload", data = "")] +pub fn upload_signing_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &user_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + if let Some(master_key) = &body.master_key { + db.users.add_cross_signing_keys( + user_id, + &master_key, + &body.self_signing_key, + &body.user_signing_key, + &db.globals, + )?; + } + + Ok(upload_signing_keys::Response.into()) +} + +#[post("/_matrix/client/unstable/keys/signatures/upload", data = "")] +pub fn upload_signatures_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.user_id.as_ref().expect("user is authenticated"); + + for (user_id, signed_keys) in &body.signed_keys { + for (key_id, signed_key) in signed_keys { + for signature in signed_key + .get("signatures") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Missing signatures field.", + ))? + .get(sender_id.to_string()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid user in signatures field.", + ))? + .as_object() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid signature.", + ))? + .clone() + .into_iter() + { + // Signature validation? + let signature = ( + signature.0, + signature + .1 + .as_str() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid signature value.", + ))? + .to_owned(), + ); + db.users + .sign_key(&user_id, &key_id, signature, &sender_id, &db.globals)?; + } + } + } + + Ok(upload_signatures::Response.into()) +} + +#[get("/_matrix/client/r0/pushers")] +pub fn pushers_route() -> ConduitResult { + Ok(get_pushers::Response { + pushers: Vec::new(), + } + .into()) +} + +#[post("/_matrix/client/r0/pushers/set")] +pub fn set_pushers_route() -> ConduitResult { + Ok(get_pushers::Response { + pushers: Vec::new(), + } + .into()) +} + #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, ) -> ConduitResult { Ok(send_event_to_device::Response.into()) } + diff --git a/src/database.rs b/src/database.rs index 2cc01ea..370fde7 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,6 +1,7 @@ pub(self) mod account_data; pub(self) mod global_edus; pub(self) mod globals; +pub(self) mod key_backups; pub(self) mod media; pub(self) mod rooms; pub(self) mod uiaa; @@ -21,6 +22,7 @@ pub struct Database { pub account_data: account_data::AccountData, pub global_edus: global_edus::GlobalEdus, pub media: media::Media, + pub key_backups: key_backups::KeyBackups, pub _db: sled::Db, } @@ -73,8 +75,11 @@ impl Database { userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?, token_userdeviceid: db.open_tree("token_userdeviceid")?, onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, - userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys")?, - devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid")?, + keychangeid_userid: db.open_tree("devicekeychangeid_userid")?, + keyid_key: db.open_tree("keyid_key")?, + userid_masterkeyid: db.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: db.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: db.open_tree("userid_usersigningkeyid")?, todeviceid_events: db.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { @@ -111,6 +116,11 @@ impl Database { media: media::Media { mediaid_file: db.open_tree("mediaid_file")?, }, + key_backups: key_backups::KeyBackups { + backupid_algorithm: db.open_tree("backupid_algorithm")?, + backupid_etag: db.open_tree("backupid_etag")?, + backupkeyid_backup: 
db.open_tree("backupkeyid_backupmetadata")?, + }, _db: db, }) } diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs new file mode 100644 index 0000000..991931b --- /dev/null +++ b/src/database/key_backups.rs @@ -0,0 +1,207 @@ +use crate::{utils, Error, Result}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::backup::{get_backup_keys::Sessions, BackupAlgorithm, KeyData}, + }, + identifiers::{RoomId, UserId}, +}; +use std::{collections::BTreeMap, convert::TryFrom}; + +pub struct KeyBackups { + pub(super) backupid_algorithm: sled::Tree, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: sled::Tree, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: sled::Tree, // BackupKeyId = UserId + Version + RoomId + SessionId +} + +impl KeyBackups { + pub fn create_backup( + &self, + user_id: &UserId, + backup_metadata: &BackupAlgorithm, + globals: &super::globals::Globals, + ) -> Result { + let version = globals.next_count()?.to_string(); + + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + + self.backupid_algorithm.insert( + &key, + &*serde_json::to_string(backup_metadata) + .expect("BackupAlgorithm::to_string always works"), + )?; + self.backupid_etag + .insert(&key, &globals.next_count()?.to_be_bytes())?; + Ok(version) + } + + pub fn update_backup( + &self, + user_id: &UserId, + version: &str, + backup_metadata: &BackupAlgorithm, + globals: &super::globals::Globals, + ) -> Result { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + + if self.backupid_algorithm.get(&key)?.is_none() { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Tried to update nonexistent backup.", + )); + } + + self.backupid_algorithm.insert( + &key, + &*serde_json::to_string(backup_metadata) + .expect("BackupAlgorithm::to_string always works"), + )?; + self.backupid_etag + .insert(&key, &globals.next_count()?.to_be_bytes())?; + Ok(version.to_string()) + } + + pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + self.backupid_algorithm + .scan_prefix(&prefix) + .last() + .map_or(Ok(None), |r| { + let (key, value) = r?; + let version = utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; + + Ok(Some(( + version, + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("Algorithm in backupid_algorithm is invalid.") + })?, + ))) + }) + } + + pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(version.as_bytes()); + + self.backupid_algorithm.get(key)?.map_or(Ok(None), |bytes| { + Ok(serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?) 
+ }) + } + + pub fn add_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + key_data: &KeyData, + globals: &super::globals::Globals, + ) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(version.as_bytes()); + + if self.backupid_algorithm.get(&key)?.is_none() { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Tried to update nonexistent backup.", + )); + } + + self.backupid_etag + .insert(&key, &globals.next_count()?.to_be_bytes())?; + + key.push(0xff); + key.extend_from_slice(room_id.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(session_id.as_bytes()); + + self.backupkeyid_backup.insert( + &key, + &*serde_json::to_string(&key_data).expect("KeyData::to_string always works"), + )?; + + Ok(()) + } + + pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(version.as_bytes()); + + Ok(self.backupkeyid_backup.scan_prefix(&prefix).count()) + } + + pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + + Ok(utils::u64_from_bytes( + &self + .backupid_etag + .get(&key)? + .ok_or_else(|| Error::bad_database("Backup has no etag."))?, + ) + .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? + .to_string()) + } + + pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(version.as_bytes()); + + let mut rooms = BTreeMap::::new(); + + for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| { + let (key, value) = r?; + let mut parts = key.rsplit(|&b| b == 0xff); + + let session_id = utils::string_from_bytes( + &parts + .next() + .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; + + let room_id = RoomId::try_from( + utils::string_from_bytes( + &parts + .next() + .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; + + let key_data = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))?; + + Ok::<_, Error>((room_id, session_id, key_data)) + }) { + let (room_id, session_id, key_data) = result?; + rooms + .entry(room_id) + .or_insert_with(|| Sessions { + sessions: BTreeMap::new(), + }) + .sessions + .insert(session_id, key_data); + } + + Ok(rooms) + } +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 604b5aa..bd1a7ba 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -602,13 +602,18 @@ impl Rooms { } /// Returns an iterator over all PDUs in a room. - pub fn all_pdus(&self, room_id: &RoomId) -> Result>> { - self.pdus_since(room_id, 0) + pub fn all_pdus( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>> { + self.pdus_since(user_id, room_id, 0) } /// Returns an iterator over all events in a room that happened after the event with id `since`. 
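// A standalone sketch of the key layout used by the KeyBackups tree above: entries are keyed as
// user_id + 0xff + version (+ 0xff + room_id + 0xff + session_id), so everything belonging to one
// backup shares a common prefix and can be enumerated with a prefix scan and split apart again on
// 0xff. This is illustrative only: a BTreeMap stands in for the sled tree and the ids are
// hypothetical examples, not values from the patches.
use std::collections::BTreeMap;

fn backup_key(user_id: &str, version: &str, room_id: &str, session_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(version.as_bytes());
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(session_id.as_bytes());
    key
}

fn main() {
    let mut tree = BTreeMap::new();
    tree.insert(
        backup_key("@alice:example.com", "1", "!room:example.com", "sess1"),
        b"encrypted session data".to_vec(),
    );

    // Prefix scan: everything stored for @alice's backup version 1.
    let mut prefix = b"@alice:example.com".to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(b"1");
    prefix.push(0xff);

    for (key, _value) in tree
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
    {
        // The last 0xff-separated component of the key is the session id.
        let session_id = key.rsplit(|&b| b == 0xff).next().unwrap();
        println!("{}", String::from_utf8_lossy(session_id));
    }
}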
pub fn pdus_since( &self, + user_id: &UserId, room_id: &RoomId, since: u64, ) -> Result>> { @@ -617,12 +622,13 @@ impl Rooms { pdu_id.push(0xff); pdu_id.extend_from_slice(&(since).to_be_bytes()); - self.pdus_since_pduid(room_id, &pdu_id) + self.pdus_since_pduid(user_id, room_id, &pdu_id) } /// Returns an iterator over all events in a room that happened after the event with id `since`. pub fn pdus_since_pduid( &self, + user_id: &UserId, room_id: &RoomId, pdu_id: &[u8], ) -> Result>> { @@ -630,6 +636,7 @@ impl Rooms { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); + let user_id = user_id.clone(); Ok(self .pduid_pdu .range(pdu_id..) @@ -641,9 +648,13 @@ impl Rooms { }) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| { - Ok(serde_json::from_slice(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?) + .map(move |(_, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.unsigned.remove("transaction_id"); + } + Ok(pdu) })) } @@ -651,6 +662,7 @@ impl Rooms { /// `until` in reverse-chronological order. pub fn pdus_until( &self, + user_id: &UserId, room_id: &RoomId, until: u64, ) -> impl Iterator> { @@ -663,14 +675,19 @@ impl Rooms { let current: &[u8] = ¤t; + let user_id = user_id.clone(); self.pduid_pdu .range(..current) .rev() .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| { - Ok(serde_json::from_slice(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?) + .map(move |(_, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.unsigned.remove("transaction_id"); + } + Ok(pdu) }) } @@ -678,6 +695,7 @@ impl Rooms { /// `from` in chronological order. pub fn pdus_after( &self, + user_id: &UserId, room_id: &RoomId, from: u64, ) -> impl Iterator> { @@ -690,13 +708,18 @@ impl Rooms { let current: &[u8] = ¤t; + let user_id = user_id.clone(); self.pduid_pdu .range(current..) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| { - Ok(serde_json::from_slice(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?) 
+ .map(move |(_, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.unsigned.remove("transaction_id"); + } + Ok(pdu) }) } diff --git a/src/database/users.rs b/src/database/users.rs index 07c6912..6f90537 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,9 +1,12 @@ use crate::{utils, Error, Result}; use js_int::UInt; use ruma::{ - api::client::r0::{ - device::Device, - keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey}, + api::client::{ + error::ErrorKind, + r0::{ + device::Device, + keys::{AlgorithmAndDeviceId, CrossSigningKey, DeviceKeys, KeyAlgorithm, OneTimeKey}, + }, }, events::{to_device::AnyToDeviceEvent, EventJson, EventType}, identifiers::UserId, @@ -19,8 +22,11 @@ pub struct Users { pub(super) token_userdeviceid: sled::Tree, pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId - pub(super) userdeviceid_devicekeys: sled::Tree, - pub(super) devicekeychangeid_userid: sled::Tree, // DeviceKeyChangeId = Count + pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = Count + pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: sled::Tree, + pub(super) userid_selfsigningkeyid: sled::Tree, + pub(super) userid_usersigningkeyid: sled::Tree, pub(super) todeviceid_events: sled::Tree, // ToDeviceId = UserId + DeviceId + Count } @@ -171,9 +177,6 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - // Remove device keys - self.userdeviceid_devicekeys.remove(&userdeviceid)?; - // Remove tokens if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? { self.token_userdeviceid.remove(&old_token)?; @@ -350,38 +353,168 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); - self.userdeviceid_devicekeys.insert( + self.keyid_key.insert( &userdeviceid, &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"), )?; - self.devicekeychangeid_userid + self.keychangeid_userid .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; Ok(()) } - pub fn get_device_keys( + pub fn add_cross_signing_keys( &self, user_id: &UserId, - device_id: &str, - ) -> impl Iterator> { - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + master_key: &CrossSigningKey, + self_signing_key: &Option, + user_signing_key: &Option, + globals: &super::globals::Globals, + ) -> Result<()> { + // TODO: Check signatures - self.userdeviceid_devicekeys - .scan_prefix(key) - .values() - .map(|bytes| { - Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?) 
- }) + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + // Master key + let mut master_key_ids = master_key.keys.values(); + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained no key.", + ))?; + + if master_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained more than one key.", + )); + } + + let mut master_key_key = prefix.clone(); + master_key_key.extend_from_slice(master_key_id.as_bytes()); + + self.keyid_key.insert( + &master_key_key, + &*serde_json::to_string(&master_key).expect("CrossSigningKey::to_string always works"), + )?; + + self.userid_masterkeyid + .insert(&*user_id.to_string(), master_key_key)?; + + // Self-signing key + if let Some(self_signing_key) = self_signing_key { + let mut self_signing_key_ids = self_signing_key.keys.values(); + let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained no key.", + ))?; + + if self_signing_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained more than one key.", + )); + } + + let mut self_signing_key_key = prefix.clone(); + self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); + + self.keyid_key.insert( + &self_signing_key_key, + &*serde_json::to_string(&self_signing_key) + .expect("CrossSigningKey::to_string always works"), + )?; + + self.userid_selfsigningkeyid + .insert(&*user_id.to_string(), self_signing_key_key)?; + } + + // User-signing key + if let Some(user_signing_key) = user_signing_key { + let mut user_signing_key_ids = user_signing_key.keys.values(); + let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "User signing key contained no key.", + ))?; + + if user_signing_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "User signing key contained more than one key.", + )); + } + + let mut user_signing_key_key = prefix.clone(); + user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + + self.keyid_key.insert( + &user_signing_key_key, + &*serde_json::to_string(&user_signing_key) + .expect("CrossSigningKey::to_string always works"), + )?; + + self.userid_usersigningkeyid + .insert(&*user_id.to_string(), user_signing_key_key)?; + } + + self.keychangeid_userid + .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; + + Ok(()) } - pub fn device_keys_changed(&self, since: u64) -> impl Iterator> { - self.devicekeychangeid_userid - .range(since.to_be_bytes()..) + pub fn sign_key( + &self, + target_id: &UserId, + key_id: &str, + signature: (String, String), + sender_id: &UserId, + globals: &super::globals::Globals, + ) -> Result<()> { + println!( + "Adding signatures on {}'s {} by {}: {}->{}", + target_id, key_id, sender_id, signature.0, signature.1 + ); + + let mut key = target_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(key_id.to_string().as_bytes()); + + let mut cross_signing_key = + serde_json::from_slice::(&self.keyid_key.get(&key)?.ok_or( + Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."), + )?) + .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; + + let signatures = cross_signing_key + .get_mut("signatures") + .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? 
+ .as_object_mut() + .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? + .entry(sender_id.clone()) + .or_insert_with(|| serde_json::Map::new().into()); + + signatures + .as_object_mut() + .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? + .insert(signature.0, signature.1.into()); + + self.keyid_key.insert( + &key, + &*serde_json::to_string(&cross_signing_key) + .expect("CrossSigningKey::to_string always works"), + )?; + + self.keychangeid_userid + .insert(globals.next_count()?.to_be_bytes(), &*target_id.to_string())?; + + Ok(()) + } + + pub fn keys_changed(&self, since: u64) -> impl Iterator> { + self.keychangeid_userid + .range((since + 1).to_be_bytes()..) .values() .map(|bytes| { Ok( @@ -397,29 +530,85 @@ impl Users { }) } - pub fn all_device_keys( - &self, - user_id: &UserId, - ) -> impl Iterator> { + pub fn get_device_keys(&self, user_id: &UserId, device_id: &str) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); - self.userdeviceid_devicekeys.scan_prefix(key).map(|r| { - let (key, value) = r?; - let userdeviceid = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDeviceID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("UserDeviceId in db is invalid."))?; - Ok(( - userdeviceid, - serde_json::from_slice(&*value) - .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?, - )) + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { + Error::bad_database("DeviceKeys in db are invalid.") + })?)) }) } + pub fn get_master_key( + &self, + user_id: &UserId, + sender_id: &UserId, + ) -> Result> { + // TODO: hide some signatures + self.userid_masterkeyid + .get(user_id.to_string())? + .map_or(Ok(None), |key| { + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| { + Error::bad_database("CrossSigningKey in db is invalid.") + })?; + + // A user is not allowed to see signatures from users other than himself and + // the target user + cross_signing_key.signatures = cross_signing_key + .signatures + .into_iter() + .filter(|(user, _)| user == user_id || user == sender_id) + .collect(); + + Ok(Some(cross_signing_key)) + }) + }) + } + + pub fn get_self_signing_key( + &self, + user_id: &UserId, + sender_id: &UserId, + ) -> Result> { + self.userid_selfsigningkeyid + .get(user_id.to_string())? + .map_or(Ok(None), |key| { + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| { + Error::bad_database("CrossSigningKey in db is invalid.") + })?; + + // A user is not allowed to see signatures from users other than himself and + // the target user + cross_signing_key.signatures = cross_signing_key + .signatures + .into_iter() + .filter(|(user, _)| user == user_id || user == sender_id) + .collect(); + + Ok(Some(cross_signing_key)) + }) + }) + } + + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + self.userid_usersigningkeyid + .get(user_id.to_string())? 
+ .map_or(Ok(None), |key| { + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { + Error::bad_database("CrossSigningKey in db is invalid.") + })?)) + }) + }) + } + pub fn add_to_device_event( &self, sender: &UserId, diff --git a/src/main.rs b/src/main.rs index 5c51ae0..a6c1afc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,6 +46,12 @@ fn setup_rocket() -> rocket::Rocket { client_server::upload_keys_route, client_server::get_keys_route, client_server::claim_keys_route, + client_server::create_backup_route, + client_server::update_backup_route, + client_server::get_latest_backup_route, + client_server::get_backup_route, + client_server::add_backup_keys_route, + client_server::get_backup_keys_route, client_server::set_read_marker_route, client_server::create_typing_event_route, client_server::create_room_route, @@ -90,6 +96,10 @@ fn setup_rocket() -> rocket::Rocket { client_server::delete_device_route, client_server::delete_devices_route, client_server::options_route, + client_server::upload_signing_keys_route, + client_server::upload_signatures_route, + client_server::pushers_route, + client_server::set_pushers_route, //server_server::well_known_server, //server_server::get_server_version, //server_server::get_server_keys, diff --git a/src/push_rules.rs b/src/push_rules.rs index 44c9363..90e9243 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,7 +1,5 @@ use ruma::{ - events::push_rules::{ - ConditionalPushRule, PatternedPushRule, PushCondition, PushRule, Ruleset, - }, + events::push_rules::{ConditionalPushRule, PatternedPushRule, PushCondition, Ruleset}, identifiers::UserId, push::{Action, Tweak}, }; From 8f06d0258a436ae98de5727e061b5285008eeaf8 Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Fri, 19 Jun 2020 12:19:07 +0200 Subject: [PATCH 0150/1727] Remove `is_direct` flag from Join event When I was investigating the DM issue, I found that join event contained the `is_direct` flag. According to the spec, this flag should only be set on "invite" events, but not on join. Other homeservers do not have this flag on join. --- src/client_server.rs | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index fac0286..6a9ac77 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1390,34 +1390,13 @@ pub fn join_room_by_id_route( // TODO: Ask a remote server if we don't have this room - let event = db - .rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? - .map_or_else( - || { - // There was no existing membership event - Ok::<_, Error>(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - }, - |pdu| { - // We change the existing membership event - let mut event = serde_json::from_value::>( - pdu.content.clone(), - ) - .map_err(|_| Error::bad_database("Invalid member event in db."))? 
- .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in db."))?; - event.membership = member::MembershipState::Join; - event.displayname = db.users.displayname(&user_id)?; - event.avatar_url = db.users.avatar_url(&user_id)?; - Ok(event) - }, - )?; + let event = member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }; db.rooms.append_pdu( body.room_id.clone(), From 24c544f65001ae30792dd9e274fb2c170f10ba22 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 26 Jun 2020 10:07:02 +0200 Subject: [PATCH 0151/1727] fix: DMs show up as rooms --- src/database/rooms.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index bd1a7ba..b7f7221 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -504,6 +504,10 @@ impl Rooms { if let Some(state_key) = &state_key { if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + ); } } From 05f1f73e10743d6fde9a9c9c2a6be0c002b57418 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 26 Jun 2020 16:10:56 +0200 Subject: [PATCH 0152/1727] improvement: upgrade to sled 0.32 BREAKING CHANGE: old databases are not compatible anymore --- Cargo.lock | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 2 +- 2 files changed, 61 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d8f46f..8a69dea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,14 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
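// A minimal sketch (not the append_pdu implementation itself) of the bookkeeping added in the
// "DMs show up as rooms" fix above: when a state event replaces an earlier one, the previous
// content and sender are copied into the new event's `unsigned` map so clients can render the
// membership transition. `PduStub` and its field types are illustrative assumptions here; the
// real code works on the stored PduEvent.
use std::collections::BTreeMap;

struct PduStub {
    sender: String,
    content: serde_json::Value,
}

fn unsigned_for_replacement(prev: &PduStub) -> BTreeMap<String, serde_json::Value> {
    let mut unsigned = BTreeMap::new();
    unsigned.insert("prev_content".to_owned(), prev.content.clone());
    unsigned.insert(
        "prev_sender".to_owned(),
        serde_json::Value::String(prev.sender.clone()),
    );
    unsigned
}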
+[[package]] +name = "addr2line" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602d785912f476e480434627e8732e6766b760c045bbf897d9dfaa9f4fbd399c" +dependencies = [ + "gimli", +] + [[package]] name = "adler32" version = "1.1.0" @@ -52,6 +61,20 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +[[package]] +name = "backtrace" +version = "0.3.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + [[package]] name = "base16" version = "0.2.1" @@ -164,7 +187,8 @@ dependencies = [ "rust-argon2 0.8.2", "serde", "serde_json", - "sled", + "sled 0.31.0", + "sled 0.32.0", "thiserror", "tokio", ] @@ -484,6 +508,12 @@ dependencies = [ "lzw", ] +[[package]] +name = "gimli" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" + [[package]] name = "h2" version = "0.2.5" @@ -883,6 +913,12 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + [[package]] name = "once_cell" version = "1.4.0" @@ -1424,6 +1460,12 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + [[package]] name = "rustls" version = "0.16.0" @@ -1567,6 +1609,23 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "sled" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdad3dc85d888056d3bd9954ffdf22d8a22701b6cd3aca4f6df4c436111898c4" +dependencies = [ + "backtrace", + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log", + "parking_lot", +] + [[package]] name = "smallvec" version = "1.4.0" diff --git a/Cargo.toml b/Cargo.toml index 4bdb929..ba3dad8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "4928e35ec5c4b9242f50d644282d9896d0160a10", features = ["tls"] } http = "0.2.1" log = "0.4.8" -sled = "0.31.0" +sled = "0.32.0" directories = "2.0.2" js_int = "0.1.5" serde_json = { version = "1.0.53", features = ["raw_value"] } From 54ad1fbed9b8348edc6399c48da1d441641553bd Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 26 Jun 2020 16:15:18 +0200 Subject: [PATCH 0153/1727] fix: CI --- src/client_server.rs | 3 --- src/database/users.rs | 5 ----- sytest/sytest-whitelist | 12 ------------ 3 files changed, 20 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6a9ac77..49692a2 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -707,8 +707,6 @@ pub fn upload_keys_route( if db.users.get_device_keys(user_id, device_id)?.is_none() { db.users .add_device_keys(user_id, device_id, device_keys, &db.globals)?; - } else { - println!("Key from {} was skipped: {:?}", user_id, device_keys); } } @@ -3125,4 +3123,3 @@ pub fn options_route( ) -> ConduitResult { Ok(send_event_to_device::Response.into()) } 
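// A minimal, self-contained sketch of the signature merge that sign_key performs on the stored
// cross-signing key JSON (see the keyid_key handling above). It is illustrative only: it works on
// a plain serde_json::Value instead of the sled tree, and the sender/key ids it expects are
// hypothetical placeholders, not values taken from the patches.
fn merge_signature(
    cross_signing_key: &mut serde_json::Value,
    sender_id: &str,
    key_id: &str,
    signature: &str,
) -> Option<()> {
    // Target shape: "signatures": { "@sender:server": { "ed25519:KEYID": "base64 sig" } }
    let signatures = cross_signing_key
        .as_object_mut()?
        .entry("signatures")
        .or_insert_with(|| serde_json::Map::new().into());
    let per_sender = signatures
        .as_object_mut()?
        .entry(sender_id.to_owned())
        .or_insert_with(|| serde_json::Map::new().into());
    per_sender
        .as_object_mut()?
        .insert(key_id.to_owned(), signature.to_owned().into());
    Some(())
}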
- diff --git a/src/database/users.rs b/src/database/users.rs index 6f90537..9f4eb40 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -472,11 +472,6 @@ impl Users { sender_id: &UserId, globals: &super::globals::Globals, ) -> Result<()> { - println!( - "Adding signatures on {}'s {} by {}: {}->{}", - target_id, key_id, sender_id, signature.0, signature.1 - ); - let mut key = target_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(key_id.to_string().as_bytes()); diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index a77c546..3642c44 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -1,4 +1,3 @@ -# Register endpoints implemented GET /register yields a set of flows POST /register can create a user POST /register downcases capitals in usernames @@ -17,14 +16,12 @@ POST /register rejects registration of usernames with '£' POST /register rejects registration of usernames with 'é' POST /register rejects registration of usernames with '\n' POST /register rejects registration of usernames with ''' -# Login endpoints implemented GET /login yields a set of flows POST /login can log in as a user POST /login returns the same device_id as that in the request POST /login can log in as a user with just the local part of the id POST /login as non-existing user is rejected POST /login wrong password is rejected -# Room creation endpoints implemented POST /createRoom makes a private room POST /createRoom makes a private room with invites GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership @@ -49,15 +46,6 @@ Can read configuration endpoint AS cannot create users outside its own namespace Changing the actions of an unknown default rule fails with 404 Changing the actions of an unknown rule fails with 404 -Trying to add push rule with invalid scope fails with 400 -Trying to add push rule with invalid template fails with 400 -Trying to add push rule with rule_id with slashes fails with 400 -Trying to add push rule with override rule without conditions fails with 400 -Trying to add push rule with underride rule without conditions fails with 400 -Trying to add push rule with condition without kind fails with 400 -Trying to add push rule with content rule without pattern fails with 400 -Trying to add push rule with no actions fails with 400 -Trying to add push rule with invalid action fails with 400 Trying to get push rules with unknown rule_id fails with 404 GET /events with non-numeric 'limit' GET /events with negative 'limit' From b83fba5c448316c96494465b6e85ca1c967e73bb Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 26 Jun 2020 18:04:08 +0200 Subject: [PATCH 0154/1727] chore: update dependencies --- Cargo.lock | 216 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 128 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a69dea..6b18e1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,13 +35,13 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89cb5d814ab2a47fd66d3266e9efccb53ca4c740b7451043b8ffcf9a6208f3f8" +checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -52,7 +52,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ 
"hermit-abi", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -98,9 +98,9 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "base64" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e223af0dc48c96d4f8342ec01a4974f139df863896b316681efd36742f22cc67" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "bitflags" @@ -139,15 +139,18 @@ checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" +checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" +dependencies = [ + "loom", +] [[package]] name = "cc" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311" +checksum = "b1be3409f94d7bdceeb5f5fac551039d9b3f00e25da7a74fc4d33400a0d96368" [[package]] name = "cfg-if" @@ -174,7 +177,7 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.2", + "base64 0.12.3", "directories", "http", "image", @@ -187,8 +190,7 @@ dependencies = [ "rust-argon2 0.8.2", "serde", "serde_json", - "sled 0.31.0", - "sled 0.32.0", + "sled", "thiserror", "tokio", ] @@ -297,7 +299,7 @@ dependencies = [ "bitflags", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -318,7 +320,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -364,7 +366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -440,7 +442,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -487,6 +489,19 @@ dependencies = [ "byteorder", ] +[[package]] +name = "generator" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + [[package]] name = "getrandom" version = "0.1.14" @@ -628,9 +643,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.5" +version = "0.23.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d534e95ad8b9d5aa614322d02352b4f1bf962254adcf02ac6f2def8be18498e8" +checksum = "b5b0553fec6407d63fe2975b794dfb099f3f790bdc958823851af37b26404ab4" dependencies = [ "bytemuck", "byteorder", @@ -686,9 +701,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ab7bb370a788ad675863e035fd9bfa56a66a030a16a88ab80aeb6b18cbdf31" +checksum = "1b2b63d60564122f2a7d6592c2f1d6c1c60e7a266b4d24715950a1ddad784f66" dependencies = [ "serde", ] @@ -733,6 +748,17 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "loom" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", +] + [[package]] name = "lzw" version = "0.10.0" @@ -859,7 +885,7 @@ checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -885,9 +911,9 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.2.4" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +checksum = "a5b4d7360f362cfb50dde8143501e6940b22f644be75a4cc90b2d81968908138" dependencies = [ "autocfg", "num-integer", @@ -927,9 +953,9 @@ checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" [[package]] name = "openssl" -version = "0.10.29" +version = "0.10.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd" +checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" dependencies = [ "bitflags", "cfg-if", @@ -979,7 +1005,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1033,7 +1059,7 @@ checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1099,7 +1125,7 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ - "unicode-xid 0.2.0", + "unicode-xid 0.2.1", ] [[package]] @@ -1184,7 +1210,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1193,7 +1219,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" dependencies = [ - "base64 0.12.2", + "base64 0.12.3", "bytes", "encoding_rs", "futures-core", @@ -1223,9 +1249,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.14" +version = "0.16.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06b3fefa4f12272808f809a0af618501fdaba41a58963c5fb72238ab0be09603" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", "libc", @@ -1233,7 +1259,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1291,13 +1317,13 @@ dependencies = [ "time", "tokio", "tokio-rustls", - "unicode-xid 0.2.0", + "unicode-xid 0.2.1", ] [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "ruma-api", "ruma-client-api", @@ -1311,7 +1337,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "http", "percent-encoding 
2.1.0", @@ -1326,17 +1352,17 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "http", "js_int", @@ -1353,7 +1379,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "matches", "ruma-serde", @@ -1384,13 +1410,13 @@ source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "js_int", "matches", @@ -1405,7 +1431,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.16.2" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "rand", "serde", @@ -1415,7 +1441,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "dtoa", "itoa", @@ -1428,9 +1454,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495" +source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ - "base64 0.12.2", + "base64 0.12.3", "ring", "serde_json", "untrusted", @@ -1454,7 +1480,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.12.2", + "base64 0.12.3", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1466,6 +1492,15 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.16.0" @@ -1492,9 +1527,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.8", + "winapi 0.3.9", ] +[[package]] +name = "scoped-tls" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" + [[package]] name = "scopeguard" version = "1.1.0" @@ -1535,23 +1576,38 @@ dependencies = [ ] [[package]] -name = "serde" -version = "1.0.112" +name = "semver" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736aac72d1eafe8e5962d1d1c3d99b0df526015ba40915cb3c49d042e92ec243" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.112" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf0343ce212ac0d3d6afd9391ac8e9c9efe06b533c8d33f660f6390cc4093f57" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1593,22 +1649,6 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -[[package]] -name = "sled" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb6824dde66ad33bf20c6e8476f5b82b871bc8bc3c129a10ea2f7dae5060fa3" -dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", -] - [[package]] name = "sled" version = "0.32.0" @@ -1641,7 +1681,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1674,7 +1714,7 @@ dependencies = [ "heck", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1690,13 +1730,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.31" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "unicode-xid 0.2.0", + "unicode-xid 0.2.1", ] [[package]] @@ -1710,7 +1750,7 @@ dependencies = [ "rand", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1730,7 +1770,7 @@ checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1740,7 +1780,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1769,7 +1809,7 @@ dependencies = [ "signal-hook-registry", "slab", "tokio-macros", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1780,7 +1820,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 
1.0.33", ] [[package]] @@ -1881,9 +1921,9 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "untrusted" @@ -1953,7 +1993,7 @@ dependencies = [ "log", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "wasm-bindgen-shared", ] @@ -1987,7 +2027,7 @@ checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2026,9 +2066,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -2058,7 +2098,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] From 67a1f21f5d2a8e881cf447b1d8ef0927171610d8 Mon Sep 17 00:00:00 2001 From: the0 Date: Thu, 2 Jul 2020 20:38:25 +0200 Subject: [PATCH 0155/1727] feat: implement password changing (#138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Password: Fixes Password: Implement logging out all devices except current Password: Implement password changing Co-authored-by: the0 Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/138 Reviewed-by: Timo Kösters --- src/client_server.rs | 54 ++++++++++++++++++++++++++++++++++++++++++- src/database/users.rs | 13 +++++++++++ src/main.rs | 1 + 3 files changed, 67 insertions(+), 1 deletion(-) diff --git a/src/client_server.rs b/src/client_server.rs index 49692a2..65b2c86 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -12,7 +12,7 @@ use ruma::{ api::client::{ error::ErrorKind, r0::{ - account::{get_username_availability, register}, + account::{change_password, get_username_availability, register}, alias::{create_alias, delete_alias, get_alias}, backup::{ add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, @@ -305,6 +305,58 @@ pub fn logout_route( Ok(logout::Response.into()) } +#[post("/_matrix/client/r0/account/password", data = "")] +pub fn change_password_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &user_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + } else { + uiaainfo.session = 
Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + db.users.set_password(&user_id, &body.new_password)?; + + // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma + // See: https://github.com/ruma/ruma/issues/107 + // Logout all devices except the current one + for id in db + .users + .all_device_ids(&user_id) + .filter_map(|id| id.ok()) + .filter(|id| id != device_id) + { + db.users.remove_device(&user_id, &id)?; + } + + Ok(change_password::Response.into()) +} + #[get("/_matrix/client/r0/capabilities")] pub fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); diff --git a/src/database/users.rs b/src/database/users.rs index 9f4eb40..e05cf2e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -93,6 +93,19 @@ impl Users { }) } + /// Hash and set the user's password to the Argon2 hash + pub fn set_password(&self, user_id: &UserId, password: &str) -> Result<()> { + if let Ok(hash) = utils::calculate_hash(&password) { + self.userid_password.insert(user_id.to_string(), &*hash)?; + } else { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Password does not meet the requirements.", + )); + } + Ok(()) + } + /// Returns the displayname of a user on this homeserver. pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname diff --git a/src/main.rs b/src/main.rs index a6c1afc..1147572 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,6 +29,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_login_route, client_server::login_route, client_server::logout_route, + client_server::change_password_route, client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, From b81939841b272206991041ce7643a1c572590db3 Mon Sep 17 00:00:00 2001 From: the0 Date: Sun, 5 Jul 2020 07:48:19 +0200 Subject: [PATCH 0156/1727] feat: account deactivation (#137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Deactivation: swap unwrap_or(false) to .ok()? 
feat: implement deactivate account route Implement error code on login to deactivated account Deactivation: Changes requested Add missing .clone() Deactivation: Requested changes Remove unneeded .filter() Deactivation: badly named signature leads to confusion Co-authored-by: the0 Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/137 Reviewed-by: Timo Kösters --- src/client_server.rs | 117 ++++++++++++++++++++++++++++++++++++------ src/database/users.rs | 36 +++++++++++-- src/main.rs | 1 + 3 files changed, 132 insertions(+), 22 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 65b2c86..cde8bf5 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -12,7 +12,10 @@ use ruma::{ api::client::{ error::ErrorKind, r0::{ - account::{change_password, get_username_availability, register}, + account::{ + change_password, deactivate, get_username_availability, register, + ThirdPartyIdRemovalStatus, + }, alias::{create_alias, delete_alias, get_alias}, backup::{ add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, @@ -179,15 +182,8 @@ pub fn register_route( let password = body.password.clone().unwrap_or_default(); - if let Ok(hash) = utils::calculate_hash(&password) { - // Create user - db.users.create(&user_id, &hash)?; - } else { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )); - } + // Create user + db.users.create(&user_id, &password)?; // Generate new device id if the user didn't specify one let device_id = body @@ -252,6 +248,10 @@ pub fn login_route( let user_id = UserId::parse_with_server_name(username, db.globals.server_name()).map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password."))?; + if hash.is_empty() { + return Err(Error::BadRequest(ErrorKind::UserDeactivated, "The user has been deactivated")); + } + let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); @@ -312,6 +312,7 @@ pub fn change_password_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); + let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec!["m.login.password".to_owned()], @@ -334,6 +335,7 @@ pub fn change_password_route( if !worked { return Err(Error::Uiaa(uiaainfo)); } + // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa.create(&user_id, &device_id, &uiaainfo)?; @@ -357,6 +359,79 @@ pub fn change_password_route( Ok(change_password::Response.into()) } +#[post("/_matrix/client/r0/account/deactivate", data = "")] +pub fn deactivate_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &user_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + // Leave all joined rooms and reject all invitations + for room_id in db + .rooms + .rooms_joined(&user_id) + .chain(db.rooms.rooms_invited(&user_id)) + { + let room_id = room_id?; + let event = member::MemberEventContent { + membership: member::MembershipState::Leave, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + }; + + db.rooms.append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(event).expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + &db.globals, + )?; + } + + // Remove devices and mark account as deactivated + db.users.deactivate_account(&user_id)?; + + Ok(deactivate::Response { + id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, + } + .into()) +} + #[get("/_matrix/client/r0/capabilities")] pub fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); @@ -1905,19 +1980,27 @@ pub fn search_users_route( .filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; - Some(search_users::User { + if db.users.is_deactivated(&user_id).ok()? { + return None; + } + + let user = search_users::User { user_id: user_id.clone(), display_name: db.users.displayname(&user_id).ok()?, avatar_url: db.users.avatar_url(&user_id).ok()?, - }) - }) - .filter(|user| { - user.user_id.to_string().contains(&body.search_term) - || user + }; + + if !user.user_id.to_string().contains(&body.search_term) + && user .display_name .as_ref() .filter(|name| name.contains(&body.search_term)) - .is_some() + .is_none() + { + return None; + } + + Some(user) }) .collect(), limited: false, diff --git a/src/database/users.rs b/src/database/users.rs index e05cf2e..2ccf59a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -37,9 +37,21 @@ impl Users { Ok(self.userid_password.contains_key(user_id.to_string())?) } + /// Check if account is deactivated + pub fn is_deactivated(&self, user_id: &UserId) -> Result { + Ok(self + .userid_password + .get(user_id.to_string())? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "User does not exist.", + ))? + .is_empty()) + } + /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, hash: &str) -> Result<()> { - self.userid_password.insert(user_id.to_string(), hash)?; + pub fn create(&self, user_id: &UserId, password: &str) -> Result<()> { + self.set_password(user_id, password)?; Ok(()) } @@ -97,13 +109,13 @@ impl Users { pub fn set_password(&self, user_id: &UserId, password: &str) -> Result<()> { if let Ok(hash) = utils::calculate_hash(&password) { self.userid_password.insert(user_id.to_string(), &*hash)?; + Ok(()) } else { - return Err(Error::BadRequest( + Err(Error::BadRequest( ErrorKind::InvalidParam, "Password does not meet the requirements.", - )); + )) } - Ok(()) } /// Returns the displayname of a user on this homeserver. @@ -721,4 +733,18 @@ impl Users { })?) 
        })
    }
+
+    /// Deactivate account
+    pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> {
+        // Remove all associated devices
+        for device_id in self.all_device_ids(user_id) {
+            self.remove_device(&user_id, &device_id?)?;
+        }
+
+        // Set the password to "" to indicate a deactivated account
+        self.userid_password.insert(user_id.to_string(), "")?;
+
+        // TODO: Unhook 3PID
+        Ok(())
+    }
 }
diff --git a/src/main.rs b/src/main.rs
index 1147572..f94df50 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -30,6 +30,7 @@ fn setup_rocket() -> rocket::Rocket {
             client_server::login_route,
             client_server::logout_route,
             client_server::change_password_route,
+            client_server::deactivate_route,
             client_server::get_capabilities_route,
             client_server::get_pushrules_all_route,
             client_server::set_pushrule_route,

From 7c2144c6d7fd19dd933aac3dc0d30ffa30dee80a Mon Sep 17 00:00:00 2001
From: CapsizeGlimmer <>
Date: Sat, 4 Jul 2020 16:41:05 -0400
Subject: [PATCH 0157/1727] Implement /logout/all

---
 src/client_server.rs | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/src/client_server.rs b/src/client_server.rs
index cde8bf5..f34dc60 100644
--- a/src/client_server.rs
+++ b/src/client_server.rs
@@ -45,7 +45,7 @@ use ruma::{
         read_marker::set_read_marker,
         redact::redact_event,
         room::{self, create_room},
-        session::{get_login_types, login, logout},
+        session::{get_login_types, login, logout, logout_all},
         state::{
             create_state_event_for_empty_key, create_state_event_for_key, get_state_events,
             get_state_events_for_empty_key, get_state_events_for_key,
@@ -305,6 +305,22 @@ pub fn logout_route(
     Ok(logout::Response.into())
 }
 
+#[post("/_matrix/client/r0/logout/all", data = "<body>")]
+pub fn logout_all_route(
+    db: State<'_, Database>,
+    body: Ruma<logout_all::Request>,
+) -> ConduitResult<logout_all::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+
+    for device_id in db.users.all_device_ids(user_id) {
+        if let Ok(device_id) = device_id {
+            db.users.remove_device(&user_id, &device_id)?;
+        }
+    }
+
+    Ok(logout_all::Response.into())
+}
+
 #[post("/_matrix/client/r0/account/password", data = "<body>")]
 pub fn change_password_route(
     db: State<'_, Database>,

From 76aa44ead334d33d155f7094d7e147c8322ed2ac Mon Sep 17 00:00:00 2001
From: CapsizeGlimmer <>
Date: Sun, 5 Jul 2020 02:35:19 -0400
Subject: [PATCH 0158/1727] Add logout/all route to main

---
 src/main.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/main.rs b/src/main.rs
index f94df50..47b840f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -29,6 +29,7 @@ fn setup_rocket() -> rocket::Rocket {
             client_server::get_login_route,
             client_server::login_route,
             client_server::logout_route,
+            client_server::logout_all_route,
             client_server::change_password_route,
             client_server::deactivate_route,
             client_server::get_capabilities_route,

From 3a8efaeafa6c4fa9178673cf2938dcafbe1e16c5 Mon Sep 17 00:00:00 2001
From: CapsizeGlimmer <>
Date: Tue, 7 Jul 2020 23:09:29 -0400
Subject: [PATCH 0159/1727] Whitelist logout all

---
 sytest/sytest-whitelist | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist
index 3642c44..9012346 100644
--- a/sytest/sytest-whitelist
+++ b/sytest/sytest-whitelist
@@ -65,3 +65,4 @@ User in shared private room does appear in user directory
 User in dir while user still shares private rooms
 POST /rooms/:room_id/ban can ban a user
 Alternative server names do not cause a routing loop
+Can logout all devices

From f361cb3d398dd75ed1c15a9ef433ad82e0d4c7f5 Mon Sep 17 00:00:00 2001
From: timokoesters
Date: Sat, 11 Jul 2020 09:51:58 +0200
Subject: [PATCH 0160/1727] Improve README

---
 README.md | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index ded4844..53eb85c 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ See it in action:
 
 #### Can I try it out?
 
-Yes! Just open Riot ( or RiotX) and register on the `https://conduit.koesters.xyz:14004` homeserver.
+Yes! Just open Riot ( or RiotX for example) and register on the `https://conduit.koesters.xyz:14004` homeserver.
 
 #### How can I deploy my own?
 
@@ -29,15 +29,20 @@ It's explained in more detail [here](https://git.koesters.xyz/timo/conduit/wiki/
 
 #### What are the biggest things still missing?
 
-- Federation (Make Conduit talk to other Matrix servers)
-- Notifications (Make Matrix client notify the user when new messages or pings arrive)
+- Federation (Talk to other Matrix servers)
+- Appservices (Bridges and Bots)
+- Push notifications on mobile
+- Notification settings
 - Lots of testing
 
 Also check out the [milestones](https://git.koesters.xyz/timo/conduit/milestones).
 
 #### How can I contribute?
 
-If you want to help, you may be able to find something in the issue tracker. If you do, comment on the issue, so others know. You can also join #conduit:matrix.org and ask there.
+1. Look for an issue you would like to work on and make sure it's not assigned to other users
+2. Ask someone to assign the issue to you (comment on the issue or chat in #conduit:matrix.org)
+3. Fork the repo and work on the issue. #conduit:matrix.org is happy to help :)
+4. Submit a PR
 
 #### Donate
 

From 469071e105781181356d41b98541ea36160156f8 Mon Sep 17 00:00:00 2001
From: the0
Date: Sat, 11 Jul 2020 14:08:37 +0200
Subject: [PATCH 0161/1727] feat: implement /event (#144)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Merge branch 'master' into event-route

Event: Implement /event

Co-authored-by: the0
Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/144
Reviewed-by: Timo Kösters
---
 src/client_server.rs | 31 ++++++++++++++++++++++++++++++-
 src/main.rs          |  1 +
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/src/client_server.rs b/src/client_server.rs
index f34dc60..f1a9dbe 100644
--- a/src/client_server.rs
+++ b/src/client_server.rs
@@ -44,7 +44,7 @@ use ruma::{
         push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled},
         read_marker::set_read_marker,
         redact::redact_event,
-        room::{self, create_room},
+        room::{self, create_room, get_room_event},
         session::{get_login_types, login, logout, logout_all},
         state::{
             create_state_event_for_empty_key, create_state_event_for_key, get_state_events,
@@ -2059,6 +2059,35 @@ pub fn get_protocols_route() -> ConduitResult {
     .into())
 }
 
+#[get(
+    "/_matrix/client/r0/rooms/<_room_id>/event/<_event_id>",
+    data = "<body>"
+)]
+pub fn get_room_event_route(
+    db: State<'_, Database>,
+    body: Ruma<get_room_event::Request>,
+    _room_id: String,
+    _event_id: String,
+) -> ConduitResult<get_room_event::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+
+    if !db.rooms.is_joined(user_id, &body.room_id)? {
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "You don't have permission to view this room.",
+        ));
+    }
+
+    Ok(get_room_event::Response {
+        event: db
+            .rooms
+            .get_pdu(&body.event_id)?
+            .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
+ .to_room_event(), + } + .into()) +} + #[put( "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", data = "" diff --git a/src/main.rs b/src/main.rs index 47b840f..f406a17 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,6 +36,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_pushrules_all_route, client_server::set_pushrule_route, client_server::set_pushrule_enabled_route, + client_server::get_room_event_route, client_server::get_filter_route, client_server::create_filter_route, client_server::set_global_account_data_route, From b9de21949d170b5ac81ec5f40ce52f93886a79d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 14 Jul 2020 18:23:26 +0200 Subject: [PATCH 0162/1727] Update to rocket master (#147) Merge branch 'master' into rocket Update to rocket master Co-authored-by: timokoesters Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/147 --- Cargo.lock | 401 ++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 2 +- rust-toolchain | 2 +- src/error.rs | 13 +- src/ruma_wrapper.rs | 15 +- 5 files changed, 401 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b18e1f..07f6826 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,6 +15,61 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" +[[package]] +name = "aead" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher-trait", +] + +[[package]] +name = "aes-gcm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "834a6bda386024dbb7c8fc51322856c10ffe69559f972261c868485f5759c638" +dependencies = [ + "aead", + "aes", + "block-cipher-trait", + "ghash", + "subtle 2.2.3", + "zeroize", +] + +[[package]] +name = "aes-soft" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" +dependencies = [ + "block-cipher-trait", + "byteorder", + "opaque-debug", +] + +[[package]] +name = "aesni" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" +dependencies = [ + "block-cipher-trait", + "opaque-debug", +] + [[package]] name = "arc-swap" version = "0.4.7" @@ -44,6 +99,12 @@ dependencies = [ "syn 1.0.33", ] +[[package]] +name = "atomic" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" + [[package]] name = "atty" version = "0.2.14" @@ -76,10 +137,10 @@ dependencies = [ ] [[package]] -name = "base16" -version = "0.2.1" +name = "base-x" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" +checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" [[package]] name = "base64" @@ -102,6 +163,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "binascii" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" + [[package]] name = "bitflags" version = "1.2.1" @@ -119,12 +186,48 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-cipher-trait" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + [[package]] name = "bumpalo" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + [[package]] name = "bytemuck" version = "1.2.0" @@ -203,13 +306,17 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.12.0" -source = "git+https://github.com/SergioBenitez/cookie-rs?rev=e0f3e6c#e0f3e6c4daea108d55838c56da777b36898bd223" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca761767cf3fa9068cc893ec8c247a22d0fd0535848e65640c0548bd1f8bbb36" dependencies = [ - "base64 0.10.1", + "aes-gcm", + "base64 0.12.3", + "hkdf", "percent-encoding 2.1.0", - "ring", - "time", + "rand", + "sha2", + "time 0.2.16", ] [[package]] @@ -263,6 +370,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle 1.0.0", +] + [[package]] name = "deflate" version = "0.8.4" @@ -302,6 +419,15 @@ dependencies = [ "syn 1.0.33", ] +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + [[package]] name = "directories" version = "2.0.2" @@ -323,6 +449,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "dtoa" version = "0.4.6" @@ -338,6 +470,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fnv" version = "1.0.7" @@ -502,6 +640,15 @@ dependencies = [ "winapi 0.3.9", ] 
+[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + [[package]] name = "getrandom" version = "0.1.14" @@ -513,6 +660,15 @@ dependencies = [ "wasi", ] +[[package]] +name = "ghash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +dependencies = [ + "polyval", +] + [[package]] name = "gif" version = "0.10.3" @@ -529,6 +685,12 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "h2" version = "0.2.5" @@ -566,6 +728,26 @@ dependencies = [ "libc", ] +[[package]] +name = "hkdf" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" +dependencies = [ + "digest", + "hmac", +] + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac", + "digest", +] + [[package]] name = "http" version = "0.2.1" @@ -611,7 +793,7 @@ dependencies = [ "log", "pin-project", "socket2", - "time", + "time 0.1.43", "tokio", "tower-service", "want", @@ -951,6 +1133,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + [[package]] name = "openssl" version = "0.10.30" @@ -1092,6 +1280,16 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "polyval" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +dependencies = [ + "cfg-if", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.8" @@ -1204,6 +1402,26 @@ dependencies = [ "rust-argon2 0.7.0", ] +[[package]] +name = "ref-cast" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -1265,21 +1483,22 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies 
= [ "async-trait", + "atomic", "atty", - "base16", - "base64 0.11.0", + "binascii", "futures", "log", "memchr", "num_cpus", "pear", + "ref-cast", "rocket_codegen", "rocket_http", "state", - "time", + "time 0.2.16", "tokio", "toml", "version_check", @@ -1289,9 +1508,10 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies = [ "devise", + "glob", "indexmap", "quote 1.0.7", "rocket_http", @@ -1302,7 +1522,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies = [ "cookie", "http", @@ -1312,9 +1532,10 @@ dependencies = [ "mime", "pear", "percent-encoding 1.0.1", + "ref-cast", "smallvec", "state", - "time", + "time 0.2.16", "tokio", "tokio-rustls", "unicode-xid 0.2.1", @@ -1633,6 +1854,24 @@ dependencies = [ "url", ] +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + [[package]] name = "signal-hook-registry" version = "1.2.0" @@ -1690,12 +1929,70 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "standback" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +dependencies = [ + "version_check", +] + [[package]] name = "state" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "serde", + "serde_derive", + "syn 1.0.33", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2 1.0.18", + "quote 1.0.7", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn 1.0.33", +] + +[[package]] +name = 
"stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + [[package]] name = "strum" version = "0.18.0" @@ -1717,6 +2014,18 @@ dependencies = [ "syn 1.0.33", ] +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" + [[package]] name = "syn" version = "0.15.44" @@ -1783,6 +2092,44 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +dependencies = [ + "cfg-if", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi 0.3.9", +] + +[[package]] +name = "time-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.18", + "quote 1.0.7", + "standback", + "syn 1.0.33", +] + [[package]] name = "tinyvec" version = "0.3.3" @@ -1880,6 +2227,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + [[package]] name = "unicase" version = "2.6.0" @@ -1925,6 +2278,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +[[package]] +name = "universal-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +dependencies = [ + "generic-array", + "subtle 2.2.3", +] + [[package]] name = "untrusted" version = "0.7.1" @@ -2116,3 +2479,9 @@ name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" diff --git a/Cargo.toml b/Cargo.toml index ba3dad8..30af056 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "4928e35ec5c4b9242f50d644282d9896d0160a10", features = ["tls"] } +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = 
"33e95f49008dcbc8dc51da7d37e0570059176b73", features = ["tls"] } http = "0.2.1" log = "0.4.8" sled = "0.32.0" diff --git a/rust-toolchain b/rust-toolchain index e40c16e..b45e88a 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2020-05-09 +nightly-2020-07-12 diff --git a/src/error.rs b/src/error.rs index 24bb39b..7305073 100644 --- a/src/error.rs +++ b/src/error.rs @@ -46,13 +46,13 @@ impl Error { } } -#[rocket::async_trait] -impl<'r> Responder<'r> for Error { - async fn respond_to(self, r: &'r Request<'_>) -> response::Result<'r> { +impl<'r, 'o> Responder<'r, 'o> for Error +where + 'o: 'r, +{ + fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { if let Self::Uiaa(uiaainfo) = &self { - return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo.clone())) - .respond_to(r) - .await; + return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo.clone())).respond_to(r); } let message = format!("{}", self); @@ -83,6 +83,5 @@ impl<'r> Responder<'r> for Error { status_code, }) .respond_to(r) - .await } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 406357a..e3dc5ae 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,7 +1,7 @@ use crate::{utils, Error}; use log::warn; use rocket::{ - data::{Data, FromData, FromDataFuture, Transform, TransformFuture, Transformed}, + data::{Data, FromDataFuture, Transform, TransformFuture, Transformed, FromTransformedData}, http::Status, response::{self, Responder}, Outcome::*, @@ -22,7 +22,7 @@ pub struct Ruma { pub json_body: Option>, // This is None when body is not a valid string } -impl<'a, T: Endpoint> FromData<'a> for Ruma { +impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { type Error = (); // TODO: Better error handling type Owned = Data; type Borrowed = Self::Owned; @@ -121,13 +121,13 @@ impl>>> From for RumaResponse { } } -#[rocket::async_trait] -impl<'r, T> Responder<'r> for RumaResponse +impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where T: Send + TryInto>>, T::Error: Send, + 'o: 'r { - async fn respond_to(self, _: &'r Request<'_>) -> response::Result<'r> { + fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { let http_response: Result, _> = self.0.try_into(); match http_response { Ok(http_response) => { @@ -141,9 +141,10 @@ where .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); } + let http_body = http_response.into_body(); + response - .sized_body(Cursor::new(http_response.into_body())) - .await; + .sized_body(http_body.len(), Cursor::new(http_body)); response.raw_header("Access-Control-Allow-Origin", "*"); response.raw_header( From 9c095f153809f051c13bfd4a8e66660a33a27e04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Jul 2020 07:50:11 +0200 Subject: [PATCH 0163/1727] Update README, fix CI (#148) Update 'README.md' Update README, fix CI Co-authored-by: timokoesters Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/148 --- README.md | 2 +- sytest/sytest-whitelist | 97 ++++++++++++++++++++++------------------- 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/README.md b/README.md index 53eb85c..404636a 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ See it in action: #### Can I try it out? -Yes! Just open Riot ( or RiotX for example) and register on the `https://conduit.koesters.xyz:14004` homeserver. +Yes! Just open a Matrix client ( or Element Android for example) and register on the `https://conduit.koesters.xyz` homeserver. #### How can I deploy my own? 
diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 9012346..f29075c 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -1,68 +1,77 @@ +3pid invite join valid signature but revoked keys are rejected +3pid invite join valid signature but unreachable ID server are rejected +3pid invite join with wrong but valid signature are rejected +AS cannot create users outside its own namespace +After deactivating account, can't log in with an email +Alternative server names do not cause a routing loop +Both GET and PUT work +Can add account data +Can create filter +Can logout all devices +Can read configuration endpoint +Can send a message directly to a device using PUT /sendToDevice +Can upload with ASCII file name +Can upload with Unicode file name +Can upload without a file name +Changing the actions of an unknown default rule fails with 404 +Changing the actions of an unknown rule fails with 404 +Checking local federation server +GET /device/{deviceId} +GET /device/{deviceId} gives a 404 for unknown devices +GET /devices +GET /events with negative 'limit' +GET /events with non-numeric 'limit' +GET /events with non-numeric 'timeout' +GET /login yields a set of flows +GET /media/r0/download can fetch the value again +GET /profile/:user_id/displayname publicly accessible +GET /publicRooms lists newly-created room GET /register yields a set of flows +GET /rooms/:room_id/state fetches entire room state +GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +Getting push rules doesn't corrupt the cache SYN-390 +POST /createRoom makes a private room +POST /createRoom makes a private room with invites +POST /join/:room_id can join a room +POST /login as non-existing user is rejected +POST /login can log in as a user +POST /login can log in as a user with just the local part of the id +POST /login returns the same device_id as that in the request +POST /login wrong password is rejected +POST /media/r0/upload can create an upload POST /register can create a user POST /register downcases capitals in usernames POST /register rejects registration of usernames with '!' POST /register rejects registration of usernames with '"' +POST /register rejects registration of usernames with ''' POST /register rejects registration of usernames with ':' POST /register rejects registration of usernames with '?' 
-POST /register rejects registration of usernames with '\' POST /register rejects registration of usernames with '@' POST /register rejects registration of usernames with '[' +POST /register rejects registration of usernames with '\' +POST /register rejects registration of usernames with '\n' POST /register rejects registration of usernames with ']' POST /register rejects registration of usernames with '{' POST /register rejects registration of usernames with '|' POST /register rejects registration of usernames with '}' POST /register rejects registration of usernames with '£' POST /register rejects registration of usernames with 'é' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ''' -GET /login yields a set of flows -POST /login can log in as a user -POST /login returns the same device_id as that in the request -POST /login can log in as a user with just the local part of the id -POST /login as non-existing user is rejected -POST /login wrong password is rejected -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -POST /join/:room_id can join a room +POST /rooms/:room_id/ban can ban a user +POST /rooms/:room_id/invite can send an invite POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/invite can send an invite +POSTed media can be thumbnailed +PUT /device/{deviceId} gives a 404 for unknown devices +PUT /device/{deviceId} updates device fields +PUT /directory/room/:room_alias creates alias +PUT /profile/:user_id/displayname sets my name PUT /rooms/:room_id/state/m.room.power_levels can set levels PUT power_levels should not explode if the old power levels were empty -Both GET and PUT work -Version responds 200 OK with valid structure -PUT /profile/:user_id/displayname sets my name -GET /profile/:user_id/displayname publicly accessible -GET /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} gives a 404 for unknown devices -After deactivating account, can't log in with an email -Can create filter Should reject keys claiming to belong to a different user -Can add account data -Checking local federation server -Can read configuration endpoint -AS cannot create users outside its own namespace -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 Trying to get push rules with unknown rule_id fails with 404 -GET /events with non-numeric 'limit' -GET /events with negative 'limit' -GET /events with non-numeric 'timeout' -Getting push rules doesn't corrupt the cache SYN-390 -GET /publicRooms lists newly-created room -PUT /directory/room/:room_alias creates alias -3pid invite join with wrong but valid signature are rejected -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -query for user with no keys returns empty key dict -Can upload without a file name -Can upload with ASCII file name User appears in user directory User directory correctly update on display name change -User in shared private room does appear in user directory User in dir while user still shares private rooms -POST /rooms/:room_id/ban can ban a user -Alternative server names do not cause a routing loop -Can logout all devices +User in shared private room does appear in user directory +Version responds 200 OK with valid 
structure +query for user with no keys returns empty key dict From 7c38e538395aff4eea227f619a712bd0a6327532 Mon Sep 17 00:00:00 2001 From: Devin R Date: Sun, 21 Jun 2020 15:58:42 -0400 Subject: [PATCH 0164/1727] Move to depend on ruma monorepo and ruma-events0.22 --- Cargo.lock | 2487 ---------------------------------- Cargo.toml | 12 +- src/client_server.rs | 257 ++-- src/database/account_data.rs | 2 +- src/database/globals.rs | 21 +- src/database/rooms.rs | 6 +- src/database/rooms/edus.rs | 4 +- src/database/users.rs | 2 +- src/error.rs | 5 + src/pdu.rs | 19 +- 10 files changed, 176 insertions(+), 2639 deletions(-) delete mode 100644 Cargo.lock diff --git a/Cargo.lock b/Cargo.lock deleted file mode 100644 index 07f6826..0000000 --- a/Cargo.lock +++ /dev/null @@ -1,2487 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "addr2line" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602d785912f476e480434627e8732e6766b760c045bbf897d9dfaa9f4fbd399c" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler32" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" - -[[package]] -name = "aead" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "aes" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" -dependencies = [ - "aes-soft", - "aesni", - "block-cipher-trait", -] - -[[package]] -name = "aes-gcm" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "834a6bda386024dbb7c8fc51322856c10ffe69559f972261c868485f5759c638" -dependencies = [ - "aead", - "aes", - "block-cipher-trait", - "ghash", - "subtle 2.2.3", - "zeroize", -] - -[[package]] -name = "aes-soft" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" -dependencies = [ - "block-cipher-trait", - "byteorder", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" -dependencies = [ - "block-cipher-trait", - "opaque-debug", -] - -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" - -[[package]] -name = "async-trait" -version = "0.1.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "atomic" 
-version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" - -[[package]] -name = "backtrace" -version = "0.3.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base-x" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "binascii" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "blake2b_simd" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array", -] - -[[package]] -name = "block-cipher-trait" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "bumpalo" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "bytemuck" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "bytes" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" -dependencies = [ - "loom", -] - -[[package]] -name = "cc" -version = "1.0.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1be3409f94d7bdceeb5f5fac551039d9b3f00e25da7a74fc4d33400a0d96368" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - -[[package]] -name = "color_quant" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" - -[[package]] -name = "conduit" -version = "0.1.0" -dependencies = [ - "base64 0.12.3", - "directories", - "http", - "image", - "js_int", - "log", - "rand", - "reqwest", - "rocket", - "ruma", - "rust-argon2 0.8.2", - "serde", - "serde_json", - "sled", - "thiserror", - "tokio", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "cookie" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca761767cf3fa9068cc893ec8c247a22d0fd0535848e65640c0548bd1f8bbb36" -dependencies = [ - "aes-gcm", - "base64 0.12.3", - "hkdf", - "percent-encoding 2.1.0", - "rand", - "sha2", - "time 0.2.16", -] - -[[package]] -name = "core-foundation" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" - -[[package]] -name = "crc32fast" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if", - "lazy_static", -] - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array", - "subtle 1.0.0", -] - -[[package]] -name = "deflate" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e5d2a2273fed52a7f947ee55b092c4057025d7a3e04e5ecdbd25d6c3fb1bd7" -dependencies = [ - "adler32", - "byteorder", -] - -[[package]] -name = "devise" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" -dependencies = [ - "devise_codegen", - "devise_core", -] - -[[package]] -name = "devise_codegen" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" -dependencies = [ - "devise_core", - "quote 1.0.7", -] - -[[package]] -name = "devise_core" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" -dependencies = [ - "bitflags", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array", -] - -[[package]] -name = "directories" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" -dependencies = [ - "cfg-if", - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" -dependencies = [ - "libc", - "redox_users", - "winapi 0.3.9", -] - -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "dtoa" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" - -[[package]] -name = "encoding_rs" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - -[[package]] -name = "futures" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-executor" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" - -[[package]] -name = "futures-macro" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" -dependencies = [ - "proc-macro-hack", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "futures-sink" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" - -[[package]] -name = "futures-task" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" -dependencies = [ - "once_cell", -] - -[[package]] -name = "futures-util" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "generator" -version = "0.6.21" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "getrandom" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "ghash" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" -dependencies = [ - "polyval", -] - -[[package]] -name = "gif" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "471d90201b3b223f3451cd4ad53e34295f16a1df17b1edf3736d47761c3981af" -dependencies = [ - "color_quant", - "lzw", -] - -[[package]] -name = "gimli" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "h2" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "log", - "slab", - "tokio", - "tokio-util", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909" -dependencies = [ - "libc", -] - -[[package]] -name = "hkdf" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" -dependencies = [ - "digest", - "hmac", -] - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac", - "digest", -] - -[[package]] -name = "http" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes", - "http", -] - -[[package]] -name = "httparse" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" - -[[package]] -name = "hyper" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "itoa", - "log", - "pin-project", - "socket2", - "time 0.1.43", - "tokio", - "tower-service", - "want", -] - -[[package]] -name = "hyper-tls" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-tls", -] - -[[package]] -name = "idna" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "image" -version = "0.23.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b0553fec6407d63fe2975b794dfb099f3f790bdc958823851af37b26404ab4" -dependencies = [ - "bytemuck", - "byteorder", - "gif", - "jpeg-decoder", - "num-iter", - "num-rational", - "num-traits", - "png", -] - -[[package]] -name = "indexmap" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" -dependencies = [ - "autocfg", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - -[[package]] -name = "itoa" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" - -[[package]] -name = "jpeg-decoder" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b47b4c4e017b01abdc5bcc126d2d1002e5a75bbe3ce73f9f4f311a916363704" -dependencies = [ - "byteorder", -] - -[[package]] -name = "js-sys" -version = "0.3.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "js_int" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b2b63d60564122f2a7d6592c2f1d6c1c60e7a266b4d24715950a1ddad784f66" -dependencies = [ - "serde", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.71" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" - -[[package]] -name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "loom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" -dependencies = [ - "cfg-if", - "generator", - "scoped-tls", -] - -[[package]] -name = "lzw" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084" - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" - -[[package]] -name = "memoffset" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" -dependencies = [ - "autocfg", -] - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - -[[package]] -name = "miniz_oxide" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" -dependencies = [ - "adler32", -] - -[[package]] -name = "mio" -version = "0.6.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" -dependencies = [ - "cfg-if", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", -] - -[[package]] -name = "miow" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", -] - -[[package]] -name = "native-tls" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - 
"tempfile", -] - -[[package]] -name = "net2" -version = "0.2.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" -dependencies = [ - "cfg-if", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "num-integer" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b4d7360f362cfb50dde8143501e6940b22f644be75a4cc90b2d81968908138" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" - -[[package]] -name = "once_cell" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "openssl" -version = "0.10.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "lazy_static", - "libc", - "openssl-sys", -] - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" - -[[package]] -name = "openssl-sys" -version = "0.9.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if", - "cloudabi", - "libc", - "redox_syscall", - "smallvec", - "winapi 0.3.9", -] - -[[package]] -name = "pear" -version = "0.1.4" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5320f212db967792b67cfe12bd469d08afd6318a249bd917d5c19bc92200ab8a" -dependencies = [ - "pear_codegen", -] - -[[package]] -name = "pear_codegen" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc1c836fdc3d1ef87c348b237b5b5c4dff922156fb2d968f57734f9669768ca" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", - "version_check", - "yansi", -] - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pin-project" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" - -[[package]] -name = "png" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ccdd66f6fe4b2433b07e4728e9a013e43233120427046e93ceb709c3a439bf" -dependencies = [ - "bitflags", - "crc32fast", - "deflate", - "miniz_oxide", -] - -[[package]] -name = "polyval" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" -dependencies = [ - "cfg-if", - "universal-hash", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" - -[[package]] -name = "proc-macro-hack" -version = "0.5.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" - -[[package]] -name = "proc-macro-nested" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] - -[[package]] -name = "proc-macro2" -version = "1.0.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" -dependencies = [ - "unicode-xid 0.2.1", -] - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" -dependencies = [ - "proc-macro2 1.0.18", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom", - "libc", - "rand_chacha", - "rand_core", - "rand_hc", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" - -[[package]] -name = "redox_users" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" -dependencies = [ - "getrandom", - "redox_syscall", - "rust-argon2 0.7.0", -] - -[[package]] -name = "ref-cast" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "reqwest" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" -dependencies = [ - "base64 0.12.3", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "hyper-tls", - "js-sys", - "lazy_static", - "log", - "mime", - "mime_guess", - "native-tls", - "percent-encoding 2.1.0", - "pin-project-lite", - "serde", - "serde_urlencoded", - "tokio", - "tokio-tls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = 
"ring" -version = "0.16.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi 0.3.9", -] - -[[package]] -name = "rocket" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" -dependencies = [ - "async-trait", - "atomic", - "atty", - "binascii", - "futures", - "log", - "memchr", - "num_cpus", - "pear", - "ref-cast", - "rocket_codegen", - "rocket_http", - "state", - "time 0.2.16", - "tokio", - "toml", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_codegen" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" -dependencies = [ - "devise", - "glob", - "indexmap", - "quote 1.0.7", - "rocket_http", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_http" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" -dependencies = [ - "cookie", - "http", - "hyper", - "indexmap", - "log", - "mime", - "pear", - "percent-encoding 1.0.1", - "ref-cast", - "smallvec", - "state", - "time 0.2.16", - "tokio", - "tokio-rustls", - "unicode-xid 0.2.1", -] - -[[package]] -name = "ruma" -version = "0.1.0" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "ruma-api", - "ruma-client-api", - "ruma-common", - "ruma-events", - "ruma-federation-api", - "ruma-identifiers", - "ruma-signatures", -] - -[[package]] -name = "ruma-api" -version = "0.16.1" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "http", - "percent-encoding 2.1.0", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-api-macros" -version = "0.16.1" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "ruma-client-api" -version = "0.9.0" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "http", - "js_int", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-common" -version = "0.1.3" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "matches", - "ruma-serde", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-events" -version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" -dependencies = [ - "js_int", - "ruma-common", - "ruma-events-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-events-macros" -version = "0.21.3" -source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "ruma-federation-api" -version = "0.0.2" -source = 
"git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "js_int", - "matches", - "ruma-api", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-identifiers" -version = "0.16.2" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "rand", - "serde", - "strum", -] - -[[package]] -name = "ruma-serde" -version = "0.2.2" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "dtoa", - "itoa", - "js_int", - "serde", - "serde_json", - "url", -] - -[[package]] -name = "ruma-signatures" -version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" -dependencies = [ - "base64 0.12.3", - "ring", - "serde_json", - "untrusted", -] - -[[package]] -name = "rust-argon2" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" -dependencies = [ - "base64 0.11.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - -[[package]] -name = "rust-argon2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" -dependencies = [ - "base64 0.12.3", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -dependencies = [ - "base64 0.10.1", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "ryu" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi 0.3.9", -] - -[[package]] -name = "scoped-tls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "sct" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "security-framework" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" -dependencies = [ - 
"bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "serde" -version = "1.0.114" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.114" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "serde_json" -version = "1.0.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -dependencies = [ - "dtoa", - "itoa", - "serde", - "url", -] - -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer", - "digest", - "fake-simd", - "opaque-debug", -] - -[[package]] -name = "signal-hook-registry" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" -dependencies = [ - "arc-swap", - "libc", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "sled" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdad3dc85d888056d3bd9954ffdf22d8a22701b6cd3aca4f6df4c436111898c4" -dependencies = [ - "backtrace", - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", -] - -[[package]] -name = "smallvec" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" - -[[package]] -name = "socket2" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" -dependencies = [ - "cfg-if", - "libc", - 
"redox_syscall", - "winapi 0.3.9", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "standback" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" -dependencies = [ - "version_check", -] - -[[package]] -name = "state" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "serde", - "serde_derive", - "syn 1.0.33", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2 1.0.18", - "quote 1.0.7", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn 1.0.33", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "strum" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bd81eb48f4c437cadc685403cad539345bf703d78e63707418431cecd4522b" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" -dependencies = [ - "heck", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - -[[package]] -name = "syn" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "unicode-xid 0.2.1", -] - -[[package]] -name = "tempfile" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -dependencies = [ - "cfg-if", - "libc", - "rand", - "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", -] - -[[package]] -name = "thiserror" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" -dependencies = [ - "cfg-if", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi 0.3.9", -] - -[[package]] -name = "time-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - "proc-macro2 1.0.18", - "quote 1.0.7", - "standback", - "syn 1.0.33", -] - -[[package]] -name = "tinyvec" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" - -[[package]] -name = "tokio" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "libc", - "memchr", - "mio", - "mio-uds", - "num_cpus", - "pin-project-lite", - "signal-hook-registry", - "slab", - "tokio-macros", - "winapi 0.3.9", -] - -[[package]] -name = "tokio-macros" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - -[[package]] -name = "tokio-rustls" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" -dependencies = [ - "futures-core", - "rustls", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - 
"log", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" -dependencies = [ - "serde", -] - -[[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" - -[[package]] -name = "try-lock" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" - -[[package]] -name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - -[[package]] -name = "unicode-xid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" - -[[package]] -name = "universal-hash" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" -dependencies = [ - "generic-array", - "subtle 2.2.3", -] - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "url" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -dependencies = [ - "idna", - "matches", - "percent-encoding 2.1.0", -] - -[[package]] -name = "vcpkg" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" - -[[package]] -name = "version_check" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - 
"try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasm-bindgen" -version = "0.2.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0" -dependencies = [ - "cfg-if", - "serde", - "serde_json", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" -dependencies = [ - "quote 1.0.7", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd" - -[[package]] -name = "web-sys" -version = "0.3.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "yansi" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" - -[[package]] -name = "zeroize" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" diff --git a/Cargo.toml b/Cargo.toml index 30af056..eaad710 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,16 +29,16 @@ thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [dependencies.ruma] -git = "https://github.com/timokoesters/ruma" -#rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" +git = "https://github.com/DevinR528/ruma" +branch = "matrix-sdk2" #path = "../ruma/ruma" features = ["rand", "client-api", "federation-api"] # These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma -[patch.crates-io] -ruma-common = { git = "https://github.com/timokoesters/ruma" } -ruma-serde = { git = "https://github.com/timokoesters/ruma" } -ruma-identifiers = { git = "https://github.com/timokoesters/ruma" } +# [patch.crates-io] +# ruma-common = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } +# ruma-serde = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } +# ruma-identifiers = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } #ruma-common = { path = "../ruma/ruma-common" } #ruma-serde = { path = "../ruma/ruma-serde" } #ruma-identifiers = { path = "../ruma/ruma-identifiers" } diff --git a/src/client_server.rs b/src/client_server.rs index f1a9dbe..5a20eb3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -60,12 +60,11 @@ use ruma::{ unversioned::get_supported_versions, }, events::{ - collections::only::Event as EduEvent, room::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - EventJson, EventType, + AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent as EduEvent, EventJson, EventType, }, identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, }; @@ -77,7 +76,7 @@ const TOKEN_LENGTH: usize = 256; const MXC_LENGTH: usize = 256; const SESSION_ID_LENGTH: usize = 256; -#[get("/_matrix/client/versions")] +// #[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> ConduitResult { let mut unstable_features = BTreeMap::new(); @@ -90,7 +89,7 @@ pub fn get_supported_versions_route() -> ConduitResult, body: Ruma, @@ -120,7 +119,7 @@ pub fn get_register_available_route( Ok(get_username_availability::Response { available: true }.into()) } -#[post("/_matrix/client/r0/register", data = "")] +// #[post("/_matrix/client/r0/register", data = "")] pub fn register_route( db: State<'_, Database>, body: Ruma, @@ -226,7 +225,7 @@ pub fn register_route( .into()) } 
-#[get("/_matrix/client/r0/login")] +// #[get("/_matrix/client/r0/login")] pub fn get_login_route() -> ConduitResult { Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], @@ -234,7 +233,7 @@ pub fn get_login_route() -> ConduitResult { .into()) } -#[post("/_matrix/client/r0/login", data = "")] +// #[post("/_matrix/client/r0/login", data = "")] pub fn login_route( db: State<'_, Database>, body: Ruma, @@ -285,14 +284,14 @@ pub fn login_route( Ok(login::Response { user_id, access_token: token, - home_server: Some(db.globals.server_name().to_owned()), + home_server: Some(db.globals.server_name().to_string()), device_id, well_known: None, } .into()) } -#[post("/_matrix/client/r0/logout", data = "")] +// #[post("/_matrix/client/r0/logout", data = "")] pub fn logout_route( db: State<'_, Database>, body: Ruma, @@ -473,14 +472,14 @@ pub fn get_capabilities_route() -> ConduitResult { .into()) } -#[get("/_matrix/client/r0/pushrules", data = "")] +// #[get("/_matrix/client/r0/pushrules", data = "")] pub fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let EduEvent::PushRules(pushrules) = db + if let EduEvent::Basic(AnyBasicEvent::PushRules(pushrules)) = db .account_data .get(None, &user_id, &EventType::PushRules)? .ok_or(Error::BadRequest( @@ -515,7 +514,7 @@ pub fn set_pushrule_route( Ok(set_pushrule::Response.into()) } -#[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] +// #[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] pub fn set_pushrule_enabled_route( _scope: String, _kind: String, @@ -526,7 +525,7 @@ pub fn set_pushrule_enabled_route( Ok(set_pushrule_enabled::Response.into()) } -#[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] +// #[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] pub fn get_filter_route( _user_id: String, _filter_id: String, @@ -544,7 +543,7 @@ pub fn get_filter_route( .into()) } -#[post("/_matrix/client/r0/user/<_user_id>/filter")] +// #[post("/_matrix/client/r0/user/<_user_id>/filter")] pub fn create_filter_route(_user_id: String) -> ConduitResult { // TODO Ok(create_filter::Response { @@ -553,10 +552,10 @@ pub fn create_filter_route(_user_id: String) -> ConduitResult/account_data/<_type>", - data = "" -)] +// #[put( +// "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", +// data = "" +// )] pub fn set_global_account_data_route( db: State<'_, Database>, body: Ruma, @@ -582,10 +581,10 @@ pub fn set_global_account_data_route( Ok(set_global_account_data::Response.into()) } -#[get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" -)] +// #[get( +// "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", +// data = "" +// )] pub fn get_global_account_data_route( db: State<'_, Database>, body: Ruma, @@ -603,10 +602,19 @@ pub fn get_global_account_data_route( )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - Ok(get_global_account_data::Response { account_data: data }.into()) + // TODO clearly this is not ideal... + // NOTE: EventJson is no longer needed as all the enums and event structs impl ser/de + let data: Result = data.deserialize().map_err(Into::into); + match data? 
{ + EduEvent::Basic(data) => Ok(get_global_account_data::Response { + account_data: EventJson::from(data), + } + .into()), + _ => panic!("timo what do i do here"), + } } -#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +// #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn set_displayname_route( db: State<'_, Database>, body: Ruma, @@ -672,7 +680,7 @@ pub fn set_displayname_route( Ok(set_display_name::Response.into()) } -#[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +// #[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn get_displayname_route( db: State<'_, Database>, body: Ruma, @@ -685,7 +693,7 @@ pub fn get_displayname_route( .into()) } -#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +// #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn set_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -762,7 +770,7 @@ pub fn set_avatar_url_route( Ok(set_avatar_url::Response.into()) } -#[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +// #[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn get_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -775,7 +783,7 @@ pub fn get_avatar_url_route( .into()) } -#[get("/_matrix/client/r0/profile/<_user_id>", data = "")] +// #[get("/_matrix/client/r0/profile/<_user_id>", data = "")] pub fn get_profile_route( db: State<'_, Database>, body: Ruma, @@ -800,7 +808,7 @@ pub fn get_profile_route( .into()) } -#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +// #[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] pub fn set_presence_route( db: State<'_, Database>, body: Ruma, @@ -830,7 +838,7 @@ pub fn set_presence_route( Ok(set_presence::Response.into()) } -#[post("/_matrix/client/r0/keys/upload", data = "")] +// #[post("/_matrix/client/r0/keys/upload", data = "")] pub fn upload_keys_route( db: State<'_, Database>, body: Ruma, @@ -859,7 +867,7 @@ pub fn upload_keys_route( .into()) } -#[post("/_matrix/client/r0/keys/query", data = "")] +// #[post("/_matrix/client/r0/keys/query", data = "")] pub fn get_keys_route( db: State<'_, Database>, body: Ruma, @@ -936,7 +944,7 @@ pub fn get_keys_route( .into()) } -#[post("/_matrix/client/r0/keys/claim", data = "")] +// #[post("/_matrix/client/r0/keys/claim", data = "")] pub fn claim_keys_route( db: State<'_, Database>, body: Ruma, @@ -1099,7 +1107,7 @@ pub fn set_read_marker_route( content: ruma::events::fully_read::FullyReadEventContent { event_id: body.fully_read.clone(), }, - room_id: Some(body.room_id.clone()), + room_id: body.room_id.clone(), }) .expect("we just created a valid event") .as_object_mut() @@ -1135,20 +1143,22 @@ pub fn set_read_marker_route( db.rooms.edus.roomlatest_update( &user_id, &body.room_id, - EduEvent::Receipt(ruma::events::receipt::ReceiptEvent { - content: receipt_content, - room_id: None, // None because it can be inferred - }), + EduEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )), &db.globals, )?; } Ok(set_read_marker::Response.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", - data = "" -)] +// #[put( +// "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", +// data = "" +// )] pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma, @@ -1174,15 +1184,14 @@ pub 
fn create_typing_event_route( Ok(create_typing_event::Response.into()) } -#[post("/_matrix/client/r0/createRoom", data = "")] +// #[post("/_matrix/client/r0/createRoom", data = "")] pub fn create_room_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let room_id = RoomId::new(db.globals.server_name()) - .map_err(|_| Error::bad_database("Server name is invalid."))?; + let room_id = RoomId::new(db.globals.server_name()); let alias = body .room_alias_name @@ -1439,10 +1448,10 @@ pub fn create_room_route( Ok(create_room::Response { room_id }.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", - data = "" -)] +// #[put( +// "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", +// data = "" +// )] pub fn redact_event_route( db: State<'_, Database>, body: Ruma, @@ -1469,7 +1478,7 @@ pub fn redact_event_route( Ok(redact_event::Response { event_id }.into()) } -#[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +// #[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn create_alias_route( db: State<'_, Database>, body: Ruma, @@ -1485,7 +1494,7 @@ pub fn create_alias_route( Ok(create_alias::Response.into()) } -#[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +// #[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn delete_alias_route( db: State<'_, Database>, body: Ruma, @@ -1496,7 +1505,7 @@ pub fn delete_alias_route( Ok(delete_alias::Response.into()) } -#[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +// #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn get_alias_route( db: State<'_, Database>, body: Ruma, @@ -1516,12 +1525,12 @@ pub fn get_alias_route( Ok(get_alias::Response { room_id, - servers: vec![db.globals.server_name().to_owned()], + servers: vec![db.globals.server_name().to_string()], } .into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] pub fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, @@ -1556,7 +1565,7 @@ pub fn join_room_by_id_route( .into()) } -#[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] +// #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] pub fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma, @@ -1585,7 +1594,7 @@ pub fn join_room_by_id_or_alias_route( .into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] pub fn leave_room_route( db: State<'_, Database>, body: Ruma, @@ -1623,7 +1632,7 @@ pub fn leave_room_route( Ok(leave_room::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] pub fn kick_user_route( db: State<'_, Database>, body: Ruma, @@ -1663,7 +1672,7 @@ pub fn kick_user_route( Ok(kick_user::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] pub fn ban_user_route( db: State<'_, Database>, body: Ruma, @@ -1710,7 +1719,7 @@ pub fn ban_user_route( Ok(ban_user::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] pub fn unban_user_route( db: State<'_, Database>, body: Ruma, @@ 
-1749,7 +1758,7 @@ pub fn unban_user_route( Ok(unban_user::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] pub fn forget_room_route( db: State<'_, Database>, body: Ruma, @@ -1762,7 +1771,7 @@ pub fn forget_room_route( Ok(forget_room::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] +// #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] pub fn invite_user_route( db: State<'_, Database>, body: Ruma, @@ -1793,7 +1802,7 @@ pub fn invite_user_route( } } -#[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +// #[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1807,7 +1816,7 @@ pub async fn set_room_visibility_route( Ok(set_room_visibility::Response.into()) } -#[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +// #[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1823,7 +1832,7 @@ pub async fn get_room_visibility_route( .into()) } -#[get("/_matrix/client/r0/publicRooms", data = "")] +// #[get("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma, @@ -1872,7 +1881,7 @@ pub async fn get_public_rooms_route( .into()) } -#[post("/_matrix/client/r0/publicRooms", data = "")] +// #[post("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma, @@ -1984,7 +1993,7 @@ pub async fn get_public_rooms_filtered_route( .into()) } -#[post("/_matrix/client/r0/user_directory/search", data = "")] +// #[post("/_matrix/client/r0/user_directory/search", data = "")] pub fn search_users_route( db: State<'_, Database>, body: Ruma, @@ -2050,7 +2059,7 @@ pub fn get_member_events_route( .into()) } -#[get("/_matrix/client/r0/thirdparty/protocols")] +// #[get("/_matrix/client/r0/thirdparty/protocols")] pub fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { @@ -2124,7 +2133,7 @@ pub fn create_message_event_route( } #[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + // "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", data = "" )] pub fn create_state_event_for_key_route( @@ -2185,10 +2194,10 @@ pub fn create_state_event_for_key_route( Ok(create_state_event_for_key::Response { event_id }.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" -)] +// #[put( +// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", +// data = "" +// )] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma, @@ -2232,7 +2241,7 @@ pub fn create_state_event_for_empty_key_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] +// #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, @@ -2258,10 +2267,10 @@ pub fn get_state_events_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", - data = "" -)] +// #[get( +// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", +// data = "" +// )] pub fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma, @@ -2293,10 +2302,10 @@ 
pub fn get_state_events_for_key_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" -)] +// #[get( +// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", +// data = "" +// )] pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma, @@ -2327,7 +2336,7 @@ pub fn get_state_events_for_empty_key_route( .into()) } -#[get("/_matrix/client/r0/sync", data = "")] +// #[get("/_matrix/client/r0/sync", data = "")] pub fn sync_route( db: State<'_, Database>, body: Ruma, @@ -2522,9 +2531,9 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&EduEvent::Typing( + &serde_json::to_string(&EduEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, - )) + ))) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -2537,7 +2546,13 @@ pub fn sync_route( .account_data .changes_since(Some(&room_id), &user_id, since)? .into_iter() - .map(|(_, v)| v) + .flat_map(|(_, v)| { + if let Some(EduEvent::Basic(account_event)) = v.deserialize().ok() { + Some(EventJson::from(account_event)) + } else { + None + } + }) .collect(), }, summary: sync_events::RoomSummary { @@ -2701,10 +2716,10 @@ pub fn sync_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", - data = "" -)] +// #[get( +// "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", +// data = "" +// )] pub fn get_context_route( db: State<'_, Database>, body: Ruma, @@ -2802,7 +2817,7 @@ pub fn get_context_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] +// #[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] pub fn get_message_events_route( db: State<'_, Database>, body: Ruma, @@ -2824,15 +2839,16 @@ pub fn get_message_events_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; match body.dir { get_message_events::Direction::Forward => { + let limit = body + .limit + .try_into() + .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; + let events_after = db .rooms .pdus_after(&user_id, &body.room_id, from) // Use limit or else 10 - .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { - Ok(u32::try_from(l).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize) - })?) + .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); @@ -2859,15 +2875,16 @@ pub fn get_message_events_route( .into()) } get_message_events::Direction::Backward => { + let limit = body + .limit + .try_into() + .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; + let events_before = db .rooms .pdus_until(&user_id, &body.room_id, from) // Use limit or else 10 - .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| { - Ok(u32::try_from(l).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize) - })?) 
+ .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); @@ -2896,7 +2913,7 @@ pub fn get_message_events_route( } } -#[get("/_matrix/client/r0/voip/turnServer")] +// #[get("/_matrix/client/r0/voip/turnServer")] pub fn turn_server_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2904,7 +2921,7 @@ pub fn turn_server_route() -> ConduitResult { )) } -#[post("/_matrix/client/r0/publicised_groups")] +// #[post("/_matrix/client/r0/publicised_groups")] pub fn publicised_groups_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2912,10 +2929,10 @@ pub fn publicised_groups_route() -> ConduitResult/<_txn_id>", - data = "" -)] +// #[put( +// "/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>", +// data = "" +// )] pub fn send_event_to_device_route( db: State<'_, Database>, body: Ruma, @@ -2961,7 +2978,7 @@ pub fn send_event_to_device_route( Ok(send_event_to_device::Response.into()) } -#[get("/_matrix/media/r0/config")] +// #[get("/_matrix/media/r0/config")] pub fn get_media_config_route() -> ConduitResult { Ok(get_media_config::Response { upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB @@ -2969,7 +2986,7 @@ pub fn get_media_config_route() -> ConduitResult { .into()) } -#[post("/_matrix/media/r0/upload", data = "")] +// #[post("/_matrix/media/r0/upload", data = "")] pub fn create_content_route( db: State<'_, Database>, body: Ruma, @@ -2989,10 +3006,10 @@ pub fn create_content_route( Ok(create_content::Response { content_uri: mxc }.into()) } -#[get( - "/_matrix/media/r0/download/<_server_name>/<_media_id>", - data = "" -)] +// #[get( +// "/_matrix/media/r0/download/<_server_name>/<_media_id>", +// data = "" +// )] pub fn get_content_route( db: State<'_, Database>, body: Ruma, @@ -3014,10 +3031,10 @@ pub fn get_content_route( } } -#[get( - "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", - data = "" -)] +// #[get( +// "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", +// data = "" +// )] pub fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma, @@ -3039,7 +3056,7 @@ pub fn get_content_thumbnail_route( } } -#[get("/_matrix/client/r0/devices", data = "")] +// #[get("/_matrix/client/r0/devices", data = "")] pub fn get_devices_route( db: State<'_, Database>, body: Ruma, @@ -3055,7 +3072,7 @@ pub fn get_devices_route( Ok(get_devices::Response { devices }.into()) } -#[get("/_matrix/client/r0/devices/<_device_id>", data = "")] +// #[get("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn get_device_route( db: State<'_, Database>, body: Ruma, @@ -3071,7 +3088,7 @@ pub fn get_device_route( Ok(get_device::Response { device }.into()) } -#[put("/_matrix/client/r0/devices/<_device_id>", data = "")] +// #[put("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn update_device_route( db: State<'_, Database>, body: Ruma, @@ -3092,7 +3109,7 @@ pub fn update_device_route( Ok(update_device::Response.into()) } -#[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] +// #[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn delete_device_route( db: State<'_, Database>, body: Ruma, @@ -3136,7 +3153,7 @@ pub fn delete_device_route( Ok(delete_device::Response.into()) } -#[post("/_matrix/client/r0/delete_devices", data = "")] +// #[post("/_matrix/client/r0/delete_devices", data = "")] pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index befd937..3b64ba7 100644 --- 
a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, - events::{collections::only::Event as EduEvent, EventJson, EventType}, + events::{AnyEvent as EduEvent, EventJson, EventType}, identifiers::{RoomId, UserId}, }; use std::{collections::HashMap, convert::TryFrom}; diff --git a/src/database/globals.rs b/src/database/globals.rs index a767d8a..00e0982 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,12 +1,14 @@ -use crate::{utils, Error, Result}; +use std::convert::TryFrom; +use crate::{utils, Error, Result}; +use ruma::identifiers::{ServerName, ServerNameRef}; pub const COUNTER: &str = "c"; pub struct Globals { pub(super) globals: sled::Tree, keypair: ruma::signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, - server_name: String, + server_name: ServerName, registration_disabled: bool, } @@ -24,10 +26,13 @@ impl Globals { globals, keypair, reqwest_client: reqwest::Client::new(), - server_name: config - .get_str("server_name") - .unwrap_or("localhost") - .to_owned(), + server_name: ServerName::try_from( + config + .get_str("server_name") + .unwrap_or("localhost") + .to_owned(), + ) + .map_err(|_| Error::bad_database("Invalid server name"))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), }) } @@ -59,8 +64,8 @@ impl Globals { }) } - pub fn server_name(&self) -> &str { - &self.server_name + pub fn server_name(&self) -> ServerNameRef<'_> { + self.server_name.as_ref() } pub fn registration_disabled(&self) -> bool { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b7f7221..d29ab42 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -515,7 +515,7 @@ impl Rooms { event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), room_id: room_id.clone(), sender: sender.clone(), - origin: globals.server_name().to_owned(), + origin: globals.server_name().to_string(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -529,7 +529,7 @@ impl Rooms { auth_events: Vec::new(), redacts: redacts.clone(), unsigned, - hashes: ruma::api::federation::EventHash { + hashes: ruma::events::pdu::EventHash { sha256: "aaa".to_owned(), }, signatures: HashMap::new(), @@ -547,7 +547,7 @@ impl Rooms { let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); ruma::signatures::hash_and_sign_event( - globals.server_name(), + globals.server_name().as_str(), globals.keypair(), &mut pdu_json, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 645ccb0..de138b6 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ - events::{collections::only::Event as EduEvent, EventJson}, + events::{AnyEvent as EduEvent, EventJson}, identifiers::{RoomId, UserId}, }; use std::convert::TryFrom; @@ -235,7 +235,7 @@ impl RoomEdus { Ok(ruma::events::typing::TypingEvent { content: ruma::events::typing::TypingEventContent { user_ids }, - room_id: None, // Can be inferred + room_id: room_id.clone(), // Can be inferred }) } diff --git a/src/database/users.rs b/src/database/users.rs index 2ccf59a..4e2e1a2 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ use ruma::{ keys::{AlgorithmAndDeviceId, CrossSigningKey, DeviceKeys, KeyAlgorithm, OneTimeKey}, }, }, - events::{to_device::AnyToDeviceEvent, EventJson, EventType}, + 
events::{AnyToDeviceEvent, EventJson, EventType}, identifiers::UserId, }; use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; diff --git a/src/error.rs b/src/error.rs index 7305073..e1af15a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -25,6 +25,11 @@ pub enum Error { #[from] source: image::error::ImageError, }, + #[error("Could not deserialize json")] + SerdeError { + #[from] + source: serde_json::Error, + }, #[error("{0}")] BadConfig(&'static str), #[error("{0}")] diff --git a/src/pdu.rs b/src/pdu.rs index 8a5858e..4504ae6 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,9 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ - api::federation::EventHash, events::{ - collections::all::{RoomEvent, StateEvent}, - room::member::MemberEvent, - stripped::AnyStrippedStateEvent, - EventJson, EventType, + pdu::EventHash, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEventStub, EventJson, + EventType, }, identifiers::{EventId, RoomId, UserId}, }; @@ -81,19 +78,19 @@ impl PduEvent { Ok(()) } - pub fn to_room_event(&self) -> EventJson<RoomEvent> { + pub fn to_room_event(&self) -> EventJson<AnyRoomEvent> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::<EventJson<RoomEvent>>(&json) + serde_json::from_str::<EventJson<AnyRoomEvent>>(&json) .expect("EventJson::from_str always works") } - pub fn to_state_event(&self) -> EventJson<StateEvent> { + pub fn to_state_event(&self) -> EventJson<AnyStateEvent> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::<EventJson<StateEvent>>(&json) + serde_json::from_str::<EventJson<AnyStateEvent>>(&json) .expect("EventJson::from_str always works") } - pub fn to_stripped_state_event(&self) -> EventJson<AnyStrippedStateEvent> { + pub fn to_stripped_state_event(&self) -> EventJson<AnyStrippedStateEventStub> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::<EventJson<AnyStrippedStateEvent>>(&json) + serde_json::from_str::<EventJson<AnyStrippedStateEventStub>>(&json) .expect("EventJson::from_str always works") } pub fn to_member_event(&self) -> EventJson { From 24b670204713b54bee092f39322286f7a3117b92 Mon Sep 17 00:00:00 2001 From: Devin R Date: Mon, 22 Jun 2020 07:26:09 -0400 Subject: [PATCH 0165/1727] Add to_*_event_stub methods to pdu, filter for correct event kind When creating some responses (e.g. sync), an AnyRoomEventStub is needed; for this, PduEvent deserializes the JSON as a stub event or as a non-stub event, as needed. Ephemeral and account events are checked to be of the correct type and filtered out if not. This requires an extra `deserialize` call which could/should be removed. TODO: Possibly get rid of EventJson in some places.
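For illustration, the ephemeral-event filtering described above follows roughly this pattern in sync_route (a sketch based on the hunk further down in this patch; `EduEvent` is the `ruma::events::AnyEvent` alias used in this series):

    // Keep only EDUs that really deserialize to ephemeral room events;
    // anything else is dropped instead of ending up in the sync response.
    let edus = db
        .rooms
        .edus
        .roomlatests_since(&room_id, since)?
        .filter_map(|r| r.ok()) // Filter out buggy events
        .filter_map(|r| {
            if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() {
                Some(EventJson::from(ev))
            } else {
                None
            }
        })
        .collect::<Vec<_>>();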
--- src/client_server.rs | 222 ++++++++++++++++++++----------------- src/database/rooms/edus.rs | 2 + src/pdu.rs | 14 ++- 3 files changed, 136 insertions(+), 102 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 5a20eb3..15ced16 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -76,7 +76,7 @@ const TOKEN_LENGTH: usize = 256; const MXC_LENGTH: usize = 256; const SESSION_ID_LENGTH: usize = 256; -// #[get("/_matrix/client/versions")] +#[get("/_matrix/client/versions")] pub fn get_supported_versions_route() -> ConduitResult { let mut unstable_features = BTreeMap::new(); @@ -89,7 +89,7 @@ pub fn get_supported_versions_route() -> ConduitResult, body: Ruma, @@ -119,7 +119,7 @@ pub fn get_register_available_route( Ok(get_username_availability::Response { available: true }.into()) } -// #[post("/_matrix/client/r0/register", data = "")] +#[post("/_matrix/client/r0/register", data = "")] pub fn register_route( db: State<'_, Database>, body: Ruma, @@ -225,7 +225,7 @@ pub fn register_route( .into()) } -// #[get("/_matrix/client/r0/login")] +#[get("/_matrix/client/r0/login")] pub fn get_login_route() -> ConduitResult { Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], @@ -233,7 +233,7 @@ pub fn get_login_route() -> ConduitResult { .into()) } -// #[post("/_matrix/client/r0/login", data = "")] +#[post("/_matrix/client/r0/login", data = "")] pub fn login_route( db: State<'_, Database>, body: Ruma, @@ -291,7 +291,7 @@ pub fn login_route( .into()) } -// #[post("/_matrix/client/r0/logout", data = "")] +#[post("/_matrix/client/r0/logout", data = "")] pub fn logout_route( db: State<'_, Database>, body: Ruma, @@ -472,7 +472,7 @@ pub fn get_capabilities_route() -> ConduitResult { .into()) } -// #[get("/_matrix/client/r0/pushrules", data = "")] +#[get("/_matrix/client/r0/pushrules", data = "")] pub fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, @@ -514,7 +514,7 @@ pub fn set_pushrule_route( Ok(set_pushrule::Response.into()) } -// #[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] +#[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] pub fn set_pushrule_enabled_route( _scope: String, _kind: String, @@ -525,7 +525,7 @@ pub fn set_pushrule_enabled_route( Ok(set_pushrule_enabled::Response.into()) } -// #[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] +#[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] pub fn get_filter_route( _user_id: String, _filter_id: String, @@ -543,7 +543,7 @@ pub fn get_filter_route( .into()) } -// #[post("/_matrix/client/r0/user/<_user_id>/filter")] +#[post("/_matrix/client/r0/user/<_user_id>/filter")] pub fn create_filter_route(_user_id: String) -> ConduitResult { // TODO Ok(create_filter::Response { @@ -552,10 +552,10 @@ pub fn create_filter_route(_user_id: String) -> ConduitResult/account_data/<_type>", -// data = "" -// )] +#[put( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] pub fn set_global_account_data_route( db: State<'_, Database>, body: Ruma, @@ -581,10 +581,10 @@ pub fn set_global_account_data_route( Ok(set_global_account_data::Response.into()) } -// #[get( -// "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", -// data = "" -// )] +#[get( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" +)] pub fn get_global_account_data_route( db: State<'_, Database>, body: Ruma, @@ -614,7 +614,7 @@ pub fn get_global_account_data_route( } } -// 
#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn set_displayname_route( db: State<'_, Database>, body: Ruma, @@ -680,7 +680,7 @@ pub fn set_displayname_route( Ok(set_display_name::Response.into()) } -// #[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +#[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] pub fn get_displayname_route( db: State<'_, Database>, body: Ruma, @@ -693,7 +693,7 @@ pub fn get_displayname_route( .into()) } -// #[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn set_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -770,7 +770,7 @@ pub fn set_avatar_url_route( Ok(set_avatar_url::Response.into()) } -// #[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +#[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] pub fn get_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -783,7 +783,7 @@ pub fn get_avatar_url_route( .into()) } -// #[get("/_matrix/client/r0/profile/<_user_id>", data = "")] +#[get("/_matrix/client/r0/profile/<_user_id>", data = "")] pub fn get_profile_route( db: State<'_, Database>, body: Ruma, @@ -808,7 +808,7 @@ pub fn get_profile_route( .into()) } -// #[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] pub fn set_presence_route( db: State<'_, Database>, body: Ruma, @@ -838,7 +838,7 @@ pub fn set_presence_route( Ok(set_presence::Response.into()) } -// #[post("/_matrix/client/r0/keys/upload", data = "")] +#[post("/_matrix/client/r0/keys/upload", data = "")] pub fn upload_keys_route( db: State<'_, Database>, body: Ruma, @@ -867,7 +867,7 @@ pub fn upload_keys_route( .into()) } -// #[post("/_matrix/client/r0/keys/query", data = "")] +#[post("/_matrix/client/r0/keys/query", data = "")] pub fn get_keys_route( db: State<'_, Database>, body: Ruma, @@ -944,7 +944,7 @@ pub fn get_keys_route( .into()) } -// #[post("/_matrix/client/r0/keys/claim", data = "")] +#[post("/_matrix/client/r0/keys/claim", data = "")] pub fn claim_keys_route( db: State<'_, Database>, body: Ruma, @@ -1155,10 +1155,10 @@ pub fn set_read_marker_route( Ok(set_read_marker::Response.into()) } -// #[put( -// "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", -// data = "" -// )] +#[put( + "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", + data = "" +)] pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma, @@ -1184,7 +1184,7 @@ pub fn create_typing_event_route( Ok(create_typing_event::Response.into()) } -// #[post("/_matrix/client/r0/createRoom", data = "")] +#[post("/_matrix/client/r0/createRoom", data = "")] pub fn create_room_route( db: State<'_, Database>, body: Ruma, @@ -1448,10 +1448,10 @@ pub fn create_room_route( Ok(create_room::Response { room_id }.into()) } -// #[put( -// "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", -// data = "" -// )] +#[put( + "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", + data = "" +)] pub fn redact_event_route( db: State<'_, Database>, body: Ruma, @@ -1478,7 +1478,7 @@ pub fn redact_event_route( Ok(redact_event::Response { event_id }.into()) } -// #[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn create_alias_route( db: State<'_, 
Database>, body: Ruma, @@ -1494,7 +1494,7 @@ pub fn create_alias_route( Ok(create_alias::Response.into()) } -// #[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn delete_alias_route( db: State<'_, Database>, body: Ruma, @@ -1505,7 +1505,7 @@ pub fn delete_alias_route( Ok(delete_alias::Response.into()) } -// #[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] pub fn get_alias_route( db: State<'_, Database>, body: Ruma, @@ -1530,7 +1530,7 @@ pub fn get_alias_route( .into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] pub fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, @@ -1565,7 +1565,7 @@ pub fn join_room_by_id_route( .into()) } -// #[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] +#[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] pub fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma, @@ -1594,7 +1594,7 @@ pub fn join_room_by_id_or_alias_route( .into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] pub fn leave_room_route( db: State<'_, Database>, body: Ruma, @@ -1632,7 +1632,7 @@ pub fn leave_room_route( Ok(leave_room::Response.into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] pub fn kick_user_route( db: State<'_, Database>, body: Ruma, @@ -1672,7 +1672,7 @@ pub fn kick_user_route( Ok(kick_user::Response.into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] pub fn ban_user_route( db: State<'_, Database>, body: Ruma, @@ -1719,7 +1719,7 @@ pub fn ban_user_route( Ok(ban_user::Response.into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] pub fn unban_user_route( db: State<'_, Database>, body: Ruma, @@ -1758,7 +1758,7 @@ pub fn unban_user_route( Ok(unban_user::Response.into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] pub fn forget_room_route( db: State<'_, Database>, body: Ruma, @@ -1771,7 +1771,7 @@ pub fn forget_room_route( Ok(forget_room::Response.into()) } -// #[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] +#[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] pub fn invite_user_route( db: State<'_, Database>, body: Ruma, @@ -1802,7 +1802,7 @@ pub fn invite_user_route( } } -// #[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +#[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1816,7 +1816,7 @@ pub async fn set_room_visibility_route( Ok(set_room_visibility::Response.into()) } -// #[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +#[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1832,7 +1832,7 @@ pub async fn get_room_visibility_route( .into()) } -// #[get("/_matrix/client/r0/publicRooms", data = "")] 
+#[get("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma, @@ -1881,7 +1881,7 @@ pub async fn get_public_rooms_route( .into()) } -// #[post("/_matrix/client/r0/publicRooms", data = "")] +#[post("/_matrix/client/r0/publicRooms", data = "")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma, @@ -1993,7 +1993,7 @@ pub async fn get_public_rooms_filtered_route( .into()) } -// #[post("/_matrix/client/r0/user_directory/search", data = "")] +#[post("/_matrix/client/r0/user_directory/search", data = "")] pub fn search_users_route( db: State<'_, Database>, body: Ruma, @@ -2059,7 +2059,7 @@ pub fn get_member_events_route( .into()) } -// #[get("/_matrix/client/r0/thirdparty/protocols")] +#[get("/_matrix/client/r0/thirdparty/protocols")] pub fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { @@ -2133,7 +2133,7 @@ pub fn create_message_event_route( } #[put( - // "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", data = "" )] pub fn create_state_event_for_key_route( @@ -2194,10 +2194,10 @@ pub fn create_state_event_for_key_route( Ok(create_state_event_for_key::Response { event_id }.into()) } -// #[put( -// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", -// data = "" -// )] +#[put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" +)] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma, @@ -2241,7 +2241,7 @@ pub fn create_state_event_for_empty_key_route( .into()) } -// #[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] +#[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, @@ -2267,10 +2267,10 @@ pub fn get_state_events_route( .into()) } -// #[get( -// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", -// data = "" -// )] +#[get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" +)] pub fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma, @@ -2302,10 +2302,10 @@ pub fn get_state_events_for_key_route( .into()) } -// #[get( -// "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", -// data = "" -// )] +#[get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" +)] pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma, @@ -2336,7 +2336,7 @@ pub fn get_state_events_for_empty_key_route( .into()) } -// #[get("/_matrix/client/r0/sync", data = "")] +#[get("/_matrix/client/r0/sync", data = "")] pub fn sync_route( db: State<'_, Database>, body: Ruma, @@ -2513,7 +2513,7 @@ pub fn sync_route( let room_events = pdus .into_iter() - .map(|pdu| pdu.to_room_event()) + .map(|pdu| pdu.to_room_event_stub()) .collect::>(); let mut edus = db @@ -2521,6 +2521,14 @@ pub fn sync_route( .edus .roomlatests_since(&room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|r| { + if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { + // TODO we could get rid of EventJson? + Some(EventJson::from(ev)) + } else { + None + } + }) .collect::>(); if db @@ -2579,7 +2587,7 @@ pub fn sync_route( db.rooms .room_state_full(&room_id)? 
.into_iter() - .map(|(_, pdu)| pdu.to_state_event()) + .map(|(_, pdu)| pdu.to_state_event_stub()) .collect() } else { Vec::new() @@ -2599,7 +2607,7 @@ pub fn sync_route( let pdus = db.rooms.pdus_since(&user_id, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .map(|pdu| pdu.to_room_event()) + .map(|pdu| pdu.to_room_event_stub()) .collect(); // TODO: Only until leave point @@ -2608,6 +2616,14 @@ pub fn sync_route( .edus .roomlatests_since(&room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|r| { + if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { + // TODO we could get rid of EventJson? + Some(EventJson::from(ev)) + } else { + None + } + }) .collect::>(); if db @@ -2618,9 +2634,9 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&EduEvent::Typing( + &serde_json::to_string(&EduEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, - )) + ))) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -2694,7 +2710,13 @@ pub fn sync_route( .account_data .changes_since(None, &user_id, since)? .into_iter() - .map(|(_, v)| v) + .flat_map(|(_, v)| { + if let Some(EduEvent::Basic(account_event)) = v.deserialize().ok() { + Some(EventJson::from(account_event)) + } else { + None + } + }) .collect(), }, device_lists: sync_events::DeviceLists { @@ -2716,10 +2738,10 @@ pub fn sync_route( .into()) } -// #[get( -// "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", -// data = "" -// )] +#[get( + "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", + data = "" +)] pub fn get_context_route( db: State<'_, Database>, body: Ruma, @@ -2817,7 +2839,7 @@ pub fn get_context_route( .into()) } -// #[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] +#[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] pub fn get_message_events_route( db: State<'_, Database>, body: Ruma, @@ -2913,7 +2935,7 @@ pub fn get_message_events_route( } } -// #[get("/_matrix/client/r0/voip/turnServer")] +#[get("/_matrix/client/r0/voip/turnServer")] pub fn turn_server_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2921,7 +2943,7 @@ pub fn turn_server_route() -> ConduitResult { )) } -// #[post("/_matrix/client/r0/publicised_groups")] +#[post("/_matrix/client/r0/publicised_groups")] pub fn publicised_groups_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2929,10 +2951,10 @@ pub fn publicised_groups_route() -> ConduitResult/<_txn_id>", -// data = "" -// )] +#[put( + "/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>", + data = "" +)] pub fn send_event_to_device_route( db: State<'_, Database>, body: Ruma, @@ -2978,7 +3000,7 @@ pub fn send_event_to_device_route( Ok(send_event_to_device::Response.into()) } -// #[get("/_matrix/media/r0/config")] +#[get("/_matrix/media/r0/config")] pub fn get_media_config_route() -> ConduitResult { Ok(get_media_config::Response { upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB @@ -2986,7 +3008,7 @@ pub fn get_media_config_route() -> ConduitResult { .into()) } -// #[post("/_matrix/media/r0/upload", data = "")] +#[post("/_matrix/media/r0/upload", data = "")] pub fn create_content_route( db: State<'_, Database>, body: Ruma, @@ -3006,10 +3028,10 @@ pub fn create_content_route( Ok(create_content::Response { content_uri: mxc }.into()) } -// #[get( -// "/_matrix/media/r0/download/<_server_name>/<_media_id>", -// data = "" -// )] 
+#[get( + "/_matrix/media/r0/download/<_server_name>/<_media_id>", + data = "" +)] pub fn get_content_route( db: State<'_, Database>, body: Ruma, @@ -3031,10 +3053,10 @@ pub fn get_content_route( } } -// #[get( -// "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", -// data = "" -// )] +#[get( + "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", + data = "" +)] pub fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma, @@ -3056,7 +3078,7 @@ pub fn get_content_thumbnail_route( } } -// #[get("/_matrix/client/r0/devices", data = "")] +#[get("/_matrix/client/r0/devices", data = "")] pub fn get_devices_route( db: State<'_, Database>, body: Ruma, @@ -3072,7 +3094,7 @@ pub fn get_devices_route( Ok(get_devices::Response { devices }.into()) } -// #[get("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[get("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn get_device_route( db: State<'_, Database>, body: Ruma, @@ -3088,7 +3110,7 @@ pub fn get_device_route( Ok(get_device::Response { device }.into()) } -// #[put("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[put("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn update_device_route( db: State<'_, Database>, body: Ruma, @@ -3109,7 +3131,7 @@ pub fn update_device_route( Ok(update_device::Response.into()) } -// #[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] pub fn delete_device_route( db: State<'_, Database>, body: Ruma, @@ -3153,7 +3175,7 @@ pub fn delete_device_route( Ok(delete_device::Response.into()) } -// #[post("/_matrix/client/r0/delete_devices", data = "")] +#[post("/_matrix/client/r0/delete_devices", data = "")] pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index de138b6..c352a01 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -62,6 +62,8 @@ impl RoomEdus { room_id: &RoomId, since: u64, ) -> Result>>> { + // TODO is this ^^^^^^^ + // only ever a read receipt could we just return EphemeralRoomEvent here? 
let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/pdu.rs b/src/pdu.rs index 4504ae6..73bb869 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,8 +2,8 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ events::{ - pdu::EventHash, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEventStub, EventJson, - EventType, + pdu::EventHash, AnyRoomEvent, AnyRoomEventStub, AnyStateEvent, AnyStateEventStub, + AnyStrippedStateEventStub, EventJson, EventType, }, identifiers::{EventId, RoomId, UserId}, }; @@ -78,6 +78,11 @@ impl PduEvent { Ok(()) } + pub fn to_room_event_stub(&self) -> EventJson { + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") + } pub fn to_room_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::>(&json) @@ -88,6 +93,11 @@ impl PduEvent { serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } + pub fn to_state_event_stub(&self) -> EventJson { + let json = serde_json::to_string(&self).expect("PDUs are always valid"); + serde_json::from_str::>(&json) + .expect("EventJson::from_str always works") + } pub fn to_stripped_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::>(&json) From ddc759887010013632929ed0164b19c5eb5bec45 Mon Sep 17 00:00:00 2001 From: Devin R Date: Tue, 23 Jun 2020 18:58:39 -0400 Subject: [PATCH 0166/1727] Use ruma/ruma master --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eaad710..55308b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,8 +29,8 @@ thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [dependencies.ruma] -git = "https://github.com/DevinR528/ruma" -branch = "matrix-sdk2" +git = "https://github.com/ruma/ruma" +# branch = "matrix-sdk2" #path = "../ruma/ruma" features = ["rand", "client-api", "federation-api"] From 84dcb885a7fb28a1472656a8e036199381c0fd19 Mon Sep 17 00:00:00 2001 From: Devin R Date: Wed, 24 Jun 2020 17:03:33 -0400 Subject: [PATCH 0167/1727] Return correct Errors and replace panic, fix misc review issues Remove EventJson todo comments, clean up Cargo.toml commented ruma deps. 
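As an illustration of the panic removal mentioned above, get_global_account_data_route now turns a failed or unexpected deserialization into a typed error instead of panicking, roughly like this (a sketch of the hunk below; the explicit `Result<EduEvent, Error>` annotation is an assumption):

    // Deserialize the stored account data; a corrupt entry becomes a database error.
    let data: Result<EduEvent, Error> = data
        .deserialize()
        .map_err(|_| Error::bad_database("Deserialization of account data failed"));

    if let EduEvent::Basic(data) = data? {
        Ok(get_global_account_data::Response {
            account_data: EventJson::from(data),
        }
        .into())
    } else {
        // Anything that is not basic account data is also treated as a database error.
        Err(Error::bad_database("Encountered a non account data event."))
    }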
--- Cargo.toml | 14 ++----------- src/client_server.rs | 40 ++++++++++++++++---------------------- src/database/globals.rs | 2 +- src/database/rooms/edus.rs | 2 -- src/error.rs | 5 ----- 5 files changed, 20 insertions(+), 43 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 55308b5..f15c403 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,16 +29,6 @@ thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [dependencies.ruma] -git = "https://github.com/ruma/ruma" -# branch = "matrix-sdk2" -#path = "../ruma/ruma" +git = "https://github.com/DevinR528/ruma" +branch = "key-sign" features = ["rand", "client-api", "federation-api"] - -# These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma -# [patch.crates-io] -# ruma-common = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } -# ruma-serde = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } -# ruma-identifiers = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" } -#ruma-common = { path = "../ruma/ruma-common" } -#ruma-serde = { path = "../ruma/ruma-serde" } -#ruma-identifiers = { path = "../ruma/ruma-identifiers" } diff --git a/src/client_server.rs b/src/client_server.rs index 15ced16..d3ff62c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -602,15 +602,17 @@ pub fn get_global_account_data_route( )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - // TODO clearly this is not ideal... - // NOTE: EventJson is no longer needed as all the enums and event structs impl ser/de - let data: Result = data.deserialize().map_err(Into::into); - match data? { - EduEvent::Basic(data) => Ok(get_global_account_data::Response { + let data: Result = data + .deserialize() + .map_err(|_| Error::bad_database("Deserialization of account data failed")); + + if let EduEvent::Basic(data) = data? { + Ok(get_global_account_data::Response { account_data: EventJson::from(data), } - .into()), - _ => panic!("timo what do i do here"), + .into()) + } else { + Err(Error::bad_database("Encountered a non account data event.")) } } @@ -2523,7 +2525,6 @@ pub fn sync_route( .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|r| { if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { - // TODO we could get rid of EventJson? Some(EventJson::from(ev)) } else { None @@ -2554,8 +2555,8 @@ pub fn sync_route( .account_data .changes_since(Some(&room_id), &user_id, since)? .into_iter() - .flat_map(|(_, v)| { - if let Some(EduEvent::Basic(account_event)) = v.deserialize().ok() { + .filter_map(|(_, v)| { + if let Ok(EduEvent::Basic(account_event)) = v.deserialize() { Some(EventJson::from(account_event)) } else { None @@ -2618,7 +2619,6 @@ pub fn sync_route( .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|r| { if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { - // TODO we could get rid of EventJson? Some(EventJson::from(ev)) } else { None @@ -2710,8 +2710,8 @@ pub fn sync_route( .account_data .changes_since(None, &user_id, since)? 
.into_iter() - .flat_map(|(_, v)| { - if let Some(EduEvent::Basic(account_event)) = v.deserialize().ok() { + .filter_map(|(_, v)| { + if let Ok(EduEvent::Basic(account_event)) = v.deserialize() { Some(EventJson::from(account_event)) } else { None @@ -2859,13 +2859,12 @@ pub fn get_message_events_route( .clone() .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + let limit = body + .limit + .try_into() + .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; match body.dir { get_message_events::Direction::Forward => { - let limit = body - .limit - .try_into() - .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; - let events_after = db .rooms .pdus_after(&user_id, &body.room_id, from) @@ -2897,11 +2896,6 @@ pub fn get_message_events_route( .into()) } get_message_events::Direction::Backward => { - let limit = body - .limit - .try_into() - .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; - let events_before = db .rooms .pdus_until(&user_id, &body.room_id, from) diff --git a/src/database/globals.rs b/src/database/globals.rs index 00e0982..b04eebb 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -32,7 +32,7 @@ impl Globals { .unwrap_or("localhost") .to_owned(), ) - .map_err(|_| Error::bad_database("Invalid server name"))?, + .map_err(|_| Error::BadConfig("Invalid server name"))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), }) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index c352a01..de138b6 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -62,8 +62,6 @@ impl RoomEdus { room_id: &RoomId, since: u64, ) -> Result>>> { - // TODO is this ^^^^^^^ - // only ever a read receipt could we just return EphemeralRoomEvent here? let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/error.rs b/src/error.rs index e1af15a..7305073 100644 --- a/src/error.rs +++ b/src/error.rs @@ -25,11 +25,6 @@ pub enum Error { #[from] source: image::error::ImageError, }, - #[error("Could not deserialize json")] - SerdeError { - #[from] - source: serde_json::Error, - }, #[error("{0}")] BadConfig(&'static str), #[error("{0}")] From 63e23154f37dca1662df61c419460f682da8a8b7 Mon Sep 17 00:00:00 2001 From: Devin R Date: Fri, 26 Jun 2020 18:34:11 -0400 Subject: [PATCH 0168/1727] Rebase with key backups and cross signing Moved back to a fork of ruma with timo's key-backup and cross-signing branch. Ephemeral events in sync responses are EphemeralRoomEventStub (they also have no room_id like all of sync responses events) --- Cargo.lock | 2128 ++++++++++++++++++++++++++++++++++++ src/client_server.rs | 18 +- src/database.rs | 1 + src/database/rooms/edus.rs | 3 +- src/pdu.rs | 9 +- src/push_rules.rs | 15 +- 6 files changed, 2149 insertions(+), 25 deletions(-) create mode 100644 Cargo.lock diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..5a8b807 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,2128 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "addr2line" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602d785912f476e480434627e8732e6766b760c045bbf897d9dfaa9f4fbd399c" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler32" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" + +[[package]] +name = "arc-swap" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "async-trait" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base16" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" + +[[package]] +name = "base64" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +dependencies = [ + "byteorder", +] + +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "blake2b_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "bumpalo" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" + +[[package]] +name = "bytemuck" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "bytes" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" +dependencies = [ + "loom", +] + +[[package]] +name = "cc" +version = "1.0.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1be3409f94d7bdceeb5f5fac551039d9b3f00e25da7a74fc4d33400a0d96368" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "color_quant" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" + +[[package]] +name = "conduit" +version = "0.1.0" +dependencies = [ + "base64 0.12.3", + "directories", + "http", + "image", + "js_int", + "log", + "rand", + "reqwest", + "rocket", + "ruma", + "rust-argon2 0.8.2", + "serde", + "serde_json", + "sled", + "thiserror", + "tokio", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "cookie" +version = "0.12.0" +source = "git+https://github.com/SergioBenitez/cookie-rs?rev=e0f3e6c#e0f3e6c4daea108d55838c56da777b36898bd223" +dependencies = [ + "base64 0.10.1", + "percent-encoding 2.1.0", + "ring", + "time", +] + +[[package]] +name = "core-foundation" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" + +[[package]] +name = "crc32fast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "maybe-uninit", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if", + 
"lazy_static", +] + +[[package]] +name = "deflate" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e5d2a2273fed52a7f947ee55b092c4057025d7a3e04e5ecdbd25d6c3fb1bd7" +dependencies = [ + "adler32", + "byteorder", +] + +[[package]] +name = "devise" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +dependencies = [ + "devise_codegen", + "devise_core", +] + +[[package]] +name = "devise_codegen" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +dependencies = [ + "devise_core", + "quote 1.0.7", +] + +[[package]] +name = "devise_core" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +dependencies = [ + "bitflags", + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "directories" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" +dependencies = [ + "cfg-if", + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + +[[package]] +name = "dtoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" + +[[package]] +name = "encoding_rs" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" 
+ +[[package]] +name = "futures" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" + +[[package]] +name = "futures-executor" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" + +[[package]] +name = "futures-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "futures-sink" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" + +[[package]] +name = "futures-task" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] + +[[package]] +name = "futures-util" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "generator" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gif" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "471d90201b3b223f3451cd4ad53e34295f16a1df17b1edf3736d47761c3981af" +dependencies = [ + "color_quant", + "lzw", +] + +[[package]] +name = "gimli" +version = "0.21.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" + +[[package]] +name = "h2" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "log", + "slab", + "tokio", + "tokio-util", +] + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909" +dependencies = [ + "libc", +] + +[[package]] +name = "http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "httparse" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" + +[[package]] +name = "hyper" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "log", + "pin-project", + "socket2", + "time", + "tokio", + "tower-service", + "want", +] + +[[package]] +name = "hyper-tls" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-tls", +] + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "image" +version = "0.23.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b0553fec6407d63fe2975b794dfb099f3f790bdc958823851af37b26404ab4" +dependencies = [ + "bytemuck", + "byteorder", + "gif", + "jpeg-decoder", + "num-iter", + "num-rational", + "num-traits", + "png", +] + +[[package]] +name = "indexmap" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +dependencies = [ + "autocfg", +] + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" + +[[package]] +name = "jpeg-decoder" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b47b4c4e017b01abdc5bcc126d2d1002e5a75bbe3ce73f9f4f311a916363704" +dependencies = [ + "byteorder", +] + +[[package]] +name = "js-sys" +version = "0.3.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "js_int" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b2b63d60564122f2a7d6592c2f1d6c1c60e7a266b4d24715950a1ddad784f66" +dependencies = [ + "serde", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "loom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", +] + +[[package]] +name = "lzw" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084" + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "memoffset" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "mime_guess" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +dependencies = [ + "adler32", +] + +[[package]] +name = "mio" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +dependencies = [ + "cfg-if", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio-uds" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +dependencies = [ + "iovec", + "libc", + "mio", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "native-tls" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "net2" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +dependencies = [ + "cfg-if", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "num-integer" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b4d7360f362cfb50dde8143501e6940b22f644be75a4cc90b2d81968908138" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + +[[package]] +name = "once_cell" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" + 
+[[package]] +name = "openssl" +version = "0.10.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "lazy_static", + "libc", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "openssl-sys" +version = "0.9.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "smallvec", + "winapi 0.3.9", +] + +[[package]] +name = "pear" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5320f212db967792b67cfe12bd469d08afd6318a249bd917d5c19bc92200ab8a" +dependencies = [ + "pear_codegen", +] + +[[package]] +name = "pear_codegen" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfc1c836fdc3d1ef87c348b237b5b5c4dff922156fb2d968f57734f9669768ca" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", + "version_check", + "yansi", +] + +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pin-project" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" + +[[package]] +name = "png" +version = "0.16.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34ccdd66f6fe4b2433b07e4728e9a013e43233120427046e93ceb709c3a439bf" +dependencies = [ + "bitflags", + "crc32fast", + "deflate", + "miniz_oxide", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" + +[[package]] +name = "proc-macro-hack" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" + +[[package]] +name = "proc-macro-nested" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" + +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +dependencies = [ + "unicode-xid 0.1.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +dependencies = [ + "unicode-xid 0.2.1", +] + +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +dependencies = [ + "proc-macro2 0.4.30", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2 1.0.18", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" + +[[package]] +name = "redox_users" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +dependencies = [ + "getrandom", + "redox_syscall", + "rust-argon2 0.7.0", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name 
= "reqwest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +dependencies = [ + "base64 0.12.3", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "hyper-tls", + "js-sys", + "lazy_static", + "log", + "mime", + "mime_guess", + "native-tls", + "percent-encoding 2.1.0", + "pin-project-lite", + "serde", + "serde_urlencoded", + "tokio", + "tokio-tls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + +[[package]] +name = "ring" +version = "0.16.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.9", +] + +[[package]] +name = "rocket" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +dependencies = [ + "async-trait", + "atty", + "base16", + "base64 0.11.0", + "futures", + "log", + "memchr", + "num_cpus", + "pear", + "rocket_codegen", + "rocket_http", + "state", + "time", + "tokio", + "toml", + "version_check", + "yansi", +] + +[[package]] +name = "rocket_codegen" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +dependencies = [ + "devise", + "indexmap", + "quote 1.0.7", + "rocket_http", + "version_check", + "yansi", +] + +[[package]] +name = "rocket_http" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +dependencies = [ + "cookie", + "http", + "hyper", + "indexmap", + "log", + "mime", + "pear", + "percent-encoding 1.0.1", + "smallvec", + "state", + "time", + "tokio", + "tokio-rustls", + "unicode-xid 0.2.1", +] + +[[package]] +name = "ruma" +version = "0.1.0" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "ruma-api", + "ruma-client-api", + "ruma-common", + "ruma-events", + "ruma-federation-api", + "ruma-identifiers", + "ruma-signatures", +] + +[[package]] +name = "ruma-api" +version = "0.16.1" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "http", + "percent-encoding 2.1.0", + "ruma-api-macros", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-api-macros" +version = "0.16.1" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "ruma-client-api" +version = "0.9.0" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "http", + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-common" +version = "0.1.3" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "js_int", + "matches", + "ruma-serde", + "serde", + "serde_json", + 
"strum", +] + +[[package]] +name = "ruma-events" +version = "0.21.3" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "js_int", + "ruma-common", + "ruma-events-macros", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-events-macros" +version = "0.21.3" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "ruma-federation-api" +version = "0.0.2" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "js_int", + "matches", + "ruma-api", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-identifiers" +version = "0.16.2" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "rand", + "serde", + "strum", +] + +[[package]] +name = "ruma-serde" +version = "0.2.2" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "form_urlencoded", + "itoa", + "js_int", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-signatures" +version = "0.6.0-dev.1" +source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +dependencies = [ + "base64 0.12.3", + "ring", + "serde_json", + "untrusted", +] + +[[package]] +name = "rust-argon2" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +dependencies = [ + "base64 0.11.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + +[[package]] +name = "rust-argon2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +dependencies = [ + "base64 0.12.3", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +dependencies = [ + "base64 0.10.1", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi 0.3.9", +] + +[[package]] +name = "scoped-tls" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "serde_json" +version = "1.0.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +dependencies = [ + "dtoa", + "itoa", + "serde", + "url", +] + +[[package]] +name = "signal-hook-registry" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +dependencies = [ + "arc-swap", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "sled" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdad3dc85d888056d3bd9954ffdf22d8a22701b6cd3aca4f6df4c436111898c4" +dependencies = [ + "backtrace", + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log", + "parking_lot", +] + +[[package]] +name = "smallvec" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" + +[[package]] +name = "socket2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi 0.3.9", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "state" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" + +[[package]] +name = "strum" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57bd81eb48f4c437cadc685403cad539345bf703d78e63707418431cecd4522b" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" +dependencies = [ + "heck", + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] + +[[package]] +name = "syn" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "unicode-xid 0.2.1", +] + +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi 0.3.9", +] + +[[package]] +name = "thiserror" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + +[[package]] +name = "tokio" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "iovec", + "lazy_static", + "libc", + "memchr", + "mio", + "mio-uds", + "num_cpus", + "pin-project-lite", + "signal-hook-registry", + "slab", + "tokio-macros", + "winapi 0.3.9", +] + +[[package]] +name = 
"tokio-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "tokio-rustls" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" +dependencies = [ + "futures-core", + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +dependencies = [ + "serde", +] + +[[package]] +name = "tower-service" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" + +[[package]] +name = "try-lock" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +dependencies = [ + "idna", + "matches", + "percent-encoding 2.1.0", +] + +[[package]] +name = "vcpkg" +version 
= "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasm-bindgen" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0" +dependencies = [ + "cfg-if", + "serde", + "serde_json", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" +dependencies = [ + "quote 1.0.7", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd" + +[[package]] +name = "web-sys" +version = "0.3.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + 
+[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winreg" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "yansi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" diff --git a/src/client_server.rs b/src/client_server.rs index d3ff62c..057db5b 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2523,13 +2523,6 @@ pub fn sync_route( .edus .roomlatests_since(&room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|r| { - if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { - Some(EventJson::from(ev)) - } else { - None - } - }) .collect::>(); if db @@ -2617,13 +2610,6 @@ pub fn sync_route( .edus .roomlatests_since(&room_id, since)? 
.filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|r| { - if let Ok(EduEvent::Ephemeral(ev)) = r.deserialize() { - Some(EventJson::from(ev)) - } else { - None - } - }) .collect::>(); if db @@ -2634,9 +2620,9 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&EduEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( + &serde_json::to_string(&ruma::events::AnyEphemeralRoomEventStub::Typing( db.rooms.edus.roomactives_all(&room_id)?, - ))) + )) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), diff --git a/src/database.rs b/src/database.rs index 370fde7..fcda5af 100644 --- a/src/database.rs +++ b/src/database.rs @@ -62,6 +62,7 @@ impl Database { .to_owned()) })?; + println!("{:?}", path); let db = sled::open(&path)?; info!("Opened sled database at {}", path); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index de138b6..10f893a 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -61,7 +61,8 @@ impl RoomEdus { &self, room_id: &RoomId, since: u64, - ) -> Result>>> { + ) -> Result>>> + { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/pdu.rs b/src/pdu.rs index 73bb869..2fcc3df 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,8 +2,9 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ events::{ - pdu::EventHash, AnyRoomEvent, AnyRoomEventStub, AnyStateEvent, AnyStateEventStub, - AnyStrippedStateEventStub, EventJson, EventType, + pdu::EventHash, room::member::MemberEventContent, AnyRoomEvent, AnyRoomEventStub, + AnyStateEvent, AnyStateEventStub, AnyStrippedStateEventStub, EventJson, EventType, + StateEvent, }, identifiers::{EventId, RoomId, UserId}, }; @@ -103,9 +104,9 @@ impl PduEvent { serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } - pub fn to_member_event(&self) -> EventJson { + pub fn to_member_event(&self) -> EventJson> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) + serde_json::from_str::>>(&json) .expect("EventJson::from_str always works") } } diff --git a/src/push_rules.rs b/src/push_rules.rs index 90e9243..cb277b9 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,7 +1,10 @@ +use js_int::uint; use ruma::{ - events::push_rules::{ConditionalPushRule, PatternedPushRule, PushCondition, Ruleset}, identifiers::UserId, - push::{Action, Tweak}, + push::{ + Action, ConditionalPushRule, PatternedPushRule, PushCondition, RoomMemberCountIs, Ruleset, + Tweak, + }, }; pub fn default_pushrules(user_id: &UserId) -> Ruleset { @@ -174,7 +177,9 @@ pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { enabled: true, rule_id: ".m.rule.encrypted_room_one_to_one".to_owned(), conditions: vec![ - PushCondition::RoomMemberCount { is: "2".to_owned() }, + PushCondition::RoomMemberCount { + is: RoomMemberCountIs::from(uint!(2)..), + }, PushCondition::EventMatch { key: "type".to_owned(), pattern: "m.room.encrypted".to_owned(), @@ -194,7 +199,9 @@ pub fn room_one_to_one_rule() -> ConditionalPushRule { enabled: true, rule_id: ".m.rule.room_one_to_one".to_owned(), conditions: vec![ - PushCondition::RoomMemberCount { is: "2".to_owned() }, + PushCondition::RoomMemberCount { + is: RoomMemberCountIs::from(uint!(2)..), + }, PushCondition::EventMatch { key: "type".to_owned(), pattern: "m.room.message".to_owned(), From d02685a4fd6c4071ea9f22c7876cd87422cdb555 Mon Sep 17 00:00:00 2001 From: Devin R Date: Fri, 17 Jul 2020 16:00:39 -0400 
Subject: [PATCH 0169/1727] Update to latest ruma/ruma commit This will most likely be the API that is released to crates.io so it should be fairly stable... --- Cargo.lock | 309 ++++++++++++++++++------------------ Cargo.toml | 6 +- src/client_server.rs | 60 +++---- src/database/globals.rs | 21 ++- src/database/key_backups.rs | 2 +- src/database/rooms/edus.rs | 2 +- src/database/users.rs | 6 +- src/pdu.rs | 16 +- 8 files changed, 205 insertions(+), 217 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a8b807..3d23fac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,13 +2,19 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602d785912f476e480434627e8732e6766b760c045bbf897d9dfaa9f4fbd399c" +checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" dependencies = [ "gimli", ] +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + [[package]] name = "adler32" version = "1.1.0" @@ -21,6 +27,12 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +[[package]] +name = "array-init" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30bbe2f5e3d117f55bd8c7a1f9191e4a5deba9f15f595bbea4f670c59c765db" + [[package]] name = "arrayref" version = "0.3.6" @@ -41,7 +53,7 @@ checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -63,14 +75,14 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.4.0", "object", "rustc-demangle", ] @@ -139,18 +151,15 @@ checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" -dependencies = [ - "loom", -] +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.55" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1be3409f94d7bdceeb5f5fac551039d9b3f00e25da7a74fc4d33400a0d96368" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" [[package]] name = "cfg-if" @@ -160,9 +169,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cloudabi" -version = "0.0.3" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" dependencies = [ "bitflags", ] @@ -265,9 +274,9 @@ dependencies = [ [[package]] name = "deflate" 
-version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e5d2a2273fed52a7f947ee55b092c4057025d7a3e04e5ecdbd25d6c3fb1bd7" +checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" dependencies = [ "adler32", "byteorder", @@ -299,7 +308,7 @@ dependencies = [ "bitflags", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -452,7 +461,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -499,19 +508,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generator" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - [[package]] name = "getrandom" version = "0.1.14" @@ -535,15 +531,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" [[package]] name = "h2" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" dependencies = [ "bytes", "fnv", @@ -552,10 +548,10 @@ dependencies = [ "futures-util", "http", "indexmap", - "log", "slab", "tokio", "tokio-util", + "tracing", ] [[package]] @@ -569,9 +565,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ "libc", ] @@ -605,9 +601,9 @@ checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] name = "hyper" -version = "0.13.6" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" +checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" dependencies = [ "bytes", "futures-channel", @@ -618,20 +614,20 @@ dependencies = [ "http-body", "httparse", "itoa", - "log", "pin-project", "socket2", "time", "tokio", "tower-service", + "tracing", "want", ] [[package]] name = "hyper-tls" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" +checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ "bytes", "hyper", @@ -653,9 +649,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.6" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b0553fec6407d63fe2975b794dfb099f3f790bdc958823851af37b26404ab4" +checksum = "a2397fc43bd5648b7117aabb3c5e62d0e62c194826ec77b0b4d0c41e62744635" dependencies = [ "bytemuck", "byteorder", @@ -676,6 +672,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "instant" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" + [[package]] name = "iovec" version = "0.1.4" @@ -693,18 +695,18 @@ checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "jpeg-decoder" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b47b4c4e017b01abdc5bcc126d2d1002e5a75bbe3ce73f9f4f311a916363704" +checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" dependencies = [ "byteorder", ] [[package]] name = "js-sys" -version = "0.3.40" +version = "0.3.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177" +checksum = "52732a3d3ad72c58ad2dc70624f9c17b46ecd0943b9a4f1ee37c4c18c5d983e2" dependencies = [ "wasm-bindgen", ] @@ -736,39 +738,28 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" +checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" [[package]] name = "lock_api" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" dependencies = [ "scopeguard", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", ] -[[package]] -name = "loom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" -dependencies = [ - "cfg-if", - "generator", - "scoped-tls", -] - [[package]] name = "lzw" version = "0.10.0" @@ -795,9 +786,9 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memoffset" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ "autocfg", ] @@ -827,6 +818,15 @@ dependencies = [ "adler32", ] +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.6.22" @@ -996,22 +996,24 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" +checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" dependencies = [ + "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.7.2" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ "cfg-if", "cloudabi", + "instant", "libc", "redox_syscall", "smallvec", @@ -1069,7 +1071,7 @@ checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -1086,20 +1088,20 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "png" -version = "0.16.5" +version = "0.16.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ccdd66f6fe4b2433b07e4728e9a013e43233120427046e93ceb709c3a439bf" +checksum = "c150bf7479fafe3dd8740dbe48cc33b2a3efb7b0fe3483aced8bbc39f6d0238d" dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide", + "miniz_oxide 0.3.7", ] [[package]] @@ -1199,9 +1201,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" @@ -1333,7 +1335,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "ruma-api", "ruma-client-api", @@ -1347,7 +1349,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1362,17 +1364,18 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ + "matches", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "http", "js_int", @@ -1389,7 +1392,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "js_int", "matches", @@ -1402,7 +1405,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = 
"git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "js_int", "ruma-common", @@ -1417,21 +1420,23 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ + "matches", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "js_int", "matches", "ruma-api", + "ruma-common", "ruma-events", "ruma-identifiers", "ruma-serde", @@ -1441,9 +1446,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.16.2" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +version = "0.17.0-pre.1" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ + "matches", "rand", "serde", "strum", @@ -1452,7 +1458,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "form_urlencoded", "itoa", @@ -1464,7 +1470,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/DevinR528/ruma?branch=key-sign#00edba94afa930c3a9674ac5565aa0c930b1d592" +source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" dependencies = [ "base64 0.12.3", "ring", @@ -1502,15 +1508,6 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - [[package]] name = "rustls" version = "0.16.0" @@ -1540,12 +1537,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" - [[package]] name = "scopeguard" version = "1.1.0" @@ -1585,21 +1576,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" version = "1.0.114" @@ -1617,14 +1593,14 @@ checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2 1.0.18", "quote 
1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] name = "serde_json" -version = "1.0.55" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226" +checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" dependencies = [ "itoa", "ryu", @@ -1661,10 +1637,11 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdad3dc85d888056d3bd9954ffdf22d8a22701b6cd3aca4f6df4c436111898c4" +checksum = "7e3dbbb8ee10611bd1d020767c27599ccbbf8365f7e0ed7e54429cc8b9433ad8" dependencies = [ + "array-init", "backtrace", "crc32fast", "crossbeam-epoch", @@ -1678,9 +1655,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" +checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" [[package]] name = "socket2" @@ -1724,7 +1701,7 @@ dependencies = [ "heck", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -1740,9 +1717,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", @@ -1780,7 +1757,7 @@ checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -1830,7 +1807,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", ] [[package]] @@ -1885,10 +1862,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] -name = "try-lock" -version = "0.2.2" +name = "tracing" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +checksum = "c2e2a2de6b0d5cbb13fc21193a2296888eaab62b6044479aafb3c54c01c29fcd" +dependencies = [ + "cfg-if", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94ae75f0d28ae10786f3b1895c55fe72e79928fd5ccdebb5438c75e93fec178f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "unicase" @@ -1982,9 +1979,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.63" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0" +checksum = "f3edbcc9536ab7eababcc6d2374a0b7bfe13a2b6d562c5e07f370456b1a8f33d" dependencies = [ "cfg-if", "serde", @@ 
-1994,24 +1991,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.63" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101" +checksum = "89ed2fb8c84bfad20ea66b26a3743f3e7ba8735a69fe7d95118c33ec8fc1244d" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6" +checksum = "41ad6e4e8b2b7f8c90b6e09a9b590ea15cb0d1dbe28502b5a405cd95d1981671" dependencies = [ "cfg-if", "js-sys", @@ -2021,9 +2018,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.63" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" +checksum = "eb071268b031a64d92fc6cf691715ca5a40950694d8f683c5bb43db7c730929e" dependencies = [ "quote 1.0.7", "wasm-bindgen-macro-support", @@ -2031,28 +2028,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.63" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" +checksum = "cf592c807080719d1ff2f245a687cbadb3ed28b2077ed7084b47aba8b691f2c6" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.34", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.63" +version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd" +checksum = "72b6c0220ded549d63860c78c38f3bcc558d1ca3f4efa74942c536ddbbb55e87" [[package]] name = "web-sys" -version = "0.3.40" +version = "0.3.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" +checksum = "8be2398f326b7ba09815d0b403095f34dd708579220d099caae89be0b32137b2" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index f15c403..514a178 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,6 @@ thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } [dependencies.ruma] -git = "https://github.com/DevinR528/ruma" -branch = "key-sign" -features = ["rand", "client-api", "federation-api"] +git = "https://github.com/ruma/ruma" +features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] +rev = "848b225" diff --git a/src/client_server.rs b/src/client_server.rs index 057db5b..efe6973 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -220,7 +220,7 @@ pub fn register_route( Ok(register::Response { access_token: Some(token), user_id, - device_id: Some(device_id), + device_id: Some(device_id.into_boxed_str()), } .into()) } @@ -268,7 +268,7 @@ pub fn login_route( .body .device_id .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into_boxed_str()); // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); @@ -285,7 +285,7 @@ pub fn login_route( 
user_id, access_token: token, home_server: Some(db.globals.server_name().to_string()), - device_id, + device_id: device_id.to_string(), well_known: None, } .into()) @@ -898,7 +898,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.to_owned(), keys); + container.insert(device_id.to_owned().into_boxed_str(), keys); } } device_keys.insert(user_id.clone(), container); @@ -917,7 +917,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.clone(), keys); + container.insert(device_id.to_string().into_boxed_str(), keys); } device_keys.insert(user_id.clone(), container); } @@ -1214,21 +1214,19 @@ pub fn create_room_route( } })?; + let mut content = ruma::events::room::create::CreateEventContent::new(user_id.clone()); + content.federate = body.creation_content.as_ref().map_or(true, |c| c.federate); + content.predecessor = body + .creation_content + .as_ref() + .and_then(|c| c.predecessor.clone()); + content.room_version = RoomVersionId::version_6(); // 1. The room create event db.rooms.append_pdu( room_id.clone(), user_id.clone(), EventType::RoomCreate, - serde_json::to_value(ruma::events::room::create::CreateEventContent { - creator: user_id.clone(), - federate: body.creation_content.as_ref().map_or(true, |c| c.federate), - predecessor: body - .creation_content - .as_ref() - .and_then(|c| c.predecessor.clone()), - room_version: RoomVersionId::version_6(), - }) - .expect("event is valid, we just created it"), + serde_json::to_value(content).expect("event is valid, we just created it"), None, Some("".to_owned()), None, @@ -1329,9 +1327,9 @@ pub fn create_room_route( room_id.clone(), user_id.clone(), EventType::RoomHistoryVisibility, - serde_json::to_value(history_visibility::HistoryVisibilityEventContent { - history_visibility: history_visibility::HistoryVisibility::Shared, - }) + serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + )) .expect("event is valid, we just created it"), None, Some("".to_owned()), @@ -1345,15 +1343,13 @@ pub fn create_room_route( user_id.clone(), EventType::RoomGuestAccess, match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent { - guest_access: guest_access::GuestAccess::Forbidden, - }) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent { - guest_access: guest_access::GuestAccess::CanJoin, - }) + create_room::RoomPreset::PublicChat => serde_json::to_value( + guest_access::GuestAccessEventContent::new(guest_access::GuestAccess::Forbidden), + ) + .expect("event is valid, we just created it"), + _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::CanJoin, + )) .expect("event is valid, we just created it"), }, None, @@ -2567,11 +2563,7 @@ pub fn sync_route( notification_count, }, timeline: sync_events::Timeline { - limited: if limited || joined_since_last_sync { - Some(true) - } else { - None - }, + limited: limited || joined_since_last_sync, prev_batch, events: room_events, }, @@ -2620,7 +2612,7 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&ruma::events::AnyEphemeralRoomEventStub::Typing( + &serde_json::to_string(&ruma::events::AnySyncEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, )) .expect("event is valid, we just created it"), @@ -2632,7 +2624,7 @@ pub fn 
sync_route( let left_room = sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, timeline: sync_events::Timeline { - limited: Some(false), + limited: false, prev_batch: Some(next_batch.clone()), events: room_events, }, diff --git a/src/database/globals.rs b/src/database/globals.rs index b04eebb..d307150 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,14 +1,14 @@ -use std::convert::TryFrom; +use std::convert::TryInto; use crate::{utils, Error, Result}; -use ruma::identifiers::{ServerName, ServerNameRef}; +use ruma::identifiers::ServerName; pub const COUNTER: &str = "c"; pub struct Globals { pub(super) globals: sled::Tree, keypair: ruma::signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, - server_name: ServerName, + server_name: Box, registration_disabled: bool, } @@ -26,13 +26,12 @@ impl Globals { globals, keypair, reqwest_client: reqwest::Client::new(), - server_name: ServerName::try_from( - config - .get_str("server_name") - .unwrap_or("localhost") - .to_owned(), - ) - .map_err(|_| Error::BadConfig("Invalid server name"))?, + server_name: config + .get_str("server_name") + .unwrap_or("localhost") + .to_string() + .try_into() + .map_err(|_| crate::Error::bad_database("Private or public keys are invalid."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), }) } @@ -64,7 +63,7 @@ impl Globals { }) } - pub fn server_name(&self) -> ServerNameRef<'_> { + pub fn server_name(&self) -> &ServerName { self.server_name.as_ref() } diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 991931b..eb31b8d 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -2,7 +2,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::backup::{get_backup_keys::Sessions, BackupAlgorithm, KeyData}, + r0::backup::{BackupAlgorithm, KeyData, Sessions}, }, identifiers::{RoomId, UserId}, }; diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 10f893a..0b3ac57 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -61,7 +61,7 @@ impl RoomEdus { &self, room_id: &RoomId, since: u64, - ) -> Result>>> + ) -> Result>>> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/users.rs b/src/database/users.rs index 4e2e1a2..b755b18 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{AnyToDeviceEvent, EventJson, EventType}, - identifiers::UserId, + identifiers::{DeviceId, UserId}, }; use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; @@ -168,7 +168,7 @@ impl Users { pub fn create_device( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, token: &str, initial_device_display_name: Option, ) -> Result<()> { @@ -182,7 +182,7 @@ impl Users { self.userdeviceid_metadata.insert( userdeviceid, serde_json::to_string(&Device { - device_id: device_id.to_owned(), + device_id: device_id.to_string().into_boxed_str(), display_name: initial_device_display_name, last_seen_ip: None, // TODO last_seen_ts: Some(SystemTime::now()), diff --git a/src/pdu.rs b/src/pdu.rs index 2fcc3df..403e699 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,8 +2,8 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ events::{ - pdu::EventHash, room::member::MemberEventContent, AnyRoomEvent, AnyRoomEventStub, - AnyStateEvent, AnyStateEventStub, AnyStrippedStateEventStub, EventJson, EventType, + pdu::EventHash, 
room::member::MemberEventContent, AnyRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventJson, EventType, StateEvent, }, identifiers::{EventId, RoomId, UserId}, @@ -79,9 +79,9 @@ impl PduEvent { Ok(()) } - pub fn to_room_event_stub(&self) -> EventJson { + pub fn to_room_event_stub(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) + serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } pub fn to_room_event(&self) -> EventJson { @@ -94,14 +94,14 @@ impl PduEvent { serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } - pub fn to_state_event_stub(&self) -> EventJson { + pub fn to_state_event_stub(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) + serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } - pub fn to_stripped_state_event(&self) -> EventJson { + pub fn to_stripped_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) + serde_json::from_str::>(&json) .expect("EventJson::from_str always works") } pub fn to_member_event(&self) -> EventJson> { From 59cbf202e47e729dafb474ef46b4825fbd9af37d Mon Sep 17 00:00:00 2001 From: Devin R Date: Fri, 17 Jul 2020 16:05:53 -0400 Subject: [PATCH 0170/1727] Cargo.lock updated while rebasing --- Cargo.lock | 425 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 409 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d23fac..79973c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,6 +21,61 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" +[[package]] +name = "aead" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher-trait", +] + +[[package]] +name = "aes-gcm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "834a6bda386024dbb7c8fc51322856c10ffe69559f972261c868485f5759c638" +dependencies = [ + "aead", + "aes", + "block-cipher-trait", + "ghash", + "subtle 2.2.3", + "zeroize", +] + +[[package]] +name = "aes-soft" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" +dependencies = [ + "block-cipher-trait", + "byteorder", + "opaque-debug", +] + +[[package]] +name = "aesni" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" +dependencies = [ + "block-cipher-trait", + "opaque-debug", +] + [[package]] name = "arc-swap" version = "0.4.7" @@ -56,6 +111,12 @@ dependencies = [ "syn 1.0.34", ] +[[package]] +name = "atomic" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" + [[package]] name = 
"atty" version = "0.2.14" @@ -88,10 +149,10 @@ dependencies = [ ] [[package]] -name = "base16" -version = "0.2.1" +name = "base-x" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" +checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" [[package]] name = "base64" @@ -114,6 +175,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "binascii" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" + [[package]] name = "bitflags" version = "1.2.1" @@ -131,12 +198,48 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-cipher-trait" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + [[package]] name = "bumpalo" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + [[package]] name = "bytemuck" version = "1.2.0" @@ -212,13 +315,17 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.12.0" -source = "git+https://github.com/SergioBenitez/cookie-rs?rev=e0f3e6c#e0f3e6c4daea108d55838c56da777b36898bd223" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca761767cf3fa9068cc893ec8c247a22d0fd0535848e65640c0548bd1f8bbb36" dependencies = [ - "base64 0.10.1", + "aes-gcm", + "base64 0.12.3", + "hkdf", "percent-encoding 2.1.0", - "ring", - "time", + "rand", + "sha2", + "time 0.2.16", ] [[package]] @@ -272,6 +379,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle 1.0.0", +] + [[package]] name = "deflate" version = "0.8.6" @@ -311,6 +428,15 @@ dependencies = [ "syn 1.0.34", ] +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + [[package]] name = "directories" version = "2.0.2" @@ -332,6 +458,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "discard" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "dtoa" version = "0.4.6" @@ -347,6 +479,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fnv" version = "1.0.7" @@ -508,6 +646,15 @@ dependencies = [ "byteorder", ] +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + [[package]] name = "getrandom" version = "0.1.14" @@ -519,6 +666,15 @@ dependencies = [ "wasi", ] +[[package]] +name = "ghash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +dependencies = [ + "polyval", +] + [[package]] name = "gif" version = "0.10.3" @@ -535,6 +691,12 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "h2" version = "0.2.6" @@ -572,6 +734,26 @@ dependencies = [ "libc", ] +[[package]] +name = "hkdf" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" +dependencies = [ + "digest", + "hmac", +] + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac", + "digest", +] + [[package]] name = "http" version = "0.2.1" @@ -616,7 +798,7 @@ dependencies = [ "itoa", "pin-project", "socket2", - "time", + "time 0.1.43", "tokio", "tower-service", "tracing", @@ -961,6 +1143,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + [[package]] name = "openssl" version = "0.10.30" @@ -1104,6 +1292,16 @@ dependencies = [ "miniz_oxide 0.3.7", ] +[[package]] +name = "polyval" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +dependencies = [ + "cfg-if", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.8" @@ -1216,6 +1414,26 @@ dependencies = [ "rust-argon2 0.7.0", ] +[[package]] +name = "ref-cast" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.34", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -1277,21 +1495,22 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies = [ "async-trait", + "atomic", "atty", - "base16", - "base64 0.11.0", + "binascii", "futures", "log", "memchr", "num_cpus", "pear", + "ref-cast", "rocket_codegen", "rocket_http", "state", - "time", + "time 0.2.16", "tokio", "toml", "version_check", @@ -1301,9 +1520,10 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies = [ "devise", + "glob", "indexmap", "quote 1.0.7", "rocket_http", @@ -1314,7 +1534,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=4928e35ec5c4b9242f50d644282d9896d0160a10#4928e35ec5c4b9242f50d644282d9896d0160a10" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" dependencies = [ "cookie", "http", @@ -1324,9 +1544,10 @@ dependencies = [ "mime", "pear", "percent-encoding 1.0.1", + "ref-cast", "smallvec", "state", - "time", + "time 0.2.16", "tokio", "tokio-rustls", "unicode-xid 0.2.1", @@ -1508,6 +1729,15 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.16.0" @@ -1576,6 +1806,21 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "serde" version = "1.0.114" @@ -1619,6 +1864,24 @@ dependencies = [ "url", ] +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + [[package]] name = "signal-hook-registry" version = "1.2.0" @@ -1677,12 +1940,70 @@ version = "0.5.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "standback" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +dependencies = [ + "version_check", +] + [[package]] name = "state" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "serde", + "serde_derive", + "syn 1.0.34", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2 1.0.18", + "quote 1.0.7", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn 1.0.34", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + [[package]] name = "strum" version = "0.18.0" @@ -1704,6 +2025,18 @@ dependencies = [ "syn 1.0.34", ] +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" + [[package]] name = "syn" version = "0.15.44" @@ -1770,6 +2103,44 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +dependencies = [ + "cfg-if", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi 0.3.9", +] + +[[package]] +name = "time-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.18", + "quote 1.0.7", + "standback", + "syn 1.0.34", +] + [[package]] name = "tinyvec" version = "0.3.3" @@ -1887,6 +2258,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = 
"typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + [[package]] name = "unicase" version = "2.6.0" @@ -1932,6 +2309,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +[[package]] +name = "universal-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +dependencies = [ + "generic-array", + "subtle 2.2.3", +] + [[package]] name = "untrusted" version = "0.7.1" @@ -2123,3 +2510,9 @@ name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" From 029c60be783519249aeba845fd5ceff10b239d43 Mon Sep 17 00:00:00 2001 From: Devin R Date: Fri, 17 Jul 2020 19:52:04 -0400 Subject: [PATCH 0171/1727] Replace to_string into_boxed_str with into --- src/client_server.rs | 8 ++++---- src/database/users.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index efe6973..1300bff 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -220,7 +220,7 @@ pub fn register_route( Ok(register::Response { access_token: Some(token), user_id, - device_id: Some(device_id.into_boxed_str()), + device_id: Some(device_id.into()), } .into()) } @@ -268,7 +268,7 @@ pub fn login_route( .body .device_id .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into_boxed_str()); + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); @@ -898,7 +898,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.to_owned().into_boxed_str(), keys); + container.insert(device_id.into(), keys); } } device_keys.insert(user_id.clone(), container); @@ -917,7 +917,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.to_string().into_boxed_str(), keys); + container.insert(device_id.as_ref().into(), keys); } device_keys.insert(user_id.clone(), container); } diff --git a/src/database/users.rs b/src/database/users.rs index b755b18..b103691 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -182,7 +182,7 @@ impl Users { self.userdeviceid_metadata.insert( userdeviceid, serde_json::to_string(&Device { - device_id: device_id.to_string().into_boxed_str(), + device_id: device_id.into(), display_name: initial_device_display_name, last_seen_ip: None, // TODO last_seen_ts: Some(SystemTime::now()), From 75959cdc10f5e2036107ad02d68ee782e452ab53 Mon Sep 17 00:00:00 2001 From: Devin R Date: Sat, 18 Jul 2020 08:21:25 -0400 Subject: [PATCH 0172/1727] Resolve stying review issues --- Cargo.toml | 6 +---- src/client_server.rs | 45 +++++++++++++++++++------------------- src/database.rs | 1 - src/database/globals.rs | 2 +- src/database/rooms/edus.rs | 36 ++++++++++++++++++++++++++++-- 5 files changed, 59 insertions(+), 31 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 514a178..80785c8 100644 --- a/Cargo.toml 
+++ b/Cargo.toml @@ -27,8 +27,4 @@ reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } - -[dependencies.ruma] -git = "https://github.com/ruma/ruma" -features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] -rev = "848b225" +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "848b225" } diff --git a/src/client_server.rs b/src/client_server.rs index 1300bff..c83f9e2 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -64,7 +64,7 @@ use ruma::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent as EduEvent, EventJson, EventType, + AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, EventJson, EventType, }, identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, }; @@ -285,7 +285,7 @@ pub fn login_route( user_id, access_token: token, home_server: Some(db.globals.server_name().to_string()), - device_id: device_id.to_string(), + device_id: device_id.into(), well_known: None, } .into()) @@ -479,7 +479,7 @@ pub fn get_pushrules_all_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let EduEvent::Basic(AnyBasicEvent::PushRules(pushrules)) = db + if let AnyEvent::Basic(AnyBasicEvent::PushRules(pushrules)) = db .account_data .get(None, &user_id, &EventType::PushRules)? .ok_or(Error::BadRequest( @@ -602,13 +602,13 @@ pub fn get_global_account_data_route( )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - let data: Result = data + let data: AnyEvent = data .deserialize() - .map_err(|_| Error::bad_database("Deserialization of account data failed")); + .map_err(|_| Error::bad_database("Deserialization of account data failed"))?; - if let EduEvent::Basic(data) = data? { + if let AnyEvent::Basic(data) = data { Ok(get_global_account_data::Response { - account_data: EventJson::from(data), + account_data: data.into(), } .into()) } else { @@ -1145,7 +1145,7 @@ pub fn set_read_marker_route( db.rooms.edus.roomlatest_update( &user_id, &body.room_id, - EduEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( + AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), @@ -1221,6 +1221,7 @@ pub fn create_room_route( .as_ref() .and_then(|c| c.predecessor.clone()); content.room_version = RoomVersionId::version_6(); + // 1. The room create event db.rooms.append_pdu( room_id.clone(), @@ -2511,7 +2512,7 @@ pub fn sync_route( let room_events = pdus .into_iter() - .map(|pdu| pdu.to_room_event_stub()) + .map(|pdu| pdu.to_sync_room_event()) .collect::>(); let mut edus = db @@ -2529,7 +2530,7 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&EduEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( + &serde_json::to_string(&AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, ))) .expect("event is valid, we just created it"), @@ -2544,14 +2545,14 @@ pub fn sync_route( .account_data .changes_since(Some(&room_id), &user_id, since)? 
.into_iter() - .filter_map(|(_, v)| { - if let Ok(EduEvent::Basic(account_event)) = v.deserialize() { - Some(EventJson::from(account_event)) + .map(|(_, v)| { + if let Ok(AnyEvent::Basic(account_event)) = v.deserialize() { + Ok(EventJson::from(account_event)) } else { - None + Err(Error::bad_database("found invalid event")) } }) - .collect(), + .collect::, _>>()?, }, summary: sync_events::RoomSummary { heroes, @@ -2573,7 +2574,7 @@ pub fn sync_route( db.rooms .room_state_full(&room_id)? .into_iter() - .map(|(_, pdu)| pdu.to_state_event_stub()) + .map(|(_, pdu)| pdu.to_sync_state_event()) .collect() } else { Vec::new() @@ -2593,7 +2594,7 @@ pub fn sync_route( let pdus = db.rooms.pdus_since(&user_id, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .map(|pdu| pdu.to_room_event_stub()) + .map(|pdu| pdu.to_sync_room_event()) .collect(); // TODO: Only until leave point @@ -2688,14 +2689,14 @@ pub fn sync_route( .account_data .changes_since(None, &user_id, since)? .into_iter() - .filter_map(|(_, v)| { - if let Ok(EduEvent::Basic(account_event)) = v.deserialize() { - Some(EventJson::from(account_event)) + .map(|(_, v)| { + if let Ok(AnyEvent::Basic(account_event)) = v.deserialize() { + Ok(EventJson::from(account_event)) } else { - None + Err(Error::bad_database("found invalid event")) } }) - .collect(), + .collect::, _>>()?, }, device_lists: sync_events::DeviceLists { changed: if since != 0 { diff --git a/src/database.rs b/src/database.rs index fcda5af..370fde7 100644 --- a/src/database.rs +++ b/src/database.rs @@ -62,7 +62,6 @@ impl Database { .to_owned()) })?; - println!("{:?}", path); let db = sled::open(&path)?; info!("Opened sled database at {}", path); diff --git a/src/database/globals.rs b/src/database/globals.rs index d307150..1654636 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -31,7 +31,7 @@ impl Globals { .unwrap_or("localhost") .to_string() .try_into() - .map_err(|_| crate::Error::bad_database("Private or public keys are invalid."))?, + .map_err(|_| Error::bad_database("Private or public keys are invalid."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), }) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 0b3ac57..3ee1e75 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ - events::{AnyEvent as EduEvent, EventJson}, + events::{AnyEvent as EduEvent, EventJson, SyncEphemeralRoomEvent}, identifiers::{RoomId, UserId}, }; use std::convert::TryFrom; @@ -236,9 +236,41 @@ impl RoomEdus { Ok(ruma::events::typing::TypingEvent { content: ruma::events::typing::TypingEventContent { user_ids }, - room_id: room_id.clone(), // Can be inferred + room_id: room_id.clone(), }) } + // REMOVE the above method and uncomment the bottom when ruma/ruma PR #141 is merged + // pub fn roomactives_all( + // &self, + // room_id: &RoomId, + // ) -> Result> { + // let mut prefix = room_id.to_string().as_bytes().to_vec(); + // prefix.push(0xff); + + // let mut user_ids = Vec::new(); + + // for user_id in self + // .roomactiveid_userid + // .scan_prefix(prefix) + // .values() + // .map(|user_id| { + // Ok::<_, Error>( + // UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| { + // Error::bad_database("User ID in roomactiveid_userid is invalid unicode.") + // })?) 
+ // .map_err(|_| { + // Error::bad_database("User ID in roomactiveid_userid is invalid.") + // })?, + // ) + // }) + // { + // user_ids.push(user_id?); + // } + + // Ok(SyncEphemeralRoomEvent { + // content: ruma::events::typing::TypingEventContent { user_ids }, + // }) + // } /// Sets a private read marker at `count`. pub fn room_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { From 9a993fa7c1597a56d289378ab3ad7f766d3e8a57 Mon Sep 17 00:00:00 2001 From: Devin R Date: Sat, 18 Jul 2020 08:22:30 -0400 Subject: [PATCH 0173/1727] Convert Pdu to a validated ruma type when calling to_*_event --- src/pdu.rs | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/src/pdu.rs b/src/pdu.rs index 403e699..f55acaf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -79,34 +79,40 @@ impl PduEvent { Ok(()) } - pub fn to_room_event_stub(&self) -> EventJson { + pub fn to_sync_room_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::(&json) + .map(EventJson::from) + .expect("AnySyncRoomEvent can always be built from a full PDU event") } pub fn to_room_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::(&json) + .map(EventJson::from) + .expect("AnyRoomEvent can always be built from a full PDU event") } pub fn to_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::(&json) + .map(EventJson::from) + .expect("AnyStateEvent can always be built from a full PDU event") } - pub fn to_state_event_stub(&self) -> EventJson { + pub fn to_sync_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::(&json) + .map(EventJson::from) + .expect("AnySyncStateEvent can always be built from a full PDU event") } pub fn to_stripped_state_event(&self) -> EventJson { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::(&json) + .map(EventJson::from) + .expect("AnyStrippedStateEvent can always be built from a full PDU event") } pub fn to_member_event(&self) -> EventJson> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>>(&json) - .expect("EventJson::from_str always works") + serde_json::from_str::>(&json) + .map(EventJson::from) + .expect("StateEvent can always be built from a full PDU event") } } From 33bc666859ccbe939684c9e908b17be37cf6a7ed Mon Sep 17 00:00:00 2001 From: Devin R Date: Tue, 21 Jul 2020 14:04:39 -0400 Subject: [PATCH 0174/1727] Use sync ephemeral events in sync Resposne (remove room_id) Bump ruma to latest master which also fixes some server name as String to now being of type Box. 
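A rough sketch of the new shape (illustration only, not part of the patch below; the helper name typing_edu_for_sync is made up, the types are the ones used in the diffs that follow):

    use ruma::events::{typing::TypingEventContent, SyncEphemeralRoomEvent};
    use ruma::identifiers::UserId;

    // Build a typing EDU for a /sync response. The sync variant carries no
    // room_id field: the room is implied by which section of the sync
    // response the event is placed in, so only the content is kept.
    fn typing_edu_for_sync(user_ids: Vec<UserId>) -> SyncEphemeralRoomEvent<TypingEventContent> {
        SyncEphemeralRoomEvent {
            content: TypingEventContent { user_ids },
        }
    }

Server names are similarly typed now: call sites that previously produced a String via server_name().to_string() hand out a Box<ServerName> via server_name().to_owned() instead.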
--- Cargo.lock | 124 ++++++++++++++++++++----------------- Cargo.toml | 2 +- src/client_server.rs | 11 ++-- src/database/rooms.rs | 2 +- src/database/rooms/edus.rs | 41 ++---------- src/pdu.rs | 4 +- 6 files changed, 82 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79973c0..8800c5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -423,9 +423,9 @@ version = "0.3.0" source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" dependencies = [ "bitflags", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -597,9 +597,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -716,6 +716,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hashbrown" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" +dependencies = [ + "autocfg", +] + [[package]] name = "heck" version = "0.3.1" @@ -847,11 +856,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" dependencies = [ "autocfg", + "hashbrown", ] [[package]] @@ -895,9 +905,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b2b63d60564122f2a7d6592c2f1d6c1c60e7a266b4d24715950a1ddad784f66" +checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2" dependencies = [ "serde", ] @@ -920,9 +930,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" +checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9" [[package]] name = "lock_api" @@ -1257,9 +1267,9 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -1282,9 +1292,9 @@ checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "png" -version = "0.16.6" +version = "0.16.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c150bf7479fafe3dd8740dbe48cc33b2a3efb7b0fe3483aced8bbc39f6d0238d" +checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" dependencies = [ "bitflags", "crc32fast", @@ -1331,9 +1341,9 @@ dependencies = [ [[package]] name = 
"proc-macro2" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" dependencies = [ "unicode-xid 0.2.1", ] @@ -1353,7 +1363,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", ] [[package]] @@ -1429,9 +1439,9 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -1556,7 +1566,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "ruma-api", "ruma-client-api", @@ -1570,7 +1580,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "http", "percent-encoding 2.1.0", @@ -1585,18 +1595,18 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "matches", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "http", "js_int", @@ -1613,7 +1623,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.1.3" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "js_int", "matches", @@ -1626,7 +1636,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "js_int", "ruma-common", @@ -1641,18 +1651,18 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "matches", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = 
"git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "js_int", "matches", @@ -1668,7 +1678,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.0-pre.1" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "matches", "rand", @@ -1679,7 +1689,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "form_urlencoded", "itoa", @@ -1691,7 +1701,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=848b225#848b22568106d05c5444f3fe46070d5aa16e422b" +source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" dependencies = [ "base64 0.12.3", "ring", @@ -1836,9 +1846,9 @@ version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -1975,11 +1985,11 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", "serde", "serde_derive", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -1989,13 +1999,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", "serde", "serde_derive", "serde_json", "sha1", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -2020,9 +2030,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -2050,11 +2060,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" +checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", "unicode-xid 0.2.1", ] @@ -2088,9 +2098,9 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -2135,10 +2145,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", "standback", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -2176,9 +2186,9 @@ version = "0.2.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", ] [[package]] @@ -2385,9 +2395,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", "wasm-bindgen-shared", ] @@ -2419,9 +2429,9 @@ version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf592c807080719d1ff2f245a687cbadb3ed28b2077ed7084b47aba8b691f2c6" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.19", "quote 1.0.7", - "syn 1.0.34", + "syn 1.0.35", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 80785c8..adf7a98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,4 +27,4 @@ reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "848b225" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "b55f827" } diff --git a/src/client_server.rs b/src/client_server.rs index c83f9e2..48c03c3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -64,7 +64,8 @@ use ruma::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, EventJson, EventType, + AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventJson, + EventType, }, identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, }; @@ -284,7 +285,7 @@ pub fn login_route( Ok(login::Response { user_id, access_token: token, - home_server: Some(db.globals.server_name().to_string()), + home_server: Some(db.globals.server_name().to_owned()), device_id: device_id.into(), well_known: None, } @@ -2530,9 +2531,9 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Typing( + &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, - ))) + )) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -2613,7 +2614,7 @@ pub fn sync_route( { edus.push( serde_json::from_str( - &serde_json::to_string(&ruma::events::AnySyncEphemeralRoomEvent::Typing( + &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( db.rooms.edus.roomactives_all(&room_id)?, )) .expect("event is valid, we just created it"), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d29ab42..7e6036d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -515,7 +515,7 @@ impl Rooms { event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), room_id: room_id.clone(), sender: sender.clone(), - origin: globals.server_name().to_string(), + origin: globals.server_name().to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 3ee1e75..b96f1b1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -209,8 +209,10 @@ impl RoomEdus { .unwrap_or(0)) } - /// 
Returns an iterator over all active events (e.g. typing notifications). - pub fn roomactives_all(&self, room_id: &RoomId) -> Result { + pub fn roomactives_all( + &self, + room_id: &RoomId, + ) -> Result> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -234,43 +236,10 @@ impl RoomEdus { user_ids.push(user_id?); } - Ok(ruma::events::typing::TypingEvent { + Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { user_ids }, - room_id: room_id.clone(), }) } - // REMOVE the above method and uncomment the bottom when ruma/ruma PR #141 is merged - // pub fn roomactives_all( - // &self, - // room_id: &RoomId, - // ) -> Result> { - // let mut prefix = room_id.to_string().as_bytes().to_vec(); - // prefix.push(0xff); - - // let mut user_ids = Vec::new(); - - // for user_id in self - // .roomactiveid_userid - // .scan_prefix(prefix) - // .values() - // .map(|user_id| { - // Ok::<_, Error>( - // UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| { - // Error::bad_database("User ID in roomactiveid_userid is invalid unicode.") - // })?) - // .map_err(|_| { - // Error::bad_database("User ID in roomactiveid_userid is invalid.") - // })?, - // ) - // }) - // { - // user_ids.push(user_id?); - // } - - // Ok(SyncEphemeralRoomEvent { - // content: ruma::events::typing::TypingEventContent { user_ids }, - // }) - // } /// Sets a private read marker at `count`. pub fn room_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { diff --git a/src/pdu.rs b/src/pdu.rs index f55acaf..0db77a1 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -6,7 +6,7 @@ use ruma::{ AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventJson, EventType, StateEvent, }, - identifiers::{EventId, RoomId, UserId}, + identifiers::{EventId, RoomId, ServerName, UserId}, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -17,7 +17,7 @@ pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, pub sender: UserId, - pub origin: String, + pub origin: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: EventType, From 84d47da392f323a0b35bf5ce35ee6414b85efd47 Mon Sep 17 00:00:00 2001 From: Devin R Date: Tue, 21 Jul 2020 16:26:01 -0400 Subject: [PATCH 0175/1727] Simplify deserialization and deconstruction for Responses --- src/client_server.rs | 47 +++++++++++++++-------------------------- src/database/globals.rs | 2 +- 2 files changed, 18 insertions(+), 31 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 48c03c3..208e360 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -594,27 +594,18 @@ pub fn get_global_account_data_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let data = db - .account_data + db.account_data .get( None, user_id, &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), )? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - - let data: AnyEvent = data - .deserialize() - .map_err(|_| Error::bad_database("Deserialization of account data failed"))?; - - if let AnyEvent::Basic(data) = data { - Ok(get_global_account_data::Response { - account_data: data.into(), - } - .into()) - } else { - Err(Error::bad_database("Encountered a non account data event.")) - } + .and_then(|ev| { + serde_json::from_str(ev.json().get()) + .map(|data| get_global_account_data::Response { account_data: data }.into()) + .ok() + }) + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found.")) } #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] @@ -2546,14 +2537,12 @@ pub fn sync_route( .account_data .changes_since(Some(&room_id), &user_id, since)? .into_iter() - .map(|(_, v)| { - if let Ok(AnyEvent::Basic(account_event)) = v.deserialize() { - Ok(EventJson::from(account_event)) - } else { - Err(Error::bad_database("found invalid event")) - } + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() }) - .collect::, _>>()?, + .collect::>(), }, summary: sync_events::RoomSummary { heroes, @@ -2690,14 +2679,12 @@ pub fn sync_route( .account_data .changes_since(None, &user_id, since)? .into_iter() - .map(|(_, v)| { - if let Ok(AnyEvent::Basic(account_event)) = v.deserialize() { - Ok(EventJson::from(account_event)) - } else { - Err(Error::bad_database("found invalid event")) - } + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() }) - .collect::, _>>()?, + .collect::>(), }, device_lists: sync_events::DeviceLists { changed: if since != 0 { diff --git a/src/database/globals.rs b/src/database/globals.rs index 1654636..d59e1c3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -31,7 +31,7 @@ impl Globals { .unwrap_or("localhost") .to_string() .try_into() - .map_err(|_| Error::bad_database("Private or public keys are invalid."))?, + .map_err(|_| Error::BadConfig("Invalid server name found."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), }) } From d8e1248df6afbbff62af65d670299bc2f31e7c63 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 22 Jul 2020 21:57:14 +0200 Subject: [PATCH 0176/1727] improvement: switch to stable rust --- Cargo.lock | 238 +++++++++++++++++++------------------------ Cargo.toml | 2 +- rust-toolchain | 2 +- src/client_server.rs | 4 +- src/main.rs | 1 - 5 files changed, 106 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07f6826..7920c56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -94,9 +94,9 @@ version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -142,15 +142,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - [[package]] name = "base64" version = "0.11.0" @@ -313,7 +304,7 @@ dependencies = [ 
"aes-gcm", "base64 0.12.3", "hkdf", - "percent-encoding 2.1.0", + "percent-encoding", "rand", "sha2", "time 0.2.16", @@ -393,7 +384,7 @@ dependencies = [ [[package]] name = "devise" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" dependencies = [ "devise_codegen", "devise_core", @@ -402,21 +393,22 @@ dependencies = [ [[package]] name = "devise_codegen" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" dependencies = [ "devise_core", - "quote 1.0.7", + "quote", ] [[package]] name = "devise_core" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" dependencies = [ "bitflags", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", ] [[package]] @@ -578,9 +570,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -848,6 +840,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "inlinable_string" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb6ee2a7da03bfc3b66ca47c92c2e392fcc053ea040a85561749b026f7aad09a" + [[package]] name = "iovec" version = "0.1.4" @@ -1198,31 +1196,24 @@ dependencies = [ [[package]] name = "pear" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5320f212db967792b67cfe12bd469d08afd6318a249bd917d5c19bc92200ab8a" +version = "0.2.0-dev" +source = "git+https://github.com/SergioBenitez/Pear.git?rev=4b68055#4b680556063568a42fcd4328335cdfdf7608be49" dependencies = [ + "inlinable_string", "pear_codegen", -] - -[[package]] -name = "pear_codegen" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc1c836fdc3d1ef87c348b237b5b5c4dff922156fb2d968f57734f9669768ca" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", - "version_check", "yansi", ] [[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +name = "pear_codegen" +version = "0.2.0-dev" +source = "git+https://github.com/SergioBenitez/Pear.git?rev=4b68055#4b680556063568a42fcd4328335cdfdf7608be49" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", +] [[package]] name = "percent-encoding" @@ -1245,9 +1236,9 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1308,31 +1299,25 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] - [[package]] name = "proc-macro2" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ - "unicode-xid 0.2.1", + "unicode-xid", ] [[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +name = "proc-macro2-diagnostics" +version = "0.1.0" +source = "git+https://github.com/SergioBenitez/proc-macro2-diagnostics.git?rev=13fbb43#13fbb43db72034b6f9660a9b00e338cebd8dcf44" dependencies = [ - "proc-macro2 0.4.30", + "proc-macro2", + "quote", + "syn", + "version_check", + "yansi", ] [[package]] @@ -1341,7 +1326,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2", ] [[package]] @@ -1417,9 +1402,9 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1452,7 +1437,7 @@ dependencies = [ "mime", "mime_guess", "native-tls", - "percent-encoding 2.1.0", + "percent-encoding", "pin-project-lite", "serde", "serde_urlencoded", @@ -1483,7 +1468,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "async-trait", "atomic", @@ -1508,21 +1493,19 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "devise", "glob", "indexmap", - "quote 1.0.7", + "quote", "rocket_http", - "version_check", - "yansi", ] [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=33e95f49008dcbc8dc51da7d37e0570059176b73#33e95f49008dcbc8dc51da7d37e0570059176b73" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "cookie", "http", @@ -1531,14 +1514,14 @@ dependencies = [ "log", "mime", "pear", - "percent-encoding 1.0.1", + "percent-encoding", "ref-cast", "smallvec", "state", "time 0.2.16", "tokio", "tokio-rustls", - "unicode-xid 0.2.1", + "unicode-xid", ] [[package]] @@ -1561,7 +1544,7 @@ version = "0.16.1" source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ "http", - "percent-encoding 
2.1.0", + "percent-encoding", "ruma-api-macros", "ruma-identifiers", "ruma-serde", @@ -1575,9 +1558,9 @@ name = "ruma-api-macros" version = "0.16.1" source = "git+https://github.com/timokoesters/ruma#5a30f9cfc6c168f25cfcf51f3d80b3594c0f59b1" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1629,9 +1612,9 @@ name = "ruma-events-macros" version = "0.21.3" source = "git+https://github.com/ruma/ruma-events?rev=c1ee72d#c1ee72db0f3107a97f6a4273a0ea3fed5c4c30e2" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1724,11 +1707,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" dependencies = [ - "base64 0.10.1", + "base64 0.12.3", "log", "ring", "sct", @@ -1826,9 +1809,9 @@ version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1964,11 +1947,11 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", + "proc-macro2", + "quote", "serde", "serde_derive", - "syn 1.0.33", + "syn", ] [[package]] @@ -1978,13 +1961,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", - "proc-macro2 1.0.18", - "quote 1.0.7", + "proc-macro2", + "quote", "serde", "serde_derive", "serde_json", "sha1", - "syn 1.0.33", + "syn", ] [[package]] @@ -2009,9 +1992,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -2026,26 +2009,15 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - [[package]] name = "syn" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "unicode-xid 0.2.1", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] @@ -2077,9 +2049,9 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -2124,10 +2096,10 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.18", - "quote 1.0.7", + "proc-macro2", + "quote", "standback", - "syn 1.0.33", + "syn", ] [[package]] @@ -2165,16 +2137,16 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "tokio-rustls" -version = "0.12.3" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" +checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" dependencies = [ "futures-core", "rustls", @@ -2208,9 +2180,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.4.10" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" dependencies = [ "serde", ] @@ -2266,12 +2238,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - [[package]] name = "unicode-xid" version = "0.2.1" @@ -2302,7 +2268,7 @@ checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" dependencies = [ "idna", "matches", - "percent-encoding 2.1.0", + "percent-encoding", ] [[package]] @@ -2354,9 +2320,9 @@ dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-shared", ] @@ -2378,7 +2344,7 @@ version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" dependencies = [ - "quote 1.0.7", + "quote", "wasm-bindgen-macro-support", ] @@ -2388,9 +2354,9 @@ version = "0.2.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 30af056..0ca3ac3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "33e95f49008dcbc8dc51da7d37e0570059176b73", features = ["tls"] } +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } http = "0.2.1" log = "0.4.8" sled = "0.32.0" diff --git a/rust-toolchain b/rust-toolchain index b45e88a..50aceaa 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2020-07-12 +1.45.0 diff --git a/src/client_server.rs b/src/client_server.rs index f1a9dbe..2609567 100644 --- a/src/client_server.rs 
+++ b/src/client_server.rs @@ -1872,10 +1872,10 @@ pub async fn get_public_rooms_route( .into()) } -#[post("/_matrix/client/r0/publicRooms", data = "")] +#[post("/_matrix/client/r0/publicRooms", data = "<_body>")] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, - body: Ruma, + _body: Ruma, ) -> ConduitResult { let mut chunk = db .rooms diff --git a/src/main.rs b/src/main.rs index f406a17..1815aec 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,3 @@ -#![feature(proc_macro_hygiene, decl_macro)] #![warn(rust_2018_idioms)] pub mod push_rules; From 630196f0a3ff0653630f6ee30d76cb286a414f67 Mon Sep 17 00:00:00 2001 From: aura Date: Sat, 25 Jul 2020 18:35:22 +0200 Subject: [PATCH 0177/1727] Implement get_joined_rooms (#155) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit whitelist /joined_rooms in sytest Signed-off-by: Aurabindo Pillai style: fmt feat: implement /joined_rooms (#78) Signed-off-by: Aurabindo Pillai Co-authored-by: Aurabindo Pillai Co-authored-by: timokoesters Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/155 Reviewed-by: Timo Kösters --- src/client_server.rs | 19 ++++++++++++++++++- src/main.rs | 1 + src/ruma_wrapper.rs | 7 +++---- sytest/sytest-whitelist | 2 ++ 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 2609567..f4a34b2 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -34,7 +34,7 @@ use ruma::{ media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, kick_user, leave_room, unban_user, + join_room_by_id_or_alias, joined_rooms, kick_user, leave_room, unban_user, }, message::{create_message_event, get_message_events}, presence::set_presence, @@ -1439,6 +1439,23 @@ pub fn create_room_route( Ok(create_room::Response { room_id }.into()) } +#[get("/_matrix/client/r0/joined_rooms", data = "")] +pub fn joined_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + Ok(joined_rooms::Response { + joined_rooms: db + .rooms + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .collect(), + } + .into()) +} + #[put( "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", data = "" diff --git a/src/main.rs b/src/main.rs index 1815aec..ef2b7cc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -66,6 +66,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::join_room_by_id_or_alias_route, client_server::leave_room_route, client_server::forget_room_route, + client_server::joined_rooms_route, client_server::kick_user_route, client_server::ban_user_route, client_server::unban_user_route, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index e3dc5ae..48f5487 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,7 +1,7 @@ use crate::{utils, Error}; use log::warn; use rocket::{ - data::{Data, FromDataFuture, Transform, TransformFuture, Transformed, FromTransformedData}, + data::{Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, Transformed}, http::Status, response::{self, Responder}, Outcome::*, @@ -125,7 +125,7 @@ impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where T: Send + TryInto>>, T::Error: Send, - 'o: 'r + 'o: 'r, { fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { let http_response: Result, _> = self.0.try_into(); @@ -143,8 +143,7 @@ 
where let http_body = http_response.into_body(); - response - .sized_body(http_body.len(), Cursor::new(http_body)); + response.sized_body(http_body.len(), Cursor::new(http_body)); response.raw_header("Access-Control-Allow-Origin", "*"); response.raw_header( diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index f29075c..0d5ff7b 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -29,6 +29,8 @@ GET /publicRooms lists newly-created room GET /register yields a set of flows GET /rooms/:room_id/state fetches entire room state GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +GET /joined_rooms lists newly-created room +/joined_rooms returns only joined rooms Getting push rules doesn't corrupt the cache SYN-390 POST /createRoom makes a private room POST /createRoom makes a private room with invites From 60381ddcf42eb1d55b5f817810af4e47dc0fffed Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 25 Jul 2020 14:25:24 -0400 Subject: [PATCH 0178/1727] Update to latest ruma/master rev --- Cargo.lock | 242 ++++++++++++++++++++++-------------------- Cargo.toml | 2 +- src/client_server.rs | 46 ++++---- src/database/uiaa.rs | 21 ++-- src/database/users.rs | 63 ++++++----- src/push_rules.rs | 83 ++++++++------- src/ruma_wrapper.rs | 9 +- 7 files changed, 248 insertions(+), 218 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2d93d3..d9865b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,57 +23,56 @@ checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" [[package]] name = "aead" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ "generic-array", ] [[package]] name = "aes" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" dependencies = [ "aes-soft", "aesni", - "block-cipher-trait", + "block-cipher", ] [[package]] name = "aes-gcm" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "834a6bda386024dbb7c8fc51322856c10ffe69559f972261c868485f5759c638" +checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" dependencies = [ "aead", "aes", - "block-cipher-trait", + "block-cipher", "ghash", - "subtle 2.2.3", - "zeroize", + "subtle", ] [[package]] name = "aes-soft" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" +checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" dependencies = [ - "block-cipher-trait", + "block-cipher", "byteorder", - "opaque-debug", + "opaque-debug 0.2.3", ] [[package]] name = "aesni" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" +checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" dependencies = [ - "block-cipher-trait", - "opaque-debug", + "block-cipher", + "opaque-debug 0.2.3", ] [[package]] @@ -191,32 +190,20 @@ dependencies = [ [[package]] name = "block-buffer" -version = 
"0.7.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array", -] - -[[package]] -name = "block-cipher-trait" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "generic-array", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "block-cipher" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -225,17 +212,11 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "bytemuck" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431" +checksum = "d40636046a60a45ee5185e885a3ccb771f7a2065fb7cbcc2a7ecfd9896d1c365" [[package]] name = "byteorder" @@ -306,9 +287,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.14.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca761767cf3fa9068cc893ec8c247a22d0fd0535848e65640c0548bd1f8bbb36" +checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ "aes-gcm", "base64 0.12.3", @@ -317,6 +298,7 @@ dependencies = [ "rand", "sha2", "time 0.2.16", + "version_check", ] [[package]] @@ -335,6 +317,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + [[package]] name = "crc32fast" version = "1.2.0" @@ -372,12 +360,12 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array", - "subtle 1.0.0", + "subtle", ] [[package]] @@ -422,9 +410,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ "generic-array", ] @@ -471,12 +459,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fnv" version = "1.0.7" @@ -505,7 +487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" dependencies = [ "matches", - "percent-encoding 2.1.0", + "percent-encoding", ] [[package]] @@ -640,11 +622,12 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.12.3" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" dependencies = [ "typenum", + "version_check", ] [[package]] @@ -660,9 +643,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +checksum = "d6e27f0689a6e15944bdce7e45425efb87eaa8ab0c6e87f11d0987a9133e2531" dependencies = [ "polyval", ] @@ -737,9 +720,9 @@ dependencies = [ [[package]] name = "hkdf" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" +checksum = "fe1149865383e4526a43aee8495f9a325f0b806c63ce6427d06336a590abbbc9" dependencies = [ "digest", "hmac", @@ -747,9 +730,9 @@ dependencies = [ [[package]] name = "hmac" -version = "0.7.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac", "digest", @@ -862,6 +845,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb6ee2a7da03bfc3b66ca47c92c2e392fcc053ea040a85561749b026f7aad09a" +[[package]] +name = "instant" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" + [[package]] name = "iovec" version = "0.1.4" @@ -871,6 +860,12 @@ dependencies = [ "libc", ] +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + [[package]] name = "itoa" version = "0.4.6" @@ -1151,6 +1146,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "openssl" version = "0.10.30" @@ -1289,9 +1290,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" dependencies = [ "cfg-if", "universal-hash", @@ -1317,7 +1318,7 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = 
"proc-macro2" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" dependencies = [ @@ -1434,9 +1435,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" dependencies = [ "base64 0.12.3", "bytes", @@ -1447,6 +1448,7 @@ dependencies = [ "http-body", "hyper", "hyper-tls", + "ipnet", "js-sys", "lazy_static", "log", @@ -1543,26 +1545,27 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "ruma-api", "ruma-client-api", "ruma-common", "ruma-events", "ruma-federation-api", - "ruma-identifiers", + "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers-macros", "ruma-signatures", ] [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "http", "percent-encoding", "ruma-api-macros", - "ruma-identifiers", + "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", "ruma-serde", "serde", "serde_json", @@ -1572,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "proc-macro2", "quote", @@ -1582,14 +1585,14 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "http", "js_int", "ruma-api", "ruma-common", "ruma-events", - "ruma-identifiers", + "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", "ruma-serde", "serde", "serde_json", @@ -1598,11 +1601,10 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.1.3" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "js_int", - "matches", "ruma-serde", "serde", "serde_json", @@ -1612,12 +1614,12 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "js_int", "ruma-common", "ruma-events-macros", - "ruma-identifiers", + "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", "ruma-serde", "serde", "serde_json", @@ -1627,7 +1629,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = 
"git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "proc-macro2", "quote", @@ -1637,14 +1639,13 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "js_int", - "matches", "ruma-api", "ruma-common", "ruma-events", - "ruma-identifiers", + "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", "ruma-serde", "serde", "serde_json", @@ -1652,19 +1653,39 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.0-pre.1" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +version = "0.17.0" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ - "matches", "rand", "serde", "strum", ] +[[package]] +name = "ruma-identifiers" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7046d6ff26cf4f5b8bc77af68527544c61e32cab5810c40035c6491c08da0d3" +dependencies = [ + "serde", + "strum", +] + +[[package]] +name = "ruma-identifiers-macros" +version = "0.17.0" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +dependencies = [ + "proc-macro2", + "quote", + "ruma-identifiers 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syn", +] + [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "form_urlencoded", "itoa", @@ -1676,7 +1697,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=b55f827#b55f82742c88ea6d8744f37bacac5cbfa17a9029" +source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" dependencies = [ "base64 0.12.3", "ring", @@ -1857,14 +1878,15 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" dependencies = [ "block-buffer", + "cfg-if", + "cpuid-bool", "digest", - "fake-simd", - "opaque-debug", + "opaque-debug 0.3.0", ] [[package]] @@ -2010,12 +2032,6 @@ dependencies = [ "syn", ] -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - [[package]] name = "subtle" version = "2.2.3" @@ -2024,7 +2040,7 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" dependencies = [ @@ -2123,9 +2139,9 @@ checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" 
[[package]] name = "tokio" -version = "0.2.21" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" dependencies = [ "bytes", "fnv", @@ -2208,9 +2224,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e2a2de6b0d5cbb13fc21193a2296888eaab62b6044479aafb3c54c01c29fcd" +checksum = "dbdf4ccd1652592b01286a5dbe1e2a77d78afaa34beadd9872a5f7396f92aaa9" dependencies = [ "cfg-if", "log", @@ -2279,12 +2295,12 @@ checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "universal-hash" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array", - "subtle 2.2.3", + "subtle", ] [[package]] @@ -2478,9 +2494,3 @@ name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" - -[[package]] -name = "zeroize" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" diff --git a/Cargo.toml b/Cargo.toml index c805d71..e0f246e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,4 +27,4 @@ reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "b55f827" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "08fbace" } diff --git a/src/client_server.rs b/src/client_server.rs index da29599..f13d6fd 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -169,14 +169,14 @@ pub fn register_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa - .try_auth(&user_id, "", auth, &uiaainfo, &db.users, &db.globals)?; + .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, "", &uiaainfo)?; + db.uiaa.create(&user_id, "".into(), &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } @@ -189,7 +189,7 @@ pub fn register_route( let device_id = body .device_id .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH)); + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); // Generate new token for the device let token = utils::random_string(TOKEN_LENGTH); @@ -300,7 +300,7 @@ pub fn logout_route( let user_id = body.user_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); - db.users.remove_device(&user_id, &device_id)?; + db.users.remove_device(&user_id, device_id)?; Ok(logout::Response.into()) } @@ -340,14 +340,9 @@ pub fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &user_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; + let (worked, uiaainfo) = + db.uiaa + .try_auth(&user_id, device_id, auth, &uiaainfo, &db.users, &db.globals)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -452,11 +447,11 @@ pub fn deactivate_route( pub fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( - RoomVersionId::version_5(), + RoomVersionId::Version5, get_capabilities::RoomVersionStability::Stable, ); available.insert( - RoomVersionId::version_6(), + RoomVersionId::Version6, get_capabilities::RoomVersionStability::Stable, ); @@ -890,7 +885,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.into(), keys); + container.insert(device_id, keys); } } device_keys.insert(user_id.clone(), container); @@ -909,7 +904,7 @@ pub fn get_keys_route( device_display_name: metadata.display_name, }); - container.insert(device_id.as_ref().into(), keys); + container.insert(device_id.clone(), keys); } device_keys.insert(user_id.clone(), container); } @@ -1212,7 +1207,7 @@ pub fn create_room_route( .creation_content .as_ref() .and_then(|c| c.predecessor.clone()); - content.room_version = RoomVersionId::version_6(); + content.room_version = RoomVersionId::Version6; // 1. The room create event db.rooms.append_pdu( @@ -1298,15 +1293,14 @@ pub fn create_room_route( user_id.clone(), EventType::RoomJoinRules, match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(join_rules::JoinRulesEventContent { - join_rule: join_rules::JoinRule::Public, - }) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(join_rules::JoinRulesEventContent { - join_rule: join_rules::JoinRule::Invite, - }) + create_room::RoomPreset::PublicChat => serde_json::to_value( + join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), + ) + .expect("event is valid, we just created it"), + // according to spec "invite" is the default + _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) .expect("event is valid, we just created it"), }, None, diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 1272d5f..d490499 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::uiaa::{AuthData, UiaaInfo}, }, - identifiers::UserId, + identifiers::{DeviceId, UserId}, }; pub struct Uiaa { @@ -13,14 +13,19 @@ pub struct Uiaa { impl Uiaa { /// Creates a new Uiaa session. Make sure the session token is unique. 
- pub fn create(&self, user_id: &UserId, device_id: &str, uiaainfo: &UiaaInfo) -> Result<()> { + pub fn create( + &self, + user_id: &UserId, + device_id: &DeviceId, + uiaainfo: &UiaaInfo, + ) -> Result<()> { self.update_uiaa_session(user_id, device_id, Some(uiaainfo)) } pub fn try_auth( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, users: &super::users::Users, @@ -130,7 +135,7 @@ impl Uiaa { // UIAA was successful! Remove this session and return true self.update_uiaa_session(user_id, device_id, None)?; - return Ok((true, uiaainfo)); + Ok((true, uiaainfo)) } else { panic!("FallbackAcknowledgement is not supported yet"); } @@ -139,12 +144,12 @@ impl Uiaa { fn update_uiaa_session( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); if let Some(uiaainfo) = uiaainfo { self.userdeviceid_uiaainfo.insert( @@ -161,12 +166,12 @@ impl Uiaa { fn get_uiaa_session( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, session: &str, ) -> Result { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); let uiaainfo = serde_json::from_slice::( &self diff --git a/src/database/users.rs b/src/database/users.rs index b103691..3a02a9d 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -177,7 +177,7 @@ impl Users { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); self.userdeviceid_metadata.insert( userdeviceid, @@ -191,16 +191,16 @@ impl Users { .as_bytes(), )?; - self.set_token(user_id, device_id, token)?; + self.set_token(user_id, &device_id, token)?; Ok(()) } /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &str) -> Result<()> { + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); // Remove tokens if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? { @@ -223,7 +223,7 @@ impl Users { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator> { + pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator>> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -237,17 +237,16 @@ impl Users { .next() .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, ) - .map_err(|_| { - Error::bad_database("Device ID in userdeviceid_metadata is invalid.") - })?) + .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? + .into()) }) } /// Replaces the access token of one device. 
- fn set_token(&self, user_id: &UserId, device_id: &str, token: &str) -> Result<()> { + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); // All devices have metadata assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); @@ -268,13 +267,13 @@ impl Users { pub fn add_one_time_key( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, one_time_key_key: &AlgorithmAndDeviceId, one_time_key_value: &OneTimeKey, ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + key.extend_from_slice(device_id.as_str().as_bytes()); // All devices have metadata // Only existing devices should be able to call this. @@ -301,12 +300,12 @@ impl Users { pub fn take_one_time_key( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, key_algorithm: &KeyAlgorithm, ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_str().as_bytes()); prefix.push(0xff); prefix.push(b'"'); // Annoying quotation mark prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); @@ -337,11 +336,11 @@ impl Users { pub fn count_one_time_keys( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); let mut counts = BTreeMap::new(); @@ -370,13 +369,13 @@ impl Users { pub fn add_device_keys( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, device_keys: &DeviceKeys, globals: &super::globals::Globals, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); self.keyid_key.insert( &userdeviceid, @@ -550,10 +549,14 @@ impl Users { }) } - pub fn get_device_keys(&self, user_id: &UserId, device_id: &str) -> Result> { + pub fn get_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + key.extend_from_slice(device_id.as_str().as_bytes()); self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { @@ -633,14 +636,14 @@ impl Users { &self, sender: &UserId, target_user_id: &UserId, - target_device_id: &str, + target_device_id: &DeviceId, event_type: &EventType, content: serde_json::Value, globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(target_device_id.as_bytes()); + key.extend_from_slice(target_device_id.as_str().as_bytes()); key.push(0xff); key.extend_from_slice(&globals.next_count()?.to_be_bytes()); @@ -660,14 +663,14 @@ impl Users { pub fn take_to_device_events( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, max: usize, ) -> Result>> { let mut events = Vec::new(); let mut prefix = user_id.to_string().as_bytes().to_vec(); 
prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_str().as_bytes()); prefix.push(0xff); for result in self.todeviceid_events.scan_prefix(&prefix).take(max) { @@ -685,12 +688,12 @@ impl Users { pub fn update_device_metadata( &self, user_id: &UserId, - device_id: &str, + device_id: &DeviceId, device: &Device, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); // Only existing devices should be able to call this. assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); @@ -706,10 +709,14 @@ impl Users { } /// Get device metadata. - pub fn get_device_metadata(&self, user_id: &UserId, device_id: &str) -> Result> { + pub fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); self.userdeviceid_metadata .get(&userdeviceid)? diff --git a/src/push_rules.rs b/src/push_rules.rs index cb277b9..5bc0d74 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -2,47 +2,46 @@ use js_int::uint; use ruma::{ identifiers::UserId, push::{ - Action, ConditionalPushRule, PatternedPushRule, PushCondition, RoomMemberCountIs, Ruleset, - Tweak, + Action, ConditionalPushRule, ConditionalPushRuleInit, PatternedPushRule, + PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, }, }; pub fn default_pushrules(user_id: &UserId) -> Ruleset { - Ruleset { - content: vec![contains_user_name_rule(&user_id)], - override_: vec![ - master_rule(), - suppress_notices_rule(), - invite_for_me_rule(), - member_event_rule(), - contains_display_name_rule(), - tombstone_rule(), - roomnotif_rule(), - ], - room: vec![], - sender: vec![], - underride: vec![ - call_rule(), - encrypted_room_one_to_one_rule(), - room_one_to_one_rule(), - message_rule(), - encrypted_rule(), - ], - } + let mut rules = Ruleset::default(); + rules.content = vec![contains_user_name_rule(&user_id)]; + rules.override_ = vec![ + master_rule(), + suppress_notices_rule(), + invite_for_me_rule(), + member_event_rule(), + contains_display_name_rule(), + tombstone_rule(), + roomnotif_rule(), + ]; + rules.underride = vec![ + call_rule(), + encrypted_room_one_to_one_rule(), + room_one_to_one_rule(), + message_rule(), + encrypted_rule(), + ]; + rules } pub fn master_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::DontNotify], default: true, enabled: false, rule_id: ".m.rule.master".to_owned(), conditions: vec![], } + .into() } pub fn suppress_notices_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::DontNotify], default: true, enabled: true, @@ -52,10 +51,11 @@ pub fn suppress_notices_rule() -> ConditionalPushRule { pattern: "m.notice".to_owned(), }], } + .into() } pub fn invite_for_me_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("default".to_owned())), @@ -69,10 +69,11 @@ pub fn invite_for_me_rule() -> ConditionalPushRule { pattern: "m.invite".to_owned(), }], } + .into() } pub fn member_event_rule() -> ConditionalPushRule { - ConditionalPushRule { + 
ConditionalPushRuleInit { actions: vec![Action::DontNotify], default: true, enabled: true, @@ -82,10 +83,11 @@ pub fn member_event_rule() -> ConditionalPushRule { pattern: "type".to_owned(), }], } + .into() } pub fn contains_display_name_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("default".to_owned())), @@ -96,10 +98,11 @@ pub fn contains_display_name_rule() -> ConditionalPushRule { rule_id: ".m.rule.contains_display_name".to_owned(), conditions: vec![PushCondition::ContainsDisplayName], } + .into() } pub fn tombstone_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], default: true, enabled: true, @@ -115,10 +118,11 @@ pub fn tombstone_rule() -> ConditionalPushRule { }, ], } + .into() } pub fn roomnotif_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], default: true, enabled: true, @@ -133,10 +137,11 @@ pub fn roomnotif_rule() -> ConditionalPushRule { }, ], } + .into() } pub fn contains_user_name_rule(user_id: &UserId) -> PatternedPushRule { - PatternedPushRule { + PatternedPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("default".to_owned())), @@ -147,10 +152,11 @@ pub fn contains_user_name_rule(user_id: &UserId) -> PatternedPushRule { rule_id: ".m.rule.contains_user_name".to_owned(), pattern: user_id.localpart().to_owned(), } + .into() } pub fn call_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("ring".to_owned())), @@ -164,10 +170,11 @@ pub fn call_rule() -> ConditionalPushRule { pattern: "m.call.invite".to_owned(), }], } + .into() } pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("default".to_owned())), @@ -186,10 +193,11 @@ pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { }, ], } + .into() } pub fn room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![ Action::Notify, Action::SetTweak(Tweak::Sound("default".to_owned())), @@ -208,10 +216,11 @@ pub fn room_one_to_one_rule() -> ConditionalPushRule { }, ], } + .into() } pub fn message_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], default: true, enabled: true, @@ -221,10 +230,11 @@ pub fn message_rule() -> ConditionalPushRule { pattern: "m.room.message".to_owned(), }], } + .into() } pub fn encrypted_rule() -> ConditionalPushRule { - ConditionalPushRule { + ConditionalPushRuleInit { actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], default: true, enabled: true, @@ -234,4 +244,5 @@ pub fn encrypted_rule() -> ConditionalPushRule { pattern: "m.room.encrypted".to_owned(), }], } + .into() } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 48f5487..cd2d21e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -7,7 +7,10 @@ use rocket::{ Outcome::*, Request, State, }; -use ruma::{api::Endpoint, identifiers::UserId}; +use ruma::{ + api::Endpoint, + identifiers::{DeviceId, UserId}, +}; use std::{convert::TryInto, io::Cursor, ops::Deref}; use 
tokio::io::AsyncReadExt; @@ -18,7 +21,7 @@ const MESSAGE_LIMIT: u64 = 20 * 1024 * 1024; // 20 MB pub struct Ruma { pub body: T, pub user_id: Option, - pub device_id: Option, + pub device_id: Option>, pub json_body: Option>, // This is None when body is not a valid string } @@ -63,7 +66,7 @@ impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { match db.users.find_from_token(&token).unwrap() { // TODO: M_UNKNOWN_TOKEN None => return Failure((Status::Unauthorized, ())), - Some((user_id, device_id)) => (Some(user_id), Some(device_id)), + Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), } } else { (None, None) From 87fd432527bcd9f442bd8228bb81cc330123a7f6 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 25 Jul 2020 15:48:12 -0400 Subject: [PATCH 0179/1727] Refactor account_data to be more similar to other endpoints --- src/client_server.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index f13d6fd..9676f7b 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -589,18 +589,19 @@ pub fn get_global_account_data_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - db.account_data + let event = db + .account_data .get( None, user_id, &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), )? - .and_then(|ev| { - serde_json::from_str(ev.json().get()) - .map(|data| get_global_account_data::Response { account_data: data }.into()) - .ok() - }) - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found.")) + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + let data = serde_json::from_str(event.json().get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + Ok(get_global_account_data::Response { account_data: data }.into()) } #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] From 16576d19cd082aa380c472bbfe91a41d2d3624e8 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 3 Jul 2020 11:24:23 +0200 Subject: [PATCH 0180/1727] improvement: only remove to-device events when sure the client received them To make this work, I had to remove the recommended limit of 100 to-device events per /sync (https://matrix.org/docs/spec/client_server/latest#id72) --- src/client_server.rs | 5 ++++- src/database/users.rs | 46 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 9676f7b..0c085e3 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2659,6 +2659,9 @@ pub fn sync_route( } } + // Remove all to-device events the device received *last time* + db.users.remove_to_device_events(user_id, device_id, since)?; + Ok(sync_events::Response { next_batch, rooms: sync_events::Rooms { @@ -2711,7 +2714,7 @@ pub fn sync_route( }, device_one_time_keys_count: Default::default(), // TODO to_device: sync_events::ToDevice { - events: db.users.take_to_device_events(user_id, device_id, 100)?, + events: db.users.get_to_device_events(user_id, device_id)?, }, } .into()) diff --git a/src/database/users.rs b/src/database/users.rs index 3a02a9d..6e6258f 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -11,7 +11,7 @@ use ruma::{ events::{AnyToDeviceEvent, EventJson, EventType}, identifiers::{DeviceId, UserId}, }; -use std::{collections::BTreeMap, convert::TryFrom, time::SystemTime}; +use std::{collections::BTreeMap, convert::TryFrom, mem, 
time::SystemTime}; pub struct Users { pub(super) userid_password: sled::Tree, @@ -660,11 +660,10 @@ impl Users { Ok(()) } - pub fn take_to_device_events( + pub fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, - max: usize, ) -> Result>> { let mut events = Vec::new(); @@ -673,18 +672,51 @@ impl Users { prefix.extend_from_slice(device_id.as_str().as_bytes()); prefix.push(0xff); - for result in self.todeviceid_events.scan_prefix(&prefix).take(max) { - let (key, value) = result?; + for value in self.todeviceid_events.scan_prefix(&prefix).values() { events.push( - serde_json::from_slice(&*value) + serde_json::from_slice(&*value?) .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, ); - self.todeviceid_events.remove(key)?; } Ok(events) } + pub fn remove_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + until: u64, + ) -> Result<()> { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_ref().as_bytes()); + prefix.push(0xff); + + let mut last = prefix.clone(); + last.extend_from_slice(&until.to_be_bytes()); + + for (key, _) in self + .todeviceid_events + .range(&*prefix..=&*last) + .keys() + .map(|key| { + let key = key?; + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) + .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, + )) + }) + .filter_map(|r| r.ok()) + .take_while(|&(_, count)| count <= until) + { + self.todeviceid_events.remove(key)?; + } + + Ok(()) + } + pub fn update_device_metadata( &self, user_id: &UserId, From 09561ccea3773f8581b3a4b78134e495a9b8a0b3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 26 Jul 2020 15:41:28 +0200 Subject: [PATCH 0181/1727] Upgrade ruma --- Cargo.lock | 50 ++++++++----------- Cargo.toml | 2 +- src/client_server.rs | 96 ++++++++++++++++++------------------ src/database/account_data.rs | 12 ++--- src/database/global_edus.rs | 5 +- src/database/globals.rs | 2 +- src/database/key_backups.rs | 2 +- src/database/rooms.rs | 84 ++++++++++++++----------------- src/database/rooms/edus.rs | 7 ++- src/database/uiaa.rs | 6 +-- src/database/users.rs | 32 ++++++------ src/pdu.rs | 29 ++++++----- src/push_rules.rs | 2 +- src/ruma_wrapper.rs | 5 +- 14 files changed, 154 insertions(+), 180 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9865b4..ab5b551 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1545,14 +1545,14 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "ruma-api", "ruma-client-api", "ruma-common", "ruma-events", "ruma-federation-api", - "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers", "ruma-identifiers-macros", "ruma-signatures", ] @@ -1560,12 +1560,12 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "http", "percent-encoding", "ruma-api-macros", - "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers", "ruma-serde", "serde", 
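The retention change above works because `todeviceid_events` keys are laid out as `user_id 0xff device_id 0xff count`, with `count` taken from the global big-endian counter, so "everything the client has already seen" is exactly the set of keys whose trailing count is at most the `since` token of the current /sync. A rough sketch of that scheme, with an in-memory map standing in for the sled tree and all names chosen for illustration only:

use std::collections::BTreeMap;
use std::convert::TryInto;
use std::mem;

// Stand-in for the sled `todeviceid_events` tree: ordered raw keys -> event JSON.
type Tree = BTreeMap<Vec<u8>, String>;

// Key layout: user_id 0xff device_id 0xff count (big-endian u64).
fn todevice_key(user_id: &str, device_id: &str, count: u64) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(device_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key
}

// Return pending events without deleting them; deletion now waits for the next /sync.
fn get_to_device_events(tree: &Tree, user_id: &str, device_id: &str) -> Vec<String> {
    let mut prefix = user_id.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(device_id.as_bytes());
    prefix.push(0xff);

    tree.iter()
        .filter(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| v.clone())
        .collect()
}

// Drop every event the device implicitly acknowledged by syncing with `since >= count`.
fn remove_to_device_events(tree: &mut Tree, user_id: &str, device_id: &str, until: u64) {
    let mut prefix = user_id.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(device_id.as_bytes());
    prefix.push(0xff);

    let stale: Vec<Vec<u8>> = tree
        .keys()
        .filter(|k| k.starts_with(&prefix))
        .filter(|k| {
            let count: [u8; 8] = k[k.len() - mem::size_of::<u64>()..]
                .try_into()
                .expect("key always ends in a u64 count");
            u64::from_be_bytes(count) <= until
        })
        .cloned()
        .collect();

    for key in stale {
        tree.remove(&key);
    }
}

The consequence noted in the commit message follows directly: since nothing is deleted at read time anymore, a fixed per-sync cap of 100 events can no longer be applied safely, so the limit is dropped and the whole backlog is returned until the client acknowledges it.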
"serde_json", @@ -1575,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "proc-macro2", "quote", @@ -1585,14 +1585,14 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "http", "js_int", "ruma-api", "ruma-common", "ruma-events", - "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers", "ruma-serde", "serde", "serde_json", @@ -1602,7 +1602,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "js_int", "ruma-serde", @@ -1614,12 +1614,12 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "js_int", "ruma-common", "ruma-events-macros", - "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers", "ruma-serde", "serde", "serde_json", @@ -1629,7 +1629,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "proc-macro2", "quote", @@ -1639,13 +1639,13 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "js_int", "ruma-api", "ruma-common", "ruma-events", - "ruma-identifiers 0.17.0 (git+https://github.com/ruma/ruma?rev=08fbace)", + "ruma-identifiers", "ruma-serde", "serde", "serde_json", @@ -1653,39 +1653,29 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +version = "0.17.1" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "rand", "serde", "strum", ] -[[package]] -name = "ruma-identifiers" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7046d6ff26cf4f5b8bc77af68527544c61e32cab5810c40035c6491c08da0d3" -dependencies = [ - "serde", - "strum", -] - [[package]] name = "ruma-identifiers-macros" -version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +version = "0.17.1" +source = 
"git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "proc-macro2", "quote", - "ruma-identifiers 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ruma-identifiers", "syn", ] [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "form_urlencoded", "itoa", @@ -1697,7 +1687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=08fbace#08fbace022f732319b9c5d2b97954b935bf39bf2" +source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" dependencies = [ "base64 0.12.3", "ring", diff --git a/Cargo.toml b/Cargo.toml index e0f246e..ec1ee63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,4 +27,4 @@ reqwest = "0.10.6" base64 = "0.12.1" thiserror = "1.0.19" image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "08fbace" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "e047c647ddcb368e7eb1e05ae8823a9494273457" } diff --git a/src/client_server.rs b/src/client_server.rs index 0c085e3..3083ff2 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -64,10 +64,9 @@ use ruma::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventJson, - EventType, + AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventType, }, - identifiers::{RoomAliasId, RoomId, RoomVersionId, UserId}, + Raw, RoomAliasId, RoomId, RoomVersionId, UserId, }; use serde_json::json; @@ -624,7 +623,7 @@ pub fn set_displayname_route( EventType::RoomMember, serde_json::to_value(ruma::events::room::member::MemberEventContent { displayname: body.displayname.clone(), - ..serde_json::from_value::>( + ..serde_json::from_value::>( db.rooms .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or_else(|| { @@ -659,7 +658,7 @@ pub fn set_displayname_route( .try_into() .expect("time is valid"), ), - presence: ruma::events::presence::PresenceState::Online, + presence: ruma::presence::PresenceState::Online, status_msg: None, }, sender: user_id.clone(), @@ -714,7 +713,7 @@ pub fn set_avatar_url_route( EventType::RoomMember, serde_json::to_value(ruma::events::room::member::MemberEventContent { avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( + ..serde_json::from_value::>( db.rooms .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? 
.ok_or_else(|| { @@ -749,7 +748,7 @@ pub fn set_avatar_url_route( .try_into() .expect("time is valid"), ), - presence: ruma::events::presence::PresenceState::Online, + presence: ruma::presence::PresenceState::Online, status_msg: None, }, sender: user_id.clone(), @@ -1605,7 +1604,7 @@ pub fn leave_room_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_value::>( + let mut event = serde_json::from_value::>( db.rooms .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or(Error::BadRequest( @@ -1643,20 +1642,19 @@ pub fn kick_user_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let mut event = - serde_json::from_value::>( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot kick member that's not in the room.", - ))? - .content - .clone(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let mut event = serde_json::from_value::>( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot kick member that's not in the room.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))? + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason @@ -1697,7 +1695,7 @@ pub fn ban_user_route( third_party_invite: None, }), |event| { - let mut event = serde_json::from_value::>( + let mut event = serde_json::from_value::>( event.content.clone(), ) .map_err(|_| Error::bad_database("Invalid member event in database."))? @@ -1730,20 +1728,19 @@ pub fn unban_user_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let mut event = - serde_json::from_value::>( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot unban a user who is not banned.", - ))? - .content - .clone(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let mut event = serde_json::from_value::>( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot unban a user who is not banned.", + ))? + .content + .clone(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))? + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Leave; @@ -1902,7 +1899,7 @@ pub async fn get_public_rooms_filtered_route( aliases: Vec::new(), canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { Ok(serde_json::from_value::< - EventJson, + Raw, >(s.content.clone()) .map_err(|_| Error::bad_database("Invalid canonical alias event in database."))? 
.deserialize() @@ -1910,7 +1907,7 @@ pub async fn get_public_rooms_filtered_route( .alias) })?, name: state.get(&(EventType::RoomName, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::>( + Ok(serde_json::from_value::>( s.content.clone(), ) .map_err(|_| Error::bad_database("Invalid room name event in database."))? @@ -1923,7 +1920,7 @@ pub async fn get_public_rooms_filtered_route( room_id, topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { Ok(Some(serde_json::from_value::< - EventJson, + Raw, >(s.content.clone()) .map_err(|_| Error::bad_database("Invalid room topic event in database."))? .deserialize() @@ -1932,7 +1929,7 @@ pub async fn get_public_rooms_filtered_route( })?, world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { Ok(serde_json::from_value::< - EventJson, + Raw, >(s.content.clone()) .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))? .deserialize() @@ -1941,7 +1938,7 @@ pub async fn get_public_rooms_filtered_route( })?, guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { Ok(serde_json::from_value::< - EventJson, + Raw, >(s.content.clone()) .map_err(|_| Error::bad_database("Invalid room guest access event in database."))? .deserialize() @@ -1950,7 +1947,7 @@ pub async fn get_public_rooms_filtered_route( })?, avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( Ok::<_, Error>(None),|s| { Ok(Some(serde_json::from_value::< - EventJson, + Raw, >(s.content.clone()) .map_err(|_| Error::bad_database("Invalid room avatar event in database."))? .deserialize() @@ -2158,7 +2155,7 @@ pub fn create_state_event_for_key_route( if body.event_type == EventType::RoomCanonicalAlias { let canonical_alias = serde_json::from_value::< - EventJson, + Raw, >(content.clone()) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))? .deserialize() @@ -2375,9 +2372,9 @@ pub fn sync_route( send_member_count = true; if !joined_since_last_sync && pdu.state_key == Some(user_id.to_string()) { let content = serde_json::from_value::< - EventJson, + Raw, >(pdu.content.clone()) - .expect("EventJson::from_value always works") + .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database."))?; if content.membership == ruma::events::room::member::MembershipState::Join { @@ -2410,7 +2407,7 @@ pub fn sync_route( .filter(|pdu| pdu.kind == EventType::RoomMember) .map(|pdu| { let content = serde_json::from_value::< - EventJson, + Raw, >(pdu.content.clone()) .map_err(|_| Error::bad_database("Invalid member event in database."))? 
.deserialize() @@ -2418,7 +2415,7 @@ pub fn sync_route( if let Some(state_key) = &pdu.state_key { let current_content = serde_json::from_value::< - EventJson, + Raw, >( members .get(state_key) @@ -2660,7 +2657,8 @@ pub fn sync_route( } // Remove all to-device events the device received *last time* - db.users.remove_to_device_events(user_id, device_id, since)?; + db.users + .remove_to_device_events(user_id, device_id, since)?; Ok(sync_events::Response { next_batch, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 3b64ba7..8397c12 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,8 +1,8 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, - events::{AnyEvent as EduEvent, EventJson, EventType}, - identifiers::{RoomId, UserId}, + events::{AnyEvent as EduEvent, EventType}, + Raw, RoomId, UserId, }; use std::{collections::HashMap, convert::TryFrom}; @@ -81,7 +81,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, kind: &EventType, - ) -> Result>> { + ) -> Result>> { Ok(self.all(room_id, user_id)?.remove(kind)) } @@ -91,7 +91,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -121,7 +121,7 @@ impl AccountData { .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, ) .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { + serde_json::from_slice::>(&v).map_err(|_| { Error::bad_database("Database contains invalid account data.") })?, )) @@ -139,7 +139,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - ) -> Result>> { + ) -> Result>> { self.changes_since(room_id, user_id, 0) } } diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs index f58c7d6..94f2de8 100644 --- a/src/database/global_edus.rs +++ b/src/database/global_edus.rs @@ -1,5 +1,5 @@ use crate::{Error, Result}; -use ruma::events::EventJson; +use ruma::Raw; pub struct GlobalEdus { //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count @@ -47,8 +47,7 @@ impl GlobalEdus { pub fn presence_since( &self, since: u64, - ) -> Result>>> - { + ) -> Result>>> { let first_possible_edu = (since + 1).to_be_bytes().to_vec(); // +1 so we don't send the event at since Ok(self diff --git a/src/database/globals.rs b/src/database/globals.rs index d59e1c3..4578605 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,7 @@ use std::convert::TryInto; use crate::{utils, Error, Result}; -use ruma::identifiers::ServerName; +use ruma::ServerName; pub const COUNTER: &str = "c"; pub struct Globals { diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index eb31b8d..a506564 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::backup::{BackupAlgorithm, KeyData, Sessions}, }, - identifiers::{RoomId, UserId}, + {RoomId, UserId}, }; use std::{collections::BTreeMap, convert::TryFrom}; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7e6036d..5b9659e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -12,9 +12,9 @@ use ruma::{ power_levels::{self, PowerLevelsEventContent}, redaction, }, - EventJson, EventType, + EventType, }, - identifiers::{EventId, RoomAliasId, RoomId, UserId}, + EventId, Raw, RoomAliasId, RoomId, UserId, }; use sled::IVec; use 
std::{ @@ -287,28 +287,24 @@ impl Rooms { }) }, |power_levels| { - Ok( - serde_json::from_value::>( - power_levels.content.clone(), - ) - .expect("EventJson::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, + Ok(serde_json::from_value::>( + power_levels.content.clone(), ) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?) }, )?; let sender_membership = self .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("EventJson::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, + Ok(serde_json::from_value::>( + pdu.content.clone(), ) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership) })?; let sender_power = power_levels.users.get(&sender).map_or_else( @@ -339,24 +335,21 @@ impl Rooms { &target_user_id.to_string(), )? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("EventJson::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, + Ok(serde_json::from_value::>( + pdu.content.clone(), ) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership) })?; - let target_membership = serde_json::from_value::< - EventJson, - >(content.clone()) - .expect("EventJson::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership; + let target_membership = + serde_json::from_value::>(content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership; let target_power = power_levels.users.get(&target_user_id).map_or_else( || { @@ -374,9 +367,9 @@ impl Rooms { self.room_state_get(&room_id, &EventType::RoomJoinRules, "")? .map_or(Ok::<_, Error>(join_rules::JoinRule::Public), |pdu| { Ok(serde_json::from_value::< - EventJson, + Raw, >(pdu.content.clone()) - .expect("EventJson::from_value always works.") + .expect("Raw::from_value always works.") .deserialize() .map_err(|_| { Error::bad_database("Database contains invalid JoinRules event") @@ -581,18 +574,17 @@ impl Rooms { EventType::RoomRedaction => { if let Some(redact_id) = &redacts { // TODO: Reason - let _reason = serde_json::from_value::< - EventJson, - >(content) - .expect("EventJson::from_value always works.") - .deserialize() - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - })? - .reason; + let _reason = + serde_json::from_value::>(content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })? 
+ .reason; self.redact_pdu(&redact_id)?; } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index b96f1b1..22d0166 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use ruma::{ - events::{AnyEvent as EduEvent, EventJson, SyncEphemeralRoomEvent}, - identifiers::{RoomId, UserId}, + events::{AnyEvent as EduEvent, SyncEphemeralRoomEvent}, + Raw, RoomId, UserId, }; use std::convert::TryFrom; @@ -61,8 +61,7 @@ impl RoomEdus { &self, room_id: &RoomId, since: u64, - ) -> Result>>> - { + ) -> Result>>> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index d490499..4366eb2 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::uiaa::{AuthData, UiaaInfo}, }, - identifiers::{DeviceId, UserId}, + DeviceId, UserId, }; pub struct Uiaa { @@ -149,7 +149,7 @@ impl Uiaa { ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); if let Some(uiaainfo) = uiaainfo { self.userdeviceid_uiaainfo.insert( @@ -171,7 +171,7 @@ impl Uiaa { ) -> Result { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); let uiaainfo = serde_json::from_slice::( &self diff --git a/src/database/users.rs b/src/database/users.rs index 6e6258f..5030f32 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,8 +8,8 @@ use ruma::{ keys::{AlgorithmAndDeviceId, CrossSigningKey, DeviceKeys, KeyAlgorithm, OneTimeKey}, }, }, - events::{AnyToDeviceEvent, EventJson, EventType}, - identifiers::{DeviceId, UserId}, + events::{AnyToDeviceEvent, EventType}, + DeviceId, Raw, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; @@ -177,7 +177,7 @@ impl Users { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); self.userdeviceid_metadata.insert( userdeviceid, @@ -200,7 +200,7 @@ impl Users { pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); // Remove tokens if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? { @@ -246,7 +246,7 @@ impl Users { fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); // All devices have metadata assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); @@ -273,7 +273,7 @@ impl Users { ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_str().as_bytes()); + key.extend_from_slice(device_id.as_bytes()); // All devices have metadata // Only existing devices should be able to call this. 
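Most of the churn in this upgrade is mechanical: `EventJson<T>` becomes `Raw<T>`, and identifier types move to the crate root (`ruma::UserId` instead of `ruma::identifiers::UserId`). The call shape stays the same throughout: keep stored content as plain JSON, parse it into the typed view only when needed, and map a failure to a database error. A small self-contained sketch of that two-step pattern; `RawJson` here is a hypothetical stand-in for `ruma::Raw`, not the real type:

use serde::de::DeserializeOwned;
use serde_json::Value;

// Hypothetical stand-in for `Raw<T>`: holds unvalidated JSON, typed only on demand.
struct RawJson<T> {
    json: Value,
    _marker: std::marker::PhantomData<T>,
}

impl<T: DeserializeOwned> RawJson<T> {
    fn from_value(json: Value) -> Self {
        Self { json, _marker: std::marker::PhantomData }
    }

    // The `.deserialize()` step used all over the hunks above.
    fn deserialize(&self) -> Result<T, serde_json::Error> {
        serde_json::from_value(self.json.clone())
    }
}

#[derive(serde::Deserialize)]
struct MemberContent {
    membership: String,
}

fn main() {
    let stored = serde_json::json!({ "membership": "join" });

    // Same shape as `serde_json::from_value::<Raw<MemberEventContent>>(..)?.deserialize()`:
    let content = RawJson::<MemberContent>::from_value(stored)
        .deserialize()
        .map_err(|_| "Invalid member event in database.")
        .unwrap();

    assert_eq!(content.membership, "join");
}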
@@ -305,7 +305,7 @@ impl Users { ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(device_id.as_str().as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); prefix.push(b'"'); // Annoying quotation mark prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); @@ -340,7 +340,7 @@ impl Users { ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); let mut counts = BTreeMap::new(); @@ -375,7 +375,7 @@ impl Users { ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); self.keyid_key.insert( &userdeviceid, @@ -556,7 +556,7 @@ impl Users { ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_str().as_bytes()); + key.extend_from_slice(device_id.as_bytes()); self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { @@ -643,7 +643,7 @@ impl Users { ) -> Result<()> { let mut key = target_user_id.to_string().as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(target_device_id.as_str().as_bytes()); + key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); key.extend_from_slice(&globals.next_count()?.to_be_bytes()); @@ -664,12 +664,12 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { + ) -> Result>> { let mut events = Vec::new(); let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(device_id.as_str().as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); for value in self.todeviceid_events.scan_prefix(&prefix).values() { @@ -690,7 +690,7 @@ impl Users { ) -> Result<()> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(device_id.as_ref().as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); let mut last = prefix.clone(); @@ -725,7 +725,7 @@ impl Users { ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); // Only existing devices should be able to call this. assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); @@ -748,7 +748,7 @@ impl Users { ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_str().as_bytes()); + userdeviceid.extend_from_slice(device_id.as_bytes()); self.userdeviceid_metadata .get(&userdeviceid)? 
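The `to_*_event` helpers in `src/pdu.rs` (the diff that follows) all rely on the same trick, unchanged by the rename: serialize the stored PDU to JSON, re-parse that JSON as the narrower client-facing event type, and wrap the result. A generic sketch of that convert-by-round-trip idea, with made-up struct names in place of the ruma types:

use serde::{de::DeserializeOwned, Serialize};

// Convert between two serde representations of the same data by going through JSON,
// the way `to_room_event`, `to_sync_room_event`, etc. do.
fn convert_via_json<A: Serialize, B: DeserializeOwned>(value: &A) -> serde_json::Result<B> {
    let json = serde_json::to_string(value)?;
    serde_json::from_str(&json)
}

#[derive(Serialize)]
struct StoredPdu {
    event_id: String,
    sender: String,
    content: serde_json::Value,
}

#[derive(serde::Deserialize)]
struct ClientEvent {
    event_id: String,
    sender: String,
}

fn main() {
    let pdu = StoredPdu {
        event_id: "$abc".to_owned(),
        sender: "@alice:server".to_owned(),
        content: serde_json::json!({ "body": "hi" }),
    };

    // A narrower view of the same JSON; fields the client type doesn't know are dropped.
    let event: ClientEvent = convert_via_json(&pdu).expect("PDUs are always valid");
    assert_eq!(event.event_id, "$abc");
}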
diff --git a/src/pdu.rs b/src/pdu.rs index 0db77a1..0cfdb63 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -3,10 +3,9 @@ use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventJson, EventType, - StateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - identifiers::{EventId, RoomId, ServerName, UserId}, + EventId, Raw, RoomId, ServerName, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -79,40 +78,40 @@ impl PduEvent { Ok(()) } - pub fn to_sync_room_event(&self) -> EventJson { + pub fn to_sync_room_event(&self) -> Raw { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::(&json) - .map(EventJson::from) + .map(Raw::from) .expect("AnySyncRoomEvent can always be built from a full PDU event") } - pub fn to_room_event(&self) -> EventJson { + pub fn to_room_event(&self) -> Raw { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::(&json) - .map(EventJson::from) + .map(Raw::from) .expect("AnyRoomEvent can always be built from a full PDU event") } - pub fn to_state_event(&self) -> EventJson { + pub fn to_state_event(&self) -> Raw { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::(&json) - .map(EventJson::from) + .map(Raw::from) .expect("AnyStateEvent can always be built from a full PDU event") } - pub fn to_sync_state_event(&self) -> EventJson { + pub fn to_sync_state_event(&self) -> Raw { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::(&json) - .map(EventJson::from) + .map(Raw::from) .expect("AnySyncStateEvent can always be built from a full PDU event") } - pub fn to_stripped_state_event(&self) -> EventJson { + pub fn to_stripped_state_event(&self) -> Raw { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::(&json) - .map(EventJson::from) + .map(Raw::from) .expect("AnyStrippedStateEvent can always be built from a full PDU event") } - pub fn to_member_event(&self) -> EventJson> { + pub fn to_member_event(&self) -> Raw> { let json = serde_json::to_string(&self).expect("PDUs are always valid"); serde_json::from_str::>(&json) - .map(EventJson::from) + .map(Raw::from) .expect("StateEvent can always be built from a full PDU event") } } diff --git a/src/push_rules.rs b/src/push_rules.rs index 5bc0d74..a1f32f4 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,6 +1,6 @@ use js_int::uint; use ruma::{ - identifiers::UserId, + UserId, push::{ Action, ConditionalPushRule, ConditionalPushRuleInit, PatternedPushRule, PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index cd2d21e..15e50ba 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -7,10 +7,7 @@ use rocket::{ Outcome::*, Request, State, }; -use ruma::{ - api::Endpoint, - identifiers::{DeviceId, UserId}, -}; +use ruma::{api::Endpoint, DeviceId, UserId}; use std::{convert::TryInto, io::Cursor, ops::Deref}; use tokio::io::AsyncReadExt; From b7df8fe83c0ef18c2b3a2ca27eb2660f6dc1bea1 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 26 Jul 2020 17:34:12 +0200 Subject: [PATCH 0182/1727] fix: stop /messages at `to` Fixes #150 --- src/client_server.rs | 64 ++++++++++++++++++++++++------------------- src/database/rooms.rs | 20 +++++++------- 2 
files changed, 46 insertions(+), 38 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 3083ff2..ab7e515 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2763,7 +2763,7 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let start_token = events_before.last().map_or(Ok(None), |e| { + let start_token = events_before.last().map_or(Ok(None), |(_, e)| { Ok::<_, Error>(Some( db.rooms .get_pdu_count(&e.event_id)? @@ -2774,7 +2774,7 @@ pub fn get_context_route( let events_before = events_before .into_iter() - .map(|pdu| pdu.to_room_event()) + .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); let events_after = db @@ -2789,18 +2789,19 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let end_token = events_after.last().map_or(Ok(None), |e| { - Ok::<_, Error>(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? + let end_token = if let Some(last_event) = events_after.last() { + Some( + utils::u64_from_bytes(&last_event.0) + .map_err(|_| Error::bad_database("Invalid pdu id in db."))? .to_string(), - )) - })?; + ) + } else { + None + }; let events_after = events_after .into_iter() - .map(|pdu| pdu.to_room_event()) + .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); Ok(get_context::Response { @@ -2839,32 +2840,38 @@ pub fn get_message_events_route( .clone() .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + + let to = body.to.as_ref().map(|t| t.as_bytes()); + + // Use limit or else 10 let limit = body .limit .try_into() .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; + match body.dir { get_message_events::Direction::Forward => { let events_after = db .rooms .pdus_after(&user_id, &body.room_id, from) - // Use limit or else 10 .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .take_while(|(k, _)| Some(&**k) != to) // Stop at `to` .collect::>(); - let end_token = events_after.last().map_or(Ok::<_, Error>(None), |e| { - Ok(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? + let end_token = if let Some(last_event) = events_after.last() { + Some( + utils::u64_from_bytes(&last_event.0) + .map_err(|_| Error::bad_database("Invalid pdu id in db."))? .to_string(), - )) - })?; + ) + } else { + None + }; let events_after = events_after .into_iter() - .map(|pdu| pdu.to_room_event()) + .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); Ok(get_message_events::Response { @@ -2879,23 +2886,24 @@ pub fn get_message_events_route( let events_before = db .rooms .pdus_until(&user_id, &body.room_id, from) - // Use limit or else 10 .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .take_while(|(k, _)| Some(&**k) != to) // Stop at `to` .collect::>(); - let start_token = events_before.last().map_or(Ok::<_, Error>(None), |e| { - Ok(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? + let start_token = if let Some(last_event) = events_before.last() { + Some( + utils::u64_from_bytes(&last_event.0) + .map_err(|_| Error::bad_database("Invalid pdu id in db."))? 
.to_string(), - )) - })?; + ) + } else { + None + }; let events_before = events_before .into_iter() - .map(|pdu| pdu.to_room_event()) + .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); Ok(get_message_events::Response { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5b9659e..3ef4f3f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -654,14 +654,14 @@ impl Rooms { })) } - /// Returns an iterator over all events in a room that happened before the event with id - /// `until` in reverse-chronological order. + /// Returns an iterator over all events and their tokens in a room that happened before the + /// event with id `until` in reverse-chronological order. pub fn pdus_until( &self, user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -677,24 +677,24 @@ impl Rooms { .rev() .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(_, v)| { + .map(move |(k, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok(pdu) + Ok((k, pdu)) }) } - /// Returns an iterator over all events in a room that happened after the event with id - /// `from` in chronological order. + /// Returns an iterator over all events and their token in a room that happened after the event + /// with id `from` in chronological order. pub fn pdus_after( &self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -709,13 +709,13 @@ impl Rooms { .range(current..) 
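The fix is possible because `pdus_until` and `pdus_after` (see the rooms.rs hunks around this point) now yield the storage key, i.e. the pagination token, alongside each PDU. The route can then stop with a `take_while` once the `to` token shows up and derive the `start`/`end` tokens from the last returned key instead of doing a second lookup per event. An iterator-level sketch of the same idea, with plain `u64` tokens and string events standing in for the real types:

// Paginate forward from `from`, stopping early if the optional `to` token is reached.
fn page_forward<'a>(
    events: &[(u64, &'a str)], // (token, event) pairs in chronological order
    from: u64,
    to: Option<u64>,
    limit: usize,
) -> (Vec<&'a str>, Option<u64>) {
    let page: Vec<(u64, &'a str)> = events
        .iter()
        .copied()
        .filter(|(token, _)| *token > from)
        .take(limit)
        .take_while(|(token, _)| Some(*token) != to) // stop at `to`, as in the route above
        .collect();

    // The next pagination token is simply the token of the last event returned.
    let end_token = page.last().map(|(token, _)| *token);
    let events_only = page.into_iter().map(|(_, event)| event).collect();

    (events_only, end_token)
}

fn main() {
    let timeline = [(1, "a"), (2, "b"), (3, "c"), (4, "d")];
    let (events, end) = page_forward(&timeline, 1, Some(4), 10);
    assert_eq!(events, vec!["b", "c"]);
    assert_eq!(end, Some(3));
}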
.filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(_, v)| { + .map(move |(k, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok(pdu) + Ok((k, pdu)) }) } From 5a8705bd257c399a54bed9e1109a16901cf70519 Mon Sep 17 00:00:00 2001 From: gnieto Date: Sun, 26 Jul 2020 22:33:20 +0200 Subject: [PATCH 0183/1727] Add room tags (#140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Merge branch 'master' into task/add-tags Add room tagging support Co-authored-by: Timo Kösters Co-authored-by: Guillem Nieto Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/140 Reviewed-by: Timo Kösters --- src/client_server.rs | 177 ++++++++++++++++++++++++++--------- src/database/account_data.rs | 95 ++++++++++--------- src/main.rs | 3 + sytest/sytest-whitelist | 7 +- 4 files changed, 190 insertions(+), 92 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index ab7e515..c8f264c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -51,6 +51,7 @@ use ruma::{ get_state_events_for_empty_key, get_state_events_for_key, }, sync::sync_events, + tag::{create_tag, delete_tag, get_tags}, thirdparty::get_protocols, to_device::{self, send_event_to_device}, typing::create_typing_event, @@ -64,11 +65,10 @@ use ruma::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - AnyBasicEvent, AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventType, + AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventType, }, Raw, RoomAliasId, RoomId, RoomVersionId, UserId, }; -use serde_json::json; const GUEST_NAME_LENGTH: usize = 10; const DEVICE_ID_LENGTH: usize = 10; @@ -205,15 +205,12 @@ pub fn register_route( db.account_data.update( None, &user_id, - &EventType::PushRules, - serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + EventType::PushRules, + &ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: crate::push_rules::default_pushrules(&user_id), }, - }) - .expect("data is valid, we just created it") - .as_object_mut() - .expect("data is valid, we just created it"), + }, &db.globals, )?; @@ -474,23 +471,18 @@ pub fn get_pushrules_all_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - if let AnyEvent::Basic(AnyBasicEvent::PushRules(pushrules)) = db + let event = db .account_data - .get(None, &user_id, &EventType::PushRules)? + .get::(None, &user_id, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", - ))? - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event in db is invalid."))? 
- { - Ok(get_pushrules_all::Response { - global: pushrules.content.global, - } - .into()) - } else { - Err(Error::bad_database("Pushrules event has wrong content.")) + ))?; + + Ok(get_pushrules_all::Response { + global: event.content.global, } + .into()) } #[put( @@ -559,17 +551,16 @@ pub fn set_global_account_data_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let content = serde_json::from_str::(body.data.get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + db.account_data.update( None, user_id, - &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), - json!( - {"content": serde_json::from_str::(body.data.get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))? - } - ) - .as_object_mut() - .expect("we just created a valid object"), + EventType::Custom(event_type), + &content, &db.globals, )?; @@ -588,19 +579,19 @@ pub fn get_global_account_data_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); - let event = db + let data = db .account_data - .get( + .get::( None, user_id, - &EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), + EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - let data = serde_json::from_str(event.json().get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - - Ok(get_global_account_data::Response { account_data: data }.into()) + Ok(get_global_account_data::Response { + account_data: Raw::from(data), + } + .into()) } #[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] @@ -1088,19 +1079,17 @@ pub fn set_read_marker_route( ) -> ConduitResult { let user_id = body.user_id.as_ref().expect("user is authenticated"); + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.fully_read.clone(), + }, + room_id: body.room_id.clone(), + }; db.account_data.update( Some(&body.room_id), &user_id, - &EventType::FullyRead, - serde_json::to_value(ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - room_id: body.room_id.clone(), - }) - .expect("we just created a valid event") - .as_object_mut() - .expect("we just created a valid event"), + EventType::FullyRead, + &fully_read_event, &db.globals, )?; @@ -3318,6 +3307,104 @@ pub fn set_pushers_route() -> ConduitResult { .into()) } +#[put( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + data = "" +)] +pub fn update_tag_route( + db: State<'_, Database>, + _user_id: String, + _room_id: String, + _tag: String, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + let mut tags_event = db + .account_data + .get::(Some(&body.room_id), user_id, EventType::Tag)? 
+ .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }); + tags_event + .content + .tags + .insert(body.tag.to_string(), body.tag_info.clone()); + + db.account_data.update( + Some(&body.room_id), + user_id, + EventType::Tag, + &tags_event, + &db.globals, + )?; + + Ok(create_tag::Response.into()) +} + +#[delete( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + data = "" +)] +pub fn delete_tag_route( + db: State<'_, Database>, + _user_id: String, + _room_id: String, + _tag: String, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + let mut tags_event = db + .account_data + .get::(Some(&body.room_id), user_id, EventType::Tag)? + .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }); + tags_event.content.tags.remove(&body.tag); + + db.account_data.update( + Some(&body.room_id), + user_id, + EventType::Tag, + &tags_event, + &db.globals, + )?; + + Ok(delete_tag::Response.into()) +} + +#[get( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags", + data = "" +)] +pub fn get_tags_route( + db: State<'_, Database>, + _user_id: String, + _room_id: String, + body: Ruma, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + Ok(get_tags::Response { + tags: db + .account_data + .get::(Some(&body.room_id), user_id, EventType::Tag)? + .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }) + .content + .tags, + } + .into()) +} + #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 8397c12..1afbcd6 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,9 +1,11 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::error::ErrorKind, events::{AnyEvent as EduEvent, EventType}, Raw, RoomId, UserId, }; +use serde::de::DeserializeOwned; +use serde::Serialize; +use sled::IVec; use std::{collections::HashMap, convert::TryFrom}; pub struct AccountData { @@ -12,77 +14,55 @@ pub struct AccountData { impl AccountData { /// Places one event in the account data of the user and removes the previous entry. 
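Each tag endpoint is a read-modify-write over the room's account data through the new typed API: fetch the current `m.tag` event with `get::<TagEvent>` (falling back to an empty tag map), mutate the `tags` entry, and persist it again with `update`. A compact sketch of that flow, using an ordinary `BTreeMap` as a hypothetical stand-in for `db.account_data`:

use std::collections::BTreeMap;

// Simplified `m.tag` content: tag name -> tag info (kept as a plain string here).
#[derive(Clone, Default)]
struct TagContent {
    tags: BTreeMap<String, String>,
}

// Hypothetical per-(room, user) account-data store.
type AccountData = BTreeMap<(String, String), TagContent>;

fn update_tag(store: &mut AccountData, room: &str, user: &str, tag: &str, info: &str) {
    let entry = store
        .entry((room.to_owned(), user.to_owned()))
        .or_default(); // like the `.unwrap_or_else(|| empty TagEvent)` above
    entry.tags.insert(tag.to_owned(), info.to_owned());
}

fn delete_tag(store: &mut AccountData, room: &str, user: &str, tag: &str) {
    if let Some(entry) = store.get_mut(&(room.to_owned(), user.to_owned())) {
        entry.tags.remove(tag);
    }
}

fn get_tags(store: &AccountData, room: &str, user: &str) -> BTreeMap<String, String> {
    store
        .get(&(room.to_owned(), user.to_owned()))
        .cloned()
        .unwrap_or_default()
        .tags
}

fn main() {
    let mut store = AccountData::new();
    update_tag(&mut store, "!room:server", "@alice:server", "u.work", "{\"order\":0.5}");
    assert_eq!(get_tags(&store, "!room:server", "@alice:server").len(), 1);
    delete_tag(&mut store, "!room:server", "@alice:server", "u.work");
    assert!(get_tags(&store, "!room:server", "@alice:server").is_empty());
}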
- pub fn update( + pub fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: &EventType, - json: &mut serde_json::Map, + event_type: EventType, + event: &T, globals: &super::globals::Globals, ) -> Result<()> { - if json.get("content").is_none() { - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Json needs to have a content field.", - )); - } - json.insert("type".to_owned(), kind.to_string().into()); - - let user_id_string = user_id.to_string(); - let kind_string = kind.to_string(); - let mut prefix = room_id .map(|r| r.to_string()) .unwrap_or_default() .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id_string.as_bytes()); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); prefix.push(0xff); // Remove old entry - if let Some(old) = self - .roomuserdataid_accountdata - .scan_prefix(&prefix) - .keys() - .rev() - .filter_map(|r| r.ok()) - .take_while(|key| key.starts_with(&prefix)) - .find(|key| { - let user = key.split(|&b| b == 0xff).nth(1); - let k = key.rsplit(|&b| b == 0xff).next(); - - user.filter(|&user| user == user_id_string.as_bytes()) - .is_some() - && k.filter(|&k| k == kind_string.as_bytes()).is_some() - }) - { - // This is the old room_latest - self.roomuserdataid_accountdata.remove(old)?; + if let Some(previous) = self.find_event(room_id, user_id, &event_type) { + let (old_key, _) = previous?; + self.roomuserdataid_accountdata.remove(old_key)?; } let mut key = prefix; key.extend_from_slice(&globals.next_count()?.to_be_bytes()); key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); + key.extend_from_slice(event_type.to_string().as_bytes()); self.roomuserdataid_accountdata.insert( key, - &*serde_json::to_string(&json).expect("Map::to_string always works"), + &*serde_json::to_string(&event).expect("Map::to_string always works"), )?; Ok(()) } - // TODO: Optimize /// Searches the account data for a specific kind. - pub fn get( + pub fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: &EventType, - ) -> Result>> { - Ok(self.all(room_id, user_id)?.remove(kind)) + kind: EventType, + ) -> Result> { + self.find_event(room_id, user_id, &kind) + .map(|r| { + let (_, v) = r?; + serde_json::from_slice(&v).map_err(|_| Error::BadDatabase("could not deserialize")) + }) + .transpose() } /// Returns all changes to the account data that happened after `since`. @@ -134,12 +114,37 @@ impl AccountData { Ok(userdata) } - /// Returns all account data. 
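The rewritten `get` above and the `find_event` helper that follows lean on the key layout `update` writes: `room_id 0xff user_id 0xff count 0xff event_type`. Scanning the `room_id`/`user_id` prefix in reverse order and matching on the bytes after the last `0xff` therefore returns the most recent entry of the requested kind. A minimal sketch of that lookup over an ordered map (key construction and names are illustrative):

use std::collections::BTreeMap;

type Tree = BTreeMap<Vec<u8>, String>;

fn account_data_key(room: &str, user: &str, count: u64, event_type: &str) -> Vec<u8> {
    let mut key = room.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user.as_bytes());
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(event_type.as_bytes());
    key
}

// Latest account-data entry of `event_type` for this room/user, if any.
fn find_event<'a>(tree: &'a Tree, room: &str, user: &str, event_type: &str) -> Option<&'a String> {
    let mut prefix = room.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(user.as_bytes());
    prefix.push(0xff);

    tree.iter()
        .rev() // highest counts first
        .filter(|(k, _)| k.starts_with(&prefix))
        .find(|(k, _)| {
            // The event type is everything after the last 0xff separator.
            k.rsplit(|&b| b == 0xff).next() == Some(event_type.as_bytes())
        })
        .map(|(_, v)| v)
}

fn main() {
    let mut tree = Tree::new();
    tree.insert(account_data_key("!r:s", "@u:s", 1, "m.tag"), "old".into());
    tree.insert(account_data_key("!r:s", "@u:s", 2, "m.tag"), "new".into());

    assert_eq!(
        find_event(&tree, "!r:s", "@u:s", "m.tag").map(String::as_str),
        Some("new")
    );
}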
- pub fn all( + fn find_event( &self, room_id: Option<&RoomId>, user_id: &UserId, - ) -> Result>> { - self.changes_since(room_id, user_id, 0) + kind: &EventType, + ) -> Option> { + let mut prefix = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.push(0xff); + let kind = kind.clone(); + + self.roomuserdataid_accountdata + .scan_prefix(prefix) + .rev() + .find(move |r| { + r.as_ref() + .map(|(k, _)| { + k.rsplit(|&b| b == 0xff) + .next() + .map(|current_event_type| { + current_event_type == kind.to_string().as_bytes() + }) + .unwrap_or(false) + }) + .unwrap_or(false) + }) + .map(|r| Ok(r?)) } } diff --git a/src/main.rs b/src/main.rs index ef2b7cc..a530a20 100644 --- a/src/main.rs +++ b/src/main.rs @@ -99,6 +99,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::update_device_route, client_server::delete_device_route, client_server::delete_devices_route, + client_server::get_tags_route, + client_server::update_tag_route, + client_server::delete_tag_route, client_server::options_route, client_server::upload_signing_keys_route, client_server::upload_signatures_route, diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 0d5ff7b..a2766de 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -1,3 +1,4 @@ +/joined_rooms returns only joined rooms 3pid invite join valid signature but revoked keys are rejected 3pid invite join valid signature but unreachable ID server are rejected 3pid invite join with wrong but valid signature are rejected @@ -6,9 +7,12 @@ After deactivating account, can't log in with an email Alternative server names do not cause a routing loop Both GET and PUT work Can add account data +Can add tag Can create filter +Can list tags for a room Can logout all devices Can read configuration endpoint +Can remove tag Can send a message directly to a device using PUT /sendToDevice Can upload with ASCII file name Can upload with Unicode file name @@ -22,6 +26,7 @@ GET /devices GET /events with negative 'limit' GET /events with non-numeric 'limit' GET /events with non-numeric 'timeout' +GET /joined_rooms lists newly-created room GET /login yields a set of flows GET /media/r0/download can fetch the value again GET /profile/:user_id/displayname publicly accessible @@ -29,8 +34,6 @@ GET /publicRooms lists newly-created room GET /register yields a set of flows GET /rooms/:room_id/state fetches entire room state GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /joined_rooms lists newly-created room -/joined_rooms returns only joined rooms Getting push rules doesn't corrupt the cache SYN-390 POST /createRoom makes a private room POST /createRoom makes a private room with invites From dc5fb7e33a2bb820071df2e7f443d205690ff427 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 26 Jul 2020 20:41:10 +0200 Subject: [PATCH 0184/1727] feat: encryption_disabled config option Can be used with ROCKET_ENCRYPTION_DISABLED=true --- Rocket-example.toml | 2 ++ src/client_server.rs | 5 +++++ src/database/globals.rs | 6 ++++++ src/database/rooms.rs | 5 +++++ 4 files changed, 18 insertions(+) diff --git a/Rocket-example.toml b/Rocket-example.toml index 30a3c3a..677b8e1 100644 --- a/Rocket-example.toml +++ b/Rocket-example.toml @@ -1,6 +1,7 @@ [global] server_name = "your.server.name" #registration_disabled = true +#encryption_disabled = true # Default path is in this user's data #database_path = 
"/home/timo/MyConduitServer" @@ -8,6 +9,7 @@ server_name = "your.server.name" port = 14004 address = "0.0.0.0" +# Not necessary when using a reverse proxy #[global.tls] #certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" #key = "/etc/letsencrypt/live/your.server.name/privkey.pem" diff --git a/src/client_server.rs b/src/client_server.rs index c8f264c..e6a2c23 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1341,6 +1341,11 @@ pub fn create_room_route( content, } in &body.initial_state { + // Silently skip encryption events if they are not allowed + if event_type == &EventType::RoomEncryption && db.globals.encryption_disabled() { + continue + } + db.rooms.append_pdu( room_id.clone(), user_id.clone(), diff --git a/src/database/globals.rs b/src/database/globals.rs index 4578605..3a257a5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -10,6 +10,7 @@ pub struct Globals { reqwest_client: reqwest::Client, server_name: Box, registration_disabled: bool, + encryption_disabled: bool, } impl Globals { @@ -33,6 +34,7 @@ impl Globals { .try_into() .map_err(|_| Error::BadConfig("Invalid server name found."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), + encryption_disabled: config.get_bool("encryption_disabled").unwrap_or(false), }) } @@ -70,4 +72,8 @@ impl Globals { pub fn registration_disabled(&self) -> bool { self.registration_disabled } + + pub fn encryption_disabled(&self) -> bool { + self.encryption_disabled + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3ef4f3f..298212d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -319,7 +319,12 @@ impl Rooms { Some, ); + // Is the event allowed? if !match event_type { + EventType::RoomEncryption => { + // Don't allow encryption events when it's disabled + !globals.encryption_disabled() + } EventType::RoomMember => { let target_user_id = UserId::try_from(&**state_key).map_err(|_| { Error::BadRequest( From 808a7deae3939ead9c1556957d7ba8ae4d24285a Mon Sep 17 00:00:00 2001 From: timokoesters Date: Sun, 26 Jul 2020 22:45:10 +0200 Subject: [PATCH 0185/1727] cargo fmt --- src/client_server.rs | 2 +- src/push_rules.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index e6a2c23..a3f4760 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1343,7 +1343,7 @@ pub fn create_room_route( { // Silently skip encryption events if they are not allowed if event_type == &EventType::RoomEncryption && db.globals.encryption_disabled() { - continue + continue; } db.rooms.append_pdu( diff --git a/src/push_rules.rs b/src/push_rules.rs index a1f32f4..43afbca 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,10 +1,10 @@ use js_int::uint; use ruma::{ - UserId, push::{ Action, ConditionalPushRule, ConditionalPushRuleInit, PatternedPushRule, PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, }, + UserId, }; pub fn default_pushrules(user_id: &UserId) -> Ruleset { From 27650712016a686c08268e753294b81caef6213c Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 27 Jul 2020 08:19:14 +0200 Subject: [PATCH 0186/1727] Fix CI --- sytest/sytest-whitelist | 42 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index a2766de..ad55536 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -4,22 +4,40 @@ 3pid invite join with wrong but valid signature are 
rejected AS cannot create users outside its own namespace After deactivating account, can't log in with an email +Alias creators can delete alias with no ops +Alias creators can delete canonical alias with no ops Alternative server names do not cause a routing loop Both GET and PUT work Can add account data Can add tag Can create filter +Can delete canonical alias +Can invite users to invite-only rooms Can list tags for a room Can logout all devices +Can logout current device Can read configuration endpoint +Can recv a device message using /sync +Can recv device messages until they are acknowledged Can remove tag Can send a message directly to a device using PUT /sendToDevice +Can send a to-device message to two users which both receive it using /sync +Can send messages with a wildcard device id +Can send messages with a wildcard device id to two devices +Can sync Can upload with ASCII file name Can upload with Unicode file name Can upload without a file name Changing the actions of an unknown default rule fails with 404 Changing the actions of an unknown rule fails with 404 Checking local federation server +Current state appears in timeline in private history +Current state appears in timeline in private history with many messages before +Deleted tags appear in an incremental v2 /sync +Deleting a non-existent alias should return a 404 +Device messages over federation wake up /sync +Device messages wake up /sync +Events come down the correct room GET /device/{deviceId} GET /device/{deviceId} gives a 404 for unknown devices GET /devices @@ -29,12 +47,14 @@ GET /events with non-numeric 'timeout' GET /joined_rooms lists newly-created room GET /login yields a set of flows GET /media/r0/download can fetch the value again +GET /profile/:user_id/avatar_url publicly accessible GET /profile/:user_id/displayname publicly accessible GET /publicRooms lists newly-created room GET /register yields a set of flows GET /rooms/:room_id/state fetches entire room state GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership Getting push rules doesn't corrupt the cache SYN-390 +Newly updated tags appear in an incremental v2 /sync POST /createRoom makes a private room POST /createRoom makes a private room with invites POST /join/:room_id can join a room @@ -65,18 +85,40 @@ POST /rooms/:room_id/ban can ban a user POST /rooms/:room_id/invite can send an invite POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room +POST /rooms/:room_id/state/m.room.name sets name +POST /rooms/:room_id/state/m.room.topic sets topic POSTed media can be thumbnailed PUT /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} updates device fields PUT /directory/room/:room_alias creates alias +PUT /profile/:user_id/avatar_url sets my avatar PUT /profile/:user_id/displayname sets my name PUT /rooms/:room_id/state/m.room.power_levels can set levels PUT power_levels should not explode if the old power levels were empty +Push rules come down in an initial /sync +Read markers appear in incremental v2 /sync +Read markers appear in initial v2 /sync +Read markers can be updated +Regular users can add and delete aliases in the default room configuration +Room creation reports m.room.create to myself +Room creation reports m.room.member to myself +Rooms a user is invited to appear in an incremental sync +Rooms a user is invited to appear in an initial sync +Setting room topic reports m.room.topic to myself Should reject keys claiming to belong to a different user +Tags appear in an 
initial v2 /sync Trying to get push rules with unknown rule_id fails with 404 +Typing events appear in gapped sync +Typing events appear in incremental sync +Typing events appear in initial sync +Uninvited users cannot join the room User appears in user directory User directory correctly update on display name change User in dir while user still shares private rooms User in shared private room does appear in user directory +User is offline if they set_presence=offline in their sync +Users with sufficient power-level can delete other's aliases Version responds 200 OK with valid structure +Wildcard device messages over federation wake up /sync +Wildcard device messages wake up /sync query for user with no keys returns empty key dict From fa2da9e04849ccf2c37e3769d7fc776599186e2e Mon Sep 17 00:00:00 2001 From: CapsizeGlimmer <> Date: Thu, 23 Jul 2020 23:03:24 -0400 Subject: [PATCH 0187/1727] Implement max_request_size config option --- src/client_server.rs | 10 +++++----- src/database/globals.rs | 16 +++++++++++++--- src/ruma_wrapper.rs | 14 ++++++-------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index a3f4760..eede5fd 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2977,11 +2977,11 @@ pub fn send_event_to_device_route( } #[get("/_matrix/media/r0/config")] -pub fn get_media_config_route() -> ConduitResult { - Ok(get_media_config::Response { - upload_size: (20_u32 * 1024 * 1024).into(), // 20 MB - } - .into()) +pub fn get_media_config_route( + db: State<'_, Database>, +) -> ConduitResult { + let upload_size = db.globals.max_request_size().into(); + Ok(get_media_config::Response { upload_size }.into()) } #[post("/_matrix/media/r0/upload", data = "")] diff --git a/src/database/globals.rs b/src/database/globals.rs index 3a257a5..5db2806 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,7 @@ -use std::convert::TryInto; - use crate::{utils, Error, Result}; use ruma::ServerName; +use std::convert::TryInto; + pub const COUNTER: &str = "c"; pub struct Globals { @@ -9,6 +9,7 @@ pub struct Globals { keypair: ruma::signatures::Ed25519KeyPair, reqwest_client: reqwest::Client, server_name: Box, + max_request_size: u32, registration_disabled: bool, encryption_disabled: bool, } @@ -32,7 +33,12 @@ impl Globals { .unwrap_or("localhost") .to_string() .try_into() - .map_err(|_| Error::BadConfig("Invalid server name found."))?, + .map_err(|_| Error::BadConfig("Invalid server_name."))?, + max_request_size: config + .get_int("max_request_size") + .unwrap_or(20 * 1024 * 1024) // Default to 20 MB + .try_into() + .map_err(|_| Error::BadConfig("Invalid max_request_size."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), encryption_disabled: config.get_bool("encryption_disabled").unwrap_or(false), }) @@ -69,6 +75,10 @@ impl Globals { self.server_name.as_ref() } + pub fn max_request_size(&self) -> u32 { + self.max_request_size + } + pub fn registration_disabled(&self) -> bool { self.registration_disabled } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 15e50ba..5b380b3 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -11,8 +11,6 @@ use ruma::{api::Endpoint, DeviceId, UserId}; use std::{convert::TryInto, io::Cursor, ops::Deref}; use tokio::io::AsyncReadExt; -const MESSAGE_LIMIT: u64 = 20 * 1024 * 1024; // 20 MB - /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. 
pub struct Ruma { @@ -40,13 +38,12 @@ impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { ) -> FromDataFuture<'a, Self, Self::Error> { Box::pin(async move { let data = rocket::try_outcome!(outcome.owned()); + let db = request + .guard::>() + .await + .expect("database was loaded"); let (user_id, device_id) = if T::METADATA.requires_authentication { - let db = request - .guard::>() - .await - .expect("database was loaded"); - // Get token from header or query value let token = match request .headers() @@ -76,7 +73,8 @@ impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { http_request = http_request.header(header.name.as_str(), &*header.value); } - let mut handle = data.open().take(MESSAGE_LIMIT); + let limit = db.globals.max_request_size(); + let mut handle = data.open().take(limit.into()); let mut body = Vec::new(); handle.read_to_end(&mut body).await.unwrap(); From 8fcf8e20b825ebe15c89839444b911154247d7c3 Mon Sep 17 00:00:00 2001 From: CapsizeGlimmer <> Date: Fri, 24 Jul 2020 16:04:01 -0400 Subject: [PATCH 0188/1727] fix MR 153 requests; cargo fmt --- src/client_server.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index eede5fd..79885fa 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2980,8 +2980,10 @@ pub fn send_event_to_device_route( pub fn get_media_config_route( db: State<'_, Database>, ) -> ConduitResult { - let upload_size = db.globals.max_request_size().into(); - Ok(get_media_config::Response { upload_size }.into()) + Ok(get_media_config::Response { + upload_size: db.globals.max_request_size().into(), + } + .into()) } #[post("/_matrix/media/r0/upload", data = "")] From 5f47cbf7dcf09087f2646e97b545a385660e6dfd Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 27 Jul 2020 18:06:27 +0200 Subject: [PATCH 0189/1727] improvement: documentation --- Rocket-example.toml | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/Rocket-example.toml b/Rocket-example.toml index 677b8e1..41b36d3 100644 --- a/Rocket-example.toml +++ b/Rocket-example.toml @@ -1,15 +1,29 @@ [global] +# The name of this server +# Note: If server name != hostname, you need a .well-known file for federation +# to work server_name = "your.server.name" + +port = 14004 + +# Max size for uploads +#max_request_size = 20_000_000 # in bytes, ~20 MB + +# Disable registration. 
No new users will be able to register on this server #registration_disabled = true + +# Disable encryption, so no new encrypted rooms can be created +# Note: existing rooms will continue to work #encryption_disabled = true # Default path is in this user's data #database_path = "/home/timo/MyConduitServer" -port = 14004 +# You should probably leave this at 0.0.0.0 address = "0.0.0.0" -# Not necessary when using a reverse proxy +# TLS support +# Note: Not necessary when using a reverse proxy: #[global.tls] #certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" #key = "/etc/letsencrypt/live/your.server.name/privkey.pem" From 4d4969862a2513c44c59149ec6cb255180a48a11 Mon Sep 17 00:00:00 2001 From: CapsizeGlimmer <> Date: Fri, 10 Jul 2020 02:49:05 -0400 Subject: [PATCH 0190/1727] Implement /rooms//joined_members --- src/client_server.rs | 35 ++++++++++++++++++++++++++++++++++- src/database/rooms.rs | 2 +- src/main.rs | 1 + sytest/sytest-whitelist | 1 + 4 files changed, 37 insertions(+), 2 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 79885fa..c63e8b9 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -34,7 +34,8 @@ use ruma::{ media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, joined_rooms, kick_user, leave_room, unban_user, + join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, + unban_user, }, message::{create_message_event, get_message_events}, presence::set_presence, @@ -1667,6 +1668,38 @@ pub fn kick_user_route( Ok(kick_user::Response.into()) } +#[get("/_matrix/client/r0/rooms/<_room_id>/joined_members", data = "")] +pub fn joined_members_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> ConduitResult { + let user_id = body.user_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(&user_id, &body.room_id).unwrap_or(false) { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You aren't a member of the room.", + )); + } + + let mut joined = BTreeMap::new(); + for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + let display_name = db.users.displayname(&user_id)?; + let avatar_url = db.users.avatar_url(&user_id)?; + + joined.insert( + user_id, + joined_members::RoomMember { + display_name, + avatar_url, + }, + ); + } + + Ok(joined_members::Response { joined }.into()) +} + #[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] pub fn ban_user_route( db: State<'_, Database>, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 298212d..0395cc2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -81,7 +81,7 @@ impl Rooms { Ok(hashmap) } - /// Returns the full room state. + /// Returns the all state entries for this type. 
pub fn room_state_type( &self, room_id: &RoomId, diff --git a/src/main.rs b/src/main.rs index a530a20..ba22b64 100644 --- a/src/main.rs +++ b/src/main.rs @@ -64,6 +64,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_alias_route, client_server::join_room_by_id_route, client_server::join_room_by_id_or_alias_route, + client_server::joined_members_route, client_server::leave_room_route, client_server::forget_room_route, client_server::joined_rooms_route, diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index ad55536..ec2aba4 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -1,3 +1,4 @@ +/joined_members return joined members /joined_rooms returns only joined rooms 3pid invite join valid signature but revoked keys are rejected 3pid invite join valid signature but unreachable ID server are rejected From 42e0102a2a1de4b08afd52cb7ed26b692047733e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 25 Jul 2020 23:08:00 -0400 Subject: [PATCH 0191/1727] Conditionally emit rocket::http_verb attr macros if lib/bin --- Cargo.lock | 2 - Cargo.toml | 44 ++-- src/client_server.rs | 491 ++++++++++++++++++++++++++++++++----------- src/error.rs | 23 +- src/lib.rs | 24 +++ src/main.rs | 1 + src/push_rules.rs | 5 +- src/ruma_wrapper.rs | 33 ++- 8 files changed, 453 insertions(+), 170 deletions(-) create mode 100644 src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index ab5b551..45a5edd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -261,7 +261,6 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.3", "directories", "http", "image", @@ -276,7 +275,6 @@ dependencies = [ "serde_json", "sled", "thiserror", - "tokio", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ec1ee63..02a90cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,19 +12,31 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } -http = "0.2.1" -log = "0.4.8" -sled = "0.32.0" -directories = "2.0.2" -js_int = "0.1.5" -serde_json = { version = "1.0.53", features = ["raw_value"] } -serde = "1.0.111" -tokio = { version = "0.2.21", features = ["macros"] } -rand = "0.7.3" -rust-argon2 = "0.8.2" -reqwest = "0.10.6" -base64 = "0.12.1" -thiserror = "1.0.19" -image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "e047c647ddcb368e7eb1e05ae8823a9494273457" } +# TODO: This can become optional as soon as proper configs are supported +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"], optional = false } # Used to handle requests +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "e047c647ddcb368e7eb1e05ae8823a9494273457" } # Used for matrix spec type definitions and helpers +sled = "0.32.0" # Used for storing data permanently +log = "0.4.8" # Used for emitting log entries +http = "0.2.1" # Used for rocket<->ruma conversions +directories = "2.0.2" # Used to find data directory for default db path +js_int = "0.1.5" # Used for number types 
for ruma +serde_json = { version = "1.0.53", features = ["raw_value"] } # Used for ruma wrapper +serde = "1.0.111" # Used for pdu definition +rand = "0.7.3" # Used for secure identifiers +rust-argon2 = "0.8.2" # Used to hash passwords +reqwest = "0.10.6" # Used to send requests +thiserror = "1.0.19" # Used for conduit::Error type +image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images + +[features] +default = ["conduit_bin"] +conduit_bin = [] # TODO: add rocket to this when it is optional + +[[bin]] +name = "conduit" +path = "src/main.rs" +required-features = ["conduit_bin"] + +[lib] +name = "conduit" +path = "src/lib.rs" diff --git a/src/client_server.rs b/src/client_server.rs index c63e8b9..d1addc8 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -7,7 +7,12 @@ use std::{ use crate::{utils, ConduitResult, Database, Error, Ruma}; use keys::{upload_signatures, upload_signing_keys}; use log::warn; + +#[cfg(not(feature = "conduit_bin"))] +use super::State; +#[cfg(feature = "conduit_bin")] use rocket::{delete, get, options, post, put, State}; + use ruma::{ api::client::{ error::ErrorKind, @@ -77,7 +82,7 @@ const TOKEN_LENGTH: usize = 256; const MXC_LENGTH: usize = 256; const SESSION_ID_LENGTH: usize = 256; -#[get("/_matrix/client/versions")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] pub fn get_supported_versions_route() -> ConduitResult { let mut unstable_features = BTreeMap::new(); @@ -90,7 +95,10 @@ pub fn get_supported_versions_route() -> ConduitResult, body: Ruma, @@ -120,7 +128,10 @@ pub fn get_register_available_route( Ok(get_username_availability::Response { available: true }.into()) } -#[post("/_matrix/client/r0/register", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/register", data = "") +)] pub fn register_route( db: State<'_, Database>, body: Ruma, @@ -223,7 +234,7 @@ pub fn register_route( .into()) } -#[get("/_matrix/client/r0/login")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] pub fn get_login_route() -> ConduitResult { Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], @@ -231,7 +242,10 @@ pub fn get_login_route() -> ConduitResult { .into()) } -#[post("/_matrix/client/r0/login", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/login", data = "") +)] pub fn login_route( db: State<'_, Database>, body: Ruma, @@ -289,7 +303,10 @@ pub fn login_route( .into()) } -#[post("/_matrix/client/r0/logout", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/logout", data = "") +)] pub fn logout_route( db: State<'_, Database>, body: Ruma, @@ -302,7 +319,10 @@ pub fn logout_route( Ok(logout::Response.into()) } -#[post("/_matrix/client/r0/logout/all", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/logout/all", data = "") +)] pub fn logout_all_route( db: State<'_, Database>, body: Ruma, @@ -318,7 +338,10 @@ pub fn logout_all_route( Ok(logout_all::Response.into()) } -#[post("/_matrix/client/r0/account/password", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/account/password", data = "") +)] pub fn change_password_route( db: State<'_, Database>, body: Ruma, @@ -367,7 +390,10 @@ pub fn change_password_route( Ok(change_password::Response.into()) } -#[post("/_matrix/client/r0/account/deactivate", data = "")] +#[cfg_attr( + feature = "conduit_bin", + 
post("/_matrix/client/r0/account/deactivate", data = "") +)] pub fn deactivate_route( db: State<'_, Database>, body: Ruma, @@ -440,7 +466,7 @@ pub fn deactivate_route( .into()) } -#[get("/_matrix/client/r0/capabilities")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] pub fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( @@ -465,7 +491,10 @@ pub fn get_capabilities_route() -> ConduitResult { .into()) } -#[get("/_matrix/client/r0/pushrules", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules", data = "") +)] pub fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, @@ -486,10 +515,10 @@ pub fn get_pushrules_all_route( .into()) } -#[put( +#[cfg_attr(feature = "conduit_bin", put( "/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>", //data = "" -)] +))] pub fn set_pushrule_route( //db: State<'_, Database>, //body: Ruma, @@ -502,7 +531,10 @@ pub fn set_pushrule_route( Ok(set_pushrule::Response.into()) } -#[put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled") +)] pub fn set_pushrule_enabled_route( _scope: String, _kind: String, @@ -513,7 +545,10 @@ pub fn set_pushrule_enabled_route( Ok(set_pushrule_enabled::Response.into()) } -#[get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>") +)] pub fn get_filter_route( _user_id: String, _filter_id: String, @@ -531,7 +566,10 @@ pub fn get_filter_route( .into()) } -#[post("/_matrix/client/r0/user/<_user_id>/filter")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/user/<_user_id>/filter") +)] pub fn create_filter_route(_user_id: String) -> ConduitResult { // TODO Ok(create_filter::Response { @@ -540,9 +578,12 @@ pub fn create_filter_route(_user_id: String) -> ConduitResult/account_data/<_type>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" + ) )] pub fn set_global_account_data_route( db: State<'_, Database>, @@ -568,9 +609,12 @@ pub fn set_global_account_data_route( Ok(set_global_account_data::Response.into()) } -#[get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + data = "" + ) )] pub fn get_global_account_data_route( db: State<'_, Database>, @@ -595,7 +639,10 @@ pub fn get_global_account_data_route( .into()) } -#[put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "") +)] pub fn set_displayname_route( db: State<'_, Database>, body: Ruma, @@ -661,7 +708,10 @@ pub fn set_displayname_route( Ok(set_display_name::Response.into()) } -#[get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "") +)] pub fn get_displayname_route( db: State<'_, Database>, body: Ruma, @@ -674,7 +724,10 @@ pub fn get_displayname_route( .into()) } -#[put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "") +)] 
pub fn set_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -751,7 +804,10 @@ pub fn set_avatar_url_route( Ok(set_avatar_url::Response.into()) } -#[get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "") +)] pub fn get_avatar_url_route( db: State<'_, Database>, body: Ruma, @@ -764,7 +820,10 @@ pub fn get_avatar_url_route( .into()) } -#[get("/_matrix/client/r0/profile/<_user_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_user_id>", data = "") +)] pub fn get_profile_route( db: State<'_, Database>, body: Ruma, @@ -789,7 +848,10 @@ pub fn get_profile_route( .into()) } -#[put("/_matrix/client/r0/presence/<_user_id>/status", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/presence/<_user_id>/status", data = "") +)] pub fn set_presence_route( db: State<'_, Database>, body: Ruma, @@ -819,7 +881,10 @@ pub fn set_presence_route( Ok(set_presence::Response.into()) } -#[post("/_matrix/client/r0/keys/upload", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/upload", data = "") +)] pub fn upload_keys_route( db: State<'_, Database>, body: Ruma, @@ -848,7 +913,10 @@ pub fn upload_keys_route( .into()) } -#[post("/_matrix/client/r0/keys/query", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/query", data = "") +)] pub fn get_keys_route( db: State<'_, Database>, body: Ruma, @@ -925,7 +993,10 @@ pub fn get_keys_route( .into()) } -#[post("/_matrix/client/r0/keys/claim", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/claim", data = "") +)] pub fn claim_keys_route( db: State<'_, Database>, body: Ruma, @@ -953,7 +1024,10 @@ pub fn claim_keys_route( .into()) } -#[post("/_matrix/client/unstable/room_keys/version", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/room_keys/version", data = "") +)] pub fn create_backup_route( db: State<'_, Database>, body: Ruma, @@ -966,9 +1040,12 @@ pub fn create_backup_route( Ok(create_backup::Response { version }.into()) } -#[put( - "/_matrix/client/unstable/room_keys/version/<_version>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/unstable/room_keys/version/<_version>", + data = "" + ) )] pub fn update_backup_route( db: State<'_, Database>, @@ -982,7 +1059,10 @@ pub fn update_backup_route( Ok(update_backup::Response.into()) } -#[get("/_matrix/client/unstable/room_keys/version", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/version", data = "") +)] pub fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, @@ -1006,9 +1086,12 @@ pub fn get_latest_backup_route( .into()) } -#[get( - "/_matrix/client/unstable/room_keys/version/<_version>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/unstable/room_keys/version/<_version>", + data = "" + ) )] pub fn get_backup_route( db: State<'_, Database>, @@ -1033,7 +1116,11 @@ pub fn get_backup_route( .into()) } -#[put("/_matrix/client/unstable/room_keys/keys", data = "")] +/// Add the received backup_keys to the database. 
+#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/unstable/room_keys/keys", data = "") +)] pub fn add_backup_keys_route( db: State<'_, Database>, body: Ruma, @@ -1060,7 +1147,10 @@ pub fn add_backup_keys_route( .into()) } -#[get("/_matrix/client/unstable/room_keys/keys", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/keys", data = "") +)] pub fn get_backup_keys_route( db: State<'_, Database>, body: Ruma, @@ -1072,7 +1162,10 @@ pub fn get_backup_keys_route( Ok(get_backup_keys::Response { rooms }.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "") +)] pub fn set_read_marker_route( db: State<'_, Database>, body: Ruma, @@ -1134,9 +1227,12 @@ pub fn set_read_marker_route( Ok(set_read_marker::Response.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", + data = "" + ) )] pub fn create_typing_event_route( db: State<'_, Database>, @@ -1163,7 +1259,10 @@ pub fn create_typing_event_route( Ok(create_typing_event::Response.into()) } -#[post("/_matrix/client/r0/createRoom", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/createRoom", data = "") +)] pub fn create_room_route( db: State<'_, Database>, body: Ruma, @@ -1428,7 +1527,10 @@ pub fn create_room_route( Ok(create_room::Response { room_id }.into()) } -#[get("/_matrix/client/r0/joined_rooms", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/joined_rooms", data = "") +)] pub fn joined_rooms_route( db: State<'_, Database>, body: Ruma, @@ -1445,9 +1547,12 @@ pub fn joined_rooms_route( .into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", + data = "" + ) )] pub fn redact_event_route( db: State<'_, Database>, @@ -1475,7 +1580,10 @@ pub fn redact_event_route( Ok(redact_event::Response { event_id }.into()) } -#[put("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/directory/room/<_room_alias>", data = "") +)] pub fn create_alias_route( db: State<'_, Database>, body: Ruma, @@ -1491,7 +1599,10 @@ pub fn create_alias_route( Ok(create_alias::Response.into()) } -#[delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "") +)] pub fn delete_alias_route( db: State<'_, Database>, body: Ruma, @@ -1502,7 +1613,10 @@ pub fn delete_alias_route( Ok(delete_alias::Response.into()) } -#[get("/_matrix/client/r0/directory/room/<_room_alias>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/directory/room/<_room_alias>", data = "") +)] pub fn get_alias_route( db: State<'_, Database>, body: Ruma, @@ -1527,7 +1641,10 @@ pub fn get_alias_route( .into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/join", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/join", data = "") +)] pub fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, @@ -1562,7 +1679,10 @@ pub fn join_room_by_id_route( .into()) } 
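(The hunks above and below repeat one mechanical change, so the pattern is worth seeing in isolation: `#[cfg_attr(feature = "conduit_bin", ...)]` applies the wrapped Rocket verb attribute only when the `conduit_bin` cargo feature declared in the Cargo.toml hunk of this patch is enabled, and otherwise leaves a plain public function behind for the library build. Below is a minimal self-contained sketch of the same mechanism; the `demo` feature name and the `Handler` type are placeholders for illustration only and are not part of Conduit.)

    // Toy illustration of the #[cfg_attr] gating used throughout this patch.
    // Assumes a scratch crate whose Cargo.toml declares `demo = []` under [features].
    // With `cargo run --features demo` the derive is applied; without the feature the
    // attribute is stripped at compile time, exactly like the rocket::get/put/post
    // attributes gated behind `conduit_bin` above.
    #[cfg_attr(feature = "demo", derive(Debug, Clone))]
    pub struct Handler {
        pub name: String,
    }

    fn main() {
        let h = Handler { name: "example".to_owned() };

        // Uses of the conditionally derived impls are gated the same way, so the
        // toy program builds in both configurations.
        #[cfg(feature = "demo")]
        println!("{:?}", h.clone());

        #[cfg(not(feature = "demo"))]
        println!("built without the demo feature: {}", h.name);
    }
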
-#[post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "") +)] pub fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma, @@ -1591,7 +1711,10 @@ pub fn join_room_by_id_or_alias_route( .into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "") +)] pub fn leave_room_route( db: State<'_, Database>, body: Ruma, @@ -1629,7 +1752,10 @@ pub fn leave_room_route( Ok(leave_room::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "") +)] pub fn kick_user_route( db: State<'_, Database>, body: Ruma, @@ -1668,7 +1794,10 @@ pub fn kick_user_route( Ok(kick_user::Response.into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/joined_members", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_room_id>/joined_members", data = "") +)] pub fn joined_members_route( db: State<'_, Database>, body: Ruma, @@ -1700,7 +1829,10 @@ pub fn joined_members_route( Ok(joined_members::Response { joined }.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "") +)] pub fn ban_user_route( db: State<'_, Database>, body: Ruma, @@ -1747,7 +1879,10 @@ pub fn ban_user_route( Ok(ban_user::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "") +)] pub fn unban_user_route( db: State<'_, Database>, body: Ruma, @@ -1785,7 +1920,10 @@ pub fn unban_user_route( Ok(unban_user::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "") +)] pub fn forget_room_route( db: State<'_, Database>, body: Ruma, @@ -1798,7 +1936,10 @@ pub fn forget_room_route( Ok(forget_room::Response.into()) } -#[post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "") +)] pub fn invite_user_route( db: State<'_, Database>, body: Ruma, @@ -1829,7 +1970,10 @@ pub fn invite_user_route( } } -#[put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "") +)] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1843,7 +1987,10 @@ pub async fn set_room_visibility_route( Ok(set_room_visibility::Response.into()) } -#[get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "") +)] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma, @@ -1859,7 +2006,10 @@ pub async fn get_room_visibility_route( .into()) } -#[get("/_matrix/client/r0/publicRooms", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/publicRooms", data = "") +)] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma, @@ -1908,7 +2058,10 @@ pub async fn get_public_rooms_route( .into()) } 
-#[post("/_matrix/client/r0/publicRooms", data = "<_body>")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/publicRooms", data = "<_body>") +)] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, _body: Ruma, @@ -2020,7 +2173,10 @@ pub async fn get_public_rooms_filtered_route( .into()) } -#[post("/_matrix/client/r0/user_directory/search", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/user_directory/search", data = "") +)] pub fn search_users_route( db: State<'_, Database>, body: Ruma, @@ -2060,7 +2216,10 @@ pub fn search_users_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/members", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_room_id>/members", data = "") +)] pub fn get_member_events_route( db: State<'_, Database>, body: Ruma, @@ -2086,7 +2245,10 @@ pub fn get_member_events_route( .into()) } -#[get("/_matrix/client/r0/thirdparty/protocols")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/thirdparty/protocols") +)] pub fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { @@ -2095,9 +2257,12 @@ pub fn get_protocols_route() -> ConduitResult { .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/event/<_event_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/r0/rooms/<_room_id>/event/<_event_id>", + data = "" + ) )] pub fn get_room_event_route( db: State<'_, Database>, @@ -2124,9 +2289,12 @@ pub fn get_room_event_route( .into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", + data = "" + ) )] pub fn create_message_event_route( db: State<'_, Database>, @@ -2159,9 +2327,12 @@ pub fn create_message_event_route( Ok(create_message_event::Response { event_id }.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" + ) )] pub fn create_state_event_for_key_route( db: State<'_, Database>, @@ -2221,9 +2392,12 @@ pub fn create_state_event_for_key_route( Ok(create_state_event_for_key::Response { event_id }.into()) } -#[put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" + ) )] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, @@ -2268,7 +2442,10 @@ pub fn create_state_event_for_empty_key_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/state", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_room_id>/state", data = "") +)] pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, @@ -2294,9 +2471,12 @@ pub fn get_state_events_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + data = "" + ) )] pub fn get_state_events_for_key_route( db: State<'_, Database>, @@ -2329,9 +2509,12 @@ pub fn get_state_events_for_key_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", - data = "" +#[cfg_attr( + 
feature = "conduit_bin", + get( + "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + data = "" + ) )] pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, @@ -2363,7 +2546,10 @@ pub fn get_state_events_for_empty_key_route( .into()) } -#[get("/_matrix/client/r0/sync", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/sync", data = "") +)] pub fn sync_route( db: State<'_, Database>, body: Ruma, @@ -2703,10 +2889,10 @@ pub fn sync_route( .deserialize() .map_err(|_| Error::bad_database("EDU in database is invalid."))?; if let Some(timestamp) = edu.content.last_active_ago { - let last_active_ago = - js_int::UInt::try_from(utils::millis_since_unix_epoch()) - .expect("time is valid") - - timestamp; + let mut last_active_ago = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + last_active_ago -= timestamp; edu.content.last_active_ago = Some(last_active_ago); } Ok::<_, Error>(edu.into()) @@ -2745,9 +2931,12 @@ pub fn sync_route( .into()) } -#[get( - "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", + data = "" + ) )] pub fn get_context_route( db: State<'_, Database>, @@ -2847,7 +3036,10 @@ pub fn get_context_route( .into()) } -#[get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "") +)] pub fn get_message_events_route( db: State<'_, Database>, body: Ruma, @@ -2944,7 +3136,7 @@ pub fn get_message_events_route( } } -#[get("/_matrix/client/r0/voip/turnServer")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] pub fn turn_server_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2952,7 +3144,7 @@ pub fn turn_server_route() -> ConduitResult { )) } -#[post("/_matrix/client/r0/publicised_groups")] +#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/publicised_groups"))] pub fn publicised_groups_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, @@ -2960,9 +3152,12 @@ pub fn publicised_groups_route() -> ConduitResult/<_txn_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/sendToDevice/<_event_type>/<_txn_id>", + data = "" + ) )] pub fn send_event_to_device_route( db: State<'_, Database>, @@ -3009,7 +3204,7 @@ pub fn send_event_to_device_route( Ok(send_event_to_device::Response.into()) } -#[get("/_matrix/media/r0/config")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] pub fn get_media_config_route( db: State<'_, Database>, ) -> ConduitResult { @@ -3019,7 +3214,10 @@ pub fn get_media_config_route( .into()) } -#[post("/_matrix/media/r0/upload", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/media/r0/upload", data = "") +)] pub fn create_content_route( db: State<'_, Database>, body: Ruma, @@ -3039,9 +3237,12 @@ pub fn create_content_route( Ok(create_content::Response { content_uri: mxc }.into()) } -#[get( - "/_matrix/media/r0/download/<_server_name>/<_media_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/media/r0/download/<_server_name>/<_media_id>", + data = "" + ) )] pub fn get_content_route( db: State<'_, Database>, @@ -3064,9 +3265,12 @@ pub fn get_content_route( } } -#[get( - "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + 
"/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", + data = "" + ) )] pub fn get_content_thumbnail_route( db: State<'_, Database>, @@ -3089,7 +3293,10 @@ pub fn get_content_thumbnail_route( } } -#[get("/_matrix/client/r0/devices", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/devices", data = "") +)] pub fn get_devices_route( db: State<'_, Database>, body: Ruma, @@ -3105,7 +3312,10 @@ pub fn get_devices_route( Ok(get_devices::Response { devices }.into()) } -#[get("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/devices/<_device_id>", data = "") +)] pub fn get_device_route( db: State<'_, Database>, body: Ruma, @@ -3121,7 +3331,10 @@ pub fn get_device_route( Ok(get_device::Response { device }.into()) } -#[put("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/devices/<_device_id>", data = "") +)] pub fn update_device_route( db: State<'_, Database>, body: Ruma, @@ -3142,7 +3355,10 @@ pub fn update_device_route( Ok(update_device::Response.into()) } -#[delete("/_matrix/client/r0/devices/<_device_id>", data = "")] +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/devices/<_device_id>", data = "") +)] pub fn delete_device_route( db: State<'_, Database>, body: Ruma, @@ -3186,7 +3402,10 @@ pub fn delete_device_route( Ok(delete_device::Response.into()) } -#[post("/_matrix/client/r0/delete_devices", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/delete_devices", data = "") +)] pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, @@ -3231,7 +3450,10 @@ pub fn delete_devices_route( Ok(delete_devices::Response.into()) } -#[post("/_matrix/client/unstable/keys/device_signing/upload", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/keys/device_signing/upload", data = "") +)] pub fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma, @@ -3282,7 +3504,10 @@ pub fn upload_signing_keys_route( Ok(upload_signing_keys::Response.into()) } -#[post("/_matrix/client/unstable/keys/signatures/upload", data = "")] +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/keys/signatures/upload", data = "") +)] pub fn upload_signatures_route( db: State<'_, Database>, body: Ruma, @@ -3331,7 +3556,7 @@ pub fn upload_signatures_route( Ok(upload_signatures::Response.into()) } -#[get("/_matrix/client/r0/pushers")] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub fn pushers_route() -> ConduitResult { Ok(get_pushers::Response { pushers: Vec::new(), @@ -3339,7 +3564,7 @@ pub fn pushers_route() -> ConduitResult { .into()) } -#[post("/_matrix/client/r0/pushers/set")] +#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] pub fn set_pushers_route() -> ConduitResult { Ok(get_pushers::Response { pushers: Vec::new(), @@ -3347,9 +3572,12 @@ pub fn set_pushers_route() -> ConduitResult { .into()) } -#[put( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + put( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + data = "" + ) )] pub fn update_tag_route( db: State<'_, Database>, @@ -3384,9 +3612,12 @@ pub fn update_tag_route( Ok(create_tag::Response.into()) } -#[delete( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", - data = "" +#[cfg_attr( + feature = "conduit_bin", + 
delete( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + data = "" + ) )] pub fn delete_tag_route( db: State<'_, Database>, @@ -3418,9 +3649,12 @@ pub fn delete_tag_route( Ok(delete_tag::Response.into()) } -#[get( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags", - data = "" +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags", + data = "" + ) )] pub fn get_tags_route( db: State<'_, Database>, @@ -3445,6 +3679,7 @@ pub fn get_tags_route( .into()) } +#[cfg(feature = "conduit_bin")] #[options("/<_segments..>")] pub fn options_route( _segments: rocket::http::uri::Segments<'_>, diff --git a/src/error.rs b/src/error.rs index 7305073..af5405c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,16 +1,18 @@ -use crate::RumaResponse; -use http::StatusCode; use log::error; -use rocket::{ - response::{self, Responder}, - Request, -}; -use ruma::api::client::{ - error::{Error as RumaError, ErrorKind}, - r0::uiaa::{UiaaInfo, UiaaResponse}, -}; +use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; +#[cfg(feature = "conduit_bin")] +use { + crate::RumaResponse, + http::StatusCode, + rocket::{ + response::{self, Responder}, + Request, + }, + ruma::api::client::{error::Error as RumaError, r0::uiaa::UiaaResponse}, +}; + pub type Result = std::result::Result; #[derive(Error, Debug)] @@ -46,6 +48,7 @@ impl Error { } } +#[cfg(feature = "conduit_bin")] impl<'r, 'o> Responder<'r, 'o> for Error where 'o: 'r, diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..cd5029c --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,24 @@ +pub mod client_server; +mod database; +mod error; +mod pdu; +pub mod push_rules; +mod ruma_wrapper; +mod utils; + +pub use database::Database; +pub use error::{Error, Result}; +pub use pdu::PduEvent; +pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; +use std::ops::Deref; + +pub struct State<'r, T: Send + Sync + 'static>(&'r T); + +impl<'r, T: Send + Sync + 'static> Deref for State<'r, T> { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &T { + self.0 + } +} diff --git a/src/main.rs b/src/main.rs index ba22b64..1feee4d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -13,6 +13,7 @@ mod utils; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; +pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use rocket::{fairing::AdHoc, routes}; diff --git a/src/push_rules.rs b/src/push_rules.rs index 43afbca..32c709e 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,4 +1,3 @@ -use js_int::uint; use ruma::{ push::{ Action, ConditionalPushRule, ConditionalPushRuleInit, PatternedPushRule, @@ -185,7 +184,7 @@ pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { rule_id: ".m.rule.encrypted_room_one_to_one".to_owned(), conditions: vec![ PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(uint!(2)..), + is: RoomMemberCountIs::from(2_u32.into()..), }, PushCondition::EventMatch { key: "type".to_owned(), @@ -208,7 +207,7 @@ pub fn room_one_to_one_rule() -> ConditionalPushRule { rule_id: ".m.rule.room_one_to_one".to_owned(), conditions: vec![ PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(uint!(2)..), + is: RoomMemberCountIs::from(2_u32.into()..), }, PushCondition::EventMatch { key: "type".to_owned(), diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 5b380b3..2a82b10 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,15 +1,24 @@ -use 
crate::{utils, Error}; -use log::warn; -use rocket::{ - data::{Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, Transformed}, - http::Status, - response::{self, Responder}, - Outcome::*, - Request, State, +use crate::Error; +use ruma::identifiers::{DeviceId, UserId}; +use std::{convert::TryInto, ops::Deref}; + +#[cfg(feature = "conduit_bin")] +use { + crate::utils, + log::warn, + rocket::{ + data::{ + Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, Transformed, + }, + http::Status, + response::{self, Responder}, + tokio::io::AsyncReadExt, + Outcome::*, + Request, State, + }, + ruma::api::Endpoint, + std::io::Cursor, }; -use ruma::{api::Endpoint, DeviceId, UserId}; -use std::{convert::TryInto, io::Cursor, ops::Deref}; -use tokio::io::AsyncReadExt; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. @@ -20,6 +29,7 @@ pub struct Ruma { pub json_body: Option>, // This is None when body is not a valid string } +#[cfg(feature = "conduit_bin")] impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { type Error = (); // TODO: Better error handling type Owned = Data; @@ -119,6 +129,7 @@ impl>>> From for RumaResponse { } } +#[cfg(feature = "conduit_bin")] impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where T: Send + TryInto>>, From e6a1b0b66f78d6b77f3a462d7420ae2bd2421e7f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 26 Jul 2020 09:39:57 -0400 Subject: [PATCH 0192/1727] Add cfg_attr gate to server_server endpoints --- src/server_server.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 99d75c4..a214143 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -130,14 +130,14 @@ pub async fn send_request( } } -#[get("/.well-known/matrix/server")] +#[cfg_attr(feature = "conduit_bin",get("/.well-known/matrix/server"))] pub fn well_known_server() -> Json { rocket::response::content::Json( json!({ "m.server": "matrixtesting.koesters.xyz:14004"}).to_string(), ) } -#[get("/_matrix/federation/v1/version")] +#[cfg_attr(feature = "conduit_bin",get("/_matrix/federation/v1/version"))] pub fn get_server_version() -> MatrixResult { MatrixResult(Ok(get_server_version::Response { server: Some(get_server_version::Server { @@ -147,7 +147,7 @@ pub fn get_server_version() -> MatrixResult })) } -#[get("/_matrix/key/v2/server")] +#[cfg_attr(feature = "conduit_bin",get("/_matrix/key/v2/server"))] pub fn get_server_keys(db: State<'_, Database>) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( @@ -177,7 +177,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { Json(response.to_string()) } -#[get("/_matrix/key/v2/server/<_key_id>")] +#[cfg_attr(feature = "conduit_bin",get("/_matrix/key/v2/server/<_key_id>"))] pub fn get_server_keys_deprecated(db: State<'_, Database>, _key_id: String) -> Json { get_server_keys(db) } From cc411c530b9c7c798ca38abe6888cefcc42bfbc1 Mon Sep 17 00:00:00 2001 From: gnieto Date: Wed, 29 Jul 2020 07:48:34 +0200 Subject: [PATCH 0193/1727] Fix custom account data serialization (#165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stylistic change Fix custom data serialization Build a custom event before storing global account data Co-authored-by: Guillem Nieto Reviewed-on: https://git.koesters.xyz/timo/conduit/pulls/165 Reviewed-by: Timo Kösters --- src/client_server.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) 
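(For orientation before the diff: after this change the route no longer writes the raw request body to the account-data tree; it first wraps it in an event that carries the original type. Below is a condensed restatement of the new code path using only names that appear in the hunk that follows; the surrounding function, imports, and the `event_type` binding are elided and error handling is as in the real code.)

    // set_global_account_data_route after this fix: parse the body, then store a
    // BasicEvent whose CustomEventContent keeps the custom event type next to the
    // raw JSON, instead of storing the bare serde_json::Value as before.
    let content = serde_json::from_str::<serde_json::Value>(body.data.get())
        .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;

    db.account_data.update(
        None,
        user_id,
        event_type.clone().into(),
        &BasicEvent {
            content: CustomEventContent {
                event_type,
                json: content,
            },
        },
        &db.globals,
    )?;
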
diff --git a/src/client_server.rs b/src/client_server.rs index d1addc8..c5bba03 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -67,11 +67,12 @@ use ruma::{ unversioned::get_supported_versions, }, events::{ + custom::CustomEventContent, room::{ canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, topic, }, - AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, EventType, + AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, BasicEvent, EventType, }, Raw, RoomAliasId, RoomId, RoomVersionId, UserId, }; @@ -601,8 +602,13 @@ pub fn set_global_account_data_route( db.account_data.update( None, user_id, - EventType::Custom(event_type), - &content, + event_type.clone().into(), + &BasicEvent { + content: CustomEventContent { + event_type, + json: content, + }, + }, &db.globals, )?; From 489cbc0a930ab632a96e7fde6432875976f4d912 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 27 Jul 2020 17:36:54 +0200 Subject: [PATCH 0194/1727] refactor: use <_> instead of <_parameter_name> --- Cargo.lock | 28 +- Cargo.toml | 7 +- src/client_server.rs | 670 ++++++++++++++++++------------------------ src/database.rs | 77 ++++- src/database/rooms.rs | 20 +- src/main.rs | 2 +- src/ruma_wrapper.rs | 6 +- 7 files changed, 407 insertions(+), 403 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45a5edd..37a620b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -275,6 +275,7 @@ dependencies = [ "serde_json", "sled", "thiserror", + "tokio", ] [[package]] @@ -1484,7 +1485,6 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "async-trait", "atomic", @@ -1509,7 +1509,6 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "devise", "glob", @@ -1521,7 +1520,6 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67#8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67" dependencies = [ "cookie", "http", @@ -1543,7 +1541,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "ruma-api", "ruma-client-api", @@ -1558,7 +1556,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "http", "percent-encoding", @@ -1573,7 +1571,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "proc-macro2", "quote", @@ -1583,7 +1581,7 @@ 
dependencies = [ [[package]] name = "ruma-client-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "http", "js_int", @@ -1600,7 +1598,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "js_int", "ruma-serde", @@ -1612,7 +1610,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "js_int", "ruma-common", @@ -1627,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "proc-macro2", "quote", @@ -1637,7 +1635,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "js_int", "ruma-api", @@ -1652,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "rand", "serde", @@ -1662,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "proc-macro2", "quote", @@ -1673,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "form_urlencoded", "itoa", @@ -1685,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=e047c647ddcb368e7eb1e05ae8823a9494273457#e047c647ddcb368e7eb1e05ae8823a9494273457" +source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" dependencies = [ "base64 0.12.3", "ring", 
diff --git a/Cargo.toml b/Cargo.toml index 02a90cd..e5df8dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,8 +13,11 @@ edition = "2018" [dependencies] # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"], optional = false } # Used to handle requests -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "e047c647ddcb368e7eb1e05ae8823a9494273457" } # Used for matrix spec type definitions and helpers +#rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests +rocket = { path = "../rocket/core/lib", features = ["tls"] } + +tokio = "0.2.22" # Used for long polling +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "d5d2d1d893fa12d27960e4c58d6c09b215d06e95" } # Used for matrix spec type definitions and helpers sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries http = "0.2.1" # Used for rocket<->ruma conversions diff --git a/src/client_server.rs b/src/client_server.rs index c5bba03..baeb839 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -11,7 +11,7 @@ use log::warn; #[cfg(not(feature = "conduit_bin"))] use super::State; #[cfg(feature = "conduit_bin")] -use rocket::{delete, get, options, post, put, State}; +use rocket::{delete, get, options, post, put, State, tokio}; use ruma::{ api::client::{ @@ -312,10 +312,10 @@ pub fn logout_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); - db.users.remove_device(&user_id, device_id)?; + db.users.remove_device(&sender_id, device_id)?; Ok(logout::Response.into()) } @@ -328,11 +328,11 @@ pub fn logout_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(user_id) { + for device_id in db.users.all_device_ids(sender_id) { if let Ok(device_id) = device_id { - db.users.remove_device(&user_id, &device_id)?; + db.users.remove_device(&sender_id, &device_id)?; } } @@ -347,7 +347,7 @@ pub fn change_password_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { @@ -363,29 +363,29 @@ pub fn change_password_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa - .try_auth(&user_id, device_id, auth, &uiaainfo, &db.users, &db.globals)?; + .try_auth(&sender_id, device_id, auth, &uiaainfo, &db.users, &db.globals)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
     } else {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa.create(&user_id, &device_id, &uiaainfo)?;
+        db.uiaa.create(&sender_id, &device_id, &uiaainfo)?;
         return Err(Error::Uiaa(uiaainfo));
     }
 
-    db.users.set_password(&user_id, &body.new_password)?;
+    db.users.set_password(&sender_id, &body.new_password)?;
 
     // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma
     // See: https://github.com/ruma/ruma/issues/107
     // Logout all devices except the current one
     for id in db
         .users
-        .all_device_ids(&user_id)
+        .all_device_ids(&sender_id)
         .filter_map(|id| id.ok())
         .filter(|id| id != device_id)
     {
-        db.users.remove_device(&user_id, &id)?;
+        db.users.remove_device(&sender_id, &id)?;
     }
 
     Ok(change_password::Response.into())
@@ -399,7 +399,7 @@ pub fn deactivate_route(
     db: State<'_, Database>,
     body: Ruma,
 ) -> ConduitResult {
-    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
     let device_id = body.device_id.as_ref().expect("user is authenticated");
 
     let mut uiaainfo = UiaaInfo {
@@ -414,7 +414,7 @@ pub fn deactivate_route(
 
     if let Some(auth) = &body.auth {
         let (worked, uiaainfo) = db.uiaa.try_auth(
-            &user_id,
+            &sender_id,
             &device_id,
             auth,
             &uiaainfo,
@@ -427,15 +427,15 @@ pub fn deactivate_route(
         // Success!
     } else {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa.create(&user_id, &device_id, &uiaainfo)?;
+        db.uiaa.create(&sender_id, &device_id, &uiaainfo)?;
         return Err(Error::Uiaa(uiaainfo));
     }
 
     // Leave all joined rooms and reject all invitations
     for room_id in db
         .rooms
-        .rooms_joined(&user_id)
-        .chain(db.rooms.rooms_invited(&user_id))
+        .rooms_joined(&sender_id)
+        .chain(db.rooms.rooms_invited(&sender_id))
     {
         let room_id = room_id?;
         let event = member::MemberEventContent {
@@ -448,18 +448,18 @@ pub fn deactivate_route(
 
         db.rooms.append_pdu(
             room_id.clone(),
-            user_id.clone(),
+            sender_id.clone(),
             EventType::RoomMember,
             serde_json::to_value(event).expect("event is valid, we just created it"),
             None,
-            Some(user_id.to_string()),
+            Some(sender_id.to_string()),
             None,
             &db.globals,
         )?;
     }
 
     // Remove devices and mark account as deactivated
-    db.users.deactivate_account(&user_id)?;
+    db.users.deactivate_account(&sender_id)?;
 
     Ok(deactivate::Response {
         id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
@@ -500,11 +500,11 @@ pub fn get_pushrules_all_route(
     db: State<'_, Database>,
     body: Ruma,
 ) -> ConduitResult {
-    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
 
     let event = db
         .account_data
-        .get::(None, &user_id, EventType::PushRules)?
+        .get::(None, &sender_id, EventType::PushRules)?
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -517,15 +517,12 @@ pub fn get_pushrules_all_route( } #[cfg_attr(feature = "conduit_bin", put( - "/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>", + "/_matrix/client/r0/pushrules/<_>/<_>/<_>", //data = "" ))] pub fn set_pushrule_route( //db: State<'_, Database>, //body: Ruma, - _scope: String, - _kind: String, - _rule_id: String, ) -> ConduitResult { // TODO warn!("TODO: set_pushrule_route"); @@ -534,12 +531,9 @@ pub fn set_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>/enabled") + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") )] pub fn set_pushrule_enabled_route( - _scope: String, - _kind: String, - _rule_id: String, ) -> ConduitResult { // TODO warn!("TODO: set_pushrule_enabled_route"); @@ -548,11 +542,9 @@ pub fn set_pushrule_enabled_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/user/<_user_id>/filter/<_filter_id>") + get("/_matrix/client/r0/user/<_>/filter/<_>") )] pub fn get_filter_route( - _user_id: String, - _filter_id: String, ) -> ConduitResult { // TODO Ok(get_filter::Response { @@ -569,9 +561,9 @@ pub fn get_filter_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/user/<_user_id>/filter") + post("/_matrix/client/r0/user/<_>/filter") )] -pub fn create_filter_route(_user_id: String) -> ConduitResult { +pub fn create_filter_route() -> ConduitResult { // TODO Ok(create_filter::Response { filter_id: utils::random_string(10), @@ -582,17 +574,15 @@ pub fn create_filter_route(_user_id: String) -> ConduitResult/account_data/<_type>", + "/_matrix/client/r0/user/<_>/account_data/<_>", data = "" ) )] pub fn set_global_account_data_route( db: State<'_, Database>, body: Ruma, - _user_id: String, - _type: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let content = serde_json::from_str::(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; @@ -601,7 +591,7 @@ pub fn set_global_account_data_route( db.account_data.update( None, - user_id, + sender_id, event_type.clone().into(), &BasicEvent { content: CustomEventContent { @@ -618,23 +608,21 @@ pub fn set_global_account_data_route( #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/user/<_user_id>/account_data/<_type>", + "/_matrix/client/r0/user/<_>/account_data/<_>", data = "" ) )] pub fn get_global_account_data_route( db: State<'_, Database>, body: Ruma, - _user_id: String, - _type: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let data = db .account_data .get::( None, - user_id, + sender_id, EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), )? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; @@ -647,30 +635,29 @@ pub fn get_global_account_data_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_user_id>/displayname", data = "") + put("/_matrix/client/r0/profile/<_>/displayname", data = "") )] pub fn set_displayname_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); db.users - .set_displayname(&user_id, body.displayname.clone())?; + .set_displayname(&sender_id, body.displayname.clone())?; // Send a new membership event into all joined rooms - for room_id in db.rooms.rooms_joined(&user_id) { + for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(ruma::events::room::member::MemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_value::>( db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? + .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? .ok_or_else(|| { Error::bad_database( "Tried to send displayname update for user not in the room.", @@ -685,7 +672,7 @@ pub fn set_displayname_route( }) .expect("event is valid, we just created it"), None, - Some(user_id.to_string()), + Some(sender_id.to_string()), None, &db.globals, )?; @@ -695,9 +682,9 @@ pub fn set_displayname_route( db.global_edus.update_presence( ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, currently_active: None, - displayname: db.users.displayname(&user_id)?, + displayname: db.users.displayname(&sender_id)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -706,7 +693,7 @@ pub fn set_displayname_route( presence: ruma::presence::PresenceState::Online, status_msg: None, }, - sender: user_id.clone(), + sender: sender_id.clone(), }, &db.globals, )?; @@ -716,30 +703,27 @@ pub fn set_displayname_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_user_id>/displayname", data = "") + get("/_matrix/client/r0/profile/<_>/displayname", data = "") )] pub fn get_displayname_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.body.user_id.clone(); Ok(get_display_name::Response { - displayname: db.users.displayname(&user_id)?, + displayname: db.users.displayname(&body.user_id)?, } .into()) } #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "") + put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] pub fn set_avatar_url_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if let Some(avatar_url) = &body.avatar_url { if !avatar_url.starts_with("mxc://") { @@ -753,20 +737,20 @@ pub fn set_avatar_url_route( // TODO also make sure this is valid mxc:// format (not only starting with it) } - db.users.set_avatar_url(&user_id, body.avatar_url.clone())?; + db.users.set_avatar_url(&sender_id, body.avatar_url.clone())?; // Send a new membership event into all joined rooms - 
for room_id in db.rooms.rooms_joined(&user_id) { + for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(ruma::events::room::member::MemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_value::>( db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &user_id.to_string())? + .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? .ok_or_else(|| { Error::bad_database( "Tried to send avatar url update for user not in the room.", @@ -781,7 +765,7 @@ pub fn set_avatar_url_route( }) .expect("event is valid, we just created it"), None, - Some(user_id.to_string()), + Some(sender_id.to_string()), None, &db.globals, )?; @@ -791,9 +775,9 @@ pub fn set_avatar_url_route( db.global_edus.update_presence( ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, currently_active: None, - displayname: db.users.displayname(&user_id)?, + displayname: db.users.displayname(&sender_id)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -802,7 +786,7 @@ pub fn set_avatar_url_route( presence: ruma::presence::PresenceState::Online, status_msg: None, }, - sender: user_id.clone(), + sender: sender_id.clone(), }, &db.globals, )?; @@ -812,32 +796,28 @@ pub fn set_avatar_url_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_user_id>/avatar_url", data = "") + get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] pub fn get_avatar_url_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.body.user_id.clone(); Ok(get_avatar_url::Response { - avatar_url: db.users.avatar_url(&user_id)?, + avatar_url: db.users.avatar_url(&body.user_id)?, } .into()) } #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_user_id>", data = "") + get("/_matrix/client/r0/profile/<_>", data = "") )] pub fn get_profile_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.body.user_id.clone(); - let avatar_url = db.users.avatar_url(&user_id)?; - let displayname = db.users.displayname(&user_id)?; + let avatar_url = db.users.avatar_url(&body.user_id)?; + let displayname = db.users.displayname(&body.user_id)?; if avatar_url.is_none() && displayname.is_none() { // Return 404 if we don't have a profile for this id @@ -856,21 +836,20 @@ pub fn get_profile_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/presence/<_user_id>/status", data = "") + put("/_matrix/client/r0/presence/<_>/status", data = "") )] pub fn set_presence_route( db: State<'_, Database>, body: Ruma, - _user_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); db.global_edus.update_presence( ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&user_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, currently_active: None, - displayname: db.users.displayname(&user_id)?, + displayname: db.users.displayname(&sender_id)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -879,7 +858,7 @@ pub fn set_presence_route( presence: body.presence, 
status_msg: body.status_msg.clone(), }, - sender: user_id.clone(), + sender: sender_id.clone(), }, &db.globals, )?; @@ -895,26 +874,26 @@ pub fn upload_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); if let Some(one_time_keys) = &body.one_time_keys { for (key_key, key_value) in one_time_keys { db.users - .add_one_time_key(user_id, device_id, key_key, key_value)?; + .add_one_time_key(sender_id, device_id, key_key, key_value)?; } } if let Some(device_keys) = &body.device_keys { // This check is needed to assure that signatures are kept - if db.users.get_device_keys(user_id, device_id)?.is_none() { + if db.users.get_device_keys(sender_id, device_id)?.is_none() { db.users - .add_device_keys(user_id, device_id, device_keys, &db.globals)?; + .add_device_keys(sender_id, device_id, device_keys, &db.globals)?; } } Ok(upload_keys::Response { - one_time_key_counts: db.users.count_one_time_keys(user_id, device_id)?, + one_time_key_counts: db.users.count_one_time_keys(sender_id, device_id)?, } .into()) } @@ -927,7 +906,7 @@ pub fn get_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); @@ -1038,10 +1017,10 @@ pub fn create_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let version = db .key_backups - .create_backup(&user_id, &body.algorithm, &db.globals)?; + .create_backup(&sender_id, &body.algorithm, &db.globals)?; Ok(create_backup::Response { version }.into()) } @@ -1049,18 +1028,17 @@ pub fn create_backup_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/unstable/room_keys/version/<_version>", + "/_matrix/client/unstable/room_keys/version/<_>", data = "" ) )] pub fn update_backup_route( db: State<'_, Database>, body: Ruma, - _version: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); db.key_backups - .update_backup(&user_id, &body.version, &body.algorithm, &db.globals)?; + .update_backup(&sender_id, &body.version, &body.algorithm, &db.globals)?; Ok(update_backup::Response.into()) } @@ -1073,11 +1051,11 @@ pub fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let (version, algorithm) = db.key_backups - .get_latest_backup(&user_id)? + .get_latest_backup(&sender_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -1085,8 +1063,8 @@ pub fn get_latest_backup_route( Ok(get_latest_backup::Response { algorithm, - count: (db.key_backups.count_keys(user_id, &version)? as u32).into(), - etag: db.key_backups.get_etag(user_id, &version)?, + count: (db.key_backups.count_keys(sender_id, &version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_id, &version)?, version, } .into()) @@ -1095,19 +1073,18 @@ pub fn get_latest_backup_route( #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/unstable/room_keys/version/<_version>", + "/_matrix/client/unstable/room_keys/version/<_>", data = "" ) )] pub fn get_backup_route( db: State<'_, Database>, body: Ruma, - _version: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let algorithm = db.key_backups - .get_backup(&user_id, &body.version)? + .get_backup(&sender_id, &body.version)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -1115,8 +1092,8 @@ pub fn get_backup_route( Ok(get_backup::Response { algorithm, - count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(user_id, &body.version)?, + count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, version: body.version.clone(), } .into()) @@ -1131,12 +1108,12 @@ pub fn add_backup_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { db.key_backups.add_key( - &user_id, + &sender_id, &body.version, &room_id, &session_id, @@ -1147,8 +1124,8 @@ pub fn add_backup_keys_route( } Ok(add_backup_keys::Response { - count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(user_id, &body.version)?, + count: (db.key_backups.count_keys(sender_id, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, } .into()) } @@ -1161,23 +1138,22 @@ pub fn get_backup_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let rooms = db.key_backups.get_all(&user_id, &body.version)?; + let rooms = db.key_backups.get_all(&sender_id, &body.version)?; Ok(get_backup_keys::Response { rooms }.into()) } #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "") + post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") )] pub fn set_read_marker_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { content: ruma::events::fully_read::FullyReadEventContent { @@ -1187,7 +1163,7 @@ pub fn set_read_marker_route( }; db.account_data.update( Some(&body.room_id), - &user_id, + &sender_id, EventType::FullyRead, &fully_read_event, &db.globals, @@ -1196,7 +1172,7 @@ pub fn set_read_marker_route( if let Some(event) = &body.read_receipt { db.rooms.edus.room_read_set( &body.room_id, - &user_id, + &sender_id, db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", @@ -1205,7 +1181,7 @@ pub fn set_read_marker_route( let mut user_receipts = BTreeMap::new(); user_receipts.insert( - user_id.clone(), + sender_id.clone(), ruma::events::receipt::Receipt { ts: Some(SystemTime::now()), }, @@ -1219,7 +1195,7 @@ pub fn set_read_marker_route( ); db.rooms.edus.roomlatest_update( - &user_id, + &sender_id, &body.room_id, AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( ruma::events::receipt::ReceiptEvent { @@ -1236,21 +1212,19 @@ pub fn set_read_marker_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/rooms/<_room_id>/typing/<_user_id>", + "/_matrix/client/r0/rooms/<_>/typing/<_>", data = "" ) )] pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _user_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if body.typing { db.rooms.edus.roomactive_add( - &user_id, + &sender_id, &body.room_id, body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + utils::millis_since_unix_epoch(), @@ -1259,7 +1233,7 @@ pub fn create_typing_event_route( } else { db.rooms .edus - .roomactive_remove(&user_id, &body.room_id, &db.globals)?; + .roomactive_remove(&sender_id, &body.room_id, &db.globals)?; } Ok(create_typing_event::Response.into()) @@ -1273,7 +1247,7 @@ pub fn create_room_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -1296,7 +1270,7 @@ pub fn create_room_route( } })?; - let mut content = ruma::events::room::create::CreateEventContent::new(user_id.clone()); + let mut content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); content.federate = body.creation_content.as_ref().map_or(true, |c| c.federate); content.predecessor = body 
.creation_content @@ -1307,7 +1281,7 @@ pub fn create_room_route( // 1. The room create event db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomCreate, serde_json::to_value(content).expect("event is valid, we just created it"), None, @@ -1319,18 +1293,18 @@ pub fn create_room_route( // 2. Let the room creator join db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, is_direct: body.is_direct, third_party_invite: None, }) .expect("event is valid, we just created it"), None, - Some(user_id.to_string()), + Some(sender_id.to_string()), None, &db.globals, )?; @@ -1344,9 +1318,9 @@ pub fn create_room_route( // 3. Power levels let mut users = BTreeMap::new(); - users.insert(user_id.clone(), 100.into()); - for invite_user_id in &body.invite { - users.insert(invite_user_id.clone(), 100.into()); + users.insert(sender_id.clone(), 100.into()); + for invite_ in &body.invite { + users.insert(invite_.clone(), 100.into()); } let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { @@ -1372,7 +1346,7 @@ pub fn create_room_route( }; db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomPowerLevels, power_levels_content, None, @@ -1385,7 +1359,7 @@ pub fn create_room_route( // 4.1 Join Rules db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomJoinRules, match preset { create_room::RoomPreset::PublicChat => serde_json::to_value( @@ -1407,7 +1381,7 @@ pub fn create_room_route( // 4.2 History Visibility db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomHistoryVisibility, serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( history_visibility::HistoryVisibility::Shared, @@ -1422,7 +1396,7 @@ pub fn create_room_route( // 4.3 Guest Access db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomGuestAccess, match preset { create_room::RoomPreset::PublicChat => serde_json::to_value( @@ -1454,7 +1428,7 @@ pub fn create_room_route( db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), event_type.clone(), serde_json::from_str(content.get()).map_err(|_| { Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") @@ -1470,7 +1444,7 @@ pub fn create_room_route( if let Some(name) = &body.name { db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomName, serde_json::to_value( name::NameEventContent::new(name.clone()) @@ -1487,7 +1461,7 @@ pub fn create_room_route( if let Some(topic) = &body.topic { db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomTopic, serde_json::to_value(topic::TopicEventContent { topic: topic.clone(), @@ -1504,7 +1478,7 @@ pub fn create_room_route( for user in &body.invite { db.rooms.append_pdu( room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Invite, @@ -1541,12 +1515,12 @@ pub fn joined_rooms_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = 
body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); Ok(joined_rooms::Response { joined_rooms: db .rooms - .rooms_joined(&user_id) + .rooms_joined(&sender_id) .filter_map(|r| r.ok()) .collect(), } @@ -1556,22 +1530,19 @@ pub fn joined_rooms_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/rooms/<_room_id>/redact/<_event_id>/<_txn_id>", + "/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "" ) )] pub fn redact_event_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_id: String, - _txn_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let event_id = db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomRedaction, serde_json::to_value(redaction::RedactionEventContent { reason: body.reason.clone(), @@ -1588,12 +1559,11 @@ pub fn redact_event_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/directory/room/<_room_alias>", data = "") + put("/_matrix/client/r0/directory/room/<_>", data = "") )] pub fn create_alias_route( db: State<'_, Database>, body: Ruma, - _room_alias: String, ) -> ConduitResult { if db.rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); @@ -1607,12 +1577,11 @@ pub fn create_alias_route( #[cfg_attr( feature = "conduit_bin", - delete("/_matrix/client/r0/directory/room/<_room_alias>", data = "") + delete("/_matrix/client/r0/directory/room/<_>", data = "") )] pub fn delete_alias_route( db: State<'_, Database>, body: Ruma, - _room_alias: String, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; @@ -1621,12 +1590,11 @@ pub fn delete_alias_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/directory/room/<_room_alias>", data = "") + get("/_matrix/client/r0/directory/room/<_>", data = "") )] pub fn get_alias_route( db: State<'_, Database>, body: Ruma, - _room_alias: String, ) -> ConduitResult { if body.room_alias.server_name() != db.globals.server_name() { todo!("ask remote server"); @@ -1649,32 +1617,31 @@ pub fn get_alias_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/join", data = "") + post("/_matrix/client/r0/rooms/<_>/join", data = "") )] pub fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); // TODO: Ask a remote server if we don't have this room let event = member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, is_direct: None, third_party_invite: None, }; db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(event).expect("event is valid, we just created it"), None, - Some(user_id.to_string()), + Some(sender_id.to_string()), None, &db.globals, )?; @@ -1687,12 +1654,11 @@ pub fn join_room_by_id_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/join/<_room_id_or_alias>", data = "") + post("/_matrix/client/r0/join/<_>", data = "") )] 
pub fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma, - _room_id_or_alias: String, ) -> ConduitResult { let room_id = RoomId::try_from(body.room_id_or_alias.clone()).or_else(|alias| { Ok::<_, Error>(db.rooms.id_from_alias(&alias)?.ok_or(Error::BadRequest( @@ -1702,7 +1668,7 @@ pub fn join_room_by_id_or_alias_route( })?; let body = Ruma { - user_id: body.user_id.clone(), + sender_id: body.sender_id.clone(), device_id: body.device_id.clone(), json_body: None, body: join_room_by_id::Request { @@ -1712,25 +1678,24 @@ pub fn join_room_by_id_or_alias_route( }; Ok(join_room_by_id_or_alias::Response { - room_id: join_room_by_id_route(db, body, "".to_owned())?.0.room_id, + room_id: join_room_by_id_route(db, body)?.0.room_id, } .into()) } #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/leave", data = "") + post("/_matrix/client/r0/rooms/<_>/leave", data = "") )] pub fn leave_room_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? + .room_state_get(&body.room_id, &EventType::RoomMember, &sender_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1746,11 +1711,11 @@ pub fn leave_room_route( db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(event).expect("event is valid, we just created it"), None, - Some(user_id.to_string()), + Some(sender_id.to_string()), None, &db.globals, )?; @@ -1760,18 +1725,17 @@ pub fn leave_room_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/kick", data = "") + post("/_matrix/client/r0/rooms/<_>/kick", data = "") )] pub fn kick_user_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? + .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot kick member that's not in the room.", @@ -1779,7 +1743,7 @@ pub fn kick_user_route( .content .clone(), ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? 
+ .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; @@ -1788,11 +1752,11 @@ pub fn kick_user_route( db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), // Sender + sender_id.clone(), EventType::RoomMember, serde_json::to_value(event).expect("event is valid, we just created it"), None, - Some(body.body.user_id.to_string()), + Some(body.user_id.to_string()), None, &db.globals, )?; @@ -1802,16 +1766,15 @@ pub fn kick_user_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_room_id>/joined_members", data = "") + get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") )] pub fn joined_members_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(&user_id, &body.room_id).unwrap_or(false) { + if !db.rooms.is_joined(&sender_id, &body.room_id).unwrap_or(false) { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -1837,25 +1800,24 @@ pub fn joined_members_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/ban", data = "") + post("/_matrix/client/r0/rooms/<_>/ban", data = "") )] pub fn ban_user_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); // TODO: reason let event = db .rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? + .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? .map_or( Ok::<_, Error>(member::MemberEventContent { membership: member::MembershipState::Ban, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, + displayname: db.users.displayname(&body.user_id)?, + avatar_url: db.users.avatar_url(&body.user_id)?, is_direct: None, third_party_invite: None, }), @@ -1863,7 +1825,7 @@ pub fn ban_user_route( let mut event = serde_json::from_value::>( event.content.clone(), ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? + .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Ban; @@ -1873,11 +1835,11 @@ pub fn ban_user_route( db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), // Sender + sender_id.clone(), EventType::RoomMember, serde_json::to_value(event).expect("event is valid, we just created it"), None, - Some(body.body.user_id.to_string()), + Some(body.user_id.to_string()), None, &db.globals, )?; @@ -1887,18 +1849,17 @@ pub fn ban_user_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/unban", data = "") + post("/_matrix/client/r0/rooms/<_>/unban", data = "") )] pub fn unban_user_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &user_id.to_string())? 
+ .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot unban a user who is not banned.", @@ -1914,11 +1875,11 @@ pub fn unban_user_route( db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), // Sender + sender_id.clone(), EventType::RoomMember, serde_json::to_value(event).expect("event is valid, we just created it"), None, - Some(body.body.user_id.to_string()), + Some(body.user_id.to_string()), None, &db.globals, )?; @@ -1928,33 +1889,33 @@ pub fn unban_user_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/forget", data = "") + post("/_matrix/client/r0/rooms/<_>/forget", data = "") )] pub fn forget_room_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, &user_id)?; + db.rooms.forget(&body.room_id, &sender_id)?; Ok(forget_room::Response.into()) } #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/invite", data = "") + post("/_matrix/client/r0/rooms/<_>/invite", data = "") )] pub fn invite_user_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { db.rooms.append_pdu( body.room_id.clone(), - body.user_id.clone().expect("user is authenticated"), + sender_id.clone(), EventType::RoomMember, serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Invite, @@ -1978,12 +1939,11 @@ pub fn invite_user_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/directory/list/room/<_room_id>", data = "") + put("/_matrix/client/r0/directory/list/room/<_>", data = "") )] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { match body.visibility { room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, @@ -1995,12 +1955,11 @@ pub async fn set_room_visibility_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/directory/list/room/<_room_id>", data = "") + get("/_matrix/client/r0/directory/list/room/<_>", data = "") )] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { Ok(get_room_visibility::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { @@ -2027,7 +1986,7 @@ pub async fn get_public_rooms_route( server, since, }, - user_id, + sender_id, device_id, json_body, } = body; @@ -2047,7 +2006,7 @@ pub async fn get_public_rooms_route( server, since, }, - user_id, + sender_id, device_id, json_body, }, @@ -2224,16 +2183,15 @@ pub fn search_users_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_room_id>/members", data = "") + get("/_matrix/client/r0/rooms/<_>/members", data = "") )] pub fn get_member_events_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -2266,19 +2224,17 @@ pub fn get_protocols_route() -> ConduitResult { #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/rooms/<_room_id>/event/<_event_id>", + "/_matrix/client/r0/rooms/<_>/event/<_>", data = "" ) )] pub fn get_room_event_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -2298,25 +2254,22 @@ pub fn get_room_event_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/rooms/<_room_id>/send/<_event_type>/<_txn_id>", + "/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "" ) )] pub fn create_message_event_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_type: String, - _txn_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), + sender_id.clone(), body.event_type.clone(), serde_json::from_str( body.json_body @@ -2336,18 +2289,15 @@ pub fn create_message_event_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + "/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "" ) )] pub fn create_state_event_for_key_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_type: String, - _state_key: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let content = serde_json::from_str::( body.json_body @@ -2386,7 +2336,7 @@ pub fn create_state_event_for_key_route( let event_id = db.rooms.append_pdu( body.room_id.clone(), - user_id.clone(), + sender_id.clone(), body.event_type.clone(), content, None, @@ -2401,15 +2351,13 @@ pub fn create_state_event_for_key_route( #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + "/_matrix/client/r0/rooms/<_>/state/<_>", data = "" ) )] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_type: String, ) -> ConduitResult { // This just calls create_state_event_for_key_route let Ruma { @@ -2419,7 +2367,7 @@ pub fn create_state_event_for_empty_key_route( event_type, data, }, - user_id, + sender_id, device_id, json_body, } = body; @@ -2434,13 +2382,10 @@ pub fn create_state_event_for_empty_key_route( data, state_key: "".to_owned(), }, - user_id, + sender_id, device_id, json_body, }, - _room_id, - _event_type, - "".to_owned(), )? 
.0 .event_id, @@ -2450,16 +2395,15 @@ pub fn create_state_event_for_empty_key_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_room_id>/state", data = "") + get("/_matrix/client/r0/rooms/<_>/state", data = "") )] pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view the room state.", @@ -2480,20 +2424,17 @@ pub fn get_state_events_route( #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>/<_state_key>", + "/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "" ) )] pub fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_type: String, - _state_key: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view the room state.", @@ -2518,19 +2459,17 @@ pub fn get_state_events_for_key_route( #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/rooms/<_room_id>/state/<_event_type>", + "/_matrix/client/r0/rooms/<_>/state/<_>", data = "" ) )] pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_type: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view the room state.", @@ -2556,14 +2495,16 @@ pub fn get_state_events_for_empty_key_route( feature = "conduit_bin", get("/_matrix/client/r0/sync", data = "") )] -pub fn sync_route( +pub async fn sync_events_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - std::thread::sleep(Duration::from_millis(1000)); - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); + // Setup watchers, so if there's no response, we can wait for them + let watcher = db.watch(sender_id, device_id); + let next_batch = db.globals.current_count()?.to_string(); let mut joined_rooms = BTreeMap::new(); @@ -2573,12 +2514,12 @@ pub fn sync_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - for room_id in db.rooms.rooms_joined(&user_id) { + for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; let mut pdus = db .rooms - .pdus_since(&user_id, &room_id, since)? + .pdus_since(&sender_id, &room_id, since)? 
.filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); @@ -2589,7 +2530,7 @@ pub fn sync_route( send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; - if !joined_since_last_sync && pdu.state_key == Some(user_id.to_string()) { + if !joined_since_last_sync && pdu.state_key == Some(sender_id.to_string()) { let content = serde_json::from_value::< Raw, >(pdu.content.clone()) @@ -2621,7 +2562,7 @@ pub fn sync_route( for hero in db .rooms - .all_pdus(&user_id, &room_id)? + .all_pdus(&sender_id, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|pdu| pdu.kind == EventType::RoomMember) .map(|pdu| { @@ -2674,7 +2615,7 @@ pub fn sync_route( // Filter for possible heroes .filter_map(|u| u) { - if heroes.contains(&hero) || hero == user_id.to_string() { + if heroes.contains(&hero) || hero == sender_id.to_string() { continue; } @@ -2692,10 +2633,10 @@ pub fn sync_route( }; let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id)? { + if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &sender_id)? { Some( (db.rooms - .pdus_since(&user_id, &room_id, last_read)? + .pdus_since(&sender_id, &room_id, last_read)? .filter_map(|pdu| pdu.ok()) // Filter out buggy events .filter(|pdu| { matches!( @@ -2763,7 +2704,7 @@ pub fn sync_route( account_data: sync_events::AccountData { events: db .account_data - .changes_since(Some(&room_id), &user_id, since)? + .changes_since(Some(&room_id), &sender_id, since)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -2807,9 +2748,9 @@ pub fn sync_route( } let mut left_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_left(&user_id) { + for room_id in db.rooms.rooms_left(&sender_id) { let room_id = room_id?; - let pdus = db.rooms.pdus_since(&user_id, &room_id, since)?; + let pdus = db.rooms.pdus_since(&sender_id, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events .map(|pdu| pdu.to_sync_room_event()) @@ -2856,7 +2797,7 @@ pub fn sync_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_invited(&user_id) { + for room_id in db.rooms.rooms_invited(&sender_id) { let room_id = room_id?; let invited_room = sync_events::InvitedRoom { @@ -2877,9 +2818,9 @@ pub fn sync_route( // Remove all to-device events the device received *last time* db.users - .remove_to_device_events(user_id, device_id, since)?; + .remove_to_device_events(sender_id, device_id, since)?; - Ok(sync_events::Response { + let response = sync_events::Response { next_batch, rooms: sync_events::Rooms { leave: left_rooms, @@ -2909,7 +2850,7 @@ pub fn sync_route( account_data: sync_events::AccountData { events: db .account_data - .changes_since(None, &user_id, since)? + .changes_since(None, &sender_id, since)? 
.into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -2931,28 +2872,48 @@ pub fn sync_route( }, device_one_time_keys_count: Default::default(), // TODO to_device: sync_events::ToDevice { - events: db.users.get_to_device_events(user_id, device_id)?, + events: db.users.get_to_device_events(sender_id, device_id)?, }, + }; + + // TODO: Retry the endpoint instead of returning (waiting for #118) + if !body.full_state && response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.device_one_time_keys_count.is_empty() + && response.to_device.is_empty() + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or(Duration::default()); + if duration.as_secs() > 10 { + duration = Duration::from_secs(10); + } + let mut delay = tokio::time::delay_for(duration); + tokio::select! { + _ = &mut delay => {} + _ = watcher => {} + } } - .into()) + + Ok(response.into()) } #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/rooms/<_room_id>/context/<_event_id>", + "/_matrix/client/r0/rooms/<_>/context/<_>", data = "" ) )] pub fn get_context_route( db: State<'_, Database>, body: Ruma, - _room_id: String, - _event_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -2975,7 +2936,7 @@ pub fn get_context_route( let events_before = db .rooms - .pdus_until(&user_id, &body.room_id, base_token) + .pdus_until(&sender_id, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -2985,14 +2946,7 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let start_token = events_before.last().map_or(Ok(None), |(_, e)| { - Ok::<_, Error>(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? - .to_string(), - )) - })?; + let start_token = events_before.last().map(|(count, _)| count.to_string()); let events_before = events_before .into_iter() @@ -3001,7 +2955,7 @@ pub fn get_context_route( let events_after = db .rooms - .pdus_after(&user_id, &body.room_id, base_token) + .pdus_after(&sender_id, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -3011,15 +2965,7 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let end_token = if let Some(last_event) = events_after.last() { - Some( - utils::u64_from_bytes(&last_event.0) - .map_err(|_| Error::bad_database("Invalid pdu id in db."))? 
- .to_string(), - ) - } else { - None - }; + let end_token = events_after.last().map(|(count, _)| count.to_string()); let events_after = events_after .into_iter() @@ -3044,16 +2990,15 @@ pub fn get_context_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_room_id>/messages", data = "") + get("/_matrix/client/r0/rooms/<_>/messages", data = "") )] pub fn get_message_events_route( db: State<'_, Database>, body: Ruma, - _room_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(user_id, &body.room_id)? { + if !db.rooms.is_joined(sender_id, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -3066,7 +3011,7 @@ pub fn get_message_events_route( .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; - let to = body.to.as_ref().map(|t| t.as_bytes()); + let to = body.to.as_ref().map(|t| t.parse()); // Use limit or else 10 let limit = body @@ -3078,21 +3023,13 @@ pub fn get_message_events_route( get_message_events::Direction::Forward => { let events_after = db .rooms - .pdus_after(&user_id, &body.room_id, from) + .pdus_after(&sender_id, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .take_while(|(k, _)| Some(&**k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect::>(); - let end_token = if let Some(last_event) = events_after.last() { - Some( - utils::u64_from_bytes(&last_event.0) - .map_err(|_| Error::bad_database("Invalid pdu id in db."))? - .to_string(), - ) - } else { - None - }; + let end_token = events_after.last().map(|(count, _)| count.to_string()); let events_after = events_after .into_iter() @@ -3110,21 +3047,13 @@ pub fn get_message_events_route( get_message_events::Direction::Backward => { let events_before = db .rooms - .pdus_until(&user_id, &body.room_id, from) + .pdus_until(&sender_id, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .take_while(|(k, _)| Some(&**k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect::>(); - let start_token = if let Some(last_event) = events_before.last() { - Some( - utils::u64_from_bytes(&last_event.0) - .map_err(|_| Error::bad_database("Invalid pdu id in db."))? 
- .to_string(), - ) - } else { - None - }; + let start_token = events_before.last().map(|(count, _)| count.to_string()); let events_before = events_before .into_iter() @@ -3161,24 +3090,22 @@ pub fn publicised_groups_route() -> ConduitResult/<_txn_id>", + "/_matrix/client/r0/sendToDevice/<_>/<_>", data = "" ) )] pub fn send_event_to_device_route( db: State<'_, Database>, body: Ruma, - _event_type: String, - _txn_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { match target_device_id_maybe { to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { db.users.add_to_device_event( - user_id, + sender_id, &target_user_id, &target_device_id, &body.event_type, @@ -3192,7 +3119,7 @@ pub fn send_event_to_device_route( to_device::DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { db.users.add_to_device_event( - user_id, + sender_id, &target_user_id, &target_device_id?, &body.event_type, @@ -3307,11 +3234,11 @@ pub fn get_devices_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let devices = db .users - .all_devices_metadata(user_id) + .all_devices_metadata(sender_id) .filter_map(|r| r.ok()) // Filter out buggy devices .collect::>(); @@ -3327,11 +3254,11 @@ pub fn get_device_route( body: Ruma, _device_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device = db .users - .get_device_metadata(&user_id, &body.body.device_id)? + .get_device_metadata(&sender_id, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; Ok(get_device::Response { device }.into()) @@ -3346,17 +3273,17 @@ pub fn update_device_route( body: Ruma, _device_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut device = db .users - .get_device_metadata(&user_id, &body.body.device_id)? + .get_device_metadata(&sender_id, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); db.users - .update_device_metadata(&user_id, &body.body.device_id, &device)?; + .update_device_metadata(&sender_id, &body.body.device_id, &device)?; Ok(update_device::Response.into()) } @@ -3370,7 +3297,7 @@ pub fn delete_device_route( body: Ruma, _device_id: String, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA @@ -3386,7 +3313,7 @@ pub fn delete_device_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &user_id, + &sender_id, &device_id, auth, &uiaainfo, @@ -3399,11 +3326,11 @@ pub fn delete_device_route( // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } - db.users.remove_device(&user_id, &body.body.device_id)?; + db.users.remove_device(&sender_id, &body.body.device_id)?; Ok(delete_device::Response.into()) } @@ -3416,7 +3343,7 @@ pub fn delete_devices_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA @@ -3432,7 +3359,7 @@ pub fn delete_devices_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &user_id, + &sender_id, &device_id, auth, &uiaainfo, @@ -3445,12 +3372,12 @@ pub fn delete_devices_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } for device_id in &body.devices { - db.users.remove_device(&user_id, &device_id)? + db.users.remove_device(&sender_id, &device_id)? } Ok(delete_devices::Response.into()) @@ -3464,7 +3391,7 @@ pub fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); // UIAA @@ -3480,7 +3407,7 @@ pub fn upload_signing_keys_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &user_id, + &sender_id, &device_id, auth, &uiaainfo, @@ -3493,13 +3420,13 @@ pub fn upload_signing_keys_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } if let Some(master_key) = &body.master_key { db.users.add_cross_signing_keys( - user_id, + sender_id, &master_key, &body.self_signing_key, &body.user_signing_key, @@ -3518,7 +3445,7 @@ pub fn upload_signatures_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { for (key_id, signed_key) in signed_keys { @@ -3581,22 +3508,19 @@ pub fn set_pushers_route() -> ConduitResult { #[cfg_attr( feature = "conduit_bin", put( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + "/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "" ) )] pub fn update_tag_route( db: State<'_, Database>, - _user_id: String, - _room_id: String, - _tag: String, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get::(Some(&body.room_id), user_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_id, EventType::Tag)? 
.unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), @@ -3609,7 +3533,7 @@ pub fn update_tag_route( db.account_data.update( Some(&body.room_id), - user_id, + sender_id, EventType::Tag, &tags_event, &db.globals, @@ -3621,22 +3545,19 @@ pub fn update_tag_route( #[cfg_attr( feature = "conduit_bin", delete( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags/<_tag>", + "/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "" ) )] pub fn delete_tag_route( db: State<'_, Database>, - _user_id: String, - _room_id: String, - _tag: String, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get::(Some(&body.room_id), user_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_id, EventType::Tag)? .unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), @@ -3646,7 +3567,7 @@ pub fn delete_tag_route( db.account_data.update( Some(&body.room_id), - user_id, + sender_id, EventType::Tag, &tags_event, &db.globals, @@ -3658,22 +3579,20 @@ pub fn delete_tag_route( #[cfg_attr( feature = "conduit_bin", get( - "/_matrix/client/r0/user/<_user_id>/rooms/<_room_id>/tags", + "/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "" ) )] pub fn get_tags_route( db: State<'_, Database>, - _user_id: String, - _room_id: String, body: Ruma, ) -> ConduitResult { - let user_id = body.user_id.as_ref().expect("user is authenticated"); + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); Ok(get_tags::Response { tags: db .account_data - .get::(Some(&body.room_id), user_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_id, EventType::Tag)? 
.unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), @@ -3686,9 +3605,8 @@ pub fn get_tags_route( } #[cfg(feature = "conduit_bin")] -#[options("/<_segments..>")] +#[options("/<_..>")] pub fn options_route( - _segments: rocket::http::uri::Segments<'_>, ) -> ConduitResult { Ok(send_event_to_device::Response.into()) } diff --git a/src/database.rs b/src/database.rs index 370fde7..250de23 100644 --- a/src/database.rs +++ b/src/database.rs @@ -12,7 +12,9 @@ use directories::ProjectDirs; use log::info; use std::fs::remove_dir_all; -use rocket::Config; +use futures::StreamExt; +use rocket::{futures, Config}; +use ruma::{DeviceId, UserId}; pub struct Database { pub globals: globals::Globals, @@ -124,4 +126,77 @@ impl Database { _db: db, }) } + + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> () { + let mut userid_prefix = user_id.to_string().as_bytes().to_vec(); + userid_prefix.push(0xff); + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xff); + + let mut futures = futures::stream::FuturesUnordered::new(); + + futures.push(self.users.keychangeid_userid.watch_prefix(b"")); + + // Return when *any* user changed his key + // TODO: only send for user they share a room with + futures.push( + self.users + .todeviceid_events + .watch_prefix(&userdeviceid_prefix), + ); + + // TODO: only send for user they share a room with + futures.push(self.global_edus.presenceid_presence.watch_prefix(b"")); + + futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.rooms.userroomid_invited.watch_prefix(&userid_prefix)); + futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); + + // Events for rooms we are in + for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + let mut roomid_prefix = room_id.to_string().as_bytes().to_vec(); + roomid_prefix.push(0xff); + + // PDUs + futures.push(self.rooms.pduid_pdu.watch_prefix(&roomid_prefix)); + + // EDUs + futures.push( + self.rooms + .edus + .roomid_lastroomactiveupdate + .watch_prefix(&roomid_prefix), + ); + + futures.push( + self.rooms + .edus + .roomlatestid_roomlatest + .watch_prefix(&roomid_prefix), + ); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.account_data + .roomuserdataid_accountdata + .watch_prefix(&roomuser_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xff]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.account_data + .roomuserdataid_accountdata + .watch_prefix(&globaluserdata_prefix), + ); + + // Wait until one of them finds something + futures.next().await; + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0395cc2..fe5721c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -666,7 +666,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -677,6 +677,7 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); + let prefixlen = prefix.len(); self.pduid_pdu .range(..current) .rev() @@ -688,7 +689,11 @@ impl Rooms { if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok((k, pdu)) + Ok(( + 
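                    // A pdu id is `room_id + 0xff + big-endian count`, so slicing at
                    // `prefixlen` recovers the count. Returning the count alongside
                    // the event lets callers hand it straight back to clients as a
                    // pagination token (e.g. the `start_token` built from
                    // `events_before` earlier in this patch).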
utils::u64_from_bytes(&k[prefixlen..]) + .map_err(|_| Error::bad_database("Invalid pdu id in db."))?, + pdu, + )) }) } @@ -699,7 +704,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -710,6 +715,7 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); + let prefixlen = prefix.len(); self.pduid_pdu .range(current..) .filter_map(|r| r.ok()) @@ -720,7 +726,11 @@ impl Rooms { if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok((k, pdu)) + Ok(( + utils::u64_from_bytes(&k[prefixlen..]) + .map_err(|_| Error::bad_database("Invalid pdu id in db."))?, + pdu, + )) }) } @@ -919,7 +929,7 @@ impl Rooms { }) } - /// Returns an iterator over all left members of a room. + /// Returns an iterator over all rooms this user joined. pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_joined .scan_prefix(user_id.to_string()) diff --git a/src/main.rs b/src/main.rs index 1feee4d..2caee4c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -86,7 +86,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_state_events_route, client_server::get_state_events_for_key_route, client_server::get_state_events_for_empty_key_route, - client_server::sync_route, + client_server::sync_events_route, client_server::get_context_route, client_server::get_message_events_route, client_server::turn_server_route, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 2a82b10..66f4d4c 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -13,7 +13,7 @@ use { http::Status, response::{self, Responder}, tokio::io::AsyncReadExt, - Outcome::*, + outcome::Outcome::*, Request, State, }, ruma::api::Endpoint, @@ -24,7 +24,7 @@ use { /// first. pub struct Ruma { pub body: T, - pub user_id: Option, + pub sender_id: Option, pub device_id: Option>, pub json_body: Option>, // This is None when body is not a valid string } @@ -94,7 +94,7 @@ impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { match T::try_from(http_request) { Ok(t) => Success(Ruma { body: t, - user_id, + sender_id: user_id, device_id, // TODO: Can we avoid parsing it again? 
(We only need this for append_pdu) json_body: utils::string_from_bytes(&body) From dd3dab39ae1c2f81aebef1b2041e12046abb61f1 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 27 Jul 2020 17:38:00 +0200 Subject: [PATCH 0195/1727] feat: whoami route --- src/client_server.rs | 184 +++++++++++++++++++------------------------ src/main.rs | 1 + src/ruma_wrapper.rs | 2 +- 3 files changed, 83 insertions(+), 104 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index baeb839..2a67a57 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -11,14 +11,14 @@ use log::warn; #[cfg(not(feature = "conduit_bin"))] use super::State; #[cfg(feature = "conduit_bin")] -use rocket::{delete, get, options, post, put, State, tokio}; +use rocket::{delete, get, options, post, put, tokio, State}; use ruma::{ api::client::{ error::ErrorKind, r0::{ account::{ - change_password, deactivate, get_username_availability, register, + change_password, deactivate, get_username_availability, register, whoami, ThirdPartyIdRemovalStatus, }, alias::{create_alias, delete_alias, get_alias}, @@ -304,6 +304,18 @@ pub fn login_route( .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/account/whoami", data = "") +)] +pub fn whoami_route(body: Ruma) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + Ok(whoami::Response { + user_id: sender_id.clone(), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") @@ -361,9 +373,14 @@ pub fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - db.uiaa - .try_auth(&sender_id, device_id, auth, &uiaainfo, &db.users, &db.globals)?; + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -520,8 +537,7 @@ pub fn get_pushrules_all_route( "/_matrix/client/r0/pushrules/<_>/<_>/<_>", //data = "" ))] -pub fn set_pushrule_route( - //db: State<'_, Database>, +pub fn set_pushrule_route(//db: State<'_, Database>, //body: Ruma, ) -> ConduitResult { // TODO @@ -533,19 +549,14 @@ pub fn set_pushrule_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") )] -pub fn set_pushrule_enabled_route( -) -> ConduitResult { +pub fn set_pushrule_enabled_route() -> ConduitResult { // TODO warn!("TODO: set_pushrule_enabled_route"); Ok(set_pushrule_enabled::Response.into()) } -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/filter/<_>") -)] -pub fn get_filter_route( -) -> ConduitResult { +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] +pub fn get_filter_route() -> ConduitResult { // TODO Ok(get_filter::Response { filter: filter::FilterDefinition { @@ -559,10 +570,7 @@ pub fn get_filter_route( .into()) } -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user/<_>/filter") -)] +#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] pub fn create_filter_route() -> ConduitResult { // TODO Ok(create_filter::Response { @@ -573,10 +581,7 @@ pub fn create_filter_route() -> ConduitResult { #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/user/<_>/account_data/<_>", - data = "" - ) + put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] pub fn set_global_account_data_route( db: State<'_, Database>, @@ -607,10 +612,7 @@ pub fn set_global_account_data_route( #[cfg_attr( 
feature = "conduit_bin", - get( - "/_matrix/client/r0/user/<_>/account_data/<_>", - data = "" - ) + get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] pub fn get_global_account_data_route( db: State<'_, Database>, @@ -737,7 +739,8 @@ pub fn set_avatar_url_route( // TODO also make sure this is valid mxc:// format (not only starting with it) } - db.users.set_avatar_url(&sender_id, body.avatar_url.clone())?; + db.users + .set_avatar_url(&sender_id, body.avatar_url.clone())?; // Send a new membership event into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { @@ -1027,10 +1030,7 @@ pub fn create_backup_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/unstable/room_keys/version/<_>", - data = "" - ) + put("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] pub fn update_backup_route( db: State<'_, Database>, @@ -1072,23 +1072,20 @@ pub fn get_latest_backup_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/unstable/room_keys/version/<_>", - data = "" - ) + get("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] pub fn get_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let algorithm = - db.key_backups - .get_backup(&sender_id, &body.version)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; + let algorithm = db + .key_backups + .get_backup(&sender_id, &body.version)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; Ok(get_backup::Response { algorithm, @@ -1211,10 +1208,7 @@ pub fn set_read_marker_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/rooms/<_>/typing/<_>", - data = "" - ) + put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") )] pub fn create_typing_event_route( db: State<'_, Database>, @@ -1529,10 +1523,7 @@ pub fn joined_rooms_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", - data = "" - ) + put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") )] pub fn redact_event_route( db: State<'_, Database>, @@ -1695,7 +1686,11 @@ pub fn leave_room_route( let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &sender_id.to_string())? + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1735,7 +1730,11 @@ pub fn kick_user_route( let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot kick member that's not in the room.", @@ -1774,7 +1773,11 @@ pub fn joined_members_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(&sender_id, &body.room_id).unwrap_or(false) { + if !db + .rooms + .is_joined(&sender_id, &body.room_id) + .unwrap_or(false) + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -1812,7 +1815,11 @@ pub fn ban_user_route( let event = db .rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? 
+ .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? .map_or( Ok::<_, Error>(member::MemberEventContent { membership: member::MembershipState::Ban, @@ -1859,7 +1866,11 @@ pub fn unban_user_route( let mut event = serde_json::from_value::>( db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, &body.user_id.to_string())? + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot unban a user who is not banned.", @@ -2223,10 +2234,7 @@ pub fn get_protocols_route() -> ConduitResult { #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/r0/rooms/<_>/event/<_>", - data = "" - ) + get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") )] pub fn get_room_event_route( db: State<'_, Database>, @@ -2253,10 +2261,7 @@ pub fn get_room_event_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/rooms/<_>/send/<_>/<_>", - data = "" - ) + put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") )] pub fn create_message_event_route( db: State<'_, Database>, @@ -2288,10 +2293,7 @@ pub fn create_message_event_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/rooms/<_>/state/<_>/<_>", - data = "" - ) + put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] pub fn create_state_event_for_key_route( db: State<'_, Database>, @@ -2350,10 +2352,7 @@ pub fn create_state_event_for_key_route( #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/rooms/<_>/state/<_>", - data = "" - ) + put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] pub fn create_state_event_for_empty_key_route( db: State<'_, Database>, @@ -2423,10 +2422,7 @@ pub fn get_state_events_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/r0/rooms/<_>/state/<_>/<_>", - data = "" - ) + get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] pub fn get_state_events_for_key_route( db: State<'_, Database>, @@ -2458,10 +2454,7 @@ pub fn get_state_events_for_key_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/r0/rooms/<_>/state/<_>", - data = "" - ) + get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, @@ -2877,7 +2870,8 @@ pub async fn sync_events_route( }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !body.full_state && response.rooms.is_empty() + if !body.full_state + && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() && response.device_lists.is_empty() @@ -2902,10 +2896,7 @@ pub async fn sync_events_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/r0/rooms/<_>/context/<_>", - data = "" - ) + get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") )] pub fn get_context_route( db: State<'_, Database>, @@ -3089,10 +3080,7 @@ pub fn publicised_groups_route() -> ConduitResult/<_>", - data = "" - ) + put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") )] pub fn send_event_to_device_route( db: State<'_, Database>, @@ -3507,10 +3495,7 @@ pub fn set_pushers_route() -> ConduitResult { #[cfg_attr( feature = "conduit_bin", - put( - "/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", - data = "" - ) + put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] pub fn update_tag_route( db: State<'_, Database>, @@ -3544,10 +3529,7 @@ pub fn update_tag_route( #[cfg_attr( feature = "conduit_bin", - delete( - 
"/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", - data = "" - ) + delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] pub fn delete_tag_route( db: State<'_, Database>, @@ -3578,10 +3560,7 @@ pub fn delete_tag_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/client/r0/user/<_>/rooms/<_>/tags", - data = "" - ) + get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") )] pub fn get_tags_route( db: State<'_, Database>, @@ -3606,7 +3585,6 @@ pub fn get_tags_route( #[cfg(feature = "conduit_bin")] #[options("/<_..>")] -pub fn options_route( -) -> ConduitResult { +pub fn options_route() -> ConduitResult { Ok(send_event_to_device::Response.into()) } diff --git a/src/main.rs b/src/main.rs index 2caee4c..cc30ff6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,6 +28,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::register_route, client_server::get_login_route, client_server::login_route, + client_server::whoami_route, client_server::logout_route, client_server::logout_all_route, client_server::change_password_route, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 66f4d4c..0593436 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -11,9 +11,9 @@ use { Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, Transformed, }, http::Status, + outcome::Outcome::*, response::{self, Responder}, tokio::io::AsyncReadExt, - outcome::Outcome::*, Request, State, }, ruma::api::Endpoint, From 21eb8d4fe30b86a9b9f9b9e42e9b8df8de384fc6 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Mon, 27 Jul 2020 21:53:28 +0200 Subject: [PATCH 0196/1727] fix: problems with pdu serialization --- Cargo.lock | 3 ++ Cargo.toml | 2 +- src/pdu.rs | 108 +++++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 88 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37a620b..e40fd09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1485,6 +1485,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" dependencies = [ "async-trait", "atomic", @@ -1509,6 +1510,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" dependencies = [ "devise", "glob", @@ -1520,6 +1522,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" dependencies = [ "cookie", "http", diff --git a/Cargo.toml b/Cargo.toml index e5df8dd..de8fb47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # TODO: This can become optional as soon as proper configs are supported #rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests -rocket = { path = "../rocket/core/lib", features = ["tls"] } +rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } tokio = "0.2.22" # Used for long polling ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "d5d2d1d893fa12d27960e4c58d6c09b215d06e95" } # Used for matrix spec type definitions and helpers diff 
--git a/src/pdu.rs b/src/pdu.rs index 0cfdb63..c149297 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -79,39 +79,99 @@ impl PduEvent { } pub fn to_sync_room_event(&self) -> Raw { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::(&json) - .map(Raw::from) - .expect("AnySyncRoomEvent can always be built from a full PDU event") + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + }); + + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") } + pub fn to_room_event(&self) -> Raw { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::(&json) - .map(Raw::from) - .expect("AnyRoomEvent can always be built from a full PDU event") + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "room_id": self.room_id, + }); + + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") } + pub fn to_state_event(&self) -> Raw { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::(&json) - .map(Raw::from) - .expect("AnyStateEvent can always be built from a full PDU event") + let json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") } + pub fn to_sync_state_event(&self) -> Raw { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::(&json) - .map(Raw::from) - .expect("AnySyncStateEvent can always be built from a full PDU event") + let json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") } + pub fn to_stripped_state_event(&self) -> Raw { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::(&json) - .map(Raw::from) - .expect("AnyStrippedStateEvent can always be built from a full PDU event") + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") } + pub fn to_member_event(&self) -> Raw> { - let json = serde_json::to_string(&self).expect("PDUs are always valid"); - serde_json::from_str::>(&json) - .map(Raw::from) - .expect("StateEvent can always be built from a full PDU event") + let json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "redacts": self.redacts, + 
"unsigned": self.unsigned, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") } } From 05f9d927b833e22709d0eb0b46cc4d15a60f3408 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 28 Jul 2020 14:02:29 +0200 Subject: [PATCH 0197/1727] fix: account data --- src/client_server.rs | 7 ++----- src/database/account_data.rs | 19 +++++++++++++------ 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 2a67a57..6cc8e3d 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -622,17 +622,14 @@ pub fn get_global_account_data_route( let data = db .account_data - .get::( + .get::>( None, sender_id, EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - Ok(get_global_account_data::Response { - account_data: Raw::from(data), - } - .into()) + Ok(get_global_account_data::Response { account_data: data }.into()) } #[cfg_attr( diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 1afbcd6..99e0d5c 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,5 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ + api::client::error::ErrorKind, events::{AnyEvent as EduEvent, EventType}, Raw, RoomId, UserId, }; @@ -19,7 +20,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, event_type: EventType, - event: &T, + data: &T, globals: &super::globals::Globals, ) -> Result<()> { let mut prefix = room_id @@ -42,10 +43,16 @@ impl AccountData { key.push(0xff); key.extend_from_slice(event_type.to_string().as_bytes()); - self.roomuserdataid_accountdata.insert( - key, - &*serde_json::to_string(&event).expect("Map::to_string always works"), - )?; + let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling + if json.get("type").is_none() || json.get("content").is_none() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Account data doesn't have all required fields.", + )); + } + + self.roomuserdataid_accountdata + .insert(key, &*json.to_string())?; Ok(()) } @@ -60,7 +67,7 @@ impl AccountData { self.find_event(room_id, user_id, &kind) .map(|r| { let (_, v) = r?; - serde_json::from_slice(&v).map_err(|_| Error::BadDatabase("could not deserialize")) + serde_json::from_slice(&v).map_err(|_| Error::bad_database("could not deserialize")) }) .transpose() } From d891bbb5dc0640794feceece27137563e532d9da Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 28 Jul 2020 15:58:50 +0200 Subject: [PATCH 0198/1727] improve: presence --- Cargo.toml | 1 + src/client_server.rs | 189 ++++++++++++++++++++--------------- src/database.rs | 10 +- src/database/global_edus.rs | 62 ------------ src/database/rooms/edus.rs | 191 +++++++++++++++++++++++++++++++++++- 5 files changed, 304 insertions(+), 149 deletions(-) delete mode 100644 src/database/global_edus.rs diff --git a/Cargo.toml b/Cargo.toml index de8fb47..c2607a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_p tokio = "0.2.22" # Used for long polling ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "d5d2d1d893fa12d27960e4c58d6c09b215d06e95" } # Used for matrix spec type definitions and helpers +#ruma = { path = 
"../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries http = "0.2.1" # Used for rocket<->ruma conversions diff --git a/src/client_server.rs b/src/client_server.rs index 6cc8e3d..de76eef 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1,5 +1,5 @@ use std::{ - collections::BTreeMap, + collections::{hash_map, BTreeMap, HashMap}, convert::{TryFrom, TryInto}, time::{Duration, SystemTime}, }; @@ -645,7 +645,7 @@ pub fn set_displayname_route( db.users .set_displayname(&sender_id, body.displayname.clone())?; - // Send a new membership event into all joined rooms + // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( @@ -675,27 +675,29 @@ pub fn set_displayname_route( None, &db.globals, )?; - } - // Presence update - db.global_edus.update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, + // Presence update + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::presence::PresenceState::Online, + status_msg: None, + }, + sender: sender_id.clone(), }, - sender: sender_id.clone(), - }, - &db.globals, - )?; + &db.globals, + )?; + } Ok(set_display_name::Response.into()) } @@ -739,7 +741,7 @@ pub fn set_avatar_url_route( db.users .set_avatar_url(&sender_id, body.avatar_url.clone())?; - // Send a new membership event into all joined rooms + // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( @@ -769,27 +771,29 @@ pub fn set_avatar_url_route( None, &db.globals, )?; - } - // Presence update - db.global_edus.update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, + // Presence update + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::presence::PresenceState::Online, + status_msg: None, + }, + sender: sender_id.clone(), }, - sender: sender_id.clone(), - }, - &db.globals, - )?; + &db.globals, + )?; 
+ } Ok(set_avatar_url::Response.into()) } @@ -844,24 +848,30 @@ pub fn set_presence_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - db.global_edus.update_presence( - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: body.presence, - status_msg: body.status_msg.clone(), + for room_id in db.rooms.rooms_joined(&sender_id) { + let room_id = room_id?; + + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: body.presence, + status_msg: body.status_msg.clone(), + }, + sender: sender_id.clone(), }, - sender: sender_id.clone(), - }, - &db.globals, - )?; + &db.globals, + )?; + } Ok(set_presence::Response.into()) } @@ -2492,6 +2502,9 @@ pub async fn sync_events_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); + // TODO: match body.set_presence { + db.rooms.edus.ping_presence(&sender_id)?; + // Setup watchers, so if there's no response, we can wait for them let watcher = db.watch(sender_id, device_id); @@ -2504,6 +2517,8 @@ pub async fn sync_events_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); + let mut presence_updates = HashMap::new(); + for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; @@ -2735,6 +2750,40 @@ pub async fn sync_events_route( if !joined_room.is_empty() { joined_rooms.insert(room_id.clone(), joined_room); } + + // Take presence updates from this room + for (user_id, presence) in + db.rooms + .edus + .presence_since(&room_id, since, &db.rooms, &db.globals)? + { + match presence_updates.entry(user_id) { + hash_map::Entry::Vacant(v) => { + v.insert(presence); + } + hash_map::Entry::Occupied(mut o) => { + let p = o.get_mut(); + + // Update existing presence event with more info + p.content.presence = presence.content.presence; + if let Some(status_msg) = presence.content.status_msg { + p.content.status_msg = Some(status_msg); + } + if let Some(last_active_ago) = presence.content.last_active_ago { + p.content.last_active_ago = Some(last_active_ago); + } + if let Some(displayname) = presence.content.displayname { + p.content.displayname = Some(displayname); + } + if let Some(avatar_url) = presence.content.avatar_url { + p.content.avatar_url = Some(avatar_url); + } + if let Some(currently_active) = presence.content.currently_active { + p.content.currently_active = Some(currently_active); + } + } + } + } } let mut left_rooms = BTreeMap::new(); @@ -2818,23 +2867,9 @@ pub async fn sync_events_route( invite: invited_rooms, }, presence: sync_events::Presence { - events: db - .global_edus - .presence_since(since)? - .map(|edu| { - let mut edu = edu? 
- .deserialize() - .map_err(|_| Error::bad_database("EDU in database is invalid."))?; - if let Some(timestamp) = edu.content.last_active_ago { - let mut last_active_ago = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - last_active_ago -= timestamp; - edu.content.last_active_ago = Some(last_active_ago); - } - Ok::<_, Error>(edu.into()) - }) - .filter_map(|edu| edu.ok()) // Filter out buggy events + events: presence_updates + .into_iter() + .map(|(_, v)| Raw::from(v)) .collect(), }, account_data: sync_events::AccountData { @@ -2878,8 +2913,8 @@ pub async fn sync_events_route( // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives let mut duration = body.timeout.unwrap_or(Duration::default()); - if duration.as_secs() > 10 { - duration = Duration::from_secs(10); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); } let mut delay = tokio::time::delay_for(duration); tokio::select! { diff --git a/src/database.rs b/src/database.rs index 250de23..a837638 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,5 +1,4 @@ pub(self) mod account_data; -pub(self) mod global_edus; pub(self) mod globals; pub(self) mod key_backups; pub(self) mod media; @@ -22,7 +21,6 @@ pub struct Database { pub uiaa: uiaa::Uiaa, pub rooms: rooms::Rooms, pub account_data: account_data::AccountData, - pub global_edus: global_edus::GlobalEdus, pub media: media::Media, pub key_backups: key_backups::KeyBackups, pub _db: sled::Db, @@ -93,6 +91,8 @@ impl Database { roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest")?, // Read receipts roomactiveid_userid: db.open_tree("roomactiveid_userid")?, // Typing notifs roomid_lastroomactiveupdate: db.open_tree("roomid_lastroomactiveupdate")?, + presenceid_presence: db.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: db.open_tree("userid_lastpresenceupdate")?, }, pduid_pdu: db.open_tree("pduid_pdu")?, eventid_pduid: db.open_tree("eventid_pduid")?, @@ -112,9 +112,6 @@ impl Database { account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, }, - global_edus: global_edus::GlobalEdus { - presenceid_presence: db.open_tree("presenceid_presence")?, // Presence - }, media: media::Media { mediaid_file: db.open_tree("mediaid_file")?, }, @@ -146,9 +143,6 @@ impl Database { .watch_prefix(&userdeviceid_prefix), ); - // TODO: only send for user they share a room with - futures.push(self.global_edus.presenceid_presence.watch_prefix(b"")); - futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); futures.push(self.rooms.userroomid_invited.watch_prefix(&userid_prefix)); futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); diff --git a/src/database/global_edus.rs b/src/database/global_edus.rs deleted file mode 100644 index 94f2de8..0000000 --- a/src/database/global_edus.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::{Error, Result}; -use ruma::Raw; - -pub struct GlobalEdus { - //pub globalallid_globalall: sled::Tree, // ToDevice, GlobalAllId = UserId + Count - pub(super) presenceid_presence: sled::Tree, // Presence, PresenceId = Count + UserId -} - -impl GlobalEdus { - /// Adds a global event which will be saved until a new event replaces it (e.g. presence updates). 
- pub fn update_presence( - &self, - presence: ruma::events::presence::PresenceEvent, - globals: &super::globals::Globals, - ) -> Result<()> { - // Remove old entry - if let Some(old) = self - .presenceid_presence - .iter() - .keys() - .rev() - .filter_map(|r| r.ok()) - .find(|key| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == presence.sender.to_string().as_bytes() - }) - { - // This is the old global_latest - self.presenceid_presence.remove(old)?; - } - - let mut presence_id = globals.next_count()?.to_be_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); - - self.presenceid_presence.insert( - presence_id, - &*serde_json::to_string(&presence).expect("PresenceEvent can be serialized"), - )?; - - Ok(()) - } - - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. - pub fn presence_since( - &self, - since: u64, - ) -> Result>>> { - let first_possible_edu = (since + 1).to_be_bytes().to_vec(); // +1 so we don't send the event at since - - Ok(self - .presenceid_presence - .range(&*first_possible_edu..) - .filter_map(|r| r.ok()) - .map(|(_, v)| { - Ok(serde_json::from_slice(&v) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?) - })) - } -} diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 22d0166..62df0cc 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,15 +1,25 @@ use crate::{utils, Error, Result}; +use js_int::UInt; use ruma::{ - events::{AnyEvent as EduEvent, SyncEphemeralRoomEvent}, + events::{ + presence::{PresenceEvent, PresenceEventContent}, + AnyEvent as EduEvent, SyncEphemeralRoomEvent, + }, + presence::PresenceState, Raw, RoomId, UserId, }; -use std::convert::TryFrom; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, +}; pub struct RoomEdus { pub(in super::super) roomuserid_lastread: sled::Tree, // RoomUserId = Room + User pub(in super::super) roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId pub(in super::super) roomactiveid_userid: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count pub(in super::super) roomid_lastroomactiveupdate: sled::Tree, // LastRoomActiveUpdate = Count + pub(in super::super) presenceid_presence: sled::Tree, // PresenceId = RoomId + Count + UserId + pub(in super::super) userid_lastpresenceupdate: sled::Tree, // LastPresenceUpdate = Count } impl RoomEdus { @@ -263,4 +273,181 @@ impl RoomEdus { })?)) }) } + + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + pub fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: ruma::events::presence::PresenceEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
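        // Storage layout: `presenceid_presence` is keyed as
        // `room_id + 0xff + big-endian count + 0xff + sender user id`, so
        // `presence_since` below can range-scan a single room starting at `since + 1`.
        // `userid_lastpresenceupdate` keeps the last ping in milliseconds since the
        // unix epoch and drives the offline sweep in `presence_maintain`.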
+ + let count = globals.next_count()?.to_be_bytes(); + + let mut presence_id = room_id.to_string().as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); + + self.presenceid_presence.insert( + presence_id, + &*serde_json::to_string(&presence).expect("PresenceEvent can be serialized"), + )?; + + self.userid_lastpresenceupdate.insert( + &user_id.to_string().as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Resets the presence timeout, so the user will stay in their current presence state. + pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + self.userid_lastpresenceupdate.insert( + &user_id.to_string().as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.userid_lastpresenceupdate + .get(&user_id.to_string().as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + }) + .transpose() + } + + /// Sets all users to offline who have been quiet for too long. + pub fn presence_maintain( + &self, + rooms: &super::Rooms, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let current_timestamp = utils::millis_since_unix_epoch(); + + for (user_id_bytes, last_timestamp) in self + .userid_lastpresenceupdate + .iter() + .filter_map(|r| r.ok()) + .filter_map(|(k, bytes)| { + Some(( + k, + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + .ok()?, + )) + }) + .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) // 5 Minutes + { + self.userid_lastpresenceupdate.remove(&user_id_bytes)?; + + // Send new presence events to set the user offline + let count = globals.next_count()?.to_be_bytes(); + let user_id = utils::string_from_bytes(&user_id_bytes) + .map_err(|_| { + Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") + })? + .try_into() + .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; + for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + let mut presence_id = room_id.to_string().as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(&user_id_bytes); + + self.presenceid_presence.insert( + presence_id, + &*serde_json::to_string(&PresenceEvent { + content: PresenceEventContent { + avatar_url: None, + currently_active: None, + displayname: None, + last_active_ago: Some( + last_timestamp.try_into().expect("time is valid"), + ), + presence: PresenceState::Offline, + status_msg: None, + }, + sender: user_id.clone(), + }) + .expect("PresenceEvent can be serialized"), + )?; + } + } + + Ok(()) + } + + /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
+ pub fn presence_since( + &self, + room_id: &RoomId, + since: u64, + rooms: &super::Rooms, + globals: &super::super::globals::Globals, + ) -> Result> { + self.presence_maintain(rooms, globals)?; + + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + let mut hashmap = HashMap::new(); + + for (key, value) in self + .presenceid_presence + .range(&*first_possible_edu..) + .filter_map(|r| r.ok()) + .take_while(|(key, _)| key.starts_with(&prefix)) + { + let user_id = UserId::try_from( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, + ) + .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; + + let mut presence = serde_json::from_slice::(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + hashmap.insert(user_id, presence); + } + + Ok(hashmap) + } } From 310b0fcd869449e0206e85817a4703a6d882ced2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 28 Jul 2020 22:21:57 +0200 Subject: [PATCH 0199/1727] fix ci --- sytest/sytest-whitelist | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index ec2aba4..3c1095b 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -36,7 +36,6 @@ Current state appears in timeline in private history Current state appears in timeline in private history with many messages before Deleted tags appear in an incremental v2 /sync Deleting a non-existent alias should return a 404 -Device messages over federation wake up /sync Device messages wake up /sync Events come down the correct room GET /device/{deviceId} @@ -120,6 +119,6 @@ User in shared private room does appear in user directory User is offline if they set_presence=offline in their sync Users with sufficient power-level can delete other's aliases Version responds 200 OK with valid structure -Wildcard device messages over federation wake up /sync +We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) Wildcard device messages wake up /sync query for user with no keys returns empty key dict From 069338776919c88f6f90fbd0a6be3fa6b26c2de2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Jul 2020 17:03:04 +0200 Subject: [PATCH 0200/1727] improvement: more efficient /sync and only send device updates when sharing a room --- src/client_server.rs | 80 +++++++++++++++++++++++++++---------------- src/database/rooms.rs | 37 ++++++-------------- src/database/users.rs | 57 +++++++++++++++++++++++------- 3 files changed, 105 insertions(+), 69 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index de76eef..cd61746 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, 
BTreeMap, HashMap}, + collections::{hash_map, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, time::{Duration, SystemTime}, }; @@ -898,7 +898,7 @@ pub fn upload_keys_route( // This check is needed to assure that signatures are kept if db.users.get_device_keys(sender_id, device_id)?.is_none() { db.users - .add_device_keys(sender_id, device_id, device_keys, &db.globals)?; + .add_device_keys(sender_id, device_id, device_keys, &db.rooms, &db.globals)?; } } @@ -2518,20 +2518,41 @@ pub async fn sync_events_route( .unwrap_or(0); let mut presence_updates = HashMap::new(); + let mut device_list_updates = HashSet::new(); for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - let mut pdus = db + let mut non_timeline_pdus = db .rooms .pdus_since(&sender_id, &room_id, since)? - .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|r| r.ok()); // Filter out buggy events + + // Take the last 10 events for the timeline + let timeline_pdus = non_timeline_pdus + .by_ref() + .rev() + .take(10) + .collect::>() + .into_iter() + .rev() .collect::>(); + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + //let mut limited = false; + + let mut state_pdus = Vec::new(); + for pdu in non_timeline_pdus { + if pdu.state_key.is_some() { + state_pdus.push(pdu); + } + } + let mut send_member_count = false; let mut joined_since_last_sync = false; let mut send_notification_counts = false; - for pdu in &pdus { + for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)?.filter_map(|r| r.ok()) { send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; @@ -2544,8 +2565,8 @@ pub async fn sync_events_route( .map_err(|_| Error::bad_database("Invalid PDU in database."))?; if content.membership == ruma::events::room::member::MembershipState::Join { joined_since_last_sync = true; - // Both send_member_count and joined_since_last_sync are set. There's nothing more - // to do + // Both send_member_count and joined_since_last_sync are set. There's + // nothing more to do break; } } @@ -2574,7 +2595,7 @@ pub async fn sync_events_route( let content = serde_json::from_value::< Raw, >(pdu.content.clone()) - .map_err(|_| Error::bad_database("Invalid member event in database."))? + .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; @@ -2592,7 +2613,7 @@ pub async fn sync_events_route( .content .clone(), ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? + .expect("Raw::from_value always works") .deserialize() .map_err(|_| { Error::bad_database("Invalid member event in database.") @@ -2659,15 +2680,7 @@ pub async fn sync_events_route( None }; - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are enough events - let mut limited = true; - pdus = pdus.split_off(pdus.len().checked_sub(10).unwrap_or_else(|| { - limited = false; - 0 - })); - - let prev_batch = pdus.first().map_or(Ok::<_, Error>(None), |e| { + let prev_batch = timeline_pdus.first().map_or(Ok::<_, Error>(None), |e| { Ok(Some( db.rooms .get_pdu_count(&e.event_id)? 
@@ -2676,7 +2689,7 @@ pub async fn sync_events_route( )) })?; - let room_events = pdus + let room_events = timeline_pdus .into_iter() .map(|pdu| pdu.to_sync_room_event()) .collect::>(); @@ -2728,7 +2741,7 @@ pub async fn sync_events_route( notification_count, }, timeline: sync_events::Timeline { - limited: limited || joined_since_last_sync, + limited: false || joined_since_last_sync, prev_batch, events: room_events, }, @@ -2751,6 +2764,13 @@ pub async fn sync_events_route( joined_rooms.insert(room_id.clone(), joined_room); } + // Look for device list updates in this room + device_list_updates.extend( + db.users + .keys_changed(&room_id, since) + .filter_map(|r| r.ok()), + ); + // Take presence updates from this room for (user_id, presence) in db.rooms @@ -2885,14 +2905,7 @@ pub async fn sync_events_route( .collect::>(), }, device_lists: sync_events::DeviceLists { - changed: if since != 0 { - db.users - .keys_changed(since) - .filter_map(|u| u.ok()) - .collect() // Filter out buggy events - } else { - Vec::new() - }, + changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO }, device_one_time_keys_count: Default::default(), // TODO @@ -3450,6 +3463,7 @@ pub fn upload_signing_keys_route( &master_key, &body.self_signing_key, &body.user_signing_key, + &db.rooms, &db.globals, )?; } @@ -3500,8 +3514,14 @@ pub fn upload_signatures_route( ))? .to_owned(), ); - db.users - .sign_key(&user_id, &key_id, signature, &sender_id, &db.globals)?; + db.users.sign_key( + &user_id, + &key_id, + signature, + &sender_id, + &db.rooms, + &db.globals, + )?; } } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fe5721c..4cd47a1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -611,44 +611,29 @@ impl Rooms { self.pdus_since(user_id, room_id, 0) } - /// Returns an iterator over all events in a room that happened after the event with id `since`. + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in reverse-chronological order. pub fn pdus_since( &self, user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result>> { - // Create the first part of the full pdu id - let mut pdu_id = room_id.to_string().as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&(since).to_be_bytes()); - - self.pdus_since_pduid(user_id, room_id, &pdu_id) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since`. - pub fn pdus_since_pduid( - &self, - user_id: &UserId, - room_id: &RoomId, - pdu_id: &[u8], - ) -> Result>> { - // Create the first part of the full pdu id + ) -> Result>> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); + // Skip the first pdu if it's exactly at since, because we sent that last time + let mut first_pdu_id = prefix.clone(); + first_pdu_id.extend_from_slice(&(since+1).to_be_bytes()); + + let mut last_pdu_id = prefix.clone(); + last_pdu_id.extend_from_slice(&u64::MAX.to_be_bytes()); + let user_id = user_id.clone(); Ok(self .pduid_pdu - .range(pdu_id..) 
- // Skip the first pdu if it's exactly at since, because we sent that last time - .skip(if self.pduid_pdu.get(pdu_id)?.is_some() { - 1 - } else { - 0 - }) + .range(first_pdu_id..last_pdu_id) .filter_map(|r| r.ok()) - .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(_, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; diff --git a/src/database/users.rs b/src/database/users.rs index 5030f32..7fbdd80 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{AnyToDeviceEvent, EventType}, - DeviceId, Raw, UserId, + DeviceId, Raw, UserId, RoomId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; @@ -22,7 +22,7 @@ pub struct Users { pub(super) token_userdeviceid: sled::Tree, pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId - pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = Count + pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = RoomId + Count pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) pub(super) userid_masterkeyid: sled::Tree, pub(super) userid_selfsigningkeyid: sled::Tree, @@ -371,6 +371,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, device_keys: &DeviceKeys, + rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); @@ -382,8 +383,15 @@ impl Users { &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"), )?; - self.keychangeid_userid - .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; + let count = globals.next_count()?.to_be_bytes(); + for room_id in rooms.rooms_joined(&user_id) { + let mut key = room_id?.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + + self.keychangeid_userid + .insert(key, &*user_id.to_string())?; + } Ok(()) } @@ -394,6 +402,7 @@ impl Users { master_key: &CrossSigningKey, self_signing_key: &Option, user_signing_key: &Option, + rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { // TODO: Check signatures @@ -482,8 +491,15 @@ impl Users { .insert(&*user_id.to_string(), user_signing_key_key)?; } - self.keychangeid_userid - .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?; + let count = globals.next_count()?.to_be_bytes(); + for room_id in rooms.rooms_joined(&user_id) { + let mut key = room_id?.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + + self.keychangeid_userid + .insert(key, &*user_id.to_string())?; + } Ok(()) } @@ -494,6 +510,7 @@ impl Users { key_id: &str, signature: (String, String), sender_id: &UserId, + rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_id.to_string().as_bytes().to_vec(); @@ -525,19 +542,33 @@ impl Users { .expect("CrossSigningKey::to_string always works"), )?; - self.keychangeid_userid - .insert(globals.next_count()?.to_be_bytes(), &*target_id.to_string())?; + // TODO: Should we notify about this change? 
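        // As in `add_device_keys` and `add_cross_signing_keys` above, the change is
        // recorded once per room the target user is joined to (key:
        // `room_id + 0xff + count`), so the per-room `keys_changed` scan in /sync
        // only surfaces device-list updates to users who share a room with them.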
+ let count = globals.next_count()?.to_be_bytes(); + for room_id in rooms.rooms_joined(&target_id) { + let mut key = room_id?.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + + self.keychangeid_userid + .insert(key, &*target_id.to_string())?; + } Ok(()) } - pub fn keys_changed(&self, since: u64) -> impl Iterator> { + pub fn keys_changed(&self, room_id: &RoomId, since: u64) -> impl Iterator> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + let mut start = prefix.clone(); + start.extend_from_slice(&(since + 1).to_be_bytes()); + self.keychangeid_userid - .range((since + 1).to_be_bytes()..) - .values() - .map(|bytes| { + .range(start..) + .filter_map(|r| r.ok()) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(_, bytes)| { Ok( - UserId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database( "User ID in devicekeychangeid_userid is invalid unicode.", ) From 66bc25fcd36940bb32c1873c0beed3335604bd55 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Jul 2020 17:37:26 +0200 Subject: [PATCH 0201/1727] feat: implement /keys/changes --- Cargo.lock | 70 +++++++++++++++++++------------------- src/client_server.rs | 43 +++++++++++++++++++++-- src/database/rooms.rs | 6 ++-- src/database/rooms/edus.rs | 3 +- src/database/users.rs | 23 ++++++++----- src/main.rs | 1 + 6 files changed, 96 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e40fd09..225dc09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -214,9 +214,9 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40636046a60a45ee5185e885a3ccb771f7a2065fb7cbcc2a7ecfd9896d1c365" +checksum = "db7a1029718df60331e557c9e83a55523c955e5dd2a7bfeffad6bbd50b538ae9" [[package]] name = "byteorder" @@ -814,9 +814,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.7" +version = "0.23.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2397fc43bd5648b7117aabb3c5e62d0e62c194826ec77b0b4d0c41e62744635" +checksum = "543904170510c1b5fb65140485d84de4a57fddb2ed685481e9020ce3d2c9f64c" dependencies = [ "bytemuck", "byteorder", @@ -882,9 +882,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.42" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52732a3d3ad72c58ad2dc70624f9c17b46ecd0943b9a4f1ee37c4c18c5d983e2" +checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" dependencies = [ "wasm-bindgen", ] @@ -916,9 +916,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9" +checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" [[package]] name = "lock_api" @@ -1239,18 +1239,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = 
"ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" dependencies = [ "proc-macro2", "quote", @@ -1305,9 +1305,9 @@ checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" [[package]] name = "proc-macro-nested" @@ -1485,7 +1485,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" dependencies = [ "async-trait", "atomic", @@ -1510,7 +1510,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" dependencies = [ "devise", "glob", @@ -1522,7 +1522,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f383f15047d0dda71dd21399dfea96161fe2bd0e" +source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" dependencies = [ "cookie", "http", @@ -1838,9 +1838,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +checksum = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c" dependencies = [ "itoa", "ryu", @@ -2029,9 +2029,9 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" +checksum = "4cdb98bcb1f9d81d07b536179c269ea15999b5d14ea958196413869445bb5250" dependencies = [ "proc-macro2", "quote", @@ -2339,9 +2339,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.65" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3edbcc9536ab7eababcc6d2374a0b7bfe13a2b6d562c5e07f370456b1a8f33d" +checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" dependencies = [ "cfg-if", "serde", @@ -2351,9 +2351,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.65" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ed2fb8c84bfad20ea66b26a3743f3e7ba8735a69fe7d95118c33ec8fc1244d" +checksum = 
"bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" dependencies = [ "bumpalo", "lazy_static", @@ -2366,9 +2366,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.15" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ad6e4e8b2b7f8c90b6e09a9b590ea15cb0d1dbe28502b5a405cd95d1981671" +checksum = "95f8d235a77f880bcef268d379810ea6c0af2eacfa90b1ad5af731776e0c4699" dependencies = [ "cfg-if", "js-sys", @@ -2378,9 +2378,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.65" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb071268b031a64d92fc6cf691715ca5a40950694d8f683c5bb43db7c730929e" +checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2388,9 +2388,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.65" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf592c807080719d1ff2f245a687cbadb3ed28b2077ed7084b47aba8b691f2c6" +checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" dependencies = [ "proc-macro2", "quote", @@ -2401,15 +2401,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.65" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6c0220ded549d63860c78c38f3bcc558d1ca3f4efa74942c536ddbbb55e87" +checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" [[package]] name = "web-sys" -version = "0.3.42" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be2398f326b7ba09815d0b403095f34dd708579220d099caae89be0b32137b2" +checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/src/client_server.rs b/src/client_server.rs index cd61746..0536c8e 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -35,7 +35,7 @@ use ruma::{ set_room_visibility, }, filter::{self, create_filter, get_filter}, - keys::{self, claim_keys, get_keys, upload_keys}, + keys::{self, claim_keys, get_key_changes, get_keys, upload_keys}, media::{create_content, get_content, get_content_thumbnail, get_media_config}, membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, @@ -2552,7 +2552,11 @@ pub async fn sync_events_route( let mut send_member_count = false; let mut joined_since_last_sync = false; let mut send_notification_counts = false; - for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)?.filter_map(|r| r.ok()) { + for pdu in db + .rooms + .pdus_since(&sender_id, &room_id, since)? 
+ .filter_map(|r| r.ok()) + { send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; @@ -2767,7 +2771,7 @@ pub async fn sync_events_route( // Look for device list updates in this room device_list_updates.extend( db.users - .keys_changed(&room_id, since) + .keys_changed(&room_id, since, None) .filter_map(|r| r.ok()), ); @@ -3529,6 +3533,39 @@ pub fn upload_signatures_route( Ok(upload_signatures::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/keys/changes", data = "") +)] +pub fn get_key_changes_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut device_list_updates = HashSet::new(); + for room_id in db.rooms.rooms_joined(sender_id).filter_map(|r| r.ok()) { + device_list_updates.extend( + db.users + .keys_changed( + &room_id, + body.from.parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") + })?, + Some(body.to.parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.") + })?), + ) + .filter_map(|r| r.ok()), + ); + } + Ok(get_key_changes::Response { + changed: device_list_updates.into_iter().collect(), + left: Vec::new(), // TODO + } + .into()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub fn pushers_route() -> ConduitResult { Ok(get_pushers::Response { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4cd47a1..7895f02 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -611,8 +611,8 @@ impl Rooms { self.pdus_since(user_id, room_id, 0) } - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in reverse-chronological order. + /// Returns a double-ended iterator over all events in a room that happened after the event with id `since` + /// in chronological order. 
pub fn pdus_since( &self, user_id: &UserId, @@ -624,7 +624,7 @@ impl Rooms { // Skip the first pdu if it's exactly at since, because we sent that last time let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since+1).to_be_bytes()); + first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); let mut last_pdu_id = prefix.clone(); last_pdu_id.extend_from_slice(&u64::MAX.to_be_bytes()); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 62df0cc..fff30c2 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -352,7 +352,8 @@ impl RoomEdus { .ok()?, )) }) - .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) // 5 Minutes + .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) + // 5 Minutes { self.userid_lastpresenceupdate.remove(&user_id_bytes)?; diff --git a/src/database/users.rs b/src/database/users.rs index 7fbdd80..7fb9767 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{AnyToDeviceEvent, EventType}, - DeviceId, Raw, UserId, RoomId, + DeviceId, Raw, RoomId, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; @@ -389,8 +389,7 @@ impl Users { key.push(0xff); key.extend_from_slice(&count); - self.keychangeid_userid - .insert(key, &*user_id.to_string())?; + self.keychangeid_userid.insert(key, &*user_id.to_string())?; } Ok(()) @@ -497,8 +496,7 @@ impl Users { key.push(0xff); key.extend_from_slice(&count); - self.keychangeid_userid - .insert(key, &*user_id.to_string())?; + self.keychangeid_userid.insert(key, &*user_id.to_string())?; } Ok(()) @@ -556,14 +554,23 @@ impl Users { Ok(()) } - pub fn keys_changed(&self, room_id: &RoomId, since: u64) -> impl Iterator> { + pub fn keys_changed( + &self, + room_id: &RoomId, + from: u64, + to: Option, + ) -> impl Iterator> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); + let mut start = prefix.clone(); - start.extend_from_slice(&(since + 1).to_be_bytes()); + start.extend_from_slice(&(from + 1).to_be_bytes()); + + let mut end = prefix.clone(); + end.extend_from_slice(&to.unwrap_or(u64::MAX).to_be_bytes()); self.keychangeid_userid - .range(start..) 
+ .range(start..end) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, bytes)| { diff --git a/src/main.rs b/src/main.rs index cc30ff6..86d8446 100644 --- a/src/main.rs +++ b/src/main.rs @@ -108,6 +108,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::options_route, client_server::upload_signing_keys_route, client_server::upload_signatures_route, + client_server::get_key_changes_route, client_server::pushers_route, client_server::set_pushers_route, //server_server::well_known_server, From e0d0fb4703de4fd245ad193bc8b109ccad1939da Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Jul 2020 20:44:06 +0200 Subject: [PATCH 0202/1727] fix: only send device_one_time_keys_count when there are updates --- src/client_server.rs | 10 +++++++--- src/database.rs | 1 + src/database/users.rs | 25 +++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 0536c8e..4a2f33b 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -890,7 +890,7 @@ pub fn upload_keys_route( if let Some(one_time_keys) = &body.one_time_keys { for (key_key, key_value) in one_time_keys { db.users - .add_one_time_key(sender_id, device_id, key_key, key_value)?; + .add_one_time_key(sender_id, device_id, key_key, key_value, &db.globals)?; } } @@ -1002,7 +1002,7 @@ pub fn claim_keys_route( for (device_id, key_algorithm) in map { if let Some(one_time_keys) = db.users - .take_one_time_key(user_id, device_id, key_algorithm)? + .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); @@ -2912,7 +2912,11 @@ pub async fn sync_events_route( changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO }, - device_one_time_keys_count: Default::default(), // TODO + device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since { + db.users.count_one_time_keys(sender_id, device_id)? 
+ } else { + BTreeMap::new() + }, to_device: sync_events::ToDevice { events: db.users.get_to_device_events(sender_id, device_id)?, }, diff --git a/src/database.rs b/src/database.rs index a837638..5a1ed0f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -75,6 +75,7 @@ impl Database { userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?, token_userdeviceid: db.open_tree("token_userdeviceid")?, onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: db.open_tree("userid_lastonetimekeyupdate")?, keychangeid_userid: db.open_tree("devicekeychangeid_userid")?, keyid_key: db.open_tree("keyid_key")?, userid_masterkeyid: db.open_tree("userid_masterkeyid")?, diff --git a/src/database/users.rs b/src/database/users.rs index 7fb9767..c792767 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -22,6 +22,7 @@ pub struct Users { pub(super) token_userdeviceid: sled::Tree, pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId + pub(super) userid_lastonetimekeyupdate: sled::Tree, // LastOneTimeKeyUpdate = Count pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = RoomId + Count pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) pub(super) userid_masterkeyid: sled::Tree, @@ -270,6 +271,7 @@ impl Users { device_id: &DeviceId, one_time_key_key: &AlgorithmAndDeviceId, one_time_key_value: &OneTimeKey, + globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); @@ -294,14 +296,32 @@ impl Users { .expect("OneTimeKey::to_string always works"), )?; + self.userid_lastonetimekeyupdate.insert( + &user_id.to_string().as_bytes(), + &globals.next_count()?.to_be_bytes(), + )?; + Ok(()) } + pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + self + .userid_lastonetimekeyupdate + .get(&user_id.to_string().as_bytes())? 
+ .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") + }) + }) + .unwrap_or(Ok(0)) + } + pub fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &KeyAlgorithm, + globals: &super::globals::Globals, ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -311,6 +331,11 @@ impl Users { prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); prefix.push(b':'); + self.userid_lastonetimekeyupdate.insert( + &user_id.to_string().as_bytes(), + &globals.next_count()?.to_be_bytes(), + )?; + self.onetimekeyid_onetimekeys .scan_prefix(&prefix) .next() From 62df9ca5803d049e5b255474220da77fb49b954e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 25 Jul 2020 23:56:50 -0400 Subject: [PATCH 0203/1727] Fix all clippy warnings, shorten line len in client_server --- src/client_server.rs | 185 ++++++++++++++++++++++++++++-------------- src/database/media.rs | 11 +-- src/database/rooms.rs | 55 ++++++------- src/database/uiaa.rs | 2 +- src/database/users.rs | 2 +- 5 files changed, 155 insertions(+), 100 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 4a2f33b..6f9688f 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -230,7 +230,7 @@ pub fn register_route( Ok(register::Response { access_token: Some(token), user_id, - device_id: Some(device_id.into()), + device_id: Some(device_id), } .into()) } @@ -257,11 +257,22 @@ pub fn login_route( if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = (body.user.clone(), body.login_info.clone()) { - let user_id = UserId::parse_with_server_name(username, db.globals.server_name()).map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; - let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password."))?; + let user_id = UserId::parse_with_server_name(username, db.globals.server_name()) + .map_err(|_| Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid." + ))?; + let hash = db.users.password_hash(&user_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password." + ))?; if hash.is_empty() { - return Err(Error::BadRequest(ErrorKind::UserDeactivated, "The user has been deactivated")); + return Err(Error::BadRequest( + ErrorKind::UserDeactivated, + "The user has been deactivated" + )); } let hash_matches = @@ -298,7 +309,7 @@ pub fn login_route( user_id, access_token: token, home_server: Some(db.globals.server_name().to_owned()), - device_id: device_id.into(), + device_id, well_known: None, } .into()) @@ -1702,8 +1713,7 @@ pub fn leave_room_route( ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? - .content - .clone(), + .content, ) .map_err(|_| Error::bad_database("Invalid member event in database."))? .deserialize() @@ -1746,8 +1756,7 @@ pub fn kick_user_route( ErrorKind::BadState, "Cannot kick member that's not in the room.", ))? - .content - .clone(), + .content, ) .expect("Raw::from_value always works") .deserialize() @@ -1882,8 +1891,7 @@ pub fn unban_user_route( ErrorKind::BadState, "Cannot unban a user who is not banned.", ))? - .content - .clone(), + .content, ) .map_err(|_| Error::bad_database("Invalid member event in database."))? 
.deserialize() @@ -2049,6 +2057,8 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, _body: Ruma, ) -> ConduitResult { + use ruma::events::room; + let mut chunk = db .rooms .public_rooms() @@ -2060,63 +2070,110 @@ pub async fn get_public_rooms_filtered_route( let chunk = directory::PublicRoomsChunk { aliases: Vec::new(), - canonical_alias: state.get(&(EventType::RoomCanonicalAlias, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::< - Raw, + canonical_alias: state + .get(&(EventType::RoomCanonicalAlias, "".to_owned())) + .map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::< + Raw, >(s.content.clone()) - .map_err(|_| Error::bad_database("Invalid canonical alias event in database."))? + .map_err(|_| { + Error::bad_database("Invalid canonical alias event in database.") + })? .deserialize() - .map_err(|_| Error::bad_database("Invalid canonical alias event in database."))? + .map_err(|_| { + Error::bad_database("Invalid canonical alias event in database.") + })? .alias) - })?, - name: state.get(&(EventType::RoomName, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::>( - s.content.clone(), - ) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .name() - .map(|n| n.to_owned())) - })?, + })?, + name: state.get(&(EventType::RoomName, "".to_owned())).map_or( + Ok::<_, Error>(None), + |s| { + Ok(serde_json::from_value::>( + s.content.clone(), + ) + .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .deserialize() + .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .name() + .map(|n| n.to_owned())) + }, + )?, num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), room_id, - topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or(Ok::<_, Error>(None), |s| { - Ok(Some(serde_json::from_value::< - Raw, + topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or( + Ok::<_, Error>(None), + |s| { + Ok(Some( + serde_json::from_value::>( + s.content.clone(), + ) + .map_err(|_| { + Error::bad_database("Invalid room topic event in database.") + })? + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room topic event in database.") + })? + .topic, + )) + }, + )?, + world_readable: state + .get(&(EventType::RoomHistoryVisibility, "".to_owned())) + .map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< + Raw, >(s.content.clone()) - .map_err(|_| Error::bad_database("Invalid room topic event in database."))? + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? .deserialize() - .map_err(|_| Error::bad_database("Invalid room topic event in database."))? - .topic)) - })?, - world_readable: state.get(&(EventType::RoomHistoryVisibility, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))? 
- .history_visibility == history_visibility::HistoryVisibility::WorldReadable) - })?, - guest_can_join: state.get(&(EventType::RoomGuestAccess, "".to_owned())).map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .map_err(|_| Error::bad_database("Invalid room guest access event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid room guest access event in database."))? - .guest_access == guest_access::GuestAccess::CanJoin) - })?, - avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( Ok::<_, Error>(None),|s| { - Ok(Some(serde_json::from_value::< - Raw, - >(s.content.clone()) - .map_err(|_| Error::bad_database("Invalid room avatar event in database."))? - .deserialize() - .map_err(|_| Error::bad_database("Invalid room avatar event in database."))? - .url)) - })?, + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? + .history_visibility + == history_visibility::HistoryVisibility::WorldReadable) + })?, + guest_can_join: state + .get(&(EventType::RoomGuestAccess, "".to_owned())) + .map_or(Ok::<_, Error>(false), |s| { + Ok( + serde_json::from_value::< + Raw, + >(s.content.clone()) + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + })? + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + })? + .guest_access + == guest_access::GuestAccess::CanJoin, + ) + })?, + avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( + Ok::<_, Error>(None), + |s| { + Ok(Some( + serde_json::from_value::>( + s.content.clone(), + ) + .map_err(|_| { + Error::bad_database("Invalid room avatar event in database.") + })? + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room avatar event in database.") + })? + .url, + )) + }, + )?, }; Ok::<_, Error>(chunk) }) @@ -2338,7 +2395,11 @@ pub fn create_state_event_for_key_route( .filter(|room| room == &body.room_id) // Make sure it's the right room .is_none() { - return Err(Error::BadRequest(ErrorKind::Forbidden, "You are only allowed to send canonical_alias events when it's aliases already exists")); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are only allowed to send canonical_alias \ + events when it's aliases already exists", + )); } } } diff --git a/src/database/media.rs b/src/database/media.rs index 0d0820d..cec74a6 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; use std::mem; +pub type FileMeta = (Option, String, Vec); + pub struct Media { pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + Filename + ContentType } @@ -29,7 +31,7 @@ impl Media { } /// Downloads a file. - pub fn get(&self, mxc: String) -> Result, String, Vec)>> { + pub fn get(&self, mxc: String) -> Result> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail @@ -66,12 +68,7 @@ impl Media { } /// Downloads a file's thumbnail. 
- pub fn get_thumbnail( - &self, - mxc: String, - width: u32, - height: u32, - ) -> Result, String, Vec)>> { + pub fn get_thumbnail(&self, mxc: String, width: u32, height: u32) -> Result> { let mut main_prefix = mxc.as_bytes().to_vec(); main_prefix.push(0xff); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7895f02..ae0d084 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -250,6 +250,7 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. + #[allow(clippy::too_many_arguments, clippy::blocks_in_if_conditions)] pub fn append_pdu( &self, room_id: RoomId, @@ -288,7 +289,7 @@ impl Rooms { }, |power_levels| { Ok(serde_json::from_value::>( - power_levels.content.clone(), + power_levels.content, ) .expect("Raw::from_value always works.") .deserialize() @@ -298,13 +299,13 @@ impl Rooms { let sender_membership = self .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok(serde_json::from_value::>( - pdu.content.clone(), + Ok( + serde_json::from_value::>(pdu.content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership, ) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership) })?; let sender_power = power_levels.users.get(&sender).map_or_else( @@ -341,7 +342,7 @@ impl Rooms { )? .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { Ok(serde_json::from_value::>( - pdu.content.clone(), + pdu.content, ) .expect("Raw::from_value always works.") .deserialize() @@ -373,7 +374,7 @@ impl Rooms { .map_or(Ok::<_, Error>(join_rules::JoinRule::Public), |pdu| { Ok(serde_json::from_value::< Raw, - >(pdu.content.clone()) + >(pdu.content) .expect("Raw::from_value always works.") .deserialize() .map_err(|_| { @@ -501,7 +502,7 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert("prev_content".to_owned(), prev_pdu.content); unsigned.insert( "prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), @@ -575,28 +576,24 @@ impl Rooms { self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } - match event_type { - EventType::RoomRedaction => { - if let Some(redact_id) = &redacts { - // TODO: Reason - let _reason = - serde_json::from_value::>(content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - })? - .reason; + if let EventType::RoomRedaction = event_type { + if let Some(redact_id) = &redacts { + // TODO: Reason + let _reason = + serde_json::from_value::>(content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })? 
+ .reason; - self.redact_pdu(&redact_id)?; - } + self.redact_pdu(&redact_id)?; } - _ => {} } - self.edus.room_read_set(&room_id, &sender, index)?; Ok(pdu.event_id) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 4366eb2..cece8db 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -42,7 +42,7 @@ impl Uiaa { .map(|session| { Ok::<_, Error>(self.get_uiaa_session(&user_id, &device_id, session)?) }) - .unwrap_or(Ok(uiaainfo.clone()))?; + .unwrap_or_else(|| Ok(uiaainfo.clone()))?; // Find out what the user completed match &**kind { diff --git a/src/database/users.rs b/src/database/users.rs index c792767..1ec677c 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -502,7 +502,7 @@ impl Users { )); } - let mut user_signing_key_key = prefix.clone(); + let mut user_signing_key_key = prefix; user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); self.keyid_key.insert( From 2da48b941d9fbf013b807bfcfbe2f101d34ecfb7 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 27 Jul 2020 19:46:38 -0400 Subject: [PATCH 0204/1727] Convert all map_err -> expect when deserializing to Raw --- src/client_server.rs | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 6f9688f..bac4b25 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -676,7 +676,7 @@ pub fn set_displayname_route( .content .clone(), ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::bad_database("Database contains invalid PDU."))? }) @@ -772,7 +772,7 @@ pub fn set_avatar_url_route( .content .clone(), ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::bad_database("Database contains invalid PDU."))? }) @@ -1715,7 +1715,7 @@ pub fn leave_room_route( ))? .content, ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; @@ -1846,7 +1846,7 @@ pub fn ban_user_route( }), |event| { let mut event = serde_json::from_value::>( - event.content.clone(), + event.content, ) .expect("Raw::from_value always works") .deserialize() @@ -1893,7 +1893,7 @@ pub fn unban_user_route( ))? .content, ) - .map_err(|_| Error::bad_database("Invalid member event in database."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; @@ -2076,9 +2076,7 @@ pub async fn get_public_rooms_filtered_route( Ok(serde_json::from_value::< Raw, >(s.content.clone()) - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") - })? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid canonical alias event in database.") @@ -2091,7 +2089,7 @@ pub async fn get_public_rooms_filtered_route( Ok(serde_json::from_value::>( s.content.clone(), ) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
.name() @@ -2107,9 +2105,7 @@ pub async fn get_public_rooms_filtered_route( serde_json::from_value::>( s.content.clone(), ) - .map_err(|_| { - Error::bad_database("Invalid room topic event in database.") - })? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room topic event in database.") @@ -2124,11 +2120,7 @@ pub async fn get_public_rooms_filtered_route( Ok(serde_json::from_value::< Raw, >(s.content.clone()) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database( @@ -2145,9 +2137,7 @@ pub async fn get_public_rooms_filtered_route( serde_json::from_value::< Raw, >(s.content.clone()) - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - })? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room guest access event in database.") @@ -2163,9 +2153,7 @@ pub async fn get_public_rooms_filtered_route( serde_json::from_value::>( s.content.clone(), ) - .map_err(|_| { - Error::bad_database("Invalid room avatar event in database.") - })? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room avatar event in database.") @@ -2377,7 +2365,7 @@ pub fn create_state_event_for_key_route( let canonical_alias = serde_json::from_value::< Raw, >(content.clone()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))? + .expect("from_value::> can never fail") .deserialize() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))?; From 1c6f2119334e68b467ee4b8e4e4fabedea1f8131 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 28 Jul 2020 08:59:30 -0400 Subject: [PATCH 0205/1727] Create media::FileMeta to represent a file and meta data --- src/client_server.rs | 14 +++++++++++--- src/database.rs | 2 +- src/database/media.rs | 24 ++++++++++++++++++++---- src/lib.rs | 2 +- 4 files changed, 33 insertions(+), 9 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index bac4b25..e5a6df8 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -4,7 +4,9 @@ use std::{ time::{Duration, SystemTime}, }; -use crate::{utils, ConduitResult, Database, Error, Ruma}; +use crate::{ + database::media::FileMeta, pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma, +}; use keys::{upload_signatures, upload_signing_keys}; use log::warn; @@ -3273,7 +3275,11 @@ pub fn get_content_route( _server_name: String, _media_id: String, ) -> ConduitResult { - if let Some((filename, content_type, file)) = db + if let Some(FileMeta { + filename, + content_type, + file, + }) = db .media .get(format!("mxc://{}/{}", body.server_name, body.media_id))? { @@ -3301,7 +3307,9 @@ pub fn get_content_thumbnail_route( _server_name: String, _media_id: String, ) -> ConduitResult { - if let Some((_, content_type, file)) = db.media.get_thumbnail( + if let Some(FileMeta { + content_type, file, .. 
+ }) = db.media.get_thumbnail( format!("mxc://{}/{}", body.server_name, body.media_id), body.width .try_into() diff --git a/src/database.rs b/src/database.rs index 5a1ed0f..536978e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,7 +1,7 @@ pub(self) mod account_data; pub(self) mod globals; pub(self) mod key_backups; -pub(self) mod media; +pub(crate) mod media; pub(self) mod rooms; pub(self) mod uiaa; pub(self) mod users; diff --git a/src/database/media.rs b/src/database/media.rs index cec74a6..e9dcb4a 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,7 +1,11 @@ use crate::{utils, Error, Result}; use std::mem; -pub type FileMeta = (Option, String, Vec); +pub struct FileMeta { + pub filename: Option, + pub content_type: String, + pub file: Vec, +} pub struct Media { pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + Filename + ContentType @@ -61,7 +65,11 @@ impl Media { })?) }; - Ok(Some((filename, content_type, file.to_vec()))) + Ok(Some(FileMeta { + filename, + content_type, + file: file.to_vec(), + })) } else { Ok(None) } @@ -107,7 +115,11 @@ impl Media { ) }; - Ok(Some((filename, content_type, file.to_vec()))) + Ok(Some(FileMeta { + filename, + content_type, + file: file.to_vec(), + })) } else if let Some(r) = self.mediaid_file.scan_prefix(&original_prefix).next() { // Generate a thumbnail let (key, file) = r?; @@ -154,7 +166,11 @@ impl Media { self.mediaid_file.insert(thumbnail_key, &*thumbnail_bytes)?; - Ok(Some((filename, content_type, thumbnail_bytes))) + Ok(Some(FileMeta { + filename, + content_type, + file: thumbnail_bytes.to_vec(), + })) } else { Ok(None) } diff --git a/src/lib.rs b/src/lib.rs index cd5029c..5cd8d48 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,7 @@ pub mod push_rules; mod ruma_wrapper; mod utils; -pub use database::Database; +pub use database::{media::FileMeta, Database}; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; From d948f896c0e3e9ccb436015c8dbfe4cc9911156d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 28 Jul 2020 09:00:23 -0400 Subject: [PATCH 0206/1727] Turn append_pdu args into struct PduBuilder --- src/client_server.rs | 545 +++++++++++++++++++++++------------------- src/database.rs | 2 +- src/database/rooms.rs | 23 +- src/pdu.rs | 12 + 4 files changed, 322 insertions(+), 260 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index e5a6df8..16bf73d 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -477,13 +477,15 @@ pub fn deactivate_route( }; db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; } @@ -662,30 +664,32 @@ pub fn set_displayname_route( for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? 
- .ok_or_else(|| { - Error::bad_database( - "Tried to send displayname update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? + .ok_or_else(|| { + Error::bad_database( + "Tried to send displayname update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -758,30 +762,32 @@ pub fn set_avatar_url_route( for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? - .ok_or_else(|| { - Error::bad_database( - "Tried to send avatar url update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? + .ok_or_else(|| { + Error::bad_database( + "Tried to send avatar url update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1294,32 +1300,36 @@ pub fn create_room_route( // 1. The room create event db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomCreate, - serde_json::to_value(content).expect("event is valid, we just created it"), - None, - Some("".to_owned()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomCreate, + content: serde_json::to_value(content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, &db.globals, )?; // 2. 
Let the room creator join db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: body.is_direct, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1359,72 +1369,82 @@ pub fn create_room_route( .expect("event is valid, we just created it") }; db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomPowerLevels, - power_levels_content, - None, - Some("".to_owned()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomPowerLevels, + content: power_levels_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, &db.globals, )?; // 4. Events set by preset // 4.1 Join Rules db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomJoinRules, - match preset { - create_room::RoomPreset::PublicChat => serde_json::to_value( - join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), - ) - .expect("event is valid, we just created it"), - // according to spec "invite" is the default - _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomJoinRules, + content: match preset { + create_room::RoomPreset::PublicChat => serde_json::to_value( + join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), + ) + .expect("event is valid, we just created it"), + // according to spec "invite" is the default + _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, }, - None, - Some("".to_owned()), - None, &db.globals, )?; // 4.2 History Visibility db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomHistoryVisibility, - serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - None, - Some("".to_owned()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomHistoryVisibility, + content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, &db.globals, )?; // 4.3 Guest Access db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomGuestAccess, - match preset { - 
create_room::RoomPreset::PublicChat => serde_json::to_value( - guest_access::GuestAccessEventContent::new(guest_access::GuestAccess::Forbidden), - ) - .expect("event is valid, we just created it"), - _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::CanJoin, - )) - .expect("event is valid, we just created it"), + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomGuestAccess, + content: match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::Forbidden, + )) + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::CanJoin, + )) + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, }, - None, - Some("".to_owned()), - None, &db.globals, )?; @@ -1441,15 +1461,17 @@ pub fn create_room_route( } db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - event_type.clone(), - serde_json::from_str(content.get()).map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") - })?, - None, - state_key.clone(), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: event_type.clone(), + content: serde_json::from_str(content.get()).map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") + })?, + unsigned: None, + state_key: state_key.clone(), + redacts: None, + }, &db.globals, )?; } @@ -1457,33 +1479,38 @@ pub fn create_room_route( // 6. Events implied by name and topic if let Some(name) = &body.name { db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomName, - serde_json::to_value( - name::NameEventContent::new(name.clone()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid."))?, - ) - .expect("event is valid, we just created it"), - None, - Some("".to_owned()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomName, + content: serde_json::to_value( + name::NameEventContent::new(name.clone()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") + })?, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, &db.globals, )?; } if let Some(topic) = &body.topic { db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomTopic, - serde_json::to_value(topic::TopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - None, - Some("".to_owned()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomTopic, + content: serde_json::to_value(topic::TopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, &db.globals, )?; } @@ -1491,20 +1518,22 @@ pub fn create_room_route( // 7. 
Events implied by invite (and TODO: invite_3pid) for user in &body.invite { db.rooms.append_pdu( - room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user)?, - avatar_url: db.users.avatar_url(&user)?, - is_direct: body.is_direct, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - None, - Some(user.to_string()), - None, + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user)?, + avatar_url: db.users.avatar_url(&user)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user.to_string()), + redacts: None, + }, &db.globals, )?; } @@ -1552,16 +1581,18 @@ pub fn redact_event_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let event_id = db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomRedaction, - serde_json::to_value(redaction::RedactionEventContent { - reason: body.reason.clone(), - }) - .expect("event is valid, we just created it"), - None, - None, - Some(body.event_id.clone()), + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomRedaction, + content: serde_json::to_value(redaction::RedactionEventContent { + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.clone()), + }, &db.globals, )?; @@ -1647,13 +1678,15 @@ pub fn join_room_by_id_route( }; db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1724,13 +1757,15 @@ pub fn leave_room_route( event.membership = member::MembershipState::Leave; db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(sender_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1768,13 +1803,15 @@ pub fn kick_user_route( // TODO: reason db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(body.user_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, 
&db.globals, )?; @@ -1859,13 +1896,15 @@ pub fn ban_user_route( )?; db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(body.user_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1902,13 +1941,15 @@ pub fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(event).expect("event is valid, we just created it"), - None, - Some(body.user_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -1942,20 +1983,22 @@ pub fn invite_user_route( if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - EventType::RoomMember, - serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - None, - Some(user_id.to_string()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, &db.globals, )?; @@ -2327,18 +2370,20 @@ pub fn create_message_event_route( unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - body.event_type.clone(), - serde_json::from_str( - body.json_body - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - Some(unsigned), - None, - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: body.event_type.clone(), + content: serde_json::from_str( + body.json_body + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
+ .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, &db.globals, )?; @@ -2395,13 +2440,15 @@ pub fn create_state_event_for_key_route( } let event_id = db.rooms.append_pdu( - body.room_id.clone(), - sender_id.clone(), - body.event_type.clone(), - content, - None, - Some(body.state_key.clone()), - None, + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: body.event_type.clone(), + content, + unsigned: None, + state_key: Some(body.state_key.clone()), + redacts: None, + }, &db.globals, )?; @@ -2696,7 +2743,7 @@ pub async fn sync_events_route( // Filter for possible heroes .filter_map(|u| u) { - if heroes.contains(&hero) || hero == sender_id.to_string() { + if heroes.contains(&hero) || hero == sender_id.as_str() { continue; } @@ -2796,7 +2843,7 @@ pub async fn sync_events_route( notification_count, }, timeline: sync_events::Timeline { - limited: false || joined_since_last_sync, + limited: joined_since_last_sync, prev_batch, events: room_events, }, @@ -2984,7 +3031,7 @@ pub async fn sync_events_route( { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or(Duration::default()); + let mut duration = body.timeout.unwrap_or_default(); if duration.as_secs() > 30 { duration = Duration::from_secs(30); } diff --git a/src/database.rs b/src/database.rs index 536978e..e3d003c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -125,7 +125,7 @@ impl Database { }) } - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> () { + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { let mut userid_prefix = user_id.to_string().as_bytes().to_vec(); userid_prefix.push(0xff); let mut userdeviceid_prefix = userid_prefix.clone(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ae0d084..9ff11b8 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; use log::error; use ruma::{ api::client::error::ErrorKind, @@ -250,18 +250,21 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. - #[allow(clippy::too_many_arguments, clippy::blocks_in_if_conditions)] + #[allow(clippy::blocks_in_if_conditions)] pub fn append_pdu( &self, - room_id: RoomId, - sender: UserId, - event_type: EventType, - content: serde_json::Value, - unsigned: Option>, - state_key: Option, - redacts: Option, + pdu_builder: PduBuilder, globals: &super::globals::Globals, ) -> Result { + let PduBuilder { + room_id, + sender, + event_type, + content, + unsigned, + state_key, + redacts, + } = pdu_builder; // TODO: Make sure this isn't called twice in parallel let prev_events = self.get_pdu_leaves(&room_id)?; @@ -623,7 +626,7 @@ impl Rooms { let mut first_pdu_id = prefix.clone(); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - let mut last_pdu_id = prefix.clone(); + let mut last_pdu_id = prefix; last_pdu_id.extend_from_slice(&u64::MAX.to_be_bytes()); let user_id = user_id.clone(); diff --git a/src/pdu.rs b/src/pdu.rs index c149297..b689a3e 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -175,3 +175,15 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } } + +/// Build the start of a PDU in order to add it to the `Database`. 
+#[derive(Debug)] +pub struct PduBuilder { + pub room_id: RoomId, + pub sender: UserId, + pub event_type: EventType, + pub content: serde_json::Value, + pub unsigned: Option>, + pub state_key: Option, + pub redacts: Option, +} From b67372b11bb7e10a7150a444e4ec3dd4b8f385a2 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Wed, 29 Jul 2020 21:27:49 +0200 Subject: [PATCH 0207/1727] refactor: make all mods in database pub Now they can easily export their own structs --- src/database.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/database.rs b/src/database.rs index e3d003c..4787669 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,10 +1,10 @@ -pub(self) mod account_data; -pub(self) mod globals; -pub(self) mod key_backups; -pub(crate) mod media; -pub(self) mod rooms; -pub(self) mod uiaa; -pub(self) mod users; +pub mod account_data; +pub mod globals; +pub mod key_backups; +pub mod media; +pub mod rooms; +pub mod uiaa; +pub mod users; use crate::{Error, Result}; use directories::ProjectDirs; From ce460ea15964ff8bd438b5022c9dbb35781b594b Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 30 Jul 2020 14:05:08 +0200 Subject: [PATCH 0208/1727] fix: send device list updates when user is in no rooms --- src/client_server.rs | 52 ++++++++++++++++++++++++++++++++++--------- src/database.rs | 20 ++++++++++++++--- src/database/users.rs | 27 +++++++++++++++++----- 3 files changed, 80 insertions(+), 19 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 16bf73d..46791f7 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -672,7 +672,11 @@ pub fn set_displayname_route( displayname: body.displayname.clone(), ..serde_json::from_value::>( db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? .ok_or_else(|| { Error::bad_database( "Tried to send displayname update for user not in the room.", @@ -770,7 +774,11 @@ pub fn set_avatar_url_route( avatar_url: body.avatar_url.clone(), ..serde_json::from_value::>( db.rooms - .room_state_get(&room_id, &EventType::RoomMember, &sender_id.to_string())? + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? 
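Stripped of the Conduit-specific types, the PduBuilder change above is the standard parameter-struct refactor: append_pdu's seven positional arguments become named fields of a struct that the callee destructures once at the top. A minimal self-contained sketch of the same pattern, with placeholder String fields instead of Conduit's real RoomId/UserId/EventType types:

// Sketch only: placeholder types stand in for RoomId, UserId, EventType, etc.
#[derive(Debug)]
struct EventBuilder {
    room_id: String,
    sender: String,
    event_type: String,
    state_key: Option<String>,
}

fn append_event(builder: EventBuilder) -> String {
    // Destructure once, then work with named locals instead of a long
    // positional argument list that is easy to call in the wrong order.
    let EventBuilder {
        room_id,
        sender,
        event_type,
        state_key,
    } = builder;
    format!("{} sent {} in {} (state_key: {:?})", sender, event_type, room_id, state_key)
}

fn main() {
    let summary = append_event(EventBuilder {
        room_id: "!room:example.org".to_owned(),
        sender: "@alice:example.org".to_owned(),
        event_type: "m.room.message".to_owned(),
        state_key: None,
    });
    println!("{}", summary);
}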
.ok_or_else(|| { Error::bad_database( "Tried to send avatar url update for user not in the room.", @@ -1884,12 +1892,11 @@ pub fn ban_user_route( third_party_invite: None, }), |event| { - let mut event = serde_json::from_value::>( - event.content, - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let mut event = + serde_json::from_value::>(event.content) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Ban; Ok(event) }, @@ -2211,6 +2218,7 @@ pub async fn get_public_rooms_filtered_route( Ok::<_, Error>(chunk) }) .filter_map(|r| r.ok()) // Filter out buggy rooms + // We need to collect all, so we can sort by member count .collect::>(); chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); @@ -2618,6 +2626,13 @@ pub async fn sync_events_route( let mut presence_updates = HashMap::new(); let mut device_list_updates = HashSet::new(); + // Look for device list updates of this account + device_list_updates.extend( + db.users + .keys_changed(&sender_id.to_string(), since, None) + .filter_map(|r| r.ok()), + ); + for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; @@ -2869,7 +2884,7 @@ pub async fn sync_events_route( // Look for device list updates in this room device_list_updates.extend( db.users - .keys_changed(&room_id, since, None) + .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()), ); @@ -3652,11 +3667,28 @@ pub fn get_key_changes_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); + + device_list_updates.extend( + db.users + .keys_changed( + &sender_id.to_string(), + body.from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, + Some( + body.to + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, + ), + ) + .filter_map(|r| r.ok()), + ); + for room_id in db.rooms.rooms_joined(sender_id).filter_map(|r| r.ok()) { device_list_updates.extend( db.users .keys_changed( - &room_id, + &room_id.to_string(), body.from.parse().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") })?, diff --git a/src/database.rs b/src/database.rs index 4787669..844a1f4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -126,16 +126,17 @@ impl Database { } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let mut userid_prefix = user_id.to_string().as_bytes().to_vec(); + let userid_bytes = user_id.to_string().as_bytes().to_vec(); + + let mut userid_prefix = userid_bytes.clone(); userid_prefix.push(0xff); + let mut userdeviceid_prefix = userid_prefix.clone(); userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); userdeviceid_prefix.push(0xff); let mut futures = futures::stream::FuturesUnordered::new(); - futures.push(self.users.keychangeid_userid.watch_prefix(b"")); - // Return when *any* user changed his key // TODO: only send for user they share a room with futures.push( @@ -171,6 +172,9 @@ impl Database { .watch_prefix(&roomid_prefix), ); + // Key changes + futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix)); + // Room account data let mut roomuser_prefix = roomid_prefix.clone(); roomuser_prefix.extend_from_slice(&userid_prefix); @@ -191,6 +195,16 @@ impl Database { .watch_prefix(&globaluserdata_prefix), 
); + // More key changes (used when user is not joined to any rooms) + futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push( + self.users + .userid_lastonetimekeyupdate + .watch_prefix(&userid_bytes), + ); + // Wait until one of them finds something futures.next().await; } diff --git a/src/database/users.rs b/src/database/users.rs index 1ec677c..f031534 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{AnyToDeviceEvent, EventType}, - DeviceId, Raw, RoomId, UserId, + DeviceId, Raw, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; @@ -23,7 +23,7 @@ pub struct Users { pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId pub(super) userid_lastonetimekeyupdate: sled::Tree, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = RoomId + Count + pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = UserId/RoomId + Count pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) pub(super) userid_masterkeyid: sled::Tree, pub(super) userid_selfsigningkeyid: sled::Tree, @@ -305,8 +305,7 @@ impl Users { } pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self - .userid_lastonetimekeyupdate + self.userid_lastonetimekeyupdate .get(&user_id.to_string().as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { @@ -417,6 +416,11 @@ impl Users { self.keychangeid_userid.insert(key, &*user_id.to_string())?; } + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + self.keychangeid_userid.insert(key, &*user_id.to_string())?; + Ok(()) } @@ -524,6 +528,11 @@ impl Users { self.keychangeid_userid.insert(key, &*user_id.to_string())?; } + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + self.keychangeid_userid.insert(key, &*user_id.to_string())?; + Ok(()) } @@ -576,16 +585,22 @@ impl Users { .insert(key, &*target_id.to_string())?; } + let mut key = target_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + self.keychangeid_userid + .insert(key, &*target_id.to_string())?; + Ok(()) } pub fn keys_changed( &self, - room_id: &RoomId, + user_or_room_id: &str, from: u64, to: Option, ) -> impl Iterator> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); let mut start = prefix.clone(); From 52b1df6fd25489d89f885e401b7cdc88c1d11ef1 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 30 Jul 2020 11:12:49 +0200 Subject: [PATCH 0209/1727] improvement: handle limit and set limited in /user_directory/search --- src/client_server.rs | 63 +++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index 46791f7..17218df 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -2262,39 +2262,42 @@ pub fn search_users_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - Ok(search_users::Response { - results: db - .users - .iter() - .filter_map(|user_id| { - // Filter out buggy users (they should not exist, but you never know...) - let user_id = user_id.ok()?; - if db.users.is_deactivated(&user_id).ok()? 
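The key-change bookkeeping above boils down to one key layout: every change is stored under prefix + 0xff + big-endian count, and it is now written twice, once under the room id and once under the user id, so a device whose user shares no rooms can still watch and query its own prefix. A rough sketch of that scheme, using a BTreeMap in place of the sled tree (key and function names here are illustrative, not Conduit's API):

use std::collections::BTreeMap;

// Sketch of the keychangeid_userid layout: prefix (user or room id) + 0xff + count.
fn change_key(prefix: &str, count: u64) -> Vec<u8> {
    let mut key = prefix.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key
}

// Range-scan every entry under `prefix` with a count greater than `since`,
// mirroring what a prefix scan over the sled tree does.
fn keys_changed<'a>(
    tree: &'a BTreeMap<Vec<u8>, String>,
    prefix: &str,
    since: u64,
) -> impl Iterator<Item = &'a str> {
    let start = change_key(prefix, since + 1);
    let end = change_key(prefix, u64::MAX);
    tree.range(start..=end).map(|(_, user)| user.as_str())
}

fn main() {
    let mut tree = BTreeMap::new();
    // One write under the room prefix, one under the user prefix.
    tree.insert(change_key("!room:example.org", 5), "@alice:example.org".to_owned());
    tree.insert(change_key("@alice:example.org", 5), "@alice:example.org".to_owned());

    // A device whose user is joined to no rooms can still see its own key changes.
    for user in keys_changed(&tree, "@alice:example.org", 0) {
        println!("key change by {}", user);
    }
}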
{ - return None; - } + let limit = if let Some(limit) = body.limit { + u64::from(limit) + } else { + 10 + } as usize; - let user = search_users::User { - user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).ok()?, - avatar_url: db.users.avatar_url(&user_id).ok()?, - }; + let mut users = db.users.iter().filter_map(|user_id| { + // Filter out buggy users (they should not exist, but you never know...) + let user_id = user_id.ok()?; + if db.users.is_deactivated(&user_id).ok()? { + return None; + } - if !user.user_id.to_string().contains(&body.search_term) - && user - .display_name - .as_ref() - .filter(|name| name.contains(&body.search_term)) - .is_none() - { - return None; - } + let user = search_users::User { + user_id: user_id.clone(), + display_name: db.users.displayname(&user_id).ok()?, + avatar_url: db.users.avatar_url(&user_id).ok()?, + }; - Some(user) - }) - .collect(), - limited: false, - } - .into()) + if !user.user_id.to_string().contains(&body.search_term) + && user + .display_name + .as_ref() + .filter(|name| name.contains(&body.search_term)) + .is_none() + { + return None; + } + + Some(user) + }); + + let results = users.by_ref().take(limit).collect(); + let limited = users.next().is_some(); + + Ok(search_users::Response { results, limited }.into()) } #[cfg_attr( From f2fab27d819f7b33f246491e8d4d4ba0b12eea72 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 27 Jul 2020 20:48:51 -0400 Subject: [PATCH 0210/1727] Implement filtering invites if sender is ignored by receiver --- src/client_server.rs | 22 ++++++++++++++++++++++ src/database/rooms.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/src/client_server.rs b/src/client_server.rs index 17218df..cf3f67c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -487,6 +487,7 @@ pub fn deactivate_route( redacts: None, }, &db.globals, + &db.account_data, )?; } @@ -695,6 +696,7 @@ pub fn set_displayname_route( redacts: None, }, &db.globals, + &db.account_data, )?; // Presence update @@ -797,6 +799,7 @@ pub fn set_avatar_url_route( redacts: None, }, &db.globals, + &db.account_data, )?; // Presence update @@ -1318,6 +1321,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // 2. Let the room creator join @@ -1339,6 +1343,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // Figure out preset. We need it for power levels and preset specific events @@ -1387,6 +1392,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // 4. Events set by preset @@ -1412,6 +1418,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // 4.2 History Visibility @@ -1429,6 +1436,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // 4.3 Guest Access @@ -1454,6 +1462,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; // 5. 
Events listed in initial_state @@ -1481,6 +1490,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; } @@ -1502,6 +1512,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; } @@ -1520,6 +1531,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; } @@ -1543,6 +1555,7 @@ pub fn create_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; } @@ -1602,6 +1615,7 @@ pub fn redact_event_route( redacts: Some(body.event_id.clone()), }, &db.globals, + &db.account_data, )?; Ok(redact_event::Response { event_id }.into()) @@ -1696,6 +1710,7 @@ pub fn join_room_by_id_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(join_room_by_id::Response { @@ -1775,6 +1790,7 @@ pub fn leave_room_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(leave_room::Response.into()) @@ -1821,6 +1837,7 @@ pub fn kick_user_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(kick_user::Response.into()) @@ -1913,6 +1930,7 @@ pub fn ban_user_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(ban_user::Response.into()) @@ -1958,6 +1976,7 @@ pub fn unban_user_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(unban_user::Response.into()) @@ -2007,6 +2026,7 @@ pub fn invite_user_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(invite_user::Response.into()) @@ -2396,6 +2416,7 @@ pub fn create_message_event_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(create_message_event::Response { event_id }.into()) @@ -2461,6 +2482,7 @@ pub fn create_state_event_for_key_route( redacts: None, }, &db.globals, + &db.account_data, )?; Ok(create_state_event_for_key::Response { event_id }.into()) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 9ff11b8..f30ce14 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -7,6 +7,7 @@ use log::error; use ruma::{ api::client::error::ErrorKind, events::{ + ignored_user_list, room::{ join_rules, member, power_levels::{self, PowerLevelsEventContent}, @@ -255,6 +256,7 @@ impl Rooms { &self, pdu_builder: PduBuilder, globals: &super::globals::Globals, + account_data: &super::account_data::AccountData, ) -> Result { let PduBuilder { room_id, @@ -411,6 +413,43 @@ impl Rooms { || join_rules == join_rules::JoinRule::Public } } else if target_membership == member::MembershipState::Invite { + // we want to know if the sender is ignored by the receiver + let is_ignored = if let Ok(Some(ignored)) = + account_data.get::( + None, // we cannot use the provided room_id it's the invite room + &target_user_id, // receiver + EventType::IgnoredUserList, + ) { + ignored.ignored_users.contains(&sender) + } else { + false + }; + + if is_ignored { + let mut event = + serde_json::from_value::>(content) + .expect("from_value::> cannot fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + event.membership = member::MembershipState::Leave; + + return self.append_pdu( + room_id, + target_user_id.clone(), + EventType::RoomMember, + serde_json::to_value(event) + .expect("event is valid, we just created it"), + None, + Some(target_user_id.to_string()), + None, + globals, + account_data, + ); + } + if let Some(third_party_invite_json) = content.get("third_party_invite") { if current_membership == member::MembershipState::Ban { false From 8aac332b3ab023f723df08dd052c02ae16f26b32 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 29 Jul 2020 
13:47:50 -0400 Subject: [PATCH 0211/1727] Move ignore invite logic into update_member --- src/database/rooms.rs | 86 ++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f30ce14..59ebc72 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -355,12 +355,13 @@ impl Rooms { .membership) })?; - let target_membership = + let member_content = serde_json::from_value::>(content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership; + .map_err(|_| Error::bad_database("Invalid Member event in db."))?; + + let target_membership = member_content.membership; let target_power = power_levels.users.get(&target_user_id).map_or_else( || { @@ -413,43 +414,6 @@ impl Rooms { || join_rules == join_rules::JoinRule::Public } } else if target_membership == member::MembershipState::Invite { - // we want to know if the sender is ignored by the receiver - let is_ignored = if let Ok(Some(ignored)) = - account_data.get::( - None, // we cannot use the provided room_id it's the invite room - &target_user_id, // receiver - EventType::IgnoredUserList, - ) { - ignored.ignored_users.contains(&sender) - } else { - false - }; - - if is_ignored { - let mut event = - serde_json::from_value::>(content) - .expect("from_value::> cannot fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - event.membership = member::MembershipState::Leave; - - return self.append_pdu( - room_id, - target_user_id.clone(), - EventType::RoomMember, - serde_json::to_value(event) - .expect("event is valid, we just created it"), - None, - Some(target_user_id.to_string()), - None, - globals, - account_data, - ); - } - if let Some(third_party_invite_json) = content.get("third_party_invite") { if current_membership == member::MembershipState::Ban { false @@ -502,7 +466,14 @@ impl Rooms { if authorized { // Update our membership info - self.update_membership(&room_id, &target_user_id, &target_membership)?; + self.update_membership( + &room_id, + &target_user_id, + member_content, + &sender, + account_data, + globals, + )?; } authorized @@ -780,8 +751,12 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - membership: &member::MembershipState, + mut member_event: member::MemberEventContent, + sender: &UserId, + account_data: &super::account_data::AccountData, + globals: &super::globals::Globals, ) -> Result<()> { + let membership = member_event.membership; let mut userroom_id = user_id.to_string().as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.to_string().as_bytes()); @@ -799,6 +774,33 @@ impl Rooms { self.userroomid_left.remove(&userroom_id)?; } member::MembershipState::Invite => { + // We want to know if the sender is ignored by the receiver + let is_ignored = account_data + .get::( + None, // Ignored users are in global account data + &user_id, // Receiver + EventType::IgnoredUserList, + )? 
+ .map_or(false, |ignored| ignored.ignored_users.contains(&sender)); + + if is_ignored { + member_event.membership = member::MembershipState::Leave; + + return self + .append_pdu( + room_id.clone(), + user_id.clone(), + EventType::RoomMember, + serde_json::to_value(member_event) + .expect("event is valid, we just created it"), + None, + Some(user_id.to_string()), + None, + globals, + account_data, + ) + .map(|_| ()); + } self.userroomid_invited.insert(&userroom_id, &[])?; self.roomuserid_invited.insert(&roomuser_id, &[])?; self.userroomid_joined.remove(&userroom_id)?; From 25c0e75f29b60d8c9104e51e26f83bdc98cb2a71 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 29 Jul 2020 14:28:40 -0400 Subject: [PATCH 0212/1727] Cargo fmt --- src/database/account_data.rs | 3 +-- src/database/key_backups.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 99e0d5c..a917123 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -4,8 +4,7 @@ use ruma::{ events::{AnyEvent as EduEvent, EventType}, Raw, RoomId, UserId, }; -use serde::de::DeserializeOwned; -use serde::Serialize; +use serde::{de::DeserializeOwned, Serialize}; use sled::IVec; use std::{collections::HashMap, convert::TryFrom}; diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index a506564..5b37f1b 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::backup::{BackupAlgorithm, KeyData, Sessions}, }, - {RoomId, UserId}, + RoomId, UserId, }; use std::{collections::BTreeMap, convert::TryFrom}; From 99220565d4f71b734f88d2dc9c341a3ac72903c1 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 29 Jul 2020 17:07:12 -0400 Subject: [PATCH 0213/1727] Fix invite leave auth error by moving update_membership --- src/client_server.rs | 4 +-- src/database/rooms.rs | 64 +++++++++++++++++++++++++++---------------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index cf3f67c..aad0b63 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -3243,8 +3243,8 @@ pub fn get_message_events_route( .collect::>(); Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: start_token, + start: start_token, + end: Some(body.from.clone()), chunk: events_before, state: Vec::new(), } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 59ebc72..c44eb5a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -355,13 +355,12 @@ impl Rooms { .membership) })?; - let member_content = + let target_membership = serde_json::from_value::>(content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))?; - - let target_membership = member_content.membership; + .map_err(|_| Error::bad_database("Invalid Member event in db."))? 
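The invite handling above reduces to one decision: look the inviter up in the receiver's global m.ignored_user_list and, if present, persist a leave instead of an invite so the room never shows up in the receiver's invited list. A condensed sketch of that decision, with plain collections standing in for the account-data store (names and types are illustrative, not Conduit's API):

use std::collections::{HashMap, HashSet};

#[derive(Debug, PartialEq)]
enum Membership {
    Invite,
    Leave,
}

// Decide what membership to persist when `sender` invites `receiver`.
// `ignore_lists` plays the role of the per-user m.ignored_user_list account data.
fn effective_membership(
    sender: &str,
    receiver: &str,
    ignore_lists: &HashMap<String, HashSet<String>>,
) -> Membership {
    let is_ignored = ignore_lists
        .get(receiver)
        .map_or(false, |ignored| ignored.contains(sender));

    if is_ignored {
        // Silently downgrade the invite so the receiver is never bothered.
        Membership::Leave
    } else {
        Membership::Invite
    }
}

fn main() {
    let mut bobs_ignored = HashSet::new();
    bobs_ignored.insert("@spammer:example.org".to_owned());

    let mut ignore_lists = HashMap::new();
    ignore_lists.insert("@bob:example.org".to_owned(), bobs_ignored);

    assert_eq!(
        effective_membership("@spammer:example.org", "@bob:example.org", &ignore_lists),
        Membership::Leave
    );
    assert_eq!(
        effective_membership("@alice:example.org", "@bob:example.org", &ignore_lists),
        Membership::Invite
    );
    println!("ignore-list check behaves as expected");
}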
+ .membership; let target_power = power_levels.users.get(&target_user_id).map_or_else( || { @@ -464,18 +463,6 @@ impl Rooms { false }; - if authorized { - // Update our membership info - self.update_membership( - &room_id, - &target_user_id, - member_content, - &sender, - account_data, - globals, - )?; - } - authorized } EventType::RoomCreate => prev_events.is_empty(), @@ -533,7 +520,7 @@ impl Rooms { .expect("time is valid"), kind: event_type.clone(), content: content.clone(), - state_key, + state_key: state_key.clone(), prev_events, depth: depth .try_into() @@ -606,6 +593,35 @@ impl Rooms { self.redact_pdu(&redact_id)?; } + EventType::RoomMember => { + if let Some(state_key) = state_key { + // if the state_key fails + let target_user_id = UserId::try_from(state_key).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "State key of member event does not contain user id.", + ) + })?; + // Update our membership info, we do this here incase a user is invited + // and imediatly leaves we need the DB to record the invite event for auth + self.update_membership( + &room_id, + &target_user_id, + serde_json::from_value::(content).map_err( + |_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + }, + )?, + &sender, + account_data, + globals, + )?; + } + } + _ => {} } self.edus.room_read_set(&room_id, &sender, index)?; @@ -751,12 +767,12 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - mut member_event: member::MemberEventContent, + mut member_content: member::MemberEventContent, sender: &UserId, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { - let membership = member_event.membership; + let membership = member_content.membership; let mut userroom_id = user_id.to_string().as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.to_string().as_bytes()); @@ -776,22 +792,24 @@ impl Rooms { member::MembershipState::Invite => { // We want to know if the sender is ignored by the receiver let is_ignored = account_data - .get::( + .get::( None, // Ignored users are in global account data &user_id, // Receiver EventType::IgnoredUserList, )? 
- .map_or(false, |ignored| ignored.ignored_users.contains(&sender)); + .map_or(false, |ignored| { + ignored.content.ignored_users.contains(&sender) + }); if is_ignored { - member_event.membership = member::MembershipState::Leave; + member_content.membership = member::MembershipState::Leave; return self .append_pdu( room_id.clone(), user_id.clone(), EventType::RoomMember, - serde_json::to_value(member_event) + serde_json::to_value(member_content) .expect("event is valid, we just created it"), None, Some(user_id.to_string()), From c8d7d80eb24acf2a62304a87b3e08b7cd2a2f86f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 30 Jul 2020 07:50:09 -0400 Subject: [PATCH 0214/1727] Fix start/end token swap left from dropped commits --- src/client_server.rs | 4 ++-- src/database/rooms.rs | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/client_server.rs b/src/client_server.rs index aad0b63..cf3f67c 100644 --- a/src/client_server.rs +++ b/src/client_server.rs @@ -3243,8 +3243,8 @@ pub fn get_message_events_route( .collect::>(); Ok(get_message_events::Response { - start: start_token, - end: Some(body.from.clone()), + start: Some(body.from.clone()), + end: start_token, chunk: events_before, state: Vec::new(), } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c44eb5a..5c17f27 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -596,14 +596,10 @@ impl Rooms { EventType::RoomMember => { if let Some(state_key) = state_key { // if the state_key fails - let target_user_id = UserId::try_from(state_key).map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "State key of member event does not contain user id.", - ) - })?; + let target_user_id = UserId::try_from(state_key) + .expect("This state_key was previously validated"); // Update our membership info, we do this here incase a user is invited - // and imediatly leaves we need the DB to record the invite event for auth + // and immediately leaves we need the DB to record the invite event for auth self.update_membership( &room_id, &target_user_id, From 7a70d8488f0a4e6b286bc151520efd2dd85ecb0e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 30 Jul 2020 08:43:51 -0400 Subject: [PATCH 0215/1727] Rebase with master and update append_pdu call --- src/database/rooms.rs | 67 ++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5c17f27..fe63318 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -388,7 +388,7 @@ impl Rooms { .join_rule) })?; - let authorized = if target_membership == member::MembershipState::Join { + if target_membership == member::MembershipState::Join { let mut prev_events = prev_events.iter(); let prev_event = self .get_pdu(prev_events.next().ok_or(Error::BadRequest( @@ -461,14 +461,11 @@ impl Rooms { } } else { false - }; - - authorized + } } EventType::RoomCreate => prev_events.is_empty(), // Not allow any of the following events if the sender is not joined. 
_ if sender_membership != member::MembershipState::Join => false, - _ => { // TODO sender_power.unwrap_or(&power_levels.users_default) @@ -576,22 +573,24 @@ impl Rooms { self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } - if let EventType::RoomRedaction = event_type { - if let Some(redact_id) = &redacts { - // TODO: Reason - let _reason = - serde_json::from_value::>(content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - })? - .reason; + match event_type { + EventType::RoomRedaction => { + if let Some(redact_id) = &redacts { + // TODO: Reason + let _reason = + serde_json::from_value::>(content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })? + .reason; - self.redact_pdu(&redact_id)?; + self.redact_pdu(&redact_id)?; + } } EventType::RoomMember => { if let Some(state_key) = state_key { @@ -800,20 +799,22 @@ impl Rooms { if is_ignored { member_content.membership = member::MembershipState::Leave; - return self - .append_pdu( - room_id.clone(), - user_id.clone(), - EventType::RoomMember, - serde_json::to_value(member_content) + self.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: user_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member_content) .expect("event is valid, we just created it"), - None, - Some(user_id.to_string()), - None, - globals, - account_data, - ) - .map(|_| ()); + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + globals, + account_data, + )?; + + return Ok(()); } self.userroomid_invited.insert(&userroom_id, &[])?; self.roomuserid_invited.insert(&roomuser_id, &[])?; From c1c62b7eb45a147f7f9b269bd784ba51cbb1b357 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 30 Jul 2020 18:14:47 +0200 Subject: [PATCH 0216/1727] refactor: put endpoints into modules --- src/client_server.rs | 3844 --------------------------- src/client_server/account.rs | 308 +++ src/client_server/alias.rs | 67 + src/client_server/backup.rs | 142 + src/client_server/capabilities.rs | 31 + src/client_server/config.rs | 67 + src/client_server/context.rs | 92 + src/client_server/device.rs | 170 ++ src/client_server/directory.rs | 258 ++ src/client_server/filter.rs | 29 + src/client_server/keys.rs | 323 +++ src/client_server/media.rs | 107 + src/client_server/membership.rs | 422 +++ src/client_server/message.rs | 128 + src/client_server/mod.rs | 78 + src/client_server/presence.rs | 45 + src/client_server/profile.rs | 236 ++ src/client_server/push.rs | 75 + src/client_server/read_marker.rs | 74 + src/client_server/redact.rs | 39 + src/client_server/room.rs | 345 +++ src/client_server/session.rs | 128 + src/client_server/state.rs | 216 ++ src/client_server/sync.rs | 477 ++++ src/client_server/tag.rs | 100 + src/client_server/thirdparty.rs | 19 + src/client_server/to_device.rs | 56 + src/client_server/typing.rs | 33 + src/client_server/unversioned.rs | 19 + src/client_server/user_directory.rs | 52 + src/client_server/voip.rs | 13 + src/lib.rs | 5 +- src/main.rs | 3 +- 33 files changed, 4153 insertions(+), 3848 deletions(-) delete mode 100644 src/client_server.rs create mode 100644 src/client_server/account.rs create mode 100644 src/client_server/alias.rs create mode 100644 src/client_server/backup.rs create mode 100644 src/client_server/capabilities.rs create mode 100644 
src/client_server/config.rs create mode 100644 src/client_server/context.rs create mode 100644 src/client_server/device.rs create mode 100644 src/client_server/directory.rs create mode 100644 src/client_server/filter.rs create mode 100644 src/client_server/keys.rs create mode 100644 src/client_server/media.rs create mode 100644 src/client_server/membership.rs create mode 100644 src/client_server/message.rs create mode 100644 src/client_server/mod.rs create mode 100644 src/client_server/presence.rs create mode 100644 src/client_server/profile.rs create mode 100644 src/client_server/push.rs create mode 100644 src/client_server/read_marker.rs create mode 100644 src/client_server/redact.rs create mode 100644 src/client_server/room.rs create mode 100644 src/client_server/session.rs create mode 100644 src/client_server/state.rs create mode 100644 src/client_server/sync.rs create mode 100644 src/client_server/tag.rs create mode 100644 src/client_server/thirdparty.rs create mode 100644 src/client_server/to_device.rs create mode 100644 src/client_server/typing.rs create mode 100644 src/client_server/unversioned.rs create mode 100644 src/client_server/user_directory.rs create mode 100644 src/client_server/voip.rs diff --git a/src/client_server.rs b/src/client_server.rs deleted file mode 100644 index cf3f67c..0000000 --- a/src/client_server.rs +++ /dev/null @@ -1,3844 +0,0 @@ -use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - time::{Duration, SystemTime}, -}; - -use crate::{ - database::media::FileMeta, pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma, -}; -use keys::{upload_signatures, upload_signing_keys}; -use log::warn; - -#[cfg(not(feature = "conduit_bin"))] -use super::State; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, options, post, put, tokio, State}; - -use ruma::{ - api::client::{ - error::ErrorKind, - r0::{ - account::{ - change_password, deactivate, get_username_availability, register, whoami, - ThirdPartyIdRemovalStatus, - }, - alias::{create_alias, delete_alias, get_alias}, - backup::{ - add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, - update_backup, - }, - capabilities::get_capabilities, - config::{get_global_account_data, set_global_account_data}, - context::get_context, - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - directory::{ - self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, - }, - filter::{self, create_filter, get_filter}, - keys::{self, claim_keys, get_key_changes, get_keys, upload_keys}, - media::{create_content, get_content, get_content_thumbnail, get_media_config}, - membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, - }, - message::{create_message_event, get_message_events}, - presence::set_presence, - profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, - }, - push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, - read_marker::set_read_marker, - redact::redact_event, - room::{self, create_room, get_room_event}, - session::{get_login_types, login, logout, logout_all}, - state::{ - create_state_event_for_empty_key, create_state_event_for_key, get_state_events, - get_state_events_for_empty_key, get_state_events_for_key, - }, - sync::sync_events, - tag::{create_tag, delete_tag, get_tags}, 
- thirdparty::get_protocols, - to_device::{self, send_event_to_device}, - typing::create_typing_event, - uiaa::{AuthFlow, UiaaInfo}, - user_directory::search_users, - }, - unversioned::get_supported_versions, - }, - events::{ - custom::CustomEventContent, - room::{ - canonical_alias, guest_access, history_visibility, join_rules, member, name, redaction, - topic, - }, - AnyEphemeralRoomEvent, AnyEvent, AnySyncEphemeralRoomEvent, BasicEvent, EventType, - }, - Raw, RoomAliasId, RoomId, RoomVersionId, UserId, -}; - -const GUEST_NAME_LENGTH: usize = 10; -const DEVICE_ID_LENGTH: usize = 10; -const TOKEN_LENGTH: usize = 256; -const MXC_LENGTH: usize = 256; -const SESSION_ID_LENGTH: usize = 256; - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] -pub fn get_supported_versions_route() -> ConduitResult { - let mut unstable_features = BTreeMap::new(); - - unstable_features.insert("org.matrix.e2e_cross_signing".to_owned(), true); - - Ok(get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], - unstable_features, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/register/available", data = "") -)] -pub fn get_register_available_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - // Validate user id - let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - // TODO add check for appservice namespaces - - // If no if check is true we have an username that's available to be used. - Ok(get_username_availability::Response { available: true }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/register", data = "") -)] -pub fn register_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - if db.globals.registration_disabled() { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Registration has been disabled.", - )); - } - - // Validate user id - let user_id = UserId::parse_with_server_name( - body.username - .clone() - .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)) - .to_lowercase(), - db.globals.server_name(), - ) - .ok() - .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.dummy".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - db.uiaa - .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, "".into(), &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - let password = body.password.clone().unwrap_or_default(); - - // Create user - db.users.create(&user_id, &password)?; - - // Generate new device id if the user didn't specify one - let device_id = body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); - - // Generate new token for the device - let token = utils::random_string(TOKEN_LENGTH); - - // Add device - db.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - )?; - - // Initial data - db.account_data.update( - None, - &user_id, - EventType::PushRules, - &ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: crate::push_rules::default_pushrules(&user_id), - }, - }, - &db.globals, - )?; - - Ok(register::Response { - access_token: Some(token), - user_id, - device_id: Some(device_id), - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -pub fn get_login_route() -> ConduitResult { - Ok(get_login_types::Response { - flows: vec![get_login_types::LoginType::Password], - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/login", data = "") -)] -pub fn login_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - // Validate login method - let user_id = - // TODO: Other login methods - if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = - (body.user.clone(), body.login_info.clone()) - { - let user_id = UserId::parse_with_server_name(username, db.globals.server_name()) - .map_err(|_| Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid." - ))?; - let hash = db.users.password_hash(&user_id)? - .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password." 
- ))?; - - if hash.is_empty() { - return Err(Error::BadRequest( - ErrorKind::UserDeactivated, - "The user has been deactivated" - )); - } - - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password.")); - } - - user_id - } else { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; - - // Generate new device id if the user didn't specify one - let device_id = body - .body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); - - // Generate a new token for the device - let token = utils::random_string(TOKEN_LENGTH); - - // Add device - db.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - )?; - - Ok(login::Response { - user_id, - access_token: token, - home_server: Some(db.globals.server_name().to_owned()), - device_id, - well_known: None, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/account/whoami", data = "") -)] -pub fn whoami_route(body: Ruma) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - Ok(whoami::Response { - user_id: sender_id.clone(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout", data = "") -)] -pub fn logout_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - db.users.remove_device(&sender_id, device_id)?; - - Ok(logout::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout/all", data = "") -)] -pub fn logout_all_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - for device_id in db.users.all_device_ids(sender_id) { - if let Ok(device_id) = device_id { - db.users.remove_device(&sender_id, &device_id)?; - } - } - - Ok(logout_all::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/password", data = "") -)] -pub fn change_password_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - db.users.set_password(&sender_id, &body.new_password)?; - - // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma - // See: https://github.com/ruma/ruma/issues/107 - // Logout all devices except the current one - for id in db - .users - .all_device_ids(&sender_id) - .filter_map(|id| id.ok()) - .filter(|id| id != device_id) - { - db.users.remove_device(&sender_id, &id)?; - } - - Ok(change_password::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/deactivate", data = "") -)] -pub fn deactivate_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - // Leave all joined rooms and reject all invitations - for room_id in db - .rooms - .rooms_joined(&sender_id) - .chain(db.rooms.rooms_invited(&sender_id)) - { - let room_id = room_id?; - let event = member::MemberEventContent { - membership: member::MembershipState::Leave, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - }; - - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - } - - // Remove devices and mark account as deactivated - db.users.deactivate_account(&sender_id)?; - - Ok(deactivate::Response { - id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] -pub fn get_capabilities_route() -> ConduitResult { - let mut available = BTreeMap::new(); - available.insert( - RoomVersionId::Version5, - get_capabilities::RoomVersionStability::Stable, - ); - available.insert( - RoomVersionId::Version6, - get_capabilities::RoomVersionStability::Stable, - ); - - Ok(get_capabilities::Response { - capabilities: get_capabilities::Capabilities { - change_password: None, // None means it is possible - room_versions: Some(get_capabilities::RoomVersionsCapability { - default: "6".to_owned(), - available, - }), - custom_capabilities: BTreeMap::new(), - }, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules", data = "") -)] -pub fn get_pushrules_all_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let event = db - .account_data - .get::(None, 
&sender_id, EventType::PushRules)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "PushRules event not found.", - ))?; - - Ok(get_pushrules_all::Response { - global: event.content.global, - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", put( - "/_matrix/client/r0/pushrules/<_>/<_>/<_>", - //data = "" -))] -pub fn set_pushrule_route(//db: State<'_, Database>, - //body: Ruma, -) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_route"); - Ok(set_pushrule::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") -)] -pub fn set_pushrule_enabled_route() -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_enabled_route"); - Ok(set_pushrule_enabled::Response.into()) -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] -pub fn get_filter_route() -> ConduitResult { - // TODO - Ok(get_filter::Response { - filter: filter::FilterDefinition { - event_fields: None, - event_format: None, - account_data: None, - room: None, - presence: None, - }, - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] -pub fn create_filter_route() -> ConduitResult { - // TODO - Ok(create_filter::Response { - filter_id: utils::random_string(10), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] -pub fn set_global_account_data_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let content = serde_json::from_str::(body.data.get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; - - let event_type = body.event_type.to_string(); - - db.account_data.update( - None, - sender_id, - event_type.clone().into(), - &BasicEvent { - content: CustomEventContent { - event_type, - json: content, - }, - }, - &db.globals, - )?; - - Ok(set_global_account_data::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] -pub fn get_global_account_data_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let data = db - .account_data - .get::>( - None, - sender_id, - EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), - )? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - - Ok(get_global_account_data::Response { account_data: data }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] -pub fn set_displayname_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - db.users - .set_displayname(&sender_id, body.displayname.clone())?; - - // Send a new membership event and presence update into all joined rooms - for room_id in db.rooms.rooms_joined(&sender_id) { - let room_id = room_id?; - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? 
- .ok_or_else(|| { - Error::bad_database( - "Tried to send displayname update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // Presence update - db.rooms.edus.update_presence( - &sender_id, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, - }, - sender: sender_id.clone(), - }, - &db.globals, - )?; - } - - Ok(set_display_name::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] -pub fn get_displayname_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - Ok(get_display_name::Response { - displayname: db.users.displayname(&body.user_id)?, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] -pub fn set_avatar_url_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if let Some(avatar_url) = &body.avatar_url { - if !avatar_url.starts_with("mxc://") { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "avatar_url has to start with mxc://.", - )); - } - - // TODO in the future when we can handle media uploads make sure that this url is our own server - // TODO also make sure this is valid mxc:// format (not only starting with it) - } - - db.users - .set_avatar_url(&sender_id, body.avatar_url.clone())?; - - // Send a new membership event and presence update into all joined rooms - for room_id in db.rooms.rooms_joined(&sender_id) { - let room_id = room_id?; - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( - "Tried to send avatar url update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
- }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // Presence update - db.rooms.edus.update_presence( - &sender_id, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, - }, - sender: sender_id.clone(), - }, - &db.globals, - )?; - } - - Ok(set_avatar_url::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] -pub fn get_avatar_url_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - Ok(get_avatar_url::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>", data = "") -)] -pub fn get_profile_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let avatar_url = db.users.avatar_url(&body.user_id)?; - let displayname = db.users.displayname(&body.user_id)?; - - if avatar_url.is_none() && displayname.is_none() { - // Return 404 if we don't have a profile for this id - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Profile was not found.", - )); - } - - Ok(get_profile::Response { - avatar_url, - displayname, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/presence/<_>/status", data = "") -)] -pub fn set_presence_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - for room_id in db.rooms.rooms_joined(&sender_id) { - let room_id = room_id?; - - db.rooms.edus.update_presence( - &sender_id, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, - currently_active: None, - displayname: db.users.displayname(&sender_id)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: body.presence, - status_msg: body.status_msg.clone(), - }, - sender: sender_id.clone(), - }, - &db.globals, - )?; - } - - Ok(set_presence::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/upload", data = "") -)] -pub fn upload_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - if let Some(one_time_keys) = &body.one_time_keys { - for (key_key, key_value) in one_time_keys { - db.users - .add_one_time_key(sender_id, device_id, key_key, key_value, &db.globals)?; - } - } - - if let Some(device_keys) = &body.device_keys { - // This check is needed to assure that signatures are kept - if db.users.get_device_keys(sender_id, device_id)?.is_none() { - db.users - .add_device_keys(sender_id, device_id, device_keys, &db.rooms, &db.globals)?; - } - } - - Ok(upload_keys::Response { - one_time_key_counts: db.users.count_one_time_keys(sender_id, device_id)?, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - 
post("/_matrix/client/r0/keys/query", data = "") -)] -pub fn get_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut master_keys = BTreeMap::new(); - let mut self_signing_keys = BTreeMap::new(); - let mut user_signing_keys = BTreeMap::new(); - let mut device_keys = BTreeMap::new(); - - for (user_id, device_ids) in &body.device_keys { - if device_ids.is_empty() { - let mut container = BTreeMap::new(); - for device_id in db.users.all_device_ids(user_id) { - let device_id = device_id?; - if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { - let metadata = db - .users - .get_device_metadata(user_id, &device_id)? - .ok_or_else(|| { - Error::bad_database("all_device_keys contained nonexistent device.") - })?; - - keys.unsigned = Some(keys::UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }); - - container.insert(device_id, keys); - } - } - device_keys.insert(user_id.clone(), container); - } else { - for device_id in device_ids { - let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { - let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( - Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to get keys for nonexistent device.", - ), - )?; - - keys.unsigned = Some(keys::UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }); - - container.insert(device_id.clone(), keys); - } - device_keys.insert(user_id.clone(), container); - } - } - - if let Some(master_key) = db.users.get_master_key(user_id, sender_id)? { - master_keys.insert(user_id.clone(), master_key); - } - if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_id)? { - self_signing_keys.insert(user_id.clone(), self_signing_key); - } - if user_id == sender_id { - if let Some(user_signing_key) = db.users.get_user_signing_key(sender_id)? { - user_signing_keys.insert(user_id.clone(), user_signing_key); - } - } - } - - Ok(get_keys::Response { - master_keys, - self_signing_keys, - user_signing_keys, - device_keys, - failures: BTreeMap::new(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/claim", data = "") -)] -pub fn claim_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let mut one_time_keys = BTreeMap::new(); - for (user_id, map) in &body.one_time_keys { - let mut container = BTreeMap::new(); - for (device_id, key_algorithm) in map { - if let Some(one_time_keys) = - db.users - .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? 
- { - let mut c = BTreeMap::new(); - c.insert(one_time_keys.0, one_time_keys.1); - container.insert(device_id.clone(), c); - } - } - one_time_keys.insert(user_id.clone(), container); - } - - Ok(claim_keys::Response { - failures: BTreeMap::new(), - one_time_keys, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/room_keys/version", data = "") -)] -pub fn create_backup_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let version = db - .key_backups - .create_backup(&sender_id, &body.algorithm, &db.globals)?; - - Ok(create_backup::Response { version }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] -pub fn update_backup_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - db.key_backups - .update_backup(&sender_id, &body.version, &body.algorithm, &db.globals)?; - - Ok(update_backup::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version", data = "") -)] -pub fn get_latest_backup_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let (version, algorithm) = - db.key_backups - .get_latest_backup(&sender_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; - - Ok(get_latest_backup::Response { - algorithm, - count: (db.key_backups.count_keys(sender_id, &version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &version)?, - version, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] -pub fn get_backup_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let algorithm = db - .key_backups - .get_backup(&sender_id, &body.version)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; - - Ok(get_backup::Response { - algorithm, - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, - version: body.version.clone(), - } - .into()) -} - -/// Add the received backup_keys to the database. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys", data = "") -)] -pub fn add_backup_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - for (room_id, room) in &body.rooms { - for (session_id, key_data) in &room.sessions { - db.key_backups.add_key( - &sender_id, - &body.version, - &room_id, - &session_id, - &key_data, - &db.globals, - )? - } - } - - Ok(add_backup_keys::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? 
as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys", data = "") -)] -pub fn get_backup_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let rooms = db.key_backups.get_all(&sender_id, &body.version)?; - - Ok(get_backup_keys::Response { rooms }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") -)] -pub fn set_read_marker_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - room_id: body.room_id.clone(), - }; - db.account_data.update( - Some(&body.room_id), - &sender_id, - EventType::FullyRead, - &fully_read_event, - &db.globals, - )?; - - if let Some(event) = &body.read_receipt { - db.rooms.edus.room_read_set( - &body.room_id, - &sender_id, - db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - )?; - - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_id.clone(), - ruma::events::receipt::Receipt { - ts: Some(SystemTime::now()), - }, - ); - let mut receipt_content = BTreeMap::new(); - receipt_content.insert( - event.clone(), - ruma::events::receipt::Receipts { - read: Some(user_receipts), - }, - ); - - db.rooms.edus.roomlatest_update( - &sender_id, - &body.room_id, - AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )), - &db.globals, - )?; - } - Ok(set_read_marker::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") -)] -pub fn create_typing_event_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if body.typing { - db.rooms.edus.roomactive_add( - &sender_id, - &body.room_id, - body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) - + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .roomactive_remove(&sender_id, &body.room_id, &db.globals)?; - } - - Ok(create_typing_event::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/createRoom", data = "") -)] -pub fn create_room_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let room_id = RoomId::new(db.globals.server_name()); - - let alias = body - .room_alias_name - .as_ref() - .map_or(Ok(None), |localpart| { - // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - - if db.rooms.id_from_alias(&alias)?.is_some() { - Err(Error::BadRequest( - ErrorKind::RoomInUse, - "Room alias already exists.", - )) - } else { - Ok(Some(alias)) - } - })?; - - let mut content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); - 
content.federate = body.creation_content.as_ref().map_or(true, |c| c.federate); - content.predecessor = body - .creation_content - .as_ref() - .and_then(|c| c.predecessor.clone()); - content.room_version = RoomVersionId::Version6; - - // 1. The room create event - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomCreate, - content: serde_json::to_value(content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // 2. Let the room creator join - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: body.is_direct, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // Figure out preset. We need it for power levels and preset specific events - let visibility = body.visibility.unwrap_or(room::Visibility::Private); - let preset = body.preset.unwrap_or_else(|| match visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - }); - - // 3. Power levels - let mut users = BTreeMap::new(); - users.insert(sender_id.clone(), 100.into()); - for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); - } - - let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { - serde_json::from_str(power_levels.json().get()).map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })? - } else { - serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 50.into(), - users, - users_default: 0.into(), - notifications: ruma::events::room::power_levels::NotificationPowerLevels { - room: 50.into(), - }, - }) - .expect("event is valid, we just created it") - }; - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomPowerLevels, - content: power_levels_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // 4. 
Events set by preset - // 4.1 Join Rules - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomJoinRules, - content: match preset { - create_room::RoomPreset::PublicChat => serde_json::to_value( - join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), - ) - .expect("event is valid, we just created it"), - // according to spec "invite" is the default - _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), - }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // 4.2 History Visibility - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomHistoryVisibility, - content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // 4.3 Guest Access - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomGuestAccess, - content: match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::Forbidden, - )) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::CanJoin, - )) - .expect("event is valid, we just created it"), - }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - // 5. Events listed in initial_state - for create_room::InitialStateEvent { - event_type, - state_key, - content, - } in &body.initial_state - { - // Silently skip encryption events if they are not allowed - if event_type == &EventType::RoomEncryption && db.globals.encryption_disabled() { - continue; - } - - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: event_type.clone(), - content: serde_json::from_str(content.get()).map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") - })?, - unsigned: None, - state_key: state_key.clone(), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - } - - // 6. Events implied by name and topic - if let Some(name) = &body.name { - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomName, - content: serde_json::to_value( - name::NameEventContent::new(name.clone()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") - })?, - ) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - } - - if let Some(topic) = &body.topic { - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomTopic, - content: serde_json::to_value(topic::TopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - } - - // 7. 
Events implied by invite (and TODO: invite_3pid) - for user in &body.invite { - db.rooms.append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user)?, - avatar_url: db.users.avatar_url(&user)?, - is_direct: body.is_direct, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - } - - // Homeserver specific stuff - if let Some(alias) = alias { - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; - } - - if let Some(room::Visibility::Public) = body.visibility { - db.rooms.set_public(&room_id, true)?; - } - - Ok(create_room::Response { room_id }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/joined_rooms", data = "") -)] -pub fn joined_rooms_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - Ok(joined_rooms::Response { - joined_rooms: db - .rooms - .rooms_joined(&sender_id) - .filter_map(|r| r.ok()) - .collect(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") -)] -pub fn redact_event_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let event_id = db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomRedaction, - content: serde_json::to_value(redaction::RedactionEventContent { - reason: body.reason.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: Some(body.event_id.clone()), - }, - &db.globals, - &db.account_data, - )?; - - Ok(redact_event::Response { event_id }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/room/<_>", data = "") -)] -pub fn create_alias_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - if db.rooms.id_from_alias(&body.room_alias)?.is_some() { - return Err(Error::Conflict("Alias already exists.")); - } - - db.rooms - .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - - Ok(create_alias::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/directory/room/<_>", data = "") -)] -pub fn delete_alias_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - db.rooms.set_alias(&body.room_alias, None, &db.globals)?; - - Ok(delete_alias::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/room/<_>", data = "") -)] -pub fn get_alias_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - if body.room_alias.server_name() != db.globals.server_name() { - todo!("ask remote server"); - } - - let room_id = db - .rooms - .id_from_alias(&body.room_alias)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room with alias not found.", - ))?; - - Ok(get_alias::Response { - room_id, - servers: vec![db.globals.server_name().to_string()], - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/join", data = "") -)] -pub fn join_room_by_id_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - // TODO: Ask a remote server if we don't have this room - - let event = member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: None, - third_party_invite: None, - }; - - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(join_room_by_id::Response { - room_id: body.room_id.clone(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") -)] -pub fn join_room_by_id_or_alias_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let room_id = RoomId::try_from(body.room_id_or_alias.clone()).or_else(|alias| { - Ok::<_, Error>(db.rooms.id_from_alias(&alias)?.ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room not found (TODO: Federation).", - ))?) - })?; - - let body = Ruma { - sender_id: body.sender_id.clone(), - device_id: body.device_id.clone(), - json_body: None, - body: join_room_by_id::Request { - room_id, - third_party_signed: body.third_party_signed.clone(), - }, - }; - - Ok(join_room_by_id_or_alias::Response { - room_id: join_room_by_id_route(db, body)?.0.room_id, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/leave", data = "") -)] -pub fn leave_room_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut event = serde_json::from_value::>( - db.rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content, - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = member::MembershipState::Leave; - - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(leave_room::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/kick", data = "") -)] -pub fn kick_user_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut event = serde_json::from_value::>( - db.rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), - )? 
- .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot kick member that's not in the room.", - ))? - .content, - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = ruma::events::room::member::MembershipState::Leave; - // TODO: reason - - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(kick_user::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") -)] -pub fn joined_members_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if !db - .rooms - .is_joined(&sender_id, &body.room_id) - .unwrap_or(false) - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You aren't a member of the room.", - )); - } - - let mut joined = BTreeMap::new(); - for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { - let display_name = db.users.displayname(&user_id)?; - let avatar_url = db.users.avatar_url(&user_id)?; - - joined.insert( - user_id, - joined_members::RoomMember { - display_name, - avatar_url, - }, - ); - } - - Ok(joined_members::Response { joined }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/ban", data = "") -)] -pub fn ban_user_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - // TODO: reason - - let event = db - .rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), - )? - .map_or( - Ok::<_, Error>(member::MemberEventContent { - membership: member::MembershipState::Ban, - displayname: db.users.displayname(&body.user_id)?, - avatar_url: db.users.avatar_url(&body.user_id)?, - is_direct: None, - third_party_invite: None, - }), - |event| { - let mut event = - serde_json::from_value::>(event.content) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = ruma::events::room::member::MembershipState::Ban; - Ok(event) - }, - )?; - - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(ban_user::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/unban", data = "") -)] -pub fn unban_user_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut event = serde_json::from_value::>( - db.rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), - )? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot unban a user who is not banned.", - ))? 
- .content, - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = ruma::events::room::member::MembershipState::Leave; - - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(unban_user::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/forget", data = "") -)] -pub fn forget_room_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - db.rooms.forget(&body.room_id, &sender_id)?; - - Ok(forget_room::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/invite", data = "") -)] -pub fn invite_user_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(invite_user::Response.into()) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) - } -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] -pub async fn set_room_visibility_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - match body.visibility { - room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, - room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, - } - - Ok(set_room_visibility::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] -pub async fn get_room_visibility_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - Ok(get_room_visibility::Response { - visibility: if db.rooms.is_public_room(&body.room_id)? 
{ - room::Visibility::Public - } else { - room::Visibility::Private - }, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] -pub async fn get_public_rooms_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let Ruma { - body: - get_public_rooms::Request { - limit, - server, - since, - }, - sender_id, - device_id, - json_body, - } = body; - - let get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } = get_public_rooms_filtered_route( - db, - Ruma { - body: get_public_rooms_filtered::Request { - filter: None, - limit, - room_network: get_public_rooms_filtered::RoomNetwork::Matrix, - server, - since, - }, - sender_id, - device_id, - json_body, - }, - ) - .await? - .0; - - Ok(get_public_rooms::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/publicRooms", data = "<_body>") -)] -pub async fn get_public_rooms_filtered_route( - db: State<'_, Database>, - _body: Ruma, -) -> ConduitResult { - use ruma::events::room; - - let mut chunk = db - .rooms - .public_rooms() - .map(|room_id| { - let room_id = room_id?; - - // TODO: Do not load full state? - let state = db.rooms.room_state_full(&room_id)?; - - let chunk = directory::PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: state - .get(&(EventType::RoomCanonicalAlias, "".to_owned())) - .map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") - })? - .alias) - })?, - name: state.get(&(EventType::RoomName, "".to_owned())).map_or( - Ok::<_, Error>(None), - |s| { - Ok(serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .name() - .map(|n| n.to_owned())) - }, - )?, - num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), - room_id, - topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or( - Ok::<_, Error>(None), - |s| { - Ok(Some( - serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room topic event in database.") - })? - .topic, - )) - }, - )?, - world_readable: state - .get(&(EventType::RoomHistoryVisibility, "".to_owned())) - .map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? - .history_visibility - == history_visibility::HistoryVisibility::WorldReadable) - })?, - guest_can_join: state - .get(&(EventType::RoomGuestAccess, "".to_owned())) - .map_or(Ok::<_, Error>(false), |s| { - Ok( - serde_json::from_value::< - Raw, - >(s.content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - })? 
- .guest_access - == guest_access::GuestAccess::CanJoin, - ) - })?, - avatar_url: state.get(&(EventType::RoomAvatar, "".to_owned())).map_or( - Ok::<_, Error>(None), - |s| { - Ok(Some( - serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room avatar event in database.") - })? - .url, - )) - }, - )?, - }; - Ok::<_, Error>(chunk) - }) - .filter_map(|r| r.ok()) // Filter out buggy rooms - // We need to collect all, so we can sort by member count - .collect::>(); - - chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - - /* - chunk.extend_from_slice( - &server_server::send_request( - &db, - "privacytools.io".to_owned(), - ruma::api::federation::v1::get_public_rooms::Request { - limit: Some(20_u32.into()), - since: None, - room_network: ruma::api::federation::v1::get_public_rooms::RoomNetwork::Matrix, - }, - ) - .await - ? - .chunk - .into_iter() - .map(|c| serde_json::from_str(&serde_json::to_string(&c)?)?) - .collect::>(), - ); - */ - - let total_room_count_estimate = (chunk.len() as u32).into(); - - Ok(get_public_rooms_filtered::Response { - chunk, - prev_batch: None, - next_batch: None, - total_room_count_estimate: Some(total_room_count_estimate), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user_directory/search", data = "") -)] -pub fn search_users_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let limit = if let Some(limit) = body.limit { - u64::from(limit) - } else { - 10 - } as usize; - - let mut users = db.users.iter().filter_map(|user_id| { - // Filter out buggy users (they should not exist, but you never know...) - let user_id = user_id.ok()?; - if db.users.is_deactivated(&user_id).ok()? { - return None; - } - - let user = search_users::User { - user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).ok()?, - avatar_url: db.users.avatar_url(&user_id).ok()?, - }; - - if !user.user_id.to_string().contains(&body.search_term) - && user - .display_name - .as_ref() - .filter(|name| name.contains(&body.search_term)) - .is_none() - { - return None; - } - - Some(user) - }); - - let results = users.by_ref().take(limit).collect(); - let limited = users.next().is_some(); - - Ok(search_users::Response { results, limited }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/members", data = "") -)] -pub fn get_member_events_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - Ok(get_member_events::Response { - chunk: db - .rooms - .room_state_type(&body.room_id, &EventType::RoomMember)? 
- .values() - .map(|pdu| pdu.to_member_event()) - .collect(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/thirdparty/protocols") -)] -pub fn get_protocols_route() -> ConduitResult { - warn!("TODO: get_protocols_route"); - Ok(get_protocols::Response { - protocols: BTreeMap::new(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") -)] -pub fn get_room_event_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - Ok(get_room_event::Response { - event: db - .rooms - .get_pdu(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? - .to_room_event(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") -)] -pub fn create_message_event_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut unsigned = serde_json::Map::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - - let event_id = db.rooms.append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: body.event_type.clone(), - content: serde_json::from_str( - body.json_body - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - unsigned: Some(unsigned), - state_key: None, - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(create_message_event::Response { event_id }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] -pub fn create_state_event_for_key_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let content = serde_json::from_str::( - body.json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - - if body.event_type == EventType::RoomCanonicalAlias { - let canonical_alias = serde_json::from_value::< - Raw, - >(content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))?; - - let mut aliases = canonical_alias.alt_aliases; - - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } - - for alias in aliases { - if alias.server_name() != db.globals.server_name() - || db - .rooms - .id_from_alias(&alias)? 
-                    .filter(|room| room == &body.room_id) // Make sure it's the right room
-                    .is_none()
-            {
-                return Err(Error::BadRequest(
-                    ErrorKind::Forbidden,
-                    "You are only allowed to send canonical_alias \
-                    events when its aliases already exist",
-                ));
-            }
-        }
-    }
-
-    let event_id = db.rooms.append_pdu(
-        PduBuilder {
-            room_id: body.room_id.clone(),
-            sender: sender_id.clone(),
-            event_type: body.event_type.clone(),
-            content,
-            unsigned: None,
-            state_key: Some(body.state_key.clone()),
-            redacts: None,
-        },
-        &db.globals,
-        &db.account_data,
-    )?;
-
-    Ok(create_state_event_for_key::Response { event_id }.into())
-}
-
-#[cfg_attr(
-    feature = "conduit_bin",
-    put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "")
-)]
-pub fn create_state_event_for_empty_key_route(
-    db: State<'_, Database>,
-    body: Ruma,
-) -> ConduitResult {
-    // This just calls create_state_event_for_key_route
-    let Ruma {
-        body:
-            create_state_event_for_empty_key::Request {
-                room_id,
-                event_type,
-                data,
-            },
-        sender_id,
-        device_id,
-        json_body,
-    } = body;
-
-    Ok(create_state_event_for_empty_key::Response {
-        event_id: create_state_event_for_key_route(
-            db,
-            Ruma {
-                body: create_state_event_for_key::Request {
-                    room_id,
-                    event_type,
-                    data,
-                    state_key: "".to_owned(),
-                },
-                sender_id,
-                device_id,
-                json_body,
-            },
-        )?
-        .0
-        .event_id,
-    }
-    .into())
-}
-
-#[cfg_attr(
-    feature = "conduit_bin",
-    get("/_matrix/client/r0/rooms/<_>/state", data = "")
-)]
-pub fn get_state_events_route(
-    db: State<'_, Database>,
-    body: Ruma,
-) -> ConduitResult {
-    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
-
-    if !db.rooms.is_joined(sender_id, &body.room_id)? {
-        return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
-            "You don't have permission to view the room state.",
-        ));
-    }
-
-    Ok(get_state_events::Response {
-        room_state: db
-            .rooms
-            .room_state_full(&body.room_id)?
-            .values()
-            .map(|pdu| pdu.to_state_event())
-            .collect(),
-    }
-    .into())
-}
-
-#[cfg_attr(
-    feature = "conduit_bin",
-    get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "")
-)]
-pub fn get_state_events_for_key_route(
-    db: State<'_, Database>,
-    body: Ruma,
-) -> ConduitResult {
-    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
-
-    if !db.rooms.is_joined(sender_id, &body.room_id)? {
-        return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
-            "You don't have permission to view the room state.",
-        ));
-    }
-
-    let event = db
-        .rooms
-        .room_state_get(&body.room_id, &body.event_type, &body.state_key)?
-        .ok_or(Error::BadRequest(
-            ErrorKind::NotFound,
-            "State event not found.",
-        ))?;
-
-    Ok(get_state_events_for_key::Response {
-        content: serde_json::value::to_raw_value(&event.content)
-            .map_err(|_| Error::bad_database("Invalid event content in database"))?,
-    }
-    .into())
-}
-
-#[cfg_attr(
-    feature = "conduit_bin",
-    get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "")
-)]
-pub fn get_state_events_for_empty_key_route(
-    db: State<'_, Database>,
-    body: Ruma,
-) -> ConduitResult {
-    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
-
-    if !db.rooms.is_joined(sender_id, &body.room_id)? {
-        return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
-            "You don't have permission to view the room state.",
-        ));
-    }
-
-    let event = db
-        .rooms
-        .room_state_get(&body.room_id, &body.event_type, "")?
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "State event not found.", - ))?; - - Ok(get_state_events_for_empty_key::Response { - content: serde_json::value::to_raw_value(&event) - .map_err(|_| Error::bad_database("Invalid event content in database"))?, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") -)] -pub async fn sync_events_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - // TODO: match body.set_presence { - db.rooms.edus.ping_presence(&sender_id)?; - - // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(sender_id, device_id); - - let next_batch = db.globals.current_count()?.to_string(); - - let mut joined_rooms = BTreeMap::new(); - let since = body - .since - .clone() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - let mut presence_updates = HashMap::new(); - let mut device_list_updates = HashSet::new(); - - // Look for device list updates of this account - device_list_updates.extend( - db.users - .keys_changed(&sender_id.to_string(), since, None) - .filter_map(|r| r.ok()), - ); - - for room_id in db.rooms.rooms_joined(&sender_id) { - let room_id = room_id?; - - let mut non_timeline_pdus = db - .rooms - .pdus_since(&sender_id, &room_id, since)? - .filter_map(|r| r.ok()); // Filter out buggy events - - // Take the last 10 events for the timeline - let timeline_pdus = non_timeline_pdus - .by_ref() - .rev() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect::>(); - - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - //let mut limited = false; - - let mut state_pdus = Vec::new(); - for pdu in non_timeline_pdus { - if pdu.state_key.is_some() { - state_pdus.push(pdu); - } - } - - let mut send_member_count = false; - let mut joined_since_last_sync = false; - let mut send_notification_counts = false; - for pdu in db - .rooms - .pdus_since(&sender_id, &room_id, since)? - .filter_map(|r| r.ok()) - { - send_notification_counts = true; - if pdu.kind == EventType::RoomMember { - send_member_count = true; - if !joined_since_last_sync && pdu.state_key == Some(sender_id.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; - if content.membership == ruma::events::room::member::MembershipState::Join { - joined_since_last_sync = true; - // Both send_member_count and joined_since_last_sync are set. There's - // nothing more to do - break; - } - } - } - } - - let members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - let joined_member_count = db.rooms.room_members(&room_id).count(); - let invited_member_count = db.rooms.room_members_invited(&room_id).count(); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in db - .rooms - .all_pdus(&sender_id, &room_id)? 
- .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|pdu| pdu.kind == EventType::RoomMember) - .map(|pdu| { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - if let Some(state_key) = &pdu.state_key { - let current_content = serde_json::from_value::< - Raw, - >( - members - .get(state_key) - .ok_or_else(|| { - Error::bad_database( - "A user that joined once has no member event anymore.", - ) - })? - .content - .clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) && matches!( - current_content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - .filter_map(|u| u.ok()) // Filter out buggy users - // Filter for possible heroes - .filter_map(|u| u) - { - if heroes.contains(&hero) || hero == sender_id.as_str() { - continue; - } - - heroes.push(hero); - } - } - - ( - Some(joined_member_count), - Some(invited_member_count), - heroes, - ) - } else { - (None, None, Vec::new()) - }; - - let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &sender_id)? { - Some( - (db.rooms - .pdus_since(&sender_id, &room_id, last_read)? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|pdu| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into(), - ) - } else { - None - } - } else { - None - }; - - let prev_batch = timeline_pdus.first().map_or(Ok::<_, Error>(None), |e| { - Ok(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? - .to_string(), - )) - })?; - - let room_events = timeline_pdus - .into_iter() - .map(|pdu| pdu.to_sync_room_event()) - .collect::>(); - - let mut edus = db - .rooms - .edus - .roomlatests_since(&room_id, since)? - .filter_map(|r| r.ok()) // Filter out buggy events - .collect::>(); - - if db - .rooms - .edus - .last_roomactive_update(&room_id, &db.globals)? - > since - { - edus.push( - serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.roomactives_all(&room_id)?, - )) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - let joined_room = sync_events::JoinedRoom { - account_data: sync_events::AccountData { - events: db - .account_data - .changes_since(Some(&room_id), &sender_id, since)? 
- .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect::>(), - }, - summary: sync_events::RoomSummary { - heroes, - joined_member_count: joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, - notification_count, - }, - timeline: sync_events::Timeline { - limited: joined_since_last_sync, - prev_batch, - events: room_events, - }, - // TODO: state before timeline - state: sync_events::State { - events: if joined_since_last_sync { - db.rooms - .room_state_full(&room_id)? - .into_iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect() - } else { - Vec::new() - }, - }, - ephemeral: sync_events::Ephemeral { events: edus }, - }; - - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); - } - - // Look for device list updates in this room - device_list_updates.extend( - db.users - .keys_changed(&room_id.to_string(), since, None) - .filter_map(|r| r.ok()), - ); - - // Take presence updates from this room - for (user_id, presence) in - db.rooms - .edus - .presence_since(&room_id, since, &db.rooms, &db.globals)? - { - match presence_updates.entry(user_id) { - hash_map::Entry::Vacant(v) => { - v.insert(presence); - } - hash_map::Entry::Occupied(mut o) => { - let p = o.get_mut(); - - // Update existing presence event with more info - p.content.presence = presence.content.presence; - if let Some(status_msg) = presence.content.status_msg { - p.content.status_msg = Some(status_msg); - } - if let Some(last_active_ago) = presence.content.last_active_ago { - p.content.last_active_ago = Some(last_active_ago); - } - if let Some(displayname) = presence.content.displayname { - p.content.displayname = Some(displayname); - } - if let Some(avatar_url) = presence.content.avatar_url { - p.content.avatar_url = Some(avatar_url); - } - if let Some(currently_active) = presence.content.currently_active { - p.content.currently_active = Some(currently_active); - } - } - } - } - } - - let mut left_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_left(&sender_id) { - let room_id = room_id?; - let pdus = db.rooms.pdus_since(&sender_id, &room_id, since)?; - let room_events = pdus - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .map(|pdu| pdu.to_sync_room_event()) - .collect(); - - // TODO: Only until leave point - let mut edus = db - .rooms - .edus - .roomlatests_since(&room_id, since)? - .filter_map(|r| r.ok()) // Filter out buggy events - .collect::>(); - - if db - .rooms - .edus - .last_roomactive_update(&room_id, &db.globals)? 
- > since - { - edus.push( - serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.roomactives_all(&room_id)?, - )) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - let left_room = sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - timeline: sync_events::Timeline { - limited: false, - prev_batch: Some(next_batch.clone()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, - }; - - if !left_room.is_empty() { - left_rooms.insert(room_id.clone(), left_room); - } - } - - let mut invited_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_invited(&sender_id) { - let room_id = room_id?; - - let invited_room = sync_events::InvitedRoom { - invite_state: sync_events::InviteState { - events: db - .rooms - .room_state_full(&room_id)? - .into_iter() - .map(|(_, pdu)| pdu.to_stripped_state_event()) - .collect(), - }, - }; - - if !invited_room.is_empty() { - invited_rooms.insert(room_id.clone(), invited_room); - } - } - - // Remove all to-device events the device received *last time* - db.users - .remove_to_device_events(sender_id, device_id, since)?; - - let response = sync_events::Response { - next_batch, - rooms: sync_events::Rooms { - leave: left_rooms, - join: joined_rooms, - invite: invited_rooms, - }, - presence: sync_events::Presence { - events: presence_updates - .into_iter() - .map(|(_, v)| Raw::from(v)) - .collect(), - }, - account_data: sync_events::AccountData { - events: db - .account_data - .changes_since(None, &sender_id, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect::>(), - }, - device_lists: sync_events::DeviceLists { - changed: device_list_updates.into_iter().collect(), - left: Vec::new(), // TODO - }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since { - db.users.count_one_time_keys(sender_id, device_id)? - } else { - BTreeMap::new() - }, - to_device: sync_events::ToDevice { - events: db.users.get_to_device_events(sender_id, device_id)?, - }, - }; - - // TODO: Retry the endpoint instead of returning (waiting for #118) - if !body.full_state - && response.rooms.is_empty() - && response.presence.is_empty() - && response.account_data.is_empty() - && response.device_lists.is_empty() - && response.device_one_time_keys_count.is_empty() - && response.to_device.is_empty() - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or_default(); - if duration.as_secs() > 30 { - duration = Duration::from_secs(30); - } - let mut delay = tokio::time::delay_for(duration); - tokio::select! { - _ = &mut delay => {} - _ = watcher => {} - } - } - - Ok(response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") -)] -pub fn get_context_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - let base_event = db - .rooms - .get_pdu(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event not found.", - ))? 
- .to_room_event(); - - let base_token = db - .rooms - .get_pdu_count(&body.event_id)? - .expect("event still exists"); - - let events_before = db - .rooms - .pdus_until(&sender_id, &body.room_id, base_token) - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize - / 2, - ) - .filter_map(|r| r.ok()) // Remove buggy events - .collect::>(); - - let start_token = events_before.last().map(|(count, _)| count.to_string()); - - let events_before = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); - - let events_after = db - .rooms - .pdus_after(&sender_id, &body.room_id, base_token) - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize - / 2, - ) - .filter_map(|r| r.ok()) // Remove buggy events - .collect::>(); - - let end_token = events_after.last().map(|(count, _)| count.to_string()); - - let events_after = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); - - Ok(get_context::Response { - start: start_token, - end: end_token, - events_before, - event: Some(base_event), - events_after, - state: db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/messages", data = "") -)] -pub fn get_message_events_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - let from = body - .from - .clone() - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; - - let to = body.to.as_ref().map(|t| t.parse()); - - // Use limit or else 10 - let limit = body - .limit - .try_into() - .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; - - match body.dir { - get_message_events::Direction::Forward => { - let events_after = db - .rooms - .pdus_after(&sender_id, &body.room_id, from) - .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect::>(); - - let end_token = events_after.last().map(|(count, _)| count.to_string()); - - let events_after = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); - - Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: end_token, - chunk: events_after, - state: Vec::new(), - } - .into()) - } - get_message_events::Direction::Backward => { - let events_before = db - .rooms - .pdus_until(&sender_id, &body.room_id, from) - .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect::>(); - - let start_token = events_before.last().map(|(count, _)| count.to_string()); - - let events_before = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); - - Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: start_token, - chunk: events_before, - state: Vec::new(), - } - .into()) - } - } -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -pub fn turn_server_route() -> ConduitResult { - 
Err(Error::BadRequest( - ErrorKind::NotFound, - "There is no turn server yet.", - )) -} - -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/publicised_groups"))] -pub fn publicised_groups_route() -> ConduitResult { - Err(Error::BadRequest( - ErrorKind::NotFound, - "There are not publicised groups yet.", - )) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") -)] -pub fn send_event_to_device_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - for (target_user_id, map) in &body.messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( - sender_id, - &target_user_id, - &target_device_id, - &body.event_type, - serde_json::from_str(event.get()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - &db.globals, - )? - } - - to_device::DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(&target_user_id) { - db.users.add_to_device_event( - sender_id, - &target_user_id, - &target_device_id?, - &body.event_type, - serde_json::from_str(event.get()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - &db.globals, - )?; - } - } - } - } - } - - Ok(send_event_to_device::Response.into()) -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] -pub fn get_media_config_route( - db: State<'_, Database>, -) -> ConduitResult { - Ok(get_media_config::Response { - upload_size: db.globals.max_request_size().into(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/media/r0/upload", data = "") -)] -pub fn create_content_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let mxc = format!( - "mxc://{}/{}", - db.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); - db.media.create( - mxc.clone(), - body.filename.as_ref(), - &body.content_type, - &body.file, - )?; - - Ok(create_content::Response { content_uri: mxc }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get( - "/_matrix/media/r0/download/<_server_name>/<_media_id>", - data = "" - ) -)] -pub fn get_content_route( - db: State<'_, Database>, - body: Ruma, - _server_name: String, - _media_id: String, -) -> ConduitResult { - if let Some(FileMeta { - filename, - content_type, - file, - }) = db - .media - .get(format!("mxc://{}/{}", body.server_name, body.media_id))? - { - Ok(get_content::Response { - file, - content_type, - content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional - } - .into()) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) - } -} - -#[cfg_attr( - feature = "conduit_bin", - get( - "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", - data = "" - ) -)] -pub fn get_content_thumbnail_route( - db: State<'_, Database>, - body: Ruma, - _server_name: String, - _media_id: String, -) -> ConduitResult { - if let Some(FileMeta { - content_type, file, .. - }) = db.media.get_thumbnail( - format!("mxc://{}/{}", body.server_name, body.media_id), - body.width - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - body.height - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - )? 
{ - Ok(get_content_thumbnail::Response { file, content_type }.into()) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) - } -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices", data = "") -)] -pub fn get_devices_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let devices = db - .users - .all_devices_metadata(sender_id) - .filter_map(|r| r.ok()) // Filter out buggy devices - .collect::>(); - - Ok(get_devices::Response { devices }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices/<_device_id>", data = "") -)] -pub fn get_device_route( - db: State<'_, Database>, - body: Ruma, - _device_id: String, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let device = db - .users - .get_device_metadata(&sender_id, &body.body.device_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - - Ok(get_device::Response { device }.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/devices/<_device_id>", data = "") -)] -pub fn update_device_route( - db: State<'_, Database>, - body: Ruma, - _device_id: String, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut device = db - .users - .get_device_metadata(&sender_id, &body.body.device_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - - device.display_name = body.display_name.clone(); - - db.users - .update_device_metadata(&sender_id, &body.body.device_id, &device)?; - - Ok(update_device::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/devices/<_device_id>", data = "") -)] -pub fn delete_device_route( - db: State<'_, Database>, - body: Ruma, - _device_id: String, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - db.users.remove_device(&sender_id, &body.body.device_id)?; - - Ok(delete_device::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/delete_devices", data = "") -)] -pub fn delete_devices_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - for device_id in &body.devices { - db.users.remove_device(&sender_id, &device_id)? - } - - Ok(delete_devices::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/device_signing/upload", data = "") -)] -pub fn upload_signing_keys_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); - } - - if let Some(master_key) = &body.master_key { - db.users.add_cross_signing_keys( - sender_id, - &master_key, - &body.self_signing_key, - &body.user_signing_key, - &db.rooms, - &db.globals, - )?; - } - - Ok(upload_signing_keys::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/signatures/upload", data = "") -)] -pub fn upload_signatures_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - for (user_id, signed_keys) in &body.signed_keys { - for (key_id, signed_key) in signed_keys { - for signature in signed_key - .get("signatures") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Missing signatures field.", - ))? - .get(sender_id.to_string()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid user in signatures field.", - ))? - .as_object() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature.", - ))? - .clone() - .into_iter() - { - // Signature validation? 
- let signature = ( - signature.0, - signature - .1 - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature value.", - ))? - .to_owned(), - ); - db.users.sign_key( - &user_id, - &key_id, - signature, - &sender_id, - &db.rooms, - &db.globals, - )?; - } - } - } - - Ok(upload_signatures::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/keys/changes", data = "") -)] -pub fn get_key_changes_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut device_list_updates = HashSet::new(); - - device_list_updates.extend( - db.users - .keys_changed( - &sender_id.to_string(), - body.from - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, - Some( - body.to - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, - ), - ) - .filter_map(|r| r.ok()), - ); - - for room_id in db.rooms.rooms_joined(sender_id).filter_map(|r| r.ok()) { - device_list_updates.extend( - db.users - .keys_changed( - &room_id.to_string(), - body.from.parse().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") - })?, - Some(body.to.parse().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.") - })?), - ) - .filter_map(|r| r.ok()), - ); - } - Ok(get_key_changes::Response { - changed: device_list_updates.into_iter().collect(), - left: Vec::new(), // TODO - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] -pub fn pushers_route() -> ConduitResult { - Ok(get_pushers::Response { - pushers: Vec::new(), - } - .into()) -} - -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] -pub fn set_pushers_route() -> ConduitResult { - Ok(get_pushers::Response { - pushers: Vec::new(), - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] -pub fn update_tag_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut tags_event = db - .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? - .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { - tags: BTreeMap::new(), - }, - }); - tags_event - .content - .tags - .insert(body.tag.to_string(), body.tag_info.clone()); - - db.account_data.update( - Some(&body.room_id), - sender_id, - EventType::Tag, - &tags_event, - &db.globals, - )?; - - Ok(create_tag::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] -pub fn delete_tag_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - let mut tags_event = db - .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? 
- .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { - tags: BTreeMap::new(), - }, - }); - tags_event.content.tags.remove(&body.tag); - - db.account_data.update( - Some(&body.room_id), - sender_id, - EventType::Tag, - &tags_event, - &db.globals, - )?; - - Ok(delete_tag::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") -)] -pub fn get_tags_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - Ok(get_tags::Response { - tags: db - .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? - .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { - tags: BTreeMap::new(), - }, - }) - .content - .tags, - } - .into()) -} - -#[cfg(feature = "conduit_bin")] -#[options("/<_..>")] -pub fn options_route() -> ConduitResult { - Ok(send_event_to_device::Response.into()) -} diff --git a/src/client_server/account.rs b/src/client_server/account.rs new file mode 100644 index 0000000..bfb768a --- /dev/null +++ b/src/client_server/account.rs @@ -0,0 +1,308 @@ +use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::{ + account::{ + change_password, deactivate, get_username_availability, register, whoami, + ThirdPartyIdRemovalStatus, + }, + uiaa::{AuthFlow, UiaaInfo}, + }, + }, + events::{room::member, EventType}, + UserId, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; + +const GUEST_NAME_LENGTH: usize = 10; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/register/available", data = "") +)] +pub fn get_register_available_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + // Validate user id + let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + + // Check if username is creative enough + if db.users.exists(&user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + + // TODO add check for appservice namespaces + + // If no if check is true we have an username that's available to be used. + Ok(get_username_availability::Response { available: true }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/register", data = "") +)] +pub fn register_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + if db.globals.registration_disabled() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Registration has been disabled.", + )); + } + + // Validate user id + let user_id = UserId::parse_with_server_name( + body.username + .clone() + .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)) + .to_lowercase(), + db.globals.server_name(), + ) + .ok() + .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + + // Check if username is creative enough + if db.users.exists(&user_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.dummy".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = + db.uiaa + .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, "".into(), &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + let password = body.password.clone().unwrap_or_default(); + + // Create user + db.users.create(&user_id, &password)?; + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + + // Generate new token for the device + let token = utils::random_string(TOKEN_LENGTH); + + // Add device + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; + + // Initial data + db.account_data.update( + None, + &user_id, + EventType::PushRules, + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: crate::push_rules::default_pushrules(&user_id), + }, + }, + &db.globals, + )?; + + Ok(register::Response { + access_token: Some(token), + user_id, + device_id: Some(device_id), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/account/password", data = "") +)] +pub fn change_password_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
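+        // A note on the UIAA handshake used by this and the other sensitive endpoints:
+        // the first request normally arrives without an `auth` object, so the `else`
+        // branch below stores a fresh session and replies with `Error::Uiaa`; the client
+        // retries with an `auth` dict referencing that session, `try_auth` verifies the
+        // "m.login.password" stage, and only once it reports `worked == true` does
+        // execution continue past this block to actually change the password.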
+ } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + db.users.set_password(&sender_id, &body.new_password)?; + + // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma + // See: https://github.com/ruma/ruma/issues/107 + // Logout all devices except the current one + for id in db + .users + .all_device_ids(&sender_id) + .filter_map(|id| id.ok()) + .filter(|id| id != device_id) + { + db.users.remove_device(&sender_id, &id)?; + } + + Ok(change_password::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/account/whoami", data = "") +)] +pub fn whoami_route(body: Ruma) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + Ok(whoami::Response { + user_id: sender_id.clone(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/account/deactivate", data = "") +)] +pub fn deactivate_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + // Leave all joined rooms and reject all invitations + for room_id in db + .rooms + .rooms_joined(&sender_id) + .chain(db.rooms.rooms_invited(&sender_id)) + { + let room_id = room_id?; + let event = member::MemberEventContent { + membership: member::MembershipState::Leave, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + }; + + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + // Remove devices and mark account as deactivated + db.users.deactivate_account(&sender_id)?; + + Ok(deactivate::Response { + id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, + } + .into()) +} diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs new file mode 100644 index 0000000..4399cb5 --- /dev/null +++ b/src/client_server/alias.rs @@ -0,0 +1,67 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::alias::{create_alias, delete_alias, get_alias}, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{delete, get, put}; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/directory/room/<_>", data = "") +)] +pub fn create_alias_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + if db.rooms.id_from_alias(&body.room_alias)?.is_some() { + return Err(Error::Conflict("Alias 
already exists.")); + } + + db.rooms + .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; + + Ok(create_alias::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/directory/room/<_>", data = "") +)] +pub fn delete_alias_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + + Ok(delete_alias::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/directory/room/<_>", data = "") +)] +pub fn get_alias_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + if body.room_alias.server_name() != db.globals.server_name() { + todo!("ask remote server"); + } + + let room_id = db + .rooms + .id_from_alias(&body.room_alias)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room with alias not found.", + ))?; + + Ok(get_alias::Response { + room_id, + servers: vec![db.globals.server_name().to_string()], + } + .into()) +} diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs new file mode 100644 index 0000000..a104964 --- /dev/null +++ b/src/client_server/backup.rs @@ -0,0 +1,142 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::backup::{ + add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, + update_backup, + }, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post, put}; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/room_keys/version", data = "") +)] +pub fn create_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let version = db + .key_backups + .create_backup(&sender_id, &body.algorithm, &db.globals)?; + + Ok(create_backup::Response { version }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/unstable/room_keys/version/<_>", data = "") +)] +pub fn update_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + db.key_backups + .update_backup(&sender_id, &body.version, &body.algorithm, &db.globals)?; + + Ok(update_backup::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/version", data = "") +)] +pub fn get_latest_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let (version, algorithm) = + db.key_backups + .get_latest_backup(&sender_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_latest_backup::Response { + algorithm, + count: (db.key_backups.count_keys(sender_id, &version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &version)?, + version, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/version/<_>", data = "") +)] +pub fn get_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let algorithm = db + .key_backups + .get_backup(&sender_id, &body.version)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_backup::Response { + algorithm, + count: (db.key_backups.count_keys(sender_id, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + version: body.version.clone(), + } + .into()) +} + +/// Add the received backup_keys to the database. +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/unstable/room_keys/keys", data = "") +)] +pub fn add_backup_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for (room_id, room) in &body.rooms { + for (session_id, key_data) in &room.sessions { + db.key_backups.add_key( + &sender_id, + &body.version, + &room_id, + &session_id, + &key_data, + &db.globals, + )? + } + } + + Ok(add_backup_keys::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/keys", data = "") +)] +pub fn get_backup_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let rooms = db.key_backups.get_all(&sender_id, &body.version)?; + + Ok(get_backup_keys::Response { rooms }.into()) +} diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs new file mode 100644 index 0000000..afa0604 --- /dev/null +++ b/src/client_server/capabilities.rs @@ -0,0 +1,31 @@ +use crate::ConduitResult; +use ruma::{api::client::r0::capabilities::get_capabilities, RoomVersionId}; +use std::collections::BTreeMap; + +#[cfg(feature = "conduit_bin")] +use rocket::get; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] +pub fn get_capabilities_route() -> ConduitResult { + let mut available = BTreeMap::new(); + available.insert( + RoomVersionId::Version5, + get_capabilities::RoomVersionStability::Stable, + ); + available.insert( + RoomVersionId::Version6, + get_capabilities::RoomVersionStability::Stable, + ); + + Ok(get_capabilities::Response { + capabilities: get_capabilities::Capabilities { + change_password: None, // None means it is possible + room_versions: Some(get_capabilities::RoomVersionsCapability { + default: "6".to_owned(), + available, + }), + custom_capabilities: BTreeMap::new(), + }, + } + .into()) +} diff --git a/src/client_server/config.rs b/src/client_server/config.rs new file mode 100644 index 0000000..8cb6a0d --- /dev/null +++ b/src/client_server/config.rs @@ -0,0 +1,67 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::config::{get_global_account_data, set_global_account_data}, + }, + events::{custom::CustomEventContent, BasicEvent, EventType}, + Raw, +}; +use std::convert::TryFrom; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, put}; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") +)] +pub fn set_global_account_data_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let content = serde_json::from_str::(body.data.get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + + db.account_data.update( + None, + sender_id, + event_type.clone().into(), + &BasicEvent { + content: CustomEventContent { + event_type, + json: content, + }, + }, + &db.globals, + )?; + + 
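+    // Any event type the client supplies (e.g. "m.direct" or a custom namespaced type)
+    // ends up stored the same way: the raw JSON is wrapped in a `CustomEventContent`
+    // inside a `BasicEvent` and saved as global account data (room `None`) under that
+    // event type string.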
Ok(set_global_account_data::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") +)] +pub fn get_global_account_data_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let data = db + .account_data + .get::>( + None, + sender_id, + EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), + )? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + Ok(get_global_account_data::Response { account_data: data }.into()) +} diff --git a/src/client_server/context.rs b/src/client_server/context.rs new file mode 100644 index 0000000..7a6cbce --- /dev/null +++ b/src/client_server/context.rs @@ -0,0 +1,92 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{error::ErrorKind, r0::context::get_context}; +use std::convert::TryFrom; + +#[cfg(feature = "conduit_bin")] +use rocket::get; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") +)] +pub fn get_context_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + let base_event = db + .rooms + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event not found.", + ))? + .to_room_event(); + + let base_token = db + .rooms + .get_pdu_count(&body.event_id)? + .expect("event still exists"); + + let events_before = db + .rooms + .pdus_until(&sender_id, &body.room_id, base_token) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect::>(); + + let start_token = events_before.last().map(|(count, _)| count.to_string()); + + let events_before = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect::>(); + + let events_after = db + .rooms + .pdus_after(&sender_id, &body.room_id, base_token) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect::>(); + + let end_token = events_after.last().map(|(count, _)| count.to_string()); + + let events_after = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect::>(); + + Ok(get_context::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state: db // TODO: State at event + .rooms + .room_state_full(&body.room_id)? 
+ .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + } + .into()) +} diff --git a/src/client_server/device.rs b/src/client_server/device.rs new file mode 100644 index 0000000..379f827 --- /dev/null +++ b/src/client_server/device.rs @@ -0,0 +1,170 @@ +use super::State; +use crate::{utils, ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, + uiaa::{AuthFlow, UiaaInfo}, + }, +}; + +use super::SESSION_ID_LENGTH; +#[cfg(feature = "conduit_bin")] +use rocket::{delete, get, post, put}; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/devices", data = "") +)] +pub fn get_devices_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let devices = db + .users + .all_devices_metadata(sender_id) + .filter_map(|r| r.ok()) // Filter out buggy devices + .collect::>(); + + Ok(get_devices::Response { devices }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/devices/<_device_id>", data = "") +)] +pub fn get_device_route( + db: State<'_, Database>, + body: Ruma, + _device_id: String, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let device = db + .users + .get_device_metadata(&sender_id, &body.body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + + Ok(get_device::Response { device }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/devices/<_device_id>", data = "") +)] +pub fn update_device_route( + db: State<'_, Database>, + body: Ruma, + _device_id: String, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut device = db + .users + .get_device_metadata(&sender_id, &body.body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + + device.display_name = body.display_name.clone(); + + db.users + .update_device_metadata(&sender_id, &body.body.device_id, &device)?; + + Ok(update_device::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/devices/<_device_id>", data = "") +)] +pub fn delete_device_route( + db: State<'_, Database>, + body: Ruma, + _device_id: String, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
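+        // Same password-based UIAA flow as in the account endpoints above. Note that the
+        // device removed further down is the one named in the request path
+        // (`body.body.device_id`), which is not necessarily the device that
+        // authenticated this request (`device_id`).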
+ } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + db.users.remove_device(&sender_id, &body.body.device_id)?; + + Ok(delete_device::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/delete_devices", data = "") +)] +pub fn delete_devices_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + for device_id in &body.devices { + db.users.remove_device(&sender_id, &device_id)? + } + + Ok(delete_devices::Response.into()) +} diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs new file mode 100644 index 0000000..510511c --- /dev/null +++ b/src/client_server/directory.rs @@ -0,0 +1,258 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Result, Ruma}; +use ruma::{ + api::client::r0::{ + directory::{ + self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, + room, + }, + events::{ + room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, + EventType, + }, + Raw, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post, put}; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/publicRooms", data = "") +)] +pub async fn get_public_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let Ruma { + body: + get_public_rooms::Request { + limit, + server, + since, + }, + sender_id, + device_id, + json_body, + } = body; + + let get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } = get_public_rooms_filtered_route( + db, + Ruma { + body: get_public_rooms_filtered::Request { + filter: None, + limit, + room_network: get_public_rooms_filtered::RoomNetwork::Matrix, + server, + since, + }, + sender_id, + device_id, + json_body, + }, + ) + .await? + .0; + + Ok(get_public_rooms::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/publicRooms", data = "<_body>") +)] +pub async fn get_public_rooms_filtered_route( + db: State<'_, Database>, + _body: Ruma, +) -> ConduitResult { + let mut chunk = + db.rooms + .public_rooms() + .map(|room_id| { + let room_id = room_id?; + + // TODO: Do not load full state? 
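+                // The chunk built below only needs a handful of state events (canonical
+                // alias, name, topic, history visibility, guest access and avatar), but
+                // for now the whole state map is fetched and the relevant entries are
+                // picked out of it, hence the TODO above.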
+ let state = db.rooms.room_state_full(&room_id)?; + + let chunk = directory::PublicRoomsChunk { + aliases: Vec::new(), + canonical_alias: state + .get(&(EventType::RoomCanonicalAlias, "".to_owned())) + .map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::< + Raw, + >(s.content.clone()) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid canonical alias event in database.") + })? + .alias) + })?, + name: state.get(&(EventType::RoomName, "".to_owned())).map_or( + Ok::<_, Error>(None), + |s| { + Ok(serde_json::from_value::>( + s.content.clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? + .name() + .map(|n| n.to_owned())) + }, + )?, + num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), + room_id, + topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or( + Ok::<_, Error>(None), + |s| { + Ok(Some( + serde_json::from_value::>( + s.content.clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room topic event in database.") + })? + .topic, + )) + }, + )?, + world_readable: state + .get(&(EventType::RoomHistoryVisibility, "".to_owned())) + .map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< + Raw, + >(s.content.clone()) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? + .history_visibility + == history_visibility::HistoryVisibility::WorldReadable) + })?, + guest_can_join: state + .get(&(EventType::RoomGuestAccess, "".to_owned())) + .map_or(Ok::<_, Error>(false), |s| { + Ok( + serde_json::from_value::>( + s.content.clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + })? + .guest_access + == guest_access::GuestAccess::CanJoin, + ) + })?, + avatar_url: state + .get(&(EventType::RoomAvatar, "".to_owned())) + .map(|s| { + Ok::<_, Error>( + serde_json::from_value::>( + s.content.clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room avatar event in database.") + })? + .url, + ) + }) + .transpose()?, + }; + Ok(chunk) + }) + .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + // We need to collect all, so we can sort by member count + .collect::>(); + + chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + + /* + chunk.extend_from_slice( + &server_server::send_request( + &db, + "privacytools.io".to_owned(), + ruma::api::federation::v1::get_public_rooms::Request { + limit: Some(20_u32.into()), + since: None, + room_network: ruma::api::federation::v1::get_public_rooms::RoomNetwork::Matrix, + }, + ) + .await + ? + .chunk + .into_iter() + .map(|c| serde_json::from_str(&serde_json::to_string(&c)?)?) 
+ .collect::>(), + ); + */ + + let total_room_count_estimate = (chunk.len() as u32).into(); + + Ok(get_public_rooms_filtered::Response { + chunk, + prev_batch: None, + next_batch: None, + total_room_count_estimate: Some(total_room_count_estimate), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/directory/list/room/<_>", data = "") +)] +pub async fn set_room_visibility_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + match body.visibility { + room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, + room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, + } + + Ok(set_room_visibility::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/directory/list/room/<_>", data = "") +)] +pub async fn get_room_visibility_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + Ok(get_room_visibility::Response { + visibility: if db.rooms.is_public_room(&body.room_id)? { + room::Visibility::Public + } else { + room::Visibility::Private + }, + } + .into()) +} diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs new file mode 100644 index 0000000..165419a --- /dev/null +++ b/src/client_server/filter.rs @@ -0,0 +1,29 @@ +use crate::{utils, ConduitResult}; +use ruma::api::client::r0::filter::{self, create_filter, get_filter}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] +pub fn get_filter_route() -> ConduitResult { + // TODO + Ok(get_filter::Response { + filter: filter::FilterDefinition { + event_fields: None, + event_format: None, + account_data: None, + room: None, + presence: None, + }, + } + .into()) +} + +#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] +pub fn create_filter_route() -> ConduitResult { + // TODO + Ok(create_filter::Response { + filter_id: utils::random_string(10), + } + .into()) +} diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs new file mode 100644 index 0000000..4067210 --- /dev/null +++ b/src/client_server/keys.rs @@ -0,0 +1,323 @@ +use super::State; +use super::SESSION_ID_LENGTH; +use crate::{utils, ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::{ + keys::{ + self, claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, + }, + uiaa::{AuthFlow, UiaaInfo}, + }, +}; +use std::collections::{BTreeMap, HashSet}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/upload", data = "") +)] +pub fn upload_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + if let Some(one_time_keys) = &body.one_time_keys { + for (key_key, key_value) in one_time_keys { + db.users + .add_one_time_key(sender_id, device_id, key_key, key_value, &db.globals)?; + } + } + + if let Some(device_keys) = &body.device_keys { + // This check is needed to assure that signatures are kept + if db.users.get_device_keys(sender_id, device_id)?.is_none() { + db.users + .add_device_keys(sender_id, device_id, device_keys, &db.rooms, &db.globals)?; + } + } + + Ok(upload_keys::Response { + one_time_key_counts: db.users.count_one_time_keys(sender_id, device_id)?, + } + .into()) +} + 
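+// Rough sketch of how a client is expected to exercise the key endpoints in this file
+// (typical end-to-end encryption flow, not something this commit adds): after login a
+// device POSTs /keys/upload with its identity keys and a pool of one-time keys; a peer
+// that wants to start an encrypted session POSTs /keys/query to learn those device
+// keys and then POSTs /keys/claim with a body shaped like
+//
+//   { "one_time_keys": { "@alice:example.org": { "DEVICEID": "signed_curve25519" } } }
+//
+// and the claim handler below hands out (and removes) exactly one one-time key per
+// requested device via `take_one_time_key`.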
+#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/query", data = "") +)] +pub fn get_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut master_keys = BTreeMap::new(); + let mut self_signing_keys = BTreeMap::new(); + let mut user_signing_keys = BTreeMap::new(); + let mut device_keys = BTreeMap::new(); + + for (user_id, device_ids) in &body.device_keys { + if device_ids.is_empty() { + let mut container = BTreeMap::new(); + for device_id in db.users.all_device_ids(user_id) { + let device_id = device_id?; + if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { + let metadata = db + .users + .get_device_metadata(user_id, &device_id)? + .ok_or_else(|| { + Error::bad_database("all_device_keys contained nonexistent device.") + })?; + + keys.unsigned = Some(keys::UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }); + + container.insert(device_id, keys); + } + } + device_keys.insert(user_id.clone(), container); + } else { + for device_id in device_ids { + let mut container = BTreeMap::new(); + if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { + let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( + Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to get keys for nonexistent device.", + ), + )?; + + keys.unsigned = Some(keys::UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }); + + container.insert(device_id.clone(), keys); + } + device_keys.insert(user_id.clone(), container); + } + } + + if let Some(master_key) = db.users.get_master_key(user_id, sender_id)? { + master_keys.insert(user_id.clone(), master_key); + } + if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_id)? { + self_signing_keys.insert(user_id.clone(), self_signing_key); + } + if user_id == sender_id { + if let Some(user_signing_key) = db.users.get_user_signing_key(sender_id)? { + user_signing_keys.insert(user_id.clone(), user_signing_key); + } + } + } + + Ok(get_keys::Response { + master_keys, + self_signing_keys, + user_signing_keys, + device_keys, + failures: BTreeMap::new(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/keys/claim", data = "") +)] +pub fn claim_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let mut one_time_keys = BTreeMap::new(); + for (user_id, map) in &body.one_time_keys { + let mut container = BTreeMap::new(); + for (device_id, key_algorithm) in map { + if let Some(one_time_keys) = + db.users + .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? 
+ { + let mut c = BTreeMap::new(); + c.insert(one_time_keys.0, one_time_keys.1); + container.insert(device_id.clone(), c); + } + } + one_time_keys.insert(user_id.clone(), container); + } + + Ok(claim_keys::Response { + failures: BTreeMap::new(), + one_time_keys, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/keys/device_signing/upload", data = "") +)] +pub fn upload_signing_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec!["m.login.password".to_owned()], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = db.uiaa.try_auth( + &sender_id, + &device_id, + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + return Err(Error::Uiaa(uiaainfo)); + } + + if let Some(master_key) = &body.master_key { + db.users.add_cross_signing_keys( + sender_id, + &master_key, + &body.self_signing_key, + &body.user_signing_key, + &db.rooms, + &db.globals, + )?; + } + + Ok(upload_signing_keys::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/unstable/keys/signatures/upload", data = "") +)] +pub fn upload_signatures_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for (user_id, signed_keys) in &body.signed_keys { + for (key_id, signed_key) in signed_keys { + for signature in signed_key + .get("signatures") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Missing signatures field.", + ))? + .get(sender_id.to_string()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid user in signatures field.", + ))? + .as_object() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid signature.", + ))? + .clone() + .into_iter() + { + // Signature validation? + let signature = ( + signature.0, + signature + .1 + .as_str() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid signature value.", + ))? 
+ .to_owned(), + ); + db.users.sign_key( + &user_id, + &key_id, + signature, + &sender_id, + &db.rooms, + &db.globals, + )?; + } + } + } + + Ok(upload_signatures::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/keys/changes", data = "") +)] +pub fn get_key_changes_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut device_list_updates = HashSet::new(); + + device_list_updates.extend( + db.users + .keys_changed( + &sender_id.to_string(), + body.from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, + Some( + body.to + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, + ), + ) + .filter_map(|r| r.ok()), + ); + + for room_id in db.rooms.rooms_joined(sender_id).filter_map(|r| r.ok()) { + device_list_updates.extend( + db.users + .keys_changed( + &room_id.to_string(), + body.from.parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") + })?, + Some(body.to.parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.") + })?), + ) + .filter_map(|r| r.ok()), + ); + } + Ok(get_key_changes::Response { + changed: device_list_updates.into_iter().collect(), + left: Vec::new(), // TODO + } + .into()) +} diff --git a/src/client_server/media.rs b/src/client_server/media.rs new file mode 100644 index 0000000..efcb3a6 --- /dev/null +++ b/src/client_server/media.rs @@ -0,0 +1,107 @@ +use super::State; +use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; +use std::convert::TryInto; + +const MXC_LENGTH: usize = 256; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] +pub fn get_media_config_route( + db: State<'_, Database>, +) -> ConduitResult { + Ok(get_media_config::Response { + upload_size: db.globals.max_request_size().into(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/media/r0/upload", data = "") +)] +pub fn create_content_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let mxc = format!( + "mxc://{}/{}", + db.globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + db.media.create( + mxc.clone(), + body.filename.as_ref(), + &body.content_type, + &body.file, + )?; + + Ok(create_content::Response { content_uri: mxc }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get( + "/_matrix/media/r0/download/<_server_name>/<_media_id>", + data = "" + ) +)] +pub fn get_content_route( + db: State<'_, Database>, + body: Ruma, + _server_name: String, + _media_id: String, +) -> ConduitResult { + if let Some(FileMeta { + filename, + content_type, + file, + }) = db + .media + .get(format!("mxc://{}/{}", body.server_name, body.media_id))? 
+    {
+        Ok(get_content::Response {
+            file,
+            content_type,
+            content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional
+        }
+        .into())
+    } else {
+        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+    }
+}
+
+#[cfg_attr(
+    feature = "conduit_bin",
+    get(
+        "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>",
+        data = "<body>"
+    )
+)]
+pub fn get_content_thumbnail_route(
+    db: State<'_, Database>,
+    body: Ruma<get_content_thumbnail::Request>,
+    _server_name: String,
+    _media_id: String,
+) -> ConduitResult<get_content_thumbnail::Response> {
+    if let Some(FileMeta {
+        content_type, file, ..
+    }) = db.media.get_thumbnail(
+        format!("mxc://{}/{}", body.server_name, body.media_id),
+        body.width
+            .try_into()
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
+        body.height
+            .try_into()
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
+    )? {
+        Ok(get_content_thumbnail::Response { file, content_type }.into())
+    } else {
+        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+    }
+}
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs
new file mode 100644
index 0000000..716d5e4
--- /dev/null
+++ b/src/client_server/membership.rs
@@ -0,0 +1,422 @@
+use super::State;
+use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma};
+use ruma::{
+    api::client::{
+        error::ErrorKind,
+        r0::membership::{
+            ban_user, forget_room, get_member_events, invite_user, join_room_by_id,
+            join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room,
+            unban_user,
+        },
+    },
+    events::{room::member, EventType},
+    Raw, RoomId,
+};
+use std::{collections::BTreeMap, convert::TryFrom};
+
+#[cfg(feature = "conduit_bin")]
+use rocket::{get, post};
+
+#[cfg_attr(
+    feature = "conduit_bin",
+    post("/_matrix/client/r0/rooms/<_>/join", data = "<body>")
+)]
+pub fn join_room_by_id_route(
+    db: State<'_, Database>,
+    body: Ruma<join_room_by_id::Request>,
+) -> ConduitResult<join_room_by_id::Response> {
+    let sender_id = body.sender_id.as_ref().expect("user is authenticated");
+
+    // TODO: Ask a remote server if we don't have this room
+
+    let event = member::MemberEventContent {
+        membership: member::MembershipState::Join,
+        displayname: db.users.displayname(&sender_id)?,
+        avatar_url: db.users.avatar_url(&sender_id)?,
+        is_direct: None,
+        third_party_invite: None,
+    };
+
+    db.rooms.append_pdu(
+        PduBuilder {
+            room_id: body.room_id.clone(),
+            sender: sender_id.clone(),
+            event_type: EventType::RoomMember,
+            content: serde_json::to_value(event).expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some(sender_id.to_string()),
+            redacts: None,
+        },
+        &db.globals,
+        &db.account_data,
+    )?;
+
+    Ok(join_room_by_id::Response {
+        room_id: body.room_id.clone(),
+    }
+    .into())
+}
+
+#[cfg_attr(
+    feature = "conduit_bin",
+    post("/_matrix/client/r0/join/<_>", data = "<body>")
+)]
+pub fn join_room_by_id_or_alias_route(
+    db: State<'_, Database>,
+    body: Ruma<join_room_by_id_or_alias::Request>,
+) -> ConduitResult<join_room_by_id_or_alias::Response> {
+    let room_id = RoomId::try_from(body.room_id_or_alias.clone()).or_else(|alias| {
+        Ok::<_, Error>(db.rooms.id_from_alias(&alias)?.ok_or(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Room not found (TODO: Federation).",
+        ))?)
+ })?; + + let body = Ruma { + sender_id: body.sender_id.clone(), + device_id: body.device_id.clone(), + json_body: None, + body: join_room_by_id::Request { + room_id, + third_party_signed: body.third_party_signed.clone(), + }, + }; + + Ok(join_room_by_id_or_alias::Response { + room_id: join_room_by_id_route(db, body)?.0.room_id, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/leave", data = "") +)] +pub fn leave_room_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut event = serde_json::from_value::>( + db.rooms + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? + .content, + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = member::MembershipState::Leave; + + db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(leave_room::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/invite", data = "") +)] +pub fn invite_user_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { + db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(invite_user::Response.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) + } +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/kick", data = "") +)] +pub fn kick_user_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut event = serde_json::from_value::>( + db.rooms + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot kick member that's not in the room.", + ))? 
+ .content, + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = ruma::events::room::member::MembershipState::Leave; + // TODO: reason + + db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(kick_user::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/ban", data = "") +)] +pub fn ban_user_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + // TODO: reason + + let event = db + .rooms + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? + .map_or( + Ok::<_, Error>(member::MemberEventContent { + membership: member::MembershipState::Ban, + displayname: db.users.displayname(&body.user_id)?, + avatar_url: db.users.avatar_url(&body.user_id)?, + is_direct: None, + third_party_invite: None, + }), + |event| { + let mut event = + serde_json::from_value::>(event.content) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + event.membership = ruma::events::room::member::MembershipState::Ban; + Ok(event) + }, + )?; + + db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(ban_user::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/unban", data = "") +)] +pub fn unban_user_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut event = serde_json::from_value::>( + db.rooms + .room_state_get( + &body.room_id, + &EventType::RoomMember, + &body.user_id.to_string(), + )? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot unban a user who is not banned.", + ))? 
+ .content, + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = ruma::events::room::member::MembershipState::Leave; + + db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(unban_user::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/forget", data = "") +)] +pub fn forget_room_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.rooms.forget(&body.room_id, &sender_id)?; + + Ok(forget_room::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/joined_rooms", data = "") +)] +pub fn joined_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + Ok(joined_rooms::Response { + joined_rooms: db + .rooms + .rooms_joined(&sender_id) + .filter_map(|r| r.ok()) + .collect(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/members", data = "") +)] +pub fn get_member_events_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + Ok(get_member_events::Response { + chunk: db + .rooms + .room_state_type(&body.room_id, &EventType::RoomMember)? 
+ .values() + .map(|pdu| pdu.to_member_event()) + .collect(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") +)] +pub fn joined_members_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db + .rooms + .is_joined(&sender_id, &body.room_id) + .unwrap_or(false) + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You aren't a member of the room.", + )); + } + + let mut joined = BTreeMap::new(); + for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + let display_name = db.users.displayname(&user_id)?; + let avatar_url = db.users.avatar_url(&user_id)?; + + joined.insert( + user_id, + joined_members::RoomMember { + display_name, + avatar_url, + }, + ); + } + + Ok(joined_members::Response { joined }.into()) +} diff --git a/src/client_server/message.rs b/src/client_server/message.rs new file mode 100644 index 0000000..04d965d --- /dev/null +++ b/src/client_server/message.rs @@ -0,0 +1,128 @@ +use super::State; +use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::message::{create_message_event, get_message_events}, +}; +use std::convert::TryInto; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, put}; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") +)] +pub fn create_message_event_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut unsigned = serde_json::Map::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); + + let event_id = db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: body.event_type.clone(), + content: serde_json::from_str( + body.json_body + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? + .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(create_message_event::Response { event_id }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/messages", data = "") +)] +pub fn get_message_events_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + let from = body + .from + .clone() + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + + let to = body.to.as_ref().map(|t| t.parse()); + + // Use limit or else 10 + let limit = body + .limit + .try_into() + .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; + + match body.dir { + get_message_events::Direction::Forward => { + let events_after = db + .rooms + .pdus_after(&sender_id, &body.room_id, from) + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .collect::>(); + + let end_token = events_after.last().map(|(count, _)| count.to_string()); + + let events_after = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect::>(); + + Ok(get_message_events::Response { + start: Some(body.from.clone()), + end: end_token, + chunk: events_after, + state: Vec::new(), + } + .into()) + } + get_message_events::Direction::Backward => { + let events_before = db + .rooms + .pdus_until(&sender_id, &body.room_id, from) + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .collect::>(); + + let start_token = events_before.last().map(|(count, _)| count.to_string()); + + let events_before = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect::>(); + + Ok(get_message_events::Response { + start: Some(body.from.clone()), + end: start_token, + chunk: events_before, + state: Vec::new(), + } + .into()) + } + } +} diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs new file mode 100644 index 0000000..7703198 --- /dev/null +++ b/src/client_server/mod.rs @@ -0,0 +1,78 @@ +mod account; +mod alias; +mod backup; +mod capabilities; +mod config; +mod context; +mod device; +mod directory; +mod filter; +mod keys; +mod media; +mod membership; +mod message; +mod presence; +mod profile; +mod push; +mod read_marker; +mod redact; +mod room; +mod session; +mod state; +mod sync; +mod tag; +mod thirdparty; +mod to_device; +mod typing; +mod unversioned; +mod user_directory; +mod voip; + +pub use account::*; +pub use alias::*; +pub use backup::*; +pub use capabilities::*; +pub use config::*; +pub use context::*; +pub use device::*; +pub use directory::*; +pub use filter::*; +pub use keys::*; +pub use media::*; +pub use membership::*; +pub use message::*; +pub use presence::*; +pub use profile::*; +pub use push::*; +pub use read_marker::*; +pub use redact::*; +pub use room::*; +pub use session::*; +pub use state::*; +pub use sync::*; +pub use tag::*; +pub use thirdparty::*; +pub use to_device::*; +pub use typing::*; +pub use unversioned::*; +pub use user_directory::*; +pub use voip::*; + +#[cfg(not(feature = "conduit_bin"))] +use super::State; +#[cfg(feature = "conduit_bin")] +use { + crate::ConduitResult, + rocket::{options, State}, + ruma::api::client::r0::to_device::send_event_to_device, +}; + +const DEVICE_ID_LENGTH: usize = 10; +const TOKEN_LENGTH: usize = 256; +const SESSION_ID_LENGTH: usize = 256; + +#[cfg(feature = "conduit_bin")] +#[options("/<_..>")] +pub fn options_route() -> ConduitResult { + Ok(send_event_to_device::Response.into()) +} diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs new file mode 100644 index 0000000..0b6a51f --- /dev/null +++ b/src/client_server/presence.rs @@ -0,0 +1,45 @@ +use super::State; 
+use crate::{utils, ConduitResult, Database, Ruma}; +use ruma::api::client::r0::presence::set_presence; +use std::convert::TryInto; + +#[cfg(feature = "conduit_bin")] +use rocket::put; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/presence/<_>/status", data = "") +)] +pub fn set_presence_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for room_id in db.rooms.rooms_joined(&sender_id) { + let room_id = room_id?; + + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: body.presence, + status_msg: body.status_msg.clone(), + }, + sender: sender_id.clone(), + }, + &db.globals, + )?; + } + + Ok(set_presence::Response.into()) +} diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs new file mode 100644 index 0000000..1313db7 --- /dev/null +++ b/src/client_server/profile.rs @@ -0,0 +1,236 @@ +use super::State; +use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, + }, + events::EventType, + Raw, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, put}; +use std::convert::TryInto; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/profile/<_>/displayname", data = "") +)] +pub fn set_displayname_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.users + .set_displayname(&sender_id, body.displayname.clone())?; + + // Send a new membership event and presence update into all joined rooms + for room_id in db.rooms.rooms_joined(&sender_id) { + let room_id = room_id?; + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send displayname update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
+ }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Presence update + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::presence::PresenceState::Online, + status_msg: None, + }, + sender: sender_id.clone(), + }, + &db.globals, + )?; + } + + Ok(set_display_name::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_>/displayname", data = "") +)] +pub fn get_displayname_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + Ok(get_display_name::Response { + displayname: db.users.displayname(&body.user_id)?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") +)] +pub fn set_avatar_url_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if let Some(avatar_url) = &body.avatar_url { + if !avatar_url.starts_with("mxc://") { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "avatar_url has to start with mxc://.", + )); + } + + // TODO in the future when we can handle media uploads make sure that this url is our own server + // TODO also make sure this is valid mxc:// format (not only starting with it) + } + + db.users + .set_avatar_url(&sender_id, body.avatar_url.clone())?; + + // Send a new membership event and presence update into all joined rooms + for room_id in db.rooms.rooms_joined(&sender_id) { + let room_id = room_id?; + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send avatar url update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
+ }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Presence update + db.rooms.edus.update_presence( + &sender_id, + &room_id, + ruma::events::presence::PresenceEvent { + content: ruma::events::presence::PresenceEventContent { + avatar_url: db.users.avatar_url(&sender_id)?, + currently_active: None, + displayname: db.users.displayname(&sender_id)?, + last_active_ago: Some( + utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + ), + presence: ruma::presence::PresenceState::Online, + status_msg: None, + }, + sender: sender_id.clone(), + }, + &db.globals, + )?; + } + + Ok(set_avatar_url::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") +)] +pub fn get_avatar_url_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + Ok(get_avatar_url::Response { + avatar_url: db.users.avatar_url(&body.user_id)?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/profile/<_>", data = "") +)] +pub fn get_profile_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let avatar_url = db.users.avatar_url(&body.user_id)?; + let displayname = db.users.displayname(&body.user_id)?; + + if avatar_url.is_none() && displayname.is_none() { + // Return 404 if we don't have a profile for this id + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Profile was not found.", + )); + } + + Ok(get_profile::Response { + avatar_url, + displayname, + } + .into()) +} diff --git a/src/client_server/push.rs b/src/client_server/push.rs new file mode 100644 index 0000000..906f4a7 --- /dev/null +++ b/src/client_server/push.rs @@ -0,0 +1,75 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use log::warn; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + }, + events::EventType, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post, put}; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules", data = "") +)] +pub fn get_pushrules_all_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let event = db + .account_data + .get::(None, &sender_id, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + Ok(get_pushrules_all::Response { + global: event.content.global, + } + .into()) +} + +#[cfg_attr(feature = "conduit_bin", put( + "/_matrix/client/r0/pushrules/<_>/<_>/<_>", + //data = "" +))] +pub fn set_pushrule_route(//db: State<'_, Database>, + //body: Ruma, +) -> ConduitResult { + // TODO + warn!("TODO: set_pushrule_route"); + Ok(set_pushrule::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") +)] +pub fn set_pushrule_enabled_route() -> ConduitResult { + // TODO + warn!("TODO: set_pushrule_enabled_route"); + Ok(set_pushrule_enabled::Response.into()) +} + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] +pub fn get_pushers_route() -> ConduitResult { + Ok(get_pushers::Response { + pushers: Vec::new(), + } + .into()) +} + +#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] +pub fn set_pushers_route() -> ConduitResult { + Ok(get_pushers::Response { + pushers: Vec::new(), + } + .into()) +} diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs new file mode 100644 index 0000000..ff72765 --- /dev/null +++ b/src/client_server/read_marker.rs @@ -0,0 +1,74 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, r0::read_marker::set_read_marker}, + events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::post; +use std::{collections::BTreeMap, time::SystemTime}; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") +)] +pub fn set_read_marker_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.fully_read.clone(), + }, + room_id: body.room_id.clone(), + }; + db.account_data.update( + Some(&body.room_id), + &sender_id, + EventType::FullyRead, + &fully_read_event, + &db.globals, + )?; + + if let Some(event) = &body.read_receipt { + db.rooms.edus.room_read_set( + &body.room_id, + &sender_id, + db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; + + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_id.clone(), + ruma::events::receipt::Receipt { + ts: Some(SystemTime::now()), + }, + ); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert( + event.clone(), + ruma::events::receipt::Receipts { + read: Some(user_receipts), + }, + ); + + db.rooms.edus.roomlatest_update( + &sender_id, + &body.room_id, + AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )), + &db.globals, + )?; + } + Ok(set_read_marker::Response.into()) +} diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs new file mode 100644 index 0000000..fc65c23 --- /dev/null +++ b/src/client_server/redact.rs @@ -0,0 +1,39 @@ +use super::State; +use crate::{pdu::PduBuilder, ConduitResult, Database, Ruma}; +use ruma::{ + api::client::r0::redact::redact_event, + events::{room::redaction, EventType}, +}; + +#[cfg(feature = "conduit_bin")] +use 
rocket::put; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") +)] +pub fn redact_event_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let event_id = db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomRedaction, + content: serde_json::to_value(redaction::RedactionEventContent { + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.clone()), + }, + &db.globals, + &db.account_data, + )?; + + Ok(redact_event::Response { event_id }.into()) +} diff --git a/src/client_server/room.rs b/src/client_server/room.rs new file mode 100644 index 0000000..54e57fd --- /dev/null +++ b/src/client_server/room.rs @@ -0,0 +1,345 @@ +use super::State; +use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::room::{self, create_room, get_room_event}, + }, + events::{ + room::{guest_access, history_visibility, join_rules, member, name, topic}, + EventType, + }, + RoomAliasId, RoomId, RoomVersionId, +}; +use std::{collections::BTreeMap, convert::TryFrom}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/createRoom", data = "") +)] +pub fn create_room_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let room_id = RoomId::new(db.globals.server_name()); + + let alias = body + .room_alias_name + .as_ref() + .map_or(Ok(None), |localpart| { + // TODO: Check for invalid characters and maximum length + let alias = + RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + + if db.rooms.id_from_alias(&alias)?.is_some() { + Err(Error::BadRequest( + ErrorKind::RoomInUse, + "Room alias already exists.", + )) + } else { + Ok(Some(alias)) + } + })?; + + let mut content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); + content.federate = body.creation_content.as_ref().map_or(true, |c| c.federate); + content.predecessor = body + .creation_content + .as_ref() + .and_then(|c| c.predecessor.clone()); + content.room_version = RoomVersionId::Version6; + + // 1. The room create event + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomCreate, + content: serde_json::to_value(content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // 2. Let the room creator join + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Figure out preset. 
We need it for power levels and preset specific events + let visibility = body.visibility.unwrap_or(room::Visibility::Private); + let preset = body.preset.unwrap_or_else(|| match visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + }); + + // 3. Power levels + let mut users = BTreeMap::new(); + users.insert(sender_id.clone(), 100.into()); + for invite_ in &body.invite { + users.insert(invite_.clone(), 100.into()); + } + + let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { + serde_json::from_str(power_levels.json().get()).map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") + })? + } else { + serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { + ban: 50.into(), + events: BTreeMap::new(), + events_default: 0.into(), + invite: 50.into(), + kick: 50.into(), + redact: 50.into(), + state_default: 50.into(), + users, + users_default: 0.into(), + notifications: ruma::events::room::power_levels::NotificationPowerLevels { + room: 50.into(), + }, + }) + .expect("event is valid, we just created it") + }; + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomPowerLevels, + content: power_levels_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // 4. Events set by preset + // 4.1 Join Rules + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomJoinRules, + content: match preset { + create_room::RoomPreset::PublicChat => serde_json::to_value( + join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), + ) + .expect("event is valid, we just created it"), + // according to spec "invite" is the default + _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // 4.2 History Visibility + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomHistoryVisibility, + content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // 4.3 Guest Access + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomGuestAccess, + content: match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::Forbidden, + )) + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::CanJoin, + )) + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // 5. 
Events listed in initial_state + for create_room::InitialStateEvent { + event_type, + state_key, + content, + } in &body.initial_state + { + // Silently skip encryption events if they are not allowed + if event_type == &EventType::RoomEncryption && db.globals.encryption_disabled() { + continue; + } + + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: event_type.clone(), + content: serde_json::from_str(content.get()).map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") + })?, + unsigned: None, + state_key: state_key.clone(), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + // 6. Events implied by name and topic + if let Some(name) = &body.name { + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomName, + content: serde_json::to_value( + name::NameEventContent::new(name.clone()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") + })?, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + if let Some(topic) = &body.topic { + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomTopic, + content: serde_json::to_value(topic::TopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + // 7. Events implied by invite (and TODO: invite_3pid) + for user in &body.invite { + db.rooms.append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user)?, + avatar_url: db.users.avatar_url(&user)?, + is_direct: body.is_direct, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + // Homeserver specific stuff + if let Some(alias) = alias { + db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + } + + if let Some(room::Visibility::Public) = body.visibility { + db.rooms.set_public(&room_id, true)?; + } + + Ok(create_room::Response { room_id }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") +)] +pub fn get_room_event_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + Ok(get_room_event::Response { + event: db + .rooms + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? 
+ .to_room_event(), + } + .into()) +} diff --git a/src/client_server/session.rs b/src/client_server/session.rs new file mode 100644 index 0000000..a431d23 --- /dev/null +++ b/src/client_server/session.rs @@ -0,0 +1,128 @@ +use super::State; +use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; +use crate::{utils, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::session::{get_login_types, login, logout, logout_all}, + }, + UserId, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, post}; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] +pub fn get_login_route() -> ConduitResult { + Ok(get_login_types::Response { + flows: vec![get_login_types::LoginType::Password], + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/login", data = "") +)] +pub fn login_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + // Validate login method + let user_id = + // TODO: Other login methods + if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = + (body.user.clone(), body.login_info.clone()) + { + let user_id = UserId::parse_with_server_name(username, db.globals.server_name()) + .map_err(|_| Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid." + ))?; + let hash = db.users.password_hash(&user_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password." + ))?; + + if hash.is_empty() { + return Err(Error::BadRequest( + ErrorKind::UserDeactivated, + "The user has been deactivated" + )); + } + + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + + if !hash_matches { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password.")); + } + + user_id + } else { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); + }; + + // Generate new device id if the user didn't specify one + let device_id = body + .body + .device_id + .clone() + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + + // Generate a new token for the device + let token = utils::random_string(TOKEN_LENGTH); + + // Add device + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; + + Ok(login::Response { + user_id, + access_token: token, + home_server: Some(db.globals.server_name().to_owned()), + device_id, + well_known: None, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/logout", data = "") +)] +pub fn logout_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + db.users.remove_device(&sender_id, device_id)?; + + Ok(logout::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/logout/all", data = "") +)] +pub fn logout_all_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for device_id in db.users.all_device_ids(sender_id) { + if let Ok(device_id) = device_id { + db.users.remove_device(&sender_id, &device_id)?; + } + } + + Ok(logout_all::Response.into()) +} diff --git a/src/client_server/state.rs b/src/client_server/state.rs new file mode 100644 index 0000000..814a246 --- /dev/null +++ b/src/client_server/state.rs @@ -0,0 +1,216 @@ +use super::State; +use 
crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::state::{ + create_state_event_for_empty_key, create_state_event_for_key, get_state_events, + get_state_events_for_empty_key, get_state_events_for_key, + }, + }, + events::{room::canonical_alias, EventType}, + Raw, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, put}; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") +)] +pub fn create_state_event_for_key_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let content = serde_json::from_str::( + body.json_body + .as_ref() + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? + .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; + + if body.event_type == EventType::RoomCanonicalAlias { + let canonical_alias = serde_json::from_value::< + Raw, + >(content.clone()) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))?; + + let mut aliases = canonical_alias.alt_aliases; + + if let Some(alias) = canonical_alias.alias { + aliases.push(alias); + } + + for alias in aliases { + if alias.server_name() != db.globals.server_name() + || db + .rooms + .id_from_alias(&alias)? + .filter(|room| room == &body.room_id) // Make sure it's the right room + .is_none() + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are only allowed to send canonical_alias \ + events when it's aliases already exists", + )); + } + } + } + + let event_id = db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: body.event_type.clone(), + content, + unsigned: None, + state_key: Some(body.state_key.clone()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(create_state_event_for_key::Response { event_id }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") +)] +pub fn create_state_event_for_empty_key_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + // This just calls create_state_event_for_key_route + let Ruma { + body: + create_state_event_for_empty_key::Request { + room_id, + event_type, + data, + }, + sender_id, + device_id, + json_body, + } = body; + + Ok(create_state_event_for_empty_key::Response { + event_id: create_state_event_for_key_route( + db, + Ruma { + body: create_state_event_for_key::Request { + room_id, + event_type, + data, + state_key: "".to_owned(), + }, + sender_id, + device_id, + json_body, + }, + )? + .0 + .event_id, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/state", data = "") +)] +pub fn get_state_events_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } + + Ok(get_state_events::Response { + room_state: db + .rooms + .room_state_full(&body.room_id)? 
+ .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "<body>") +)] +pub fn get_state_events_for_key_route( + db: State<'_, Database>, + body: Ruma<get_state_events_for_key::Request>, +) -> ConduitResult<get_state_events_for_key::Response> { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } + + let event = db + .rooms + .room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; + + Ok(get_state_events_for_key::Response { + content: serde_json::value::to_raw_value(&event.content) + .map_err(|_| Error::bad_database("Invalid event content in database"))?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "<body>") +)] +pub fn get_state_events_for_empty_key_route( + db: State<'_, Database>, + body: Ruma<get_state_events_for_empty_key::Request>, +) -> ConduitResult<get_state_events_for_empty_key::Response> { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_id, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } + + let event = db + .rooms + .room_state_get(&body.room_id, &body.event_type, "")? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; + + Ok(get_state_events_for_empty_key::Response { + content: serde_json::value::to_raw_value(&event.content) + .map_err(|_| Error::bad_database("Invalid event content in database"))?, + } + .into()) +} diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs new file mode 100644 index 0000000..71e3422 --- /dev/null +++ b/src/client_server/sync.rs @@ -0,0 +1,477 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::r0::sync::sync_events, + events::{AnySyncEphemeralRoomEvent, EventType}, + Raw, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, tokio}; +use std::{ + collections::{hash_map, BTreeMap, HashMap, HashSet}, + time::Duration, +}; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/sync", data = "<body>") +)] +pub async fn sync_events_route( + db: State<'_, Database>, + body: Ruma<sync_events::Request>, +) -> ConduitResult<sync_events::Response> { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // TODO: match body.set_presence { + db.rooms.edus.ping_presence(&sender_id)?; + + // Setup watchers, so if there's no response, we can wait for them + let watcher = db.watch(sender_id, device_id); + + let next_batch = db.globals.current_count()?.to_string(); + + let mut joined_rooms = BTreeMap::new(); + let since = body + .since + .clone() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + let mut presence_updates = HashMap::new(); + let mut device_list_updates = HashSet::new(); + + // Look for device list updates of this account + device_list_updates.extend( + db.users + .keys_changed(&sender_id.to_string(), since, None) + .filter_map(|r| r.ok()), + ); + + for room_id in db.rooms.rooms_joined(&sender_id) { + let room_id = room_id?; + + let mut non_timeline_pdus = db + .rooms + .pdus_since(&sender_id, &room_id, since)?
+ .filter_map(|r| r.ok()); // Filter out buggy events + + // Take the last 10 events for the timeline + let timeline_pdus = non_timeline_pdus + .by_ref() + .rev() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + //let mut limited = false; + + let mut state_pdus = Vec::new(); + for pdu in non_timeline_pdus { + if pdu.state_key.is_some() { + state_pdus.push(pdu); + } + } + + let mut send_member_count = false; + let mut joined_since_last_sync = false; + let mut send_notification_counts = false; + for pdu in db + .rooms + .pdus_since(&sender_id, &room_id, since)? + .filter_map(|r| r.ok()) + { + send_notification_counts = true; + if pdu.kind == EventType::RoomMember { + send_member_count = true; + if !joined_since_last_sync && pdu.state_key == Some(sender_id.to_string()) { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + if content.membership == ruma::events::room::member::MembershipState::Join { + joined_since_last_sync = true; + // Both send_member_count and joined_since_last_sync are set. There's + // nothing more to do + break; + } + } + } + } + + let members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + let joined_member_count = db.rooms.room_members(&room_id).count(); + let invited_member_count = db.rooms.room_members_invited(&room_id).count(); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in db + .rooms + .all_pdus(&sender_id, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|pdu| pdu.kind == EventType::RoomMember) + .map(|pdu| { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + if let Some(state_key) = &pdu.state_key { + let current_content = serde_json::from_value::< + Raw, + >( + members + .get(state_key) + .ok_or_else(|| { + Error::bad_database( + "A user that joined once has no member event anymore.", + ) + })? 
+ .content + .clone(), + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite + ) && matches!( + current_content.membership, + ruma::events::room::member::MembershipState::Join + | ruma::events::room::member::MembershipState::Invite + ) { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + .filter_map(|u| u.ok()) // Filter out buggy users + // Filter for possible heroes + .filter_map(|u| u) + { + if heroes.contains(&hero) || hero == sender_id.as_str() { + continue; + } + + heroes.push(hero); + } + } + + ( + Some(joined_member_count), + Some(invited_member_count), + heroes, + ) + } else { + (None, None, Vec::new()) + }; + + let notification_count = if send_notification_counts { + if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &sender_id)? { + Some( + (db.rooms + .pdus_since(&sender_id, &room_id, last_read)? + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .filter(|pdu| { + matches!( + pdu.kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) + .count() as u32) + .into(), + ) + } else { + None + } + } else { + None + }; + + let prev_batch = timeline_pdus.first().map_or(Ok::<_, Error>(None), |e| { + Ok(Some( + db.rooms + .get_pdu_count(&e.event_id)? + .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? + .to_string(), + )) + })?; + + let room_events = timeline_pdus + .into_iter() + .map(|pdu| pdu.to_sync_room_event()) + .collect::>(); + + let mut edus = db + .rooms + .edus + .roomlatests_since(&room_id, since)? + .filter_map(|r| r.ok()) // Filter out buggy events + .collect::>(); + + if db + .rooms + .edus + .last_roomactive_update(&room_id, &db.globals)? + > since + { + edus.push( + serde_json::from_str( + &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( + db.rooms.edus.roomactives_all(&room_id)?, + )) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + let joined_room = sync_events::JoinedRoom { + account_data: sync_events::AccountData { + events: db + .account_data + .changes_since(Some(&room_id), &sender_id, since)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect::>(), + }, + summary: sync_events::RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: sync_events::UnreadNotificationsCount { + highlight_count: None, + notification_count, + }, + timeline: sync_events::Timeline { + limited: joined_since_last_sync, + prev_batch, + events: room_events, + }, + // TODO: state before timeline + state: sync_events::State { + events: if joined_since_last_sync { + db.rooms + .room_state_full(&room_id)? 
+ .into_iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect() + } else { + Vec::new() + }, + }, + ephemeral: sync_events::Ephemeral { events: edus }, + }; + + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } + + // Look for device list updates in this room + device_list_updates.extend( + db.users + .keys_changed(&room_id.to_string(), since, None) + .filter_map(|r| r.ok()), + ); + + // Take presence updates from this room + for (user_id, presence) in + db.rooms + .edus + .presence_since(&room_id, since, &db.rooms, &db.globals)? + { + match presence_updates.entry(user_id) { + hash_map::Entry::Vacant(v) => { + v.insert(presence); + } + hash_map::Entry::Occupied(mut o) => { + let p = o.get_mut(); + + // Update existing presence event with more info + p.content.presence = presence.content.presence; + if let Some(status_msg) = presence.content.status_msg { + p.content.status_msg = Some(status_msg); + } + if let Some(last_active_ago) = presence.content.last_active_ago { + p.content.last_active_ago = Some(last_active_ago); + } + if let Some(displayname) = presence.content.displayname { + p.content.displayname = Some(displayname); + } + if let Some(avatar_url) = presence.content.avatar_url { + p.content.avatar_url = Some(avatar_url); + } + if let Some(currently_active) = presence.content.currently_active { + p.content.currently_active = Some(currently_active); + } + } + } + } + } + + let mut left_rooms = BTreeMap::new(); + for room_id in db.rooms.rooms_left(&sender_id) { + let room_id = room_id?; + let pdus = db.rooms.pdus_since(&sender_id, &room_id, since)?; + let room_events = pdus + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .map(|pdu| pdu.to_sync_room_event()) + .collect(); + + // TODO: Only until leave point + let mut edus = db + .rooms + .edus + .roomlatests_since(&room_id, since)? + .filter_map(|r| r.ok()) // Filter out buggy events + .collect::>(); + + if db + .rooms + .edus + .last_roomactive_update(&room_id, &db.globals)? + > since + { + edus.push( + serde_json::from_str( + &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( + db.rooms.edus.roomactives_all(&room_id)?, + )) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + let left_room = sync_events::LeftRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + timeline: sync_events::Timeline { + limited: false, + prev_batch: Some(next_batch.clone()), + events: room_events, + }, + state: sync_events::State { events: Vec::new() }, + }; + + if !left_room.is_empty() { + left_rooms.insert(room_id.clone(), left_room); + } + } + + let mut invited_rooms = BTreeMap::new(); + for room_id in db.rooms.rooms_invited(&sender_id) { + let room_id = room_id?; + + let invited_room = sync_events::InvitedRoom { + invite_state: sync_events::InviteState { + events: db + .rooms + .room_state_full(&room_id)? 
+ .into_iter() + .map(|(_, pdu)| pdu.to_stripped_state_event()) + .collect(), + }, + }; + + if !invited_room.is_empty() { + invited_rooms.insert(room_id.clone(), invited_room); + } + } + + // Remove all to-device events the device received *last time* + db.users + .remove_to_device_events(sender_id, device_id, since)?; + + let response = sync_events::Response { + next_batch, + rooms: sync_events::Rooms { + leave: left_rooms, + join: joined_rooms, + invite: invited_rooms, + }, + presence: sync_events::Presence { + events: presence_updates + .into_iter() + .map(|(_, v)| Raw::from(v)) + .collect(), + }, + account_data: sync_events::AccountData { + events: db + .account_data + .changes_since(None, &sender_id, since)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect::>(), + }, + device_lists: sync_events::DeviceLists { + changed: device_list_updates.into_iter().collect(), + left: Vec::new(), // TODO + }, + device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since { + db.users.count_one_time_keys(sender_id, device_id)? + } else { + BTreeMap::new() + }, + to_device: sync_events::ToDevice { + events: db.users.get_to_device_events(sender_id, device_id)?, + }, + }; + + // TODO: Retry the endpoint instead of returning (waiting for #118) + if !body.full_state + && response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.device_one_time_keys_count.is_empty() + && response.to_device.is_empty() + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or_default(); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + let mut delay = tokio::time::delay_for(duration); + tokio::select! { + _ = &mut delay => {} + _ = watcher => {} + } + } + + Ok(response.into()) +} diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs new file mode 100644 index 0000000..99ee6e3 --- /dev/null +++ b/src/client_server/tag.rs @@ -0,0 +1,100 @@ +use super::State; +use crate::{ConduitResult, Database, Ruma}; +use ruma::{ + api::client::r0::tag::{create_tag, delete_tag, get_tags}, + events::EventType, +}; +use std::collections::BTreeMap; + +#[cfg(feature = "conduit_bin")] +use rocket::{delete, get, put}; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") +)] +pub fn update_tag_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut tags_event = db + .account_data + .get::(Some(&body.room_id), sender_id, EventType::Tag)? 
+ .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }); + tags_event + .content + .tags + .insert(body.tag.to_string(), body.tag_info.clone()); + + db.account_data.update( + Some(&body.room_id), + sender_id, + EventType::Tag, + &tags_event, + &db.globals, + )?; + + Ok(create_tag::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") +)] +pub fn delete_tag_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let mut tags_event = db + .account_data + .get::(Some(&body.room_id), sender_id, EventType::Tag)? + .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }); + tags_event.content.tags.remove(&body.tag); + + db.account_data.update( + Some(&body.room_id), + sender_id, + EventType::Tag, + &tags_event, + &db.globals, + )?; + + Ok(delete_tag::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") +)] +pub fn get_tags_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + Ok(get_tags::Response { + tags: db + .account_data + .get::(Some(&body.room_id), sender_id, EventType::Tag)? + .unwrap_or_else(|| ruma::events::tag::TagEvent { + content: ruma::events::tag::TagEventContent { + tags: BTreeMap::new(), + }, + }) + .content + .tags, + } + .into()) +} diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs new file mode 100644 index 0000000..d9b540b --- /dev/null +++ b/src/client_server/thirdparty.rs @@ -0,0 +1,19 @@ +use crate::ConduitResult; +use ruma::api::client::r0::thirdparty::get_protocols; + +use log::warn; +#[cfg(feature = "conduit_bin")] +use rocket::get; +use std::collections::BTreeMap; + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/thirdparty/protocols") +)] +pub fn get_protocols_route() -> ConduitResult { + warn!("TODO: get_protocols_route"); + Ok(get_protocols::Response { + protocols: BTreeMap::new(), + } + .into()) +} diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs new file mode 100644 index 0000000..db4f36e --- /dev/null +++ b/src/client_server/to_device.rs @@ -0,0 +1,56 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::to_device::{self, send_event_to_device}, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::put; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") +)] +pub fn send_event_to_device_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for (target_user_id, map) in &body.messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { + db.users.add_to_device_event( + sender_id, + &target_user_id, + &target_device_id, + &body.event_type, + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + &db.globals, + )? 
+ } + + to_device::DeviceIdOrAllDevices::AllDevices => { + for target_device_id in db.users.all_device_ids(&target_user_id) { + db.users.add_to_device_event( + sender_id, + &target_user_id, + &target_device_id?, + &body.event_type, + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + &db.globals, + )?; + } + } + } + } + } + + Ok(send_event_to_device::Response.into()) +} diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs new file mode 100644 index 0000000..7eba13e --- /dev/null +++ b/src/client_server/typing.rs @@ -0,0 +1,33 @@ +use super::State; +use crate::{utils, ConduitResult, Database, Ruma}; +use ruma::api::client::r0::typing::create_typing_event; + +#[cfg(feature = "conduit_bin")] +use rocket::put; + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") +)] +pub fn create_typing_event_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + if body.typing { + db.rooms.edus.roomactive_add( + &sender_id, + &body.room_id, + body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) + + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms + .edus + .roomactive_remove(&sender_id, &body.room_id, &db.globals)?; + } + + Ok(create_typing_event::Response.into()) +} diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs new file mode 100644 index 0000000..e71c194 --- /dev/null +++ b/src/client_server/unversioned.rs @@ -0,0 +1,19 @@ +use crate::ConduitResult; +use ruma::api::client::unversioned::get_supported_versions; +use std::collections::BTreeMap; + +#[cfg(feature = "conduit_bin")] +use rocket::get; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] +pub fn get_supported_versions_route() -> ConduitResult { + let mut unstable_features = BTreeMap::new(); + + unstable_features.insert("org.matrix.e2e_cross_signing".to_owned(), true); + + Ok(get_supported_versions::Response { + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + unstable_features, + } + .into()) +} diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs new file mode 100644 index 0000000..746deb3 --- /dev/null +++ b/src/client_server/user_directory.rs @@ -0,0 +1,52 @@ +use super::State; +use crate::{ConduitResult, Database, Ruma}; +use ruma::api::client::r0::user_directory::search_users; + +#[cfg(feature = "conduit_bin")] +use rocket::post; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/user_directory/search", data = "") +)] +pub fn search_users_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let limit = if let Some(limit) = body.limit { + u64::from(limit) + } else { + 10 + } as usize; + + let mut users = db.users.iter().filter_map(|user_id| { + // Filter out buggy users (they should not exist, but you never know...) + let user_id = user_id.ok()?; + if db.users.is_deactivated(&user_id).ok()? 
{ + return None; + } + + let user = search_users::User { + user_id: user_id.clone(), + display_name: db.users.displayname(&user_id).ok()?, + avatar_url: db.users.avatar_url(&user_id).ok()?, + }; + + if !user.user_id.to_string().contains(&body.search_term) + && user + .display_name + .as_ref() + .filter(|name| name.contains(&body.search_term)) + .is_none() + { + return None; + } + + Some(user) + }); + + let results = users.by_ref().take(limit).collect(); + let limited = users.next().is_some(); + + Ok(search_users::Response { results, limited }.into()) +} diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs new file mode 100644 index 0000000..4671284 --- /dev/null +++ b/src/client_server/voip.rs @@ -0,0 +1,13 @@ +use crate::{ConduitResult, Error}; +use ruma::api::client::{error::ErrorKind, r0::message::create_message_event}; + +#[cfg(feature = "conduit_bin")] +use rocket::get; + +#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] +pub fn turn_server_route() -> ConduitResult { + Err(Error::BadRequest( + ErrorKind::NotFound, + "There is no turn server yet.", + )) +} diff --git a/src/lib.rs b/src/lib.rs index 5cd8d48..d6f0b55 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,14 +2,15 @@ pub mod client_server; mod database; mod error; mod pdu; -pub mod push_rules; +mod push_rules; mod ruma_wrapper; mod utils; -pub use database::{media::FileMeta, Database}; +pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; + use std::ops::Deref; pub struct State<'r, T: Send + Sync + 'static>(&'r T); diff --git a/src/main.rs b/src/main.rs index 86d8446..27320b7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -91,7 +91,6 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_context_route, client_server::get_message_events_route, client_server::turn_server_route, - client_server::publicised_groups_route, client_server::send_event_to_device_route, client_server::get_media_config_route, client_server::create_content_route, @@ -109,7 +108,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::upload_signing_keys_route, client_server::upload_signatures_route, client_server::get_key_changes_route, - client_server::pushers_route, + client_server::get_pushers_route, client_server::set_pushers_route, //server_server::well_known_server, //server_server::get_server_version, From 0616acbde61b3da3594af0e24b916bb722fee2d5 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 30 Jul 2020 20:49:29 +0200 Subject: [PATCH 0217/1727] feat: handle inhibit_login in /register --- src/client_server/account.rs | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index bfb768a..8764446 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -121,6 +121,28 @@ pub fn register_route( // Create user db.users.create(&user_id, &password)?; + // Initial data + db.account_data.update( + None, + &user_id, + EventType::PushRules, + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: crate::push_rules::default_pushrules(&user_id), + }, + }, + &db.globals, + )?; + + if body.inhibit_login { + return Ok(register::Response { + access_token: None, + user_id, + device_id: None, + } + .into()); + } + // Generate new device id if the user didn't specify one let device_id = body .device_id @@ -138,19 +160,6 @@ pub fn 
register_route( body.initial_device_display_name.clone(), )?; - // Initial data - db.account_data.update( - None, - &user_id, - EventType::PushRules, - &ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: crate::push_rules::default_pushrules(&user_id), - }, - }, - &db.globals, - )?; - Ok(register::Response { access_token: Some(token), user_id, From 98f37302a6c9641c04beacaa92356c320cfc0942 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Thu, 30 Jul 2020 22:09:11 +0200 Subject: [PATCH 0218/1727] feat: handle /publicRooms pagination --- src/client_server/directory.rs | 177 +++++++++++++++++++++------------ 1 file changed, 112 insertions(+), 65 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 510511c..9bed45b 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,12 +1,15 @@ use super::State; use crate::{ConduitResult, Database, Error, Result, Ruma}; use ruma::{ - api::client::r0::{ - directory::{ - self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, + api::client::{ + error::ErrorKind, + r0::{ + directory::{ + self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, + room, }, - room, }, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, @@ -20,65 +23,39 @@ use rocket::{get, post, put}; #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] -pub async fn get_public_rooms_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let Ruma { - body: - get_public_rooms::Request { - limit, - server, - since, - }, - sender_id, - device_id, - json_body, - } = body; - - let get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } = get_public_rooms_filtered_route( - db, - Ruma { - body: get_public_rooms_filtered::Request { - filter: None, - limit, - room_network: get_public_rooms_filtered::RoomNetwork::Matrix, - server, - since, - }, - sender_id, - device_id, - json_body, - }, - ) - .await? 
- .0; - - Ok(get_public_rooms::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/publicRooms", data = "<_body>") + post("/_matrix/client/r0/publicRooms", data = "") )] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, - _body: Ruma, + body: Ruma, ) -> ConduitResult { - let mut chunk = + let limit = body.limit.map_or(10, u64::from); + let mut since = 0_u64; + + if let Some(s) = &body.since { + let mut characters = s.chars(); + let backwards = match characters.next() { + Some('n') => false, + Some('p') => true, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid `since` token", + )) + } + }; + + since = characters + .collect::() + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + + if backwards { + since = since.saturating_sub(limit); + } + } + + let mut all_rooms = db.rooms .public_rooms() .map(|room_id| { @@ -190,10 +167,10 @@ pub async fn get_public_rooms_filtered_route( // We need to collect all, so we can sort by member count .collect::>(); - chunk.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); /* - chunk.extend_from_slice( + all_rooms.extend_from_slice( &server_server::send_request( &db, "privacytools.io".to_owned(), @@ -212,17 +189,87 @@ pub async fn get_public_rooms_filtered_route( ); */ - let total_room_count_estimate = (chunk.len() as u32).into(); + let total_room_count_estimate = (all_rooms.len() as u32).into(); + + let chunk = all_rooms + .into_iter() + .skip(since as usize) + .take(limit as usize) + .collect::>(); + + let prev_batch = if since == 0 { + None + } else { + Some(format!("p{}", since)) + }; + + let next_batch = if chunk.len() < limit as usize { + None + } else { + Some(format!("n{}", since + limit)) + }; Ok(get_public_rooms_filtered::Response { chunk, - prev_batch: None, - next_batch: None, + prev_batch, + next_batch, total_room_count_estimate: Some(total_room_count_estimate), } .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/publicRooms", data = "") +)] +pub async fn get_public_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let Ruma { + body: + get_public_rooms::Request { + limit, + server, + since, + }, + sender_id, + device_id, + json_body, + } = body; + + let get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } = get_public_rooms_filtered_route( + db, + Ruma { + body: get_public_rooms_filtered::Request { + filter: None, + limit, + room_network: get_public_rooms_filtered::RoomNetwork::Matrix, + server, + since, + }, + sender_id, + device_id, + json_body, + }, + ) + .await? 
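+    // `.0` takes the inner response out of the RumaResponse wrapper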
+ .0; + + Ok(get_public_rooms::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/directory/list/room/<_>", data = "") From 47abe6686ebfb83331dfdabd871d8920b35afaaa Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Sun, 2 Aug 2020 09:24:47 +0200 Subject: [PATCH 0219/1727] Send invites only if invited during the sync request Fixes: https://git.koesters.xyz/timo/conduit/issues/175 --- src/client_server/sync.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 71e3422..0f683c8 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -391,6 +391,31 @@ pub async fn sync_events_route( let mut invited_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_invited(&sender_id) { let room_id = room_id?; + let mut invited_since_last_sync = false; + for pdu in db + .rooms + .pdus_since(&sender_id, &room_id, since)? + .filter_map(|r| r.ok()) + { + if pdu.kind == EventType::RoomMember { + if pdu.state_key == Some(sender_id.to_string()) { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + if content.membership == ruma::events::room::member::MembershipState::Invite { + invited_since_last_sync = true; + break; + } + } + } + } + + if !invited_since_last_sync { + continue; + } let invited_room = sync_events::InvitedRoom { invite_state: sync_events::InviteState { From b5755936471c10d39239620e42d275aaaa5630ab Mon Sep 17 00:00:00 2001 From: Guillem Nieto Date: Sun, 2 Aug 2020 09:38:20 +0200 Subject: [PATCH 0220/1727] Do not ignore db errors --- src/client_server/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 0f683c8..4e670ec 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -84,8 +84,8 @@ pub async fn sync_events_route( for pdu in db .rooms .pdus_since(&sender_id, &room_id, since)? - .filter_map(|r| r.ok()) { + let pdu = pdu?; send_notification_counts = true; if pdu.kind == EventType::RoomMember { send_member_count = true; @@ -395,8 +395,8 @@ pub async fn sync_events_route( for pdu in db .rooms .pdus_since(&sender_id, &room_id, since)? 
- .filter_map(|r| r.ok()) { + let pdu = pdu?; if pdu.kind == EventType::RoomMember { if pdu.state_key == Some(sender_id.to_string()) { let content = serde_json::from_value::< From da1c53aaef07fd937661d116fba74cfe86588762 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 3 Aug 2020 20:56:14 -0400 Subject: [PATCH 0221/1727] Make State struct constructable by lib user --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d6f0b55..96236bf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,11 +9,11 @@ mod utils; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; +pub use rocket::Config; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - use std::ops::Deref; -pub struct State<'r, T: Send + Sync + 'static>(&'r T); +pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); impl<'r, T: Send + Sync + 'static> Deref for State<'r, T> { type Target = T; From 624ff08a074348486e352d723af1a7f0ebf76e48 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Thu, 23 Jul 2020 23:58:08 +0200 Subject: [PATCH 0222/1727] Initial docker files trinity The image builds as is, but running it exits with: standard_init_linux.go:211: exec user process caused "no such file or directory" --- .dockerignore | 27 ++++++++++++ Dockerfile | 106 +++++++++++++++++++++++++++++++++++++++++++++ docker-compose.yml | 21 +++++++++ 3 files changed, 154 insertions(+) create mode 100644 .dockerignore create mode 100644 Dockerfile create mode 100644 docker-compose.yml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..dd4433d --- /dev/null +++ b/.dockerignore @@ -0,0 +1,27 @@ +# Local build and dev artifacts +target +sytest + +# Docker files +Dockerfile* +docker-compose* + +# IDE files +.vscode +.idea +*.iml + +# Git folder +.git +.gitea + +# Dot files +.env +.gitignore + +# Toml files +rustfmt.toml +Rocket-example.toml + +# Documentation +*.md diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..dc87e0c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,106 @@ +# Using multistage build: +# https://docs.docker.com/develop/develop-images/multistage-build/ +# https://whitfin.io/speeding-up-rust-docker-builds/ + + +########################## BUILD IMAGE ########################## +# Musl build image to build Conduits statically compiled binary +FROM rustlang/rust:nightly-alpine3.12 as builder + +# Don't download Rust docs +RUN rustup set profile minimal + +ENV USER "conduit" +#ENV RUSTFLAGS='-C link-arg=-s' + +# Install packages needed for building all crates +RUN apk add --no-cache \ + musl-dev \ + openssl-dev \ + pkgconf + +# Create dummy project to fetch all dependencies. +# Rebuilds are a lot faster when there are no changes in the +# dependencies. +RUN cargo new --bin /app +WORKDIR /app + +# Copy cargo files which specify needed dependencies +COPY ./Cargo.* ./ + +# Add musl target, as we want to run your project in +# an alpine linux image +RUN rustup target add x86_64-unknown-linux-musl + +# Build dependencies and remove dummy project, except +# target folder, as it contains the dependencies +RUN cargo build --release --color=always ; \ + find . -not -path "./target*" -delete + +# Now copy and build the real project with the pre-built +# dependencies. +COPY . . 
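+# The dependencies compiled for the dummy project above are still in ./target,
+# so this build only has to compile Conduit itself.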
+RUN cargo build --release --color=always + +########################## RUNTIME IMAGE ########################## +# Create new stage with a minimal image for the actual +# runtime image/container +FROM alpine:3.12 + +ARG BUILD_DATE +ARG VERSION +ARG GIT_REF=HEAD + +# Labels inspired by this medium post: +# https://medium.com/@chamilad/lets-make-your-docker-image-better-than-90-of-existing-ones-8b1e5de950d +LABEL org.label-schema.build-date=${BUILD_DATE} \ + org.label-schema.name="Conduit" \ + org.label-schema.version=${VERSION} \ + org.label-schema.vendor="Conduit Authors" \ + org.label-schema.description="A Matrix homeserver written in Rust" \ + org.label-schema.url="https://conduit.rs/" \ + org.label-schema.vcs-ref=$GIT_REF \ + org.label-schema.vcs-url="https://git.koesters.xyz/timo/conduit.git" \ + ord.label-schema.docker.build="docker build . -t conduit:latest --build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)"\ + maintainer="weasy@hotmail.de" + +# Change some Rocket.rs default configs. They can then +# be changed to different values using env variables. +ENV ROCKET_CLI_COLORS="on" +#ENV ROCKET_SERVER_NAME="conduit.rs" +ENV ROCKET_ENV="production" +ENV ROCKET_ADDRESS=0.0.0.0 +ENV ROCKET_PORT=14004 +ENV ROCKET_LOG="normal" +ENV ROCKET_DATABASE_PATH="/data/sled" +ENV ROCKET_REGISTRATION_DISABLED="true" +#ENV ROCKET_WORKERS=10 + +EXPOSE 14004 + +# Copy config files from context and the binary from +# the "builder" stage to the current stage into folder +# /srv/conduit and create data folder for database +RUN mkdir -p /srv/conduit /data/sled + +COPY --from=builder /app/target/release/conduit ./srv/conduit/ + +# Add www-data user and group with UID 82, as used by alpine +# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +RUN set -x ; \ + addgroup -Sg 82 www-data 2>/dev/null ; \ + adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ + addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + +# Change ownership of Conduit files to www-data user and group +RUN chown -cR www-data:www-data /srv/conduit /data + +VOLUME /data + +RUN apk add --no-cache \ + ca-certificates + +# Set user to www-data +USER www-data +WORKDIR /srv/conduit +ENTRYPOINT [ "/srv/conduit/conduit" ] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..91626dd --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +# Conduit +version: '3' + +services: + conduit: + image: conduit_homeserver + restart: unless-stopped + ports: + - 14004:14004 + volumes: + - db:/data/sled + environment: + ROCKET_SERVER_NAME: example.com # replace with your own name + ### Uncomment and change values as needed + #ROCKET_LOG: normal + #ROCKET_REGISTRATION_DISABLED: 'true' + #ROCKET_DATABASE_PATH: /data/sled + #ROCKET_WORKERS: 10 + +volumes: + db: From a21858758cdefc4978cc5f08d37158a9b3d52f18 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Mon, 27 Jul 2020 18:10:34 +0200 Subject: [PATCH 0223/1727] Change labels from label-schema to opencontainer.image --- Dockerfile | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index dc87e0c..5217e65 100644 --- a/Dockerfile +++ b/Dockerfile @@ -47,21 +47,25 @@ RUN cargo build --release --color=always # runtime image/container FROM alpine:3.12 -ARG BUILD_DATE +ARG CREATED ARG VERSION ARG GIT_REF=HEAD -# Labels inspired by this medium post: -# 
https://medium.com/@chamilad/lets-make-your-docker-image-better-than-90-of-existing-ones-8b1e5de950d -LABEL org.label-schema.build-date=${BUILD_DATE} \ - org.label-schema.name="Conduit" \ - org.label-schema.version=${VERSION} \ - org.label-schema.vendor="Conduit Authors" \ - org.label-schema.description="A Matrix homeserver written in Rust" \ - org.label-schema.url="https://conduit.rs/" \ - org.label-schema.vcs-ref=$GIT_REF \ - org.label-schema.vcs-url="https://git.koesters.xyz/timo/conduit.git" \ - ord.label-schema.docker.build="docker build . -t conduit:latest --build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)"\ +# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md +# including a custom label specifying the build command +LABEL org.opencontainers.image.created=${CREATED} \ + org.opencontainers.image.authors="Conduit Contributors, weasy@hotmail.de" \ + org.opencontainers.image.title="Conduit" \ + org.opencontainers.image.version=${VERSION} \ + org.opencontainers.image.vendor="Conduit Contributors" \ + org.opencontainers.image.description="A Matrix homeserver written in Rust" \ + org.opencontainers.image.url="https://conduit.rs/" \ + org.opencontainers.image.revision=$GIT_REF \ + org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ + org.opencontainers.image.documentation.="" \ + org.opencontainers.image.licenses="AGPL-3.0" \ + org.opencontainers.image.ref.name="" \ + org.label-schema.docker.build="docker build . -t conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)"\ maintainer="weasy@hotmail.de" # Change some Rocket.rs default configs. They can then From ecb641624425dba5dc6b10330b5a91d084b40f6b Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sat, 1 Aug 2020 15:18:49 +0200 Subject: [PATCH 0224/1727] =?UTF-8?q?Image=20now=20builds=20and=20runs=20?= =?UTF-8?q?=F0=9F=8E=89=20Thx=20to=20the=20help=20of=20yzhr?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 66 +++++++++++++--------------------------------- docker-compose.yml | 2 +- 2 files changed, 19 insertions(+), 49 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5217e65..6825d82 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,43 +4,23 @@ ########################## BUILD IMAGE ########################## -# Musl build image to build Conduits statically compiled binary -FROM rustlang/rust:nightly-alpine3.12 as builder +# Alpine build image to build Conduits statically compiled binary +FROM alpine:3.12 as builder -# Don't download Rust docs -RUN rustup set profile minimal - -ENV USER "conduit" -#ENV RUSTFLAGS='-C link-arg=-s' +# Add 'edge'-repository to get Rust 1.45 +RUN sed -i \ + -e 's|v3\.12|edge|' \ + /etc/apk/repositories # Install packages needed for building all crates RUN apk add --no-cache \ - musl-dev \ - openssl-dev \ - pkgconf + cargo \ + openssl-dev -# Create dummy project to fetch all dependencies. -# Rebuilds are a lot faster when there are no changes in the -# dependencies. 
-RUN cargo new --bin /app -WORKDIR /app - -# Copy cargo files which specify needed dependencies -COPY ./Cargo.* ./ - -# Add musl target, as we want to run your project in -# an alpine linux image -RUN rustup target add x86_64-unknown-linux-musl - -# Build dependencies and remove dummy project, except -# target folder, as it contains the dependencies -RUN cargo build --release --color=always ; \ - find . -not -path "./target*" -delete - -# Now copy and build the real project with the pre-built -# dependencies. +# Copy project from current folder and build it COPY . . -RUN cargo build --release --color=always +RUN cargo install --path . +#RUN cargo install --git "https://git.koesters.xyz/timo/conduit.git" ########################## RUNTIME IMAGE ########################## # Create new stage with a minimal image for the actual @@ -68,26 +48,15 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.label-schema.docker.build="docker build . -t conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)"\ maintainer="weasy@hotmail.de" -# Change some Rocket.rs default configs. They can then -# be changed to different values using env variables. -ENV ROCKET_CLI_COLORS="on" -#ENV ROCKET_SERVER_NAME="conduit.rs" -ENV ROCKET_ENV="production" -ENV ROCKET_ADDRESS=0.0.0.0 -ENV ROCKET_PORT=14004 -ENV ROCKET_LOG="normal" -ENV ROCKET_DATABASE_PATH="/data/sled" -ENV ROCKET_REGISTRATION_DISABLED="true" -#ENV ROCKET_WORKERS=10 EXPOSE 14004 # Copy config files from context and the binary from # the "builder" stage to the current stage into folder # /srv/conduit and create data folder for database -RUN mkdir -p /srv/conduit /data/sled +RUN mkdir -p /srv/conduit/.local/share/conduit -COPY --from=builder /app/target/release/conduit ./srv/conduit/ +COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install @@ -97,12 +66,13 @@ RUN set -x ; \ addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 # Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit /data - -VOLUME /data +RUN chown -cR www-data:www-data /srv/conduit RUN apk add --no-cache \ - ca-certificates + ca-certificates \ + libgcc + +VOLUME ["/srv/conduit/.local/share/conduit"] # Set user to www-data USER www-data diff --git a/docker-compose.yml b/docker-compose.yml index 91626dd..d0e4135 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,7 @@ services: ports: - 14004:14004 volumes: - - db:/data/sled + - db:/srv/conduit/.local/share/conduit environment: ROCKET_SERVER_NAME: example.com # replace with your own name ### Uncomment and change values as needed From 0338053774197c0c4df0729af6a034b30ac7f907 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sat, 1 Aug 2020 18:20:30 +0200 Subject: [PATCH 0225/1727] Add ability to switch between local and remote build with build arg a... ...nd add env vars to docker-compose --- Dockerfile | 15 ++++++++++++--- docker-compose.yml | 11 +++++++---- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6825d82..009e0a6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,10 @@ # Alpine build image to build Conduits statically compiled binary FROM alpine:3.12 as builder +# Specifies if the local project is build or if the git master branch +# is build. 
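+# It can be overridden at build time, e.g.: docker build --build-arg LOCAL=true .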
+ARG LOCAL=false + # Add 'edge'-repository to get Rust 1.45 RUN sed -i \ -e 's|v3\.12|edge|' \ @@ -17,10 +21,15 @@ RUN apk add --no-cache \ cargo \ openssl-dev -# Copy project from current folder and build it + +# Copy project files from current folder COPY . . -RUN cargo install --path . -#RUN cargo install --git "https://git.koesters.xyz/timo/conduit.git" +# Build it from local files or from official git repository +RUN if [[ $LOCAL == "true" ]]; then \ + cargo install --path . ; \ + else \ + cargo install --git "https://git.koesters.xyz/timo/conduit.git" ; \ + fi ########################## RUNTIME IMAGE ########################## # Create new stage with a minimal image for the actual diff --git a/docker-compose.yml b/docker-compose.yml index d0e4135..36a928f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,10 +12,13 @@ services: environment: ROCKET_SERVER_NAME: example.com # replace with your own name ### Uncomment and change values as needed - #ROCKET_LOG: normal - #ROCKET_REGISTRATION_DISABLED: 'true' - #ROCKET_DATABASE_PATH: /data/sled - #ROCKET_WORKERS: 10 + # ROCKET_LOG: normal + # ROCKET_PORT: 14004 + # ROCKET_REGISTRATION_DISABLED: 'true' + # ROCKET_ENCRYPTION_DISABLED: 'true' + # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit + # ROCKET_WORKERS: 10 + # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB volumes: db: From 5f3cb3f9261537b884a91949421f10c8a451eb9a Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sun, 2 Aug 2020 12:58:52 +0200 Subject: [PATCH 0226/1727] Minor modifications to compose file and update Dockerfile comments --- Dockerfile | 25 +++++++++++++++---------- docker-compose.yml | 13 ++++++++----- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/Dockerfile b/Dockerfile index 009e0a6..e185381 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,9 +7,11 @@ # Alpine build image to build Conduits statically compiled binary FROM alpine:3.12 as builder -# Specifies if the local project is build or if the git master branch -# is build. +# Specifies if the local project is build or if Conduit gets build +# from the official git repository. Defaults to the git repo. ARG LOCAL=false +# Specifies which revision/commit is build. Defaults to HEAD +ARG GIT_REF=HEAD # Add 'edge'-repository to get Rust 1.45 RUN sed -i \ @@ -24,11 +26,11 @@ RUN apk add --no-cache \ # Copy project files from current folder COPY . . -# Build it from local files or from official git repository +# Build it from the copied local files or from the official git repository RUN if [[ $LOCAL == "true" ]]; then \ cargo install --path . 
; \ else \ - cargo install --git "https://git.koesters.xyz/timo/conduit.git" ; \ + cargo install --git "https://git.koesters.xyz/timo/conduit.git" --rev ${GIT_REF}; \ fi ########################## RUNTIME IMAGE ########################## @@ -43,7 +45,7 @@ ARG GIT_REF=HEAD # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors, weasy@hotmail.de" \ + org.opencontainers.image.authors="Conduit Contributors" \ org.opencontainers.image.title="Conduit" \ org.opencontainers.image.version=${VERSION} \ org.opencontainers.image.vendor="Conduit Contributors" \ @@ -54,17 +56,16 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.documentation.="" \ org.opencontainers.image.licenses="AGPL-3.0" \ org.opencontainers.image.ref.name="" \ - org.label-schema.docker.build="docker build . -t conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)"\ - maintainer="weasy@hotmail.de" + org.label-schema.docker.build="docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ + maintainer="Weasy666" - -EXPOSE 14004 +# Standard port on which Rocket launches +EXPOSE 8000 # Copy config files from context and the binary from # the "builder" stage to the current stage into folder # /srv/conduit and create data folder for database RUN mkdir -p /srv/conduit/.local/share/conduit - COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ # Add www-data user and group with UID 82, as used by alpine @@ -77,13 +78,17 @@ RUN set -x ; \ # Change ownership of Conduit files to www-data user and group RUN chown -cR www-data:www-data /srv/conduit +# Install packages needed to run Conduit RUN apk add --no-cache \ ca-certificates \ libgcc +# Create a volume for the database, to persist its contents VOLUME ["/srv/conduit/.local/share/conduit"] # Set user to www-data USER www-data +# Set container home directory WORKDIR /srv/conduit +# Run Conduit ENTRYPOINT [ "/srv/conduit/conduit" ] diff --git a/docker-compose.yml b/docker-compose.yml index 36a928f..7c27360 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,17 +2,20 @@ version: '3' services: - conduit: + homeserver: image: conduit_homeserver restart: unless-stopped ports: - - 14004:14004 + - 14004:8000 volumes: - db:/srv/conduit/.local/share/conduit + ### Uncomment if you want to use Rocket.toml to configure Conduit + ### Note: Set env vars will override Rocket.toml values + # - ./Rocket.toml:/srv/conduit/Rocket.toml environment: - ROCKET_SERVER_NAME: example.com # replace with your own name - ### Uncomment and change values as needed - # ROCKET_LOG: normal + ROCKET_SERVER_NAME: localhost:8000 # replace with your own name + ### Uncomment and change values as desired + # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical # ROCKET_PORT: 14004 # ROCKET_REGISTRATION_DISABLED: 'true' # ROCKET_ENCRYPTION_DISABLED: 'true' From 7456caeefd487bffdbcc706fcf7a6310d662e807 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sun, 2 Aug 2020 15:55:40 +0200 Subject: [PATCH 0227/1727] Add Element-Web to compose and provide extra compose files for using.. 
..Conduit behind Traefik Reverse Proxy --- docker-compose.override.traefik.yml | 21 +++++++++++++ docker-compose.traefik.yml | 47 +++++++++++++++++++++++++++++ docker-compose.yml | 18 +++++++++-- 3 files changed, 84 insertions(+), 2 deletions(-) create mode 100644 docker-compose.override.traefik.yml create mode 100644 docker-compose.traefik.yml diff --git a/docker-compose.override.traefik.yml b/docker-compose.override.traefik.yml new file mode 100644 index 0000000..3a772e1 --- /dev/null +++ b/docker-compose.override.traefik.yml @@ -0,0 +1,21 @@ +# Conduit - Traefik Reverse Proxy Labels +version: '3' + +services: + homeserver: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" + + - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted + - "traefik.http.routers.to-conduit.tls=true" + - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" + + element-web: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" + + - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted + - "traefik.http.routers.to-element-web.tls=true" + - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" diff --git a/docker-compose.traefik.yml b/docker-compose.traefik.yml new file mode 100644 index 0000000..9e8235d --- /dev/null +++ b/docker-compose.traefik.yml @@ -0,0 +1,47 @@ +# Conduit - Behind Traefik Reverse Proxy +version: '3' + +services: + homeserver: + image: conduit_homeserver:latest + restart: unless-stopped + volumes: + - db:/srv/conduit/.local/share/conduit + ### Uncomment if you want to use Rocket.toml to configure Conduit + ### Note: Set env vars will override Rocket.toml values + # - ./Rocket.toml:/srv/conduit/Rocket.toml + networks: + - proxy + environment: + ROCKET_SERVER_NAME: localhost:8000 # replace with your own name + ### Uncomment and change values as desired + # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical + # ROCKET_PORT: 14004 + # ROCKET_REGISTRATION_DISABLED: 'true' + # ROCKET_ENCRYPTION_DISABLED: 'true' + # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit + # ROCKET_WORKERS: 10 + # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + + ### Uncomment if you want to use your own Element-Web App. 
+ ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and Conduit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/riot-web:latest + # restart: unless-stopped + # volumes: + # - ./element_config.json:/app/config.json + # networks: + # - proxy + # depends_on: + # - homeserver + +volumes: + db: + +networks: + # This is the network Traefik listens to, if you network has a different + # name, don't forget to change it here and in the docker-compose.override.yml + proxy: + external: true diff --git a/docker-compose.yml b/docker-compose.yml index 7c27360..3a390aa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,10 +3,10 @@ version: '3' services: homeserver: - image: conduit_homeserver + image: conduit_homeserver:latest restart: unless-stopped ports: - - 14004:8000 + - 8448:8000 volumes: - db:/srv/conduit/.local/share/conduit ### Uncomment if you want to use Rocket.toml to configure Conduit @@ -23,5 +23,19 @@ services: # ROCKET_WORKERS: 10 # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + ### Uncomment if you want to use your own Element-Web App. + ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and Conduit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/riot-web:latest + # restart: unless-stopped + # ports: + # - 8009:80 + # volumes: + # - ./element_config.json:/app/config.json + # depends_on: + # - homeserver + volumes: db: From 31c725660ff21b043008bb980fd6b12ebc43a8f2 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sun, 2 Aug 2020 16:29:50 +0200 Subject: [PATCH 0228/1727] Add build option to compose file --- docker-compose.override.traefik.yml | 4 ++-- docker-compose.traefik.yml | 12 +++++++++++- docker-compose.yml | 12 +++++++++++- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/docker-compose.override.traefik.yml b/docker-compose.override.traefik.yml index 3a772e1..8b4be50 100644 --- a/docker-compose.override.traefik.yml +++ b/docker-compose.override.traefik.yml @@ -5,7 +5,7 @@ services: homeserver: labels: - "traefik.enable=true" - - "traefik.docker.network=proxy" + - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted - "traefik.http.routers.to-conduit.tls=true" @@ -14,7 +14,7 @@ services: element-web: labels: - "traefik.enable=true" - - "traefik.docker.network=proxy" + - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted - "traefik.http.routers.to-element-web.tls=true" diff --git a/docker-compose.traefik.yml b/docker-compose.traefik.yml index 9e8235d..8edc29c 100644 --- a/docker-compose.traefik.yml +++ b/docker-compose.traefik.yml @@ -3,7 +3,17 @@ version: '3' services: homeserver: + ### If you already built the Conduit image with 'docker build', then you are ready to + ### go. 
Otherwise, you need to comment the 'image' line and uncomment the 'build' lines + ### and run: CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d image: conduit_homeserver:latest + # build: + # context: . + # args: + # CREATED: + # VERSION: + # LOCAL: false + # GIT_REF: HEAD restart: unless-stopped volumes: - db:/srv/conduit/.local/share/conduit @@ -16,7 +26,7 @@ services: ROCKET_SERVER_NAME: localhost:8000 # replace with your own name ### Uncomment and change values as desired # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical - # ROCKET_PORT: 14004 + # ROCKET_PORT: 8000 # ROCKET_REGISTRATION_DISABLED: 'true' # ROCKET_ENCRYPTION_DISABLED: 'true' # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit diff --git a/docker-compose.yml b/docker-compose.yml index 3a390aa..48470e6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,7 +3,17 @@ version: '3' services: homeserver: + ### If you already built the Conduit image with 'docker build', then you are ready to + ### go. Otherwise, you need to comment the 'image' line and uncomment the 'build' lines + ### and run: CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d image: conduit_homeserver:latest + # build: + # context: . + # args: + # CREATED: + # VERSION: + # LOCAL: "false" + # GIT_REF: HEAD restart: unless-stopped ports: - 8448:8000 @@ -16,7 +26,7 @@ services: ROCKET_SERVER_NAME: localhost:8000 # replace with your own name ### Uncomment and change values as desired # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical - # ROCKET_PORT: 14004 + # ROCKET_PORT: 8000 # ROCKET_REGISTRATION_DISABLED: 'true' # ROCKET_ENCRYPTION_DISABLED: 'true' # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit From 7288010e555d7f7ccc20c4330634976e18d6fa77 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 4 Aug 2020 22:04:27 +0200 Subject: [PATCH 0229/1727] Move additional files into dedicated folder and make build the def... ...fault in the compose files. 
--- Dockerfile | 4 ++-- docker-compose.override.traefik.yml | 21 ----------------- docker-compose.yml | 23 ++++++++++--------- docker/docker-compose.override.traefik.yml | 22 ++++++++++++++++++ .../docker-compose.traefik.yml | 23 ++++++++++--------- 5 files changed, 48 insertions(+), 45 deletions(-) delete mode 100644 docker-compose.override.traefik.yml create mode 100644 docker/docker-compose.override.traefik.yml rename docker-compose.traefik.yml => docker/docker-compose.traefik.yml (77%) diff --git a/Dockerfile b/Dockerfile index e185381..7aa05c0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ########################## BUILD IMAGE ########################## -# Alpine build image to build Conduits statically compiled binary +# Alpine build image to build Conduit's statically compiled binary FROM alpine:3.12 as builder # Specifies if the local project is build or if Conduit gets build @@ -51,7 +51,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.vendor="Conduit Contributors" \ org.opencontainers.image.description="A Matrix homeserver written in Rust" \ org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=$GIT_REF \ + org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ org.opencontainers.image.documentation.="" \ org.opencontainers.image.licenses="AGPL-3.0" \ diff --git a/docker-compose.override.traefik.yml b/docker-compose.override.traefik.yml deleted file mode 100644 index 8b4be50..0000000 --- a/docker-compose.override.traefik.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Conduit - Traefik Reverse Proxy Labels -version: '3' - -services: - homeserver: - labels: - - "traefik.enable=true" - - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted - - "traefik.http.routers.to-conduit.tls=true" - - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" - - element-web: - labels: - - "traefik.enable=true" - - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted - - "traefik.http.routers.to-element-web.tls=true" - - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" diff --git a/docker-compose.yml b/docker-compose.yml index 48470e6..afd3699 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,17 +3,18 @@ version: '3' services: homeserver: - ### If you already built the Conduit image with 'docker build', then you are ready to - ### go. Otherwise, you need to comment the 'image' line and uncomment the 'build' lines - ### and run: CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - image: conduit_homeserver:latest - # build: - # context: . - # args: - # CREATED: - # VERSION: - # LOCAL: "false" - # GIT_REF: HEAD + ### If you already built the Conduit image with 'docker build', then you can uncomment the + ### 'image' line and comment out the 'build' option. + # image: conduit_homeserver:latest + ### If you want meaningful labels in you built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + build: + context: . 
+ args: + CREATED: + VERSION: + LOCAL: "false" + GIT_REF: HEAD restart: unless-stopped ports: - 8448:8000 diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.traefik.yml new file mode 100644 index 0000000..2096d79 --- /dev/null +++ b/docker/docker-compose.override.traefik.yml @@ -0,0 +1,22 @@ +# Conduit - Traefik Reverse Proxy Labels +version: '3' + +services: + homeserver: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network + + - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted + - "traefik.http.routers.to-conduit.tls=true" + - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" + + ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml + # element-web: + # labels: + # - "traefik.enable=true" + # - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network + + # - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted + # - "traefik.http.routers.to-element-web.tls=true" + # - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" diff --git a/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml similarity index 77% rename from docker-compose.traefik.yml rename to docker/docker-compose.traefik.yml index 8edc29c..ad1dad8 100644 --- a/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -3,17 +3,18 @@ version: '3' services: homeserver: - ### If you already built the Conduit image with 'docker build', then you are ready to - ### go. Otherwise, you need to comment the 'image' line and uncomment the 'build' lines - ### and run: CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - image: conduit_homeserver:latest - # build: - # context: . - # args: - # CREATED: - # VERSION: - # LOCAL: false - # GIT_REF: HEAD + ### If you already built the Conduit image with 'docker build', then you can uncomment the + ### 'image' line and comment out the 'build' option. + # image: conduit_homeserver:latest + ### If you want meaningful labels in you built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + build: + context: . + args: + CREATED: + VERSION: + LOCAL: false + GIT_REF: HEAD restart: unless-stopped volumes: - db:/srv/conduit/.local/share/conduit From 87ed132ae4d23724818b4f3ee967a1ed24220a92 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 4 Aug 2020 22:06:13 +0200 Subject: [PATCH 0230/1727] Add README in docker folder and mention docker in Conduit's README --- README.md | 3 +++ docker/README.md | 62 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 docker/README.md diff --git a/README.md b/README.md index 404636a..4c84040 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,9 @@ Yes! Just open a Matrix client ( or Element Android for You just have to clone the repo, build it with `cargo build --release` and call the binary (target/release/conduit) from somewhere like a systemd script. It's explained in more detail [here](https://git.koesters.xyz/timo/conduit/wiki/Deploy). +Or you can just build the docker image and run it with docker or docker-compose. 
+It's explained in more details [here](https://git.koesters.xyz/timo/conduit/wiki/Docker) or in the [README](docker/README.md) in the docker folder. + #### What is it build on? - [Ruma](https://www.ruma.io): Useful structures for endpoint requests and responses that can be (de)serialized diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..f7add18 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,62 @@ +# Docker +> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. + +This text is also available at the [official wiki](https://git.koesters.xyz/timo/conduit/wiki/docker). + +## Build & Dockerfile +The Dockerfile provided by Conduit has two stages, each of which creates an image. +1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. +2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. + +The Dockerfile includes a few build arguments that should be supplied when building it. + +``` Dockerfile +ARG LOCAL=false +ARG CREATED +ARG VERSION +ARG GIT_REF=HEAD +``` + +- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` +- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)` +- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`. +- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `HEAD`. + +To build the image you can use the following command + +``` bash +docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +``` + +which also will tag the resulting image as `conduit_homeserver:latest`. +**Note:** it ommits the two optional `build-arg`s. + +## Run +After building the image you can simply run it with + +``` bash +docker run conduit_homeserver:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" +``` + +For detached mode, you also need to use the `-d` flag. You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. +If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. + + +# Docker-compose +If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. 
Depending on your proxy setup, use the `docker-compose.traefik.yml` including `docker-compose.override.traefik.yml` or the normal `docker-compose.yml` for every other reverse proxy. + +## Build +To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: + +``` bash +CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up +``` + +This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section. + +## Run +If you already have built the image, you can just start the container and everything else in the compose file in detached mode with: + +``` bash +docker-compose up -d +``` From 2fc99c05e1ebff3c9955b1930b0896d660e5a230 Mon Sep 17 00:00:00 2001 From: Timo Date: Wed, 12 Aug 2020 21:17:53 +0200 Subject: [PATCH 0231/1727] docs: add documentation to the repo and improve layout --- DEPLOY_FROM_SOURCE.md | 100 ++++++++++++++++++++++++++++++++++++++++++ README.md | 13 +++--- docker/README.md | 24 +++++++--- 3 files changed, 125 insertions(+), 12 deletions(-) create mode 100644 DEPLOY_FROM_SOURCE.md diff --git a/DEPLOY_FROM_SOURCE.md b/DEPLOY_FROM_SOURCE.md new file mode 100644 index 0000000..2d6804d --- /dev/null +++ b/DEPLOY_FROM_SOURCE.md @@ -0,0 +1,100 @@ +# Deploy from source + +## Prerequisites + +Make sure you have `libssl-dev` and `pkg-config` installed and the [rust toolchain](https://rustup.rs) is available on at least on user. + + +## Install Conduit + +```bash +$ sudo useradd -m conduit +$ sudo -u conduit cargo install --git "https://git.koesters.xyz/timo/conduit.git" +``` + + +## Setup systemd service + +In this guide, we set up a systemd service for Conduit, so it's easy to start, stop Conduit and set it to autostart when your server reboots. Paste the default systemd service below and configure it to fit your setup (in /etc/systemd/system/conduit.service). + +```systemd +[Unit] +Description=Conduit +After=network.target + +[Service] +Environment="ROCKET_SERVER_NAME=conduit.rs" # EDIT THIS + +Environment="ROCKET_PORT=14004" # Reverse proxy port + +#Environment="ROCKET_REGISTRATION_DISABLED=true" +#Environment="ROCKET_LOG=normal" # Detailed logging + +Environment="ROCKET_ENV=production" +User=conduit +Group=conduit +Type=simple +Restart=always +ExecStart=/home/conduit/.cargo/bin/conduit + +[Install] +WantedBy=multi-user.target +``` + +Finally, run +```bash +$ sudo systemctl daemon-reload +``` + + +## Setup Reverse Proxy + +This depends on whether you use Apache, Nginx or something else. 
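+If you use Nginx instead, a minimal server block could look roughly like this (just a sketch, assuming the reverse proxy port 14004 from the systemd unit above; adjust the domain and certificate paths to your setup):
+```
+server {
+    listen 443 ssl;
+    server_name conduit.koesters.xyz; # EDIT THIS
+
+    ssl_certificate /etc/letsencrypt/live/conduit.koesters.xyz/fullchain.pem; # EDIT THIS
+    ssl_certificate_key /etc/letsencrypt/live/conduit.koesters.xyz/privkey.pem; # EDIT THIS
+
+    location / {
+        proxy_pass http://localhost:14004;
+        proxy_set_header Host $host;
+    }
+}
+```
+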
For Apache it looks like this (in /etc/apache2/sites-enabled/050-conduit.conf): +``` + + +ServerName conduit.koesters.xyz # EDIT THIS + +AllowEncodedSlashes NoDecode + +ServerAlias conduit.koesters.xyz # EDIT THIS + +ProxyPreserveHost On +ProxyRequests off +AllowEncodedSlashes NoDecode +ProxyPass / http://localhost:14004/ nocanon +ProxyPassReverse / http://localhost:14004/ nocanon + +Include /etc/letsencrypt/options-ssl-apache.conf + +# EDIT THESE: +SSLCertificateFile /etc/letsencrypt/live/conduit.koesters.xyz/fullchain.pem +SSLCertificateKeyFile /etc/letsencrypt/live/conduit.koesters.xyz/privkey.pem + +``` + +Then run +```bash +$ sudo systemctl reload apache2 +``` + + +## SSL Certificate + +The easiest way to get an SSL certificate for the domain is to install `certbot` and run this: +```bash +$ sudo certbot -d conduit.koesters.xyz +``` + + +## You're done! + +Now you can start Conduit with +```bash +$ sudo systemctl start conduit +``` + +and set it to start automatically when your system boots with +```bash +$ sudo systemctl enable conduit +``` diff --git a/README.md b/README.md index 4c84040..ad13089 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) [![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) -#### What is the goal +#### What is the goal? A fast Matrix homeserver that's easy to set up and just works. You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. @@ -18,11 +18,14 @@ Yes! Just open a Matrix client ( or Element Android for #### How can I deploy my own? -You just have to clone the repo, build it with `cargo build --release` and call the binary (target/release/conduit) from somewhere like a systemd script. -It's explained in more detail [here](https://git.koesters.xyz/timo/conduit/wiki/Deploy). +##### From source -Or you can just build the docker image and run it with docker or docker-compose. -It's explained in more details [here](https://git.koesters.xyz/timo/conduit/wiki/Docker) or in the [README](docker/README.md) in the docker folder. +Clone the repo, build it with `cargo build --release` and call the binary +(target/release/conduit) from somewhere like a systemd script. [Read more](DEPLOY_FROM_SOURCE.md) + +##### Using Docker + +Build the docker image and run it with docker or docker-compose. [Read more](docker/README.md) #### What is it build on? diff --git a/docker/README.md b/docker/README.md index f7add18..5a6ecde 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,9 +1,12 @@ -# Docker +# Deploy using Docker + > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. -This text is also available at the [official wiki](https://git.koesters.xyz/timo/conduit/wiki/docker). -## Build & Dockerfile +## Docker + +### Build & Dockerfile + The Dockerfile provided by Conduit has two stages, each of which creates an image. 1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. 2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. @@ -31,7 +34,9 @@ docker build . 
-t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-% which also will tag the resulting image as `conduit_homeserver:latest`. **Note:** it ommits the two optional `build-arg`s. -## Run + +### Run + After building the image you can simply run it with ``` bash @@ -42,10 +47,13 @@ For detached mode, you also need to use the `-d` flag. You can pass in more env If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. -# Docker-compose +## Docker-compose + If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the `docker-compose.traefik.yml` including `docker-compose.override.traefik.yml` or the normal `docker-compose.yml` for every other reverse proxy. -## Build + +### Build + To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: ``` bash @@ -54,7 +62,9 @@ CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9 This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section. -## Run + +### Run + If you already have built the image, you can just start the container and everything else in the compose file in detached mode with: ``` bash From 75ea0b3163663e5a44092649977b25c3cf4cc271 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 12 Aug 2020 23:32:39 +0200 Subject: [PATCH 0232/1727] Update ruma --- Cargo.lock | 137 +++++++++++++++++----------- Cargo.toml | 2 +- src/client_server/alias.rs | 6 +- src/client_server/directory.rs | 8 +- src/client_server/keys.rs | 28 +++--- src/client_server/membership.rs | 6 +- src/client_server/message.rs | 10 +- src/client_server/state.rs | 28 +++--- src/client_server/to_device.rs | 2 +- src/client_server/user_directory.rs | 8 +- src/client_server/voip.rs | 4 +- src/database/users.rs | 25 ++--- src/main.rs | 6 +- src/ruma_wrapper.rs | 4 +- 14 files changed, 152 insertions(+), 122 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 225dc09..014c5cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,9 +17,9 @@ checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "adler32" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "aead" @@ -100,10 +100,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] -name = "async-trait" -version = "0.1.36" +name = "assign" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" + +[[package]] +name = "async-trait" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" dependencies = [ "proc-macro2", "quote", @@ -621,9 
+627,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", "version_check", @@ -692,9 +698,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" +checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" dependencies = [ "autocfg", ] @@ -830,9 +836,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" +checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" dependencies = [ "autocfg", "hashbrown", @@ -1303,6 +1309,15 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + [[package]] name = "proc-macro-hack" version = "0.5.18" @@ -1543,8 +1558,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "ruma-api", "ruma-client-api", @@ -1552,14 +1567,13 @@ dependencies = [ "ruma-events", "ruma-federation-api", "ruma-identifiers", - "ruma-identifiers-macros", "ruma-signatures", ] [[package]] name = "ruma-api" -version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.17.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "http", "percent-encoding", @@ -1573,9 +1587,10 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.16.1" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.17.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -1583,9 +1598,10 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.10.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ + "assign", "http", "js_int", "ruma-api", @@ -1601,9 +1617,10 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = 
"git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "js_int", + "ruma-identifiers", "ruma-serde", "serde", "serde_json", @@ -1612,8 +1629,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.22.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "js_int", "ruma-common", @@ -1627,9 +1644,10 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.21.3" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.22.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -1637,8 +1655,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.0.3" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "js_int", "ruma-api", @@ -1652,29 +1670,42 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.17.4" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "rand", + "ruma-identifiers-macros", + "ruma-identifiers-validation", "serde", "strum", ] [[package]] name = "ruma-identifiers-macros" -version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.17.4" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "proc-macro2", "quote", - "ruma-identifiers", + "ruma-identifiers-validation", "syn", ] +[[package]] +name = "ruma-identifiers-validation" +version = "0.1.1" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +dependencies = [ + "ruma-serde", + "serde", + "serde_json", + "strum", +] + [[package]] name = "ruma-serde" -version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "form_urlencoded", "itoa", @@ -1686,7 +1717,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=d5d2d1d893fa12d27960e4c58d6c09b215d06e95#d5d2d1d893fa12d27960e4c58d6c09b215d06e95" +source = 
"git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" dependencies = [ "base64 0.12.3", "ring", @@ -1818,18 +1849,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "e54c9a88f2da7238af84b5101443f0c0d0a3bbdc455e34a5c9497b1903ed55d5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "609feed1d0a73cc36a0182a840a9b37b4a82f0b1150369f0536a9e3f2a31dc48" dependencies = [ "proc-macro2", "quote", @@ -1880,9 +1911,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" dependencies = [ "arc-swap", "libc", @@ -1914,9 +1945,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" [[package]] name = "socket2" @@ -2002,18 +2033,18 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "strum" -version = "0.18.0" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bd81eb48f4c437cadc685403cad539345bf703d78e63707418431cecd4522b" +checksum = "3924a58d165da3b7b2922c667ab0673c7b5fd52b5c19ea3442747bcb3cd15abe" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.18.0" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" +checksum = "2d2ab682ecdcae7f5f45ae85cd7c1e6c8e68ea42c8a612d47fedf831c037146a" dependencies = [ "heck", "proc-macro2", @@ -2029,9 +2060,9 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.36" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cdb98bcb1f9d81d07b536179c269ea15999b5d14ea958196413869445bb5250" +checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" dependencies = [ "proc-macro2", "quote", @@ -2213,9 +2244,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbdf4ccd1652592b01286a5dbe1e2a77d78afaa34beadd9872a5f7396f92aaa9" +checksum = "6d79ca061b032d6ce30c660fded31189ca0b9922bf483cd70759f13a2d86786c" dependencies = [ "cfg-if", "log", @@ -2224,9 +2255,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ae75f0d28ae10786f3b1895c55fe72e79928fd5ccdebb5438c75e93fec178f" 
+checksum = "db63662723c316b43ca36d833707cc93dff82a02ba3d7e354f342682cc8b3545" dependencies = [ "lazy_static", ] diff --git a/Cargo.toml b/Cargo.toml index c2607a7..42ca8b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } tokio = "0.2.22" # Used for long polling -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "d5d2d1d893fa12d27960e4c58d6c09b215d06e95" } # Used for matrix spec type definitions and helpers +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers #ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 4399cb5..087221b 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -14,7 +14,7 @@ use rocket::{delete, get, put}; )] pub fn create_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { if db.rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); @@ -32,7 +32,7 @@ pub fn create_alias_route( )] pub fn delete_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; @@ -45,7 +45,7 @@ pub fn delete_alias_route( )] pub fn get_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { if body.room_alias.server_name() != db.globals.server_name() { todo!("ask remote server"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 9bed45b..279df18 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -27,7 +27,7 @@ use rocket::{get, post, put}; )] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let limit = body.limit.map_or(10, u64::from); let mut since = 0_u64; @@ -224,11 +224,11 @@ pub async fn get_public_rooms_filtered_route( )] pub async fn get_public_rooms_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let Ruma { body: - get_public_rooms::Request { + get_public_rooms::IncomingRequest { limit, server, since, @@ -246,7 +246,7 @@ pub async fn get_public_rooms_route( } = get_public_rooms_filtered_route( db, Ruma { - body: get_public_rooms_filtered::Request { + body: get_public_rooms_filtered::IncomingRequest { filter: None, limit, room_network: get_public_rooms_filtered::RoomNetwork::Matrix, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 4067210..f88878c 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,15 +1,17 @@ -use super::State; -use super::SESSION_ID_LENGTH; +use super::{State, SESSION_ID_LENGTH}; use crate::{utils, ConduitResult, Database, Error, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - r0::{ - keys::{ - self, claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, +use ruma::{ + api::client::{ + error::ErrorKind, + 
r0::{ + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, + }, + uiaa::{AuthFlow, UiaaInfo}, }, - uiaa::{AuthFlow, UiaaInfo}, }, + encryption::UnsignedDeviceInfo, }; use std::collections::{BTreeMap, HashSet}; @@ -54,7 +56,7 @@ pub fn upload_keys_route( )] pub fn get_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -76,7 +78,7 @@ pub fn get_keys_route( Error::bad_database("all_device_keys contained nonexistent device.") })?; - keys.unsigned = Some(keys::UnsignedDeviceInfo { + keys.unsigned = Some(UnsignedDeviceInfo { device_display_name: metadata.display_name, }); @@ -95,7 +97,7 @@ pub fn get_keys_route( ), )?; - keys.unsigned = Some(keys::UnsignedDeviceInfo { + keys.unsigned = Some(UnsignedDeviceInfo { device_display_name: metadata.display_name, }); @@ -278,7 +280,7 @@ pub fn upload_signatures_route( )] pub fn get_key_changes_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 716d5e4..0ada7c4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -23,7 +23,7 @@ use rocket::{get, post}; )] pub fn join_room_by_id_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -76,7 +76,7 @@ pub fn join_room_by_id_or_alias_route( sender_id: body.sender_id.clone(), device_id: body.device_id.clone(), json_body: None, - body: join_room_by_id::Request { + body: join_room_by_id::IncomingRequest { room_id, third_party_signed: body.third_party_signed.clone(), }, @@ -94,7 +94,7 @@ pub fn join_room_by_id_or_alias_route( )] pub fn leave_room_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 04d965d..d851214 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -2,7 +2,7 @@ use super::State; use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ error::ErrorKind, - r0::message::{create_message_event, get_message_events}, + r0::message::{get_message_events, send_message_event}, }; use std::convert::TryInto; @@ -13,10 +13,10 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") )] -pub fn create_message_event_route( +pub fn send_message_event_route( db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let mut unsigned = serde_json::Map::new(); @@ -41,7 +41,7 @@ pub fn create_message_event_route( &db.account_data, )?; - Ok(create_message_event::Response { event_id }.into()) + Ok(send_message_event::Response { event_id }.into()) } #[cfg_attr( diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 814a246..60b3e9f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -4,8 +4,8 @@ use ruma::{ api::client::{ error::ErrorKind, r0::state::{ - create_state_event_for_empty_key, create_state_event_for_key, get_state_events, - get_state_events_for_empty_key, get_state_events_for_key, + 
get_state_events, get_state_events_for_empty_key, get_state_events_for_key, + send_state_event_for_empty_key, send_state_event_for_key, }, }, events::{room::canonical_alias, EventType}, @@ -19,10 +19,10 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] -pub fn create_state_event_for_key_route( +pub fn send_state_event_for_key_route( db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let content = serde_json::from_str::( @@ -78,21 +78,21 @@ pub fn create_state_event_for_key_route( &db.account_data, )?; - Ok(create_state_event_for_key::Response { event_id }.into()) + Ok(send_state_event_for_key::Response { event_id }.into()) } #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] -pub fn create_state_event_for_empty_key_route( +pub fn send_state_event_for_empty_key_route( db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - // This just calls create_state_event_for_key_route + body: Ruma, +) -> ConduitResult { + // This just calls send_state_event_for_key_route let Ruma { body: - create_state_event_for_empty_key::Request { + send_state_event_for_empty_key::IncomingRequest { room_id, event_type, data, @@ -102,11 +102,11 @@ pub fn create_state_event_for_empty_key_route( json_body, } = body; - Ok(create_state_event_for_empty_key::Response { - event_id: create_state_event_for_key_route( + Ok(send_state_event_for_empty_key::Response { + event_id: send_state_event_for_key_route( db, Ruma { - body: create_state_event_for_key::Request { + body: send_state_event_for_key::IncomingRequest { room_id, event_type, data, diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index db4f36e..ca423fe 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -14,7 +14,7 @@ use rocket::put; )] pub fn send_event_to_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 746deb3..f47643c 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -11,13 +11,9 @@ use rocket::post; )] pub fn search_users_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { - let limit = if let Some(limit) = body.limit { - u64::from(limit) - } else { - 10 - } as usize; + let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) 
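
The handler changes in this commit all follow the same ruma migration: the ruma revision pulled in here splits each endpoint's request into an outgoing `Request` and a fully owned `IncomingRequest` that the server deserializes, so every Conduit route now takes `Ruma<endpoint::IncomingRequest>` instead of `Ruma<endpoint::Request>` (and `ruma_wrapper.rs` below switches its trait bound accordingly). A minimal, self-contained sketch of that pattern — toy stand-ins only, not Conduit's real `Ruma` extractor or ruma's generated types:

```rust
// Toy illustration of the Request / IncomingRequest split this commit adopts.
// `get_alias` and `Ruma` are simplified stand-ins, not the real ruma or Conduit code.
mod get_alias {
    /// What a client constructs and serializes (borrowed data in real ruma).
    #[allow(dead_code)]
    pub struct Request<'a> {
        pub room_alias: &'a str,
    }

    /// What a server deserializes from the wire: a fully owned variant.
    pub struct IncomingRequest {
        pub room_alias: String,
    }
}

/// Conduit's request-extractor wrapper, reduced to a plain struct for the sketch.
struct Ruma<T> {
    body: T,
}

/// After the migration, server-side handlers are written against the incoming type.
fn get_alias_route(body: Ruma<get_alias::IncomingRequest>) -> String {
    format!("looking up {}", body.body.room_alias)
}

fn main() {
    let req = Ruma {
        body: get_alias::IncomingRequest {
            room_alias: "#conduit:example.org".to_owned(),
        },
    };
    println!("{}", get_alias_route(req));
}
```
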
diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 4671284..33080ea 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,11 +1,11 @@ use crate::{ConduitResult, Error}; -use ruma::api::client::{error::ErrorKind, r0::message::create_message_event}; +use ruma::api::client::{error::ErrorKind, r0::message::send_message_event}; #[cfg(feature = "conduit_bin")] use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -pub fn turn_server_route() -> ConduitResult { +pub fn turn_server_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, "There is no turn server yet.", diff --git a/src/database/users.rs b/src/database/users.rs index f031534..594cc2d 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -5,11 +5,12 @@ use ruma::{ error::ErrorKind, r0::{ device::Device, - keys::{AlgorithmAndDeviceId, CrossSigningKey, DeviceKeys, KeyAlgorithm, OneTimeKey}, + keys::{CrossSigningKey, OneTimeKey}, }, }, + encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, - DeviceId, Raw, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, Raw, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; @@ -21,7 +22,7 @@ pub struct Users { pub(super) userdeviceid_metadata: sled::Tree, // This is also used to check if a device exists pub(super) token_userdeviceid: sled::Tree, - pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId + pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + DeviceKeyId pub(super) userid_lastonetimekeyupdate: sled::Tree, // LastOneTimeKeyUpdate = Count pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = UserId/RoomId + Count pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) @@ -269,7 +270,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - one_time_key_key: &AlgorithmAndDeviceId, + one_time_key_key: &DeviceKeyId, one_time_key_value: &OneTimeKey, globals: &super::globals::Globals, ) -> Result<()> { @@ -282,11 +283,11 @@ impl Users { assert!(self.userdeviceid_metadata.get(&key)?.is_some()); key.push(0xff); - // TODO: Use AlgorithmAndDeviceId::to_string when it's available (and update everything, + // TODO: Use DeviceKeyId::to_string when it's available (and update everything, // because there are no wrapping quotation marks anymore) key.extend_from_slice( &serde_json::to_string(one_time_key_key) - .expect("AlgorithmAndDeviceId::to_string always works") + .expect("DeviceKeyId::to_string always works") .as_bytes(), ); @@ -319,9 +320,9 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - key_algorithm: &KeyAlgorithm, + key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result> { + ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -361,7 +362,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -374,13 +375,13 @@ impl Users { .keys() .map(|bytes| { Ok::<_, Error>( - serde_json::from_slice::( + serde_json::from_slice::( &*bytes?.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, ) - .map_err(|_| Error::bad_database("AlgorithmAndDeviceID in db is invalid."))? 
- .0, + .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? + .algorithm(), ) }) { diff --git a/src/main.rs b/src/main.rs index 27320b7..f91a10f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -81,9 +81,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::search_users_route, client_server::get_member_events_route, client_server::get_protocols_route, - client_server::create_message_event_route, - client_server::create_state_event_for_key_route, - client_server::create_state_event_for_empty_key_route, + client_server::send_message_event_route, + client_server::send_state_event_for_key_route, + client_server::send_state_event_for_empty_key_route, client_server::get_state_events_route, client_server::get_state_events_for_key_route, client_server::get_state_events_for_empty_key_route, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0593436..8d86204 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -16,7 +16,7 @@ use { tokio::io::AsyncReadExt, Request, State, }, - ruma::api::Endpoint, + ruma::api::IncomingRequest, std::io::Cursor, }; @@ -30,7 +30,7 @@ pub struct Ruma { } #[cfg(feature = "conduit_bin")] -impl<'a, T: Endpoint> FromTransformedData<'a> for Ruma { +impl<'a, T: IncomingRequest> FromTransformedData<'a> for Ruma { type Error = (); // TODO: Better error handling type Owned = Data; type Borrowed = Self::Owned; From 0fef955b28dd6435ec1b3be62ddf309a351ef22f Mon Sep 17 00:00:00 2001 From: Faelar Date: Mon, 17 Aug 2020 13:59:35 +0200 Subject: [PATCH 0233/1727] Fix license. --- .gitea/PULL_REQUEST_TEMPLATE.md | 2 +- Cargo.toml | 2 +- Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 7b15cba..38cef90 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -1 +1 @@ -- [ ] I agree to release my code and all other changes of this PR under the AGPL-3.0 license +- [ ] I agree to release my code and all other changes of this PR under the AGPL-3.0-only license diff --git a/Cargo.toml b/Cargo.toml index 42ca8b5..90633de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "conduit" description = "A Matrix homeserver written in Rust" -license = "AGPL-3.0" +license = "AGPL-3.0-only" authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://git.koesters.xyz/timo/conduit" diff --git a/Dockerfile b/Dockerfile index 7aa05c0..fa4b16d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -54,7 +54,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ org.opencontainers.image.documentation.="" \ - org.opencontainers.image.licenses="AGPL-3.0" \ + org.opencontainers.image.licenses="AGPL-3.0-only" \ org.opencontainers.image.ref.name="" \ org.label-schema.docker.build="docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ maintainer="Weasy666" From 6b7e92ccf5a8a3d348ab882c48bab172ff866546 Mon Sep 17 00:00:00 2001 From: Sebastian Spaeth Date: Tue, 18 Aug 2020 17:42:11 +0200 Subject: [PATCH 0234/1727] Make default server name more obvious Too many people fell into the trap and left conduit.rs in the deployment configuration, wondering why users are all listed as @conduit.rs. 
Make the default server name really easy to identify and make it obvious that this needs to be changed. --- DEPLOY_FROM_SOURCE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY_FROM_SOURCE.md b/DEPLOY_FROM_SOURCE.md index 2d6804d..4d685f6 100644 --- a/DEPLOY_FROM_SOURCE.md +++ b/DEPLOY_FROM_SOURCE.md @@ -23,7 +23,7 @@ Description=Conduit After=network.target [Service] -Environment="ROCKET_SERVER_NAME=conduit.rs" # EDIT THIS +Environment="ROCKET_SERVER_NAME=YOURSERVERNAME.HERE" # EDIT THIS Environment="ROCKET_PORT=14004" # Reverse proxy port From f40f1d9f7528c72c2b1bc40d9e36e5ebaa6235d0 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Fri, 31 Jul 2020 14:40:28 +0200 Subject: [PATCH 0235/1727] docs: endpoint documentation --- src/client_server/account.rs | 35 +++++++++++++++++++++++++++++++ src/client_server/capabilities.rs | 3 +++ src/client_server/session.rs | 32 +++++++++++++++++++++++++++- src/client_server/unversioned.rs | 10 +++++++++ src/database/users.rs | 4 +++- src/main.rs | 2 +- 6 files changed, 83 insertions(+), 3 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 8764446..15efab8 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -20,6 +20,12 @@ use rocket::{get, post}; const GUEST_NAME_LENGTH: usize = 10; +/// # `GET /_matrix/client/r0/register/available` +/// +/// Checks if a username is valid and available on this server. +/// +/// - Returns true if no user or appservice on this server claimed this username +/// - This will not reserve the username, so the username might become invalid when trying to register #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/register/available", data = "") @@ -53,6 +59,15 @@ pub fn get_register_available_route( Ok(get_username_availability::Response { available: true }.into()) } +/// # `GET /_matrix/client/r0/register` +/// +/// Register an account on this homeserver. +/// +/// - Returns the device id and access_token unless `inhibit_login` is true +/// - When registering a guest account, all parameters except initial_device_display_name will be +/// ignored +/// - Creates a new account and a device for it +/// - The account will be populated with default account data #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/register", data = "") @@ -168,6 +183,13 @@ pub fn register_route( .into()) } +/// # `POST /_matrix/client/r0/account/password` +/// +/// Changes the password of this account. +/// +/// - Invalidates all other access tokens if logout_devices is true +/// - Deletes all other devices and most of their data (to-device events, last seen, etc.) if +/// logout_devices is true #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/account/password", data = "") @@ -225,6 +247,11 @@ pub fn change_password_route( Ok(change_password::Response.into()) } +/// # `GET _matrix/client/r0/account/whoami` +/// +/// Get user_id of this account. 
+/// +/// - Also works for Application Services #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/account/whoami", data = "") @@ -237,6 +264,14 @@ pub fn whoami_route(body: Ruma) -> ConduitResult ConduitResult { let mut available = BTreeMap::new(); diff --git a/src/client_server/session.rs b/src/client_server/session.rs index a431d23..4011058 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -12,14 +12,28 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::{get, post}; +/// # `GET /_matrix/client/r0/login` +/// +/// Get the homeserver's supported login types. One of these should be used as the `type` field +/// when logging in. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -pub fn get_login_route() -> ConduitResult { +pub fn get_login_types_route() -> ConduitResult { Ok(get_login_types::Response { flows: vec![get_login_types::LoginType::Password], } .into()) } +/// # `POST /_matrix/client/r0/login` +/// +/// Authenticates the user and returns an access token it can use in subsequent requests. +/// +/// - The returned access token is associated with the user and device +/// - Old access tokens of that device should be invalidated +/// - If `device_id` is unknown, a new device will be created +/// +/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see +/// supported login types. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/login", data = "") @@ -74,6 +88,7 @@ pub fn login_route( // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); + // TODO: Don't always create a new device // Add device db.users.create_device( &user_id, @@ -92,6 +107,12 @@ pub fn login_route( .into()) } +/// # `POST /_matrix/client/r0/logout` +/// +/// Log out the current device. +/// +/// - Invalidates the access token +/// - Deletes the device and most of it's data (to-device events, last seen, etc.) #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") @@ -108,6 +129,15 @@ pub fn logout_route( Ok(logout::Response.into()) } +/// # `POST /_matrix/client/r0/logout/all` +/// +/// Log out all devices of this user. +/// +/// - Invalidates all access tokens +/// - Deletes devices and most of their data (to-device events, last seen, etc.) +/// +/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) +/// from each device of this user. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/logout/all", data = "") diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index e71c194..3ff8bec 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -5,6 +5,16 @@ use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::get; +/// # `GET /_matrix/client/versions` +/// +/// Get the versions of the specification and unstable features supported by this server. +/// +/// - Versions take the form MAJOR.MINOR.PATCH +/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value +/// - Unstable features should be namespaced and may include version information in their name +/// +/// Note: Unstable features are used while developing new features. 
Clients should avoid using +/// unstable features in their stable releases #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] pub fn get_supported_versions_route() -> ConduitResult { let mut unstable_features = BTreeMap::new(); diff --git a/src/database/users.rs b/src/database/users.rs index 594cc2d..2500b4c 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -859,7 +859,9 @@ impl Users { self.remove_device(&user_id, &device_id?)?; } - // Set the password to "" to indicate a deactivated account + // Set the password to "" to indicate a deactivated account. Hashes will never result in an + // empty string, so the user will not be able to log in again. Systems like changing the + // password without logging in should check if the account is deactivated. self.userid_password.insert(user_id.to_string(), "")?; // TODO: Unhook 3PID diff --git a/src/main.rs b/src/main.rs index f91a10f..c7cd837 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,7 +26,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_supported_versions_route, client_server::get_register_available_route, client_server::register_route, - client_server::get_login_route, + client_server::get_login_types_route, client_server::login_route, client_server::whoami_route, client_server::logout_route, From 04eee089e0c40fd61e16e130562ad2409b316cd3 Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 14 Aug 2020 11:31:31 +0200 Subject: [PATCH 0236/1727] feat: first steps towards joining rooms over federation --- Cargo.lock | 27 ++++--- Cargo.toml | 6 +- src/client_server/alias.rs | 23 ++++-- src/client_server/directory.rs | 79 +++++++++++------- src/client_server/membership.rs | 124 +++++++++++++++++++++++----- src/client_server/room.rs | 15 ++-- src/error.rs | 7 ++ src/lib.rs | 1 + src/main.rs | 14 ++-- src/server_server.rs | 138 +++++++++++++++++--------------- 10 files changed, 291 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 014c5cc..1166304 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -267,6 +267,7 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ + "base64 0.12.3", "directories", "http", "image", @@ -1559,7 +1560,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "ruma-api", "ruma-client-api", @@ -1573,7 +1574,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "http", "percent-encoding", @@ -1588,7 +1589,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1599,7 +1600,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "assign", "http", @@ -1617,7 +1618,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "js_int", "ruma-identifiers", @@ -1630,7 +1631,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "js_int", "ruma-common", @@ -1645,7 +1646,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1656,7 +1657,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "js_int", "ruma-api", @@ -1671,7 +1672,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1683,7 +1684,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "proc-macro2", "quote", @@ -1694,7 +1695,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "ruma-serde", "serde", @@ -1705,7 +1706,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "form_urlencoded", "itoa", @@ -1717,7 +1718,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=987d48666cf166cf12100b5dbc61b5e3385c4014#987d48666cf166cf12100b5dbc61b5e3385c4014" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" dependencies = [ "base64 0.12.3", "ring", diff --git a/Cargo.toml b/Cargo.toml index 90633de..4945e3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,9 +16,10 @@ edition = "2018" #rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } -tokio = "0.2.22" # Used for long polling -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers +ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers #ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries http = "0.2.1" # Used for rocket<->ruma conversions @@ -31,6 +32,7 @@ rust-argon2 = "0.8.2" # Used to hash passwords reqwest = "0.10.6" # Used to send requests thiserror = "1.0.19" # Used for conduit::Error type image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images +base64 = "0.12.3" # Used to encode server public key [features] default = ["conduit_bin"] diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 087221b..848b935 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,9 +1,9 @@ use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; -use ruma::api::client::{ +use crate::{ConduitResult, Database, Error, Ruma, server_server}; +use ruma::api::{federation, client::{ error::ErrorKind, r0::alias::{create_alias, delete_alias, get_alias}, -}; +}}; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, put}; @@ -43,12 +43,25 @@ pub fn delete_alias_route( feature = "conduit_bin", get("/_matrix/client/r0/directory/room/<_>", data = "") )] -pub fn get_alias_route( +pub async fn get_alias_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { if body.room_alias.server_name() != db.globals.server_name() { - todo!("ask remote server"); + let response = server_server::send_request( + &db, + body.room_alias.server_name().to_string(), + federation::query::get_room_information::v1::Request { + room_alias: body.room_alias.to_string(), + }, + ) + .await?; + + return Ok(get_alias::Response { + room_id: response.room_id, + servers: response.servers, + } + .into()); } let room_id = db diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 279df18..26188f7 100644 --- a/src/client_server/directory.rs +++ 
b/src/client_server/directory.rs @@ -1,15 +1,18 @@ use super::State; -use crate::{ConduitResult, Database, Error, Result, Ruma}; +use crate::{server_server, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{ - directory::{ - self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, + api::{ + client::{ + error::ErrorKind, + r0::{ + directory::{ + self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, + room, }, - room, }, + federation, }, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, @@ -29,6 +32,46 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { + if let Some(other_server) = body + .server + .clone() + .filter(|server| server != &db.globals.server_name().as_str()) + { + let response = server_server::send_request( + &db, + other_server, + federation::directory::get_public_rooms::v1::Request { + limit: body.limit, + since: body.since.clone(), + room_network: federation::directory::get_public_rooms::v1::RoomNetwork::Matrix, + }, + ) + .await?; + + return Ok(get_public_rooms_filtered::Response { + chunk: response + .chunk + .into_iter() + .map(|c| { + // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk + // to ruma::api::client::r0::directory::PublicRoomsChunk + Ok::<_, Error>( + serde_json::from_str( + &serde_json::to_string(&c) + .expect("PublicRoomsChunk::to_string always works"), + ) + .expect("federation and client-server PublicRoomsChunk are the same type"), + ) + }) + .filter_map(|r| r.ok()) + .collect(), + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + } + .into()); + } + let limit = body.limit.map_or(10, u64::from); let mut since = 0_u64; @@ -169,26 +212,6 @@ pub async fn get_public_rooms_filtered_route( all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - /* - all_rooms.extend_from_slice( - &server_server::send_request( - &db, - "privacytools.io".to_owned(), - ruma::api::federation::v1::get_public_rooms::Request { - limit: Some(20_u32.into()), - since: None, - room_network: ruma::api::federation::v1::get_public_rooms::RoomNetwork::Matrix, - }, - ) - .await - ? - .chunk - .into_iter() - .map(|c| serde_json::from_str(&serde_json::to_string(&c)?)?) 
- .collect::>(), - ); - */ - let total_room_count_estimate = (all_rooms.len() as u32).into(); let chunk = all_rooms diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0ada7c4..84c0ebd 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -1,16 +1,24 @@ use super::State; -use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use crate::{ + client_server, pdu::PduBuilder, server_server, utils, ConduitResult, Database, Error, Ruma, +}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, + api::{ + client::{ + error::ErrorKind, + r0::{ + alias, + membership::{ + ban_user, forget_room, get_member_events, invite_user, join_room_by_id, + join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, + unban_user, + }, + }, }, + federation, }, events::{room::member, EventType}, - Raw, RoomId, + EventId, Raw, RoomId, RoomVersionId, }; use std::{collections::BTreeMap, convert::TryFrom}; @@ -21,13 +29,81 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/join", data = "") )] -pub fn join_room_by_id_route( +pub async fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - // TODO: Ask a remote server if we don't have this room + // Ask a remote server if we don't have this room + if !db.rooms.exists(&body.room_id)? && body.room_id.server_name() != db.globals.server_name() { + let make_join_response = server_server::send_request( + &db, + body.room_id.server_name().to_string(), + federation::membership::create_join_event_template::v1::Request { + room_id: body.room_id.clone(), + user_id: sender_id.clone(), + ver: vec![RoomVersionId::Version5, RoomVersionId::Version6], + }, + ) + .await?; + + let mut join_event_stub_value = + serde_json::from_str::(make_join_response.event.json().get()) + .map_err(|_| { + Error::BadServerResponse("Invalid make_join event json received from server.") + })?; + + let join_event_stub = + join_event_stub_value + .as_object_mut() + .ok_or(Error::BadServerResponse( + "Invalid make join event object received from server.", + ))?; + + join_event_stub.insert( + "origin".to_owned(), + db.globals.server_name().to_owned().to_string().into(), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + utils::millis_since_unix_epoch().into(), + ); + + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&join_event_stub_value) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms + let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); + join_event_stub.remove("event_id"); + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut join_event_stub_value, + ) + .expect("event is valid, we just created it"); + + let send_join_response = server_server::send_request( + &db, + body.room_id.server_name().to_string(), + federation::membership::create_join_event::v2::Request { + room_id: body.room_id.clone(), + event_id, + pdu_stub: serde_json::from_value::>(join_event_stub_value) + 
.expect("Raw::from_value always works"), + }, + ) + .await?; + + dbg!(send_join_response); + todo!("Take send_join_response and 'create' the room using that data"); + } let event = member::MemberEventContent { membership: member::MembershipState::Join, @@ -61,16 +137,28 @@ pub fn join_room_by_id_route( feature = "conduit_bin", post("/_matrix/client/r0/join/<_>", data = "") )] -pub fn join_room_by_id_or_alias_route( +pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, + db2: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let room_id = RoomId::try_from(body.room_id_or_alias.clone()).or_else(|alias| { - Ok::<_, Error>(db.rooms.id_from_alias(&alias)?.ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room not found (TODO: Federation).", - ))?) - })?; + let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { + Ok(room_id) => room_id, + Err(room_alias) => { + client_server::get_alias_route( + db, + Ruma { + body: alias::get_alias::IncomingRequest { room_alias }, + sender_id: body.sender_id.clone(), + device_id: body.device_id.clone(), + json_body: None, + }, + ) + .await? + .0 + .room_id + } + }; let body = Ruma { sender_id: body.sender_id.clone(), @@ -83,7 +171,7 @@ pub fn join_room_by_id_or_alias_route( }; Ok(join_room_by_id_or_alias::Response { - room_id: join_room_by_id_route(db, body)?.0.room_id, + room_id: join_room_by_id_route(db2, body).await?.0.room_id, } .into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 54e57fd..b5f1529 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -92,13 +92,6 @@ pub fn create_room_route( &db.account_data, )?; - // Figure out preset. We need it for power levels and preset specific events - let visibility = body.visibility.unwrap_or(room::Visibility::Private); - let preset = body.preset.unwrap_or_else(|| match visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - }); - // 3. Power levels let mut users = BTreeMap::new(); users.insert(sender_id.clone(), 100.into()); @@ -142,6 +135,14 @@ pub fn create_room_route( )?; // 4. Events set by preset + + // Figure out preset. 
We need it for preset specific events + let visibility = body.visibility.unwrap_or(room::Visibility::Private); + let preset = body.preset.unwrap_or_else(|| match visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + }); + // 4.1 Join Rules db.rooms.append_pdu( PduBuilder { diff --git a/src/error.rs b/src/error.rs index af5405c..623aa0e 100644 --- a/src/error.rs +++ b/src/error.rs @@ -27,6 +27,13 @@ pub enum Error { #[from] source: image::error::ImageError, }, + #[error("Could not connect to server.")] + ReqwestError { + #[from] + source: reqwest::Error, + }, + #[error("{0}")] + BadServerResponse(&'static str), #[error("{0}")] BadConfig(&'static str), #[error("{0}")] diff --git a/src/lib.rs b/src/lib.rs index 96236bf..f761413 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,5 @@ pub mod client_server; +pub mod server_server; mod database; mod error; mod pdu; diff --git a/src/main.rs b/src/main.rs index c7cd837..93ca74e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,13 +1,13 @@ #![warn(rust_2018_idioms)] -pub mod push_rules; +pub mod server_server; +pub mod client_server; -mod client_server; +mod push_rules; mod database; mod error; mod pdu; mod ruma_wrapper; -//mod server_server; mod utils; pub use database::Database; @@ -110,10 +110,10 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_key_changes_route, client_server::get_pushers_route, client_server::set_pushers_route, - //server_server::well_known_server, - //server_server::get_server_version, - //server_server::get_server_keys, - //server_server::get_server_keys_deprecated, + server_server::well_known_server, + server_server::get_server_version, + server_server::get_server_keys, + server_server::get_server_keys_deprecated, ], ) .attach(AdHoc::on_attach("Config", |mut rocket| async { diff --git a/src/server_server.rs b/src/server_server.rs index a214143..0af5546 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,16 +1,15 @@ -use crate::{Database, MatrixResult}; +use crate::{ConduitResult, Database, Result}; use http::header::{HeaderValue, AUTHORIZATION}; -use log::error; use rocket::{get, response::content::Json, State}; -use ruma::api::Endpoint; -use ruma::api::client::error::Error; use ruma::api::federation::discovery::{ - get_server_keys::v2 as get_server_keys, get_server_version::v1 as get_server_version, + get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, }; +use ruma::api::OutgoingRequest; use serde_json::json; use std::{ collections::BTreeMap, convert::TryFrom, + fmt::Debug, time::{Duration, SystemTime}, }; @@ -33,36 +32,51 @@ pub async fn request_well_known(db: &crate::Database, destination: &str) -> Opti Some(body.get("m.server")?.as_str()?.to_owned()) } -pub async fn send_request( +pub async fn send_request( db: &crate::Database, destination: String, request: T, -) -> Option { - let mut http_request: http::Request<_> = request.try_into().unwrap(); - +) -> Result +where + T: Debug, +{ let actual_destination = "https://".to_owned() + &request_well_known(db, &destination) .await .unwrap_or(destination.clone() + ":8448"); - *http_request.uri_mut() = (actual_destination + T::METADATA.path).parse().unwrap(); + + let mut http_request = request + .try_into_http_request(&actual_destination, Some("")) + .unwrap(); let mut request_map = serde_json::Map::new(); if !http_request.body().is_empty() { request_map.insert( "content".to_owned(), - 
serde_json::to_value(http_request.body()).unwrap(), + serde_json::from_slice(http_request.body()).unwrap(), ); }; request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert("uri".to_owned(), T::METADATA.path.into()); - request_map.insert("origin".to_owned(), db.globals.server_name().into()); + request_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + request_map.insert( + "origin".to_owned(), + db.globals.server_name().as_str().into(), + ); request_map.insert("destination".to_owned(), destination.into()); let mut request_json = request_map.into(); ruma::signatures::sign_json( - db.globals.server_name(), + db.globals.server_name().as_str(), db.globals.keypair(), &mut request_json, ) @@ -72,31 +86,32 @@ pub async fn send_request( .as_object() .unwrap() .values() - .next() - .unwrap() - .as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())); + .map(|v| { + v.as_object() + .unwrap() + .iter() + .map(|(k, v)| (k, v.as_str().unwrap())) + }); - for s in signatures { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - db.globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + db.globals.server_name(), + s.0, + s.1 + )) + .unwrap(), + ); + } } - let reqwest_response = db - .globals - .reqwest_client() - .execute(http_request.into()) - .await; + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + let reqwest_response = db.globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -117,59 +132,56 @@ pub async fn send_request( .unwrap() .into_iter() .collect(); - Some( - ::try_from(http_response.body(body).unwrap()) - .ok() - .unwrap(), + Ok( + T::IncomingResponse::try_from(http_response.body(body).unwrap()) + .expect("TODO: error handle other server errors"), ) } - Err(e) => { - error!("{}", e); - None - } + Err(e) => Err(e.into()), } } -#[cfg_attr(feature = "conduit_bin",get("/.well-known/matrix/server"))] +#[cfg_attr(feature = "conduit_bin", get("/.well-known/matrix/server"))] pub fn well_known_server() -> Json { - rocket::response::content::Json( - json!({ "m.server": "matrixtesting.koesters.xyz:14004"}).to_string(), - ) + rocket::response::content::Json(json!({ "m.server": "pc.koesters.xyz:59003"}).to_string()) } -#[cfg_attr(feature = "conduit_bin",get("/_matrix/federation/v1/version"))] -pub fn get_server_version() -> MatrixResult { - MatrixResult(Ok(get_server_version::Response { +#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] +pub fn get_server_version() -> ConduitResult { + Ok(get_server_version::Response { server: Some(get_server_version::Server { name: Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), }), - })) + } + .into()) } -#[cfg_attr(feature = "conduit_bin",get("/_matrix/key/v2/server"))] +#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] pub fn get_server_keys(db: State<'_, Database>) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", 
db.globals.keypair().version()), - get_server_keys::VerifyKey { + VerifyKey { key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, ); let mut response = serde_json::from_slice( - http::Response::try_from(get_server_keys::Response { - server_name: db.globals.server_name().to_owned(), - verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), + http::Response::try_from(get_server_keys::v2::Response { + server_key: ServerKey { + server_name: db.globals.server_name().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), + }, }) .unwrap() .body(), ) .unwrap(); ruma::signatures::sign_json( - db.globals.server_name(), + db.globals.server_name().as_str(), db.globals.keypair(), &mut response, ) @@ -177,7 +189,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { Json(response.to_string()) } -#[cfg_attr(feature = "conduit_bin",get("/_matrix/key/v2/server/<_key_id>"))] -pub fn get_server_keys_deprecated(db: State<'_, Database>, _key_id: String) -> Json { +#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] +pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { get_server_keys(db) } From 940f533d6187a4178786762b7063d7931c6fb9e5 Mon Sep 17 00:00:00 2001 From: Timo Date: Wed, 12 Aug 2020 15:33:45 +0200 Subject: [PATCH 0237/1727] refactor: avoid some allocations when redacting --- src/pdu.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/pdu.rs b/src/pdu.rs index b689a3e..9936802 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -37,11 +37,12 @@ pub struct PduEvent { impl PduEvent { pub fn redact(&mut self) -> Result<()> { self.unsigned.clear(); - let allowed = match self.kind { - EventType::RoomMember => vec!["membership"], - EventType::RoomCreate => vec!["creator"], - EventType::RoomJoinRules => vec!["join_rule"], - EventType::RoomPowerLevels => vec![ + + let allowed: &[&str] = match self.kind { + EventType::RoomMember => &["membership"], + EventType::RoomCreate => &["creator"], + EventType::RoomJoinRules => &["join_rule"], + EventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -51,8 +52,8 @@ impl PduEvent { "users", "users_default", ], - EventType::RoomHistoryVisibility => vec!["history_visibility"], - _ => vec![], + EventType::RoomHistoryVisibility => &["history_visibility"], + _ => &[], }; let old_content = self @@ -63,8 +64,8 @@ impl PduEvent { let mut new_content = serde_json::Map::new(); for key in allowed { - if let Some(value) = old_content.remove(key) { - new_content.insert(key.to_owned(), value); + if let Some(value) = old_content.remove(*key) { + new_content.insert((*key).to_owned(), value); } } From 4be68eba76415880d576a0b34e286711137c8b49 Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 14 Aug 2020 11:29:32 +0200 Subject: [PATCH 0238/1727] feat: allow querying conduit's room list over federation --- src/main.rs | 2 + src/server_server.rs | 95 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 92 insertions(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index 93ca74e..ab9f544 100644 --- a/src/main.rs +++ b/src/main.rs @@ -114,6 +114,8 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_server_version, server_server::get_server_keys, server_server::get_server_keys_deprecated, + server_server::get_public_rooms_route, + 
server_server::send_transaction_message_route, ], ) .attach(AdHoc::on_attach("Config", |mut rocket| async { diff --git a/src/server_server.rs b/src/server_server.rs index 0af5546..f48f502 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,10 +1,14 @@ -use crate::{ConduitResult, Database, Result}; +use crate::{client_server, ConduitResult, Database, Error, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION}; -use rocket::{get, response::content::Json, State}; -use ruma::api::federation::discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, +use rocket::{get, post, put, response::content::Json, State}; +use ruma::api::federation::{ + directory::get_public_rooms, + discovery::{ + get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, + }, + transactions::send_transaction_message, }; -use ruma::api::OutgoingRequest; +use ruma::api::{client, OutgoingRequest}; use serde_json::json; use std::{ collections::BTreeMap, @@ -193,3 +197,84 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { get_server_keys(db) } + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/federation/v1/publicRooms", data = "") +)] +pub async fn get_public_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let Ruma { + body: + get_public_rooms::v1::Request { + room_network: _room_network, // TODO + limit, + since, + }, + sender_id, + device_id, + json_body, + } = body; + + let client::r0::directory::get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } = client_server::get_public_rooms_filtered_route( + db, + Ruma { + body: client::r0::directory::get_public_rooms_filtered::IncomingRequest { + filter: None, + limit, + room_network: client::r0::directory::get_public_rooms_filtered::RoomNetwork::Matrix, + server: None, + since, + }, + sender_id, + device_id, + json_body, + }, + ) + .await? 
+ .0; + + Ok(get_public_rooms::v1::Response { + chunk: chunk + .into_iter() + .map(|c| { + // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk + // to ruma::api::client::r0::directory::PublicRoomsChunk + Ok::<_, Error>( + serde_json::from_str( + &serde_json::to_string(&c) + .expect("PublicRoomsChunk::to_string always works"), + ) + .expect("federation and client-server PublicRoomsChunk are the same type"), + ) + }) + .filter_map(|r| r.ok()) + .collect(), + prev_batch, + next_batch, + total_room_count_estimate, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v1/send/<_>", data = "") +)] +pub fn send_transaction_message_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + dbg!(&*body); + Ok(send_transaction_message::v1::Response { + pdus: BTreeMap::new(), + } + .into()) +} From 3ff17f69ad5937ded80af888744ca491ef73c094 Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 14 Aug 2020 11:34:15 +0200 Subject: [PATCH 0239/1727] fmt --- src/client_server/alias.rs | 13 ++++++++----- src/client_server/sync.rs | 10 ++-------- src/lib.rs | 2 +- src/main.rs | 4 ++-- 4 files changed, 13 insertions(+), 16 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 848b935..a029388 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,9 +1,12 @@ use super::State; -use crate::{ConduitResult, Database, Error, Ruma, server_server}; -use ruma::api::{federation, client::{ - error::ErrorKind, - r0::alias::{create_alias, delete_alias, get_alias}, -}}; +use crate::{server_server, ConduitResult, Database, Error, Ruma}; +use ruma::api::{ + client::{ + error::ErrorKind, + r0::alias::{create_alias, delete_alias, get_alias}, + }, + federation, +}; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, put}; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 4e670ec..e744ef9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -81,10 +81,7 @@ pub async fn sync_events_route( let mut send_member_count = false; let mut joined_since_last_sync = false; let mut send_notification_counts = false; - for pdu in db - .rooms - .pdus_since(&sender_id, &room_id, since)? - { + for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { let pdu = pdu?; send_notification_counts = true; if pdu.kind == EventType::RoomMember { @@ -392,10 +389,7 @@ pub async fn sync_events_route( for room_id in db.rooms.rooms_invited(&sender_id) { let room_id = room_id?; let mut invited_since_last_sync = false; - for pdu in db - .rooms - .pdus_since(&sender_id, &room_id, since)? - { + for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? 
{ let pdu = pdu?; if pdu.kind == EventType::RoomMember { if pdu.state_key == Some(sender_id.to_string()) { diff --git a/src/lib.rs b/src/lib.rs index f761413..eea32c7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,10 @@ pub mod client_server; -pub mod server_server; mod database; mod error; mod pdu; mod push_rules; mod ruma_wrapper; +pub mod server_server; mod utils; pub use database::Database; diff --git a/src/main.rs b/src/main.rs index ab9f544..d3a673f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,12 +1,12 @@ #![warn(rust_2018_idioms)] -pub mod server_server; pub mod client_server; +pub mod server_server; -mod push_rules; mod database; mod error; mod pdu; +mod push_rules; mod ruma_wrapper; mod utils; From 69a7cb51427bbc86be3dfdece4880c2743ba7de5 Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 14 Aug 2020 11:36:25 +0200 Subject: [PATCH 0240/1727] fix dependency version --- Cargo.lock | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1166304..949f90d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,7 +1560,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "ruma-api", "ruma-client-api", @@ -1574,7 +1574,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "http", "percent-encoding", @@ -1589,7 +1589,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1600,7 +1600,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "assign", "http", @@ -1618,7 +1618,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-identifiers", @@ -1631,7 +1631,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-common", @@ -1646,7 +1646,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1657,7 +1657,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-api", @@ -1672,7 +1672,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1684,7 +1684,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro2", "quote", @@ -1695,7 +1695,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "ruma-serde", "serde", @@ -1706,7 +1706,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "form_urlencoded", "itoa", @@ -1718,7 +1718,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#8868d2f72bc5d54f04154fb4fe71b08e4f69a0ae" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "base64 0.12.3", "ring", From 27d35f5ab415fe7a96a3cb5e14d5fc7ad98417e1 Mon Sep 17 00:00:00 2001 From: Timo Date: Tue, 18 Aug 2020 12:43:27 +0200 Subject: [PATCH 0241/1727] docs: fix /register method --- src/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 15efab8..1cdbeca 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -59,7 +59,7 @@ pub fn get_register_available_route( Ok(get_username_availability::Response { available: true }.into()) } -/// # `GET /_matrix/client/r0/register` +/// # `POST /_matrix/client/r0/register` /// /// Register an account on this homeserver. 
/// From e457e190881ce6e733226940b09180993dc1ab41 Mon Sep 17 00:00:00 2001 From: timokoesters Date: Tue, 18 Aug 2020 12:15:27 +0200 Subject: [PATCH 0242/1727] feat: search pdus --- Cargo.lock | 13 ----- Cargo.toml | 4 +- src/client_server/mod.rs | 2 + src/client_server/search.rs | 93 ++++++++++++++++++++++++++++++++++++ src/database.rs | 2 + src/database/rooms.rs | 95 ++++++++++++++++++++++++++++++++++++- src/main.rs | 1 + 7 files changed, 193 insertions(+), 17 deletions(-) create mode 100644 src/client_server/search.rs diff --git a/Cargo.lock b/Cargo.lock index 949f90d..267f409 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,7 +1560,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "ruma-api", "ruma-client-api", @@ -1574,7 +1573,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "http", "percent-encoding", @@ -1589,7 +1587,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1600,7 +1597,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "assign", "http", @@ -1618,7 +1614,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-identifiers", @@ -1631,7 +1626,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-common", @@ -1646,7 +1640,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1657,7 +1650,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "js_int", "ruma-api", @@ -1672,7 +1664,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1684,7 +1675,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "proc-macro2", "quote", @@ -1695,7 +1685,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "ruma-serde", "serde", @@ -1706,7 +1695,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "form_urlencoded", "itoa", @@ -1718,7 +1706,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#7f8c78e8ba4be7fda450285e62493f6b33cb085a" dependencies = [ "base64 0.12.3", "ring", diff --git a/Cargo.toml b/Cargo.toml index 4945e3c..ceb7883 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,8 +17,8 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } #ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers -#ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +#ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers +ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 7703198..e5a36f3 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -17,6 +17,7 @@ mod push; mod read_marker; mod redact; mod room; +mod search; mod session; mod state; mod sync; @@ -47,6 +48,7 @@ pub use push::*; pub use read_marker::*; pub use redact::*; pub use room::*; +pub use search::*; pub use session::*; pub use state::*; pub use sync::*; diff --git a/src/client_server/search.rs b/src/client_server/search.rs new file mode 100644 index 0000000..9e465dd --- /dev/null +++ b/src/client_server/search.rs @@ -0,0 +1,93 @@ +use super::State; +use crate::{ConduitResult, Database, Error, Ruma}; +use js_int::uint; +use ruma::api::client::{error::ErrorKind, r0::search::search_events}; + +#[cfg(feature = "conduit_bin")] +use rocket::post; +use search_events::{ResultCategories, ResultRoomEvents, SearchResult}; +use std::collections::BTreeMap; + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/search", data = "") +)] +pub fn search_events_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let search_criteria = body.search_categories.room_events.as_ref().unwrap(); + let filter = search_criteria + .filter + .as_ref() + .unwrap(); + + let room_id = filter.rooms + .as_ref() + .unwrap() + .first() + .unwrap(); + + let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); + + if !db.rooms.is_joined(sender_id, &room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + let skip = match body.next_batch.as_ref().map(|s| s.parse()) { + Some(Ok(s)) => s, + Some(Err(_)) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid next_batch token.", + )) + } + None => 0, // Default to the start + }; + + let search = db + .rooms + .search_pdus(&room_id, &search_criteria.search_term)?; + + let results = search + .0 + .map(|result| { + Ok::<_, Error>(SearchResult { + context: None, + rank: None, + result: dbg!(db + .rooms + .get_pdu_from_id(dbg!(&result))? + .map(|pdu| pdu.to_room_event())), + }) + }) + .filter_map(|r| r.ok()) + .skip(skip) + .take(limit) + .collect::>(); + + let next_batch = if results.len() < limit as usize { + None + } else { + Some((skip + limit).to_string()) + }; + + Ok(search_events::Response { + search_categories: ResultCategories { + room_events: Some(ResultRoomEvents { + count: uint!(0), // TODO + groups: BTreeMap::new(), // TODO + next_batch, + results, + state: BTreeMap::new(), // TODO + highlights: search.1, + }), + }, + } + .into()) +} diff --git a/src/database.rs b/src/database.rs index 844a1f4..eb27325 100644 --- a/src/database.rs +++ b/src/database.rs @@ -104,6 +104,8 @@ impl Database { aliasid_alias: db.open_tree("alias_roomid")?, publicroomids: db.open_tree("publicroomids")?, + tokenids: db.open_tree("tokenids")?, + userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, userroomid_invited: db.open_tree("userroomid_invited")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fe63318..3b3c2c6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -35,6 +35,8 @@ pub struct Rooms { pub(super) aliasid_alias: sled::Tree, // AliasId = RoomId + Count pub(super) publicroomids: sled::Tree, + pub(super) tokenids: sled::Tree, // TokenId = RoomId + Token + PduId + pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) userroomid_invited: sled::Tree, @@ -562,7 +564,7 @@ impl Rooms { self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; self.eventid_pduid - .insert(pdu.event_id.to_string(), pdu_id)?; + .insert(pdu.event_id.to_string(), pdu_id.clone())?; if let Some(state_key) = pdu.state_key { let mut key = room_id.to_string().as_bytes().to_vec(); @@ -573,7 +575,7 @@ impl Rooms { self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } - match event_type { + match dbg!(event_type) { EventType::RoomRedaction => { if let Some(redact_id) = &redacts { // TODO: Reason @@ -616,6 +618,21 @@ impl Rooms { )?; } } + EventType::RoomMessage => { + if let Some(body) = dbg!(content).get("body").and_then(|b| b.as_str()) { + for word in body + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(&pdu_id); + self.tokenids.insert(key, &[])?; + } + } + } _ => {} } self.edus.room_read_set(&room_id, &sender, index)?; @@ -928,6 +945,80 @@ impl Rooms { }) } + pub fn search_pdus( + &self, + room_id: &RoomId, + search_string: &str, + ) -> Result<(impl Iterator, Vec)> { + let mut prefix = room_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + let words = search_string + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + .collect::>(); + + let mut iterators = words.iter().map(|word| { + 
let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xff); + self.tokenids + .scan_prefix(&prefix2) + .keys() + .filter_map(|r| r.ok()) + .map(|key| { + let pduid_index = key + .iter() + .enumerate() + .filter(|(_, &b)| b == 0xff) + .nth(1) + .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? + .0 + 1; // +1 because the pdu id starts AFTER the separator + + let pdu_id = + key.subslice(pduid_index, key.len() - pduid_index); + + Ok::<_, Error>(pdu_id) + }) + .filter_map(|r| r.ok()) + .peekable() + }); + + let first_iterator = match iterators.next() { + Some(i) => i, + None => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "search_term needs to contain at least one word.", + )) + } + }; + + let mut other_iterators = iterators.collect::>(); + + Ok(( + first_iterator.filter(move |target| { + other_iterators + .iter_mut() + .map(|it| { + while let Some(element) = it.peek() { + if dbg!(element) > dbg!(target) { + return false; + } else if element == target { + return true; + } else { + it.next(); + } + } + + false + }) + .all(|b| b) + }), + words, + )) + } + /// Returns an iterator over all joined members of a room. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_joined diff --git a/src/main.rs b/src/main.rs index d3a673f..bbe7c96 100644 --- a/src/main.rs +++ b/src/main.rs @@ -90,6 +90,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::sync_events_route, client_server::get_context_route, client_server::get_message_events_route, + client_server::search_events_route, client_server::turn_server_route, client_server::send_event_to_device_route, client_server::get_media_config_route, From f23fb32e957bc20599315552b30e0f8f6a9f8343 Mon Sep 17 00:00:00 2001 From: Timo Date: Wed, 19 Aug 2020 18:26:39 +0200 Subject: [PATCH 0243/1727] fix: set limited to true when skipping messages in /sync --- Cargo.lock | 13 +++++++++++++ Cargo.toml | 4 ++-- src/client_server/search.rs | 6 +++--- src/client_server/sync.rs | 5 +++-- src/database/rooms.rs | 6 +++--- 5 files changed, 24 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 267f409..0a7334c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,6 +1560,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "ruma-api", "ruma-client-api", @@ -1573,6 +1574,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "http", "percent-encoding", @@ -1587,6 +1589,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1597,6 +1600,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "assign", "http", @@ -1614,6 +1618,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "js_int", "ruma-identifiers", @@ -1626,6 +1631,7 @@ dependencies = [ [[package]] name = 
"ruma-events" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "js_int", "ruma-common", @@ -1640,6 +1646,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1650,6 +1657,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "js_int", "ruma-api", @@ -1664,6 +1672,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1675,6 +1684,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "proc-macro2", "quote", @@ -1685,6 +1695,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "ruma-serde", "serde", @@ -1695,6 +1706,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "form_urlencoded", "itoa", @@ -1706,6 +1718,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" dependencies = [ "base64 0.12.3", "ring", diff --git a/Cargo.toml b/Cargo.toml index ceb7883..4945e3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,8 +17,8 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } #ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers -ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers +#ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 9e465dd..1107555 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -60,10 +60,10 @@ 
pub fn search_events_route( Ok::<_, Error>(SearchResult { context: None, rank: None, - result: dbg!(db + result: db .rooms - .get_pdu_from_id(dbg!(&result))? - .map(|pdu| pdu.to_room_event())), + .get_pdu_from_id(&result)? + .map(|pdu| pdu.to_room_event()), }) }) .filter_map(|r| r.ok()) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index e744ef9..201e8bc 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -69,13 +69,14 @@ pub async fn sync_events_route( // They /sync response doesn't always return all messages, so we say the output is // limited unless there are events in non_timeline_pdus - //let mut limited = false; + let mut limited = false; let mut state_pdus = Vec::new(); for pdu in non_timeline_pdus { if pdu.state_key.is_some() { state_pdus.push(pdu); } + limited = true; } let mut send_member_count = false; @@ -271,7 +272,7 @@ pub async fn sync_events_route( notification_count, }, timeline: sync_events::Timeline { - limited: joined_since_last_sync, + limited: limited || joined_since_last_sync, prev_batch, events: room_events, }, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3b3c2c6..294531e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -575,7 +575,7 @@ impl Rooms { self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; } - match dbg!(event_type) { + match event_type { EventType::RoomRedaction => { if let Some(redact_id) = &redacts { // TODO: Reason @@ -619,7 +619,7 @@ impl Rooms { } } EventType::RoomMessage => { - if let Some(body) = dbg!(content).get("body").and_then(|b| b.as_str()) { + if let Some(body) = content.get("body").and_then(|b| b.as_str()) { for word in body .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) @@ -1002,7 +1002,7 @@ impl Rooms { .iter_mut() .map(|it| { while let Some(element) = it.peek() { - if dbg!(element) > dbg!(target) { + if element > target { return false; } else if element == target { return true; From 4323cf5feca2a549e4b2cc05f0b2ec98350c9839 Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 21 Aug 2020 21:22:59 +0200 Subject: [PATCH 0244/1727] improvement: device list works better The only situation that isn't working yet is sending `left` events for users when the sender leaves the room --- src/client_server/search.rs | 11 +- src/client_server/sync.rs | 202 +++++++++++++++++++++++++----------- src/database/rooms.rs | 82 ++++++++------- src/database/users.rs | 75 ++++++------- src/utils.rs | 27 +++++ 5 files changed, 246 insertions(+), 151 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 1107555..dec1ec9 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -19,16 +19,9 @@ pub fn search_events_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = search_criteria - .filter - .as_ref() - .unwrap(); + let filter = search_criteria.filter.as_ref().unwrap(); - let room_id = filter.rooms - .as_ref() - .unwrap() - .first() - .unwrap(); + let room_id = filter.rooms.as_ref().unwrap().first().unwrap(); let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 201e8bc..ecc7144 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -2,14 +2,15 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::r0::sync::sync_events, - 
events::{AnySyncEphemeralRoomEvent, EventType}, - Raw, + events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, + Raw, RoomId, UserId, }; #[cfg(feature = "conduit_bin")] use rocket::{get, tokio}; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, + convert::TryFrom, time::Duration, }; @@ -40,7 +41,9 @@ pub async fn sync_events_route( .unwrap_or(0); let mut presence_updates = HashMap::new(); + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in let mut device_list_updates = HashSet::new(); + let mut device_list_left = HashSet::new(); // Look for device list updates of this account device_list_updates.extend( @@ -67,6 +70,8 @@ pub async fn sync_events_route( .rev() .collect::>(); + let send_notification_counts = !timeline_pdus.is_empty(); + // They /sync response doesn't always return all messages, so we say the output is // limited unless there are events in non_timeline_pdus let mut limited = false; @@ -79,32 +84,86 @@ pub async fn sync_events_route( limited = true; } + let encrypted_room = db + .rooms + .room_state_get(&room_id, &EventType::RoomEncryption, "")? + .is_some(); + + // TODO: optimize this? let mut send_member_count = false; let mut joined_since_last_sync = false; - let mut send_notification_counts = false; - for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { - let pdu = pdu?; - send_notification_counts = true; + let mut new_encrypted_room = false; + for (state_key, pdu) in db + .rooms + .pdus_since(&sender_id, &room_id, since)? + .filter_map(|r| r.ok()) + .filter_map(|pdu| Some((pdu.state_key.clone()?, pdu))) + { if pdu.kind == EventType::RoomMember { send_member_count = true; - if !joined_since_last_sync && pdu.state_key == Some(sender_id.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; - if content.membership == ruma::events::room::member::MembershipState::Join { - joined_since_last_sync = true; - // Both send_member_count and joined_since_last_sync are set. 
There's - // nothing more to do - break; + + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + + if pdu.state_key == Some(sender_id.to_string()) + && content.membership == MembershipState::Join + { + joined_since_last_sync = true; + } else if encrypted_room && content.membership == MembershipState::Join { + // A new user joined an encrypted room + let user_id = UserId::try_from(state_key) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + // Add encryption update if we didn't share an encrypted room already + if !share_encrypted_room(&db, &sender_id, &user_id, &room_id) { + device_list_updates.insert(user_id); } + } else if encrypted_room && content.membership == MembershipState::Leave { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert( + UserId::try_from(state_key) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?, + ); } + } else if pdu.kind == EventType::RoomEncryption { + new_encrypted_room = true; } } - let members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + db.rooms + .room_members(&room_id) + .filter_map(|user_id| { + Some( + UserId::try_from(user_id.ok()?.clone()) + .map_err(|_| { + Error::bad_database("Invalid member event state key in db.") + }) + .ok()?, + ) + }) + .filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_id != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&db, sender_id, user_id, &room_id) + }), + ); + } + + // Look for device list updates in this room + device_list_updates.extend( + db.users + .keys_changed(&room_id.to_string(), since, None) + .filter_map(|r| r.ok()), + ); let (joined_member_count, invited_member_count, heroes) = if send_member_count { let joined_member_count = db.rooms.room_members(&room_id).count(); @@ -131,35 +190,17 @@ pub async fn sync_events_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; if let Some(state_key) = &pdu.state_key { - let current_content = serde_json::from_value::< - Raw, - >( - members - .get(state_key) - .ok_or_else(|| { - Error::bad_database( - "A user that joined once has no member event anymore.", - ) - })? - .content - .clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid member event in database.") + let user_id = UserId::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") })?; // The membership was and still is invite or join if matches!( content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) && matches!( - current_content.membership, - ruma::events::room::member::MembershipState::Join - | ruma::events::room::member::MembershipState::Invite - ) { + MembershipState::Join | MembershipState::Invite + ) && (db.rooms.is_joined(&user_id, &room_id)? + || db.rooms.is_invited(&user_id, &room_id)?) 
+ { Ok::<_, Error>(Some(state_key.clone())) } else { Ok(None) @@ -295,13 +336,6 @@ pub async fn sync_events_route( joined_rooms.insert(room_id.clone(), joined_room); } - // Look for device list updates in this room - device_list_updates.extend( - db.users - .keys_changed(&room_id.to_string(), since, None) - .filter_map(|r| r.ok()), - ); - // Take presence updates from this room for (user_id, presence) in db.rooms @@ -392,18 +426,17 @@ pub async fn sync_events_route( let mut invited_since_last_sync = false; for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { let pdu = pdu?; - if pdu.kind == EventType::RoomMember { - if pdu.state_key == Some(sender_id.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; - if content.membership == ruma::events::room::member::MembershipState::Invite { - invited_since_last_sync = true; - break; - } + if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + + if content.membership == MembershipState::Invite { + invited_since_last_sync = true; + break; } } } @@ -428,6 +461,28 @@ pub async fn sync_events_route( } } + // TODO: mark users as left when WE left an encrypted room they were in + for user_id in left_encrypted_users { + // If the user doesn't share an encrypted room with the target anymore, we need to tell + // them + if db + .rooms + .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) + .filter_map(|r| r.ok()) + .filter_map(|other_room_id| { + Some( + db.rooms + .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted) + { + device_list_left.insert(user_id); + } + } + // Remove all to-device events the device received *last time* db.users .remove_to_device_events(sender_id, device_id, since)?; @@ -459,7 +514,7 @@ pub async fn sync_events_route( }, device_lists: sync_events::DeviceLists { changed: device_list_updates.into_iter().collect(), - left: Vec::new(), // TODO + left: device_list_left.into_iter().collect(), }, device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since { db.users.count_one_time_keys(sender_id, device_id)? @@ -495,3 +550,24 @@ pub async fn sync_events_route( Ok(response.into()) } + +fn share_encrypted_room( + db: &Database, + sender_id: &UserId, + user_id: &UserId, + ignore_room: &RoomId, +) -> bool { + db.rooms + .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) + .filter_map(|r| r.ok()) + .filter(|room_id| room_id != ignore_room) + .filter_map(|other_room_id| { + Some( + db.rooms + .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .ok()? 
+ .is_some(), + ) + }) + .any(|encrypted| encrypted) +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 294531e..767f581 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -945,11 +945,11 @@ impl Rooms { }) } - pub fn search_pdus( - &self, + pub fn search_pdus<'a>( + &'a self, room_id: &RoomId, search_string: &str, - ) -> Result<(impl Iterator, Vec)> { + ) -> Result<(impl Iterator + 'a, Vec)> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -958,7 +958,7 @@ impl Rooms { .map(str::to_lowercase) .collect::>(); - let mut iterators = words.iter().map(|word| { + let iterators = words.clone().into_iter().map(move |word| { let mut prefix2 = prefix.clone(); prefix2.extend_from_slice(word.as_bytes()); prefix2.push(0xff); @@ -973,50 +973,56 @@ impl Rooms { .filter(|(_, &b)| b == 0xff) .nth(1) .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? - .0 + 1; // +1 because the pdu id starts AFTER the separator + .0 + + 1; // +1 because the pdu id starts AFTER the separator - let pdu_id = - key.subslice(pduid_index, key.len() - pduid_index); + let pdu_id = key.subslice(pduid_index, key.len() - pduid_index); Ok::<_, Error>(pdu_id) }) .filter_map(|r| r.ok()) - .peekable() }); - let first_iterator = match iterators.next() { - Some(i) => i, - None => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "search_term needs to contain at least one word.", - )) - } - }; + Ok((utils::common_elements(iterators).unwrap(), words)) + } - let mut other_iterators = iterators.collect::>(); + pub fn get_shared_rooms<'a>( + &'a self, + users: Vec, + ) -> impl Iterator> + 'a { + let iterators = users.into_iter().map(move |user_id| { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); - Ok(( - first_iterator.filter(move |target| { - other_iterators - .iter_mut() - .map(|it| { - while let Some(element) = it.peek() { - if element > target { - return false; - } else if element == target { - return true; - } else { - it.next(); - } - } + self.userroomid_joined + .scan_prefix(&prefix) + .keys() + .filter_map(|r| r.ok()) + .map(|key| { + let roomid_index = key + .iter() + .enumerate() + .filter(|(_, &b)| b == 0xff) + .nth(0) + .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? + .0 + + 1; // +1 because the room id starts AFTER the separator - false - }) - .all(|b| b) - }), - words, - )) + let room_id = key.subslice(roomid_index, key.len() - roomid_index); + + Ok::<_, Error>(room_id) + }) + .filter_map(|r| r.ok()) + }); + + utils::common_elements(iterators) + .expect("users is not empty") + .map(|bytes| { + RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { + Error::bad_database("Invalid RoomId bytes in userroomid_joined") + })?) + .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + }) } /// Returns an iterator over all joined members of a room. 
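The new `search_pdus` and `get_shared_rooms` queries both reduce to the same operation: intersecting several ascending streams of sled keys (one stream per search word, or one per user). A minimal standalone sketch of that peek-and-advance intersection, using plain `u64` keys purely for illustration rather than the actual `IVec` byte keys, and with `intersect_sorted` as a made-up name for what the `utils::common_elements` helper later in this patch does:

// Illustrative sketch only; not part of the patch itself.
fn intersect_sorted(lists: Vec<Vec<u64>>) -> Vec<u64> {
    // Turn every sorted list into a peekable iterator.
    let mut iterators = lists.into_iter().map(|list| list.into_iter().peekable());
    let first = match iterators.next() {
        Some(first) => first,
        None => return Vec::new(),
    };
    let mut others: Vec<_> = iterators.collect();

    first
        .filter(|target| {
            others.iter_mut().all(|it| {
                while let Some(&next) = it.peek() {
                    if next > *target {
                        return false; // overshot: this stream is missing `target`
                    } else if next == *target {
                        return true; // `target` is present here as well
                    }
                    it.next(); // still behind `target`, keep advancing
                }
                false // stream exhausted before reaching `target`
            })
        })
        .collect()
}

fn main() {
    // Only 3 and 9 appear in all three ascending lists, so only they survive.
    let shared = intersect_sorted(vec![vec![1, 3, 9], vec![3, 4, 9], vec![0, 3, 9, 11]]);
    assert_eq!(shared, vec![3, 9]);
}

Because every stream already comes out of `scan_prefix` in key order, each key is looked at no more than once per stream, so intersecting the `tokenids` or `userroomid_joined` prefix scans stays linear in the number of keys touched.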
diff --git a/src/database/users.rs b/src/database/users.rs index 2500b4c..1b6a681 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -408,19 +408,7 @@ impl Users { &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"), )?; - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(&user_id) { - let mut key = room_id?.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(key, &*user_id.to_string())?; - } - - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(key, &*user_id.to_string())?; + self.mark_device_key_update(user_id, rooms, globals)?; Ok(()) } @@ -520,19 +508,7 @@ impl Users { .insert(&*user_id.to_string(), user_signing_key_key)?; } - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(&user_id) { - let mut key = room_id?.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(key, &*user_id.to_string())?; - } - - let mut key = user_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(key, &*user_id.to_string())?; + self.mark_device_key_update(user_id, rooms, globals)?; Ok(()) } @@ -576,21 +552,7 @@ impl Users { )?; // TODO: Should we notify about this change? - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(&target_id) { - let mut key = room_id?.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid - .insert(key, &*target_id.to_string())?; - } - - let mut key = target_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid - .insert(key, &*target_id.to_string())?; + self.mark_device_key_update(target_id, rooms, globals)?; Ok(()) } @@ -628,6 +590,37 @@ impl Users { }) } + fn mark_device_key_update( + &self, + user_id: &UserId, + rooms: &super::rooms::Rooms, + globals: &super::globals::Globals, + ) -> Result<()> { + let count = globals.next_count()?.to_be_bytes(); + for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + // Don't send key updates to unencrypted rooms + if rooms + .room_state_get(&room_id, &EventType::RoomEncryption, "")? 
+ .is_none() + { + return Ok(()); + } + + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + + self.keychangeid_userid.insert(key, &*user_id.to_string())?; + } + + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&count); + self.keychangeid_userid.insert(key, &*user_id.to_string())?; + + Ok(()) + } + pub fn get_device_keys( &self, user_id: &UserId, diff --git a/src/utils.rs b/src/utils.rs index 0ab3bfa..473c18f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,5 +1,6 @@ use argon2::{Config, Variant}; use rand::prelude::*; +use sled::IVec; use std::{ convert::TryInto, time::{SystemTime, UNIX_EPOCH}, @@ -59,3 +60,29 @@ pub fn calculate_hash(password: &str) -> Result { let salt = random_string(32); argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } + +pub fn common_elements( + mut iterators: impl Iterator>, +) -> Option> { + let first_iterator = iterators.next()?; + let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); + + Some(first_iterator.filter(move |target| { + other_iterators + .iter_mut() + .map(|it| { + while let Some(element) = it.peek() { + if element > target { + return false; + } else if element == target { + return true; + } else { + it.next(); + } + } + + false + }) + .all(|b| b) + })) +} From 64789537f51fdb09a75f832a64b31b15c4f5bfd1 Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 21:00:38 +0200 Subject: [PATCH 0245/1727] fix: device list when leaving rooms --- src/client_server/sync.rs | 69 ++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index ecc7144..ab41642 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -380,31 +380,6 @@ pub async fn sync_events_route( .map(|pdu| pdu.to_sync_room_event()) .collect(); - // TODO: Only until leave point - let mut edus = db - .rooms - .edus - .roomlatests_since(&room_id, since)? - .filter_map(|r| r.ok()) // Filter out buggy events - .collect::>(); - - if db - .rooms - .edus - .last_roomactive_update(&room_id, &db.globals)? - > since - { - edus.push( - serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.roomactives_all(&room_id)?, - )) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - let left_room = sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, timeline: sync_events::Timeline { @@ -415,6 +390,49 @@ pub async fn sync_events_route( state: sync_events::State { events: Vec::new() }, }; + let mut left_since_last_sync = false; + for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? 
{ + let pdu = pdu?; + if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + + if content.membership == MembershipState::Leave { + left_since_last_sync = true; + break; + } + } + } + + if left_since_last_sync { + device_list_left.extend( + db.rooms + .room_members(&room_id) + .filter_map(|user_id| { + Some( + UserId::try_from(user_id.ok()?.clone()) + .map_err(|_| { + Error::bad_database("Invalid member event state key in db.") + }) + .ok()?, + ) + }) + .filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_id != user_id + }) + .filter(|user_id| { + // Only send if the sender doesn't share any encrypted room with the target + // anymore + !share_encrypted_room(&db, sender_id, user_id, &room_id) + }), + ); + } + if !left_room.is_empty() { left_rooms.insert(room_id.clone(), left_room); } @@ -461,7 +479,6 @@ pub async fn sync_events_route( } } - // TODO: mark users as left when WE left an encrypted room they were in for user_id in left_encrypted_users { // If the user doesn't share an encrypted room with the target anymore, we need to tell // them From df936e8e7ef1336d0f558bbc6b72a834a3fc695a Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 21:19:14 +0200 Subject: [PATCH 0246/1727] improvement: guests are more spec-conform --- src/client_server/account.rs | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 1cdbeca..e7a27fb 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -15,6 +15,7 @@ use ruma::{ UserId, }; +use register::RegistrationKind; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -83,12 +84,18 @@ pub fn register_route( )); } + let is_guest = matches!(body.kind, Some(RegistrationKind::Guest)); + // Validate user id let user_id = UserId::parse_with_server_name( - body.username - .clone() - .unwrap_or_else(|| utils::random_string(GUEST_NAME_LENGTH)) - .to_lowercase(), + if is_guest { + utils::random_string(GUEST_NAME_LENGTH) + } else { + body.username.clone().ok_or_else(|| { + Error::BadRequest(ErrorKind::MissingParam, "Missing username field.") + })? 
+ } + .to_lowercase(), db.globals.server_name(), ) .ok() @@ -131,7 +138,12 @@ pub fn register_route( return Err(Error::Uiaa(uiaainfo)); } - let password = body.password.clone().unwrap_or_default(); + let password = if is_guest { + None + } else { + body.password.clone() + } + .unwrap_or_default(); // Create user db.users.create(&user_id, &password)?; @@ -149,7 +161,7 @@ pub fn register_route( &db.globals, )?; - if body.inhibit_login { + if !is_guest && body.inhibit_login { return Ok(register::Response { access_token: None, user_id, @@ -159,10 +171,12 @@ pub fn register_route( } // Generate new device id if the user didn't specify one - let device_id = body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + let device_id = if is_guest { + None + } else { + body.device_id.clone() + } + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); // Generate new token for the device let token = utils::random_string(TOKEN_LENGTH); From c092f06068b553f5f4903075dc0eeeb98db80765 Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 21:38:01 +0200 Subject: [PATCH 0247/1727] docs: /sync --- src/client_server/sync.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index ab41642..2307f02 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -14,6 +14,17 @@ use std::{ time::Duration, }; +/// # `GET /_matrix/client/r0/sync` +/// +/// Synchronize the client's state with the latest state on the server. +/// +/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a +/// previous request. +/// - Calling this endpoint without a `since` parameter will return all recent events, the state +/// of all rooms and more data. This should only be called on the initial login of the device. +/// - To get incremental updates, you can call this endpoint with a `since` parameter. This will +/// return all recent events, state updates and more data that happened since the last /sync +/// request. 
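The `since`/`next_batch` contract described in this doc comment amounts to a simple long-polling loop on the client side. The following is a rough, hypothetical client sketch only (it is not part of this patch); the homeserver URL, bearer-token handling, and JSON handling are assumptions made for illustration, and it needs reqwest with the `blocking` feature:

use std::{thread, time::Duration};

// Hypothetical client-side loop for the /sync endpoint documented above.
fn sync_forever(homeserver: &str, access_token: &str) -> reqwest::Result<()> {
    let client = reqwest::blocking::Client::new();
    let mut since: Option<String> = None; // no `since` => initial sync with full state

    loop {
        let mut url = format!("{}/_matrix/client/r0/sync?timeout=30000", homeserver);
        if let Some(token) = &since {
            // Incremental sync: only what happened after the last `next_batch`.
            url.push_str(&format!("&since={}", token));
        }

        let body: serde_json::Value = client
            .get(url.as_str())
            .header("Authorization", format!("Bearer {}", access_token))
            .send()?
            .json()?;

        // Use the returned token as the `since` value of the next request.
        since = body
            .get("next_batch")
            .and_then(|v| v.as_str())
            .map(|s| s.to_owned());

        // ... process rooms.join / rooms.leave / device_lists here ...
        thread::sleep(Duration::from_millis(200));
    }
}
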
#[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/sync", data = "") From 804d097a18000e8ae3ece82a1b513b2d6eddd196 Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 22:02:32 +0200 Subject: [PATCH 0248/1727] fix: wake up /sync for typing events --- src/database.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/database.rs b/src/database.rs index eb27325..7bbb6dd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -129,7 +129,6 @@ impl Database { pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { let userid_bytes = user_id.to_string().as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); userid_prefix.push(0xff); @@ -153,7 +152,8 @@ impl Database { // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let mut roomid_prefix = room_id.to_string().as_bytes().to_vec(); + let roomid_bytes = room_id.to_string().as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); roomid_prefix.push(0xff); // PDUs @@ -164,7 +164,7 @@ impl Database { self.rooms .edus .roomid_lastroomactiveupdate - .watch_prefix(&roomid_prefix), + .watch_prefix(&roomid_bytes), ); futures.push( From 366554630a6bcb76ca6c6bc89f418e5acaef841e Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 22:49:19 +0200 Subject: [PATCH 0249/1727] fix: account registration --- src/client_server/account.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index e7a27fb..fe5ac97 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -86,14 +86,20 @@ pub fn register_route( let is_guest = matches!(body.kind, Some(RegistrationKind::Guest)); + let mut missing_username = false; + // Validate user id let user_id = UserId::parse_with_server_name( if is_guest { utils::random_string(GUEST_NAME_LENGTH) } else { - body.username.clone().ok_or_else(|| { - Error::BadRequest(ErrorKind::MissingParam, "Missing username field.") - })? + body.username.clone().unwrap_or_else(|| { + // If the user didn't send a username field, that means the client is just trying + // the get an UIAA error to see available flows + missing_username = true; + // Just give the user a random name. He won't be able to register with it anyway. + utils::random_string(GUEST_NAME_LENGTH) + }) } .to_lowercase(), db.globals.server_name(), @@ -106,7 +112,7 @@ pub fn register_route( ))?; // Check if username is creative enough - if db.users.exists(&user_id)? { + if !missing_username && db.users.exists(&user_id)? 
{ return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -138,6 +144,10 @@ pub fn register_route( return Err(Error::Uiaa(uiaainfo)); } + if missing_username { + return Err(Error::BadRequest(ErrorKind::MissingParam, "Missing username field.")); + } + let password = if is_guest { None } else { From 7ba9263cc669eaadecd22558eb4e9410c96593c8 Mon Sep 17 00:00:00 2001 From: Timo Date: Sat, 22 Aug 2020 23:09:53 +0200 Subject: [PATCH 0250/1727] improvement: show most recent PDUs first when searching --- src/client_server/account.rs | 5 ++++- src/database/rooms.rs | 13 +++++++++++-- src/utils.rs | 16 ++++++++++------ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index fe5ac97..9837d1b 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -145,7 +145,10 @@ pub fn register_route( } if missing_username { - return Err(Error::BadRequest(ErrorKind::MissingParam, "Missing username field.")); + return Err(Error::BadRequest( + ErrorKind::MissingParam, + "Missing username field.", + )); } let password = if is_guest { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 767f581..d2cd5e9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -965,6 +965,7 @@ impl Rooms { self.tokenids .scan_prefix(&prefix2) .keys() + .rev() // Newest pdus first .filter_map(|r| r.ok()) .map(|key| { let pduid_index = key @@ -983,7 +984,14 @@ impl Rooms { .filter_map(|r| r.ok()) }); - Ok((utils::common_elements(iterators).unwrap(), words)) + Ok(( + utils::common_elements(iterators, |a, b| { + // We compare b with a because we reversed the iterator earlier + b.cmp(a) + }) + .unwrap(), + words, + )) } pub fn get_shared_rooms<'a>( @@ -1015,7 +1023,8 @@ impl Rooms { .filter_map(|r| r.ok()) }); - utils::common_elements(iterators) + // We use the default compare function because keys are sorted correctly (not reversed) + utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { diff --git a/src/utils.rs b/src/utils.rs index 473c18f..8cf1b2c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,7 +1,9 @@ use argon2::{Config, Variant}; +use cmp::Ordering; use rand::prelude::*; use sled::IVec; use std::{ + cmp, convert::TryInto, time::{SystemTime, UNIX_EPOCH}, }; @@ -63,6 +65,7 @@ pub fn calculate_hash(password: &str) -> Result { pub fn common_elements( mut iterators: impl Iterator>, + check_order: impl Fn(&IVec, &IVec) -> Ordering, ) -> Option> { let first_iterator = iterators.next()?; let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); @@ -72,12 +75,13 @@ pub fn common_elements( .iter_mut() .map(|it| { while let Some(element) = it.peek() { - if element > target { - return false; - } else if element == target { - return true; - } else { - it.next(); + match check_order(element, target) { + Ordering::Greater => return false, // We went too far + Ordering::Equal => return true, // Element is in both iters + Ordering::Less => { + // Keep searching + it.next(); + } } } From 38663228f524d15fae9c4b3d1e7c64a7bf61d308 Mon Sep 17 00:00:00 2001 From: Timo Date: Sun, 23 Aug 2020 16:47:27 +0200 Subject: [PATCH 0251/1727] fix: put reason of redaction in the redacted event --- src/database/rooms.rs | 22 ++++------------------ src/pdu.rs | 4 ++-- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d2cd5e9..3c1febd 100644 
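As a side note to the search-ordering change above: the `check_order` comparator added to `common_elements` lets the same peek-and-advance intersection run over iterators sorted in either direction. The search index is now walked newest-first (`.rev()`), so it passes `|a, b| b.cmp(a)`, while `get_shared_rooms` keeps `Ord::cmp`. Below is a self-contained sketch of that intersection idea over plain sorted vectors, for illustration only and independent of sled and of the real helper:

use std::cmp::Ordering;

// Keep only the elements present in every list, assuming all lists are
// sorted consistently with `check_order` and at least one list is given.
fn intersect_sorted<T>(
    lists: Vec<Vec<T>>,
    check_order: impl Fn(&T, &T) -> Ordering,
) -> Vec<T> {
    let mut iters = lists
        .into_iter()
        .map(|l| l.into_iter().peekable())
        .collect::<Vec<_>>();
    let (first, rest) = iters.split_at_mut(1);
    let mut result = Vec::new();

    'outer: while let Some(target) = first[0].next() {
        for it in rest.iter_mut() {
            loop {
                match it.peek() {
                    Some(e) => match check_order(e, &target) {
                        Ordering::Greater => continue 'outer, // overshot: not shared
                        Ordering::Equal => break,             // found in this iterator
                        Ordering::Less => { it.next(); }      // keep advancing
                    },
                    None => break 'outer, // one iterator exhausted: nothing more shared
                }
            }
        }
        result.push(target);
    }
    result
}

fn main() {
    let a = vec![1, 3, 5, 7, 9];
    let b = vec![3, 4, 5, 9];
    let c = vec![1, 3, 5, 9, 11];
    // Ascending lists use `Ord::cmp`; reverse-sorted lists (newest first,
    // like the search index above) would pass `|a, b| b.cmp(a)` instead.
    assert_eq!(intersect_sorted(vec![a, b, c], Ord::cmp), vec![3, 5, 9]);
}

The comparator only tells the helper which direction counts as "past the target"; the elements themselves are never reordered.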
--- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -11,7 +11,6 @@ use ruma::{ room::{ join_rules, member, power_levels::{self, PowerLevelsEventContent}, - redaction, }, EventType, }, @@ -566,7 +565,7 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.to_string(), pdu_id.clone())?; - if let Some(state_key) = pdu.state_key { + if let Some(state_key) = &pdu.state_key { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.kind.to_string().as_bytes()); @@ -578,20 +577,7 @@ impl Rooms { match event_type { EventType::RoomRedaction => { if let Some(redact_id) = &redacts { - // TODO: Reason - let _reason = - serde_json::from_value::>(content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - })? - .reason; - - self.redact_pdu(&redact_id)?; + self.redact_pdu(&redact_id, &pdu)?; } } EventType::RoomMember => { @@ -758,12 +744,12 @@ impl Rooms { } /// Replace a PDU with the redacted form. - pub fn redact_pdu(&self, event_id: &EventId) -> Result<()> { + pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact()?; + pdu.redact(&reason)?; self.replace_pdu(&pdu_id, &pdu)?; Ok(()) } else { diff --git a/src/pdu.rs b/src/pdu.rs index 9936802..4458423 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -35,7 +35,7 @@ pub struct PduEvent { } impl PduEvent { - pub fn redact(&mut self) -> Result<()> { + pub fn redact(&mut self, reason: &PduEvent) -> Result<()> { self.unsigned.clear(); let allowed: &[&str] = match self.kind { @@ -71,7 +71,7 @@ impl PduEvent { self.unsigned.insert( "redacted_because".to_owned(), - json!({"content": {}, "type": "m.room.redaction"}), + serde_json::to_string(reason).expect("PduEvent::to_string always works").into() ); self.content = new_content.into(); From c4f5a0a6316200f797bf81d77c3cd2815a73706d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 6 Aug 2020 08:29:59 -0400 Subject: [PATCH 0252/1727] Keep track of State at event for state resolution feat: first steps towards joining rooms over federation Add state-res as a dependency of conduit Add reverse_topological_power_sort before append_pdu Implement statehashstatid_pduid tree for keeping track of state Clean up implementation of state_hash as key for tracking state --- Cargo.lock | 206 +++++++++++- Cargo.toml | 7 +- src/client_server/account.rs | 6 +- src/client_server/alias.rs | 16 +- src/client_server/context.rs | 30 +- src/client_server/device.rs | 8 +- src/client_server/directory.rs | 13 +- src/client_server/filter.rs | 21 +- src/client_server/keys.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 125 ++++++-- src/client_server/message.rs | 43 +-- src/client_server/room.rs | 2 +- src/client_server/session.rs | 20 +- src/client_server/state.rs | 48 ++- src/client_server/sync.rs | 2 +- src/client_server/unversioned.rs | 13 +- src/database.rs | 5 +- src/database/rooms.rs | 525 +++++++++++++++++++++---------- src/database/uiaa.rs | 6 +- src/pdu.rs | 29 ++ src/ruma_wrapper.rs | 10 +- src/server_server.rs | 27 +- src/utils.rs | 6 + 24 files changed, 818 insertions(+), 356 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a7334c..ffee8ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,6 +75,15 @@ dependencies = [ "opaque-debug 
0.2.3", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "arc-swap" version = "0.4.7" @@ -248,6 +257,17 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "chrono" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b" +dependencies = [ + "num-integer", + "num-traits", + "time 0.1.43", +] + [[package]] name = "cloudabi" version = "0.1.0" @@ -281,6 +301,7 @@ dependencies = [ "serde", "serde_json", "sled", + "state-res", "thiserror", "tokio", ] @@ -456,6 +477,12 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +[[package]] +name = "either" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" + [[package]] name = "encoding_rs" version = "0.8.23" @@ -872,6 +899,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "0.4.6" @@ -951,6 +987,21 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084" +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -1439,6 +1490,31 @@ dependencies = [ "syn", ] +[[package]] +name = "regex" +version = "1.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -1560,21 +1636,23 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = 
"git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "ruma-api", + "ruma-appservice-api", "ruma-client-api", "ruma-common", "ruma-events", "ruma-federation-api", "ruma-identifiers", + "ruma-serde", "ruma-signatures", ] [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "http", "percent-encoding", @@ -1589,7 +1667,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1597,14 +1675,28 @@ dependencies = [ "syn", ] +[[package]] +name = "ruma-appservice-api" +version = "0.2.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "serde", + "serde_json", +] + [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "assign", "http", "js_int", + "percent-encoding", "ruma-api", "ruma-common", "ruma-events", @@ -1618,7 +1710,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", "ruma-identifiers", @@ -1631,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", "ruma-common", @@ -1646,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1657,7 +1749,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", "ruma-api", @@ -1672,7 +1764,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1684,7 +1776,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro2", "quote", @@ -1695,7 +1787,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "ruma-serde", "serde", @@ -1706,7 +1798,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "form_urlencoded", "itoa", @@ -1718,7 +1810,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "base64 0.12.3", "ring", @@ -1910,6 +2002,15 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sharded-slab" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +dependencies = [ + "lazy_static", +] + [[package]] name = "signal-hook-registry" version = "1.2.1" @@ -1983,6 +2084,22 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +[[package]] +name = "state-res" +version = "0.1.0" +source = "git+https://github.com/ruma/state-res#789c8140890e076d38b23fa1147c4ff0500c0d38" +dependencies = [ + "itertools", + "js_int", + "maplit", + "ruma", + "serde", + "serde_json", + "thiserror", + "tracing", + "tracing-subscriber", +] + [[package]] name = "stdweb" version = "0.4.20" @@ -2104,6 +2221,15 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + [[package]] name = "time" version = "0.1.43" @@ -2251,9 +2377,21 @@ checksum = "6d79ca061b032d6ce30c660fded31189ca0b9922bf483cd70759f13a2d86786c" dependencies = [ "cfg-if", "log", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fe233f4227389ab7df5b32649239da7ebe0b281824b4e84b342d04d3fd8c25e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.14" @@ -2263,6 
+2401,48 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-log" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd165311cc4d7a555ad11cc77a37756df836182db0d81aac908c8184c584f40" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", + "tracing-serde", +] + [[package]] name = "try-lock" version = "0.2.3" diff --git a/Cargo.toml b/Cargo.toml index 4945e3c..4c14d71 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,9 +16,7 @@ edition = "2018" #rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers -#ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } # Used for matrix spec type definitions and helpers tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries @@ -33,6 +31,9 @@ reqwest = "0.10.6" # Used to send requests thiserror = "1.0.19" # Used for conduit::Error type image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images base64 = "0.12.3" # Used to encode server public key +# state-res = { path = "../../state-res" } +state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0" } + [features] default = ["conduit_bin"] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 9837d1b..9fa1a9c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -75,7 +75,7 @@ pub fn get_register_available_route( )] pub fn register_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { if db.globals.registration_disabled() { return Err(Error::BadRequest( @@ -223,7 +223,7 @@ pub fn register_route( )] pub fn change_password_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let 
device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -305,7 +305,7 @@ pub fn whoami_route(body: Ruma) -> ConduitResult, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index a029388..7dc9078 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -26,7 +26,7 @@ pub fn create_alias_route( db.rooms .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - Ok(create_alias::Response.into()) + Ok(create_alias::Response::new().into()) } #[cfg_attr( @@ -39,7 +39,7 @@ pub fn delete_alias_route( ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; - Ok(delete_alias::Response.into()) + Ok(delete_alias::Response::new().into()) } #[cfg_attr( @@ -60,11 +60,7 @@ pub async fn get_alias_route( ) .await?; - return Ok(get_alias::Response { - room_id: response.room_id, - servers: response.servers, - } - .into()); + return Ok(get_alias::Response::new(response.room_id, response.servers).into()); } let room_id = db @@ -75,9 +71,5 @@ pub async fn get_alias_route( "Room with alias not found.", ))?; - Ok(get_alias::Response { - room_id, - servers: vec![db.globals.server_name().to_string()], - } - .into()) + Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_string()]).into()) } diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 7a6cbce..7b1fad9 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -12,7 +12,7 @@ use rocket::get; )] pub fn get_context_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -75,18 +75,18 @@ pub fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); - Ok(get_context::Response { - start: start_token, - end: end_token, - events_before, - event: Some(base_event), - events_after, - state: db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), - } - .into()) + let mut resp = get_context::Response::new(); + resp.start = start_token; + resp.end = end_token; + resp.events_before = events_before; + resp.event = Some(base_event); + resp.events_after = events_after; + resp.state = db // TODO: State at event + .rooms + .room_state_full(&body.room_id)? 
+ .values() + .map(|pdu| pdu.to_state_event()) + .collect(); + + Ok(resp.into()) } diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 379f827..89033f0 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -37,7 +37,7 @@ pub fn get_devices_route( )] pub fn get_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, _device_id: String, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -56,7 +56,7 @@ pub fn get_device_route( )] pub fn update_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, _device_id: String, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -80,7 +80,7 @@ pub fn update_device_route( )] pub fn delete_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, _device_id: String, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -127,7 +127,7 @@ pub fn delete_device_route( )] pub fn delete_devices_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 26188f7..0aace15 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -6,7 +6,7 @@ use ruma::{ error::ErrorKind, r0::{ directory::{ - self, get_public_rooms, get_public_rooms_filtered, get_room_visibility, + get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, room, @@ -14,6 +14,7 @@ use ruma::{ }, federation, }, + directory::PublicRoomsChunk, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, @@ -35,15 +36,15 @@ pub async fn get_public_rooms_filtered_route( if let Some(other_server) = body .server .clone() - .filter(|server| server != &db.globals.server_name().as_str()) + .filter(|server| server != db.globals.server_name().as_str()) { let response = server_server::send_request( &db, other_server, federation::directory::get_public_rooms::v1::Request { limit: body.limit, - since: body.since.clone(), - room_network: federation::directory::get_public_rooms::v1::RoomNetwork::Matrix, + since: body.since.as_deref(), + room_network: ruma::directory::RoomNetwork::Matrix, }, ) .await?; @@ -107,7 +108,7 @@ pub async fn get_public_rooms_filtered_route( // TODO: Do not load full state? 
let state = db.rooms.room_state_full(&room_id)?; - let chunk = directory::PublicRoomsChunk { + let chunk = PublicRoomsChunk { aliases: Vec::new(), canonical_alias: state .get(&(EventType::RoomCanonicalAlias, "".to_owned())) @@ -272,7 +273,7 @@ pub async fn get_public_rooms_route( body: get_public_rooms_filtered::IncomingRequest { filter: None, limit, - room_network: get_public_rooms_filtered::RoomNetwork::Matrix, + room_network: ruma::directory::RoomNetwork::Matrix, server, since, }, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 165419a..4322de3 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -7,23 +7,18 @@ use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] pub fn get_filter_route() -> ConduitResult { // TODO - Ok(get_filter::Response { - filter: filter::FilterDefinition { - event_fields: None, - event_format: None, - account_data: None, - room: None, - presence: None, - }, - } + Ok(get_filter::Response::new(filter::FilterDefinition { + event_fields: None, + event_format: None, + account_data: None, + room: None, + presence: None, + }) .into()) } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] pub fn create_filter_route() -> ConduitResult { // TODO - Ok(create_filter::Response { - filter_id: utils::random_string(10), - } - .into()) + Ok(create_filter::Response::new(utils::random_string(10)).into()) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index f88878c..3311529 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -167,7 +167,7 @@ pub fn claim_keys_route( )] pub fn upload_signing_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index efcb3a6..79c1f08 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -53,7 +53,7 @@ pub fn create_content_route( )] pub fn get_content_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, _server_name: String, _media_id: String, ) -> ConduitResult { @@ -85,7 +85,7 @@ pub fn get_content_route( )] pub fn get_content_thumbnail_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, _server_name: String, _media_id: String, ) -> ConduitResult { diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 84c0ebd..c04cf7f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -20,6 +20,8 @@ use ruma::{ events::{room::member, EventType}, EventId, Raw, RoomId, RoomVersionId, }; +use state_res::StateEvent; + use std::{collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] @@ -92,17 +94,73 @@ pub async fn join_room_by_id_route( let send_join_response = server_server::send_request( &db, body.room_id.server_name().to_string(), - federation::membership::create_join_event::v2::Request { + federation::membership::create_join_event::v1::Request { room_id: body.room_id.clone(), event_id, - pdu_stub: serde_json::from_value::>(join_event_stub_value) + pdu_stub: serde_json::from_value(join_event_stub_value) .expect("Raw::from_value always works"), }, ) .await?; - dbg!(send_join_response); - todo!("Take send_join_response and 'create' the room using that data"); + dbg!(&send_join_response); + // todo!("Take send_join_response and 
'create' the room using that data"); + + let mut event_map = send_join_response + .room_state + .state + .iter() + .map(|pdu| pdu.deserialize().map(StateEvent::Full)) + .map(|ev| { + let ev = ev?; + Ok::<_, serde_json::Error>((ev.event_id(), ev)) + }) + .collect::, _>>() + .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; + + let _auth_chain = send_join_response + .room_state + .auth_chain + .iter() + .flat_map(|pdu| pdu.deserialize().ok()) + .map(StateEvent::Full) + .collect::>(); + + // TODO make StateResolution's methods free functions ? or no self param ? + let sorted_events_ids = state_res::StateResolution::default() + .reverse_topological_power_sort( + &body.room_id, + &event_map.keys().cloned().collect::>(), + &mut event_map, + &db.rooms, + &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort + ); + + for ev_id in &sorted_events_ids { + // this is a `state_res::StateEvent` that holds a `ruma::Pdu` + let pdu = event_map.get(ev_id).ok_or_else(|| { + Error::Conflict("Found event_id in sorted events that is not in resolved state") + })?; + + db.rooms.append_pdu( + PduBuilder { + room_id: pdu.room_id().unwrap_or(&body.room_id).clone(), + sender: pdu.sender().clone(), + event_type: pdu.kind(), + content: pdu.content().clone(), + unsigned: Some( + pdu.unsigned() + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + ), + state_key: pdu.state_key(), + redacts: pdu.redacts().cloned(), + }, + &db.globals, + &db.account_data, + )?; + } } let event = member::MemberEventContent { @@ -127,10 +185,7 @@ pub async fn join_room_by_id_route( &db.account_data, )?; - Ok(join_room_by_id::Response { - room_id: body.room_id.clone(), - } - .into()) + Ok(join_room_by_id::Response::new(body.room_id.clone()).into()) } #[cfg_attr( @@ -140,7 +195,7 @@ pub async fn join_room_by_id_route( pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, db2: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, @@ -148,7 +203,13 @@ pub async fn join_room_by_id_or_alias_route( client_server::get_alias_route( db, Ruma { - body: alias::get_alias::IncomingRequest { room_alias }, + body: alias::get_alias::IncomingRequest::try_from(http::Request::new( + serde_json::json!({ "room_alias": room_alias }) + .to_string() + .as_bytes() + .to_vec(), + )) + .unwrap(), sender_id: body.sender_id.clone(), device_id: body.device_id.clone(), json_body: None, @@ -160,14 +221,32 @@ pub async fn join_room_by_id_or_alias_route( } }; + // TODO ruma needs to implement the same constructors for the Incoming variants + let tps = if let Some(in_tps) = &body.third_party_signed { + Some(ruma::api::client::r0::membership::ThirdPartySigned { + token: &in_tps.token, + sender: &in_tps.sender, + signatures: in_tps.signatures.clone(), + mxid: &in_tps.mxid, + }) + } else { + None + }; + let body = Ruma { sender_id: body.sender_id.clone(), device_id: body.device_id.clone(), json_body: None, - body: join_room_by_id::IncomingRequest { - room_id, - third_party_signed: body.third_party_signed.clone(), - }, + body: join_room_by_id::IncomingRequest::try_from(http::Request::new( + serde_json::json!({ + "room_id": room_id, + "third_party_signed": tps, + }) + .to_string() + .as_bytes() + .to_vec(), + )) + .unwrap(), }; Ok(join_room_by_id_or_alias::Response { @@ -219,7 +298,7 @@ pub fn leave_room_route( &db.account_data, )?; - Ok(leave_room::Response.into()) + 
Ok(leave_room::Response::new().into()) } #[cfg_attr( @@ -266,7 +345,7 @@ pub fn invite_user_route( )] pub fn kick_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -304,7 +383,7 @@ pub fn kick_user_route( &db.account_data, )?; - Ok(kick_user::Response.into()) + Ok(kick_user::Response::new().into()) } #[cfg_attr( @@ -313,7 +392,7 @@ pub fn kick_user_route( )] pub fn ban_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -359,7 +438,7 @@ pub fn ban_user_route( &db.account_data, )?; - Ok(ban_user::Response.into()) + Ok(ban_user::Response::new().into()) } #[cfg_attr( @@ -368,7 +447,7 @@ pub fn ban_user_route( )] pub fn unban_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -405,7 +484,7 @@ pub fn unban_user_route( &db.account_data, )?; - Ok(unban_user::Response.into()) + Ok(unban_user::Response::new().into()) } #[cfg_attr( @@ -414,13 +493,13 @@ pub fn unban_user_route( )] pub fn forget_room_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, &sender_id)?; - Ok(forget_room::Response.into()) + Ok(forget_room::Response::new().into()) } #[cfg_attr( diff --git a/src/client_server/message.rs b/src/client_server/message.rs index d851214..1b461d2 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -1,8 +1,11 @@ use super::State; use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - r0::message::{get_message_events, send_message_event}, +use ruma::{ + api::client::{ + error::ErrorKind, + r0::message::{get_message_events, send_message_event}, + }, + events::EventContent, }; use std::convert::TryInto; @@ -26,7 +29,7 @@ pub fn send_message_event_route( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), - event_type: body.event_type.clone(), + event_type: body.content.event_type().into(), content: serde_json::from_str( body.json_body .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
@@ -41,7 +44,7 @@ pub fn send_message_event_route( &db.account_data, )?; - Ok(send_message_event::Response { event_id }.into()) + Ok(send_message_event::Response::new(event_id).into()) } #[cfg_attr( @@ -50,7 +53,7 @@ pub fn send_message_event_route( )] pub fn get_message_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -92,13 +95,13 @@ pub fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); - Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: end_token, - chunk: events_after, - state: Vec::new(), - } - .into()) + let mut resp = get_message_events::Response::new(); + resp.start = Some(body.from.clone()); + resp.end = end_token; + resp.chunk = events_after; + resp.state = Vec::new(); + + Ok(resp.into()) } get_message_events::Direction::Backward => { let events_before = db @@ -116,13 +119,13 @@ pub fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect::>(); - Ok(get_message_events::Response { - start: Some(body.from.clone()), - end: start_token, - chunk: events_before, - state: Vec::new(), - } - .into()) + let mut resp = get_message_events::Response::new(); + resp.start = Some(body.from.clone()); + resp.end = start_token; + resp.chunk = events_before; + resp.state = Vec::new(); + + Ok(resp.into()) } } } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index b5f1529..589a2dc 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -315,7 +315,7 @@ pub fn create_room_route( db.rooms.set_public(&room_id, true)?; } - Ok(create_room::Response { room_id }.into()) + Ok(create_room::Response::new(room_id).into()) } #[cfg_attr( diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 4011058..948b455 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,5 +1,4 @@ -use super::State; -use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; +use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ @@ -18,10 +17,7 @@ use rocket::{get, post}; /// when logging in. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] pub fn get_login_types_route() -> ConduitResult { - Ok(get_login_types::Response { - flows: vec![get_login_types::LoginType::Password], - } - .into()) + Ok(get_login_types::Response::new(vec![get_login_types::LoginType::Password]).into()) } /// # `POST /_matrix/client/r0/login` @@ -40,15 +36,15 @@ pub fn get_login_types_route() -> ConduitResult { )] pub fn login_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { // Validate login method let user_id = // TODO: Other login methods - if let (login::UserInfo::MatrixId(username), login::LoginInfo::Password { password }) = - (body.user.clone(), body.login_info.clone()) + if let (login::IncomingUserInfo::MatrixId(username), login::IncomingLoginInfo::Password { password }) = + (&body.user, &body.login_info) { - let user_id = UserId::parse_with_server_name(username, db.globals.server_name()) + let user_id = UserId::parse_with_server_name(username.to_string(), db.globals.server_name()) .map_err(|_| Error::BadRequest( ErrorKind::InvalidUsername, "Username is invalid." 
@@ -126,7 +122,7 @@ pub fn logout_route( db.users.remove_device(&sender_id, device_id)?; - Ok(logout::Response.into()) + Ok(logout::Response::new().into()) } /// # `POST /_matrix/client/r0/logout/all` @@ -154,5 +150,5 @@ pub fn logout_all_route( } } - Ok(logout_all::Response.into()) + Ok(logout_all::Response::new().into()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 60b3e9f..14cc497 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -8,9 +8,9 @@ use ruma::{ send_state_event_for_empty_key, send_state_event_for_key, }, }, - events::{room::canonical_alias, EventType}, - Raw, + events::{AnyStateEventContent, EventContent}, }; +use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -33,17 +33,10 @@ pub fn send_state_event_for_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - if body.event_type == EventType::RoomCanonicalAlias { - let canonical_alias = serde_json::from_value::< - Raw, - >(content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid canonical alias."))?; + if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = &body.content { + let mut aliases = canonical_alias.alt_aliases.clone(); - let mut aliases = canonical_alias.alt_aliases; - - if let Some(alias) = canonical_alias.alias { + if let Some(alias) = canonical_alias.alias.clone() { aliases.push(alias); } @@ -68,7 +61,7 @@ pub fn send_state_event_for_key_route( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), - event_type: body.event_type.clone(), + event_type: body.content.event_type().into(), content, unsigned: None, state_key: Some(body.state_key.clone()), @@ -78,7 +71,7 @@ pub fn send_state_event_for_key_route( &db.account_data, )?; - Ok(send_state_event_for_key::Response { event_id }.into()) + Ok(send_state_event_for_key::Response::new(event_id).into()) } #[cfg_attr( @@ -93,25 +86,28 @@ pub fn send_state_event_for_empty_key_route( let Ruma { body: send_state_event_for_empty_key::IncomingRequest { - room_id, - event_type, - data, + room_id, content, .. }, sender_id, device_id, json_body, } = body; - Ok(send_state_event_for_empty_key::Response { - event_id: send_state_event_for_key_route( + Ok(send_state_event_for_empty_key::Response::new( + send_state_event_for_key_route( db, Ruma { - body: send_state_event_for_key::IncomingRequest { - room_id, - event_type, - data, - state_key: "".to_owned(), - }, + body: send_state_event_for_key::IncomingRequest::try_from(http::Request::new( + serde_json::json!({ + "room_id": room_id, + "state_key": "", + "content": content, + }) + .to_string() + .as_bytes() + .to_vec(), + )) + .unwrap(), sender_id, device_id, json_body, @@ -119,7 +115,7 @@ pub fn send_state_event_for_empty_key_route( )? 
.0 .event_id, - } + ) .into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2307f02..ae4c224 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -31,7 +31,7 @@ use std::{ )] pub async fn sync_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 3ff8bec..ea7f633 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,6 +1,5 @@ use crate::ConduitResult; use ruma::api::client::unversioned::get_supported_versions; -use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -17,13 +16,11 @@ use rocket::get; /// unstable features in their stable releases #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] pub fn get_supported_versions_route() -> ConduitResult { - let mut unstable_features = BTreeMap::new(); + let mut resp = + get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); - unstable_features.insert("org.matrix.e2e_cross_signing".to_owned(), true); + resp.unstable_features + .insert("org.matrix.e2e_cross_signing".to_owned(), true); - Ok(get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], - unstable_features, - } - .into()) + Ok(resp.into()) } diff --git a/src/database.rs b/src/database.rs index 7bbb6dd..6cd65c3 100644 --- a/src/database.rs +++ b/src/database.rs @@ -97,8 +97,8 @@ impl Database { }, pduid_pdu: db.open_tree("pduid_pdu")?, eventid_pduid: db.open_tree("eventid_pduid")?, + roomstateid_pduid: db.open_tree("roomstateid_pduid")?, roomid_pduleaves: db.open_tree("roomid_pduleaves")?, - roomstateid_pdu: db.open_tree("roomstateid_pdu")?, alias_roomid: db.open_tree("alias_roomid")?, aliasid_alias: db.open_tree("alias_roomid")?, @@ -111,6 +111,9 @@ impl Database { userroomid_invited: db.open_tree("userroomid_invited")?, roomuserid_invited: db.open_tree("roomuserid_invited")?, userroomid_left: db.open_tree("userroomid_left")?, + + stateid_pduid: db.open_tree("stateid_pduid")?, + pduid_statehash: db.open_tree("pduid_statehash")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d2cd5e9..0d36326 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -9,7 +9,7 @@ use ruma::{ events::{ ignored_user_list, room::{ - join_rules, member, + member, power_levels::{self, PowerLevelsEventContent}, redaction, }, @@ -18,19 +18,31 @@ use ruma::{ EventId, Raw, RoomAliasId, RoomId, UserId, }; use sled::IVec; +use state_res::{event_auth, Requester, StateEvent, StateMap, StateStore}; + use std::{ - collections::{BTreeMap, HashMap}, + collections::{hash_map::DefaultHasher, BTreeMap, HashMap}, convert::{TryFrom, TryInto}, + hash::{Hash, Hasher}, mem, + result::Result as StdResult, }; +/// The unique identifier of each state group. +/// +/// This is created when a state group is added to the database by +/// hashing the entire state. +pub type StateHashId = String; + +/// This identifier consists of roomId + count. It represents a +/// unique event, it will never be overwritten or removed. 
+pub type PduId = IVec; + pub struct Rooms { pub edus: edus::RoomEdus, pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count pub(super) eventid_pduid: sled::Tree, pub(super) roomid_pduleaves: sled::Tree, - pub(super) roomstateid_pdu: sled::Tree, // RoomStateId = Room + StateType + StateKey - pub(super) alias_roomid: sled::Tree, pub(super) aliasid_alias: sled::Tree, // AliasId = RoomId + Count pub(super) publicroomids: sled::Tree, @@ -42,9 +54,263 @@ pub struct Rooms { pub(super) userroomid_invited: sled::Tree, pub(super) roomuserid_invited: sled::Tree, pub(super) userroomid_left: sled::Tree, + + // STATE TREES + /// This holds the full current state, including the latest event. + pub(super) roomstateid_pduid: sled::Tree, // RoomStateId = Room + StateType + StateKey + /// This holds the full room state minus the latest event. + pub(super) pduid_statehash: sled::Tree, // PDU id -> StateHash + /// Also holds the full room state minus the latest event. + pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + (EventType, StateKey) } +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> StdResult { + let pid = self + .eventid_pduid + .get(event_id.as_bytes()) + .map_err(|e| e.to_string())? + .ok_or_else(|| "PDU via room_id and event_id not found in the db.".to_owned())?; + + utils::deserialize( + &self + .pduid_pdu + .get(pid) + .map_err(|e| e.to_string())? + .ok_or_else(|| "PDU via pduid not found in db.".to_owned())?, + ) + .and_then(|pdu: StateEvent| { + // conduit's PDU's always contain a room_id but some + // of ruma's do not so this must be an Option + if pdu.room_id() == Some(room_id) { + Ok(pdu) + } else { + Err(Error::bad_database("Found PDU for incorrect room in db.")) + } + }) + .map_err(|e| e.to_string()) + } +} + +// These are the methods related to STATE resolution. impl Rooms { + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. + /// The incoming event is the `pdu_id` passed to this method. + pub fn append_state_pdu(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result { + let state_hash = self.new_state_hash_id(room_id)?; + let state = self.current_state_pduids(room_id)?; + + let mut key = state_hash.as_bytes().to_vec(); + key.push(0xff); + + // TODO eventually we could avoid writing to the DB so much on every event + // by keeping track of the delta and write that every so often + for ((ev_ty, state_key), pid) in state { + let mut state_id = key.to_vec(); + state_id.extend_from_slice(ev_ty.to_string().as_bytes()); + key.push(0xff); + state_id.extend_from_slice(state_key.expect("state event").as_bytes()); + key.push(0xff); + + self.stateid_pduid.insert(&state_id, &pid)?; + } + + // This event's state does not include the event itself. `current_state_pduids` + // uses `roomstateid_pduid` before the current event is inserted to the tree so the state + // will be everything up to but not including the incoming event. + self.pduid_statehash.insert(pdu_id, state_hash.as_bytes())?; + + Ok(state_hash) + } + + /// Builds a `StateMap` by iterating over all keys that start + /// with `state_hash`, this gives the full state at event "x". 
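For orientation, the state trees introduced above follow the composite-key convention used throughout the database module: a prefix, a 0xff separator, then the next component, so that a single `scan_prefix` returns a whole group (here, the full state belonging to one state hash). The sketch below is purely illustrative and not part of this patch; the literal hash, event type, state key, and PDU id values are made up:

// Illustrative only: composite keys and prefix scans with sled, mirroring
// the StateId = StateHash + EventType + StateKey layout described above.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let db = sled::Config::new().temporary(true).open()?;
    let stateid_pduid = db.open_tree("stateid_pduid")?;

    let state_hash = b"examplestatehash".to_vec();

    // StateHash + 0xff + EventType + 0xff + StateKey  ->  PduId
    let mut key = state_hash.clone();
    key.push(0xff);
    key.extend_from_slice(b"m.room.member");
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.com");
    stateid_pduid.insert(key, b"made-up-pdu-id".to_vec())?;

    // Every entry of this state group shares the hash prefix, so the full
    // state at that point can be read back with one prefix scan.
    let mut prefix = state_hash;
    prefix.push(0xff);
    for entry in stateid_pduid.scan_prefix(prefix) {
        let (state_id, pdu_id) = entry?;
        println!("{:?} -> {:?}", state_id, pdu_id);
    }
    Ok(())
}
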
+ pub fn get_statemap_by_hash(&self, state_hash: StateHashId) -> Result> { + self.stateid_pduid + .scan_prefix(state_hash.as_bytes()) + .values() + .map(|pduid| { + self.pduid_pdu.get(&pduid?)?.map_or_else( + || Err(Error::bad_database("Failed to find StateMap.")), + |b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }, + ) + }) + .map(|pdu| { + let pdu = pdu?; + Ok(((pdu.kind, pdu.state_key), pdu.event_id)) + }) + .collect::>>() + } + + // TODO make this return Result + /// Fetches the previous StateHash ID to `current`. + pub fn prev_state_hash(&self, current: StateHashId) -> Option { + let mut found = false; + for pair in self.pduid_statehash.iter().rev() { + let prev = utils::string_from_bytes(&pair.ok()?.1).ok()?; + if current == prev { + found = true; + } + if current != prev && found { + return Some(prev); + } + } + None + } + + /// Fetch the current State using the `roomstateid_pduid` tree. + pub fn current_state_pduids(&self, room_id: &RoomId) -> Result> { + // TODO this could also scan roomstateid_pduid if we passed in room_id ? + self.roomstateid_pduid + .scan_prefix(room_id.as_bytes()) + .values() + .map(|pduid| { + let pduid = &pduid?; + self.pduid_pdu.get(pduid)?.map_or_else( + || { + Err(Error::bad_database( + "Failed to find current state of pduid's.", + )) + }, + |b| { + Ok(( + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + pduid.clone(), + )) + }, + ) + }) + .map(|pair| { + let (pdu, id) = pair?; + Ok(((pdu.kind, pdu.state_key), id)) + }) + .collect::>>() + } + + /// Returns the last state hash key added to the db. + pub fn current_state_hash(&self, room_id: &RoomId) -> Result { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + // We must check here because this method is called outside and before + // `append_state_pdu` so the DB can be empty + if self.pduid_statehash.scan_prefix(prefix).next().is_none() { + // TODO use ring crate to hash + return Ok(room_id.as_str().to_owned()); + } + + self.pduid_statehash + .iter() + .next_back() + .map(|pair| { + utils::string_from_bytes(&pair?.1) + .map_err(|_| Error::bad_database("Invalid state hash string in db.")) + }) + .ok_or_else(|| Error::bad_database("No PDU's found for this room."))? + } + + /// This fetches auth event_ids from the current state using the + /// full `roomstateid_pdu` tree. + pub fn get_auth_event_ids( + &self, + room_id: &RoomId, + kind: &EventType, + sender: &UserId, + state_key: Option<&str>, + content: serde_json::Value, + ) -> Result> { + let auth_events = state_res::auth_types_for_event( + kind.clone(), + sender, + state_key.map(|s| s.to_string()), + content, + ); + + let mut events = vec![]; + for (event_type, state_key) in auth_events { + if let Some(state_key) = state_key.as_ref() { + if let Some(id) = self.room_state_get(room_id, &event_type, state_key)? { + events.push(id.event_id); + } + } + } + Ok(events) + } + + // This fetches auth events from the current state using the + /// full `roomstateid_pdu` tree. 
+ pub fn get_auth_events( + &self, + room_id: &RoomId, + kind: &EventType, + sender: &UserId, + state_key: Option<&str>, + content: serde_json::Value, + ) -> Result> { + let auth_events = state_res::auth_types_for_event( + kind.clone(), + sender, + state_key.map(|s| s.to_string()), + content, + ); + + let mut events = StateMap::new(); + for (event_type, state_key) in auth_events { + if let Some(s_key) = state_key.as_ref() { + if let Some(pdu) = self.room_state_get(room_id, &event_type, s_key)? { + events.insert((event_type, state_key), pdu); + } + } + } + Ok(events) + } + + /// Generate a new StateHash. + /// + /// A unique hash made from hashing the current states pduid's. + /// Because `append_state_pdu` handles the empty state db case it does not + /// have to be here. + fn new_state_hash_id(&self, room_id: &RoomId) -> Result { + // Use hashed roomId as the first StateHash key for first state event in room + if self + .pduid_statehash + .scan_prefix(room_id.as_bytes()) + .next() + .is_none() + { + // TODO use ring crate to hash + return Ok(room_id.as_str().to_owned()); + } + + let pdu_ids_to_hash = self + .pduid_statehash + .scan_prefix(room_id.as_bytes()) + .values() + .next_back() + .unwrap() // We just checked if the tree was empty + .map(|hash| { + self.stateid_pduid + .scan_prefix(hash) + .values() + // pduid is roomId + count so just hash the whole thing + .map(|pid| Ok(pid?.to_vec())) + .collect::>>>() + })??; + + let mut hasher = DefaultHasher::new(); + pdu_ids_to_hash.hash(&mut hasher); + let hash = hasher.finish().to_string(); + // TODO not sure how you want to hash this + Ok(hash) + } + /// Checks if a room exists. pub fn exists(&self, room_id: &RoomId) -> Result { let mut prefix = room_id.to_string().as_bytes().to_vec(); @@ -64,16 +330,20 @@ impl Rooms { room_id: &RoomId, ) -> Result> { let mut hashmap = HashMap::new(); - for pdu in self - .roomstateid_pdu - .scan_prefix(&room_id.to_string().as_bytes()) - .values() - .map(|value| { - Ok::<_, Error>( - serde_json::from_slice::(&value?) + for pdu in + self.roomstateid_pduid + .scan_prefix(&room_id.to_string().as_bytes()) + .values() + .map(|value| { + Ok::<_, Error>( + serde_json::from_slice::( + &self.pduid_pdu.get(value?)?.ok_or_else(|| { + Error::bad_database("PDU not found for ID in db.") + })?, + ) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - ) - }) + ) + }) { let pdu = pdu?; let state_key = pdu.state_key.clone().ok_or_else(|| { @@ -95,16 +365,20 @@ impl Rooms { prefix.extend_from_slice(&event_type.to_string().as_bytes()); let mut hashmap = HashMap::new(); - for pdu in self - .roomstateid_pdu - .scan_prefix(&prefix) - .values() - .map(|value| { - Ok::<_, Error>( - serde_json::from_slice::(&value?) + for pdu in + self.roomstateid_pduid + .scan_prefix(&prefix) + .values() + .map(|value| { + Ok::<_, Error>( + serde_json::from_slice::( + &self.pduid_pdu.get(value?)?.ok_or_else(|| { + Error::bad_database("PDU not found for ID in db.") + })?, + ) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - ) - }) + ) + }) { let pdu = pdu?; let state_key = pdu.state_key.clone().ok_or_else(|| { @@ -115,23 +389,28 @@ impl Rooms { Ok(hashmap) } - /// Returns the full room state. + /// Returns a single PDU in `room_id` with key (`event_type`, `state_key`). 
pub fn room_state_get( &self, room_id: &RoomId, event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&event_type.to_string().as_bytes()); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - self.roomstateid_pdu.get(&key)?.map_or(Ok(None), |value| { + self.roomstateid_pduid.get(&key)?.map_or(Ok(None), |value| { Ok::<_, Error>(Some( - serde_json::from_slice::(&value) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + serde_json::from_slice::( + &self + .pduid_pdu + .get(value)? + .ok_or_else(|| Error::bad_database("PDU not found for ID in db."))?, + ) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } @@ -139,7 +418,7 @@ impl Rooms { /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid - .get(event_id.to_string().as_bytes())? + .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( utils::u64_from_bytes( @@ -153,7 +432,7 @@ impl Rooms { /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid - .get(event_id.to_string().as_bytes())? + .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { @@ -174,7 +453,7 @@ impl Rooms { /// Returns the pdu. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid - .get(event_id.to_string().as_bytes())? + .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { @@ -238,16 +517,15 @@ impl Rooms { /// Replace the leaves of a room with a new event. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { self.roomid_pduleaves.remove(key?)?; } - prefix.extend_from_slice(event_id.to_string().as_bytes()); - self.roomid_pduleaves - .insert(&prefix, &*event_id.to_string())?; + prefix.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; Ok(()) } @@ -272,6 +550,14 @@ impl Rooms { // TODO: Make sure this isn't called twice in parallel let prev_events = self.get_pdu_leaves(&room_id)?; + let auth_events = self.get_auth_events( + &room_id, + &event_type, + &sender, + state_key.as_deref(), + content.clone(), + )?; + // Is the event authorized? if let Some(state_key) = &state_key { let power_levels = self @@ -333,138 +619,24 @@ impl Rooms { // Don't allow encryption events when it's disabled !globals.encryption_disabled() } - EventType::RoomMember => { - let target_user_id = UserId::try_from(&**state_key).map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "State key of member event does not contain user id.", - ) - })?; - - let current_membership = self - .room_state_get( - &room_id, - &EventType::RoomMember, - &target_user_id.to_string(), - )? - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok(serde_json::from_value::>( - pdu.content, - ) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? 
- .membership) - })?; - - let target_membership = - serde_json::from_value::>(content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership; - - let target_power = power_levels.users.get(&target_user_id).map_or_else( - || { - if target_membership != member::MembershipState::Join { - None - } else { - Some(&power_levels.users_default) - } - }, - // If it's okay, wrap with Some(_) - Some, - ); - - let join_rules = - self.room_state_get(&room_id, &EventType::RoomJoinRules, "")? - .map_or(Ok::<_, Error>(join_rules::JoinRule::Public), |pdu| { - Ok(serde_json::from_value::< - Raw, - >(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| { - Error::bad_database("Database contains invalid JoinRules event") - })? - .join_rule) - })?; - - if target_membership == member::MembershipState::Join { - let mut prev_events = prev_events.iter(); - let prev_event = self - .get_pdu(prev_events.next().ok_or(Error::BadRequest( - ErrorKind::Unknown, - "Membership can't be the first event", - ))?)? - .ok_or_else(|| { - Error::bad_database("PDU leaf points to invalid event!") - })?; - if prev_event.kind == EventType::RoomCreate - && prev_event.prev_events.is_empty() - { - true - } else if sender != target_user_id { - false - } else if let member::MembershipState::Ban = current_membership { - false - } else { - join_rules == join_rules::JoinRule::Invite - && (current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Invite) - || join_rules == join_rules::JoinRule::Public - } - } else if target_membership == member::MembershipState::Invite { - if let Some(third_party_invite_json) = content.get("third_party_invite") { - if current_membership == member::MembershipState::Ban { - false - } else { - let _third_party_invite = - serde_json::from_value::( - third_party_invite_json.clone(), - ) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "ThirdPartyInvite is invalid", - ) - })?; - todo!("handle third party invites"); - } - } else if sender_membership != member::MembershipState::Join - || current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Ban - { - false - } else { - sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some() - } - } else if target_membership == member::MembershipState::Leave { - if sender == target_user_id { - current_membership == member::MembershipState::Join - || current_membership == member::MembershipState::Invite - } else if sender_membership != member::MembershipState::Join - || current_membership == member::MembershipState::Ban - && sender_power.filter(|&p| p < &power_levels.ban).is_some() - { - false - } else { - sender_power.filter(|&p| p >= &power_levels.kick).is_some() - && target_power < sender_power - } - } else if target_membership == member::MembershipState::Ban { - if sender_membership != member::MembershipState::Join { - false - } else { - sender_power.filter(|&p| p >= &power_levels.ban).is_some() - && target_power < sender_power - } - } else { - false - } - } + EventType::RoomMember => event_auth::is_membership_change_allowed( + // TODO this is a bit of a hack but not sure how to have a type + // declared in `state_res` crate be + Requester { + prev_event_ids: prev_events.to_owned(), + room_id: &room_id, + content: &content, + state_key: Some(state_key.to_owned()), + sender: &sender, + }, + &auth_events + .iter() + .map(|((ty, key), 
pdu)| { + Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res()?)) + }) + .collect::>>()?, + ) + .ok_or(Error::Conflict("Found incoming PDU with invalid data."))?, EventType::RoomCreate => prev_events.is_empty(), // Not allow any of the following events if the sender is not joined. _ if sender_membership != member::MembershipState::Join => false, @@ -474,7 +646,7 @@ impl Rooms { >= &power_levels.state_default } } { - error!("Unauthorized"); + error!("Unauthorized {}", event_type); // Not authorized return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -483,7 +655,7 @@ impl Rooms { } } else if !self.is_joined(&sender, &room_id)? { // TODO: auth rules apply to all events, not only those with a state key - error!("Unauthorized"); + error!("Unauthorized {}", event_type); return Err(Error::BadRequest( ErrorKind::Forbidden, "Event is not authorized", @@ -524,7 +696,10 @@ impl Rooms { depth: depth .try_into() .map_err(|_| Error::bad_database("Depth is invalid"))?, - auth_events: Vec::new(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), redacts: redacts.clone(), unsigned, hashes: ruma::events::pdu::EventHash { @@ -564,15 +739,19 @@ impl Rooms { self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; self.eventid_pduid - .insert(pdu.event_id.to_string(), pdu_id.clone())?; + .insert(pdu.event_id.to_string(), &*pdu_id)?; - if let Some(state_key) = pdu.state_key { - let mut key = room_id.to_string().as_bytes().to_vec(); + if let Some(state_key) = &pdu.state_key { + // We call this first because our StateHash relies on the + // state before the new event + self.append_state_pdu(&room_id, &pdu_id)?; + + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.kind.to_string().as_bytes()); key.push(0xff); key.extend_from_slice(state_key.as_bytes()); - self.roomstateid_pdu.insert(key, &*pdu_json.to_string())?; + self.roomstateid_pduid.insert(key, pdu_id.as_slice())?; } match event_type { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index cece8db..e318f43 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -2,7 +2,7 @@ use crate::{Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::uiaa::{AuthData, UiaaInfo}, + r0::uiaa::{IncomingAuthData, UiaaInfo}, }, DeviceId, UserId, }; @@ -26,12 +26,12 @@ impl Uiaa { &self, user_id: &UserId, device_id: &DeviceId, - auth: &AuthData, + auth: &IncomingAuthData, uiaainfo: &UiaaInfo, users: &super::users::Users, globals: &super::globals::Globals, ) -> Result<(bool, UiaaInfo)> { - if let AuthData::DirectRequest { + if let IncomingAuthData::DirectRequest { kind, session, auth_parameters, diff --git a/src/pdu.rs b/src/pdu.rs index 9936802..5485f23 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -177,6 +177,35 @@ impl PduEvent { } } +impl PduEvent { + pub fn convert_for_state_res(&self) -> Result { + serde_json::from_value(json!({ + "event_id": self.event_id, + "room_id": self.room_id, + "sender": self.sender, + "origin": self.origin, + "origin_server_ts": self.origin_server_ts, + "type": self.kind, + "content": self.content, + "state_key": self.state_key, + "prev_events": self.prev_events + .iter() + .map(|id| (id, EventHash { sha256: "hello".into() })) + .collect::>(), + "depth": self.depth, + "auth_events": self.auth_events + .iter() + .map(|id| (id, EventHash { sha256: "hello".into() })) + .collect::>(), + "redacts": self.redacts, + "unsigned": self.unsigned, + "hashes": self.hashes, + "signatures": self.signatures, + })) + .map_err(|_| 
Error::bad_database("Failed to convert PDU to ruma::Pdu type.")) + } +} + /// Build the start of a PDU in order to add it to the `Database`. #[derive(Debug)] pub struct PduBuilder { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8d86204..80e6e58 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,5 +1,8 @@ use crate::Error; -use ruma::identifiers::{DeviceId, UserId}; +use ruma::{ + api::IncomingRequest, + identifiers::{DeviceId, UserId}, +}; use std::{convert::TryInto, ops::Deref}; #[cfg(feature = "conduit_bin")] @@ -16,13 +19,12 @@ use { tokio::io::AsyncReadExt, Request, State, }, - ruma::api::IncomingRequest, std::io::Cursor, }; /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T, pub sender_id: Option, pub device_id: Option>, @@ -110,7 +112,7 @@ impl<'a, T: IncomingRequest> FromTransformedData<'a> for Ruma { } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index f48f502..e47b50a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,14 +1,17 @@ use crate::{client_server, ConduitResult, Database, Error, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION}; use rocket::{get, post, put, response::content::Json, State}; -use ruma::api::federation::{ - directory::get_public_rooms, - discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, +use ruma::api::{ + client, + federation::{ + directory::get_public_rooms, + discovery::{ + get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, + }, + transactions::send_transaction_message, }, - transactions::send_transaction_message, + OutgoingRequest, }; -use ruma::api::{client, OutgoingRequest}; use serde_json::json; use std::{ collections::BTreeMap, @@ -204,11 +207,11 @@ pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { )] pub async fn get_public_rooms_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let Ruma { body: - get_public_rooms::v1::Request { + get_public_rooms::v1::IncomingRequest { room_network: _room_network, // TODO limit, since, @@ -229,7 +232,7 @@ pub async fn get_public_rooms_route( body: client::r0::directory::get_public_rooms_filtered::IncomingRequest { filter: None, limit, - room_network: client::r0::directory::get_public_rooms_filtered::RoomNetwork::Matrix, + room_network: ruma::directory::RoomNetwork::Matrix, server: None, since, }, @@ -268,9 +271,9 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") )] -pub fn send_transaction_message_route( - db: State<'_, Database>, - body: Ruma, +pub fn send_transaction_message_route<'a>( + _db: State<'a, Database>, + body: Ruma, ) -> ConduitResult { dbg!(&*body); Ok(send_transaction_message::v1::Response { diff --git a/src/utils.rs b/src/utils.rs index 8cf1b2c..77a7d1f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,3 +1,4 @@ +use crate::Error; use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; @@ -90,3 +91,8 @@ pub fn common_elements( .all(|b| b) })) } + +pub fn deserialize<'de, T: serde::Deserialize<'de>>(val: &'de sled::IVec) -> Result { + serde_json::from_slice::(val.as_ref()) + .map_err(|_| Error::bad_database("PDU in db is invalid.")) +} From cb68bf9e0c073aa060b3fe6d9aa14bbfaebc5dae Mon Sep 17 00:00:00 2001 From: Devin 
Ragotzy Date: Tue, 18 Aug 2020 14:32:38 -0400 Subject: [PATCH 0253/1727] Use ring crate to generate StatHashes when saving stateid/statehash --- Cargo.lock | 1 + Cargo.toml | 2 +- src/database/rooms.rs | 20 ++++++++++++-------- src/utils.rs | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffee8ea..faa9e89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -295,6 +295,7 @@ dependencies = [ "log", "rand", "reqwest", + "ring", "rocket", "ruma", "rust-argon2 0.8.2", diff --git a/Cargo.toml b/Cargo.toml index 4c14d71..78d8f76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ image = { version = "0.23.4", default-features = false, features = ["jpeg", "png base64 = "0.12.3" # Used to encode server public key # state-res = { path = "../../state-res" } state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0" } - +ring = "0.16.15" [features] default = ["conduit_bin"] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0d36326..6366c8c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -4,6 +4,7 @@ pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; use log::error; +use ring::digest; use ruma::{ api::client::error::ErrorKind, events::{ @@ -21,9 +22,8 @@ use sled::IVec; use state_res::{event_auth, Requester, StateEvent, StateMap, StateStore}; use std::{ - collections::{hash_map::DefaultHasher, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, - hash::{Hash, Hasher}, mem, result::Result as StdResult, }; @@ -285,8 +285,10 @@ impl Rooms { .next() .is_none() { - // TODO use ring crate to hash - return Ok(room_id.as_str().to_owned()); + return utils::string_from_bytes( + digest::digest(&digest::SHA256, room_id.as_bytes()).as_ref(), + ) + .map_err(|_| Error::bad_database("Empty state generated invalid string from hash.")); } let pdu_ids_to_hash = self @@ -304,11 +306,13 @@ impl Rooms { .collect::>>>() })??; - let mut hasher = DefaultHasher::new(); - pdu_ids_to_hash.hash(&mut hasher); - let hash = hasher.finish().to_string(); + let hash = digest::digest( + &digest::SHA256, + &pdu_ids_to_hash.into_iter().flatten().collect::>(), + ); // TODO not sure how you want to hash this - Ok(hash) + utils::string_from_bytes(hash.as_ref()) + .map_err(|_| Error::bad_database("State generated invalid string from hash.")) } /// Checks if a room exists. diff --git a/src/utils.rs b/src/utils.rs index 77a7d1f..b549153 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -94,5 +94,5 @@ pub fn common_elements( pub fn deserialize<'de, T: serde::Deserialize<'de>>(val: &'de sled::IVec) -> Result { serde_json::from_slice::(val.as_ref()) - .map_err(|_| Error::bad_database("PDU in db is invalid.")) + .map_err(|_| Error::bad_database("Found invalid bytes as PDU in db.")) } From 846a0098c182272a5669bacf0d27fa988eaa4c23 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 18 Aug 2020 16:26:03 -0400 Subject: [PATCH 0254/1727] Split append_pdu -> append_pdu and build_and_append Move all state event appending to append_state_pdu. 
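A minimal, self-contained sketch of the state-hash computation introduced in PATCH 0253 above (using the raw digest bytes it switches to in PATCH 0256 further below): the PDU ids making up the current state are concatenated and digested with ring's SHA-256, and the digest bytes become the StateHash key. The `pdu_ids` argument here is a hypothetical stand-in for the values scanned out of `stateid_pduid`, and `state_hash_for` is not a function in the codebase; only the `ring::digest` calls are real API.

use ring::digest;

/// Derive a StateHash key from the PDU ids of the current state
/// (each id is roughly `room_id ++ 0xff ++ count`).
fn state_hash_for(pdu_ids: &[Vec<u8>]) -> Vec<u8> {
    // Concatenate the ids in iteration order and hash the whole byte string,
    // mirroring `pdu_ids_to_hash.into_iter().flatten().collect::<Vec<u8>>()`.
    let bytes: Vec<u8> = pdu_ids.iter().flatten().copied().collect();
    digest::digest(&digest::SHA256, &bytes).as_ref().to_vec()
}

fn main() {
    let ids = vec![
        b"!room:server\xffpdu-0001".to_vec(),
        b"!room:server\xffpdu-0002".to_vec(),
    ];
    // SHA-256 always yields a 32-byte digest, used directly as the key.
    assert_eq!(state_hash_for(&ids).len(), 32);
}

Because the digest covers the concatenated ids, any change to the set of state PDUs yields a different StateHash, which is what the `pduid_statehash` and `stateid_pduid` trees rely on.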
--- Cargo.lock | 22 ++-- src/client_server/account.rs | 2 +- src/client_server/membership.rs | 37 ++---- src/client_server/message.rs | 2 +- src/client_server/profile.rs | 4 +- src/client_server/redact.rs | 2 +- src/client_server/room.rs | 20 +-- src/client_server/state.rs | 2 +- src/database/rooms.rs | 217 ++++++++++++++++---------------- src/pdu.rs | 28 ++++- 10 files changed, 175 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index faa9e89..3c5d836 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -116,9 +116,9 @@ checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" [[package]] name = "async-trait" -version = "0.1.37" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +checksum = "6e1a4a2f97ce50c9d0282c1468816208588441492b40d813b2e0419c22c05e7f" dependencies = [ "proc-macro2", "quote", @@ -1194,9 +1194,9 @@ checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" [[package]] name = "once_cell" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" [[package]] name = "opaque-debug" @@ -1860,9 +1860,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log", @@ -2088,7 +2088,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res#789c8140890e076d38b23fa1147c4ff0500c0d38" +source = "git+https://github.com/ruma/state-res#4e9b428c0db50ac3a3421ced12a6fd202a1c36a3" dependencies = [ "itertools", "js_int", @@ -2281,9 +2281,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" [[package]] name = "tokio" @@ -2384,9 +2384,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe233f4227389ab7df5b32649239da7ebe0b281824b4e84b342d04d3fd8c25e" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ "proc-macro2", "quote", diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 9fa1a9c..9e52f6d 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -354,7 +354,7 @@ pub fn deactivate_route( third_party_invite: None, }; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c04cf7f..824e871 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -1,6 +1,8 @@ use super::State; use crate::{ - client_server, pdu::PduBuilder, server_server, utils, ConduitResult, Database, Error, 
Ruma, + client_server, + pdu::{PduBuilder, PduEvent}, + server_server, utils, ConduitResult, Database, Error, Ruma, }; use ruma::{ api::{ @@ -142,24 +144,9 @@ pub async fn join_room_by_id_route( Error::Conflict("Found event_id in sorted events that is not in resolved state") })?; - db.rooms.append_pdu( - PduBuilder { - room_id: pdu.room_id().unwrap_or(&body.room_id).clone(), - sender: pdu.sender().clone(), - event_type: pdu.kind(), - content: pdu.content().clone(), - unsigned: Some( - pdu.unsigned() - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - ), - state_key: pdu.state_key(), - redacts: pdu.redacts().cloned(), - }, - &db.globals, - &db.account_data, - )?; + // We do not rebuild the PDU in this case only insert to DB + db.rooms + .append_pdu(PduEvent::try_from(pdu)?, &db.globals, &db.account_data)?; } } @@ -171,7 +158,7 @@ pub async fn join_room_by_id_route( third_party_invite: None, }; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), @@ -284,7 +271,7 @@ pub fn leave_room_route( event.membership = member::MembershipState::Leave; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), @@ -312,7 +299,7 @@ pub fn invite_user_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), @@ -369,7 +356,7 @@ pub fn kick_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), @@ -424,7 +411,7 @@ pub fn ban_user_route( }, )?; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), @@ -470,7 +457,7 @@ pub fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 1b461d2..03832d8 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -25,7 +25,7 @@ pub fn send_message_event_route( let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = db.rooms.append_pdu( + let event_id = db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 1313db7..0707b34 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -31,7 +31,7 @@ pub fn set_displayname_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -134,7 +134,7 @@ pub fn set_avatar_url_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), 
sender: sender_id.clone(), diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index fc65c23..8708692 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -18,7 +18,7 @@ pub fn redact_event_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let event_id = db.rooms.append_pdu( + let event_id = db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 589a2dc..3ee21b6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -56,7 +56,7 @@ pub fn create_room_route( content.room_version = RoomVersionId::Version6; // 1. The room create event - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -71,7 +71,7 @@ pub fn create_room_route( )?; // 2. Let the room creator join - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -120,7 +120,7 @@ pub fn create_room_route( }) .expect("event is valid, we just created it") }; - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -144,7 +144,7 @@ pub fn create_room_route( }); // 4.1 Join Rules - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -169,7 +169,7 @@ pub fn create_room_route( )?; // 4.2 History Visibility - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -187,7 +187,7 @@ pub fn create_room_route( )?; // 4.3 Guest Access - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -224,7 +224,7 @@ pub fn create_room_route( continue; } - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -243,7 +243,7 @@ pub fn create_room_route( // 6. Events implied by name and topic if let Some(name) = &body.name { - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -264,7 +264,7 @@ pub fn create_room_route( } if let Some(topic) = &body.topic { - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), @@ -284,7 +284,7 @@ pub fn create_room_route( // 7. 
Events implied by invite (and TODO: invite_3pid) for user in &body.invite { - db.rooms.append_pdu( + db.rooms.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: sender_id.clone(), diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 14cc497..2920de2 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -57,7 +57,7 @@ pub fn send_state_event_for_key_route( } } - let event_id = db.rooms.append_pdu( + let event_id = db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), sender: sender_id.clone(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6366c8c..0339b7f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -4,6 +4,7 @@ pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; use log::error; +// TODO if ruma-signatures re-exports `use ruma::signatures::digest;` use ring::digest; use ruma::{ api::client::error::ErrorKind, @@ -99,7 +100,13 @@ impl Rooms { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. /// The incoming event is the `pdu_id` passed to this method. - pub fn append_state_pdu(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result { + pub fn append_state_pdu( + &self, + room_id: &RoomId, + pdu_id: &[u8], + state_key: &str, + kind: &EventType, + ) -> Result { let state_hash = self.new_state_hash_id(room_id)?; let state = self.current_state_pduids(room_id)?; @@ -123,6 +130,13 @@ impl Rooms { // will be everything up to but not including the incoming event. self.pduid_statehash.insert(pdu_id, state_hash.as_bytes())?; + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(kind.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(state_key.as_bytes()); + self.roomstateid_pduid.insert(key, pdu_id)?; + Ok(state_hash) } @@ -535,8 +549,92 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. - #[allow(clippy::blocks_in_if_conditions)] pub fn append_pdu( + &self, + pdu: PduEvent, + globals: &super::globals::Globals, + account_data: &super::account_data::AccountData, + ) -> Result { + let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); + ruma::signatures::hash_and_sign_event( + globals.server_name().as_str(), + globals.keypair(), + &mut pdu_json, + ) + .expect("event is valid, we just created it"); + + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + + // Increment the last index and use that + // This is also the next_batch/since value + let index = globals.next_count()?; + + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&index.to_be_bytes()); + + self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; + + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + + if let Some(state_key) = &pdu.state_key { + self.append_state_pdu(&pdu.room_id, &pdu_id, state_key, &pdu.kind)?; + } + + match pdu.kind { + EventType::RoomRedaction => { + if let Some(redact_id) = &pdu.redacts { + // TODO: Reason + let _reason = serde_json::from_value::>( + pdu.content, + ) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })? 
+ .reason; + + self.redact_pdu(&redact_id)?; + } + } + EventType::RoomMember => { + if let Some(state_key) = &pdu.state_key { + // if the state_key fails + let target_user_id = UserId::try_from(state_key.as_str()) + .expect("This state_key was previously validated"); + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + self.update_membership( + &pdu.room_id, + &target_user_id, + serde_json::from_value::(pdu.content).map_err( + |_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + }, + )?, + &pdu.sender, + account_data, + globals, + )?; + } + } + _ => {} + } + self.edus.room_read_set(&pdu.room_id, &pdu.sender, index)?; + + Ok(pdu.event_id) + } + + /// Creates a new persisted data unit and adds it to a room. + pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, globals: &super::globals::Globals, @@ -618,6 +716,7 @@ impl Rooms { ); // Is the event allowed? + #[allow(clippy::blocks_in_if_conditions)] if !match event_type { EventType::RoomEncryption => { // Don't allow encryption events when it's disabled @@ -687,15 +786,15 @@ impl Rooms { let mut pdu = PduEvent { event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), - room_id: room_id.clone(), - sender: sender.clone(), + room_id, + sender, origin: globals.server_name().to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), - kind: event_type.clone(), - content: content.clone(), - state_key: state_key.clone(), + kind: event_type, + content, + state_key, prev_events, depth: depth .try_into() @@ -704,7 +803,7 @@ impl Rooms { .into_iter() .map(|(_, pdu)| pdu.event_id) .collect(), - redacts: redacts.clone(), + redacts, unsigned, hashes: ruma::events::pdu::EventHash { sha256: "aaa".to_owned(), @@ -722,105 +821,7 @@ impl Rooms { )) .expect("ruma's reference hashes are valid event ids"); - let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); - ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), - &mut pdu_json, - ) - .expect("event is valid, we just created it"); - - self.replace_pdu_leaves(&room_id, &pdu.event_id)?; - - // Increment the last index and use that - // This is also the next_batch/since value - let index = globals.next_count()?; - - let mut pdu_id = room_id.to_string().as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&index.to_be_bytes()); - - self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; - - self.eventid_pduid - .insert(pdu.event_id.to_string(), &*pdu_id)?; - - if let Some(state_key) = &pdu.state_key { - // We call this first because our StateHash relies on the - // state before the new event - self.append_state_pdu(&room_id, &pdu_id)?; - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.kind.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(state_key.as_bytes()); - self.roomstateid_pduid.insert(key, pdu_id.as_slice())?; - } - - match event_type { - EventType::RoomRedaction => { - if let Some(redact_id) = &redacts { - // TODO: Reason - let _reason = - serde_json::from_value::>(content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - })? 
- .reason; - - self.redact_pdu(&redact_id)?; - } - } - EventType::RoomMember => { - if let Some(state_key) = state_key { - // if the state_key fails - let target_user_id = UserId::try_from(state_key) - .expect("This state_key was previously validated"); - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &room_id, - &target_user_id, - serde_json::from_value::(content).map_err( - |_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - }, - )?, - &sender, - account_data, - globals, - )?; - } - } - EventType::RoomMessage => { - if let Some(body) = content.get("body").and_then(|b| b.as_str()) { - for word in body - .split_terminator(|c: char| !c.is_alphanumeric()) - .map(str::to_lowercase) - { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - self.tokenids.insert(key, &[])?; - } - } - } - _ => {} - } - self.edus.room_read_set(&room_id, &sender, index)?; - - Ok(pdu.event_id) + self.append_pdu(pdu, globals, account_data) } /// Returns an iterator over all PDUs in a room. @@ -999,7 +1000,7 @@ impl Rooms { if is_ignored { member_content.membership = member::MembershipState::Leave; - self.append_pdu( + self.build_and_append_pdu( PduBuilder { room_id: room_id.clone(), sender: user_id.clone(), diff --git a/src/pdu.rs b/src/pdu.rs index 5485f23..eec8e49 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::collections::HashMap; +use std::{collections::HashMap, convert::TryFrom}; #[derive(Deserialize, Serialize)] pub struct PduEvent { @@ -177,6 +177,30 @@ impl PduEvent { } } +impl TryFrom<&state_res::StateEvent> for PduEvent { + type Error = Error; + fn try_from(pdu: &state_res::StateEvent) -> Result { + serde_json::from_value(json!({ + "event_id": pdu.event_id(), + "room_id": pdu.room_id(), + "sender": pdu.sender(), + "origin": pdu.origin(), + "origin_server_ts": pdu.origin_server_ts(), + "event_type": pdu.kind(), + "content": pdu.content(), + "state_key": pdu.state_key(), + "prev_events": pdu.prev_event_ids(), + "depth": pdu.depth(), + "auth_events": pdu.auth_events(), + "redacts": pdu.redacts(), + "unsigned": pdu.unsigned(), + "hashes": pdu.hashes(), + "signatures": pdu.signatures(), + })) + .map_err(|_| Error::bad_database("Failed to convert PDU to ruma::Pdu type.")) + } +} + impl PduEvent { pub fn convert_for_state_res(&self) -> Result { serde_json::from_value(json!({ @@ -190,11 +214,13 @@ impl PduEvent { "state_key": self.state_key, "prev_events": self.prev_events .iter() + // TODO How do we create one of these .map(|id| (id, EventHash { sha256: "hello".into() })) .collect::>(), "depth": self.depth, "auth_events": self.auth_events .iter() + // TODO How do we create one of these .map(|id| (id, EventHash { sha256: "hello".into() })) .collect::>(), "redacts": self.redacts, From d73c6aa8addcaac5f13cbaa009960659a4365bb2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 19 Aug 2020 17:27:24 -0400 Subject: [PATCH 0255/1727] Add roomid_statehash tree, clean up review issues --- src/client_server/membership.rs | 10 +--- src/database.rs | 1 + src/database/rooms.rs | 95 +++++++++++++++++---------------- 3 files changed, 52 insertions(+), 54 deletions(-) diff --git a/src/client_server/membership.rs 
b/src/client_server/membership.rs index 824e871..555291e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -112,11 +112,7 @@ pub async fn join_room_by_id_route( .room_state .state .iter() - .map(|pdu| pdu.deserialize().map(StateEvent::Full)) - .map(|ev| { - let ev = ev?; - Ok::<_, serde_json::Error>((ev.event_id(), ev)) - }) + .map(|pdu| pdu.deserialize().map(StateEvent::Full).map(|ev| (ev.event_id(), ev))) .collect::, _>>() .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; @@ -140,9 +136,7 @@ pub async fn join_room_by_id_route( for ev_id in &sorted_events_ids { // this is a `state_res::StateEvent` that holds a `ruma::Pdu` - let pdu = event_map.get(ev_id).ok_or_else(|| { - Error::Conflict("Found event_id in sorted events that is not in resolved state") - })?; + let pdu = event_map.get(ev_id).expect("Found event_id in sorted events that is not in resolved state"); // We do not rebuild the PDU in this case only insert to DB db.rooms diff --git a/src/database.rs b/src/database.rs index 6cd65c3..a105058 100644 --- a/src/database.rs +++ b/src/database.rs @@ -114,6 +114,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, + roomid_statehash: db.open_tree("roomid_statehash")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0339b7f..6273005 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -63,6 +63,8 @@ pub struct Rooms { pub(super) pduid_statehash: sled::Tree, // PDU id -> StateHash /// Also holds the full room state minus the latest event. pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + (EventType, StateKey) + /// The room_id -> the latest StateHash + pub(super) roomid_statehash: sled::Tree, } impl StateStore for Rooms { @@ -93,53 +95,7 @@ impl StateStore for Rooms { } } -// These are the methods related to STATE resolution. impl Rooms { - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. - /// The incoming event is the `pdu_id` passed to this method. - pub fn append_state_pdu( - &self, - room_id: &RoomId, - pdu_id: &[u8], - state_key: &str, - kind: &EventType, - ) -> Result { - let state_hash = self.new_state_hash_id(room_id)?; - let state = self.current_state_pduids(room_id)?; - - let mut key = state_hash.as_bytes().to_vec(); - key.push(0xff); - - // TODO eventually we could avoid writing to the DB so much on every event - // by keeping track of the delta and write that every so often - for ((ev_ty, state_key), pid) in state { - let mut state_id = key.to_vec(); - state_id.extend_from_slice(ev_ty.to_string().as_bytes()); - key.push(0xff); - state_id.extend_from_slice(state_key.expect("state event").as_bytes()); - key.push(0xff); - - self.stateid_pduid.insert(&state_id, &pid)?; - } - - // This event's state does not include the event itself. `current_state_pduids` - // uses `roomstateid_pduid` before the current event is inserted to the tree so the state - // will be everything up to but not including the incoming event. 
- self.pduid_statehash.insert(pdu_id, state_hash.as_bytes())?; - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(state_key.as_bytes()); - self.roomstateid_pduid.insert(key, pdu_id)?; - - Ok(state_hash) - } - /// Builds a `StateMap` by iterating over all keys that start /// with `state_hash`, this gives the full state at event "x". pub fn get_statemap_by_hash(&self, state_hash: StateHashId) -> Result> { @@ -633,6 +589,53 @@ impl Rooms { Ok(pdu.event_id) } + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. + /// The incoming event is the `pdu_id` passed to this method. + fn append_state_pdu( + &self, + room_id: &RoomId, + pdu_id: &[u8], + state_key: &str, + kind: &EventType, + ) -> Result { + let state_hash = self.new_state_hash_id(room_id)?; + let state = self.current_state_pduids(room_id)?; + + let mut key = state_hash.as_bytes().to_vec(); + key.push(0xff); + + // TODO eventually we could avoid writing to the DB so much on every event + // by keeping track of the delta and write that every so often + for ((ev_ty, state_key), pid) in state { + let mut state_id = key.to_vec(); + state_id.extend_from_slice(ev_ty.to_string().as_bytes()); + key.push(0xff); + state_id.extend_from_slice(state_key.expect("state event").as_bytes()); + key.push(0xff); + + self.stateid_pduid.insert(&state_id, &pid)?; + } + + // This event's state does not include the event itself. `current_state_pduids` + // uses `roomstateid_pduid` before the current event is inserted to the tree so the state + // will be everything up to but not including the incoming event. + self.pduid_statehash.insert(pdu_id, state_hash.as_bytes())?; + + self.roomid_statehash.insert(room_id.as_bytes(), state_hash.as_bytes())?; + + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(kind.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(state_key.as_bytes()); + self.roomstateid_pduid.insert(key, pdu_id)?; + + Ok(state_hash) + } + /// Creates a new persisted data unit and adds it to a room. pub fn build_and_append_pdu( &self, From 64fb0374b60461ad1b7600e92032dfc1e30b39d3 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 19 Aug 2020 19:29:39 -0400 Subject: [PATCH 0256/1727] Use Vec instead of string for digest bytes and add roomid_statehash --- src/database/rooms.rs | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6273005..c8ff198 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -33,7 +33,7 @@ use std::{ /// /// This is created when a state group is added to the database by /// hashing the entire state. -pub type StateHashId = String; +pub type StateHashId = Vec; /// This identifier consists of roomId + count. It represents a /// unique event, it will never be overwritten or removed. @@ -100,7 +100,7 @@ impl Rooms { /// with `state_hash`, this gives the full state at event "x". 
pub fn get_statemap_by_hash(&self, state_hash: StateHashId) -> Result> { self.stateid_pduid - .scan_prefix(state_hash.as_bytes()) + .scan_prefix(&state_hash) .values() .map(|pduid| { self.pduid_pdu.get(&pduid?)?.map_or_else( @@ -123,12 +123,12 @@ impl Rooms { pub fn prev_state_hash(&self, current: StateHashId) -> Option { let mut found = false; for pair in self.pduid_statehash.iter().rev() { - let prev = utils::string_from_bytes(&pair.ok()?.1).ok()?; - if current == prev { + let prev = pair.ok()?.1; + if current == prev.as_ref() { found = true; } - if current != prev && found { - return Some(prev); + if current != prev.as_ref() && found { + return Some(prev.to_vec()); } } None @@ -172,17 +172,14 @@ impl Rooms { // We must check here because this method is called outside and before // `append_state_pdu` so the DB can be empty if self.pduid_statehash.scan_prefix(prefix).next().is_none() { - // TODO use ring crate to hash - return Ok(room_id.as_str().to_owned()); + // return the hash of the room_id, this represents a room with no state + return self.new_state_hash_id(room_id); } self.pduid_statehash .iter() .next_back() - .map(|pair| { - utils::string_from_bytes(&pair?.1) - .map_err(|_| Error::bad_database("Invalid state hash string in db.")) - }) + .map(|pair| Ok(pair?.1.to_vec())) .ok_or_else(|| Error::bad_database("No PDU's found for this room."))? } @@ -255,10 +252,9 @@ impl Rooms { .next() .is_none() { - return utils::string_from_bytes( - digest::digest(&digest::SHA256, room_id.as_bytes()).as_ref(), - ) - .map_err(|_| Error::bad_database("Empty state generated invalid string from hash.")); + return Ok(digest::digest(&digest::SHA256, room_id.as_bytes()) + .as_ref() + .to_vec()); } let pdu_ids_to_hash = self @@ -280,9 +276,7 @@ impl Rooms { &digest::SHA256, &pdu_ids_to_hash.into_iter().flatten().collect::>(), ); - // TODO not sure how you want to hash this - utils::string_from_bytes(hash.as_ref()) - .map_err(|_| Error::bad_database("State generated invalid string from hash.")) + Ok(hash.as_ref().to_vec()) } /// Checks if a room exists. @@ -604,7 +598,7 @@ impl Rooms { let state_hash = self.new_state_hash_id(room_id)?; let state = self.current_state_pduids(room_id)?; - let mut key = state_hash.as_bytes().to_vec(); + let mut key = state_hash.to_vec(); key.push(0xff); // TODO eventually we could avoid writing to the DB so much on every event @@ -622,9 +616,10 @@ impl Rooms { // This event's state does not include the event itself. `current_state_pduids` // uses `roomstateid_pduid` before the current event is inserted to the tree so the state // will be everything up to but not including the incoming event. 
- self.pduid_statehash.insert(pdu_id, state_hash.as_bytes())?; + self.pduid_statehash.insert(pdu_id, state_hash.as_slice())?; - self.roomid_statehash.insert(room_id.as_bytes(), state_hash.as_bytes())?; + self.roomid_statehash + .insert(room_id.as_bytes(), state_hash.as_slice())?; let mut key = room_id.as_bytes().to_vec(); key.push(0xff); From ea2003240400a5870f1f755d8a13bfb618101aed Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 19 Aug 2020 19:30:28 -0400 Subject: [PATCH 0257/1727] Helper for join_room_by_id route so routes aren't calling routes --- src/client_server/membership.rs | 325 ++++++++++++++++---------------- 1 file changed, 163 insertions(+), 162 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 555291e..0d9fa12 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -13,14 +13,14 @@ use ruma::{ membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, + unban_user, IncomingThirdPartySigned, }, }, }, federation, }, events::{room::member, EventType}, - EventId, Raw, RoomId, RoomVersionId, + EventId, Raw, RoomId, RoomVersionId, UserId, }; use state_res::StateEvent; @@ -37,136 +37,13 @@ pub async fn join_room_by_id_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - - // Ask a remote server if we don't have this room - if !db.rooms.exists(&body.room_id)? && body.room_id.server_name() != db.globals.server_name() { - let make_join_response = server_server::send_request( - &db, - body.room_id.server_name().to_string(), - federation::membership::create_join_event_template::v1::Request { - room_id: body.room_id.clone(), - user_id: sender_id.clone(), - ver: vec![RoomVersionId::Version5, RoomVersionId::Version6], - }, - ) - .await?; - - let mut join_event_stub_value = - serde_json::from_str::(make_join_response.event.json().get()) - .map_err(|_| { - Error::BadServerResponse("Invalid make_join event json received from server.") - })?; - - let join_event_stub = - join_event_stub_value - .as_object_mut() - .ok_or(Error::BadServerResponse( - "Invalid make join event object received from server.", - ))?; - - join_event_stub.insert( - "origin".to_owned(), - db.globals.server_name().to_owned().to_string().into(), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - utils::millis_since_unix_epoch().into(), - ); - - // Generate event id - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&join_event_stub_value) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms - let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); - join_event_stub.remove("event_id"); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut join_event_stub_value, - ) - .expect("event is valid, we just created it"); - - let send_join_response = server_server::send_request( - &db, - body.room_id.server_name().to_string(), - federation::membership::create_join_event::v1::Request { - room_id: body.room_id.clone(), - event_id, - pdu_stub: serde_json::from_value(join_event_stub_value) - .expect("Raw::from_value always works"), - }, - ) - .await?; - - 
dbg!(&send_join_response); - // todo!("Take send_join_response and 'create' the room using that data"); - - let mut event_map = send_join_response - .room_state - .state - .iter() - .map(|pdu| pdu.deserialize().map(StateEvent::Full).map(|ev| (ev.event_id(), ev))) - .collect::, _>>() - .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; - - let _auth_chain = send_join_response - .room_state - .auth_chain - .iter() - .flat_map(|pdu| pdu.deserialize().ok()) - .map(StateEvent::Full) - .collect::>(); - - // TODO make StateResolution's methods free functions ? or no self param ? - let sorted_events_ids = state_res::StateResolution::default() - .reverse_topological_power_sort( - &body.room_id, - &event_map.keys().cloned().collect::>(), - &mut event_map, - &db.rooms, - &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort - ); - - for ev_id in &sorted_events_ids { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` - let pdu = event_map.get(ev_id).expect("Found event_id in sorted events that is not in resolved state"); - - // We do not rebuild the PDU in this case only insert to DB - db.rooms - .append_pdu(PduEvent::try_from(pdu)?, &db.globals, &db.account_data)?; - } - } - - let event = member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: None, - third_party_invite: None, - }; - - db.rooms.build_and_append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(join_room_by_id::Response::new(body.room_id.clone()).into()) + join_room_by_id_helper( + &db, + body.sender_id.as_ref(), + &body.room_id, + body.third_party_signed.as_ref(), + ) + .await } #[cfg_attr( @@ -185,7 +62,7 @@ pub async fn join_room_by_id_or_alias_route( db, Ruma { body: alias::get_alias::IncomingRequest::try_from(http::Request::new( - serde_json::json!({ "room_alias": room_alias }) + serde_json::json!({ "room_alias": room_alias, }) .to_string() .as_bytes() .to_vec(), @@ -202,36 +79,16 @@ pub async fn join_room_by_id_or_alias_route( } }; - // TODO ruma needs to implement the same constructors for the Incoming variants - let tps = if let Some(in_tps) = &body.third_party_signed { - Some(ruma::api::client::r0::membership::ThirdPartySigned { - token: &in_tps.token, - sender: &in_tps.sender, - signatures: in_tps.signatures.clone(), - mxid: &in_tps.mxid, - }) - } else { - None - }; - - let body = Ruma { - sender_id: body.sender_id.clone(), - device_id: body.device_id.clone(), - json_body: None, - body: join_room_by_id::IncomingRequest::try_from(http::Request::new( - serde_json::json!({ - "room_id": room_id, - "third_party_signed": tps, - }) - .to_string() - .as_bytes() - .to_vec(), - )) - .unwrap(), - }; - Ok(join_room_by_id_or_alias::Response { - room_id: join_room_by_id_route(db2, body).await?.0.room_id, + room_id: join_room_by_id_helper( + &db2, + body.sender_id.as_ref(), + &room_id, + body.third_party_signed.as_ref(), + ) + .await? 
+ .0 + .room_id, } .into()) } @@ -568,3 +425,147 @@ pub fn joined_members_route( Ok(joined_members::Response { joined }.into()) } + +async fn join_room_by_id_helper( + db: &Database, + sender_id: Option<&UserId>, + room_id: &RoomId, + _third_party_signed: Option<&IncomingThirdPartySigned>, +) -> ConduitResult { + let sender_id = sender_id.expect("user is authenticated"); + + // Ask a remote server if we don't have this room + if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { + let make_join_response = server_server::send_request( + &db, + room_id.server_name().to_string(), + federation::membership::create_join_event_template::v1::Request { + room_id: room_id.clone(), + user_id: sender_id.clone(), + ver: vec![RoomVersionId::Version5, RoomVersionId::Version6], + }, + ) + .await?; + + let mut join_event_stub_value = + serde_json::from_str::(make_join_response.event.json().get()) + .map_err(|_| { + Error::BadServerResponse("Invalid make_join event json received from server.") + })?; + + let join_event_stub = + join_event_stub_value + .as_object_mut() + .ok_or(Error::BadServerResponse( + "Invalid make join event object received from server.", + ))?; + + join_event_stub.insert( + "origin".to_owned(), + db.globals.server_name().to_owned().to_string().into(), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + utils::millis_since_unix_epoch().into(), + ); + + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&join_event_stub_value) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms + let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); + join_event_stub.remove("event_id"); + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut join_event_stub_value, + ) + .expect("event is valid, we just created it"); + + let send_join_response = server_server::send_request( + &db, + room_id.server_name().to_string(), + federation::membership::create_join_event::v1::Request { + room_id: room_id.clone(), + event_id, + pdu_stub: serde_json::from_value(join_event_stub_value) + .expect("Raw::from_value always works"), + }, + ) + .await?; + + dbg!(&send_join_response); + // todo!("Take send_join_response and 'create' the room using that data"); + + let mut event_map = send_join_response + .room_state + .state + .iter() + .map(|pdu| { + pdu.deserialize() + .map(StateEvent::Full) + .map(|ev| (ev.event_id(), ev)) + }) + .collect::, _>>() + .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; + + let _auth_chain = send_join_response + .room_state + .auth_chain + .iter() + .flat_map(|pdu| pdu.deserialize().ok()) + .map(StateEvent::Full) + .collect::>(); + + // TODO make StateResolution's methods free functions ? or no self param ? 
+ let sorted_events_ids = state_res::StateResolution::default() + .reverse_topological_power_sort( + &room_id, + &event_map.keys().cloned().collect::>(), + &mut event_map, + &db.rooms, + &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort + ); + + for ev_id in &sorted_events_ids { + // this is a `state_res::StateEvent` that holds a `ruma::Pdu` + let pdu = event_map + .get(ev_id) + .expect("Found event_id in sorted events that is not in resolved state"); + + // We do not rebuild the PDU in this case only insert to DB + db.rooms + .append_pdu(PduEvent::try_from(pdu)?, &db.globals, &db.account_data)?; + } + } + + let event = member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }; + + db.rooms.build_and_append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(join_room_by_id::Response::new(room_id.clone()).into()) +} From 5ccdd3694bd86b7048d089d6a2101ae740a1d0f9 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 20 Aug 2020 12:12:02 -0400 Subject: [PATCH 0258/1727] Add helper function for get_alias route --- src/client_server/alias.rs | 28 +++++++++++++++++--------- src/client_server/membership.rs | 35 +++++++++------------------------ 2 files changed, 28 insertions(+), 35 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 7dc9078..12bb8df 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,11 +1,14 @@ use super::State; use crate::{server_server, ConduitResult, Database, Error, Ruma}; -use ruma::api::{ - client::{ - error::ErrorKind, - r0::alias::{create_alias, delete_alias, get_alias}, +use ruma::{ + api::{ + client::{ + error::ErrorKind, + r0::alias::{create_alias, delete_alias, get_alias}, + }, + federation, }, - federation, + RoomAliasId, }; #[cfg(feature = "conduit_bin")] @@ -50,12 +53,19 @@ pub async fn get_alias_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - if body.room_alias.server_name() != db.globals.server_name() { + get_alias_helper(db, &body.room_alias).await +} + +pub async fn get_alias_helper( + db: State<'_, Database>, + room_alias: &RoomAliasId, +) -> ConduitResult { + if room_alias.server_name() != db.globals.server_name() { let response = server_server::send_request( &db, - body.room_alias.server_name().to_string(), + room_alias.server_name().to_string(), federation::query::get_room_information::v1::Request { - room_alias: body.room_alias.to_string(), + room_alias: room_alias.to_string(), }, ) .await?; @@ -65,7 +75,7 @@ pub async fn get_alias_route( let room_id = db .rooms - .id_from_alias(&body.room_alias)? + .id_from_alias(&room_alias)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "Room with alias not found.", diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0d9fa12..0075861 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -8,13 +8,10 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::{ - alias, - membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, IncomingThirdPartySigned, - }, + r0::membership::{ + ban_user, forget_room, get_member_events, invite_user, join_room_by_id, + join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, + unban_user, IncomingThirdPartySigned, }, }, federation, @@ -58,24 +55,10 @@ pub async fn join_room_by_id_or_alias_route( let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(room_alias) => { - client_server::get_alias_route( - db, - Ruma { - body: alias::get_alias::IncomingRequest::try_from(http::Request::new( - serde_json::json!({ "room_alias": room_alias, }) - .to_string() - .as_bytes() - .to_vec(), - )) - .unwrap(), - sender_id: body.sender_id.clone(), - device_id: body.device_id.clone(), - json_body: None, - }, - ) - .await? - .0 - .room_id + client_server::get_alias_helper(db, &room_alias) + .await? + .0 + .room_id } }; @@ -495,7 +478,7 @@ async fn join_room_by_id_helper( room_id: room_id.clone(), event_id, pdu_stub: serde_json::from_value(join_event_stub_value) - .expect("Raw::from_value always works"), + .expect("we just created this event"), }, ) .await?; From fe795d38ead8b83c413187f68e83ccea4185c8c7 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 21 Aug 2020 17:19:18 -0400 Subject: [PATCH 0259/1727] Replace route calling routes with helpers This fixes the panic from ruma "index out of bounds" --- src/client_server/alias.rs | 4 +- src/client_server/membership.rs | 20 ++--- src/client_server/state.rs | 142 ++++++++++++++++++-------------- 3 files changed, 90 insertions(+), 76 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 12bb8df..669f558 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -53,11 +53,11 @@ pub async fn get_alias_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - get_alias_helper(db, &body.room_alias).await + get_alias_helper(&db, &body.room_alias).await } pub async fn get_alias_helper( - db: State<'_, Database>, + db: &Database, room_alias: &RoomAliasId, ) -> ConduitResult { if room_alias.server_name() != db.globals.server_name() { diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0075861..996d3c4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -49,13 +49,12 @@ pub async fn join_room_by_id_route( )] pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, - db2: State<'_, Database>, body: Ruma, ) -> ConduitResult { let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, Err(room_alias) => { - client_server::get_alias_helper(db, &room_alias) + client_server::get_alias_helper(&db, &room_alias) .await? 
.0 .room_id @@ -64,7 +63,7 @@ pub async fn join_room_by_id_or_alias_route( Ok(join_room_by_id_or_alias::Response { room_id: join_room_by_id_helper( - &db2, + &db, body.sender_id.as_ref(), &room_id, body.third_party_signed.as_ref(), @@ -507,14 +506,13 @@ async fn join_room_by_id_helper( .collect::>(); // TODO make StateResolution's methods free functions ? or no self param ? - let sorted_events_ids = state_res::StateResolution::default() - .reverse_topological_power_sort( - &room_id, - &event_map.keys().cloned().collect::>(), - &mut event_map, - &db.rooms, - &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort - ); + let sorted_events_ids = state_res::StateResolution::reverse_topological_power_sort( + &room_id, + &event_map.keys().cloned().collect::>(), + &mut event_map, + &db.rooms, + &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort + ); for ev_id in &sorted_events_ids { // this is a `state_res::StateEvent` that holds a `ruma::Pdu` diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 2920de2..867b051 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -9,8 +9,8 @@ use ruma::{ }, }, events::{AnyStateEventContent, EventContent}, + RoomId, UserId, }; -use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -33,45 +33,14 @@ pub fn send_state_event_for_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = &body.content { - let mut aliases = canonical_alias.alt_aliases.clone(); - - if let Some(alias) = canonical_alias.alias.clone() { - aliases.push(alias); - } - - for alias in aliases { - if alias.server_name() != db.globals.server_name() - || db - .rooms - .id_from_alias(&alias)? - .filter(|room| room == &body.room_id) // Make sure it's the right room - .is_none() - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You are only allowed to send canonical_alias \ - events when it's aliases already exists", - )); - } - } - } - - let event_id = db.rooms.build_and_append_pdu( - PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), - event_type: body.content.event_type().into(), - content, - unsigned: None, - state_key: Some(body.state_key.clone()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - - Ok(send_state_event_for_key::Response::new(event_id).into()) + send_state_event_for_key_helper( + &db, + sender_id, + &body.content, + content, + &body.room_id, + Some(body.state_key.clone()), + ) } #[cfg_attr( @@ -84,34 +53,30 @@ pub fn send_state_event_for_empty_key_route( ) -> ConduitResult { // This just calls send_state_event_for_key_route let Ruma { - body: - send_state_event_for_empty_key::IncomingRequest { - room_id, content, .. - }, + body, sender_id, - device_id, + device_id: _, json_body, } = body; + let json = serde_json::from_str::( + json_body + .as_ref() + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
+ .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; + Ok(send_state_event_for_empty_key::Response::new( - send_state_event_for_key_route( - db, - Ruma { - body: send_state_event_for_key::IncomingRequest::try_from(http::Request::new( - serde_json::json!({ - "room_id": room_id, - "state_key": "", - "content": content, - }) - .to_string() - .as_bytes() - .to_vec(), - )) - .unwrap(), - sender_id, - device_id, - json_body, - }, + send_state_event_for_key_helper( + &db, + sender_id + .as_ref() + .expect("no user for send state empty key rout"), + &body.content, + json, + &body.room_id, + None, )? .0 .event_id, @@ -210,3 +175,54 @@ pub fn get_state_events_for_empty_key_route( } .into()) } + +pub fn send_state_event_for_key_helper( + db: &Database, + sender: &UserId, + content: &AnyStateEventContent, + json: serde_json::Value, + room_id: &RoomId, + state_key: Option, +) -> ConduitResult { + let sender_id = sender; + + if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = content { + let mut aliases = canonical_alias.alt_aliases.clone(); + + if let Some(alias) = canonical_alias.alias.clone() { + aliases.push(alias); + } + + for alias in aliases { + if alias.server_name() != db.globals.server_name() + || db + .rooms + .id_from_alias(&alias)? + .filter(|room| room == room_id) // Make sure it's the right room + .is_none() + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are only allowed to send canonical_alias \ + events when it's aliases already exists", + )); + } + } + } + + let event_id = db.rooms.build_and_append_pdu( + PduBuilder { + room_id: room_id.clone(), + sender: sender_id.clone(), + event_type: content.event_type().into(), + content: json, + unsigned: None, + state_key, + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + Ok(send_state_event_for_key::Response::new(event_id).into()) +} From 672bf4f47376eea520c19fa1b9108478ec56974f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 21 Aug 2020 20:18:56 -0400 Subject: [PATCH 0260/1727] Cargo lock update and a few doc additions --- Cargo.lock | 22 +++++++++++----------- src/client_server/config.rs | 6 +----- src/client_server/room.rs | 1 + src/database/rooms.rs | 2 +- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c5d836..0e5da6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -144,9 +144,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" @@ -247,9 +247,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" [[package]] name = "cfg-if" @@ -960,9 +960,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" +checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" 
[[package]] name = "lock_api" @@ -1358,9 +1358,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" [[package]] name = "proc-macro-crate" @@ -2088,7 +2088,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res#4e9b428c0db50ac3a3421ced12a6fd202a1c36a3" +source = "git+https://github.com/ruma/state-res#d93a965ad17781fa9554bb3cea71673c054b9f3f" dependencies = [ "itertools", "js_int", @@ -2179,9 +2179,9 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +checksum = "891d8d6567fe7c7f8835a3a98af4208f3846fba258c1bc3c31d6e506239f11f9" dependencies = [ "proc-macro2", "quote", diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 8cb6a0d..45aec33 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -56,11 +56,7 @@ pub fn get_global_account_data_route( let data = db .account_data - .get::>( - None, - sender_id, - EventType::try_from(&body.event_type).expect("EventType::try_from can never fail"), - )? + .get::>(None, sender_id, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; Ok(get_global_account_data::Response { account_data: data }.into()) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 3ee21b6..c0603d3 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -195,6 +195,7 @@ pub fn create_room_route( content: match preset { create_room::RoomPreset::PublicChat => { serde_json::to_value(guest_access::GuestAccessEventContent::new( + // In a public room a joining is the only way to access guest_access::GuestAccess::Forbidden, )) .expect("event is valid, we just created it") diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c8ff198..0129742 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -722,7 +722,7 @@ impl Rooms { } EventType::RoomMember => event_auth::is_membership_change_allowed( // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate be + // declared in `state_res` crate easily convert to/from conduit::PduEvent Requester { prev_event_ids: prev_events.to_owned(), room_id: &room_id, From 972babbc795c9bfc6f3aee7b6351c0deb7e51b5d Mon Sep 17 00:00:00 2001 From: Timo Date: Wed, 19 Aug 2020 18:26:39 +0200 Subject: [PATCH 0261/1727] fix: set limited to true when skipping messages in /sync --- src/client_server/config.rs | 3 +-- src/client_server/search.rs | 27 +++++++++++++-------------- src/database/rooms.rs | 19 +++++++++++++++++-- src/pdu.rs | 24 +++++++++++++++++++++++- 4 files changed, 54 insertions(+), 19 deletions(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 45aec33..baa9381 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -5,10 +5,9 @@ use ruma::{ error::ErrorKind, r0::config::{get_global_account_data, set_global_account_data}, }, - events::{custom::CustomEventContent, BasicEvent, EventType}, + 
events::{custom::CustomEventContent, BasicEvent}, Raw, }; -use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; diff --git a/src/client_server/search.rs b/src/client_server/search.rs index dec1ec9..082711d 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -14,7 +14,7 @@ use std::collections::BTreeMap; )] pub fn search_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -56,7 +56,8 @@ pub fn search_events_route( result: db .rooms .get_pdu_from_id(&result)? - .map(|pdu| pdu.to_room_event()), + // TODO this is an awkward type conversion see method + .map(|pdu| pdu.to_any_event()), }) }) .filter_map(|r| r.ok()) @@ -70,17 +71,15 @@ pub fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events::Response { - search_categories: ResultCategories { - room_events: Some(ResultRoomEvents { - count: uint!(0), // TODO - groups: BTreeMap::new(), // TODO - next_batch, - results, - state: BTreeMap::new(), // TODO - highlights: search.1, - }), - }, - } + Ok(search_events::Response::new(ResultCategories { + room_events: Some(ResultRoomEvents { + count: uint!(0), // TODO + groups: BTreeMap::new(), // TODO + next_batch, + results, + state: BTreeMap::new(), // TODO + highlights: search.1, + }), + }) .into()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0129742..d087d65 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -532,7 +532,7 @@ impl Rooms { self.append_state_pdu(&pdu.room_id, &pdu_id, state_key, &pdu.kind)?; } - match pdu.kind { + match &pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { // TODO: Reason @@ -553,7 +553,7 @@ impl Rooms { } } EventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { + if let Some(state_key) = pdu.state_key.as_ref() { // if the state_key fails let target_user_id = UserId::try_from(state_key.as_str()) .expect("This state_key was previously validated"); @@ -576,6 +576,21 @@ impl Rooms { )?; } } + EventType::RoomMessage => { + if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) { + for word in body + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + { + let mut key = pdu.room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(&pdu_id); + self.tokenids.insert(key, &[])?; + } + } + } _ => {} } self.edus.room_read_set(&pdu.room_id, &pdu.sender, index)?; diff --git a/src/pdu.rs b/src/pdu.rs index eec8e49..b565a24 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,7 +2,7 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ events::{ - pdu::EventHash, room::member::MemberEventContent, AnyRoomEvent, AnyStateEvent, + pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, EventId, Raw, RoomId, ServerName, UserId, @@ -99,6 +99,28 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + /// This only works for events that are also AnyRoomEvents. 
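The `EventType::RoomMessage` arm added to the rooms code above builds a crude full-text index: one `tokenids` entry per lowercased word of the message body, keyed as `room_id + 0xff + word + 0xff + pdu_id` with an empty value. The lookup side is not part of this hunk; a sketch of how a query against that key layout could look, assuming a sled tree handle (the function name is illustrative):

```rust
// Illustrative lookup for the word index written above: every key under the
// prefix `room_id 0xff word 0xff` ends in the PDU ID of a message containing
// that word, so the PDU IDs are simply the key suffixes.
fn pdu_ids_for_word(
    tokenids: &sled::Tree,
    room_id: &ruma::RoomId,
    word: &str,
) -> impl Iterator<Item = Vec<u8>> {
    let mut prefix = room_id.to_string().as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(word.to_lowercase().as_bytes()); // index stores lowercased words
    prefix.push(0xff);

    let prefix_len = prefix.len();

    tokenids
        .scan_prefix(prefix)
        .keys()
        .filter_map(|r| r.ok())
        .map(move |key| key[prefix_len..].to_vec())
}
```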
+ pub fn to_any_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "room_id": self.room_id, + }); + + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, From 3c26166fb59d78cf887645144ffbb108328247df Mon Sep 17 00:00:00 2001 From: Timo Date: Fri, 21 Aug 2020 21:22:59 +0200 Subject: [PATCH 0262/1727] improvement: device list works better The only situation that isn't working yet is sending `left` events for users when the sender leaves the room --- src/client_server/sync.rs | 20 ++++++-------------- src/database/rooms.rs | 9 ++++----- src/utils.rs | 6 ------ 3 files changed, 10 insertions(+), 25 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index ae4c224..accb199 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -149,15 +149,7 @@ pub async fn sync_events_route( device_list_updates.extend( db.rooms .room_members(&room_id) - .filter_map(|user_id| { - Some( - UserId::try_from(user_id.ok()?.clone()) - .map_err(|_| { - Error::bad_database("Invalid member event state key in db.") - }) - .ok()?, - ) - }) + .filter_map(|user_id| Some(user_id.ok()?)) .filter(|user_id| { // Don't send key updates from the sender to the sender sender_id != user_id @@ -491,9 +483,7 @@ pub async fn sync_events_route( } for user_id in left_encrypted_users { - // If the user doesn't share an encrypted room with the target anymore, we need to tell - // them - if db + let user_target_encrypted = db .rooms .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) .filter_map(|r| r.ok()) @@ -505,8 +495,10 @@ pub async fn sync_events_route( .is_some(), ) }) - .all(|encrypted| !encrypted) - { + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, we need to tell + // them + if user_target_encrypted { device_list_left.insert(user_id); } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d087d65..575a2bf 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -75,23 +75,23 @@ impl StateStore for Rooms { .map_err(|e| e.to_string())? .ok_or_else(|| "PDU via room_id and event_id not found in the db.".to_owned())?; - utils::deserialize( + serde_json::from_slice( &self .pduid_pdu .get(pid) .map_err(|e| e.to_string())? .ok_or_else(|| "PDU via pduid not found in db.".to_owned())?, ) + .map_err(|e| e.to_string()) .and_then(|pdu: StateEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == Some(room_id) { Ok(pdu) } else { - Err(Error::bad_database("Found PDU for incorrect room in db.")) + Err("Found PDU for incorrect room in db.".into()) } }) - .map_err(|e| e.to_string()) } } @@ -1207,8 +1207,7 @@ impl Rooms { let roomid_index = key .iter() .enumerate() - .filter(|(_, &b)| b == 0xff) - .nth(0) + .find(|(_, &b)| b == 0xff) .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
.0 + 1; // +1 because the room id starts AFTER the separator diff --git a/src/utils.rs b/src/utils.rs index b549153..8cf1b2c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,4 +1,3 @@ -use crate::Error; use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; @@ -91,8 +90,3 @@ pub fn common_elements( .all(|b| b) })) } - -pub fn deserialize<'de, T: serde::Deserialize<'de>>(val: &'de sled::IVec) -> Result { - serde_json::from_slice::(val.as_ref()) - .map_err(|_| Error::bad_database("Found invalid bytes as PDU in db.")) -} From 1848f08292875030f100bc4ffa987e31c15d6912 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 21 Aug 2020 21:44:55 -0400 Subject: [PATCH 0263/1727] Use full sorting algorithm on incoming PDU's in membership --- src/client_server/membership.rs | 50 +++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 996d3c4..3fa3b6a 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -497,7 +497,7 @@ async fn join_room_by_id_helper( .collect::, _>>() .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; - let _auth_chain = send_join_response + let auth_chain = send_join_response .room_state .auth_chain .iter() @@ -505,16 +505,54 @@ async fn join_room_by_id_helper( .map(StateEvent::Full) .collect::>(); - // TODO make StateResolution's methods free functions ? or no self param ? - let sorted_events_ids = state_res::StateResolution::reverse_topological_power_sort( + let power_events = event_map + .values() + .filter(|pdu| pdu.is_power_event()) + .map(|pdu| pdu.event_id()) + .collect::>(); + + // TODO these events are not guaranteed to be sorted but they are resolved, do + // we need the auth_chain + let sorted_power_events = state_res::StateResolution::reverse_topological_power_sort( &room_id, - &event_map.keys().cloned().collect::>(), + &power_events, &mut event_map, &db.rooms, - &[], // TODO auth_diff: is this none since we have a set of resolved events we only want to sort + &auth_chain // if we only use it here just build this list in the first place + .iter() + .map(|pdu| pdu.event_id()) + .collect::>(), ); - for ev_id in &sorted_events_ids { + // TODO we may be able to skip this since they are resolved according to spec + let resolved_power = state_res::StateResolution::iterative_auth_check( + room_id, + &RoomVersionId::Version6, + &sorted_power_events, + &BTreeMap::new(), // unconflicted events + &mut event_map, + &db.rooms, + ) + .expect("iterative auth check failed on resolved events"); + // TODO do we need to dedup them + + let events_to_sort = event_map + .keys() + .filter(|id| !sorted_power_events.contains(id)) + .cloned() + .collect::>(); + + let power_level = resolved_power.get(&(EventType::RoomPowerLevels, Some("".into()))); + + let sorted_event_ids = state_res::StateResolution::mainline_sort( + room_id, + &events_to_sort, + power_level, + &mut event_map, + &db.rooms, + ); + + for ev_id in &sorted_event_ids { // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) From 27ffe778233370347d50b914862b6e866dae557b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 23 Aug 2020 08:32:43 -0400 Subject: [PATCH 0264/1727] Use helper instead of route for get_public_rooms_filtered --- src/client_server/directory.rs | 224 ++++++++++++++++++--------------- src/client_server/sync.rs | 10 +- src/server_server.rs | 25 ++-- 3 files changed, 131 insertions(+), 128 
deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 0aace15..5e03274 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -14,7 +14,7 @@ use ruma::{ }, federation, }, - directory::PublicRoomsChunk, + directory::{Filter, PublicRoomsChunk, RoomNetwork}, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, @@ -33,17 +33,123 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - if let Some(other_server) = body - .server + let Ruma { + body: + get_public_rooms_filtered::IncomingRequest { + limit, + server, + since, + filter, + room_network, + }, + .. + } = body; + get_public_rooms_filtered_helper( + &db, + server.as_deref(), + limit, + since.as_deref(), + filter, // This is not used yet + Some(room_network), // This is not used + ) + .await +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/publicRooms", data = "") +)] +pub async fn get_public_rooms_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let Ruma { + body: + get_public_rooms::IncomingRequest { + limit, + server, + since, + }, + .. + } = body; + + let get_public_rooms_filtered::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } = get_public_rooms_filtered_helper( + &db, + server.as_deref(), + limit, + since.as_deref(), + None, // This is not used + None, // This is not used + ) + .await? + .0; + + Ok(get_public_rooms::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/directory/list/room/<_>", data = "") +)] +pub async fn set_room_visibility_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + match body.visibility { + room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, + room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, + } + + Ok(set_room_visibility::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/directory/list/room/<_>", data = "") +)] +pub async fn get_room_visibility_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + Ok(get_room_visibility::Response { + visibility: if db.rooms.is_public_room(&body.room_id)? 
{ + room::Visibility::Public + } else { + room::Visibility::Private + }, + } + .into()) +} + +pub async fn get_public_rooms_filtered_helper( + db: &Database, + server: Option<&str>, + limit: Option, + since: Option<&str>, + _filter: Option, + _network: Option, +) -> ConduitResult { + if let Some(other_server) = server .clone() - .filter(|server| server != db.globals.server_name().as_str()) + .filter(|server| *server != db.globals.server_name().as_str()) { let response = server_server::send_request( &db, - other_server, + other_server.to_owned(), federation::directory::get_public_rooms::v1::Request { - limit: body.limit, - since: body.since.as_deref(), + limit, + since: since.as_deref(), room_network: ruma::directory::RoomNetwork::Matrix, }, ) @@ -73,10 +179,10 @@ pub async fn get_public_rooms_filtered_route( .into()); } - let limit = body.limit.map_or(10, u64::from); - let mut since = 0_u64; + let limit = limit.map_or(10, u64::from); + let mut num_since = 0_u64; - if let Some(s) = &body.since { + if let Some(s) = &since { let mut characters = s.chars(); let backwards = match characters.next() { Some('n') => false, @@ -89,13 +195,13 @@ pub async fn get_public_rooms_filtered_route( } }; - since = characters + num_since = characters .collect::() .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; if backwards { - since = since.saturating_sub(limit); + num_since = num_since.saturating_sub(limit); } } @@ -217,20 +323,20 @@ pub async fn get_public_rooms_filtered_route( let chunk = all_rooms .into_iter() - .skip(since as usize) + .skip(num_since as usize) .take(limit as usize) .collect::>(); - let prev_batch = if since == 0 { + let prev_batch = if num_since == 0 { None } else { - Some(format!("p{}", since)) + Some(format!("p{}", num_since)) }; let next_batch = if chunk.len() < limit as usize { None } else { - Some(format!("n{}", since + limit)) + Some(format!("n{}", num_since + limit)) }; Ok(get_public_rooms_filtered::Response { @@ -241,89 +347,3 @@ pub async fn get_public_rooms_filtered_route( } .into()) } - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] -pub async fn get_public_rooms_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let Ruma { - body: - get_public_rooms::IncomingRequest { - limit, - server, - since, - }, - sender_id, - device_id, - json_body, - } = body; - - let get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } = get_public_rooms_filtered_route( - db, - Ruma { - body: get_public_rooms_filtered::IncomingRequest { - filter: None, - limit, - room_network: ruma::directory::RoomNetwork::Matrix, - server, - since, - }, - sender_id, - device_id, - json_body, - }, - ) - .await? 
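The directory helper above encodes its pagination position in the `since` token itself: `n{offset}` means continue forward from that offset, `p{offset}` means page backwards, and a backwards request subtracts one page with `saturating_sub`. A self-contained sketch of that token scheme as used by the helper (function names are illustrative):

```rust
/// Illustrative round-trip for the directory pagination tokens used above:
/// `n{offset}` continues forward, `p{offset}` pages backwards by one `limit`.
fn parse_since_token(since: &str, limit: u64) -> Option<u64> {
    let mut chars = since.chars();
    let backwards = match chars.next()? {
        'n' => false,
        'p' => true,
        _ => return None, // unknown prefix, treat as invalid
    };

    let mut offset: u64 = chars.collect::<String>().parse().ok()?;
    if backwards {
        offset = offset.saturating_sub(limit);
    }
    Some(offset)
}

fn next_batch_token(offset: u64, returned: usize, limit: u64) -> Option<String> {
    // No further page if the current chunk was shorter than the limit.
    if (returned as u64) < limit {
        None
    } else {
        Some(format!("n{}", offset + limit))
    }
}
```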
- .0; - - Ok(get_public_rooms::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } - .into()) -} - -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] -pub async fn set_room_visibility_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - match body.visibility { - room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, - room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, - } - - Ok(set_room_visibility::Response.into()) -} - -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] -pub async fn get_room_visibility_route( - db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - Ok(get_room_visibility::Response { - visibility: if db.rooms.is_public_room(&body.room_id)? { - room::Visibility::Public - } else { - room::Visibility::Private - }, - } - .into()) -} diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index accb199..ccb25d1 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -415,15 +415,7 @@ pub async fn sync_events_route( device_list_left.extend( db.rooms .room_members(&room_id) - .filter_map(|user_id| { - Some( - UserId::try_from(user_id.ok()?.clone()) - .map_err(|_| { - Error::bad_database("Invalid member event state key in db.") - }) - .ok()?, - ) - }) + .filter_map(|user_id| Some(user_id.ok()?)) .filter(|user_id| { // Don't send key updates from the sender to the sender sender_id != user_id diff --git a/src/server_server.rs b/src/server_server.rs index e47b50a..ac4407b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -216,9 +216,7 @@ pub async fn get_public_rooms_route( limit, since, }, - sender_id, - device_id, - json_body, + .. } = body; let client::r0::directory::get_public_rooms_filtered::Response { @@ -226,20 +224,13 @@ pub async fn get_public_rooms_route( prev_batch, next_batch, total_room_count_estimate, - } = client_server::get_public_rooms_filtered_route( - db, - Ruma { - body: client::r0::directory::get_public_rooms_filtered::IncomingRequest { - filter: None, - limit, - room_network: ruma::directory::RoomNetwork::Matrix, - server: None, - since, - }, - sender_id, - device_id, - json_body, - }, + } = client_server::get_public_rooms_filtered_helper( + &db, + None, + limit, + since.as_deref(), + None, + Some(ruma::directory::RoomNetwork::Matrix), ) .await? 
.0; From 33215d6099e7aa4728c240c26d243b3862004197 Mon Sep 17 00:00:00 2001 From: Timo Date: Sun, 23 Aug 2020 17:29:39 +0200 Subject: [PATCH 0265/1727] fix: send notification count updates when private read receipts change --- src/client_server/read_marker.rs | 5 +- src/client_server/sync.rs | 20 ++--- src/client_server/typing.rs | 4 +- src/database.rs | 13 +-- src/database/rooms.rs | 2 +- src/database/rooms/edus.rs | 148 ++++++++++++++++++------------- 6 files changed, 107 insertions(+), 85 deletions(-) diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index ff72765..1b8bd8e 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -34,13 +34,14 @@ pub fn set_read_marker_route( )?; if let Some(event) = &body.read_receipt { - db.rooms.edus.room_read_set( + db.rooms.edus.private_read_set( &body.room_id, &sender_id, db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, + &db.globals, )?; let mut user_receipts = BTreeMap::new(); @@ -58,7 +59,7 @@ pub fn set_read_marker_route( }, ); - db.rooms.edus.roomlatest_update( + db.rooms.edus.readreceipt_update( &sender_id, &body.room_id, AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2307f02..8f37354 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -81,7 +81,12 @@ pub async fn sync_events_route( .rev() .collect::>(); - let send_notification_counts = !timeline_pdus.is_empty(); + let send_notification_counts = !timeline_pdus.is_empty() + || db + .rooms + .edus + .last_privateread_update(&sender_id, &room_id)? + > since; // They /sync response doesn't always return all messages, so we say the output is // limited unless there are events in non_timeline_pdus @@ -242,7 +247,7 @@ pub async fn sync_events_route( }; let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &sender_id)? { + if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_id)? { Some( (db.rooms .pdus_since(&sender_id, &room_id, last_read)? @@ -280,20 +285,15 @@ pub async fn sync_events_route( let mut edus = db .rooms .edus - .roomlatests_since(&room_id, since)? + .readreceipts_since(&room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events .collect::>(); - if db - .rooms - .edus - .last_roomactive_update(&room_id, &db.globals)? - > since - { + if db.rooms.edus.last_typing_update(&room_id, &db.globals)? 
> since { edus.push( serde_json::from_str( &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.roomactives_all(&room_id)?, + db.rooms.edus.typings_all(&room_id)?, )) .expect("event is valid, we just created it"), ) diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 7eba13e..89e1e4a 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -16,7 +16,7 @@ pub fn create_typing_event_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if body.typing { - db.rooms.edus.roomactive_add( + db.rooms.edus.typing_add( &sender_id, &body.room_id, body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) @@ -26,7 +26,7 @@ pub fn create_typing_event_route( } else { db.rooms .edus - .roomactive_remove(&sender_id, &body.room_id, &db.globals)?; + .typing_remove(&sender_id, &body.room_id, &db.globals)?; } Ok(create_typing_event::Response.into()) diff --git a/src/database.rs b/src/database.rs index 7bbb6dd..41781b9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -88,10 +88,11 @@ impl Database { }, rooms: rooms::Rooms { edus: rooms::RoomEdus { - roomuserid_lastread: db.open_tree("roomuserid_lastread")?, // "Private" read receipt - roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest")?, // Read receipts - roomactiveid_userid: db.open_tree("roomactiveid_userid")?, // Typing notifs - roomid_lastroomactiveupdate: db.open_tree("roomid_lastroomactiveupdate")?, + readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: db.open_tree("roomid_lastprivatereadupdate")?, + typingid_userid: db.open_tree("typingid_userid")?, + roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?, presenceid_presence: db.open_tree("presenceid_presence")?, userid_lastpresenceupdate: db.open_tree("userid_lastpresenceupdate")?, }, @@ -163,14 +164,14 @@ impl Database { futures.push( self.rooms .edus - .roomid_lastroomactiveupdate + .roomid_lasttypingupdate .watch_prefix(&roomid_bytes), ); futures.push( self.rooms .edus - .roomlatestid_roomlatest + .readreceiptid_readreceipt .watch_prefix(&roomid_prefix), ); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3c1febd..bb14c8a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -621,7 +621,7 @@ impl Rooms { } _ => {} } - self.edus.room_read_set(&room_id, &sender, index)?; + self.edus.private_read_set(&room_id, &sender, index, &globals)?; Ok(pdu.event_id) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index fff30c2..fbd3edb 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -14,17 +14,18 @@ use std::{ }; pub struct RoomEdus { - pub(in super::super) roomuserid_lastread: sled::Tree, // RoomUserId = Room + User - pub(in super::super) roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId - pub(in super::super) roomactiveid_userid: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lastroomactiveupdate: sled::Tree, // LastRoomActiveUpdate = Count + pub(in super::super) readreceiptid_readreceipt: sled::Tree, // ReadReceiptId = RoomId + Count + UserId + pub(in super::super) roomuserid_privateread: sled::Tree, // RoomUserId = Room + User, PrivateRead = Count + pub(in super::super) roomuserid_lastprivatereadupdate: sled::Tree, // LastPrivateReadUpdate = Count + pub(in super::super) 
typingid_userid: sled::Tree, // TypingId = RoomId + TimeoutTime + Count + pub(in super::super) roomid_lasttypingupdate: sled::Tree, // LastRoomTypingUpdate = Count pub(in super::super) presenceid_presence: sled::Tree, // PresenceId = RoomId + Count + UserId pub(in super::super) userid_lastpresenceupdate: sled::Tree, // LastPresenceUpdate = Count } impl RoomEdus { /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). - pub fn roomlatest_update( + pub fn readreceipt_update( &self, user_id: &UserId, room_id: &RoomId, @@ -36,7 +37,7 @@ impl RoomEdus { // Remove old entry if let Some(old) = self - .roomlatestid_roomlatest + .readreceiptid_readreceipt .scan_prefix(&prefix) .keys() .rev() @@ -50,7 +51,7 @@ impl RoomEdus { }) { // This is the old room_latest - self.roomlatestid_roomlatest.remove(old)?; + self.readreceiptid_readreceipt.remove(old)?; } let mut room_latest_id = prefix; @@ -58,7 +59,7 @@ impl RoomEdus { room_latest_id.push(0xff); room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); - self.roomlatestid_roomlatest.insert( + self.readreceiptid_readreceipt.insert( room_latest_id, &*serde_json::to_string(&event).expect("EduEvent::to_string always works"), )?; @@ -67,7 +68,7 @@ impl RoomEdus { } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - pub fn roomlatests_since( + pub fn readreceipts_since( &self, room_id: &RoomId, since: u64, @@ -79,7 +80,7 @@ impl RoomEdus { first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since Ok(self - .roomlatestid_roomlatest + .readreceiptid_readreceipt .range(&*first_possible_edu..) .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -90,9 +91,54 @@ impl RoomEdus { })) } - /// Sets a user as typing until the timeout timestamp is reached or roomactive_remove is + /// Sets a private read marker at `count`. + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64, globals: &super::super::globals::Globals) -> Result<()> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + self.roomuserid_privateread + .insert(&key, &count.to_be_bytes())?; + + self.roomuserid_lastprivatereadupdate + .insert(&key, &globals.next_count()?.to_be_bytes())?; + + Ok(()) + } + + /// Returns the private read marker. + pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + self.roomuserid_privateread.get(key)?.map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) + } + + /// Returns the count of the last typing update in this room. + pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.to_string().as_bytes()); + + Ok(self + .roomuserid_lastprivatereadupdate + .get(&key)? + .map_or(Ok::<_, Error>(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + })?)) + })? + .unwrap_or(0)) + } + + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. 
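All of these EDU trees share one storage convention: composite keys are the textual IDs joined by a `0xff` separator byte (which can never occur inside valid UTF-8), and counters such as `next_count()` values are stored as big-endian `u64` bytes so that sled's lexicographic key order matches numeric order. A minimal sketch of that convention with illustrative helper names (the real code inlines this at every call site):

```rust
use std::convert::TryInto;

use ruma::{RoomId, UserId};

// Illustrative helpers for the key and counter layout used by the EDU trees above.
fn room_user_key(room_id: &RoomId, user_id: &UserId) -> Vec<u8> {
    let mut key = room_id.to_string().as_bytes().to_vec();
    key.push(0xff); // 0xff never appears in UTF-8, so it is a safe separator
    key.extend_from_slice(user_id.to_string().as_bytes());
    key
}

// Big-endian bytes keep numeric order and byte order in sync, which is what
// makes range scans such as `readreceipts_since` start at the right count.
fn count_to_bytes(count: u64) -> [u8; 8] {
    count.to_be_bytes()
}

fn count_from_bytes(bytes: &[u8]) -> Option<u64> {
    Some(u64::from_be_bytes(bytes.try_into().ok()?))
}
```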
- pub fn roomactive_add( + pub fn typing_add( &self, user_id: &UserId, room_id: &RoomId, @@ -104,22 +150,22 @@ impl RoomEdus { let count = globals.next_count()?.to_be_bytes(); - let mut room_active_id = prefix; - room_active_id.extend_from_slice(&timeout.to_be_bytes()); - room_active_id.push(0xff); - room_active_id.extend_from_slice(&count); + let mut room_typing_id = prefix; + room_typing_id.extend_from_slice(&timeout.to_be_bytes()); + room_typing_id.push(0xff); + room_typing_id.extend_from_slice(&count); - self.roomactiveid_userid - .insert(&room_active_id, &*user_id.to_string().as_bytes())?; + self.typingid_userid + .insert(&room_typing_id, &*user_id.to_string().as_bytes())?; - self.roomid_lastroomactiveupdate + self.roomid_lasttypingupdate .insert(&room_id.to_string().as_bytes(), &count)?; Ok(()) } /// Removes a user from typing before the timeout is reached. - pub fn roomactive_remove( + pub fn typing_remove( &self, user_id: &UserId, room_id: &RoomId, @@ -132,19 +178,19 @@ impl RoomEdus { let mut found_outdated = false; - // Maybe there are multiple ones from calling roomactive_add multiple times + // Maybe there are multiple ones from calling roomtyping_add multiple times for outdated_edu in self - .roomactiveid_userid + .typingid_userid .scan_prefix(&prefix) .filter_map(|r| r.ok()) .filter(|(_, v)| v == user_id.as_bytes()) { - self.roomactiveid_userid.remove(outdated_edu.0)?; + self.typingid_userid.remove(outdated_edu.0)?; found_outdated = true; } if found_outdated { - self.roomid_lastroomactiveupdate.insert( + self.roomid_lasttypingupdate.insert( &room_id.to_string().as_bytes(), &globals.next_count()?.to_be_bytes(), )?; @@ -154,7 +200,7 @@ impl RoomEdus { } /// Makes sure that typing events with old timestamps get removed. - fn roomactives_maintain( + fn typings_maintain( &self, room_id: &RoomId, globals: &super::super::globals::Globals, @@ -168,7 +214,7 @@ impl RoomEdus { // Find all outdated edus before inserting a new one for outdated_edu in self - .roomactiveid_userid + .typingid_userid .scan_prefix(&prefix) .keys() .map(|key| { @@ -176,21 +222,21 @@ impl RoomEdus { Ok::<_, Error>(( key.clone(), utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomActive has invalid timestamp or delimiters.") + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") })?) - .map_err(|_| Error::bad_database("RoomActive has invalid timestamp bytes."))?, + .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, )) }) .filter_map(|r| r.ok()) .take_while(|&(_, timestamp)| timestamp < current_timestamp) { // This is an outdated edu (time > timestamp) - self.roomactiveid_userid.remove(outdated_edu.0)?; + self.typingid_userid.remove(outdated_edu.0)?; found_outdated = true; } if found_outdated { - self.roomid_lastroomactiveupdate.insert( + self.roomid_lasttypingupdate.insert( &room_id.to_string().as_bytes(), &globals.next_count()?.to_be_bytes(), )?; @@ -199,16 +245,16 @@ impl RoomEdus { Ok(()) } - /// Returns an iterator over all active events (e.g. typing notifications). - pub fn last_roomactive_update( + /// Returns the count of the last typing update in this room. + pub fn last_typing_update( &self, room_id: &RoomId, globals: &super::super::globals::Globals, ) -> Result { - self.roomactives_maintain(room_id, globals)?; + self.typings_maintain(room_id, globals)?; Ok(self - .roomid_lastroomactiveupdate + .roomid_lasttypingupdate .get(&room_id.to_string().as_bytes())? 
.map_or(Ok::<_, Error>(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { @@ -218,7 +264,7 @@ impl RoomEdus { .unwrap_or(0)) } - pub fn roomactives_all( + pub fn typings_all( &self, room_id: &RoomId, ) -> Result> { @@ -228,17 +274,15 @@ impl RoomEdus { let mut user_ids = Vec::new(); for user_id in self - .roomactiveid_userid + .typingid_userid .scan_prefix(prefix) .values() .map(|user_id| { Ok::<_, Error>( UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| { - Error::bad_database("User ID in roomactiveid_userid is invalid unicode.") + Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) - .map_err(|_| { - Error::bad_database("User ID in roomactiveid_userid is invalid.") - })?, + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?, ) }) { @@ -250,30 +294,6 @@ impl RoomEdus { }) } - /// Sets a private read marker at `count`. - pub fn room_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); - - self.roomuserid_lastread.insert(key, &count.to_be_bytes())?; - - Ok(()) - } - - /// Returns the private read marker. - pub fn room_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.to_string().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); - - self.roomuserid_lastread.get(key)?.map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to From 0c1cc8d82bb1474f6585a516a01fbabe8b040a66 Mon Sep 17 00:00:00 2001 From: Timo Date: Mon, 24 Aug 2020 10:45:57 +0200 Subject: [PATCH 0266/1727] Fix CI --- sytest/sytest-whitelist | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 3c1095b..b0b2097 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -17,6 +17,7 @@ Can invite users to invite-only rooms Can list tags for a room Can logout all devices Can logout current device +Can re-join room if re-invited Can read configuration endpoint Can recv a device message using /sync Can recv device messages until they are acknowledged @@ -113,7 +114,6 @@ Typing events appear in incremental sync Typing events appear in initial sync Uninvited users cannot join the room User appears in user directory -User directory correctly update on display name change User in dir while user still shares private rooms User in shared private room does appear in user directory User is offline if they set_presence=offline in their sync From 38ac3e42be87509ec04c35adfb65b3ba89daecbf Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Mon, 24 Aug 2020 23:30:39 +0200 Subject: [PATCH 0267/1727] Docker add healthcheck and mention Docker Hub image --- Dockerfile | 8 ++++++-- README.md | 9 ++++++++- docker-compose.yml | 23 ++++++++++++----------- docker/README.md | 8 ++++---- docker/docker-compose.traefik.yml | 23 ++++++++++++----------- 5 files changed, 42 insertions(+), 29 deletions(-) diff --git a/Dockerfile b/Dockerfile index fa4b16d..ff84ac6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,10 +53,10 @@ LABEL org.opencontainers.image.created=${CREATED} \ 
org.opencontainers.image.url="https://conduit.rs/" \ org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ - org.opencontainers.image.documentation.="" \ org.opencontainers.image.licenses="AGPL-3.0-only" \ + org.opencontainers.image.documentation="" \ org.opencontainers.image.ref.name="" \ - org.label-schema.docker.build="docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ + org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ maintainer="Weasy666" # Standard port on which Rocket launches @@ -81,11 +81,15 @@ RUN chown -cR www-data:www-data /srv/conduit # Install packages needed to run Conduit RUN apk add --no-cache \ ca-certificates \ + curl \ libgcc # Create a volume for the database, to persist its contents VOLUME ["/srv/conduit/.local/share/conduit"] +# Test if Conduit is still alive, uses the same endpoint as Element +HEALTHCHECK --start-period=2s CMD curl --fail -s http://localhost:8000/_matrix/client/versions || curl -k --fail -s https://localhost:8000/_matrix/client/versions || exit 1 + # Set user to www-data USER www-data # Set container home directory diff --git a/README.md b/README.md index ad13089..44ab0d6 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,14 @@ Clone the repo, build it with `cargo build --release` and call the binary ##### Using Docker -Build the docker image and run it with docker or docker-compose. [Read more](docker/README.md) +Pull and run the docker image with + +``` bash +docker pull matrixconduit/matrix-conduit:latest +docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit +``` + +Or build and run it with docker or docker-compose. [Read more](docker/README.md) #### What is it build on? diff --git a/docker-compose.yml b/docker-compose.yml index afd3699..f06eaca 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,18 +3,19 @@ version: '3' services: homeserver: - ### If you already built the Conduit image with 'docker build', then you can uncomment the - ### 'image' line and comment out the 'build' option. - # image: conduit_homeserver:latest - ### If you want meaningful labels in you built Conduit image, you should run docker-compose like this: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. + image: matrixconduit/matrix-conduit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - build: - context: . - args: - CREATED: - VERSION: - LOCAL: "false" - GIT_REF: HEAD + # build: + # context: . + # args: + # CREATED: + # VERSION: + # LOCAL: 'false' + # GIT_REF: HEAD restart: unless-stopped ports: - 8448:8000 diff --git a/docker/README.md b/docker/README.md index 5a6ecde..c569c5f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -28,10 +28,10 @@ ARG GIT_REF=HEAD To build the image you can use the following command ``` bash -docker build . 
-t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) ``` -which also will tag the resulting image as `conduit_homeserver:latest`. +which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. **Note:** it ommits the two optional `build-arg`s. @@ -40,7 +40,7 @@ which also will tag the resulting image as `conduit_homeserver:latest`. After building the image you can simply run it with ``` bash -docker run conduit_homeserver:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" +docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" ``` For detached mode, you also need to use the `-d` flag. You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. @@ -49,7 +49,7 @@ If you just want to test Conduit for a short time, you can use the `--rm` flag, ## Docker-compose -If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the `docker-compose.traefik.yml` including `docker-compose.override.traefik.yml` or the normal `docker-compose.yml` for every other reverse proxy. +If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) including [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. ### Build diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index ad1dad8..111eaa5 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -3,18 +3,19 @@ version: '3' services: homeserver: - ### If you already built the Conduit image with 'docker build', then you can uncomment the - ### 'image' line and comment out the 'build' option. - # image: conduit_homeserver:latest - ### If you want meaningful labels in you built Conduit image, you should run docker-compose like this: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. + image: matrixconduit/matrix-conduit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - build: - context: . - args: - CREATED: - VERSION: - LOCAL: false - GIT_REF: HEAD + # build: + # context: . 
+ # args: + # CREATED: + # VERSION: + # LOCAL: 'false' + # GIT_REF: HEAD restart: unless-stopped volumes: - db:/srv/conduit/.local/share/conduit From d9a29e3e5c2b8390bb1a4b99a631b3ac615b0763 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 25 Aug 2020 15:30:25 -0400 Subject: [PATCH 0268/1727] Fix state for empty key route Replace None with Some("") for state_key --- src/client_server/room.rs | 2 +- src/client_server/state.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index c0603d3..7f4a15f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -195,7 +195,7 @@ pub fn create_room_route( content: match preset { create_room::RoomPreset::PublicChat => { serde_json::to_value(guest_access::GuestAccessEventContent::new( - // In a public room a joining is the only way to access + // In a public room, joining is the only way to access guest_access::GuestAccess::Forbidden, )) .expect("event is valid, we just created it") diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 867b051..12c5cac 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -76,7 +76,7 @@ pub fn send_state_event_for_empty_key_route( &body.content, json, &body.room_id, - None, + Some("".into()), )? .0 .event_id, From 4954df3cc3b2df89a154d1d1272e093930642418 Mon Sep 17 00:00:00 2001 From: Timo Date: Tue, 25 Aug 2020 13:24:38 +0200 Subject: [PATCH 0269/1727] feat: handle txn ids --- src/client_server/message.rs | 39 ++++++++++++++++++++++++++---- src/client_server/to_device.rs | 14 +++++++++++ src/database.rs | 8 +++++- src/database/rooms.rs | 3 ++- src/database/rooms/edus.rs | 8 +++++- src/database/transaction_ids.rs | 43 +++++++++++++++++++++++++++++++++ src/pdu.rs | 4 ++- sytest/sytest-whitelist | 1 + 8 files changed, 111 insertions(+), 9 deletions(-) create mode 100644 src/database/transaction_ids.rs diff --git a/src/client_server/message.rs b/src/client_server/message.rs index d851214..844f44d 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -1,10 +1,13 @@ use super::State; -use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - r0::message::{get_message_events, send_message_event}, +use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + r0::message::{get_message_events, send_message_event}, + }, + EventId, }; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -18,6 +21,29 @@ pub fn send_message_event_route( body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // Check if this is a new transaction id + if let Some(response) = db + .transaction_ids + .existing_txnid(sender_id, device_id, &body.txn_id)? 
+ { + // The client might have sent a txnid of the /sendToDevice endpoint + // This txnid has no response associated with it + if response.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to use txn id already used for an incompatible endpoint.", + )); + } + + let event_id = EventId::try_from( + utils::string_from_bytes(&response) + .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, + ) + .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; + return Ok(send_message_event::Response { event_id }.into()); + } let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); @@ -29,6 +55,7 @@ pub fn send_message_event_route( event_type: body.event_type.clone(), content: serde_json::from_str( body.json_body + .as_ref() .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? .get(), ) @@ -41,6 +68,8 @@ pub fn send_message_event_route( &db.account_data, )?; + db.transaction_ids + .add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?; Ok(send_message_event::Response { event_id }.into()) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index ca423fe..8c06d64 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -17,6 +17,16 @@ pub fn send_event_to_device_route( body: Ruma, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let device_id = body.device_id.as_ref().expect("user is authenticated"); + + // Check if this is a new transaction id + if db + .transaction_ids + .existing_txnid(sender_id, device_id, &body.txn_id)? + .is_some() + { + return Ok(send_event_to_device::Response.into()); + } for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { @@ -52,5 +62,9 @@ pub fn send_event_to_device_route( } } + // Save transaction id with empty data + db.transaction_ids + .add_txnid(sender_id, device_id, &body.txn_id, &[])?; + Ok(send_event_to_device::Response.into()) } diff --git a/src/database.rs b/src/database.rs index 41781b9..b43cc5b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -3,6 +3,7 @@ pub mod globals; pub mod key_backups; pub mod media; pub mod rooms; +pub mod transaction_ids; pub mod uiaa; pub mod users; @@ -23,6 +24,7 @@ pub struct Database { pub account_data: account_data::AccountData, pub media: media::Media, pub key_backups: key_backups::KeyBackups, + pub transaction_ids: transaction_ids::TransactionIds, pub _db: sled::Db, } @@ -90,7 +92,8 @@ impl Database { edus: rooms::RoomEdus { readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?, roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: db.open_tree("roomid_lastprivatereadupdate")?, + roomuserid_lastprivatereadupdate: db + .open_tree("roomid_lastprivatereadupdate")?, typingid_userid: db.open_tree("typingid_userid")?, roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?, presenceid_presence: db.open_tree("presenceid_presence")?, @@ -124,6 +127,9 @@ impl Database { backupid_etag: db.open_tree("backupid_etag")?, backupkeyid_backup: db.open_tree("backupkeyid_backupmetadata")?, }, + transaction_ids: transaction_ids::TransactionIds { + userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, + }, _db: db, }) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index bb14c8a..8cfb612 100644 --- a/src/database/rooms.rs +++ 
b/src/database/rooms.rs @@ -621,7 +621,8 @@ impl Rooms { } _ => {} } - self.edus.private_read_set(&room_id, &sender, index, &globals)?; + self.edus + .private_read_set(&room_id, &sender, index, &globals)?; Ok(pdu.event_id) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index fbd3edb..d60e1f1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -92,7 +92,13 @@ impl RoomEdus { } /// Sets a private read marker at `count`. - pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64, globals: &super::super::globals::Globals) -> Result<()> { + pub fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&user_id.to_string().as_bytes()); diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs new file mode 100644 index 0000000..9485b36 --- /dev/null +++ b/src/database/transaction_ids.rs @@ -0,0 +1,43 @@ +use crate::Result; +use ruma::{DeviceId, UserId}; +use sled::IVec; + +pub struct TransactionIds { + pub(super) userdevicetxnid_response: sled::Tree, // Response can be empty (/sendToDevice) or the event id (/send) +} + +impl TransactionIds { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: &DeviceId, + txn_id: &str, + data: &[u8], + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + self.userdevicetxnid_response.insert(key, data)?; + + Ok(()) + } + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: &DeviceId, + txn_id: &str, + ) -> Result> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + // If there's no entry, this is a new transaction + Ok(self.userdevicetxnid_response.get(key)?) 
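// Editorial aside (illustration only, not part of the original patch): the key
// assembled above concatenates the three identifiers with 0xff separator bytes,
//
//     user_id bytes ++ [0xff] ++ device_id bytes ++ [0xff] ++ txn_id bytes
//
// which is unambiguous because a 0xff byte can never occur inside UTF-8 text.
// The stored value doubles as the response to replay when a transaction id is
// reused: an empty value marks a /sendToDevice transaction (nothing to return),
// while a non-empty value is the event id that /send previously answered with,
// as checked in `send_message_event_route` above.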
+ } +} diff --git a/src/pdu.rs b/src/pdu.rs index 4458423..c948fef 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -71,7 +71,9 @@ impl PduEvent { self.unsigned.insert( "redacted_because".to_owned(), - serde_json::to_string(reason).expect("PduEvent::to_string always works").into() + serde_json::to_string(reason) + .expect("PduEvent::to_string always works") + .into(), ); self.content = new_content.into(); diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index b0b2097..1585233 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -38,6 +38,7 @@ Current state appears in timeline in private history with many messages before Deleted tags appear in an incremental v2 /sync Deleting a non-existent alias should return a 404 Device messages wake up /sync +Device messages with the same txn_id are deduplicated Events come down the correct room GET /device/{deviceId} GET /device/{deviceId} gives a 404 for unknown devices From 3f4cb753eef3fb544f71a61144bdd1d9686e5bf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 27 Aug 2020 14:48:20 +0200 Subject: [PATCH 0270/1727] improvement: add remaining key backup endpoints --- Cargo.lock | 137 +++++++++++++--------------- src/client_server/backup.rs | 174 +++++++++++++++++++++++++++++++++++- src/database/key_backups.rs | 154 +++++++++++++++++++++++++++++++ src/main.rs | 7 ++ 4 files changed, 394 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a7334c..98dbac9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -107,9 +107,9 @@ checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" [[package]] name = "async-trait" -version = "0.1.37" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +checksum = "6e1a4a2f97ce50c9d0282c1468816208588441492b40d813b2e0419c22c05e7f" dependencies = [ "proc-macro2", "quote", @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" @@ -148,7 +148,7 @@ dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide 0.4.0", + "miniz_oxide 0.4.1", "object", "rustc-demangle", ] @@ -159,12 +159,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -238,9 +232,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" [[package]] name = "cfg-if" @@ -267,7 +261,7 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.3", + "base64", "directories", "http", "image", @@ -277,7 +271,7 @@ dependencies = [ 
"reqwest", "rocket", "ruma", - "rust-argon2 0.8.2", + "rust-argon2", "serde", "serde_json", "sled", @@ -298,7 +292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ "aes-gcm", - "base64 0.12.3", + "base64", "hkdf", "percent-encoding", "rand", @@ -458,9 +452,9 @@ checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" dependencies = [ "cfg-if", ] @@ -644,7 +638,7 @@ checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -788,7 +782,7 @@ dependencies = [ "itoa", "pin-project", "socket2", - "time 0.1.43", + "time 0.1.44", "tokio", "tower-service", "tracing", @@ -923,9 +917,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" +checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" [[package]] name = "lock_api" @@ -1005,9 +999,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "4d7559a8a40d0f97e1edea3220f698f78b1c5ab67532e49f68fde3910323b722" dependencies = [ "adler", ] @@ -1142,9 +1136,9 @@ checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" [[package]] name = "once_cell" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" [[package]] name = "opaque-debug" @@ -1306,9 +1300,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" [[package]] name = "proc-macro-crate" @@ -1410,13 +1404,13 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom", "redox_syscall", - "rust-argon2 0.7.0", + "rust-argon2", ] [[package]] @@ -1450,11 +1444,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" +checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" dependencies = [ - "base64 
0.12.3", + "base64", "bytes", "encoding_rs", "futures-core", @@ -1560,7 +1554,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "ruma-api", "ruma-client-api", @@ -1574,7 +1568,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "http", "percent-encoding", @@ -1589,7 +1583,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1600,7 +1594,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "assign", "http", @@ -1618,7 +1612,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-identifiers", @@ -1631,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-common", @@ -1646,7 +1640,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1657,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-api", @@ -1672,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1684,7 +1678,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro2", "quote", @@ -1695,7 +1689,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "ruma-serde", "serde", @@ -1706,7 +1700,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "form_urlencoded", "itoa", @@ -1718,33 +1712,21 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#c2adc9ecb85538505ff351dbd883c9106f651744" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ - "base64 0.12.3", + "base64", "ring", "serde_json", "untrusted", ] -[[package]] -name = "rust-argon2" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" -dependencies = [ - "base64 0.11.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - [[package]] name = "rust-argon2" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.12.3", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1767,11 +1749,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1970,9 +1952,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6" dependencies = [ "version_check", ] @@ -2061,9 +2043,9 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +checksum = "891d8d6567fe7c7f8835a3a98af4208f3846fba258c1bc3c31d6e506239f11f9" dependencies = [ "proc-macro2", "quote", @@ -2106,11 +2088,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = 
"6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] @@ -2154,9 +2137,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" [[package]] name = "tokio" @@ -2256,9 +2239,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db63662723c316b43ca36d833707cc93dff82a02ba3d7e354f342682cc8b3545" +checksum = "4f0e00789804e99b20f12bc7003ca416309d28a6f495d6af58d1e2c2842461b5" dependencies = [ "lazy_static", ] @@ -2369,6 +2352,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.67" diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index a104964..9994f19 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -3,13 +3,15 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::backup::{ - add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup, - update_backup, + add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, + delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, + get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, + get_latest_backup, update_backup, }, }; #[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use rocket::{delete, get, post, put}; #[cfg_attr( feature = "conduit_bin", @@ -95,7 +97,22 @@ pub fn get_backup_route( .into()) } -/// Add the received backup_keys to the database. +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") +)] +pub fn delete_backup_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.key_backups.delete_backup(&sender_id, &body.version)?; + + Ok(delete_backup::Response.into()) +} + +/// Add the received backup keys to the database. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys", data = "") @@ -126,6 +143,62 @@ pub fn add_backup_keys_route( .into()) } +/// Add the received backup keys to the database. +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") +)] +pub fn add_backup_key_sessions_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + for (session_id, key_data) in &body.sessions { + db.key_backups.add_key( + &sender_id, + &body.version, + &body.room_id, + &session_id, + &key_data, + &db.globals, + )? + } + + Ok(add_backup_key_sessions::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} + +/// Add the received backup key to the database. +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") +)] +pub fn add_backup_key_session_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.key_backups.add_key( + &sender_id, + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + &db.globals, + )?; + + Ok(add_backup_key_session::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys", data = "") @@ -140,3 +213,96 @@ pub fn get_backup_keys_route( Ok(get_backup_keys::Response { rooms }.into()) } + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") +)] +pub fn get_backup_key_sessions_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let sessions = db + .key_backups + .get_room(&sender_id, &body.version, &body.room_id); + + Ok(get_backup_key_sessions::Response { sessions }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") +)] +pub fn get_backup_key_session_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + let key_data = + db.key_backups + .get_session(&sender_id, &body.version, &body.room_id, &body.session_id)?; + + Ok(get_backup_key_session::Response { key_data }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/unstable/room_keys/keys", data = "") +)] +pub fn delete_backup_keys_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.key_backups.delete_all_keys(&sender_id, &body.version)?; + + Ok(delete_backup_keys::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") +)] +pub fn delete_backup_key_sessions_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.key_backups + .delete_room_keys(&sender_id, &body.version, &body.room_id)?; + + Ok(delete_backup_key_sessions::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") +)] +pub fn delete_backup_key_session_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + db.key_backups + .delete_room_key(&sender_id, &body.version, &body.room_id, &body.session_id)?; + + Ok(delete_backup_key_session::Response { + count: (db.key_backups.count_keys(sender_id, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_id, &body.version)?, + } + .into()) +} diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 5b37f1b..1ce7595 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -37,6 +37,28 @@ impl KeyBackups { Ok(version) } + pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + + self.backupid_algorithm.remove(&key)?; + self.backupid_etag.remove(&key)?; + + key.push(0xff); + + for outdated_key in self + .backupkeyid_backup + .scan_prefix(&key) + .keys() + .filter_map(|r| r.ok()) + { + self.backupkeyid_backup.remove(outdated_key)?; + } + + Ok(()) + } + pub fn update_backup( &self, user_id: &UserId, @@ -163,6 +185,7 @@ impl KeyBackups { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); + prefix.push(0xff); let mut rooms = BTreeMap::::new(); @@ -204,4 +227,135 @@ impl KeyBackups { Ok(rooms) } + + pub fn get_room( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> BTreeMap { + let mut prefix = user_id.to_string().as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(version.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + self.backupkeyid_backup + .scan_prefix(&prefix) + .map(|r| { + let (key, value) = r?; + let mut parts = key.rsplit(|&b| b == 0xff); + + let session_id = + utils::string_from_bytes(&parts.next().ok_or_else(|| { + Error::bad_database("backupkeyid_backup key is invalid.") + })?) + .map_err(|_| { + Error::bad_database("backupkeyid_backup session_id is invalid.") + })?; + + let key_data = serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyData in backupkeyid_backup is invalid.") + })?; + + Ok::<_, Error>((session_id, key_data)) + }) + .filter_map(|r| r.ok()) + .collect() + } + + pub fn get_session( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(version.as_bytes()); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(session_id.as_bytes()); + + self.backupkeyid_backup + .get(&key)? 
+ .map(|value| { + serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid.")) + }) + .transpose() + } + + pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + key.push(0xff); + + for outdated_key in self + .backupkeyid_backup + .scan_prefix(&key) + .keys() + .filter_map(|r| r.ok()) + { + self.backupkeyid_backup.remove(outdated_key)?; + } + + Ok(()) + } + + pub fn delete_room_keys( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + key.push(0xff); + + for outdated_key in self + .backupkeyid_backup + .scan_prefix(&key) + .keys() + .filter_map(|r| r.ok()) + { + self.backupkeyid_backup.remove(outdated_key)?; + } + + Ok(()) + } + + pub fn delete_room_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result<()> { + let mut key = user_id.to_string().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&version.as_bytes()); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&session_id.as_bytes()); + + for outdated_key in self + .backupkeyid_backup + .scan_prefix(&key) + .keys() + .filter_map(|r| r.ok()) + { + self.backupkeyid_backup.remove(outdated_key)?; + } + + Ok(()) + } } diff --git a/src/main.rs b/src/main.rs index bbe7c96..96d0e99 100644 --- a/src/main.rs +++ b/src/main.rs @@ -53,9 +53,16 @@ fn setup_rocket() -> rocket::Rocket { client_server::claim_keys_route, client_server::create_backup_route, client_server::update_backup_route, + client_server::delete_backup_route, client_server::get_latest_backup_route, client_server::get_backup_route, + client_server::add_backup_key_sessions_route, client_server::add_backup_keys_route, + client_server::delete_backup_key_session_route, + client_server::delete_backup_key_sessions_route, + client_server::delete_backup_keys_route, + client_server::get_backup_key_session_route, + client_server::get_backup_key_sessions_route, client_server::get_backup_keys_route, client_server::set_read_marker_route, client_server::create_typing_event_route, From f46c2d1eec808658f671a4845b0ecd39221c8826 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 26 Aug 2020 11:15:52 -0400 Subject: [PATCH 0271/1727] Fix review issues, move state-res to spec-comp branch --- Cargo.lock | 79 ++++++++++++++------------------- Cargo.toml | 2 +- src/client_server/directory.rs | 31 ++++--------- src/client_server/membership.rs | 16 +++---- src/client_server/room.rs | 1 - src/client_server/state.rs | 35 ++++++++------- src/client_server/sync.rs | 4 +- src/database/rooms.rs | 76 +++++++++++++++---------------- 8 files changed, 106 insertions(+), 138 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e5da6f..62b13d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -157,7 +157,7 @@ dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide 0.4.0", + "miniz_oxide 0.4.1", "object", "rustc-demangle", ] @@ -168,12 +168,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" -[[package]] -name = "base64" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -265,7 +259,7 @@ checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b" dependencies = [ "num-integer", "num-traits", - "time 0.1.43", + "time 0.1.44", ] [[package]] @@ -287,7 +281,7 @@ checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.3", + "base64", "directories", "http", "image", @@ -298,7 +292,7 @@ dependencies = [ "ring", "rocket", "ruma", - "rust-argon2 0.8.2", + "rust-argon2", "serde", "serde_json", "sled", @@ -320,7 +314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ "aes-gcm", - "base64 0.12.3", + "base64", "hkdf", "percent-encoding", "rand", @@ -486,9 +480,9 @@ checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" dependencies = [ "cfg-if", ] @@ -672,7 +666,7 @@ checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -816,7 +810,7 @@ dependencies = [ "itoa", "pin-project", "socket2", - "time 0.1.43", + "time 0.1.44", "tokio", "tower-service", "tracing", @@ -1057,9 +1051,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "4d7559a8a40d0f97e1edea3220f698f78b1c5ab67532e49f68fde3910323b722" dependencies = [ "adler", ] @@ -1462,13 +1456,13 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom", "redox_syscall", - "rust-argon2 0.7.0", + "rust-argon2", ] [[package]] @@ -1527,11 +1521,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" +checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" dependencies = [ - "base64 0.12.3", + "base64", "bytes", "encoding_rs", "futures-core", @@ -1813,31 +1807,19 @@ name = "ruma-signatures" version = "0.6.0-dev.1" source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ - "base64 0.12.3", + "base64", "ring", "serde_json", "untrusted", ] -[[package]] -name = "rust-argon2" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" -dependencies = [ - "base64 0.11.0", - 
"blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - [[package]] name = "rust-argon2" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.12.3", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1864,7 +1846,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -2072,9 +2054,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6" dependencies = [ "version_check", ] @@ -2088,7 +2070,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res#d93a965ad17781fa9554bb3cea71673c054b9f3f" +source = "git+https://github.com/ruma/state-res?branch=spec-comp#17958665f6592af3ef478024fd1d75c384a30e7f" dependencies = [ "itertools", "js_int", @@ -2233,11 +2215,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] @@ -2395,9 +2378,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db63662723c316b43ca36d833707cc93dff82a02ba3d7e354f342682cc8b3545" +checksum = "4f0e00789804e99b20f12bc7003ca416309d28a6f495d6af58d1e2c2842461b5" dependencies = [ "lazy_static", ] @@ -2550,6 +2533,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.67" diff --git a/Cargo.toml b/Cargo.toml index 78d8f76..15cee72 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ thiserror = "1.0.19" # Used for conduit::Error type image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images base64 = "0.12.3" # Used to encode server public key # state-res = { path = "../../state-res" } -state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0" } +state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0", branch = "spec-comp" } ring = "0.16.15" [features] diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 5e03274..3b10686 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -63,26 +63,11 @@ pub async fn get_public_rooms_route( db: State<'_, 
Database>, body: Ruma, ) -> ConduitResult { - let Ruma { - body: - get_public_rooms::IncomingRequest { - limit, - server, - since, - }, - .. - } = body; - - let get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } = get_public_rooms_filtered_helper( + let response = get_public_rooms_filtered_helper( &db, - server.as_deref(), - limit, - since.as_deref(), + body.body.server.as_deref(), + body.body.limit, + body.body.since.as_deref(), None, // This is not used None, // This is not used ) @@ -90,10 +75,10 @@ pub async fn get_public_rooms_route( .0; Ok(get_public_rooms::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, } .into()) } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3fa3b6a..90683e6 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -483,12 +483,12 @@ async fn join_room_by_id_helper( .await?; dbg!(&send_join_response); - // todo!("Take send_join_response and 'create' the room using that data"); let mut event_map = send_join_response .room_state .state .iter() + .chain(send_join_response.room_state.auth_chain.iter()) .map(|pdu| { pdu.deserialize() .map(StateEvent::Full) @@ -497,14 +497,6 @@ async fn join_room_by_id_helper( .collect::, _>>() .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; - let auth_chain = send_join_response - .room_state - .auth_chain - .iter() - .flat_map(|pdu| pdu.deserialize().ok()) - .map(StateEvent::Full) - .collect::>(); - let power_events = event_map .values() .filter(|pdu| pdu.is_power_event()) @@ -518,9 +510,11 @@ async fn join_room_by_id_helper( &power_events, &mut event_map, &db.rooms, - &auth_chain // if we only use it here just build this list in the first place + &send_join_response + .room_state + .auth_chain .iter() - .map(|pdu| pdu.event_id()) + .filter_map(|pdu| Some(StateEvent::Full(pdu.deserialize().ok()?).event_id())) .collect::>(), ); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 7f4a15f..3ee21b6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -195,7 +195,6 @@ pub fn create_room_route( content: match preset { create_room::RoomPreset::PublicChat => { serde_json::to_value(guest_access::GuestAccessEventContent::new( - // In a public room, joining is the only way to access guest_access::GuestAccess::Forbidden, )) .expect("event is valid, we just created it") diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 12c5cac..e7d2bcf 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,5 +1,5 @@ use super::State; -use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{AnyStateEventContent, EventContent}, - RoomId, UserId, + EventId, RoomId, UserId, }; #[cfg(feature = "conduit_bin")] @@ -33,13 +33,16 @@ pub fn send_state_event_for_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - send_state_event_for_key_helper( - &db, - sender_id, - &body.content, - content, - &body.room_id, - Some(body.state_key.clone()), + Ok( + send_state_event_for_key::Response::new(send_state_event_for_key_helper( + &db, + sender_id, + 
&body.content, + content, + &body.room_id, + Some(body.state_key.clone()), + )?) + .into(), ) } @@ -67,8 +70,8 @@ pub fn send_state_event_for_empty_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - Ok(send_state_event_for_empty_key::Response::new( - send_state_event_for_key_helper( + Ok( + send_state_event_for_empty_key::Response::new(send_state_event_for_key_helper( &db, sender_id .as_ref() @@ -77,11 +80,9 @@ pub fn send_state_event_for_empty_key_route( json, &body.room_id, Some("".into()), - )? - .0 - .event_id, + )?) + .into(), ) - .into()) } #[cfg_attr( @@ -183,7 +184,7 @@ pub fn send_state_event_for_key_helper( json: serde_json::Value, room_id: &RoomId, state_key: Option, -) -> ConduitResult { +) -> Result { let sender_id = sender; if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = content { @@ -224,5 +225,5 @@ pub fn send_state_event_for_key_helper( &db.account_data, )?; - Ok(send_state_event_for_key::Response::new(event_id).into()) + Ok(event_id) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index ccb25d1..7432960 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -475,7 +475,7 @@ pub async fn sync_events_route( } for user_id in left_encrypted_users { - let user_target_encrypted = db + let still_share_encrypted_room = db .rooms .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) .filter_map(|r| r.ok()) @@ -490,7 +490,7 @@ pub async fn sync_events_route( .all(|encrypted| !encrypted); // If the user doesn't share an encrypted room with the target anymore, we need to tell // them - if user_target_encrypted { + if still_share_encrypted_room { device_list_left.insert(user_id); } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 575a2bf..66af736 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -4,7 +4,6 @@ pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; use log::error; -// TODO if ruma-signatures re-exports `use ruma::signatures::digest;` use ring::digest; use ruma::{ api::client::error::ErrorKind, @@ -96,9 +95,9 @@ impl StateStore for Rooms { } impl Rooms { - /// Builds a `StateMap` by iterating over all keys that start - /// with `state_hash`, this gives the full state at event "x". - pub fn get_statemap_by_hash(&self, state_hash: StateHashId) -> Result> { + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, this gives the full state for the given state_hash. + pub fn state_full(&self, state_hash: StateHashId) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -242,8 +241,6 @@ impl Rooms { /// Generate a new StateHash. /// /// A unique hash made from hashing the current states pduid's. - /// Because `append_state_pdu` handles the empty state db case it does not - /// have to be here. fn new_state_hash_id(&self, room_id: &RoomId) -> Result { // Use hashed roomId as the first StateHash key for first state event in room if self @@ -281,7 +278,7 @@ impl Rooms { /// Checks if a room exists. pub fn exists(&self, room_id: &RoomId) -> Result { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); // Look for PDUs in that room. 
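Editorial aside, not part of the patch series: the hunks in this file all apply the same two ideas, namely building sled keys by joining identifiers with a `0xff` separator, and calling `as_bytes()` directly on the identifier types instead of paying for an intermediate `to_string()` allocation. A minimal, self-contained sketch of that key layout, using a hypothetical helper name and plain `&str` parameters rather than the ruma identifier types:

``` rust
/// Hypothetical illustration of the `userroomid_*` key layout used by these trees:
/// the identifiers are joined with a 0xff byte, which never occurs in UTF-8 text
/// and therefore cannot collide with the identifier contents themselves.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff); // separator
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn main() {
    let key = userroom_key("@alice:example.com", "!room:example.com");
    // "@alice:example.com" is 18 bytes long, so the separator sits at index 18.
    assert_eq!(key[18], 0xff);
}
```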
@@ -300,7 +297,7 @@ impl Rooms { let mut hashmap = HashMap::new(); for pdu in self.roomstateid_pduid - .scan_prefix(&room_id.to_string().as_bytes()) + .scan_prefix(&room_id.as_bytes()) .values() .map(|value| { Ok::<_, Error>( @@ -322,13 +319,13 @@ impl Rooms { Ok(hashmap) } - /// Returns the all state entries for this type. + /// Returns all state entries for this type. pub fn room_state_type( &self, room_id: &RoomId, event_type: &EventType, ) -> Result> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&event_type.to_string().as_bytes()); @@ -357,7 +354,7 @@ impl Rooms { Ok(hashmap) } - /// Returns a single PDU in `room_id` with key (`event_type`, `state_key`). + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). pub fn room_state_get( &self, room_id: &RoomId, @@ -459,7 +456,7 @@ impl Rooms { /// Returns the leaf pdus of a room. pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut events = Vec::new(); @@ -582,7 +579,7 @@ impl Rooms { .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) { - let mut key = pdu.room_id.to_string().as_bytes().to_vec(); + let mut key = pdu.room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(word.as_bytes()); key.push(0xff); @@ -752,7 +749,10 @@ impl Rooms { }) .collect::>>()?, ) - .ok_or(Error::Conflict("Found incoming PDU with invalid data."))?, + .map_err(|e| { + log::error!("{}", e); + Error::Conflict("Found incoming PDU with invalid data.") + })?, EventType::RoomCreate => prev_events.is_empty(), // Not allow any of the following events if the sender is not joined. _ if sender_membership != member::MembershipState::Join => false, @@ -982,13 +982,13 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result<()> { let membership = member_content.membership; - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.to_string().as_bytes().to_vec(); + let mut roomuser_id = room_id.as_bytes().to_vec(); roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.to_string().as_bytes()); + roomuser_id.extend_from_slice(user_id.as_bytes()); match &membership { member::MembershipState::Join => { @@ -1051,9 +1051,9 @@ impl Rooms { /// Makes a user forget a room. 
pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); self.userroomid_left.remove(userroom_id)?; @@ -1069,8 +1069,8 @@ impl Rooms { if let Some(room_id) = room_id { // New alias self.alias_roomid - .insert(alias.alias(), &*room_id.to_string())?; - let mut aliasid = room_id.to_string().as_bytes().to_vec(); + .insert(alias.alias(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(aliasid, &*alias.alias())?; } else { @@ -1105,7 +1105,7 @@ impl Rooms { } pub fn room_aliases(&self, room_id: &RoomId) -> impl Iterator> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.aliasid_alias @@ -1119,16 +1119,16 @@ impl Rooms { pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { if public { - self.publicroomids.insert(room_id.to_string(), &[])?; + self.publicroomids.insert(room_id.as_bytes(), &[])?; } else { - self.publicroomids.remove(room_id.to_string())?; + self.publicroomids.remove(room_id.as_bytes())?; } Ok(()) } pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.contains_key(room_id.to_string())?) + Ok(self.publicroomids.contains_key(room_id.as_bytes())?) } pub fn public_rooms(&self) -> impl Iterator> { @@ -1147,7 +1147,7 @@ impl Rooms { room_id: &RoomId, search_string: &str, ) -> Result<(impl Iterator + 'a, Vec)> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let words = search_string @@ -1233,7 +1233,7 @@ impl Rooms { /// Returns an iterator over all joined members of a room. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_joined - .scan_prefix(room_id.to_string()) + .scan_prefix(room_id.as_bytes()) .keys() .map(|key| { Ok(UserId::try_from( @@ -1254,7 +1254,7 @@ impl Rooms { /// Returns an iterator over all invited members of a room. pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_invited - .scan_prefix(room_id.to_string()) + .scan_prefix(room_id.as_bytes()) .keys() .map(|key| { Ok(UserId::try_from( @@ -1275,7 +1275,7 @@ impl Rooms { /// Returns an iterator over all rooms this user joined. pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_joined - .scan_prefix(user_id.to_string()) + .scan_prefix(user_id.as_bytes()) .keys() .map(|key| { Ok(RoomId::try_from( @@ -1296,7 +1296,7 @@ impl Rooms { /// Returns an iterator over all rooms a user was invited to. pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_invited - .scan_prefix(&user_id.to_string()) + .scan_prefix(&user_id.as_bytes()) .keys() .map(|key| { Ok(RoomId::try_from( @@ -1317,7 +1317,7 @@ impl Rooms { /// Returns an iterator over all rooms a user left. 
pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_left - .scan_prefix(&user_id.to_string()) + .scan_prefix(&user_id.as_bytes()) .keys() .map(|key| { Ok(RoomId::try_from( @@ -1336,25 +1336,25 @@ impl Rooms { } pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_joined.get(userroom_id)?.is_some()) } pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_invited.get(userroom_id)?.is_some()) } pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_left.get(userroom_id)?.is_some()) } From 3b40f3d60ef9839c72763169f6ae1daed7b04895 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 27 Aug 2020 16:10:20 -0400 Subject: [PATCH 0272/1727] Update state-res crate --- Cargo.lock | 2 +- src/database/rooms.rs | 73 +++++++++++++++++++++++++------------------ 2 files changed, 44 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62b13d0..eebaddc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2070,7 +2070,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=spec-comp#17958665f6592af3ef478024fd1d75c384a30e7f" +source = "git+https://github.com/ruma/state-res?branch=spec-comp#394d26744a6586ccdc01838964bb27dab289eee5" dependencies = [ "itertools", "js_int", diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 66af736..ee070b3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -19,13 +19,12 @@ use ruma::{ EventId, Raw, RoomAliasId, RoomId, UserId, }; use sled::IVec; -use state_res::{event_auth, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, mem, - result::Result as StdResult, }; /// The unique identifier of each state group. @@ -67,28 +66,32 @@ pub struct Rooms { } impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> StdResult { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result { let pid = self .eventid_pduid .get(event_id.as_bytes()) - .map_err(|e| e.to_string())? - .ok_or_else(|| "PDU via room_id and event_id not found in the db.".to_owned())?; + .map_err(StateError::custom)? + .ok_or_else(|| { + StateError::NotFound("PDU via room_id and event_id not found in the db.".into()) + })?; serde_json::from_slice( &self .pduid_pdu .get(pid) - .map_err(|e| e.to_string())? - .ok_or_else(|| "PDU via pduid not found in db.".to_owned())?, + .map_err(StateError::custom)? 
+ .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) - .map_err(|e| e.to_string()) + .map_err(Into::into) .and_then(|pdu: StateEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == Some(room_id) { Ok(pdu) } else { - Err("Found PDU for incorrect room in db.".into()) + Err(StateError::NotFound( + "Found PDU for incorrect room in db.".into(), + )) } }) } @@ -732,27 +735,37 @@ impl Rooms { // Don't allow encryption events when it's disabled !globals.encryption_disabled() } - EventType::RoomMember => event_auth::is_membership_change_allowed( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, - &auth_events - .iter() - .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res()?)) - }) - .collect::>>()?, - ) - .map_err(|e| { - log::error!("{}", e); - Error::Conflict("Found incoming PDU with invalid data.") - })?, + EventType::RoomMember => { + let prev_event = self + .get_pdu(prev_events.iter().next().ok_or(Error::BadRequest( + ErrorKind::Unknown, + "Membership can't be the first event", + ))?)? + .map(|pdu| pdu.convert_for_state_res()) + .transpose()?; + event_auth::valid_membership_change( + // TODO this is a bit of a hack but not sure how to have a type + // declared in `state_res` crate easily convert to/from conduit::PduEvent + Requester { + prev_event_ids: prev_events.to_owned(), + room_id: &room_id, + content: &content, + state_key: Some(state_key.to_owned()), + sender: &sender, + }, + prev_event.as_ref(), + &auth_events + .iter() + .map(|((ty, key), pdu)| { + Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res()?)) + }) + .collect::>>()?, + ) + .map_err(|e| { + log::error!("{}", e); + Error::Conflict("Found incoming PDU with invalid data.") + })? + } EventType::RoomCreate => prev_events.is_empty(), // Not allow any of the following events if the sender is not joined. _ if sender_membership != member::MembershipState::Join => false, From 2a63d0955ab06d1d1cbcae8fdf078d02a49fa65b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 30 Aug 2020 16:08:47 -0400 Subject: [PATCH 0273/1727] Sort and authenticate the events from /send_join response --- src/client_server/membership.rs | 49 +++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 90683e6..6d1931b 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -497,17 +497,18 @@ async fn join_room_by_id_helper( .collect::, _>>() .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; - let power_events = event_map + let control_events = event_map .values() .filter(|pdu| pdu.is_power_event()) .map(|pdu| pdu.event_id()) .collect::>(); - // TODO these events are not guaranteed to be sorted but they are resolved, do - // we need the auth_chain - let sorted_power_events = state_res::StateResolution::reverse_topological_power_sort( + // These events are not guaranteed to be sorted but they are resolved according to spec + // we auth them anyways to weed out faulty/malicious server. The following is basically the + // full state resolution algorithm. 
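// Editorial aside (not part of the patch): the steps below roughly follow the
// ordering phase of state resolution v2:
//   1. `reverse_topological_power_sort` orders the control ("power") events;
//   2. `iterative_auth_check` validates each control event against the partial
//      state accumulated so far, dropping events that fail authorization;
//   3. `mainline_sort` orders the remaining events along the power-level mainline;
//   4. a second `iterative_auth_check` validates those remaining events before
//      they are appended to the room below.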
+ let sorted_control_events = state_res::StateResolution::reverse_topological_power_sort( &room_id, - &power_events, + &control_events, &mut event_map, &db.rooms, &send_join_response @@ -518,26 +519,31 @@ async fn join_room_by_id_helper( .collect::>(), ); - // TODO we may be able to skip this since they are resolved according to spec - let resolved_power = state_res::StateResolution::iterative_auth_check( + // Auth check each event against the "partial" state created by the preceding events + let resolved_control_events = state_res::StateResolution::iterative_auth_check( room_id, &RoomVersionId::Version6, - &sorted_power_events, - &BTreeMap::new(), // unconflicted events + &sorted_control_events, + &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, &db.rooms, ) .expect("iterative auth check failed on resolved events"); - // TODO do we need to dedup them + // This removes the control events that failed auth, leaving the resolved + // to be mainline sorted let events_to_sort = event_map .keys() - .filter(|id| !sorted_power_events.contains(id)) + .filter(|id| { + !sorted_control_events.contains(id) + || resolved_control_events.values().any(|rid| *id == rid) + }) .cloned() .collect::>(); - let power_level = resolved_power.get(&(EventType::RoomPowerLevels, Some("".into()))); - + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".into()))); + // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, &events_to_sort, @@ -546,7 +552,22 @@ async fn join_room_by_id_helper( &db.rooms, ); - for ev_id in &sorted_event_ids { + let resolved_events = state_res::StateResolution::iterative_auth_check( + room_id, + &RoomVersionId::Version6, + &sorted_event_ids, + &resolved_control_events, + &mut event_map, + &db.rooms, + ) + .expect("iterative auth check failed on resolved events"); + + // filter the events that failed the auth check keeping the remaining events + // sorted correctly + for ev_id in sorted_event_ids + .iter() + .filter(|id| resolved_events.values().any(|rid| rid == *id)) + { // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) From df55e8ed0b130df3f9197be59196fc7b0590c5f8 Mon Sep 17 00:00:00 2001 From: Faelar Date: Thu, 6 Aug 2020 13:21:53 +0200 Subject: [PATCH 0274/1727] Add room upgrade. 
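This adds a POST /_matrix/client/r0/rooms/{roomId}/upgrade route. The handler first checks that the requested room version is one of the supported ones (v5 or v6) and rejects anything else with M_UNSUPPORTED_ROOM_VERSION. It then creates a replacement room, sends an m.room.tombstone event into the old room pointing at it, and writes an m.room.create event into the new room whose room_version is the requested one, whose federate flag is copied from the old room's create event, and whose predecessor references the old room id together with the tombstone event id. After that it joins the sender, copies the recommended transferable state events (server ACL, encryption, name, avatar, topic, guest access, history visibility, join rules, power levels), moves any local aliases over to the new room, and finally raises events_default and invite in the old room's power levels to max(50, users_default + 1) so that ordinary users can no longer send events or invite there. Clients call it with a body such as {"new_version": "6"} (example value) and get the replacement_room id back in the response.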
--- src/client_server/room.rs | 199 +++++++++++++++++++++++++++++++++++++- src/database.rs | 1 + src/database/rooms.rs | 128 ++++++++++++++++++++++++ src/main.rs | 1 + sytest/sytest-whitelist | 1 + 5 files changed, 327 insertions(+), 3 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index b5f1529..1b43873 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -3,15 +3,15 @@ use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::room::{self, create_room, get_room_event}, + r0::room::{self, create_room, get_room_event, upgrade_room}, }, events::{ room::{guest_access, history_visibility, join_rules, member, name, topic}, EventType, }, - RoomAliasId, RoomId, RoomVersionId, + Raw, RoomAliasId, RoomId, RoomVersionId, }; -use std::{collections::BTreeMap, convert::TryFrom}; +use std::{cmp::max, collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -344,3 +344,196 @@ pub fn get_room_event_route( } .into()) } + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") +)] +pub fn upgrade_room_route( + db: State<'_, Database>, + body: Ruma, + _room_id: String, +) -> ConduitResult { + let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + + // Validate the room version requested + let new_version = + RoomVersionId::try_from(body.new_version.clone()).expect("invalid room version id"); + + if !matches!( + new_version, + RoomVersionId::Version5 | RoomVersionId::Version6 + ) { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "This server does not support that room version.", + )); + } + + // Create a replacement room + let replacement_room = RoomId::new(db.globals.server_name()); + + // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further + // Fail if the sender does not have the required permissions + let tombstone_event_id = db.rooms.append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomTombstone, + content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent { + body: "This room has been replaced".to_string(), + replacement_room: replacement_room.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Get the old room federations status + let federate = serde_json::from_value::>( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .content, + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid room event in database."))? 
+ .federate; + + // Use the m.room.tombstone event as the predecessor + let predecessor = Some(ruma::events::room::create::PreviousRoom::new( + body.room_id.clone(), + tombstone_event_id, + )); + + // Send a m.room.create event containing a predecessor field and the applicable room_version + let mut create_event_content = + ruma::events::room::create::CreateEventContent::new(sender_id.clone()); + create_event_content.federate = federate; + create_event_content.room_version = new_version; + create_event_content.predecessor = predecessor; + + db.rooms.append_pdu( + PduBuilder { + room_id: replacement_room.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomCreate, + content: serde_json::to_value(create_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Join the new room + db.rooms.append_pdu( + PduBuilder { + room_id: replacement_room.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + + // Recommended transferable state events list from the specs + let transferable_state_events = vec![ + EventType::RoomServerAcl, + EventType::RoomEncryption, + EventType::RoomName, + EventType::RoomAvatar, + EventType::RoomTopic, + EventType::RoomGuestAccess, + EventType::RoomHistoryVisibility, + EventType::RoomJoinRules, + EventType::RoomPowerLevels, + ]; + + // Replicate transferable state events to the new room + for event_type in transferable_state_events { + let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { + Some(v) => v.content.clone(), + None => continue, // Skipping missing events. + }; + + db.rooms.append_pdu( + PduBuilder { + room_id: replacement_room.clone(), + sender: sender_id.clone(), + event_type, + content: event_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + )?; + } + + // Moves any local aliases to the new room + for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { + db.rooms + .set_alias(&alias, Some(&replacement_room), &db.globals)?; + } + + // Get the old room power levels + let mut power_levels_event_content = + serde_json::from_value::>( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? + .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
+ .content, + ) + .expect("database contains invalid PDU") + .deserialize() + .map_err(|_| Error::bad_database("Invalid room event in database."))?; + + // Setting events_default and invite to the greater of 50 and users_default + 1 + let new_level = max( + 50.into(), + power_levels_event_content.users_default + 1.into(), + ); + power_levels_event_content.events_default = new_level; + power_levels_event_content.invite = new_level; + + // Modify the power levels in the old room to prevent sending of events and inviting new users + db.rooms + .append_pdu( + PduBuilder { + room_id: body.room_id.clone(), + sender: sender_id.clone(), + event_type: EventType::RoomPowerLevels, + content: serde_json::to_value(power_levels_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &db.globals, + &db.account_data, + ) + .ok(); + + // Return the replacement room id + Ok(upgrade_room::Response { replacement_room }.into()) +} diff --git a/src/database.rs b/src/database.rs index b43cc5b..2bb75a5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -112,6 +112,7 @@ impl Database { userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, + roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, userroomid_invited: db.open_tree("userroomid_invited")?, roomuserid_invited: db.open_tree("roomuserid_invited")?, userroomid_left: db.open_tree("userroomid_left")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8cfb612..eee47f3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -38,6 +38,7 @@ pub struct Rooms { pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, + pub(super) roomuseroncejoinedids: sled::Tree, pub(super) userroomid_invited: sled::Tree, pub(super) roomuserid_invited: sled::Tree, pub(super) userroomid_left: sled::Tree, @@ -782,6 +783,104 @@ impl Rooms { match &membership { member::MembershipState::Join => { + // Check if the user never joined this room + if !self.once_joined(&user_id, &room_id)? { + // Add the user ID to the join list then + self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + + // Check if the room has a predecessor + if let Some(predecessor) = serde_json::from_value::< + Raw, + >( + self.room_state_get(&room_id, &EventType::RoomCreate, "")? + .ok_or_else(|| { + Error::bad_database("Found room without m.room.create event.") + })? + .content, + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid room event in database."))? + .predecessor + { + // Copy user settings from predecessor to the current room: + + // - Push rules + // + // TODO: finish this once push rules are implemented. + // + // let mut push_rules_event_content = account_data + // .get::( + // None, + // user_id, + // EventType::PushRules, + // )?; + // + // NOTE: find where `predecessor.room_id` match + // and update to `room_id`. + // + // account_data + // .update( + // None, + // user_id, + // EventType::PushRules, + // &push_rules_event_content, + // globals, + // ) + // .ok(); + + // - Tags + if let Some(basic_event) = account_data.get::( + Some(&predecessor.room_id), + user_id, + EventType::Tag, + )? 
{ + let tag_event_content = basic_event.content; + + account_data + .update( + Some(room_id), + user_id, + EventType::Tag, + &tag_event_content, + globals, + ) + .ok(); + }; + + // - Direct chat + if let Some(basic_event) = account_data + .get::( + None, + user_id, + EventType::Direct, + )? + { + let mut direct_event_content = basic_event.content; + let mut room_ids_updated = false; + + for room_ids in direct_event_content.0.values_mut() { + if room_ids.iter().any(|r| r == &predecessor.room_id) { + room_ids.push(room_id.clone()); + room_ids_updated = true; + } + } + + if room_ids_updated { + account_data + .update( + None, + user_id, + EventType::Direct, + &direct_event_content, + globals, + ) + .ok(); + } + }; + } + } + self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invited.remove(&userroom_id)?; @@ -1042,6 +1141,27 @@ impl Rooms { }) } + /// Returns an iterator over all User IDs who ever joined a room. + pub fn room_useroncejoined(&self, room_id: &RoomId) -> impl Iterator> { + self.roomuseroncejoinedids + .scan_prefix(room_id.to_string()) + .keys() + .map(|key| { + Ok(UserId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in room_useroncejoined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid."))?) + }) + } + /// Returns an iterator over all invited members of a room. pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { self.roomuserid_invited @@ -1126,6 +1246,14 @@ impl Rooms { }) } + pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + + Ok(self.roomuseroncejoinedids.get(userroom_id)?.is_some()) + } + pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.to_string().as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/main.rs b/src/main.rs index 96d0e99..eb060e3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -118,6 +118,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_key_changes_route, client_server::get_pushers_route, client_server::set_pushers_route, + client_server::upgrade_room_route, server_server::well_known_server, server_server::get_server_version, server_server::get_server_keys, diff --git a/sytest/sytest-whitelist b/sytest/sytest-whitelist index 1585233..e1f4e5c 100644 --- a/sytest/sytest-whitelist +++ b/sytest/sytest-whitelist @@ -89,6 +89,7 @@ POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room POST /rooms/:room_id/state/m.room.name sets name POST /rooms/:room_id/state/m.room.topic sets topic +POST /rooms/:room_id/upgrade can upgrade a room version POSTed media can be thumbnailed PUT /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} updates device fields From 515465f9004a285cb61ea093ea88ec55cf5bec8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 31 Aug 2020 13:23:39 +0200 Subject: [PATCH 0275/1727] fix: make element not show "unknown user" warning The 404 error for /profile in the spec says "There is no profile information for this user or this user does not exist.", but Element assumes every 404 is a user that does not exist. 
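The profile lookup now decides purely on db.users.exists(): an unknown user id still yields the 404, while an existing user with neither displayname nor avatar_url set gets a normal 200 response with both fields absent.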
--- src/client_server/profile.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 1313db7..ebcc7eb 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -217,11 +217,8 @@ pub fn get_profile_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let avatar_url = db.users.avatar_url(&body.user_id)?; - let displayname = db.users.displayname(&body.user_id)?; - - if avatar_url.is_none() && displayname.is_none() { - // Return 404 if we don't have a profile for this id + if !db.users.exists(&body.user_id)? { + // Return 404 if this user doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, "Profile was not found.", @@ -229,8 +226,8 @@ pub fn get_profile_route( } Ok(get_profile::Response { - avatar_url, - displayname, + avatar_url: db.users.avatar_url(&body.user_id)?, + displayname: db.users.displayname(&body.user_id)?, } .into()) } From 698e44a73237c6db520786c6248df20543aae47f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 1 Sep 2020 13:07:32 +0200 Subject: [PATCH 0276/1727] Fix /upgrade account data problems --- src/database/rooms.rs | 47 ++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index eee47f3..22e61e6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -804,7 +804,6 @@ impl Rooms { .predecessor { // Copy user settings from predecessor to the current room: - // - Push rules // // TODO: finish this once push rules are implemented. @@ -829,37 +828,27 @@ impl Rooms { // ) // .ok(); - // - Tags - if let Some(basic_event) = account_data.get::( + // Copy old tags to new room + if let Some(tag_event) = account_data.get::( Some(&predecessor.room_id), user_id, EventType::Tag, )? { - let tag_event_content = basic_event.content; - account_data - .update( - Some(room_id), - user_id, - EventType::Tag, - &tag_event_content, - globals, - ) + .update(Some(room_id), user_id, EventType::Tag, &tag_event, globals) .ok(); }; - // - Direct chat - if let Some(basic_event) = account_data + // Copy direct chat flag + if let Some(mut direct_event) = account_data .get::( - None, - user_id, - EventType::Direct, - )? - { - let mut direct_event_content = basic_event.content; + None, + user_id, + EventType::Direct, + )? 
{ let mut room_ids_updated = false; - for room_ids in direct_event_content.0.values_mut() { + for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { room_ids.push(room_id.clone()); room_ids_updated = true; @@ -867,15 +856,13 @@ impl Rooms { } if room_ids_updated { - account_data - .update( - None, - user_id, - EventType::Direct, - &direct_event_content, - globals, - ) - .ok(); + account_data.update( + None, + user_id, + EventType::Direct, + &direct_event, + globals, + )?; } }; } From 1e8fbd8d50c1bf0fe80cb77b8df8b5a45c3698f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 8 Sep 2020 17:32:03 +0200 Subject: [PATCH 0277/1727] Update ruma version --- Cargo.lock | 18 +--------- Cargo.toml | 9 ++--- src/client_server/account.rs | 10 +++--- src/client_server/alias.rs | 8 ++--- src/client_server/backup.rs | 10 +++--- src/client_server/config.rs | 4 +-- src/client_server/context.rs | 2 +- src/client_server/device.rs | 8 ++--- src/client_server/directory.rs | 14 ++++---- src/client_server/filter.rs | 2 +- src/client_server/keys.rs | 18 +++++----- src/client_server/media.rs | 8 ++--- src/client_server/membership.rs | 32 +++++++++--------- src/client_server/message.rs | 8 ++--- src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 10 +++--- src/client_server/read_marker.rs | 4 +-- src/client_server/redact.rs | 2 +- src/client_server/room.rs | 51 ++++++++++------------------- src/client_server/search.rs | 22 ++++++++----- src/client_server/session.rs | 2 +- src/client_server/state.rs | 6 ++-- src/client_server/sync.rs | 2 +- src/client_server/tag.rs | 6 ++-- src/client_server/to_device.rs | 2 +- src/client_server/typing.rs | 8 ++--- src/client_server/user_directory.rs | 2 +- src/database/media.rs | 4 +-- src/database/users.rs | 6 ++-- src/error.rs | 6 ++-- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 22 ++++++++----- src/server_server.rs | 6 ++-- 33 files changed, 147 insertions(+), 169 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eebaddc..c88c578 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1631,7 +1631,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1647,7 +1646,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "http", "percent-encoding", @@ -1662,7 +1660,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1673,7 +1670,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "ruma-api", "ruma-common", @@ -1686,7 +1682,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "assign", "http", @@ -1705,9 +1700,9 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", + "ruma-api", "ruma-identifiers", "ruma-serde", "serde", @@ -1718,7 +1713,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", "ruma-common", @@ -1733,7 +1727,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1744,7 +1737,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "js_int", "ruma-api", @@ -1759,7 +1751,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1771,7 +1762,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "proc-macro2", "quote", @@ -1782,18 +1772,14 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ - "ruma-serde", "serde", - "serde_json", "strum", ] [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "form_urlencoded", "itoa", @@ -1805,7 +1791,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=aff914050eb297bd82b8aafb12158c88a9e480e1#aff914050eb297bd82b8aafb12158c88a9e480e1" dependencies = [ "base64", "ring", @@ -2070,7 +2055,6 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=spec-comp#394d26744a6586ccdc01838964bb27dab289eee5" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 15cee72..5d35433 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,8 +15,9 @@ edition = "2018" # TODO: This can become optional as soon as proper configs are supported #rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } - -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } # Used for matrix spec type definitions and helpers +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", 
"federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } # Used for matrix spec type definitions and helpers +#ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers +ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries @@ -31,8 +32,8 @@ reqwest = "0.10.6" # Used to send requests thiserror = "1.0.19" # Used for conduit::Error type image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images base64 = "0.12.3" # Used to encode server public key -# state-res = { path = "../../state-res" } -state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0", branch = "spec-comp" } +#state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0", branch = "spec-comp" } +state-res = { path = "../state-res" } ring = "0.16.15" [features] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 9e52f6d..cb77a15 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -33,7 +33,7 @@ const GUEST_NAME_LENGTH: usize = 10; )] pub fn get_register_available_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { // Validate user id let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) @@ -75,7 +75,7 @@ pub fn get_register_available_route( )] pub fn register_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { if db.globals.registration_disabled() { return Err(Error::BadRequest( @@ -84,7 +84,7 @@ pub fn register_route( )); } - let is_guest = matches!(body.kind, Some(RegistrationKind::Guest)); + let is_guest = body.kind == RegistrationKind::Guest; let mut missing_username = false; @@ -223,7 +223,7 @@ pub fn register_route( )] pub fn change_password_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -305,7 +305,7 @@ pub fn whoami_route(body: Ruma) -> ConduitResult, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 669f558..1d30261 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -20,7 +20,7 @@ use rocket::{delete, get, put}; )] pub fn create_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { if db.rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); @@ -38,7 +38,7 @@ pub fn create_alias_route( )] pub fn delete_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; @@ -51,7 +51,7 @@ pub fn delete_alias_route( )] pub async fn get_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> 
ConduitResult { get_alias_helper(&db, &body.room_alias).await } @@ -65,7 +65,7 @@ pub async fn get_alias_helper( &db, room_alias.server_name().to_string(), federation::query::get_room_information::v1::Request { - room_alias: room_alias.to_string(), + room_alias, }, ) .await?; diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index a104964..8966c01 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -33,7 +33,7 @@ pub fn create_backup_route( )] pub fn update_backup_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); db.key_backups @@ -75,7 +75,7 @@ pub fn get_latest_backup_route( )] pub fn get_backup_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let algorithm = db @@ -90,7 +90,7 @@ pub fn get_backup_route( algorithm, count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_id, &body.version)?, - version: body.version.clone(), + version: body.version.to_owned(), } .into()) } @@ -102,7 +102,7 @@ pub fn get_backup_route( )] pub fn add_backup_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -132,7 +132,7 @@ pub fn add_backup_keys_route( )] pub fn get_backup_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/config.rs b/src/client_server/config.rs index baa9381..515ad16 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -18,7 +18,7 @@ use rocket::{get, put}; )] pub fn set_global_account_data_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -49,7 +49,7 @@ pub fn set_global_account_data_route( )] pub fn get_global_account_data_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 7b1fad9..9593726 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -12,7 +12,7 @@ use rocket::get; )] pub fn get_context_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 89033f0..6352d0d 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -37,7 +37,7 @@ pub fn get_devices_route( )] pub fn get_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, _device_id: String, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -56,7 +56,7 @@ pub fn get_device_route( )] pub fn update_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, _device_id: String, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -80,7 +80,7 @@ pub fn update_device_route( )] pub fn delete_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, _device_id: String, ) -> ConduitResult { let sender_id = 
body.sender_id.as_ref().expect("user is authenticated"); @@ -127,7 +127,7 @@ pub fn delete_device_route( )] pub fn delete_devices_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 3b10686..34feb71 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -14,7 +14,7 @@ use ruma::{ }, federation, }, - directory::{Filter, PublicRoomsChunk, RoomNetwork}, + directory::{IncomingFilter, PublicRoomsChunk, IncomingRoomNetwork}, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, @@ -31,7 +31,7 @@ use rocket::{get, post, put}; )] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let Ruma { body: @@ -61,7 +61,7 @@ pub async fn get_public_rooms_filtered_route( )] pub async fn get_public_rooms_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let response = get_public_rooms_filtered_helper( &db, @@ -89,7 +89,7 @@ pub async fn get_public_rooms_route( )] pub async fn set_room_visibility_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { match body.visibility { room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, @@ -105,7 +105,7 @@ pub async fn set_room_visibility_route( )] pub async fn get_room_visibility_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { Ok(get_room_visibility::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { @@ -122,8 +122,8 @@ pub async fn get_public_rooms_filtered_helper( server: Option<&str>, limit: Option, since: Option<&str>, - _filter: Option, - _network: Option, + _filter: Option, + _network: Option, ) -> ConduitResult { if let Some(other_server) = server .clone() diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 4322de3..4b1c3a0 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -7,7 +7,7 @@ use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] pub fn get_filter_route() -> ConduitResult { // TODO - Ok(get_filter::Response::new(filter::FilterDefinition { + Ok(get_filter::Response::new(filter::IncomingFilterDefinition { event_fields: None, event_format: None, account_data: None, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 3311529..0e7b1ef 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -11,7 +11,7 @@ use ruma::{ uiaa::{AuthFlow, UiaaInfo}, }, }, - encryption::UnsignedDeviceInfo, + encryption::IncomingUnsignedDeviceInfo, }; use std::collections::{BTreeMap, HashSet}; @@ -24,7 +24,7 @@ use rocket::{get, post}; )] pub fn upload_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -56,7 +56,7 @@ pub fn upload_keys_route( )] pub fn get_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -78,9 +78,9 @@ pub fn get_keys_route( Error::bad_database("all_device_keys contained nonexistent device.") })?; - 
keys.unsigned = Some(UnsignedDeviceInfo { + keys.unsigned = IncomingUnsignedDeviceInfo { device_display_name: metadata.display_name, - }); + }; container.insert(device_id, keys); } @@ -97,9 +97,9 @@ pub fn get_keys_route( ), )?; - keys.unsigned = Some(UnsignedDeviceInfo { + keys.unsigned = IncomingUnsignedDeviceInfo { device_display_name: metadata.display_name, - }); + }; container.insert(device_id.clone(), keys); } @@ -167,7 +167,7 @@ pub fn claim_keys_route( )] pub fn upload_signing_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); @@ -280,7 +280,7 @@ pub fn upload_signatures_route( )] pub fn get_key_changes_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 79c1f08..038012e 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -27,7 +27,7 @@ pub fn get_media_config_route( )] pub fn create_content_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let mxc = format!( "mxc://{}/{}", @@ -36,7 +36,7 @@ pub fn create_content_route( ); db.media.create( mxc.clone(), - body.filename.as_ref(), + &body.filename, &body.content_type, &body.file, )?; @@ -53,7 +53,7 @@ pub fn create_content_route( )] pub fn get_content_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, _server_name: String, _media_id: String, ) -> ConduitResult { @@ -85,7 +85,7 @@ pub fn get_content_route( )] pub fn get_content_thumbnail_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, _server_name: String, _media_id: String, ) -> ConduitResult { diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6d1931b..606e470 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -32,7 +32,7 @@ use rocket::{get, post}; )] pub async fn join_room_by_id_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { join_room_by_id_helper( &db, @@ -49,7 +49,7 @@ pub async fn join_room_by_id_route( )] pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => room_id, @@ -81,7 +81,7 @@ pub async fn join_room_by_id_or_alias_route( )] pub fn leave_room_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -127,11 +127,11 @@ pub fn leave_room_route( )] pub fn invite_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if let invite_user::InvitationRecipient::UserId { user_id } = &body.recipient { + if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { db.rooms.build_and_append_pdu( PduBuilder { room_id: body.room_id.clone(), @@ -165,7 +165,7 @@ pub fn invite_user_route( )] pub fn kick_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -212,7 +212,7 @@ pub fn kick_user_route( )] pub fn ban_user_route( db: State<'_, Database>, - body: Ruma, + body: 
Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -267,7 +267,7 @@ pub fn ban_user_route( )] pub fn unban_user_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -313,7 +313,7 @@ pub fn unban_user_route( )] pub fn forget_room_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -348,7 +348,7 @@ pub fn joined_rooms_route( )] pub fn get_member_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -376,7 +376,7 @@ pub fn get_member_events_route( )] pub fn joined_members_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -422,9 +422,9 @@ async fn join_room_by_id_helper( &db, room_id.server_name().to_string(), federation::membership::create_join_event_template::v1::Request { - room_id: room_id.clone(), - user_id: sender_id.clone(), - ver: vec![RoomVersionId::Version5, RoomVersionId::Version6], + room_id, + user_id: sender_id, + ver: &[RoomVersionId::Version5, RoomVersionId::Version6], }, ) .await?; @@ -474,8 +474,8 @@ async fn join_room_by_id_helper( &db, room_id.server_name().to_string(), federation::membership::create_join_event::v1::Request { - room_id: room_id.clone(), - event_id, + room_id, + event_id: &event_id, pdu_stub: serde_json::from_value(join_event_stub_value) .expect("we just created this event"), }, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 03832d8..09c3517 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -18,7 +18,7 @@ use rocket::{get, put}; )] pub fn send_message_event_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -53,7 +53,7 @@ pub fn send_message_event_route( )] pub fn get_message_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -96,7 +96,7 @@ pub fn get_message_events_route( .collect::>(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.clone()); + resp.start = Some(body.from.to_owned()); resp.end = end_token; resp.chunk = events_after; resp.state = Vec::new(); @@ -120,7 +120,7 @@ pub fn get_message_events_route( .collect::>(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.clone()); + resp.start = Some(body.from.to_owned()); resp.end = start_token; resp.chunk = events_before; resp.state = Vec::new(); diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 0b6a51f..d105eb6 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -12,7 +12,7 @@ use rocket::put; )] pub fn set_presence_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 0707b34..386d898 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -21,7 +21,7 @@ use std::convert::TryInto; )] pub fn set_displayname_route( db: 
State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -98,7 +98,7 @@ pub fn set_displayname_route( )] pub fn get_displayname_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { Ok(get_display_name::Response { displayname: db.users.displayname(&body.user_id)?, @@ -112,7 +112,7 @@ pub fn get_displayname_route( )] pub fn set_avatar_url_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -201,7 +201,7 @@ pub fn set_avatar_url_route( )] pub fn get_avatar_url_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&body.user_id)?, @@ -215,7 +215,7 @@ pub fn get_avatar_url_route( )] pub fn get_profile_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let avatar_url = db.users.avatar_url(&body.user_id)?; let displayname = db.users.displayname(&body.user_id)?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index ff72765..023eece 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -15,7 +15,7 @@ use std::{collections::BTreeMap, time::SystemTime}; )] pub fn set_read_marker_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -52,7 +52,7 @@ pub fn set_read_marker_route( ); let mut receipt_content = BTreeMap::new(); receipt_content.insert( - event.clone(), + event.to_owned(), ruma::events::receipt::Receipts { read: Some(user_receipts), }, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 8708692..cd1b443 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -14,7 +14,7 @@ use rocket::put; )] pub fn redact_event_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 3ee21b6..9918123 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,7 +22,7 @@ use rocket::{get, post}; )] pub fn create_room_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -48,11 +48,8 @@ pub fn create_room_route( })?; let mut content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); - content.federate = body.creation_content.as_ref().map_or(true, |c| c.federate); - content.predecessor = body - .creation_content - .as_ref() - .and_then(|c| c.predecessor.clone()); + content.federate = body.creation_content.federate; + content.predecessor = body.creation_content.predecessor.clone(); content.room_version = RoomVersionId::Version6; // 1. The room create event @@ -80,7 +77,7 @@ pub fn create_room_route( membership: member::MembershipState::Join, displayname: db.users.displayname(&sender_id)?, avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: body.is_direct, + is_direct: Some(body.is_direct), third_party_invite: None, }) .expect("event is valid, we just created it"), @@ -137,8 +134,7 @@ pub fn create_room_route( // 4. Events set by preset // Figure out preset. 
We need it for preset specific events - let visibility = body.visibility.unwrap_or(room::Visibility::Private); - let preset = body.preset.unwrap_or_else(|| match visibility { + let preset = body.preset.unwrap_or_else(|| match body.visibility { room::Visibility::Private => create_room::RoomPreset::PrivateChat, room::Visibility::Public => create_room::RoomPreset::PublicChat, }); @@ -213,32 +209,19 @@ pub fn create_room_route( )?; // 5. Events listed in initial_state - for create_room::InitialStateEvent { - event_type, - state_key, - content, - } in &body.initial_state - { + for event in &body.initial_state { + let pdu_builder = serde_json::from_str::( + &serde_json::to_string(&event).expect("AnyInitialStateEvent::to_string always works"), + ).map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?; + // Silently skip encryption events if they are not allowed - if event_type == &EventType::RoomEncryption && db.globals.encryption_disabled() { + if pdu_builder.event_type == EventType::RoomEncryption && db.globals.encryption_disabled() + { continue; } - db.rooms.build_and_append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: event_type.clone(), - content: serde_json::from_str(content.get()).map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid initial_state content.") - })?, - unsigned: None, - state_key: state_key.clone(), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &db.globals, &db.account_data)?; } // 6. Events implied by name and topic @@ -293,7 +276,7 @@ pub fn create_room_route( membership: member::MembershipState::Invite, displayname: db.users.displayname(&user)?, avatar_url: db.users.avatar_url(&user)?, - is_direct: body.is_direct, + is_direct: Some(body.is_direct), third_party_invite: None, }) .expect("event is valid, we just created it"), @@ -311,7 +294,7 @@ pub fn create_room_route( db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; } - if let Some(room::Visibility::Public) = body.visibility { + if body.visibility == room::Visibility::Public { db.rooms.set_public(&room_id, true)?; } @@ -324,7 +307,7 @@ pub fn create_room_route( )] pub fn get_room_event_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 082711d..2967e00 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,11 +1,10 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use js_int::uint; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; #[cfg(feature = "conduit_bin")] use rocket::post; -use search_events::{ResultCategories, ResultRoomEvents, SearchResult}; +use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; #[cfg_attr( @@ -14,7 +13,7 @@ use std::collections::BTreeMap; )] pub fn search_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -51,13 +50,18 @@ pub fn search_events_route( .0 .map(|result| { Ok::<_, Error>(SearchResult { - context: None, + context: EventContextResult { + end: None, + events_after: Vec::new(), + events_before: Vec::new(), + profile_info: BTreeMap::new(), + start: None, + }, rank: None, result: db .rooms 
.get_pdu_from_id(&result)? - // TODO this is an awkward type conversion see method - .map(|pdu| pdu.to_any_event()), + .map(|pdu| pdu.to_room_event()), }) }) .filter_map(|r| r.ok()) @@ -72,14 +76,14 @@ pub fn search_events_route( }; Ok(search_events::Response::new(ResultCategories { - room_events: Some(ResultRoomEvents { - count: uint!(0), // TODO + room_events: ResultRoomEvents { + count: None, // TODO? maybe not groups: BTreeMap::new(), // TODO next_batch, results, state: BTreeMap::new(), // TODO highlights: search.1, - }), + }, }) .into()) } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 948b455..9cd051c 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -36,7 +36,7 @@ pub fn get_login_types_route() -> ConduitResult { )] pub fn login_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { // Validate login method let user_id = diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e7d2bcf..75463cb 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -21,7 +21,7 @@ use rocket::{get, put}; )] pub fn send_state_event_for_key_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -40,7 +40,7 @@ pub fn send_state_event_for_key_route( &body.content, content, &body.room_id, - Some(body.state_key.clone()), + Some(body.state_key.to_owned()), )?) .into(), ) @@ -52,7 +52,7 @@ pub fn send_state_event_for_key_route( )] pub fn send_state_event_for_empty_key_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { // This just calls send_state_event_for_key_route let Ruma { diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7432960..167ee75 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -31,7 +31,7 @@ use std::{ )] pub async fn sync_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); let device_id = body.device_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 99ee6e3..d04dd3a 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -15,7 +15,7 @@ use rocket::{delete, get, put}; )] pub fn update_tag_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -49,7 +49,7 @@ pub fn update_tag_route( )] pub fn delete_tag_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); @@ -80,7 +80,7 @@ pub fn delete_tag_route( )] pub fn get_tags_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index ca423fe..fe74101 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -14,7 +14,7 @@ use rocket::put; )] pub fn send_event_to_device_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 7eba13e..b15121c 100644 --- a/src/client_server/typing.rs +++ 
b/src/client_server/typing.rs @@ -1,5 +1,6 @@ use super::State; use crate::{utils, ConduitResult, Database, Ruma}; +use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; #[cfg(feature = "conduit_bin")] @@ -11,16 +12,15 @@ use rocket::put; )] pub fn create_typing_event_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if body.typing { + if let Typing::Yes(duration) = body.state { db.rooms.edus.roomactive_add( &sender_id, &body.room_id, - body.timeout.map(|d| d.as_millis() as u64).unwrap_or(30000) - + utils::millis_since_unix_epoch(), + duration.as_millis() as u64 + utils::millis_since_unix_epoch(), &db.globals, )?; } else { diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index f47643c..dcf48fe 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -11,7 +11,7 @@ use rocket::post; )] pub fn search_users_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let limit = u64::from(body.limit) as usize; diff --git a/src/database/media.rs b/src/database/media.rs index e9dcb4a..63fa11c 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -16,7 +16,7 @@ impl Media { pub fn create( &self, mxc: String, - filename: Option<&String>, + filename: &Option, content_type: &str, file: &[u8], ) -> Result<()> { @@ -25,7 +25,7 @@ impl Media { key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail key.push(0xff); - key.extend_from_slice(filename.map(|f| f.as_bytes()).unwrap_or_default()); + key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); key.push(0xff); key.extend_from_slice(content_type.as_bytes()); diff --git a/src/database/users.rs b/src/database/users.rs index 1b6a681..10e1ef3 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ use ruma::{ keys::{CrossSigningKey, OneTimeKey}, }, }, - encryption::DeviceKeys, + encryption::IncomingDeviceKeys, events::{AnyToDeviceEvent, EventType}, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, Raw, UserId, }; @@ -395,7 +395,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - device_keys: &DeviceKeys, + device_keys: &IncomingDeviceKeys, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -625,7 +625,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); diff --git a/src/error.rs b/src/error.rs index 623aa0e..f521da4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -70,14 +70,14 @@ where use ErrorKind::*; let (kind, status_code) = match self { Self::BadRequest(kind, _) => ( - kind, + kind.clone(), match kind { Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => { StatusCode::FORBIDDEN } - Unauthorized | UnknownToken | MissingToken => StatusCode::UNAUTHORIZED, + Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, NotFound => StatusCode::NOT_FOUND, - LimitExceeded => StatusCode::TOO_MANY_REQUESTS, + LimitExceeded { .. 
} => StatusCode::TOO_MANY_REQUESTS, UserDeactivated => StatusCode::FORBIDDEN, TooLarge => StatusCode::PAYLOAD_TOO_LARGE, _ => StatusCode::BAD_REQUEST, diff --git a/src/pdu.rs b/src/pdu.rs index b565a24..1526484 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -255,7 +255,7 @@ impl PduEvent { } /// Build the start of a PDU in order to add it to the `Database`. -#[derive(Debug)] +#[derive(Debug, Deserialize)] pub struct PduBuilder { pub room_id: RoomId, pub sender: UserId, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 80e6e58..734d214 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,9 +1,9 @@ use crate::Error; use ruma::{ - api::IncomingRequest, + api::{Outgoing, OutgoingRequest}, identifiers::{DeviceId, UserId}, }; -use std::{convert::TryInto, ops::Deref}; +use std::{convert::TryFrom, convert::TryInto, ops::Deref}; #[cfg(feature = "conduit_bin")] use { @@ -24,15 +24,21 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { - pub body: T, +pub struct Ruma { + pub body: T::Incoming, pub sender_id: Option, pub device_id: Option>, pub json_body: Option>, // This is None when body is not a valid string } #[cfg(feature = "conduit_bin")] -impl<'a, T: IncomingRequest> FromTransformedData<'a> for Ruma { +impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma +where + ::Incoming: TryFrom>> + std::fmt::Debug, + <::Incoming as std::convert::TryFrom< + http::request::Request>, + >>::Error: std::fmt::Debug, +{ type Error = (); // TODO: Better error handling type Owned = Data; type Borrowed = Self::Owned; @@ -93,7 +99,7 @@ impl<'a, T: IncomingRequest> FromTransformedData<'a> for Ruma { let http_request = http_request.body(body.clone()).unwrap(); log::info!("{:?}", http_request); - match T::try_from(http_request) { + match ::Incoming::try_from(http_request) { Ok(t) => Success(Ruma { body: t, sender_id: user_id, @@ -112,8 +118,8 @@ impl<'a, T: IncomingRequest> FromTransformedData<'a> for Ruma { } } -impl Deref for Ruma { - type Target = T; +impl Deref for Ruma { + type Target = T::Incoming; fn deref(&self) -> &Self::Target { &self.body diff --git a/src/server_server.rs b/src/server_server.rs index ac4407b..d39abe6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -207,7 +207,7 @@ pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { )] pub async fn get_public_rooms_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let Ruma { body: @@ -230,7 +230,7 @@ pub async fn get_public_rooms_route( limit, since.as_deref(), None, - Some(ruma::directory::RoomNetwork::Matrix), + Some(ruma::directory::IncomingRoomNetwork::Matrix), ) .await? 
.0; @@ -264,7 +264,7 @@ pub async fn get_public_rooms_route( )] pub fn send_transaction_message_route<'a>( _db: State<'a, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { dbg!(&*body); Ok(send_transaction_message::v1::Response { From 12a8c9badd202f3019cab71a1f3a3134c8fb75ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Sep 2020 21:30:07 +0200 Subject: [PATCH 0278/1727] fix: join rooms over federation --- Cargo.lock | 140 +++++++------ Cargo.toml | 8 +- src/client_server/account.rs | 4 +- src/client_server/alias.rs | 4 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 8 +- src/client_server/membership.rs | 106 +++++----- src/client_server/message.rs | 5 +- src/client_server/profile.rs | 8 +- src/client_server/redact.rs | 4 +- src/client_server/room.rs | 51 ++--- src/client_server/search.rs | 2 +- src/client_server/state.rs | 4 +- src/database.rs | 1 - src/database/rooms.rs | 340 ++++++++++++++------------------ src/pdu.rs | 112 +++++------ src/server_server.rs | 1 + 17 files changed, 395 insertions(+), 405 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c88c578..dc215c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -116,9 +116,9 @@ checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" [[package]] name = "async-trait" -version = "0.1.38" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1a4a2f97ce50c9d0282c1468816208588441492b40d813b2e0419c22c05e7f" +checksum = "687c230d85c0a52504709705fc8a53e4a692b83a2184f03dae73e38e1e93a783" dependencies = [ "proc-macro2", "quote", @@ -223,9 +223,9 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7a1029718df60331e557c9e83a55523c955e5dd2a7bfeffad6bbd50b538ae9" +checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" [[package]] name = "byteorder" @@ -301,6 +301,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "const_fn" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce90df4c658c62f12d78f7508cf92f9173e5184a539c10bfe54a3107b3ffd0f2" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -319,7 +325,7 @@ dependencies = [ "percent-encoding", "rand", "sha2", - "time 0.2.16", + "time 0.2.19", "version_check", ] @@ -660,9 +666,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if", "libc", @@ -721,12 +727,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" -dependencies = [ - "autocfg", -] +checksum = "00d63df3d41950fb462ed38308eea019113ad1508da725bbedcd0fa5a85ef5f7" [[package]] name = "heck" @@ -843,9 +846,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.8" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "543904170510c1b5fb65140485d84de4a57fddb2ed685481e9020ce3d2c9f64c" +checksum = 
"974e194911d1f7efe3cd8a8f9db3b767e43536327e899e8bc9a12ef5711b74d2" dependencies = [ "bytemuck", "byteorder", @@ -859,9 +862,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ "autocfg", "hashbrown", @@ -920,9 +923,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" +checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" dependencies = [ "wasm-bindgen", ] @@ -954,9 +957,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.76" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" +checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235" [[package]] name = "lock_api" @@ -1120,9 +1123,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" dependencies = [ "cfg-if", "libc", @@ -1379,9 +1382,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.19" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +checksum = "36e28516df94f3dd551a587da5357459d9b36d945a7c37c3557928c1c2ff2a2c" dependencies = [ "unicode-xid", ] @@ -1587,7 +1590,7 @@ dependencies = [ "rocket_codegen", "rocket_http", "state", - "time 0.2.16", + "time 0.2.19", "tokio", "toml", "version_check", @@ -1622,7 +1625,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.16", + "time 0.2.19", "tokio", "tokio-rustls", "unicode-xid", @@ -1631,6 +1634,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1646,6 +1650,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "http", "percent-encoding", @@ -1660,6 +1665,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1670,6 +1676,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "ruma-api", "ruma-common", @@ -1682,6 +1689,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "assign", "http", @@ -1700,6 +1708,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "js_int", "ruma-api", @@ -1713,6 +1722,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "js_int", "ruma-common", @@ -1727,6 +1737,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1737,6 +1748,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "js_int", "ruma-api", @@ -1751,6 +1763,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1762,6 +1775,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "proc-macro2", "quote", @@ -1772,6 +1786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "serde", "strum", @@ -1780,6 +1795,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "form_urlencoded", "itoa", @@ -1791,6 +1807,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225" dependencies = [ "base64", "ring", @@ -1910,18 +1927,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54c9a88f2da7238af84b5101443f0c0d0a3bbdc455e34a5c9497b1903ed55d5" +checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "609feed1d0a73cc36a0182a840a9b37b4a82f0b1150369f0536a9e3f2a31dc48" +checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" dependencies = [ "proc-macro2", "quote", @@ -2021,9 +2038,9 @@ checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = 
"b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" dependencies = [ "cfg-if", "libc", @@ -2055,6 +2072,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#0081081604b051d412a2365b68357e064c33320c" dependencies = [ "itertools", "js_int", @@ -2139,15 +2157,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.2.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" [[package]] name = "syn" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891d8d6567fe7c7f8835a3a98af4208f3846fba258c1bc3c31d6e506239f11f9" +checksum = "963f7d3cc59b59b9325165add223142bbf1df27655d07789f109896d353d8350" dependencies = [ "proc-macro2", "quote", @@ -2210,11 +2228,11 @@ dependencies = [ [[package]] name = "time" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +checksum = "80c1a1fd93112fc50b11c43a1def21f926be3c18884fad676ea879572da070a1" dependencies = [ - "cfg-if", + "const_fn", "libc", "standback", "stdweb", @@ -2288,9 +2306,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", "rustls", @@ -2362,9 +2380,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f0e00789804e99b20f12bc7003ca416309d28a6f495d6af58d1e2c2842461b5" +checksum = "5bcf46c1f1f06aeea2d6b81f3c863d0930a596c86ad1920d4e5bad6dd1d7119a" dependencies = [ "lazy_static", ] @@ -2382,9 +2400,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" dependencies = [ "serde", "tracing-core", @@ -2392,9 +2410,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd165311cc4d7a555ad11cc77a37756df836182db0d81aac908c8184c584f40" +checksum = "82bb5079aa76438620837198db8a5c529fb9878c730bc2b28179b0241cf04c10" dependencies = [ "ansi_term", "chrono", @@ -2525,9 +2543,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ "cfg-if", "serde", @@ -2537,9 +2555,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.67" +version = "0.2.68" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" dependencies = [ "bumpalo", "lazy_static", @@ -2552,9 +2570,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f8d235a77f880bcef268d379810ea6c0af2eacfa90b1ad5af731776e0c4699" +checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" dependencies = [ "cfg-if", "js-sys", @@ -2564,9 +2582,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2574,9 +2592,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ "proc-macro2", "quote", @@ -2587,15 +2605,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" +checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" [[package]] name = "web-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" +checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 5d35433..1b7a700 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,8 +16,8 @@ edition = "2018" #rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } #ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers -ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fed-fixes" } # Used for matrix spec type definitions and helpers +#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", 
"unstable-pre-spec", "unstable-synapse-quirks"] } tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently log = "0.4.8" # Used for emitting log entries @@ -32,8 +32,8 @@ reqwest = "0.10.6" # Used to send requests thiserror = "1.0.19" # Used for conduit::Error type image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images base64 = "0.12.3" # Used to encode server public key -#state-res = { git = "https://github.com/ruma/state-res", version = "0.1.0", branch = "spec-comp" } -state-res = { path = "../state-res" } +state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec"] } ring = "0.16.15" [features] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index cb77a15..3db933c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -356,14 +356,14 @@ pub fn deactivate_route( db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_id.to_string()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 1d30261..bfdaeca 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -64,9 +64,7 @@ pub async fn get_alias_helper( let response = server_server::send_request( &db, room_alias.server_name().to_string(), - federation::query::get_room_information::v1::Request { - room_alias, - }, + federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 34feb71..2764d2c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -14,7 +14,7 @@ use ruma::{ }, federation, }, - directory::{IncomingFilter, PublicRoomsChunk, IncomingRoomNetwork}, + directory::{IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk}, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 038012e..d077447 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -34,12 +34,8 @@ pub fn create_content_route( db.globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media.create( - mxc.clone(), - &body.filename, - &body.content_type, - &body.file, - )?; + db.media + .create(mxc.clone(), &body.filename, &body.content_type, &body.file)?; Ok(create_content::Response { content_uri: mxc }.into()) } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 606e470..ea2271b 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,6 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, server_server, utils, ConduitResult, Database, Error, Ruma, }; +use log::warn; use ruma::{ api::{ client::{ @@ -20,8 +21,7 @@ use ruma::{ EventId, Raw, RoomId, RoomVersionId, UserId, }; use state_res::StateEvent; - -use std::{collections::BTreeMap, convert::TryFrom}; +use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -106,14 +106,14 @@ pub fn leave_room_route( 
db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_id.to_string()), redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; @@ -134,8 +134,6 @@ pub fn invite_user_route( if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Invite, @@ -149,6 +147,8 @@ pub fn invite_user_route( state_key: Some(user_id.to_string()), redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; @@ -191,14 +191,14 @@ pub fn kick_user_route( db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; @@ -246,14 +246,14 @@ pub fn ban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; @@ -292,14 +292,14 @@ pub fn unban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; @@ -473,7 +473,7 @@ async fn join_room_by_id_helper( let send_join_response = server_server::send_request( &db, room_id.server_name().to_string(), - federation::membership::create_join_event::v1::Request { + federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, pdu_stub: serde_json::from_value(join_event_stub_value) @@ -482,25 +482,39 @@ async fn join_room_by_id_helper( ) .await?; - dbg!(&send_join_response); - let mut event_map = send_join_response .room_state .state .iter() .chain(send_join_response.room_state.auth_chain.iter()) .map(|pdu| { - pdu.deserialize() - .map(StateEvent::Full) - .map(|ev| (ev.event_id(), ev)) + let mut value = serde_json::from_str(pdu.json().get()) + .expect("converting raw jsons to values always works"); + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value + .as_object_mut() + .ok_or_else(|| Error::BadServerResponse("PDU is not an object."))? 
+ .insert("event_id".to_owned(), event_id.to_string().into()); + + serde_json::from_value::(value) + .map(|ev| (event_id, Arc::new(ev))) + .map_err(|e| { + warn!("{}", e); + Error::BadServerResponse("Invalid PDU bytes in send_join response.") + }) }) - .collect::, _>>() - .map_err(|_| Error::bad_database("Invalid PDU found in db."))?; + .collect::>, _>>()?; let control_events = event_map .values() .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .map(|pdu| pdu.event_id().clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -515,7 +529,9 @@ async fn join_room_by_id_helper( .room_state .auth_chain .iter() - .filter_map(|pdu| Some(StateEvent::Full(pdu.deserialize().ok()?).event_id())) + .filter_map(|pdu| { + Some(StateEvent::Full(pdu.deserialize().ok()?).event_id().clone()) + }) .collect::>(), ); @@ -575,31 +591,31 @@ async fn join_room_by_id_helper( // We do not rebuild the PDU in this case only insert to DB db.rooms - .append_pdu(PduEvent::try_from(pdu)?, &db.globals, &db.account_data)?; + .append_pdu(PduEvent::from(&**pdu), &db.globals, &db.account_data)?; } + } else { + let event = member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }; + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.account_data, + )?; } - let event = member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: None, - third_party_invite: None, - }; - - db.rooms.build_and_append_pdu( - PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &db.globals, - &db.account_data, - )?; - Ok(join_room_by_id::Response::new(room_id.clone()).into()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 09c3517..025331e 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -27,11 +27,10 @@ pub fn send_message_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: body.content.event_type().into(), content: serde_json::from_str( body.json_body + .as_ref() .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
.get(), ) @@ -40,6 +39,8 @@ pub fn send_message_event_route( state_key: None, redacts: None, }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 386d898..2a2e05e 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -33,8 +33,6 @@ pub fn set_displayname_route( let room_id = room_id?; db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(ruma::events::room::member::MemberEventContent { displayname: body.displayname.clone(), @@ -62,6 +60,8 @@ pub fn set_displayname_route( state_key: Some(sender_id.to_string()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -136,8 +136,6 @@ pub fn set_avatar_url_route( let room_id = room_id?; db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(ruma::events::room::member::MemberEventContent { avatar_url: body.avatar_url.clone(), @@ -165,6 +163,8 @@ pub fn set_avatar_url_route( state_key: Some(sender_id.to_string()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index cd1b443..5117348 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -20,8 +20,6 @@ pub fn redact_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - room_id: body.room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomRedaction, content: serde_json::to_value(redaction::RedactionEventContent { reason: body.reason.clone(), @@ -31,6 +29,8 @@ pub fn redact_event_route( state_key: None, redacts: Some(body.event_id.clone()), }, + &sender_id, + &body.room_id, &db.globals, &db.account_data, )?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 9918123..9a83f81 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -55,14 +55,14 @@ pub fn create_room_route( // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomCreate, content: serde_json::to_value(content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -70,8 +70,6 @@ pub fn create_room_route( // 2. 
Let the room creator join db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, @@ -85,6 +83,8 @@ pub fn create_room_route( state_key: Some(sender_id.to_string()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -119,14 +119,14 @@ pub fn create_room_route( }; db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomPowerLevels, content: power_levels_content, unsigned: None, state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -142,8 +142,6 @@ pub fn create_room_route( // 4.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomJoinRules, content: match preset { create_room::RoomPreset::PublicChat => serde_json::to_value( @@ -160,6 +158,8 @@ pub fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -167,8 +167,6 @@ pub fn create_room_route( // 4.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomHistoryVisibility, content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( history_visibility::HistoryVisibility::Shared, @@ -178,6 +176,8 @@ pub fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -185,8 +185,6 @@ pub fn create_room_route( // 4.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomGuestAccess, content: match preset { create_room::RoomPreset::PublicChat => { @@ -204,6 +202,8 @@ pub fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -212,24 +212,27 @@ pub fn create_room_route( for event in &body.initial_state { let pdu_builder = serde_json::from_str::( &serde_json::to_string(&event).expect("AnyInitialStateEvent::to_string always works"), - ).map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?; + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?; // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == EventType::RoomEncryption && db.globals.encryption_disabled() - { + if pdu_builder.event_type == EventType::RoomEncryption && db.globals.encryption_disabled() { continue; } - db.rooms - .build_and_append_pdu(pdu_builder, &db.globals, &db.account_data)?; + db.rooms.build_and_append_pdu( + pdu_builder, + &sender_id, + &room_id, + &db.globals, + &db.account_data, + )?; } // 6. 
Events implied by name and topic if let Some(name) = &body.name { db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomName, content: serde_json::to_value( name::NameEventContent::new(name.clone()).map_err(|_| { @@ -241,6 +244,8 @@ pub fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -249,8 +254,6 @@ pub fn create_room_route( if let Some(topic) = &body.topic { db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomTopic, content: serde_json::to_value(topic::TopicEventContent { topic: topic.clone(), @@ -260,6 +263,8 @@ pub fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; @@ -269,8 +274,6 @@ pub fn create_room_route( for user in &body.invite { db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Invite, @@ -284,6 +287,8 @@ pub fn create_room_route( state_key: Some(user.to_string()), redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 2967e00..3b03e7a 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -77,7 +77,7 @@ pub fn search_events_route( Ok(search_events::Response::new(ResultCategories { room_events: ResultRoomEvents { - count: None, // TODO? maybe not + count: None, // TODO? maybe not groups: BTreeMap::new(), // TODO next_batch, results, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 75463cb..1fe3cd6 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -213,14 +213,14 @@ pub fn send_state_event_for_key_helper( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: sender_id.clone(), event_type: content.event_type().into(), content: json, unsigned: None, state_key, redacts: None, }, + &sender_id, + &room_id, &db.globals, &db.account_data, )?; diff --git a/src/database.rs b/src/database.rs index a105058..0d18020 100644 --- a/src/database.rs +++ b/src/database.rs @@ -97,7 +97,6 @@ impl Database { }, pduid_pdu: db.open_tree("pduid_pdu")?, eventid_pduid: db.open_tree("eventid_pduid")?, - roomstateid_pduid: db.open_tree("roomstateid_pduid")?, roomid_pduleaves: db.open_tree("roomid_pduleaves")?, alias_roomid: db.open_tree("alias_roomid")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ee070b3..6033378 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -25,6 +25,7 @@ use std::{ collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, mem, + sync::Arc, }; /// The unique identifier of each state group. @@ -33,10 +34,6 @@ use std::{ /// hashing the entire state. pub type StateHashId = Vec; -/// This identifier consists of roomId + count. It represents a -/// unique event, it will never be overwritten or removed. -pub type PduId = IVec; - pub struct Rooms { pub edus: edus::RoomEdus, pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count @@ -54,22 +51,22 @@ pub struct Rooms { pub(super) roomuserid_invited: sled::Tree, pub(super) userroomid_left: sled::Tree, - // STATE TREES - /// This holds the full current state, including the latest event. 
- pub(super) roomstateid_pduid: sled::Tree, // RoomStateId = Room + StateType + StateKey - /// This holds the full room state minus the latest event. - pub(super) pduid_statehash: sled::Tree, // PDU id -> StateHash - /// Also holds the full room state minus the latest event. - pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + (EventType, StateKey) - /// The room_id -> the latest StateHash + /// Remember the current state hash of a room. pub(super) roomid_statehash: sled::Tree, + /// Remember the state hash at events in the past. + pub(super) pduid_statehash: sled::Tree, + /// The state for a given state hash. + pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + EventType + StateKey } impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result { + fn get_event( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> state_res::Result> { let pid = self - .eventid_pduid - .get(event_id.as_bytes()) + .get_pdu_id(event_id) .map_err(StateError::custom)? .ok_or_else(|| { StateError::NotFound("PDU via room_id and event_id not found in the db.".into()) @@ -87,7 +84,7 @@ impl StateStore for Rooms { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == Some(room_id) { - Ok(pdu) + Ok(Arc::new(pdu)) } else { Err(StateError::NotFound( "Found PDU for incorrect room in db.".into(), @@ -136,53 +133,12 @@ impl Rooms { None } - /// Fetch the current State using the `roomstateid_pduid` tree. - pub fn current_state_pduids(&self, room_id: &RoomId) -> Result> { - // TODO this could also scan roomstateid_pduid if we passed in room_id ? - self.roomstateid_pduid - .scan_prefix(room_id.as_bytes()) - .values() - .map(|pduid| { - let pduid = &pduid?; - self.pduid_pdu.get(pduid)?.map_or_else( - || { - Err(Error::bad_database( - "Failed to find current state of pduid's.", - )) - }, - |b| { - Ok(( - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - pduid.clone(), - )) - }, - ) - }) - .map(|pair| { - let (pdu, id) = pair?; - Ok(((pdu.kind, pdu.state_key), id)) - }) - .collect::>>() - } - /// Returns the last state hash key added to the db. - pub fn current_state_hash(&self, room_id: &RoomId) -> Result { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - // We must check here because this method is called outside and before - // `append_state_pdu` so the DB can be empty - if self.pduid_statehash.scan_prefix(prefix).next().is_none() { - // return the hash of the room_id, this represents a room with no state - return self.new_state_hash_id(room_id); - } - - self.pduid_statehash - .iter() - .next_back() - .map(|pair| Ok(pair?.1.to_vec())) - .ok_or_else(|| Error::bad_database("No PDU's found for this room."))? + pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { + Ok(self + .roomid_statehash + .get(room_id.as_bytes())? + .map(|bytes| bytes.to_vec())) } /// This fetches auth event_ids from the current state using the @@ -243,39 +199,11 @@ impl Rooms { /// Generate a new StateHash. /// - /// A unique hash made from hashing the current states pduid's. 
- fn new_state_hash_id(&self, room_id: &RoomId) -> Result { - // Use hashed roomId as the first StateHash key for first state event in room - if self - .pduid_statehash - .scan_prefix(room_id.as_bytes()) - .next() - .is_none() - { - return Ok(digest::digest(&digest::SHA256, room_id.as_bytes()) - .as_ref() - .to_vec()); - } - - let pdu_ids_to_hash = self - .pduid_statehash - .scan_prefix(room_id.as_bytes()) - .values() - .next_back() - .unwrap() // We just checked if the tree was empty - .map(|hash| { - self.stateid_pduid - .scan_prefix(hash) - .values() - // pduid is roomId + count so just hash the whole thing - .map(|pid| Ok(pid?.to_vec())) - .collect::>>>() - })??; - - let hash = digest::digest( - &digest::SHA256, - &pdu_ids_to_hash.into_iter().flatten().collect::>(), - ); + /// A unique hash made from hashing all PDU ids of the state joined with 0xff. + fn calculate_hash(&self, pdu_id_bytes: &[&[u8]]) -> Result { + // We only hash the pdu's event ids, not the whole pdu + let bytes = pdu_id_bytes.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); Ok(hash.as_ref().to_vec()) } @@ -297,29 +225,38 @@ impl Rooms { &self, room_id: &RoomId, ) -> Result> { - let mut hashmap = HashMap::new(); - for pdu in - self.roomstateid_pduid - .scan_prefix(&room_id.as_bytes()) + if let Some(current_state_hash) = self.current_state_hash(room_id)? { + let mut prefix = current_state_hash; + prefix.push(0xff); + + let mut hashmap = HashMap::new(); + for pdu in self + .stateid_pduid + .scan_prefix(prefix) .values() - .map(|value| { + .map(|pdu_id| { Ok::<_, Error>( serde_json::from_slice::( - &self.pduid_pdu.get(value?)?.ok_or_else(|| { - Error::bad_database("PDU not found for ID in db.") + &self.pduid_pdu.get(pdu_id?)?.ok_or_else(|| { + Error::bad_database("PDU in state not found in database.") })?, ) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + .map_err(|_| { + Error::bad_database("Invalid PDU bytes in current room state.") + })?, ) }) - { - let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Room state contains event without state_key.") - })?; - hashmap.insert((pdu.kind.clone(), state_key), pdu); + { + let pdu = pdu?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Room state contains event without state_key.") + })?; + hashmap.insert((pdu.kind.clone(), state_key), pdu); + } + Ok(hashmap) + } else { + Ok(HashMap::new()) } - Ok(hashmap) } /// Returns all state entries for this type. @@ -328,33 +265,40 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&event_type.to_string().as_bytes()); + if let Some(current_state_hash) = self.current_state_hash(room_id)? 
{ + let mut prefix = current_state_hash; + prefix.push(0xff); + prefix.extend_from_slice(&event_type.to_string().as_bytes()); + prefix.push(0xff); - let mut hashmap = HashMap::new(); - for pdu in - self.roomstateid_pduid + let mut hashmap = HashMap::new(); + for pdu in self + .stateid_pduid .scan_prefix(&prefix) .values() - .map(|value| { + .map(|pdu_id| { Ok::<_, Error>( serde_json::from_slice::( - &self.pduid_pdu.get(value?)?.ok_or_else(|| { - Error::bad_database("PDU not found for ID in db.") + &self.pduid_pdu.get(pdu_id?)?.ok_or_else(|| { + Error::bad_database("PDU in state not found in database.") })?, ) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + .map_err(|_| { + Error::bad_database("Invalid PDU bytes in current room state.") + })?, ) }) - { - let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Room state contains event without state_key.") - })?; - hashmap.insert(state_key, pdu); + { + let pdu = pdu?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Room state contains event without state_key.") + })?; + hashmap.insert(state_key, pdu); + } + Ok(hashmap) + } else { + Ok(HashMap::new()) } - Ok(hashmap) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -364,23 +308,24 @@ impl Rooms { event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&event_type.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(&state_key.as_bytes()); + if let Some(current_state_hash) = self.current_state_hash(room_id)? { + let mut key = current_state_hash; + key.push(0xff); + key.extend_from_slice(&event_type.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(&state_key.as_bytes()); - self.roomstateid_pduid.get(&key)?.map_or(Ok(None), |value| { - Ok::<_, Error>(Some( - serde_json::from_slice::( - &self - .pduid_pdu - .get(value)? - .ok_or_else(|| Error::bad_database("PDU not found for ID in db."))?, - ) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.stateid_pduid.get(&key)?.map_or(Ok(None), |pdu_id| { + Ok::<_, Error>(Some( + serde_json::from_slice::(&self.pduid_pdu.get(pdu_id)?.ok_or_else( + || Error::bad_database("PDU in state not found in database."), + )?) + .map_err(|_| Error::bad_database("Invalid PDU bytes in current room state."))?, + )) + }) + } else { + Ok(None) + } } /// Returns the `count` of this pdu's id. @@ -528,8 +473,8 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; - if let Some(state_key) = &pdu.state_key { - self.append_state_pdu(&pdu.room_id, &pdu_id, state_key, &pdu.kind)?; + if pdu.state_key.is_some() { + self.append_to_state(&pdu_id, &pdu)?; } match &pdu.kind { @@ -603,59 +548,69 @@ impl Rooms { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. /// The incoming event is the `pdu_id` passed to this method. - fn append_state_pdu( - &self, - room_id: &RoomId, - pdu_id: &[u8], - state_key: &str, - kind: &EventType, - ) -> Result { - let state_hash = self.new_state_hash_id(room_id)?; - let state = self.current_state_pduids(room_id)?; + fn append_to_state(&self, new_pdu_id: &[u8], new_pdu: &PduEvent) -> Result { + let old_state = + if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { + // Store state for event. The state does not include the event itself. 
+ // Instead it's the state before the pdu, so the room's old state. + self.pduid_statehash.insert(new_pdu_id, &old_state_hash)?; + if new_pdu.state_key.is_none() { + return Ok(old_state_hash.to_vec()); + } - let mut key = state_hash.to_vec(); - key.push(0xff); + let mut prefix = old_state_hash.to_vec(); + prefix.push(0xff); + self.stateid_pduid + .scan_prefix(&prefix) + .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) + .map(|(k, v)| (k.subslice(prefix.len(), k.len() - prefix.len()), v)) + .collect::>() + } else { + HashMap::new() + }; - // TODO eventually we could avoid writing to the DB so much on every event - // by keeping track of the delta and write that every so often - for ((ev_ty, state_key), pid) in state { - let mut state_id = key.to_vec(); - state_id.extend_from_slice(ev_ty.to_string().as_bytes()); - key.push(0xff); - state_id.extend_from_slice(state_key.expect("state event").as_bytes()); + if let Some(state_key) = &new_pdu.state_key { + let mut new_state = old_state; + let mut pdu_key = new_pdu.kind.as_str().as_bytes().to_vec(); + pdu_key.push(0xff); + pdu_key.extend_from_slice(state_key.as_bytes()); + new_state.insert(pdu_key.into(), new_pdu_id.into()); + + let new_state_hash = + self.calculate_hash(&new_state.values().map(|b| &**b).collect::>())?; + + let mut key = new_state_hash.to_vec(); key.push(0xff); - self.stateid_pduid.insert(&state_id, &pid)?; + // TODO: we could avoid writing to the DB on every state event by keeping + // track of the delta and write that every so often + for (key_without_prefix, pdu_id) in new_state { + let mut state_id = key.clone(); + state_id.extend_from_slice(&key_without_prefix); + self.stateid_pduid.insert(&state_id, &pdu_id)?; + } + + self.roomid_statehash + .insert(new_pdu.room_id.as_bytes(), &*new_state_hash)?; + + Ok(new_state_hash) + } else { + Err(Error::bad_database( + "Tried to insert non-state event into room without a state.", + )) } - - // This event's state does not include the event itself. `current_state_pduids` - // uses `roomstateid_pduid` before the current event is inserted to the tree so the state - // will be everything up to but not including the incoming event. - self.pduid_statehash.insert(pdu_id, state_hash.as_slice())?; - - self.roomid_statehash - .insert(room_id.as_bytes(), state_hash.as_slice())?; - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(state_key.as_bytes()); - self.roomstateid_pduid.insert(key, pdu_id)?; - - Ok(state_hash) } /// Creates a new persisted data unit and adds it to a room. pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, ) -> Result { let PduBuilder { - room_id, - sender, event_type, content, unsigned, @@ -741,8 +696,7 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
- .map(|pdu| pdu.convert_for_state_res()) - .transpose()?; + .map(|pdu| pdu.convert_for_state_res()); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent @@ -753,11 +707,12 @@ impl Rooms { state_key: Some(state_key.to_owned()), sender: &sender, }, - prev_event.as_ref(), + prev_event, + None, &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res()?)) + Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) }) .collect::>>()?, ) @@ -812,9 +767,8 @@ impl Rooms { let mut pdu = PduEvent { event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), - room_id, - sender, - origin: globals.server_name().to_owned(), + room_id: room_id.clone(), + sender: sender.clone(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -834,7 +788,7 @@ impl Rooms { hashes: ruma::events::pdu::EventHash { sha256: "aaa".to_owned(), }, - signatures: HashMap::new(), + signatures: BTreeMap::new(), }; // Generate event id @@ -1028,8 +982,6 @@ impl Rooms { self.build_and_append_pdu( PduBuilder { - room_id: room_id.clone(), - sender: user_id.clone(), event_type: EventType::RoomMember, content: serde_json::to_value(member_content) .expect("event is valid, we just created it"), @@ -1037,6 +989,8 @@ impl Rooms { state_key: Some(user_id.to_string()), redacts: None, }, + &user_id, + &room_id, globals, account_data, )?; diff --git a/src/pdu.rs b/src/pdu.rs index 1526484..f23e688 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -5,18 +5,17 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - EventId, Raw, RoomId, ServerName, UserId, + EventId, Raw, RoomId, ServerKeyId, ServerName, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::HashMap, convert::TryFrom}; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::UNIX_EPOCH}; -#[derive(Deserialize, Serialize)] +#[derive(Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, pub sender: UserId, - pub origin: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: EventType, @@ -31,7 +30,7 @@ pub struct PduEvent { #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] pub unsigned: serde_json::Map, pub hashes: EventHash, - pub signatures: HashMap>, + pub signatures: BTreeMap, BTreeMap>, } impl PduEvent { @@ -199,66 +198,69 @@ impl PduEvent { } } -impl TryFrom<&state_res::StateEvent> for PduEvent { - type Error = Error; - fn try_from(pdu: &state_res::StateEvent) -> Result { - serde_json::from_value(json!({ - "event_id": pdu.event_id(), - "room_id": pdu.room_id(), - "sender": pdu.sender(), - "origin": pdu.origin(), - "origin_server_ts": pdu.origin_server_ts(), - "event_type": pdu.kind(), - "content": pdu.content(), - "state_key": pdu.state_key(), - "prev_events": pdu.prev_event_ids(), - "depth": pdu.depth(), - "auth_events": pdu.auth_events(), - "redacts": pdu.redacts(), - "unsigned": pdu.unsigned(), - "hashes": pdu.hashes(), - "signatures": pdu.signatures(), - })) - .map_err(|_| Error::bad_database("Failed to convert PDU to ruma::Pdu type.")) +impl From<&state_res::StateEvent> for PduEvent { + fn from(pdu: &state_res::StateEvent) -> Self { + Self { + event_id: pdu.event_id().clone(), 
+ room_id: pdu.room_id().unwrap().clone(), + sender: pdu.sender().clone(), + origin_server_ts: (pdu + .origin_server_ts() + .duration_since(UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .try_into() + .expect("time is valid"), + kind: pdu.kind(), + content: pdu.content().clone(), + state_key: pdu.state_key(), + prev_events: pdu.prev_event_ids(), + depth: pdu.depth().clone(), + auth_events: pdu.auth_events(), + redacts: pdu.redacts().cloned(), + unsigned: pdu.unsigned().clone().into_iter().collect(), + hashes: pdu.hashes().clone(), + signatures: pdu.signatures(), + } } } impl PduEvent { - pub fn convert_for_state_res(&self) -> Result { - serde_json::from_value(json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin": self.origin, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events - .iter() - // TODO How do we create one of these - .map(|id| (id, EventHash { sha256: "hello".into() })) - .collect::>(), - "depth": self.depth, - "auth_events": self.auth_events - .iter() - // TODO How do we create one of these - .map(|id| (id, EventHash { sha256: "hello".into() })) - .collect::>(), - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - })) - .map_err(|_| Error::bad_database("Failed to convert PDU to ruma::Pdu type.")) + pub fn convert_for_state_res(&self) -> Arc { + Arc::new( + serde_json::from_value(json!({ + "event_id": self.event_id, + "room_id": self.room_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "type": self.kind, + "content": self.content, + "state_key": self.state_key, + "prev_events": self.prev_events + .iter() + // TODO How do we create one of these + .map(|id| (id, EventHash { sha256: "hello".into() })) + .collect::>(), + "depth": self.depth, + "auth_events": self.auth_events + .iter() + // TODO How do we create one of these + .map(|id| (id, EventHash { sha256: "hello".into() })) + .collect::>(), + "redacts": self.redacts, + "unsigned": self.unsigned, + "hashes": self.hashes, + "signatures": self.signatures, + })) + .expect("all conduit PDUs are state events"), + ) } } /// Build the start of a PDU in order to add it to the `Database`. 
#[derive(Debug, Deserialize)] pub struct PduBuilder { - pub room_id: RoomId, - pub sender: UserId, + #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, pub unsigned: Option>, diff --git a/src/server_server.rs b/src/server_server.rs index d39abe6..ffa5c5b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -139,6 +139,7 @@ where .unwrap() .into_iter() .collect(); + Ok( T::IncomingResponse::try_from(http_response.body(body).unwrap()) .expect("TODO: error handle other server errors"), From 1f2843498863065ca2775bd0a02a37db3f8f0002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Sep 2020 22:41:33 +0200 Subject: [PATCH 0279/1727] feat: hacky transactions --- src/server_server.rs | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ffa5c5b..d7d0e23 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,16 +1,19 @@ use crate::{client_server, ConduitResult, Database, Error, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION}; use rocket::{get, post, put, response::content::Json, State}; -use ruma::api::{ - client, - federation::{ - directory::get_public_rooms, - discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, +use ruma::{ + api::{ + client, + federation::{ + directory::get_public_rooms, + discovery::{ + get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, + }, + transactions::send_transaction_message, }, - transactions::send_transaction_message, + OutgoingRequest, }, - OutgoingRequest, + EventId, }; use serde_json::json; use std::{ @@ -264,10 +267,29 @@ pub async fn get_public_rooms_route( put("/_matrix/federation/v1/send/<_>", data = "") )] pub fn send_transaction_message_route<'a>( - _db: State<'a, Database>, + db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { dbg!(&*body); + for pdu in &body.pdus { + let mut value = serde_json::to_value(pdu).expect("all ruma pdus are json values"); + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value).expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value + .as_object_mut() + .expect("ruma pdus are json objects") + .insert("event_id".to_owned(), event_id.to_string().into()); + + db.rooms.append_pdu( + serde_json::from_value(value).expect("all ruma pdus are conduit pdus"), + &db.globals, + &db.account_data, + )?; + } Ok(send_transaction_message::v1::Response { pdus: BTreeMap::new(), } From af53485d70d474468b02d10badd6959fa05f1068 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Sep 2020 23:38:52 +0200 Subject: [PATCH 0280/1727] fix: avoid pdus without event ids --- src/client_server/membership.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea2271b..20cf315 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -502,8 +502,10 @@ async fn join_room_by_id_helper( .ok_or_else(|| Error::BadServerResponse("PDU is not an object."))? 
.insert("event_id".to_owned(), event_id.to_string().into()); + dbg!(&value); + serde_json::from_value::(value) - .map(|ev| (event_id, Arc::new(ev))) + .map(|ev| (dbg!(&ev).event_id().clone(), Arc::new(ev))) .map_err(|e| { warn!("{}", e); Error::BadServerResponse("Invalid PDU bytes in send_join response.") @@ -520,19 +522,14 @@ async fn join_room_by_id_helper( // These events are not guaranteed to be sorted but they are resolved according to spec // we auth them anyways to weed out faulty/malicious server. The following is basically the // full state resolution algorithm. + let event_ids = event_map.keys().cloned().collect::>(); + let sorted_control_events = state_res::StateResolution::reverse_topological_power_sort( &room_id, &control_events, &mut event_map, &db.rooms, - &send_join_response - .room_state - .auth_chain - .iter() - .filter_map(|pdu| { - Some(StateEvent::Full(pdu.deserialize().ok()?).event_id().clone()) - }) - .collect::>(), + &event_ids, ); // Auth check each event against the "partial" state created by the preceding events From 1f292c09f2e8a0679467ec5f3b699918d60cea64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 13 Sep 2020 22:24:36 +0200 Subject: [PATCH 0281/1727] improvement: better federation joins --- Cargo.lock | 30 ++++++------ src/client_server/membership.rs | 81 +++++++++++++++++++++---------- src/database/rooms.rs | 86 +++++++++++++++++++++------------ src/server_server.rs | 18 ++++--- 4 files changed, 136 insertions(+), 79 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 646cdcc..6ffd347 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1634,7 +1634,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1650,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "http", "percent-encoding", @@ -1665,7 +1665,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1676,7 +1676,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "ruma-api", "ruma-common", @@ -1689,7 +1689,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "assign", "http", @@ -1708,7 +1708,7 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "js_int", "ruma-api", @@ -1722,7 +1722,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "js_int", "ruma-common", @@ -1737,7 +1737,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1748,7 +1748,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "js_int", "ruma-api", @@ -1763,7 +1763,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1775,7 +1775,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "proc-macro2", "quote", @@ -1786,7 +1786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "serde", "strum", @@ -1795,7 +1795,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "form_urlencoded", "itoa", @@ -1807,7 +1807,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" dependencies = [ "base64", "ring", @@ -2072,7 +2072,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = 
"git+https://github.com/timokoesters/state-res?branch=spec-comp#0081081604b051d412a2365b68357e064c33320c" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#a9186476b748c901fbf4356414247a0b3ac01b5f" dependencies = [ "itertools", "js_int", diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 20cf315..9285648 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use super::State; use crate::{ client_server, pdu::{PduBuilder, PduEvent}, - server_server, utils, ConduitResult, Database, Error, Ruma, + server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::warn; use ruma::{ @@ -17,11 +17,14 @@ use ruma::{ }, federation, }, + events::pdu::Pdu, events::{room::member, EventType}, EventId, Raw, RoomId, RoomVersionId, UserId, }; use state_res::StateEvent; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{ + collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, sync::Arc, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -482,36 +485,49 @@ async fn join_room_by_id_helper( ) .await?; - let mut event_map = send_join_response + let add_event_id = |pdu: &Raw| { + let mut value = serde_json::from_str(pdu.json().get()) + .expect("converting raw jsons to values always works"); + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value + .as_object_mut() + .ok_or_else(|| Error::BadServerResponse("PDU is not an object."))? + .insert("event_id".to_owned(), event_id.to_string().into()); + + Ok((event_id, value)) + }; + + let room_state = send_join_response.room_state.state.iter().map(add_event_id); + + let state_events = room_state + .clone() + .map(|pdu: Result<(EventId, serde_json::Value)>| Ok(pdu?.0)) + .collect::>>()?; + + let auth_chain = send_join_response .room_state - .state + .auth_chain .iter() - .chain(send_join_response.room_state.auth_chain.iter()) - .map(|pdu| { - let mut value = serde_json::from_str(pdu.json().get()) - .expect("converting raw jsons to values always works"); - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - value - .as_object_mut() - .ok_or_else(|| Error::BadServerResponse("PDU is not an object."))? 
- .insert("event_id".to_owned(), event_id.to_string().into()); - - dbg!(&value); + .map(add_event_id); + let mut event_map = room_state + .chain(auth_chain) + .map(|r| { + let (event_id, value) = r?; serde_json::from_value::(value) - .map(|ev| (dbg!(&ev).event_id().clone(), Arc::new(ev))) + .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{}", e); Error::BadServerResponse("Invalid PDU bytes in send_join response.") }) }) - .collect::>, _>>()?; + .collect::>>>()?; let control_events = event_map .values() @@ -575,6 +591,8 @@ async fn join_room_by_id_helper( ) .expect("iterative auth check failed on resolved events"); + let mut state = HashMap::new(); + // filter the events that failed the auth check keeping the remaining events // sorted correctly for ev_id in sorted_event_ids @@ -587,9 +605,22 @@ async fn join_room_by_id_helper( .expect("Found event_id in sorted events that is not in resolved state"); // We do not rebuild the PDU in this case only insert to DB - db.rooms - .append_pdu(PduEvent::from(&**pdu), &db.globals, &db.account_data)?; + let pdu_id = + db.rooms + .append_pdu(&PduEvent::from(&**pdu), &db.globals, &db.account_data)?; + + if state_events.contains(ev_id) { + state.insert( + ( + pdu.kind(), + pdu.state_key().expect("State events have a state key"), + ), + pdu_id, + ); + } } + + db.rooms.force_state(room_id, state)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 87f4dcd..b538c85 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -220,6 +220,31 @@ impl Rooms { .is_some()) } + /// Returns the full room state. + pub fn force_state( + &self, + room_id: &RoomId, + state: HashMap<(EventType, String), Vec>, + ) -> Result<()> { + let state_hash = + self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; + let mut prefix = state_hash.clone(); + prefix.push(0xff); + + for ((event_type, state_key), pdu_id) in state { + let mut state_id = prefix.clone(); + state_id.extend_from_slice(&event_type.as_str().as_bytes()); + state_id.push(0xff); + state_id.extend_from_slice(&state_key.as_bytes()); + self.stateid_pduid.insert(state_id, pdu_id)?; + } + + self.roomid_statehash + .insert(room_id.as_bytes(), &*state_hash)?; + + Ok(()) + } + /// Returns the full room state. pub fn room_state_full( &self, @@ -446,10 +471,10 @@ impl Rooms { /// Creates a new persisted data unit and adds it to a room. 
pub fn append_pdu( &self, - pdu: PduEvent, + pdu: &PduEvent, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, - ) -> Result { + ) -> Result> { let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); ruma::signatures::hash_and_sign_event( globals.server_name().as_str(), @@ -473,10 +498,6 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; - if pdu.state_key.is_some() { - self.append_to_state(&pdu_id, &pdu)?; - } - match pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { @@ -484,23 +505,22 @@ impl Rooms { } } EventType::RoomMember => { - if let Some(state_key) = pdu.state_key { + if let Some(state_key) = &pdu.state_key { // if the state_key fails - let target_user_id = UserId::try_from(state_key) + let target_user_id = UserId::try_from(state_key.clone()) .expect("This state_key was previously validated"); // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth self.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::(pdu.content).map_err( - |_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid redaction event content.", - ) - }, - )?, + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid redaction event content.", + ) + })?, &pdu.sender, account_data, globals, @@ -528,7 +548,7 @@ impl Rooms { self.edus .private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?; - Ok(pdu.event_id) + Ok(pdu_id) } /// Generates a new StateHash and associates it with the incoming event. @@ -789,7 +809,13 @@ impl Rooms { )) .expect("ruma's reference hashes are valid event ids"); - self.append_pdu(pdu, globals, account_data) + let pdu_id = self.append_pdu(&pdu, globals, account_data)?; + + if pdu.state_key.is_some() { + self.append_to_state(&pdu_id, &pdu)?; + } + + Ok(pdu.event_id) } /// Returns an iterator over all PDUs in a room. @@ -953,19 +979,17 @@ impl Rooms { self.roomuseroncejoinedids.insert(&userroom_id, &[])?; // Check if the room has a predecessor - if let Some(predecessor) = serde_json::from_value::< - Raw, - >( - self.room_state_get(&room_id, &EventType::RoomCreate, "")? - .ok_or_else(|| { - Error::bad_database("Found room without m.room.create event.") - })? - .content, - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid room event in database."))? - .predecessor + if let Some(predecessor) = self + .room_state_get(&room_id, &EventType::RoomCreate, "")? 
+ .and_then(|create| { + serde_json::from_value::< + Raw, + >(create.content) + .expect("Raw::from_value always works") + .deserialize() + .ok() + }) + .and_then(|content| content.predecessor) { // Copy user settings from predecessor to the current room: // - Push rules diff --git a/src/server_server.rs b/src/server_server.rs index d7d0e23..6634d5a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, ConduitResult, Database, Error, Result, Ruma}; +use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ @@ -270,9 +270,11 @@ pub fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - dbg!(&*body); + //dbg!(&*body); for pdu in &body.pdus { - let mut value = serde_json::to_value(pdu).expect("all ruma pdus are json values"); + let mut value = serde_json::from_str(pdu.json().get()) + .expect("converting raw jsons to values always works"); + let event_id = EventId::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value).expect("ruma can calculate reference hashes") @@ -284,11 +286,11 @@ pub fn send_transaction_message_route<'a>( .expect("ruma pdus are json objects") .insert("event_id".to_owned(), event_id.to_string().into()); - db.rooms.append_pdu( - serde_json::from_value(value).expect("all ruma pdus are conduit pdus"), - &db.globals, - &db.account_data, - )?; + let pdu = + serde_json::from_value::(value).expect("all ruma pdus are conduit pdus"); + if db.rooms.exists(&pdu.room_id)? { + db.rooms.append_pdu(&pdu, &db.globals, &db.account_data)?; + } } Ok(send_transaction_message::v1::Response { pdus: BTreeMap::new(), From c5313b3e8f8d024c540d0428a6c75828d82f95c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Sep 2020 11:00:31 +0200 Subject: [PATCH 0282/1727] improvement: try out multiple servers when joining remote rooms --- Cargo.lock | 35 +++++++++++----------- src/client_server/alias.rs | 4 +-- src/client_server/directory.rs | 12 ++++---- src/client_server/membership.rs | 52 +++++++++++++++++++++------------ src/server_server.rs | 10 +++---- 5 files changed, 65 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ffd347..28a4395 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -157,7 +157,7 @@ dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide 0.4.1", + "miniz_oxide 0.4.2", "object", "rustc-demangle", ] @@ -1054,11 +1054,12 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d7559a8a40d0f97e1edea3220f698f78b1c5ab67532e49f68fde3910323b722" +checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9" dependencies = [ "adler", + "autocfg", ] [[package]] @@ -1634,7 +1635,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1650,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "http", "percent-encoding", @@ -1665,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1676,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "ruma-api", "ruma-common", @@ -1689,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "assign", "http", @@ -1708,7 +1709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "js_int", "ruma-api", @@ -1722,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "js_int", "ruma-common", @@ -1737,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1748,7 +1749,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "js_int", "ruma-api", @@ -1763,7 +1764,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1775,7 +1776,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "proc-macro2", "quote", @@ -1786,7 
+1787,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "serde", "strum", @@ -1795,7 +1796,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "form_urlencoded", "itoa", @@ -1807,7 +1808,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#8d763abaecb13f4799a31ecf1e0da77d2bc956a6" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" dependencies = [ "base64", "ring", diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index bfdaeca..0ec43f5 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -63,7 +63,7 @@ pub async fn get_alias_helper( if room_alias.server_name() != db.globals.server_name() { let response = server_server::send_request( &db, - room_alias.server_name().to_string(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; @@ -79,5 +79,5 @@ pub async fn get_alias_helper( "Room with alias not found.", ))?; - Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_string()]).into()) + Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into()) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2764d2c..a68d8dd 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -19,7 +19,7 @@ use ruma::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, }, - Raw, + Raw, ServerName, }; #[cfg(feature = "conduit_bin")] @@ -65,9 +65,9 @@ pub async fn get_public_rooms_route( ) -> ConduitResult { let response = get_public_rooms_filtered_helper( &db, - body.body.server.as_deref(), - body.body.limit, - body.body.since.as_deref(), + body.server.as_deref(), + body.limit, + body.since.as_deref(), None, // This is not used None, // This is not used ) @@ -119,7 +119,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, - server: Option<&str>, + server: Option<&ServerName>, limit: Option, since: Option<&str>, _filter: Option, @@ -131,7 +131,7 @@ pub async fn get_public_rooms_filtered_helper( { let response = server_server::send_request( &db, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 9285648..8d19402 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -19,7 +19,7 @@ use ruma::{ }, events::pdu::Pdu, events::{room::member, EventType}, - EventId, Raw, RoomId, RoomVersionId, UserId, + EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; use std::{ @@ -41,6 +41,7 @@ pub async fn join_room_by_id_route( &db, body.sender_id.as_ref(), &body.room_id, + 
&[body.room_id.server_name().to_owned()], body.third_party_signed.as_ref(), ) .await @@ -54,13 +55,12 @@ pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let room_id = match RoomId::try_from(body.room_id_or_alias.clone()) { - Ok(room_id) => room_id, + let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { + Ok(room_id) => (vec![room_id.server_name().to_owned()], room_id), Err(room_alias) => { - client_server::get_alias_helper(&db, &room_alias) - .await? - .0 - .room_id + let response = client_server::get_alias_helper(&db, &room_alias).await?; + + (response.0.servers, response.0.room_id) } }; @@ -69,6 +69,7 @@ pub async fn join_room_by_id_or_alias_route( &db, body.sender_id.as_ref(), &room_id, + &servers, body.third_party_signed.as_ref(), ) .await? @@ -415,22 +416,37 @@ async fn join_room_by_id_helper( db: &Database, sender_id: Option<&UserId>, room_id: &RoomId, + servers: &[Box], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> ConduitResult { let sender_id = sender_id.expect("user is authenticated"); // Ask a remote server if we don't have this room if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { - let make_join_response = server_server::send_request( - &db, - room_id.server_name().to_string(), - federation::membership::create_join_event_template::v1::Request { - room_id, - user_id: sender_id, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], - }, - ) - .await?; + let mut make_join_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in joining.", + )); + + for remote_server in servers { + let make_join_response = server_server::send_request( + &db, + remote_server, + federation::membership::create_join_event_template::v1::Request { + room_id, + user_id: sender_id, + ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + }, + ) + .await; + + make_join_response_and_server = make_join_response.map(|r| (r, remote_server)); + + if make_join_response_and_server.is_ok() { + break; + } + } + + let (make_join_response, remote_server) = make_join_response_and_server?; let mut join_event_stub_value = serde_json::from_str::(make_join_response.event.json().get()) @@ -475,7 +491,7 @@ async fn join_room_by_id_helper( let send_join_response = server_server::send_request( &db, - room_id.server_name().to_string(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/server_server.rs b/src/server_server.rs index 6634d5a..fc1da00 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -13,7 +13,7 @@ use ruma::{ }, OutgoingRequest, }, - EventId, + EventId, ServerName, }; use serde_json::json; use std::{ @@ -44,16 +44,16 @@ pub async fn request_well_known(db: &crate::Database, destination: &str) -> Opti pub async fn send_request( db: &crate::Database, - destination: String, + destination: &ServerName, request: T, ) -> Result where T: Debug, { let actual_destination = "https://".to_owned() - + &request_well_known(db, &destination) + + &request_well_known(db, &destination.as_str()) .await - .unwrap_or(destination.clone() + ":8448"); + .unwrap_or(destination.as_str().to_owned() + ":8448"); let mut http_request = request .try_into_http_request(&actual_destination, Some("")) @@ -82,7 +82,7 @@ where "origin".to_owned(), db.globals.server_name().as_str().into(), ); - request_map.insert("destination".to_owned(), destination.into()); + 
request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = request_map.into(); ruma::signatures::sign_json( From 4e44fedbcd823876862315ac3590cc3d21a2825e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Sep 2020 11:42:16 +0200 Subject: [PATCH 0283/1727] fix: room list over federation --- Cargo.lock | 28 +++++------ src/client_server/directory.rs | 32 +++++-------- src/server_server.rs | 88 +++++++++++++++++++++++----------- 3 files changed, 84 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 28a4395..865540c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1635,7 +1635,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1651,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "http", "percent-encoding", @@ -1666,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1677,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "ruma-api", "ruma-common", @@ -1690,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "assign", "http", @@ -1709,7 +1709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "js_int", "ruma-api", @@ -1723,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "js_int", "ruma-common", @@ -1738,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "js_int", "ruma-api", @@ -1764,7 +1764,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1776,7 +1776,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "proc-macro2", "quote", @@ -1787,7 +1787,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "serde", "strum", @@ -1796,7 +1796,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "form_urlencoded", "itoa", @@ -1808,7 +1808,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ee66e30cbd58aecbbfde1d7008d7d6457deef87b" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" dependencies = [ "base64", "ring", diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index a68d8dd..f30825d 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -14,6 +14,7 @@ use ruma::{ }, federation, }, + directory::RoomNetwork, directory::{IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk}, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, @@ -33,24 +34,13 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let Ruma { - body: - get_public_rooms_filtered::IncomingRequest { - limit, - server, - since, - filter, - room_network, - }, - .. 
- } = body; get_public_rooms_filtered_helper( &db, - server.as_deref(), - limit, - since.as_deref(), - filter, // This is not used yet - Some(room_network), // This is not used + body.server.as_deref(), + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, ) .await } @@ -68,8 +58,8 @@ pub async fn get_public_rooms_route( body.server.as_deref(), body.limit, body.since.as_deref(), - None, // This is not used - None, // This is not used + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, ) .await? .0; @@ -122,8 +112,8 @@ pub async fn get_public_rooms_filtered_helper( server: Option<&ServerName>, limit: Option, since: Option<&str>, - _filter: Option, - _network: Option, + _filter: &IncomingFilter, + _network: &IncomingRoomNetwork, ) -> ConduitResult { if let Some(other_server) = server .clone() @@ -135,7 +125,7 @@ pub async fn get_public_rooms_filtered_helper( federation::directory::get_public_rooms::v1::Request { limit, since: since.as_deref(), - room_network: ruma::directory::RoomNetwork::Matrix, + room_network: RoomNetwork::Matrix, }, ) .await?; diff --git a/src/server_server.rs b/src/server_server.rs index fc1da00..6c53aed 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,8 +2,8 @@ use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Rum use http::header::{HeaderValue, AUTHORIZATION}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ + api::federation::directory::get_public_rooms_filtered, api::{ - client, federation::{ directory::get_public_rooms, discovery::{ @@ -13,6 +13,7 @@ use ruma::{ }, OutgoingRequest, }, + directory::{IncomingFilter, IncomingRoomNetwork}, EventId, ServerName, }; use serde_json::json; @@ -209,38 +210,24 @@ pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { feature = "conduit_bin", post("/_matrix/federation/v1/publicRooms", data = "") )] -pub async fn get_public_rooms_route( +pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { - let Ruma { - body: - get_public_rooms::v1::IncomingRequest { - room_network: _room_network, // TODO - limit, - since, - }, - .. - } = body; - - let client::r0::directory::get_public_rooms_filtered::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - } = client_server::get_public_rooms_filtered_helper( + body: Ruma>, +) -> ConduitResult { + let response = client_server::get_public_rooms_filtered_helper( &db, None, - limit, - since.as_deref(), - None, - Some(ruma::directory::IncomingRoomNetwork::Matrix), + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, ) .await? 
.0; - Ok(get_public_rooms::v1::Response { - chunk: chunk + Ok(get_public_rooms_filtered::v1::Response { + chunk: response + .chunk .into_iter() .map(|c| { // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk @@ -255,9 +242,52 @@ pub async fn get_public_rooms_route( }) .filter_map(|r| r.ok()) .collect(), - prev_batch, - next_batch, - total_room_count_estimate, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/publicRooms", data = "") +)] +pub async fn get_public_rooms_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let response = client_server::get_public_rooms_filtered_helper( + &db, + None, + body.limit, + body.since.as_deref(), + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, + ) + .await? + .0; + + Ok(get_public_rooms::v1::Response { + chunk: response + .chunk + .into_iter() + .map(|c| { + // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk + // to ruma::api::client::r0::directory::PublicRoomsChunk + Ok::<_, Error>( + serde_json::from_str( + &serde_json::to_string(&c) + .expect("PublicRoomsChunk::to_string always works"), + ) + .expect("federation and client-server PublicRoomsChunk are the same type"), + ) + }) + .filter_map(|r| r.ok()) + .collect(), + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, } .into()) } From aa5e9e607ecc739d0d991ea7221dadd0125f6d64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Sep 2020 14:20:38 +0200 Subject: [PATCH 0284/1727] feat: download media and thumbnails over federation --- src/client_server/media.rs | 69 +++++++++++++++++++++++++++++++++++--- src/database/media.rs | 28 ++++++++++++++-- 2 files changed, 90 insertions(+), 7 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index d077447..8f33743 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,5 +1,7 @@ use super::State; -use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma}; +use crate::{ + database::media::FileMeta, server_server, utils, ConduitResult, Database, Error, Ruma, +}; use ruma::api::client::{ error::ErrorKind, r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, @@ -35,7 +37,7 @@ pub fn create_content_route( utils::random_string(MXC_LENGTH) ); db.media - .create(mxc.clone(), &body.filename, &body.content_type, &body.file)?; + .create(mxc.clone(), &body.filename.as_deref(), &body.content_type, &body.file)?; Ok(create_content::Response { content_uri: mxc }.into()) } @@ -47,19 +49,25 @@ pub fn create_content_route( data = "" ) )] -pub fn get_content_route( +pub async fn get_content_route( db: State<'_, Database>, body: Ruma>, _server_name: String, _media_id: String, ) -> ConduitResult { + let mxc = format!( + "mxc://{}/{}", + db.globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + if let Some(FileMeta { filename, content_type, file, }) = db .media - .get(format!("mxc://{}/{}", body.server_name, body.media_id))? + .get(&mxc)? 
{ Ok(get_content::Response { file, @@ -67,6 +75,26 @@ pub fn get_content_route( content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional } .into()) + } else if body.allow_remote { + let get_content_response = server_server::send_request( + &db, + body.server_name.as_ref(), + get_content::Request { + allow_remote: false, + server_name: &body.server_name, + media_id: &body.media_id, + }, + ) + .await?; + + db.media.create( + mxc, + &Some(&get_content_response.content_disposition), + &get_content_response.content_type, + &get_content_response.file, + )?; + + Ok(get_content_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -79,7 +107,7 @@ pub fn get_content_route( data = "" ) )] -pub fn get_content_thumbnail_route( +pub async fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma>, _server_name: String, @@ -97,6 +125,37 @@ pub fn get_content_thumbnail_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, )? { Ok(get_content_thumbnail::Response { file, content_type }.into()) + } else if body.allow_remote { + let get_thumbnail_response = server_server::send_request( + &db, + body.server_name.as_ref(), + get_content_thumbnail::Request { + allow_remote: false, + height: body.height, + width: body.width, + method: body.method, + server_name: &body.server_name, + media_id: &body.media_id, + }, + ) + .await?; + + let mxc = format!( + "mxc://{}/{}", + db.globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + + db.media.upload_thumbnail( + mxc, + &None, + &get_thumbnail_response.content_type, + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &get_thumbnail_response.file, + )?; + + Ok(get_thumbnail_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } diff --git a/src/database/media.rs b/src/database/media.rs index 63fa11c..869d5d8 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -16,7 +16,7 @@ impl Media { pub fn create( &self, mxc: String, - filename: &Option, + filename: &Option<&str>, content_type: &str, file: &[u8], ) -> Result<()> { @@ -34,8 +34,32 @@ impl Media { Ok(()) } + /// Uploads or replaces a file thumbnail. + pub fn upload_thumbnail( + &self, + mxc: String, + filename: &Option, + content_type: &str, + width: u32, + height: u32, + file: &[u8], + ) -> Result<()> { + let mut key = mxc.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&width.to_be_bytes()); + key.extend_from_slice(&height.to_be_bytes()); + key.push(0xff); + key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(content_type.as_bytes()); + + self.mediaid_file.insert(key, file)?; + + Ok(()) + } + /// Downloads a file. 
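The media routes above fall back to federation when a file is not in the local store: if the mxc is unknown and `allow_remote` is set, the file (or thumbnail) is fetched from the origin server and cached before being returned. A condensed sketch of that control flow, with hypothetical stand-ins for `db.media` and the federation request (not Conduit's real API):

    use std::collections::HashMap;

    struct MediaCache {
        files: HashMap<String, Vec<u8>>,
    }

    impl MediaCache {
        fn get_or_fetch(
            &mut self,
            mxc: &str,
            allow_remote: bool,
            fetch_remote: impl FnOnce(&str) -> Option<Vec<u8>>,
        ) -> Option<Vec<u8>> {
            if let Some(file) = self.files.get(mxc) {
                return Some(file.clone()); // already stored locally
            }
            if allow_remote {
                // Ask the origin server once, then cache the result so the
                // next request for this mxc is served locally.
                let file = fetch_remote(mxc)?;
                self.files.insert(mxc.to_owned(), file.clone());
                return Some(file);
            }
            None // not cached and remote fetching disabled
        }
    }

Note that the outgoing federation request in the patch sets `allow_remote: false`, so a missing file cannot bounce between servers indefinitely.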
- pub fn get(&self, mxc: String) -> Result> { + pub fn get(&self, mxc: &str) -> Result> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail From d1099e9224f3b47d9de9135ab751edd9152dc3b0 Mon Sep 17 00:00:00 2001 From: Timo Date: Tue, 25 Aug 2020 11:49:51 +0200 Subject: [PATCH 0285/1727] Update dependencies --- Cargo.lock | 274 +------------------------------------ Cargo.toml | 57 +++++--- src/client_server/media.rs | 22 +-- 3 files changed, 56 insertions(+), 297 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 865540c..bde0b0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,80 +1,11 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "addr2line" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array", -] - -[[package]] -name = "aes" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" -dependencies = [ - "aes-soft", - "aesni", - "block-cipher", -] - -[[package]] -name = "aes-gcm" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" -dependencies = [ - "aead", - "aes", - "block-cipher", - "ghash", - "subtle", -] - -[[package]] -name = "aes-soft" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" -dependencies = [ - "block-cipher", - "byteorder", - "opaque-debug 0.2.3", -] - -[[package]] -name = "aesni" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" -dependencies = [ - "block-cipher", - "opaque-debug 0.2.3", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -90,12 +21,6 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" -[[package]] -name = "array-init" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30bbe2f5e3d117f55bd8c7a1f9191e4a5deba9f15f595bbea4f670c59c765db" - [[package]] name = "arrayref" version = "0.3.6" @@ -148,20 +73,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide 0.4.2", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.6" @@ -197,24 +108,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" -dependencies = [ - "generic-array", -] - [[package]] name = "bumpalo" version = "3.4.0" @@ -319,12 +212,7 @@ version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ - "aes-gcm", - "base64", - "hkdf", "percent-encoding", - "rand", - "sha2", "time 0.2.19", "version_check", ] @@ -345,12 +233,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - [[package]] name = "crc32fast" version = "1.2.0" @@ -386,16 +268,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "deflate" version = "0.8.6" @@ -436,22 +308,12 @@ dependencies = [ "syn", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "directories" -version = "2.0.2" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" dependencies = [ - "cfg-if", "dirs-sys", ] @@ -654,16 +516,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" -dependencies = [ - "typenum", - "version_check", -] - [[package]] name = "getrandom" version = "0.1.15" @@ -675,15 +527,6 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] -[[package]] -name = "ghash" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6e27f0689a6e15944bdce7e45425efb87eaa8ab0c6e87f11d0987a9133e2531" -dependencies = [ - "polyval", -] - [[package]] name = "gif" version = "0.10.3" @@ -694,12 +537,6 @@ dependencies = [ "lzw", ] -[[package]] -name = "gimli" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" - [[package]] name = "glob" version = "0.3.0" @@ -749,26 +586,6 @@ 
dependencies = [ "libc", ] -[[package]] -name = "hkdf" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe1149865383e4526a43aee8495f9a325f0b806c63ce6427d06336a590abbbc9" -dependencies = [ - "digest", - "hmac", -] - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac", - "digest", -] - [[package]] name = "http" version = "0.2.1" @@ -1052,16 +869,6 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9" -dependencies = [ - "adler", - "autocfg", -] - [[package]] name = "mio" version = "0.6.22" @@ -1184,30 +991,12 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" - [[package]] name = "once_cell" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - [[package]] name = "openssl" version = "0.10.30" @@ -1341,17 +1130,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", -] - -[[package]] -name = "polyval" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" -dependencies = [ - "cfg-if", - "universal-hash", + "miniz_oxide", ] [[package]] @@ -1828,12 +1607,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1975,19 +1748,6 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" -[[package]] -name = "sha2" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" -dependencies = [ - "block-buffer", - "cfg-if", - "cpuid-bool", - "digest", - "opaque-debug 0.3.0", -] - [[package]] name = "sharded-slab" version = "0.0.9" @@ -2015,12 +1775,10 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" -version = "0.32.1" +version = "0.34.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3dbbb8ee10611bd1d020767c27599ccbbf8365f7e0ed7e54429cc8b9433ad8" +checksum = "f72c064e63fbca3138ad07f3588c58093f1684f3a99f60dcfa6d46b87e60fde7" dependencies = [ - "array-init", - "backtrace", "crc32fast", "crossbeam-epoch", "crossbeam-utils", @@ -2156,12 +1914,6 @@ dependencies = [ "syn", 
] -[[package]] -name = "subtle" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" - [[package]] name = "syn" version = "1.0.40" @@ -2436,12 +2188,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - [[package]] name = "unicase" version = "2.6.0" @@ -2481,16 +2227,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "untrusted" version = "0.7.1" diff --git a/Cargo.toml b/Cargo.toml index 1b7a700..60296a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,28 +12,49 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +# Used to handle requests # TODO: This can become optional as soon as proper configs are supported -#rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", features = ["tls"] } # Used to handle requests -rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fed-fixes" } # Used for matrix spec type definitions and helpers +#rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", default-features = false, features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } + +# Used for matrix spec type definitions and helpers +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } +ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fed-fixes" } #ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } -tokio = "0.2.22" # Used for long polling -sled = "0.32.0" # Used for storing data permanently -log = "0.4.8" # Used for emitting log entries -http = "0.2.1" # Used for rocket<->ruma conversions -directories = "2.0.2" # Used to find data directory for default db path -js_int = "0.1.5" 
# Used for number types for ruma -serde_json = { version = "1.0.53", features = ["raw_value"] } # Used for ruma wrapper -serde = "1.0.111" # Used for pdu definition -rand = "0.7.3" # Used for secure identifiers -rust-argon2 = "0.8.2" # Used to hash passwords -reqwest = "0.10.6" # Used to send requests -thiserror = "1.0.19" # Used for conduit::Error type -image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to generate thumbnails for images -base64 = "0.12.3" # Used to encode server public key + +# Used when doing state resolution state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } + +# Used for long polling +tokio = "0.2.22" +# Used for storing data permanently +sled = "0.34.4" +# Used for emitting log entries +log = "0.4.11" +# Used for rocket<->ruma conversions +http = "0.2.1" +# Used to find data directory for default db path +directories = "3.0.1" +# Used for number types for ruma +js_int = "0.1.9" +# Used for ruma wrapper +serde_json = { version = "1.0.57", features = ["raw_value"] } +# Used for pdu definition +serde = "1.0.116" +# Used for secure identifiers +rand = "0.7.3" +# Used to hash passwords +rust-argon2 = "0.8.2" +# Used to send requests +reqwest = "0.10.8" +# Used for conduit::Error type +thiserror = "1.0.20" +# Used to generate thumbnails for images +image = { version = "0.23.9", default-features = false, features = ["jpeg", "png", "gif"] } +# Used to encode server public key +base64 = "0.12.3" +# Used when hashing the state ring = "0.16.15" [features] diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 8f33743..f897a67 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -36,8 +36,12 @@ pub fn create_content_route( db.globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media - .create(mxc.clone(), &body.filename.as_deref(), &body.content_type, &body.file)?; + db.media.create( + mxc.clone(), + &body.filename.as_deref(), + &body.content_type, + &body.file, + )?; Ok(create_content::Response { content_uri: mxc }.into()) } @@ -55,19 +59,17 @@ pub async fn get_content_route( _server_name: String, _media_id: String, ) -> ConduitResult { - let mxc = format!( - "mxc://{}/{}", - db.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); + let mxc = format!( + "mxc://{}/{}", + db.globals.server_name(), + utils::random_string(MXC_LENGTH) + ); if let Some(FileMeta { filename, content_type, file, - }) = db - .media - .get(&mxc)? + }) = db.media.get(&mxc)? 
{ Ok(get_content::Response { file, From 9f05ef926af6f0c7c7d2886b99b87ccd6e218e7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Sep 2020 16:23:15 +0200 Subject: [PATCH 0286/1727] fix: filter public room dir --- src/client_server/directory.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index f30825d..871a780 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -14,6 +14,7 @@ use ruma::{ }, federation, }, + directory::Filter, directory::RoomNetwork, directory::{IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk}, events::{ @@ -112,7 +113,7 @@ pub async fn get_public_rooms_filtered_helper( server: Option<&ServerName>, limit: Option, since: Option<&str>, - _filter: &IncomingFilter, + filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> ConduitResult { if let Some(other_server) = server @@ -122,9 +123,12 @@ pub async fn get_public_rooms_filtered_helper( let response = server_server::send_request( &db, other_server, - federation::directory::get_public_rooms::v1::Request { + federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), + filter: Filter { + generic_search_term: filter.generic_search_term.as_deref(), + }, room_network: RoomNetwork::Matrix, }, ) From f7816b11de0889fca761f55510a3313dcfa78a42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Sep 2020 20:23:19 +0200 Subject: [PATCH 0287/1727] feat: send messages over federation --- Cargo.lock | 32 ++--- src/client_server/account.rs | 4 +- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 206 +++++++++++++++++--------------- src/client_server/message.rs | 4 +- src/client_server/profile.rs | 8 +- src/client_server/redact.rs | 4 +- src/client_server/room.rs | 37 +++--- src/client_server/state.rs | 12 +- src/database.rs | 1 + src/database/rooms.rs | 177 ++++++++++++++++++--------- src/pdu.rs | 27 ++++- src/server_server.rs | 22 ++-- 15 files changed, 324 insertions(+), 218 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bde0b0d..e0de2a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1414,7 +1414,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1430,7 +1430,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "http", "percent-encoding", @@ -1445,7 +1445,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1456,7 +1456,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "ruma-api", "ruma-common", @@ -1469,7 +1469,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "assign", "http", @@ -1488,7 +1488,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "js_int", "ruma-api", @@ -1502,7 +1502,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "js_int", "ruma-common", @@ -1517,7 +1517,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1528,7 +1528,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "js_int", "ruma-api", @@ -1543,7 +1543,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1555,7 +1555,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "proc-macro2", "quote", @@ -1566,7 +1566,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "serde", "strum", @@ -1575,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "form_urlencoded", "itoa", @@ -1587,7 +1587,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#088382dbdc176e61fa5bde679ae38093865e7053" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" dependencies = [ "base64", "ring", @@ -1916,9 +1916,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963f7d3cc59b59b9325165add223142bbf1df27655d07789f109896d353d8350" +checksum = "6690e3e9f692504b941dc6c3b188fd28df054f7fb8469ab40680df52fdcc842b" dependencies = [ "proc-macro2", "quote", diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 3db933c..2ec9282 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -303,7 +303,7 @@ pub fn whoami_route(body: Ruma) -> ConduitResult, body: Ruma>, ) -> ConduitResult { @@ -366,7 +366,7 @@ pub fn deactivate_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; } // Remove devices and mark account as deactivated diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 0ec43f5..c5c514e 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -62,7 +62,7 @@ pub async fn get_alias_helper( ) -> ConduitResult { if room_alias.server_name() != db.globals.server_name() { let response = server_server::send_request( - &db, + &db.globals, room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 871a780..372ce98 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -121,7 +121,7 @@ pub async fn get_public_rooms_filtered_helper( .filter(|server| *server != db.globals.server_name().as_str()) { let response = server_server::send_request( - &db, + &db.globals, other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f897a67..8f7a9b9 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -79,7 +79,7 @@ pub async fn get_content_route( .into()) } else if body.allow_remote { let get_content_response = server_server::send_request( - &db, + &db.globals, body.server_name.as_ref(), get_content::Request { allow_remote: false, @@ -129,7 +129,7 @@ pub async fn get_content_thumbnail_route( Ok(get_content_thumbnail::Response { file, content_type }.into()) } else if body.allow_remote { let get_thumbnail_response = server_server::send_request( - &db, + &db.globals, body.server_name.as_ref(), get_content_thumbnail::Request { allow_remote: false, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 8d19402..18fb5a9 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -83,7 +83,7 @@ pub async fn join_room_by_id_or_alias_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/leave", data = "") )] -pub fn leave_room_route( +pub async fn leave_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -108,19 +108,21 @@ pub fn leave_room_route( event.membership = member::MembershipState::Leave; - db.rooms.build_and_append_pdu( - PduBuilder { - 
event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.account_data, + ) + .await?; Ok(leave_room::Response::new().into()) } @@ -129,33 +131,35 @@ pub fn leave_room_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/invite", data = "") )] -pub fn invite_user_route( +pub async fn invite_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.account_data, + ) + .await?; Ok(invite_user::Response.into()) } else { @@ -167,7 +171,7 @@ pub fn invite_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/kick", data = "") )] -pub fn kick_user_route( +pub async fn kick_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -193,19 +197,21 @@ pub fn kick_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.account_data, + ) + .await?; Ok(kick_user::Response::new().into()) } @@ -214,7 +220,7 @@ pub fn kick_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/ban", data = "") )] -pub fn ban_user_route( +pub async fn ban_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -248,19 +254,21 @@ pub fn 
ban_user_route( }, )?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.account_data, + ) + .await?; Ok(ban_user::Response::new().into()) } @@ -269,7 +277,7 @@ pub fn ban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/unban", data = "") )] -pub fn unban_user_route( +pub async fn unban_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -294,19 +302,21 @@ pub fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.account_data, + ) + .await?; Ok(unban_user::Response::new().into()) } @@ -429,7 +439,7 @@ async fn join_room_by_id_helper( for remote_server in servers { let make_join_response = server_server::send_request( - &db, + &db.globals, remote_server, federation::membership::create_join_event_template::v1::Request { room_id, @@ -490,7 +500,7 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"); let send_join_response = server_server::send_request( - &db, + &db.globals, remote_server, federation::membership::create_join_event::v2::Request { room_id, @@ -621,9 +631,12 @@ async fn join_room_by_id_helper( .expect("Found event_id in sorted events that is not in resolved state"); // We do not rebuild the PDU in this case only insert to DB - let pdu_id = - db.rooms - .append_pdu(&PduEvent::from(&**pdu), &db.globals, &db.account_data)?; + let pdu_id = db.rooms.append_pdu( + &PduEvent::from(&**pdu), + &serde_json::to_value(&**pdu).expect("PDU is valid value"), + &db.globals, + &db.account_data, + )?; if state_events.contains(ev_id) { state.insert( @@ -646,19 +659,22 @@ async fn join_room_by_id_helper( third_party_invite: None, }; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - )?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.account_data, + ) + .await?; } 
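Every membership handler in this hunk funnels the same m.room.member payload into build_and_append_pdu, with the affected user's ID as the state key. A minimal standalone sketch of that content shape, assuming only serde_json; the field names mirror the ones used in the diff (membership, displayname, avatar_url, is_direct, third_party_invite) and member_content is an illustrative helper, not a Conduit function:

// Sketch: the m.room.member content body these routes build before appending the PDU.
// The resulting PDU's state_key is the affected user's Matrix ID.
fn member_content(membership: &str, displayname: Option<&str>) -> serde_json::Value {
    serde_json::json!({
        "membership": membership,      // "invite", "join", "leave" or "ban"
        "displayname": displayname,    // copied from the user's profile, may be null
        "avatar_url": null,
        "is_direct": null,
        "third_party_invite": null,
    })
}

fn main() {
    println!("{}", member_content("leave", None));
}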
Ok(join_room_by_id::Response::new(room_id.clone()).into()) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 8a09aba..4ba0d9f 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -17,7 +17,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") )] -pub fn send_message_event_route( +pub async fn send_message_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -67,7 +67,7 @@ pub fn send_message_event_route( &body.room_id, &db.globals, &db.account_data, - )?; + ).await?; db.transaction_ids .add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index c1c0253..be893e1 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -19,7 +19,7 @@ use std::convert::TryInto; feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/displayname", data = "") )] -pub fn set_displayname_route( +pub async fn set_displayname_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -64,7 +64,7 @@ pub fn set_displayname_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // Presence update db.rooms.edus.update_presence( @@ -110,7 +110,7 @@ pub fn get_displayname_route( feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] -pub fn set_avatar_url_route( +pub async fn set_avatar_url_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -167,7 +167,7 @@ pub fn set_avatar_url_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 5117348..701fc00 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -12,7 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") )] -pub fn redact_event_route( +pub async fn redact_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -33,7 +33,7 @@ pub fn redact_event_route( &body.room_id, &db.globals, &db.account_data, - )?; + ).await?; Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index a5280cf..0e5c571 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -20,7 +20,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/createRoom", data = "") )] -pub fn create_room_route( +pub async fn create_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -65,7 +65,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 2. Let the room creator join db.rooms.build_and_append_pdu( @@ -87,7 +87,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 3. Power levels let mut users = BTreeMap::new(); @@ -129,7 +129,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 4. 
Events set by preset @@ -162,7 +162,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 4.2 History Visibility db.rooms.build_and_append_pdu( @@ -180,7 +180,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 4.3 Guest Access db.rooms.build_and_append_pdu( @@ -206,7 +206,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; // 5. Events listed in initial_state for event in &body.initial_state { @@ -226,7 +226,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; } // 6. Events implied by name and topic @@ -248,7 +248,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; } if let Some(topic) = &body.topic { @@ -267,7 +267,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; } // 7. Events implied by invite (and TODO: invite_3pid) @@ -291,7 +291,7 @@ pub fn create_room_route( &room_id, &db.globals, &db.account_data, - )?; + ).await?; } // Homeserver specific stuff @@ -337,7 +337,7 @@ pub fn get_room_event_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") )] -pub fn upgrade_room_route( +pub async fn upgrade_room_route( db: State<'_, Database>, body: Ruma>, _room_id: String, @@ -379,7 +379,7 @@ pub fn upgrade_room_route( &body.room_id, &db.globals, &db.account_data, - )?; + ).await?; // Get the old room federations status let federate = serde_json::from_value::>( @@ -419,7 +419,7 @@ pub fn upgrade_room_route( &replacement_room, &db.globals, &db.account_data, - )?; + ).await?; // Join the new room db.rooms.build_and_append_pdu( @@ -441,7 +441,7 @@ pub fn upgrade_room_route( &replacement_room, &db.globals, &db.account_data, - )?; + ).await?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ @@ -475,7 +475,7 @@ pub fn upgrade_room_route( &replacement_room, &db.globals, &db.account_data, - )?; + ).await?; } // Moves any local aliases to the new room @@ -505,7 +505,7 @@ pub fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - db.rooms + let _ = db.rooms .build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, @@ -519,8 +519,7 @@ pub fn upgrade_room_route( &body.room_id, &db.globals, &db.account_data, - ) - .ok(); + ).await; // Return the replacement room id Ok(upgrade_room::Response { replacement_room }.into()) diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 1fe3cd6..e9d20e2 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -19,7 +19,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] -pub fn send_state_event_for_key_route( +pub async fn send_state_event_for_key_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -41,7 +41,7 @@ pub fn send_state_event_for_key_route( content, &body.room_id, Some(body.state_key.to_owned()), - )?) + ).await?) 
.into(), ) } @@ -50,7 +50,7 @@ pub fn send_state_event_for_key_route( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] -pub fn send_state_event_for_empty_key_route( +pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -80,7 +80,7 @@ pub fn send_state_event_for_empty_key_route( json, &body.room_id, Some("".into()), - )?) + ).await?) .into(), ) } @@ -177,7 +177,7 @@ pub fn get_state_events_for_empty_key_route( .into()) } -pub fn send_state_event_for_key_helper( +pub async fn send_state_event_for_key_helper( db: &Database, sender: &UserId, content: &AnyStateEventContent, @@ -223,7 +223,7 @@ pub fn send_state_event_for_key_helper( &room_id, &db.globals, &db.account_data, - )?; + ).await?; Ok(event_id) } diff --git a/src/database.rs b/src/database.rs index 83f30c9..e1a356c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -109,6 +109,7 @@ impl Database { tokenids: db.open_tree("tokenids")?, + roomserverids: db.open_tree("roomserverids")?, userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b538c85..ba54e7f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,11 +2,12 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, server_server, utils, Error, PduEvent, Result}; use log::error; use ring::digest; use ruma::{ api::client::error::ErrorKind, + api::federation, events::{ ignored_user_list, room::{ @@ -15,7 +16,7 @@ use ruma::{ }, EventType, }, - EventId, Raw, RoomAliasId, RoomId, UserId, + EventId, Raw, RoomAliasId, RoomId, ServerName, UserId, }; use sled::IVec; use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; @@ -25,6 +26,7 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::SystemTime, }; /// The unique identifier of each state group. @@ -44,6 +46,8 @@ pub struct Rooms { pub(super) tokenids: sled::Tree, // TokenId = RoomId + Token + PduId + /// Participating servers in a room. + pub(super) roomserverids: sled::Tree, // RoomServerId = RoomId + ServerName pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, @@ -169,8 +173,7 @@ impl Rooms { Ok(events) } - // This fetches auth events from the current state using the - /// full `roomstateid_pdu` tree. + /// This fetches auth events from the current state. pub fn get_auth_events( &self, room_id: &RoomId, @@ -472,17 +475,10 @@ impl Rooms { pub fn append_pdu( &self, pdu: &PduEvent, + pdu_json: &serde_json::Value, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, ) -> Result> { - let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); - ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), - &mut pdu_json, - ) - .expect("event is valid, we just created it"); - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Increment the last index and use that @@ -610,7 +606,7 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
- pub fn build_and_append_pdu( + pub async fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, @@ -799,22 +795,59 @@ impl Rooms { signatures: BTreeMap::new(), }; + // Hash and sign + let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); + pdu_json + .as_object_mut() + .expect("json is object") + .remove("event_id"); + + ruma::signatures::hash_and_sign_event( + globals.server_name().as_str(), + globals.keypair(), + &mut pdu_json, + ) + .expect("event is valid, we just created it"); + // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash( - &serde_json::to_value(&pdu).expect("event is valid, we just created it") - ) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash(&pdu_json) + .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); - let pdu_id = self.append_pdu(&pdu, globals, account_data)?; + pdu_json + .as_object_mut() + .expect("json is object") + .insert("event_id".to_owned(), pdu.event_id.to_string().into()); + + let pdu_id = self.append_pdu(&pdu, &pdu_json, globals, account_data)?; if pdu.state_key.is_some() { self.append_to_state(&pdu_id, &pdu)?; } + pdu_json + .as_object_mut() + .expect("json is object") + .remove("event_id"); + + let response = server_server::send_request( + &globals, + "koesters.xyz".try_into().unwrap(), + federation::transactions::send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &[serde_json::from_value(pdu_json).expect("Raw::from_value always works")], + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &utils::random_string(16), + }, + ) + .await; + + let _ = dbg!(response); + Ok(pdu.event_id) } @@ -957,12 +990,17 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - mut member_content: member::MemberEventContent, + member_content: member::MemberEventContent, sender: &UserId, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { let membership = member_content.membership; + + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -1056,6 +1094,7 @@ impl Rooms { } } + self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invited.remove(&userroom_id)?; @@ -1075,25 +1114,10 @@ impl Rooms { }); if is_ignored { - member_content.membership = member::MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - globals, - account_data, - )?; - return Ok(()); } + + self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_invited.insert(&userroom_id, &[])?; self.roomuserid_invited.insert(&roomuser_id, &[])?; self.userroomid_joined.remove(&userroom_id)?; @@ -1101,6 +1125,14 @@ impl Rooms { self.userroomid_left.remove(&userroom_id)?; } member::MembershipState::Leave | member::MembershipState::Ban => { + if self + .room_members(room_id) + .chain(self.room_members_invited(room_id)) + 
.filter_map(|r| r.ok()) + .all(|u| u.server_name() != user_id.server_name()) + { + self.roomserverids.remove(&roomserver_id)?; + } self.userroomid_left.insert(&userroom_id, &[])?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; @@ -1294,10 +1326,34 @@ impl Rooms { }) } + /// Returns an iterator over all joined members of a room. + pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.roomserverids.scan_prefix(prefix).keys().map(|key| { + Ok(Box::::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Server name in roomserverids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid."))?) + }) + } + /// Returns an iterator over all joined members of a room. pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + self.roomuserid_joined - .scan_prefix(room_id.as_bytes()) + .scan_prefix(prefix) .keys() .map(|key| { Ok(UserId::try_from( @@ -1317,8 +1373,11 @@ impl Rooms { /// Returns an iterator over all User IDs who ever joined a room. pub fn room_useroncejoined(&self, room_id: &RoomId) -> impl Iterator> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + self.roomuseroncejoinedids - .scan_prefix(room_id.to_string()) + .scan_prefix(prefix) .keys() .map(|key| { Ok(UserId::try_from( @@ -1338,8 +1397,11 @@ impl Rooms { /// Returns an iterator over all invited members of a room. pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + self.roomuserid_invited - .scan_prefix(room_id.as_bytes()) + .scan_prefix(prefix) .keys() .map(|key| { Ok(UserId::try_from( @@ -1380,8 +1442,11 @@ impl Rooms { /// Returns an iterator over all rooms a user was invited to. pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + self.userroomid_invited - .scan_prefix(&user_id.as_bytes()) + .scan_prefix(prefix) .keys() .map(|key| { Ok(RoomId::try_from( @@ -1401,23 +1466,23 @@ impl Rooms { /// Returns an iterator over all rooms a user left. pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { - self.userroomid_left - .scan_prefix(&user_id.as_bytes()) - .keys() - .map(|key| { - Ok(RoomId::try_from( - utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_left is invalid unicode.") - })?, + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.userroomid_left.scan_prefix(prefix).keys().map(|key| { + Ok(RoomId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("Room ID in userroomid_left is invalid."))?) - }) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_left is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_left is invalid."))?) 
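The new roomserverids tree and the tightened prefix scans above all rely on one byte-level key layout: the room ID, a 0xff separator, then the server name (or user ID), with the final segment recovered via rsplit on 0xff. A self-contained sketch of that layout, std only; encode_key and last_segment are illustrative helpers rather than Conduit functions:

// Sketch of the RoomId ++ 0xff ++ ServerName key layout behind roomserverids.
fn encode_key(room_id: &str, server_name: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // separator, also the end of the scan prefix
    key.extend_from_slice(server_name.as_bytes());
    key
}

// Mirrors the diff's `key.rsplit(|&b| b == 0xff).next()` to read the last segment back.
fn last_segment(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns an element")
}

fn main() {
    let key = encode_key("!room:example.org", "matrix.org");
    assert_eq!(last_segment(&key), b"matrix.org");

    // A prefix scan for this room uses the room ID plus the 0xff separator,
    // so keys belonging to other rooms never match.
    let mut prefix = b"!room:example.org".to_vec();
    prefix.push(0xff);
    assert!(key.starts_with(&prefix));
}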
+ }) } pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { diff --git a/src/pdu.rs b/src/pdu.rs index 7f842e2..c904230 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -6,7 +6,7 @@ use ruma::{ AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, EventId, Raw, RoomId, ServerKeyId, ServerName, UserId, -}; +events::pdu::PduStub}; use serde::{Deserialize, Serialize}; use serde_json::json; use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::UNIX_EPOCH}; @@ -198,6 +198,31 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + + pub fn to_outgoing_federation_event(&self) -> Raw { + let mut json = json!({ + "room_id": self.room_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "type": self.kind, + "content": self.content, + "prev_events": self.prev_events, + "depth": self.depth, + "auth_events": self.auth_events, + "unsigned": self.unsigned, + "hashes": self.hashes, + "signatures": self.signatures, + }); + + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } } impl From<&state_res::StateEvent> for PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 6c53aed..9f4be13 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,9 +24,9 @@ use std::{ time::{Duration, SystemTime}, }; -pub async fn request_well_known(db: &crate::Database, destination: &str) -> Option { +pub async fn request_well_known(globals: &crate::database::globals::Globals, destination: &str) -> Option { let body: serde_json::Value = serde_json::from_str( - &db.globals + &globals .reqwest_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -44,7 +44,7 @@ pub async fn request_well_known(db: &crate::Database, destination: &str) -> Opti } pub async fn send_request( - db: &crate::Database, + globals: &crate::database::globals::Globals, destination: &ServerName, request: T, ) -> Result @@ -52,7 +52,7 @@ where T: Debug, { let actual_destination = "https://".to_owned() - + &request_well_known(db, &destination.as_str()) + + &request_well_known(globals, &destination.as_str()) .await .unwrap_or(destination.as_str().to_owned() + ":8448"); @@ -81,14 +81,14 @@ where ); request_map.insert( "origin".to_owned(), - db.globals.server_name().as_str().into(), + globals.server_name().as_str().into(), ); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = request_map.into(); ruma::signatures::sign_json( - db.globals.server_name().as_str(), - db.globals.keypair(), + globals.server_name().as_str(), + globals.keypair(), &mut request_json, ) .unwrap(); @@ -110,7 +110,7 @@ where AUTHORIZATION, HeaderValue::from_str(&format!( "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - db.globals.server_name(), + globals.server_name(), s.0, s.1 )) @@ -122,7 +122,7 @@ where let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); - let reqwest_response = db.globals.reqwest_client().execute(reqwest_request).await; + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -317,9 +317,9 @@ pub fn send_transaction_message_route<'a>( .insert("event_id".to_owned(), event_id.to_string().into()); let pdu 
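send_request above signs the request JSON and carries each signature in an X-Matrix Authorization header; the header itself is plain string formatting. A small sketch of just that step with placeholder values, since the real origin, key id and signature come from the server name and from ruma::signatures::sign_json as shown in the diff:

// Sketch: build the X-Matrix Authorization header value the way send_request does.
// origin, key_id and sig are placeholders for illustration only.
fn x_matrix_header(origin: &str, key_id: &str, sig: &str) -> String {
    format!("X-Matrix origin={},key=\"{}\",sig=\"{}\"", origin, key_id, sig)
}

fn main() {
    let header = x_matrix_header("example.org", "ed25519:1", "base64signature");
    assert_eq!(
        header,
        "X-Matrix origin=example.org,key=\"ed25519:1\",sig=\"base64signature\""
    );
}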
= - serde_json::from_value::(value).expect("all ruma pdus are conduit pdus"); + serde_json::from_value::(value.clone()).expect("all ruma pdus are conduit pdus"); if db.rooms.exists(&pdu.room_id)? { - db.rooms.append_pdu(&pdu, &db.globals, &db.account_data)?; + db.rooms.append_pdu(&pdu, &value, &db.globals, &db.account_data)?; } } Ok(send_transaction_message::v1::Response { From 71500b14b902321e91cab432d55dd3f3ae7aedfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 08:16:20 +0200 Subject: [PATCH 0288/1727] fix: send to all servers and fix media store --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 38 ++++++++-------------------- src/client_server/membership.rs | 4 +-- src/database/rooms.rs | 44 ++++++++++++++++++++++----------- src/server_server.rs | 29 ++++++++++++---------- 6 files changed, 60 insertions(+), 59 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index c5c514e..c2c3eb9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -63,7 +63,7 @@ pub async fn get_alias_helper( if room_alias.server_name() != db.globals.server_name() { let response = server_server::send_request( &db.globals, - room_alias.server_name(), + room_alias.server_name().to_owned(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 372ce98..c82a15f 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -122,7 +122,7 @@ pub async fn get_public_rooms_filtered_helper( { let response = server_server::send_request( &db.globals, - other_server, + other_server.to_owned(), federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 8f7a9b9..8a93d49 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -48,22 +48,13 @@ pub fn create_content_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/media/r0/download/<_server_name>/<_media_id>", - data = "" - ) + get("/_matrix/media/r0/download/<_>/<_>", data = "") )] pub async fn get_content_route( db: State<'_, Database>, body: Ruma>, - _server_name: String, - _media_id: String, ) -> ConduitResult { - let mxc = format!( - "mxc://{}/{}", - db.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { filename, @@ -77,10 +68,10 @@ pub async fn get_content_route( content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional } .into()) - } else if body.allow_remote { + } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_content_response = server_server::send_request( &db.globals, - body.server_name.as_ref(), + body.server_name.clone(), get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -104,21 +95,18 @@ pub async fn get_content_route( #[cfg_attr( feature = "conduit_bin", - get( - "/_matrix/media/r0/thumbnail/<_server_name>/<_media_id>", - data = "" - ) + get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") )] pub async fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma>, - _server_name: String, - _media_id: String, ) -> ConduitResult { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + if let Some(FileMeta { content_type, file, 
.. }) = db.media.get_thumbnail( - format!("mxc://{}/{}", body.server_name, body.media_id), + mxc.clone(), body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, @@ -127,10 +115,10 @@ pub async fn get_content_thumbnail_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, )? { Ok(get_content_thumbnail::Response { file, content_type }.into()) - } else if body.allow_remote { + } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = server_server::send_request( &db.globals, - body.server_name.as_ref(), + body.server_name.clone(), get_content_thumbnail::Request { allow_remote: false, height: body.height, @@ -142,12 +130,6 @@ pub async fn get_content_thumbnail_route( ) .await?; - let mxc = format!( - "mxc://{}/{}", - db.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); - db.media.upload_thumbnail( mxc, &None, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 18fb5a9..f60601f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -440,7 +440,7 @@ async fn join_room_by_id_helper( for remote_server in servers { let make_join_response = server_server::send_request( &db.globals, - remote_server, + remote_server.clone(), federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_id, @@ -501,7 +501,7 @@ async fn join_room_by_id_helper( let send_join_response = server_server::send_request( &db.globals, - remote_server, + remote_server.clone(), federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ba54e7f..3c3a0b2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1,9 +1,10 @@ mod edus; pub use edus::RoomEdus; +use rocket::futures; use crate::{pdu::PduBuilder, server_server, utils, Error, PduEvent, Result}; -use log::error; +use log::{error, warn}; use ring::digest; use ruma::{ api::client::error::ErrorKind, @@ -833,20 +834,35 @@ impl Rooms { .expect("json is object") .remove("event_id"); - let response = server_server::send_request( - &globals, - "koesters.xyz".try_into().unwrap(), - federation::transactions::send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus: &[serde_json::from_value(pdu_json).expect("Raw::from_value always works")], - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), - }, - ) - .await; + let raw_json = + serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); - let _ = dbg!(response); + let pdus = &[raw_json]; + let transaction_id = utils::random_string(16); + + for result in futures::future::join_all( + self.room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != globals.server_name()) + .map(|server| { + server_server::send_request( + &globals, + server, + federation::transactions::send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &transaction_id, + }, + ) + }), + ) + .await { + if let Err(e) = result { + warn!("{}", e); + } + } Ok(pdu.event_id) } diff --git a/src/server_server.rs b/src/server_server.rs index 9f4be13..da5a6c1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,6 @@ use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use http::header::{HeaderValue, 
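The rewritten sending path fans one transaction out to every remote server that roomserverids lists for the room, awaits all of the requests with futures::future::join_all, and only logs per-server failures. A reduced, runnable sketch of that pattern using the futures crate; send_to, the server names, and println! (standing in for log::warn!) are placeholders:

use futures::future::join_all;

// Stand-in for server_server::send_request: pretend one destination is down.
async fn send_to(server: &'static str) -> Result<(), String> {
    if server == "unreachable.example" {
        Err(format!("{}: connection refused", server))
    } else {
        Ok(())
    }
}

fn main() {
    let servers = ["matrix.org", "unreachable.example", "example.com"];

    // Send to every server concurrently; a single failure does not abort the rest.
    let results = futures::executor::block_on(join_all(
        servers.iter().copied().map(|server| send_to(server)),
    ));

    for result in results {
        if let Err(e) = result {
            println!("{}", e); // the real code uses log::warn! here
        }
    }
}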
AUTHORIZATION}; +use log::warn; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::federation::directory::get_public_rooms_filtered, @@ -24,7 +25,10 @@ use std::{ time::{Duration, SystemTime}, }; -pub async fn request_well_known(globals: &crate::database::globals::Globals, destination: &str) -> Option { +pub async fn request_well_known( + globals: &crate::database::globals::Globals, + destination: &str, +) -> Option { let body: serde_json::Value = serde_json::from_str( &globals .reqwest_client() @@ -45,7 +49,7 @@ pub async fn request_well_known(globals: &crate::database::globals::Globals, des pub async fn send_request( globals: &crate::database::globals::Globals, - destination: &ServerName, + destination: Box, request: T, ) -> Result where @@ -79,10 +83,7 @@ where .to_string() .into(), ); - request_map.insert( - "origin".to_owned(), - globals.server_name().as_str().into(), - ); + request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = request_map.into(); @@ -144,10 +145,11 @@ where .into_iter() .collect(); - Ok( - T::IncomingResponse::try_from(http_response.body(body).unwrap()) - .expect("TODO: error handle other server errors"), - ) + let response = T::IncomingResponse::try_from(http_response.body(body).unwrap()); + response.map_err(|e| { + warn!("{}", e); + Error::BadServerResponse("Server returned bad response.") + }) } Err(e) => Err(e.into()), } @@ -316,10 +318,11 @@ pub fn send_transaction_message_route<'a>( .expect("ruma pdus are json objects") .insert("event_id".to_owned(), event_id.to_string().into()); - let pdu = - serde_json::from_value::(value.clone()).expect("all ruma pdus are conduit pdus"); + let pdu = serde_json::from_value::(value.clone()) + .expect("all ruma pdus are conduit pdus"); if db.rooms.exists(&pdu.room_id)? 
{ - db.rooms.append_pdu(&pdu, &value, &db.globals, &db.account_data)?; + db.rooms + .append_pdu(&pdu, &value, &db.globals, &db.account_data)?; } } Ok(send_transaction_message::v1::Response { From 0b263208e39a735fcb6970d168494783ff9994a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 08:55:02 +0200 Subject: [PATCH 0289/1727] fix: don't panic on bad server names --- src/server_server.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index da5a6c1..40ad654 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -62,14 +62,18 @@ where let mut http_request = request .try_into_http_request(&actual_destination, Some("")) - .unwrap(); + .map_err(|e| { + warn!("{}: {}", actual_destination, e); + Error::BadServerResponse("Invalid destination") + })?; let mut request_map = serde_json::Map::new(); if !http_request.body().is_empty() { request_map.insert( "content".to_owned(), - serde_json::from_slice(http_request.body()).unwrap(), + serde_json::from_slice(http_request.body()) + .expect("body is valid json, we just created it"), ); }; @@ -92,7 +96,7 @@ where globals.keypair(), &mut request_json, ) - .unwrap(); + .expect("our request json is what ruma expects"); let signatures = request_json["signatures"] .as_object() @@ -145,7 +149,11 @@ where .into_iter() .collect(); - let response = T::IncomingResponse::try_from(http_response.body(body).unwrap()); + let response = T::IncomingResponse::try_from( + http_response + .body(body) + .expect("reqwest body is valid http body"), + ); response.map_err(|e| { warn!("{}", e); Error::BadServerResponse("Server returned bad response.") From b7ab57897bc96e468421cf82ecd7d49e75c3f7dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 16:13:54 +0200 Subject: [PATCH 0290/1727] fix: sending slowness --- Cargo.lock | 2 +- src/client_server/account.rs | 30 +- src/client_server/membership.rs | 6 + src/client_server/message.rs | 42 +-- src/client_server/profile.rs | 138 +++++---- src/client_server/redact.rs | 36 ++- src/client_server/room.rs | 516 ++++++++++++++++++-------------- src/client_server/state.rs | 48 +-- src/database.rs | 5 + src/database/globals.rs | 21 +- src/database/rooms.rs | 58 ++-- src/database/rooms/edus.rs | 1 + src/database/sending.rs | 83 +++++ src/main.rs | 2 + src/pdu.rs | 3 +- 15 files changed, 574 insertions(+), 417 deletions(-) create mode 100644 src/database/sending.rs diff --git a/Cargo.lock b/Cargo.lock index e0de2a7..30144ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1831,7 +1831,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#a9186476b748c901fbf4356414247a0b3ac01b5f" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#1d01b6e65b6afd50e65085fb40f1e7d2782f519e" dependencies = [ "itertools", "js_int", diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 2ec9282..7e0f942 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -354,19 +354,23 @@ pub async fn deactivate_route( third_party_invite: None, }; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - 
&sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } // Remove devices and mark account as deactivated diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f60601f..c4eed95 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -120,6 +120,7 @@ pub async fn leave_room_route( &sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; @@ -157,6 +158,7 @@ pub async fn invite_user_route( &sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; @@ -209,6 +211,7 @@ pub async fn kick_user_route( &sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; @@ -266,6 +269,7 @@ pub async fn ban_user_route( &sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; @@ -314,6 +318,7 @@ pub async fn unban_user_route( &sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; @@ -672,6 +677,7 @@ async fn join_room_by_id_helper( &sender_id, &room_id, &db.globals, + &db.sending, &db.account_data, ) .await?; diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 4ba0d9f..3944d5b 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -49,25 +49,29 @@ pub async fn send_message_event_route( let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = db.rooms.build_and_append_pdu( - PduBuilder { - event_type: body.content.event_type().into(), - content: serde_json::from_str( - body.json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - unsigned: Some(unsigned), - state_key: None, - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - ).await?; + let event_id = db + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: body.content.event_type().into(), + content: serde_json::from_str( + body.json_body + .as_ref() + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
+ .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; db.transaction_ids .add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index be893e1..53893c0 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -31,40 +31,43 @@ pub async fn set_displayname_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( "Tried to send displayname update for user not in the room.", ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // Presence update db.rooms.edus.update_presence( @@ -134,40 +137,43 @@ pub async fn set_avatar_url_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( - "Tried to send avatar url update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
- }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send avatar url update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 701fc00..24df8dd 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -18,22 +18,26 @@ pub async fn redact_event_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let event_id = db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomRedaction, - content: serde_json::to_value(redaction::RedactionEventContent { - reason: body.reason.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: Some(body.event_id.clone()), - }, - &sender_id, - &body.room_id, - &db.globals, - &db.account_data, - ).await?; + let event_id = db + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomRedaction, + content: serde_json::to_value(redaction::RedactionEventContent { + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.clone()), + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 0e5c571..d21148b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -53,41 +53,47 @@ pub async fn create_room_route( content.room_version = RoomVersionId::Version6; // 1. The room create event - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: serde_json::to_value(content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: serde_json::to_value(content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 2. 
Let the room creator join - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 3. Power levels let mut users = BTreeMap::new(); @@ -117,19 +123,22 @@ pub async fn create_room_route( }) .expect("event is valid, we just created it") }; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: power_levels_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: power_levels_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 4. 
Events set by preset @@ -140,73 +149,84 @@ pub async fn create_room_route( }); // 4.1 Join Rules - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomJoinRules, - content: match preset { - create_room::RoomPreset::PublicChat => serde_json::to_value( - join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), - ) - .expect("event is valid, we just created it"), - // according to spec "invite" is the default - _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomJoinRules, + content: match preset { + create_room::RoomPreset::PublicChat => serde_json::to_value( + join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), + ) + .expect("event is valid, we just created it"), + // according to spec "invite" is the default + _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 4.2 History Visibility - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomHistoryVisibility, - content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomHistoryVisibility, + content: serde_json::to_value( + history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + ), + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 4.3 Guest Access - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomGuestAccess, - content: match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::Forbidden, + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomGuestAccess, + content: match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::Forbidden, + )) + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::CanJoin, )) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::CanJoin, - )) - .expect("event is valid, we just created it"), + .expect("event is valid, we just created it"), + }, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - 
).await?; + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // 5. Events listed in initial_state for event in &body.initial_state { @@ -220,78 +240,90 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + pdu_builder, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } // 6. Events implied by name and topic if let Some(name) = &body.name { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomName, - content: serde_json::to_value( - name::NameEventContent::new(name.clone()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") - })?, - ) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomName, + content: serde_json::to_value( + name::NameEventContent::new(name.clone()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") + })?, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } if let Some(topic) = &body.topic { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTopic, - content: serde_json::to_value(topic::TopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTopic, + content: serde_json::to_value(topic::TopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } // 7. 
Events implied by invite (and TODO: invite_3pid) for user in &body.invite { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user)?, - avatar_url: db.users.avatar_url(&user)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user)?, + avatar_url: db.users.avatar_url(&user)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } // Homeserver specific stuff @@ -363,23 +395,29 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTombstone, - content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent { - body: "This room has been replaced".to_string(), - replacement_room: replacement_room.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &body.room_id, - &db.globals, - &db.account_data, - ).await?; + let tombstone_event_id = db + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTombstone, + content: serde_json::to_value( + ruma::events::room::tombstone::TombstoneEventContent { + body: "This room has been replaced".to_string(), + replacement_room: replacement_room.clone(), + }, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // Get the old room federations status let federate = serde_json::from_value::>( @@ -406,42 +444,48 @@ pub async fn upgrade_room_route( create_event_content.room_version = new_version; create_event_content.predecessor = predecessor; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: serde_json::to_value(create_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: serde_json::to_value(create_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // Join the new room - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: 
serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ @@ -463,19 +507,22 @@ pub async fn upgrade_room_route( None => continue, // Skipping missing events. }; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type, - content: event_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.account_data, - ).await?; + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type, + content: event_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; } // Moves any local aliases to the new room @@ -505,7 +552,8 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = db.rooms + let _ = db + .rooms .build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, @@ -518,8 +566,10 @@ pub async fn upgrade_room_route( sender_id, &body.room_id, &db.globals, + &db.sending, &db.account_data, - ).await; + ) + .await; // Return the replacement room id Ok(upgrade_room::Response { replacement_room }.into()) diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e9d20e2..46182a1 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -33,17 +33,18 @@ pub async fn send_state_event_for_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - Ok( - send_state_event_for_key::Response::new(send_state_event_for_key_helper( + Ok(send_state_event_for_key::Response::new( + send_state_event_for_key_helper( &db, sender_id, &body.content, content, &body.room_id, Some(body.state_key.to_owned()), - ).await?) - .into(), + ) + .await?, ) + .into()) } #[cfg_attr( @@ -70,8 +71,8 @@ pub async fn send_state_event_for_empty_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - Ok( - send_state_event_for_empty_key::Response::new(send_state_event_for_key_helper( + Ok(send_state_event_for_empty_key::Response::new( + send_state_event_for_key_helper( &db, sender_id .as_ref() @@ -80,9 +81,10 @@ pub async fn send_state_event_for_empty_key_route( json, &body.room_id, Some("".into()), - ).await?) 
- .into(), + ) + .await?, ) + .into()) } #[cfg_attr( @@ -211,19 +213,23 @@ pub async fn send_state_event_for_key_helper( } } - let event_id = db.rooms.build_and_append_pdu( - PduBuilder { - event_type: content.event_type().into(), - content: json, - unsigned: None, - state_key, - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.account_data, - ).await?; + let event_id = db + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: content.event_type().into(), + content: json, + unsigned: None, + state_key, + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + ) + .await?; Ok(event_id) } diff --git a/src/database.rs b/src/database.rs index e1a356c..4b2cba1 100644 --- a/src/database.rs +++ b/src/database.rs @@ -3,6 +3,7 @@ pub mod globals; pub mod key_backups; pub mod media; pub mod rooms; +pub mod sending; pub mod transaction_ids; pub mod uiaa; pub mod users; @@ -25,6 +26,7 @@ pub struct Database { pub media: media::Media, pub key_backups: key_backups::KeyBackups, pub transaction_ids: transaction_ids::TransactionIds, + pub sending: sending::Sending, pub _db: sled::Db, } @@ -135,6 +137,9 @@ impl Database { transaction_ids: transaction_ids::TransactionIds { userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, }, + sending: sending::Sending { + serverpduids: db.open_tree("serverpduids")?, + }, _db: db, }) } diff --git a/src/database/globals.rs b/src/database/globals.rs index 5db2806..8951425 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,12 +1,13 @@ use crate::{utils, Error, Result}; use ruma::ServerName; -use std::convert::TryInto; +use std::{convert::TryInto, sync::Arc}; pub const COUNTER: &str = "c"; +#[derive(Clone)] pub struct Globals { pub(super) globals: sled::Tree, - keypair: ruma::signatures::Ed25519KeyPair, + keypair: Arc, reqwest_client: reqwest::Client, server_name: Box, max_request_size: u32, @@ -16,13 +17,15 @@ pub struct Globals { impl Globals { pub fn load(globals: sled::Tree, config: &rocket::Config) -> Result { - let keypair = ruma::signatures::Ed25519KeyPair::new( - &*globals - .update_and_fetch("keypair", utils::generate_keypair)? - .expect("utils::generate_keypair always returns Some"), - "key1".to_owned(), - ) - .map_err(|_| Error::bad_database("Private or public keys are invalid."))?; + let keypair = Arc::new( + ruma::signatures::Ed25519KeyPair::new( + &*globals + .update_and_fetch("keypair", utils::generate_keypair)? + .expect("utils::generate_keypair always returns Some"), + "key1".to_owned(), + ) + .map_err(|_| Error::bad_database("Private or public keys are invalid."))?, + ); Ok(Self { globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3c3a0b2..2246a61 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1,14 +1,12 @@ mod edus; pub use edus::RoomEdus; -use rocket::futures; -use crate::{pdu::PduBuilder, server_server, utils, Error, PduEvent, Result}; -use log::{error, warn}; +use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use log::error; use ring::digest; use ruma::{ api::client::error::ErrorKind, - api::federation, events::{ ignored_user_list, room::{ @@ -27,7 +25,6 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::SystemTime, }; /// The unique identifier of each state group. @@ -36,6 +33,7 @@ use std::{ /// hashing the entire state. 
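Note on the new outbound-sending design: the src/database/sending.rs hunks further below queue outgoing PDUs by writing a key of the form server name bytes, a 0xff separator, then the PduId into the serverpduids tree (send_pdu), and the background handler recovers both parts again with splitn. The following is only a minimal standalone sketch of that key layout, using plain std and made-up values rather than Conduit's actual types:

    // Sketch of the serverpduids key layout: ServerName + 0xff + PduId.
    fn encode_serverpduid(server: &str, pdu_id: &[u8]) -> Vec<u8> {
        let mut key = server.as_bytes().to_vec();
        key.push(0xff); // separator byte, as in Sending::send_pdu
        key.extend_from_slice(pdu_id);
        key
    }

    // Inverse of the above, mirroring the splitn(2, ..) decoding in start_handler.
    fn decode_serverpduid(key: &[u8]) -> Option<(&[u8], &[u8])> {
        let mut parts = key.splitn(2, |&b| b == 0xff);
        Some((parts.next()?, parts.next()?))
    }

    fn main() {
        let pdu_id = b"!dummyroom:server.name\x00\x00\x00\x00\x00\x00\x00\x2a"; // made-up PduId bytes
        let key = encode_serverpduid("other.server.name", pdu_id);
        let (server, id) = decode_serverpduid(&key).expect("key contains a separator");
        assert_eq!(server, "other.server.name".as_bytes());
        assert_eq!(id, &pdu_id[..]);
    }

Because splitn(2, ..) stops at the first 0xff, the PduId portion may itself contain 0xff bytes without being truncated.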
pub type StateHashId = Vec; +#[derive(Clone)] pub struct Rooms { pub edus: edus::RoomEdus, pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count @@ -415,6 +413,16 @@ impl Rooms { }) } + /// Returns the pdu. + pub fn get_pdu_json_from_id(&self, pdu_id: &IVec) -> Result> { + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &IVec, pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(&pdu_id)?.is_some() { @@ -613,6 +621,7 @@ impl Rooms { sender: &UserId, room_id: &RoomId, globals: &super::globals::Globals, + sending: &super::sending::Sending, account_data: &super::account_data::AccountData, ) -> Result { let PduBuilder { @@ -829,39 +838,12 @@ impl Rooms { self.append_to_state(&pdu_id, &pdu)?; } - pdu_json - .as_object_mut() - .expect("json is object") - .remove("event_id"); - - let raw_json = - serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); - - let pdus = &[raw_json]; - let transaction_id = utils::random_string(16); - - for result in futures::future::join_all( - self.room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) - .map(|server| { - server_server::send_request( - &globals, - server, - federation::transactions::send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus, - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &transaction_id, - }, - ) - }), - ) - .await { - if let Err(e) = result { - warn!("{}", e); - } + for server in self + .room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != globals.server_name()) + { + sending.send_pdu(server, &pdu_id)?; } Ok(pdu.event_id) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index d60e1f1..a794c69 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -13,6 +13,7 @@ use std::{ convert::{TryFrom, TryInto}, }; +#[derive(Clone)] pub struct RoomEdus { pub(in super::super) readreceiptid_readreceipt: sled::Tree, // ReadReceiptId = RoomId + Count + UserId pub(in super::super) roomuserid_privateread: sled::Tree, // RoomUserId = Room + User, PrivateRead = Count diff --git a/src/database/sending.rs b/src/database/sending.rs new file mode 100644 index 0000000..187fd57 --- /dev/null +++ b/src/database/sending.rs @@ -0,0 +1,83 @@ +use std::{convert::TryFrom, time::SystemTime}; + +use crate::{server_server, utils, Error, Result}; +use rocket::futures::stream::{FuturesUnordered, StreamExt}; +use ruma::{api::federation, Raw, ServerName}; +use tokio::select; + +pub struct Sending { + /// The state for a given state hash. + pub(super) serverpduids: sled::Tree, // ServerPduId = ServerName + PduId +} + +impl Sending { + pub fn start_handler(&self, globals: &super::globals::Globals, rooms: &super::rooms::Rooms) { + let serverpduids = self.serverpduids.clone(); + let rooms = rooms.clone(); + let globals = globals.clone(); + + tokio::spawn(async move { + let mut futures = FuturesUnordered::new(); + let mut subscriber = serverpduids.watch_prefix(b""); + loop { + select! 
{ + Some(_) = futures.next() => {}, + Some(event) = &mut subscriber => { + let serverpduid = if let sled::Event::Insert {key, ..} = event { + key + } else + { return Err::<(), Error>(Error::bad_database("")); }; + let mut parts = serverpduid.splitn(2, |&b| b == 0xff); + let server = Box::::try_from( + utils::string_from_bytes(parts.next().expect("splitn will always return 1 or more elements")) + .map_err(|_| Error::bad_database("ServerName in serverpduid bytes are invalid."))? + ).map_err(|_| Error::bad_database("ServerName in serverpduid is invalid."))?; + + let pdu_id = parts.next().ok_or_else(|| Error::bad_database("Invalid serverpduid in db."))?; + let mut pdu_json = rooms.get_pdu_json_from_id(&pdu_id.into())?.ok_or_else(|| Error::bad_database("Event in serverpduids not found in db."))?; + + pdu_json + .as_object_mut() + .expect("json is object") + .remove("event_id"); + + let raw_json = + serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); + + let globals = &globals; + + futures.push( + async move { + let pdus = vec![raw_json]; + let transaction_id = utils::random_string(16); + + server_server::send_request( + &globals, + server, + federation::transactions::send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &pdus, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &transaction_id, + }, + ).await + } + ); + }, + } + } + }); + } + /* + */ + + pub fn send_pdu(&self, server: Box, pdu_id: &[u8]) -> Result<()> { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu_id); + self.serverpduids.insert(key, b"")?; + + Ok(()) + } +} diff --git a/src/main.rs b/src/main.rs index eb060e3..2817ab9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -130,6 +130,8 @@ fn setup_rocket() -> rocket::Rocket { .attach(AdHoc::on_attach("Config", |mut rocket| async { let data = Database::load_or_create(rocket.config().await).expect("valid config"); + data.sending.start_handler(&data.globals, &data.rooms); + Ok(rocket.manage(data)) })) } diff --git a/src/pdu.rs b/src/pdu.rs index c904230..6d78092 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,13 @@ use crate::{Error, Result}; use js_int::UInt; use ruma::{ + events::pdu::PduStub, events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, EventId, Raw, RoomId, ServerKeyId, ServerName, UserId, -events::pdu::PduStub}; +}; use serde::{Deserialize, Serialize}; use serde_json::json; use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::UNIX_EPOCH}; From 1bf614b0f57a023cbc467f1e5bf03d1eae87b755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 17:02:20 +0200 Subject: [PATCH 0291/1727] fix: remove transaction_id from pdus over federation --- src/database/sending.rs | 6 ++++++ src/pdu.rs | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 187fd57..77998e7 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -36,6 +36,12 @@ impl Sending { let pdu_id = parts.next().ok_or_else(|| Error::bad_database("Invalid serverpduid in db."))?; let mut pdu_json = rooms.get_pdu_json_from_id(&pdu_id.into())?.ok_or_else(|| Error::bad_database("Event in serverpduids not found in db."))?; + if let Some(unsigned) = pdu_json + .as_object_mut() + .expect("json is object") + .get_mut("unsigned") { + 
unsigned.as_object_mut().expect("unsigned is object").remove("transaction_id"); + } pdu_json .as_object_mut() .expect("json is object") diff --git a/src/pdu.rs b/src/pdu.rs index 6d78092..957d9e0 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -201,6 +201,9 @@ impl PduEvent { } pub fn to_outgoing_federation_event(&self) -> Raw { + let mut unsigned = self.unsigned.clone(); + unsigned.remove("transaction_id"); + let mut json = json!({ "room_id": self.room_id, "sender": self.sender, @@ -210,7 +213,7 @@ impl PduEvent { "prev_events": self.prev_events, "depth": self.depth, "auth_events": self.auth_events, - "unsigned": self.unsigned, + "unsigned": unsigned, "hashes": self.hashes, "signatures": self.signatures, }); From 005e00e9b18459d569cca1993138a85d10dfc271 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 17:16:55 +0200 Subject: [PATCH 0292/1727] fix: remove well-known --- src/main.rs | 1 - src/server_server.rs | 5 ----- 2 files changed, 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index 2817ab9..f81c7f4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -119,7 +119,6 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_pushers_route, client_server::set_pushers_route, client_server::upgrade_room_route, - server_server::well_known_server, server_server::get_server_version, server_server::get_server_keys, server_server::get_server_keys_deprecated, diff --git a/src/server_server.rs b/src/server_server.rs index 40ad654..106f60e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -163,11 +163,6 @@ where } } -#[cfg_attr(feature = "conduit_bin", get("/.well-known/matrix/server"))] -pub fn well_known_server() -> Json { - rocket::response::content::Json(json!({ "m.server": "pc.koesters.xyz:59003"}).to_string()) -} - #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] pub fn get_server_version() -> ConduitResult { Ok(get_server_version::Response { From dd749b8aee7c09ca8084059f91cd922e95fb6424 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 15 Sep 2020 21:46:10 +0200 Subject: [PATCH 0293/1727] fix: server keys and destination resolution when server name contains port --- src/database/globals.rs | 45 ++++++++++++++++++++++++++++++++--------- src/server_server.rs | 9 +++++++-- src/utils.rs | 9 +++++++-- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index 8951425..8ce9c01 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,4 +1,5 @@ use crate::{utils, Error, Result}; +use log::error; use ruma::ServerName; use std::{convert::TryInto, sync::Arc}; @@ -17,19 +18,43 @@ pub struct Globals { impl Globals { pub fn load(globals: sled::Tree, config: &rocket::Config) -> Result { - let keypair = Arc::new( - ruma::signatures::Ed25519KeyPair::new( - &*globals - .update_and_fetch("keypair", utils::generate_keypair)? - .expect("utils::generate_keypair always returns Some"), - "key1".to_owned(), - ) - .map_err(|_| Error::bad_database("Private or public keys are invalid."))?, - ); + let bytes = &*globals + .update_and_fetch("keypair", utils::generate_keypair)? + .expect("utils::generate_keypair always returns Some"); + + let mut parts = bytes.splitn(2, |&b| b == 0xff); + + let keypair = utils::string_from_bytes( + // 1. version + parts + .next() + .expect("splitn always returns at least one element"), + ) + .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) + .and_then(|version| { + // 2. 
key + parts + .next() + .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) + .map(|key| (version, key)) + }) + .and_then(|(version, key)| { + ruma::signatures::Ed25519KeyPair::new(&key, version) + .map_err(|_| Error::bad_database("Private or public keys are invalid.")) + }); + + let keypair = match keypair { + Ok(k) => k, + Err(e) => { + error!("Keypair invalid. Deleting..."); + globals.remove("keypair")?; + return Err(e); + } + }; Ok(Self { globals, - keypair, + keypair: Arc::new(keypair), reqwest_client: reqwest::Client::new(), server_name: config .get_str("server_name") diff --git a/src/server_server.rs b/src/server_server.rs index 106f60e..f334d6b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -17,7 +17,6 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, ServerName, }; -use serde_json::json; use std::{ collections::BTreeMap, convert::TryFrom, @@ -58,7 +57,13 @@ where let actual_destination = "https://".to_owned() + &request_well_known(globals, &destination.as_str()) .await - .unwrap_or(destination.as_str().to_owned() + ":8448"); + .unwrap_or_else(|| { + let mut destination = destination.as_str().to_owned(); + if destination.find(':').is_none() { + destination += ":8448"; + } + destination + }); let mut http_request = request .try_into_http_request(&actual_destination, Some("")) diff --git a/src/utils.rs b/src/utils.rs index 8cf1b2c..452b7c5 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,8 +29,13 @@ pub fn increment(old: Option<&[u8]>) -> Option> { pub fn generate_keypair(old: Option<&[u8]>) -> Option> { Some(old.map(|s| s.to_vec()).unwrap_or_else(|| { - ruma::signatures::Ed25519KeyPair::generate() - .expect("Ed25519KeyPair generation always works (?)") + let mut value = random_string(8).as_bytes().to_vec(); + value.push(0xff); + value.extend_from_slice( + &ruma::signatures::Ed25519KeyPair::generate() + .expect("Ed25519KeyPair generation always works (?)"), + ); + value })) } From f4078a29eb2e4975bc5664aab718875ce67da6a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 16 Sep 2020 10:49:54 +0200 Subject: [PATCH 0294/1727] fix: synapse complains about missing origin --- src/database/rooms.rs | 6 ++++++ src/database/sending.rs | 1 + 2 files changed, 7 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2246a61..8e68033 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -812,6 +812,12 @@ impl Rooms { .expect("json is object") .remove("event_id"); + // Add origin because synapse likes that (and it's required in the spec) + pdu_json + .as_object_mut() + .expect("json is object") + .insert("origin".to_owned(), globals.server_name().as_str().into()); + ruma::signatures::hash_and_sign_event( globals.server_name().as_str(), globals.keypair(), diff --git a/src/database/sending.rs b/src/database/sending.rs index 77998e7..a3f1574 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -42,6 +42,7 @@ impl Sending { .get_mut("unsigned") { unsigned.as_object_mut().expect("unsigned is object").remove("transaction_id"); } + pdu_json .as_object_mut() .expect("json is object") From a567cd81d5e849285e6ef14b4d7ac41dd436c8fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 16 Sep 2020 15:08:51 +0200 Subject: [PATCH 0295/1727] improvement: better logs on deserialization errors --- src/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index f334d6b..aef3991 
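Note on the keypair storage format in the patch above: utils::generate_keypair now stores a short random version string, a 0xff separator, and the raw Ed25519 key document as a single sled value, and Globals::load splits that value back into (version, key), deleting it if parsing fails. A rough standalone sketch of the round-trip, with dummy bytes standing in for a real key document (an illustration only, not Conduit's exact helpers):

    // Sketch of the stored keypair value: version + 0xff + Ed25519 key document.
    fn store_keypair(version: &str, key_document: &[u8]) -> Vec<u8> {
        let mut value = version.as_bytes().to_vec();
        value.push(0xff);
        value.extend_from_slice(key_document);
        value
    }

    // Split the stored value back into (version, key document), as Globals::load does.
    fn load_keypair(value: &[u8]) -> Option<(String, &[u8])> {
        let mut parts = value.splitn(2, |&b| b == 0xff);
        let version = String::from_utf8(parts.next()?.to_vec()).ok()?;
        Some((version, parts.next()?))
    }

    fn main() {
        let stored = store_keypair("abcd1234", b"dummy key document bytes");
        let (version, key) = load_keypair(&stored).expect("value contains a separator");
        assert_eq!(version, "abcd1234");
        assert_eq!(key, b"dummy key document bytes".as_ref());
    }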
100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -160,7 +160,7 @@ where .expect("reqwest body is valid http body"), ); response.map_err(|e| { - warn!("{}", e); + warn!("Server returned bad response: {:?}", e); Error::BadServerResponse("Server returned bad response.") }) } From 4db6d7e4308b206e0f34e291f1d23e7a54ea254a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 16 Sep 2020 18:10:17 +0200 Subject: [PATCH 0296/1727] fix: remove avatar url checks They are not in the spec and maubot relies on that --- src/client_server/profile.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 53893c0..686d4c3 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -119,18 +119,6 @@ pub async fn set_avatar_url_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - if let Some(avatar_url) = &body.avatar_url { - if !avatar_url.starts_with("mxc://") { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "avatar_url has to start with mxc://.", - )); - } - - // TODO in the future when we can handle media uploads make sure that this url is our own server - // TODO also make sure this is valid mxc:// format (not only starting with it) - } - db.users .set_avatar_url(&sender_id, body.avatar_url.clone())?; From 3e03787551c8337109f9f7b7d4ad964062c672ae Mon Sep 17 00:00:00 2001 From: Valkum Date: Wed, 16 Sep 2020 19:53:27 +0200 Subject: [PATCH 0297/1727] Add Complement dockerfile and move sytest dir --- tests/Complement.Dockerfile | 10 ++++++++++ {sytest => tests/sytest}/are-we-synapse-yet.list | 0 {sytest => tests/sytest}/are-we-synapse-yet.py | 0 {sytest => tests/sytest}/show-expected-fail-tests.sh | 0 {sytest => tests/sytest}/sytest-blacklist | 0 {sytest => tests/sytest}/sytest-whitelist | 0 6 files changed, 10 insertions(+) create mode 100644 tests/Complement.Dockerfile rename {sytest => tests/sytest}/are-we-synapse-yet.list (100%) rename {sytest => tests/sytest}/are-we-synapse-yet.py (100%) rename {sytest => tests/sytest}/show-expected-fail-tests.sh (100%) rename {sytest => tests/sytest}/sytest-blacklist (100%) rename {sytest => tests/sytest}/sytest-whitelist (100%) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile new file mode 100644 index 0000000..56e4bf7 --- /dev/null +++ b/tests/Complement.Dockerfile @@ -0,0 +1,10 @@ +FROM valkum/docker-rust-ci:latest +WORKDIR /build + +COPY . . 
+RUN cargo build + +ENV SERVER_NAME=localhost +EXPOSE 14004 8448 + +CMD sed "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket-example.toml Rocket.toml && ./target/debug/conduit \ No newline at end of file diff --git a/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list similarity index 100% rename from sytest/are-we-synapse-yet.list rename to tests/sytest/are-we-synapse-yet.list diff --git a/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py similarity index 100% rename from sytest/are-we-synapse-yet.py rename to tests/sytest/are-we-synapse-yet.py diff --git a/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh similarity index 100% rename from sytest/show-expected-fail-tests.sh rename to tests/sytest/show-expected-fail-tests.sh diff --git a/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist similarity index 100% rename from sytest/sytest-blacklist rename to tests/sytest/sytest-blacklist diff --git a/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist similarity index 100% rename from sytest/sytest-whitelist rename to tests/sytest/sytest-whitelist From 506c2a3146bb5314c95ad75ed069869a724ce628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 16 Sep 2020 21:11:38 +0200 Subject: [PATCH 0298/1727] fix: can't find count from event in db --- src/client_server/sync.rs | 31 ++++++++++++++----------------- src/database/rooms.rs | 25 +++++++++++++------------ 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index eeeec00..6ece180 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -93,7 +93,7 @@ pub async fn sync_events_route( let mut limited = false; let mut state_pdus = Vec::new(); - for pdu in non_timeline_pdus { + for (_, pdu) in non_timeline_pdus { if pdu.state_key.is_some() { state_pdus.push(pdu); } @@ -113,7 +113,7 @@ pub async fn sync_events_route( .rooms .pdus_since(&sender_id, &room_id, since)? .filter_map(|r| r.ok()) - .filter_map(|pdu| Some((pdu.state_key.clone()?, pdu))) + .filter_map(|(_, pdu)| Some((pdu.state_key.clone()?, pdu))) { if pdu.kind == EventType::RoomMember { send_member_count = true; @@ -188,8 +188,8 @@ pub async fn sync_events_route( .rooms .all_pdus(&sender_id, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|pdu| pdu.kind == EventType::RoomMember) - .map(|pdu| { + .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .map(|(_, pdu)| { let content = serde_json::from_value::< Raw, >(pdu.content.clone()) @@ -244,7 +244,7 @@ pub async fn sync_events_route( (db.rooms .pdus_since(&sender_id, &room_id, last_read)? .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|pdu| { + .filter(|(_, pdu)| { matches!( pdu.kind.clone(), EventType::RoomMessage | EventType::RoomEncrypted @@ -260,18 +260,15 @@ pub async fn sync_events_route( None }; - let prev_batch = timeline_pdus.first().map_or(Ok::<_, Error>(None), |e| { - Ok(Some( - db.rooms - .get_pdu_count(&e.event_id)? - .ok_or_else(|| Error::bad_database("Can't find count from event in db."))? 
- .to_string(), - )) - })?; + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { + Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) + })?; let room_events = timeline_pdus .into_iter() - .map(|pdu| pdu.to_sync_room_event()) + .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); let mut edus = db @@ -380,7 +377,7 @@ pub async fn sync_events_route( let pdus = db.rooms.pdus_since(&sender_id, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .map(|pdu| pdu.to_sync_room_event()) + .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); let left_room = sync_events::LeftRoom { @@ -395,7 +392,7 @@ pub async fn sync_events_route( let mut left_since_last_sync = false; for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { - let pdu = pdu?; + let (_, pdu) = pdu?; if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { let content = serde_json::from_value::< Raw, @@ -438,7 +435,7 @@ pub async fn sync_events_route( let room_id = room_id?; let mut invited_since_last_sync = false; for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { - let pdu = pdu?; + let (_, pdu) = pdu?; if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { let content = serde_json::from_value::< Raw, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8e68033..263f51b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -355,18 +355,19 @@ impl Rooms { } } + /// Returns the `count` of this pdu's id. + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + Ok( + utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?, + ) + } + /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - utils::u64_from_bytes( - &pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()], - ) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?, - )) - }) + .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) } /// Returns the json of a pdu. 
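Note on the new pdu_count helper above: a PduId is laid out as the room id bytes followed by an 8-byte counter (see the pduid_pdu tree comment, "PduId = RoomId + Count"), so the count can be read back from the last mem::size_of::<u64>() bytes. A small standalone sketch of that round-trip; the big-endian byte order is an assumption for illustration, and utils::u64_from_bytes is not reproduced here:

    use std::convert::TryInto;
    use std::mem;

    // A PduId in this sketch is just RoomId bytes followed by an 8-byte counter.
    fn make_pdu_id(room_id: &str, count: u64) -> Vec<u8> {
        let mut pdu_id = room_id.as_bytes().to_vec();
        pdu_id.extend_from_slice(&count.to_be_bytes()); // byte order assumed for the example
        pdu_id
    }

    // Read the counter back from the trailing 8 bytes, like Rooms::pdu_count does.
    fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
        let start = pdu_id.len().checked_sub(mem::size_of::<u64>())?;
        let tail: [u8; 8] = pdu_id[start..].try_into().ok()?;
        Some(u64::from_be_bytes(tail))
    }

    fn main() {
        let pdu_id = make_pdu_id("!dummyroom:server.name", 42);
        assert_eq!(pdu_count(&pdu_id), Some(42));
    }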
@@ -860,7 +861,7 @@ impl Rooms { &self, user_id: &UserId, room_id: &RoomId, - ) -> Result>> { + ) -> Result>> { self.pdus_since(user_id, room_id, 0) } @@ -871,7 +872,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -887,13 +888,13 @@ impl Rooms { .pduid_pdu .range(first_pdu_id..last_pdu_id) .filter_map(|r| r.ok()) - .map(move |(_, v)| { + .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok(pdu) + Ok((pdu_id, pdu)) })) } From 8d66428bebf3f2f595fb138c804f0c747917e399 Mon Sep 17 00:00:00 2001 From: Valkum Date: Thu, 17 Sep 2020 12:50:37 +0200 Subject: [PATCH 0299/1727] Add intermediate container to hide ARGs ARGs contain secrets to speed up builds --- tests/Complement.Dockerfile | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 56e4bf7..35b3324 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -1,10 +1,21 @@ -FROM valkum/docker-rust-ci:latest +FROM valkum/docker-rust-ci:latest as builder WORKDIR /build +ARG RUSTC_WRAPPER +ARG AWS_ACCESS_KEY_ID +ARG AWS_SECRET_ACCESS_KEY +ARG SCCACHE_BUCKET +ARG SCCACHE_ENDPOINT +ARG SCCACHE_S3_USE_SSL + COPY . . RUN cargo build +FROM valkum/docker-rust-ci:latest +WORKDIR /build +COPY --from=builder /build/target/debug/conduit /conduit + ENV SERVER_NAME=localhost EXPOSE 14004 8448 -CMD sed "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket-example.toml Rocket.toml && ./target/debug/conduit \ No newline at end of file +CMD sed "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket-example.toml Rocket.toml && /conduit \ No newline at end of file From da28c12eebc95be72308302abe69743852cec552 Mon Sep 17 00:00:00 2001 From: Valkum Date: Thu, 17 Sep 2020 15:36:36 +0200 Subject: [PATCH 0300/1727] Try to add TLS reverse proxy for complement --- tests/Complement.Dockerfile | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 35b3324..10a33d7 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -13,9 +13,16 @@ RUN cargo build FROM valkum/docker-rust-ci:latest WORKDIR /build + +RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz" +RUN tar xzf caddy_2.1.1_linux_amd64.tar.gz + COPY --from=builder /build/target/debug/conduit /conduit ENV SERVER_NAME=localhost -EXPOSE 14004 8448 +COPY Rocket-example.toml Rocket.toml +RUN sed -i "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket.toml +RUN sed -i "s/port = 14004/port: 8008/g" Rocket.toml -CMD sed "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket-example.toml Rocket.toml && /conduit \ No newline at end of file +EXPOSE 8008 8448 +CMD caddy --from 8448 --to localhost:8008 & && /conduit \ No newline at end of file From 7c456a0390a5e14d8b758bc8f80c93a7c597f0ea Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 17 Sep 2020 20:14:07 +0200 Subject: [PATCH 0301/1727] Update dockerignore Removing Rocket-example.toml from here is fine, as it is not included in the last stage of our build stages --- .dockerignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.dockerignore 
b/.dockerignore index dd4433d..80b3072 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,6 @@ # Local build and dev artifacts target -sytest +tests # Docker files Dockerfile* @@ -21,7 +21,6 @@ docker-compose* # Toml files rustfmt.toml -Rocket-example.toml # Documentation *.md From 009e36b68010d4b8bb42acc05c3614f8742994bd Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 17 Sep 2020 21:11:18 +0200 Subject: [PATCH 0302/1727] Fix Caddy integration for reverse-proxy --- tests/Complement.Dockerfile | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 10a33d7..f32f878 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -1,5 +1,5 @@ FROM valkum/docker-rust-ci:latest as builder -WORKDIR /build +WORKDIR /workdir ARG RUSTC_WRAPPER ARG AWS_ACCESS_KEY_ID @@ -12,17 +12,19 @@ COPY . . RUN cargo build FROM valkum/docker-rust-ci:latest -WORKDIR /build +WORKDIR /workdir RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz" RUN tar xzf caddy_2.1.1_linux_amd64.tar.gz -COPY --from=builder /build/target/debug/conduit /conduit +COPY --from=builder /workdir/target/debug/conduit /workdir/conduit + +COPY Rocket-example.toml Rocket.toml ENV SERVER_NAME=localhost -COPY Rocket-example.toml Rocket.toml -RUN sed -i "s/server_name: your.server.name/server_name: ${SERVER_NAME}/g" Rocket.toml -RUN sed -i "s/port = 14004/port: 8008/g" Rocket.toml + +RUN sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml +RUN sed -i "s/port = 14004/port = 8008/g" Rocket.toml EXPOSE 8008 8448 -CMD caddy --from 8448 --to localhost:8008 & && /conduit \ No newline at end of file +CMD /workdir/caddy reverse-proxy --from ${SERVER_NAME}:8448 --to localhost:8008 > /dev/null 2>&1 & /workdir/conduit \ No newline at end of file From ea3aaa6b5c06e01bef52a66b64fe45d74d5f60c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 17 Sep 2020 14:44:47 +0200 Subject: [PATCH 0303/1727] improvement: more efficient /sync with gaps --- Cargo.lock | 48 +++---- src/client_server/context.rs | 10 +- src/client_server/membership.rs | 11 +- src/client_server/message.rs | 12 ++ src/client_server/sync.rs | 152 +++++++++++++------- src/database/rooms.rs | 247 ++++++++++++-------------------- src/main.rs | 1 + src/pdu.rs | 6 +- src/server_server.rs | 4 +- src/stateres.rs | 59 -------- 10 files changed, 251 insertions(+), 299 deletions(-) delete mode 100644 src/stateres.rs diff --git a/Cargo.lock b/Cargo.lock index 30144ca..e142d72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,9 +134,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" +checksum = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c" [[package]] name = "cfg-if" @@ -213,7 +213,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ "percent-encoding", - "time 0.2.19", + "time 0.2.20", "version_check", ] @@ -342,9 +342,9 @@ checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" [[package]] name = "either" -version = "1.6.0" +version = "1.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" @@ -1370,7 +1370,7 @@ dependencies = [ "rocket_codegen", "rocket_http", "state", - "time 0.2.19", + "time 0.2.20", "tokio", "toml", "version_check", @@ -1405,7 +1405,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.19", + "time 0.2.20", "tokio", "tokio-rustls", "unicode-xid", @@ -1414,7 +1414,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1430,7 +1430,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "http", "percent-encoding", @@ -1445,7 +1445,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1456,7 +1456,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "ruma-api", "ruma-common", @@ -1469,7 +1469,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "assign", "http", @@ -1488,7 +1488,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-api", @@ -1502,7 +1502,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-common", @@ -1517,7 +1517,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ 
-1528,7 +1528,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-api", @@ -1543,7 +1543,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1555,7 +1555,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro2", "quote", @@ -1566,7 +1566,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "serde", "strum", @@ -1575,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "form_urlencoded", "itoa", @@ -1587,7 +1587,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#6ccb3ecaf69167ba405379826a9d87a98f168df8" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "base64", "ring", @@ -1831,7 +1831,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#1d01b6e65b6afd50e65085fb40f1e7d2782f519e" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#d11a3feb5307715ab5d86af8f25d4bccfee6264b" dependencies = [ "itertools", "js_int", @@ -1981,9 +1981,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c1a1fd93112fc50b11c43a1def21f926be3c18884fad676ea879572da070a1" +checksum = "0d4953c513c9bf1b97e9cdd83f11d60c4b0a83462880a360d80d96953a953fee" dependencies = [ "const_fn", "libc", diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 9593726..4c9be20 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -49,7 +49,10 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let start_token = events_before.last().map(|(count, _)| count.to_string()); + let start_token = events_before + .last() + .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); let events_before = events_before 
.into_iter() @@ -68,7 +71,10 @@ pub fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect::>(); - let end_token = events_after.last().map(|(count, _)| count.to_string()); + let end_token = events_after + .last() + .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); let events_after = events_after .into_iter() diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c4eed95..628045d 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -601,8 +601,7 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = - resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".into()))); + let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -644,13 +643,7 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert( - ( - pdu.kind(), - pdu.state_key().expect("State events have a state key"), - ), - pdu_id, - ); + state.insert((pdu.kind(), pdu.state_key()), pdu_id); } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3944d5b..5a4488f 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -117,6 +117,12 @@ pub fn get_message_events_route( .pdus_after(&sender_id, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|(pdu_id, pdu)| { + db.rooms + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() + }) .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect::>(); @@ -141,6 +147,12 @@ pub fn get_message_events_route( .pdus_until(&sender_id, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|(pdu_id, pdu)| { + db.rooms + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() + }) .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect::>(); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6ece180..0e40bfb 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -105,50 +105,92 @@ pub async fn sync_events_route( .room_state_get(&room_id, &EventType::RoomEncryption, "")? .is_some(); - // TODO: optimize this? - let mut send_member_count = false; - let mut joined_since_last_sync = false; - let mut new_encrypted_room = false; - for (state_key, pdu) in db + // Database queries: + let since_state_hash = db .rooms - .pdus_since(&sender_id, &room_id, since)? 
- .filter_map(|r| r.ok()) - .filter_map(|(_, pdu)| Some((pdu.state_key.clone()?, pdu))) - { - if pdu.kind == EventType::RoomMember { - send_member_count = true; + .pdus_until(sender_id, &room_id, since) + .next() + .and_then(|pdu| pdu.ok()) + .and_then(|pdu| db.rooms.pdu_state_hash(&pdu.0).ok()?); - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) + let since_members = since_state_hash + .as_ref() + .and_then(|state_hash| db.rooms.state_type(state_hash, &EventType::RoomMember).ok()); + + let since_encryption = since_state_hash.as_ref().and_then(|state_hash| { + db.rooms + .state_get(&state_hash, &EventType::RoomEncryption, "") + .ok() + }); + + let current_members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = since_members.as_ref().map_or(true, |since_members| { + current_members.len() != since_members.len() + }); + + let since_sender_member = since_members.as_ref().and_then(|members| { + members.get(sender_id.as_str()).and_then(|pdu| { + serde_json::from_value::>( + pdu.content.clone(), + ) .expect("Raw::from_value always works") .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }) + }); - if pdu.state_key == Some(sender_id.to_string()) - && content.membership == MembershipState::Join - { - joined_since_last_sync = true; - } else if encrypted_room && content.membership == MembershipState::Join { - // A new user joined an encrypted room - let user_id = UserId::try_from(state_key) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - // Add encryption update if we didn't share an encrypted room already - if !share_encrypted_room(&db, &sender_id, &user_id, &room_id) { - device_list_updates.insert(user_id); + if encrypted_room { + for (user_id, current_member) in current_members { + let current_membership = serde_json::from_value::< + Raw, + >(current_member.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
+ .membership; + + let since_membership = since_members + .as_ref() + .and_then(|members| { + members.get(&user_id).and_then(|since_member| { + serde_json::from_value::< + Raw, + >(since_member.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }) + }) + .map_or(MembershipState::Leave, |member| member.membership); + + let user_id = UserId::try_from(user_id) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + match (since_membership, current_membership) { + (MembershipState::Leave, MembershipState::Join) => { + // A new user joined an encrypted room + if !share_encrypted_room(&db, &sender_id, &user_id, &room_id) { + device_list_updates.insert(user_id); + } } - } else if encrypted_room && content.membership == MembershipState::Leave { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert( - UserId::try_from(state_key) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?, - ); + (MembershipState::Join, MembershipState::Leave) => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} } - } else if pdu.kind == EventType::RoomEncryption { - new_encrypted_room = true; } } + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( @@ -390,23 +432,37 @@ pub async fn sync_events_route( state: sync_events::State { events: Vec::new() }, }; - let mut left_since_last_sync = false; - for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { - let (_, pdu) = pdu?; - if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) + let since_member = db + .rooms + .pdus_until(sender_id, &room_id, since) + .next() + .and_then(|pdu| pdu.ok()) + .and_then(|pdu| { + db.rooms + .pdu_state_hash(&pdu.0) + .ok()? + .ok_or_else(|| Error::bad_database("Pdu in db doesn't have a state hash.")) + .ok() + }) + .and_then(|state_hash| { + db.rooms + .state_get(&state_hash, &EventType::RoomMember, sender_id.as_str()) + .ok()? + .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) + .ok() + }) + .and_then(|pdu| { + serde_json::from_value::>( + pdu.content.clone(), + ) .expect("Raw::from_value always works") .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); - if content.membership == MembershipState::Leave { - left_since_last_sync = true; - break; - } - } - } + let left_since_last_sync = + since_member.map_or(false, |member| member.membership == MembershipState::Join); if left_since_last_sync { device_list_left.extend( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 263f51b..5958626 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -31,7 +31,7 @@ use std::{ /// /// This is created when a state group is added to the database by /// hashing the entire state. 
-pub type StateHashId = Vec; +pub type StateHashId = IVec; #[derive(Clone)] pub struct Rooms { @@ -100,7 +100,7 @@ impl StateStore for Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - pub fn state_full(&self, state_hash: StateHashId) -> Result> { + pub fn state_full(&self, state_hash: &StateHashId) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -115,61 +115,87 @@ impl Rooms { }) .map(|pdu| { let pdu = pdu?; - Ok(((pdu.kind, pdu.state_key), pdu.event_id)) + Ok(( + ( + pdu.kind.clone(), + pdu.state_key + .as_ref() + .ok_or_else(|| Error::bad_database("State event has no state key."))? + .clone(), + ), + pdu, + )) }) .collect::>>() } - // TODO make this return Result - /// Fetches the previous StateHash ID to `current`. - pub fn prev_state_hash(&self, current: StateHashId) -> Option { - let mut found = false; - for pair in self.pduid_statehash.iter().rev() { - let prev = pair.ok()?.1; - if current == prev.as_ref() { - found = true; - } - if current != prev.as_ref() && found { - return Some(prev.to_vec()); - } + /// Returns all state entries for this type. + pub fn state_type( + &self, + state_hash: &StateHashId, + event_type: &EventType, + ) -> Result> { + let mut prefix = state_hash.to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&event_type.to_string().as_bytes()); + prefix.push(0xff); + + let mut hashmap = HashMap::new(); + for pdu in self + .stateid_pduid + .scan_prefix(&prefix) + .values() + .map(|pdu_id| { + Ok::<_, Error>( + serde_json::from_slice::(&self.pduid_pdu.get(pdu_id?)?.ok_or_else( + || Error::bad_database("PDU in state not found in database."), + )?) + .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + ) + }) + { + let pdu = pdu?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Room state contains event without state_key.") + })?; + hashmap.insert(state_key, pdu); } - None + Ok(hashmap) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + pub fn state_get( + &self, + state_hash: &StateHashId, + event_type: &EventType, + state_key: &str, + ) -> Result> { + let mut key = state_hash.to_vec(); + key.push(0xff); + key.extend_from_slice(&event_type.to_string().as_bytes()); + key.push(0xff); + key.extend_from_slice(&state_key.as_bytes()); + + self.stateid_pduid.get(&key)?.map_or(Ok(None), |pdu_id| { + Ok::<_, Error>(Some( + serde_json::from_slice::( + &self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("PDU in state not found in database.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + )) + }) + } + + /// Returns the last state hash key added to the db. + pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { + Ok(self.pduid_statehash.get(pdu_id)?) } /// Returns the last state hash key added to the db. pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { - Ok(self - .roomid_statehash - .get(room_id.as_bytes())? - .map(|bytes| bytes.to_vec())) - } - - /// This fetches auth event_ids from the current state using the - /// full `roomstateid_pdu` tree. 
- pub fn get_auth_event_ids( - &self, - room_id: &RoomId, - kind: &EventType, - sender: &UserId, - state_key: Option<&str>, - content: serde_json::Value, - ) -> Result> { - let auth_events = state_res::auth_types_for_event( - kind.clone(), - sender, - state_key.map(|s| s.to_string()), - content, - ); - - let mut events = vec![]; - for (event_type, state_key) in auth_events { - if let Some(state_key) = state_key.as_ref() { - if let Some(id) = self.room_state_get(room_id, &event_type, state_key)? { - events.push(id.event_id); - } - } - } - Ok(events) + Ok(self.roomid_statehash.get(room_id.as_bytes())?) } /// This fetches auth events from the current state. @@ -190,10 +216,8 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some(s_key) = state_key.as_ref() { - if let Some(pdu) = self.room_state_get(room_id, &event_type, s_key)? { - events.insert((event_type, state_key), pdu); - } + if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { + events.insert((event_type, state_key), pdu); } } Ok(events) @@ -206,7 +230,7 @@ impl Rooms { // We only hash the pdu's event ids, not the whole pdu let bytes = pdu_id_bytes.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); - Ok(hash.as_ref().to_vec()) + Ok(hash.as_ref().into()) } /// Checks if a room exists. @@ -230,7 +254,7 @@ impl Rooms { ) -> Result<()> { let state_hash = self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; - let mut prefix = state_hash.clone(); + let mut prefix = state_hash.to_vec(); prefix.push(0xff); for ((event_type, state_key), pdu_id) in state { @@ -248,41 +272,11 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result> { + pub fn room_state_full(&self, room_id: &RoomId) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { - let mut prefix = current_state_hash; - prefix.push(0xff); - - let mut hashmap = HashMap::new(); - for pdu in self - .stateid_pduid - .scan_prefix(prefix) - .values() - .map(|pdu_id| { - Ok::<_, Error>( - serde_json::from_slice::( - &self.pduid_pdu.get(pdu_id?)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| { - Error::bad_database("Invalid PDU bytes in current room state.") - })?, - ) - }) - { - let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Room state contains event without state_key.") - })?; - hashmap.insert((pdu.kind.clone(), state_key), pdu); - } - Ok(hashmap) + self.state_full(¤t_state_hash) } else { - Ok(HashMap::new()) + Ok(BTreeMap::new()) } } @@ -293,36 +287,7 @@ impl Rooms { event_type: &EventType, ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? 
{ - let mut prefix = current_state_hash; - prefix.push(0xff); - prefix.extend_from_slice(&event_type.to_string().as_bytes()); - prefix.push(0xff); - - let mut hashmap = HashMap::new(); - for pdu in self - .stateid_pduid - .scan_prefix(&prefix) - .values() - .map(|pdu_id| { - Ok::<_, Error>( - serde_json::from_slice::( - &self.pduid_pdu.get(pdu_id?)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| { - Error::bad_database("Invalid PDU bytes in current room state.") - })?, - ) - }) - { - let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Room state contains event without state_key.") - })?; - hashmap.insert(state_key, pdu); - } - Ok(hashmap) + self.state_type(¤t_state_hash, event_type) } else { Ok(HashMap::new()) } @@ -336,20 +301,7 @@ impl Rooms { state_key: &str, ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { - let mut key = current_state_hash; - key.push(0xff); - key.extend_from_slice(&event_type.to_string().as_bytes()); - key.push(0xff); - key.extend_from_slice(&state_key.as_bytes()); - - self.stateid_pduid.get(&key)?.map_or(Ok(None), |pdu_id| { - Ok::<_, Error>(Some( - serde_json::from_slice::(&self.pduid_pdu.get(pdu_id)?.ok_or_else( - || Error::bad_database("PDU in state not found in database."), - )?) - .map_err(|_| Error::bad_database("Invalid PDU bytes in current room state."))?, - )) - }) + self.state_get(¤t_state_hash, event_type, state_key) } else { Ok(None) } @@ -562,14 +514,15 @@ impl Rooms { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. /// The incoming event is the `pdu_id` passed to this method. - fn append_to_state(&self, new_pdu_id: &[u8], new_pdu: &PduEvent) -> Result { + pub fn append_to_state(&self, new_pdu_id: &[u8], new_pdu: &PduEvent) -> Result { let old_state = if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { // Store state for event. The state does not include the event itself. // Instead it's the state before the pdu, so the room's old state. 
- self.pduid_statehash.insert(new_pdu_id, &old_state_hash)?; + self.pduid_statehash + .insert(dbg!(new_pdu_id), &old_state_hash)?; if new_pdu.state_key.is_none() { - return Ok(old_state_hash.to_vec()); + return Ok(old_state_hash); } let mut prefix = old_state_hash.to_vec(); @@ -841,9 +794,7 @@ impl Rooms { let pdu_id = self.append_pdu(&pdu, &pdu_json, globals, account_data)?; - if pdu.state_key.is_some() { - self.append_to_state(&pdu_id, &pdu)?; - } + self.append_to_state(&pdu_id, &pdu)?; for server in self .room_servers(room_id) @@ -905,7 +856,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -916,23 +867,18 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); - let prefixlen = prefix.len(); self.pduid_pdu .range(..current) .rev() .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(k, v)| { + .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok(( - utils::u64_from_bytes(&k[prefixlen..]) - .map_err(|_| Error::bad_database("Invalid pdu id in db."))?, - pdu, - )) + Ok((pdu_id, pdu)) }) } @@ -943,7 +889,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator> { + ) -> impl Iterator> { // Create the first part of the full pdu id let mut prefix = room_id.to_string().as_bytes().to_vec(); prefix.push(0xff); @@ -954,22 +900,17 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); - let prefixlen = prefix.len(); self.pduid_pdu .range(current..) 
.filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(k, v)| { + .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { pdu.unsigned.remove("transaction_id"); } - Ok(( - utils::u64_from_bytes(&k[prefixlen..]) - .map_err(|_| Error::bad_database("Invalid pdu id in db."))?, - pdu, - )) + Ok((pdu_id, pdu)) }) } diff --git a/src/main.rs b/src/main.rs index f81c7f4..06fda59 100644 --- a/src/main.rs +++ b/src/main.rs @@ -123,6 +123,7 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_server_keys, server_server::get_server_keys_deprecated, server_server::get_public_rooms_route, + server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, ], ) diff --git a/src/pdu.rs b/src/pdu.rs index 957d9e0..d5b5415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result}; +use crate::Error; use js_int::UInt; use ruma::{ events::pdu::PduStub, @@ -35,7 +35,7 @@ pub struct PduEvent { } impl PduEvent { - pub fn redact(&mut self, reason: &PduEvent) -> Result<()> { + pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { self.unsigned.clear(); let allowed: &[&str] = match self.kind { @@ -244,7 +244,7 @@ impl From<&state_res::StateEvent> for PduEvent { .expect("time is valid"), kind: pdu.kind(), content: pdu.content().clone(), - state_key: pdu.state_key(), + state_key: Some(pdu.state_key()), prev_events: pdu.prev_event_ids(), depth: pdu.depth().clone(), auth_events: pdu.auth_events(), diff --git a/src/server_server.rs b/src/server_server.rs index aef3991..6f2b179 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -329,8 +329,10 @@ pub fn send_transaction_message_route<'a>( let pdu = serde_json::from_value::(value.clone()) .expect("all ruma pdus are conduit pdus"); if db.rooms.exists(&pdu.room_id)? 
{ - db.rooms + let pdu_id = db + .rooms .append_pdu(&pdu, &value, &db.globals, &db.account_data)?; + db.rooms.append_to_state(&pdu_id, &pdu)?; } } Ok(send_transaction_message::v1::Response { diff --git a/src/stateres.rs b/src/stateres.rs deleted file mode 100644 index ee47099..0000000 --- a/src/stateres.rs +++ /dev/null @@ -1,59 +0,0 @@ -use std::collections::HashMap; - -fn stateres(state_a: HashMap, state_b: HashMap) { - let mut unconflicted = todo!("state at fork event"); - - let mut conflicted: HashMap = state_a - .iter() - .filter(|(key_a, value_a)| match state_b.remove(key_a) { - Some(value_b) if value_a == value_b => unconflicted.insert(key_a, value_a), - _ => false, - }) - .collect(); - - // We removed unconflicted from state_b, now we can easily insert all events that are only in fork b - conflicted.extend(state_b); - - let partial_state = unconflicted.clone(); - - let full_conflicted = conflicted.clone(); // TODO: auth events - - let output_rev = Vec::new(); - let event_map = HashMap::new(); - let incoming_edges = HashMap::new(); - - for event in full_conflicted { - event_map.insert(event.event_id, event); - incoming_edges.insert(event.event_id, 0); - } - - for e in conflicted_control_events { - for a in e.auth_events { - incoming_edges[a.event_id] += 1; - } - } - - while incoming_edges.len() > 0 { - let mut count_0 = incoming_edges - .iter() - .filter(|(_, c)| c == 0) - .collect::>(); - - count_0.sort_by(|(x, _), (y, _)| { - x.power_level - .cmp(&a.power_level) - .then_with(|| x.origin_server.ts.cmp(&y.origin_server_ts)) - .then_with(|| x.event_id.cmp(&y.event_id)) - }); - - for (id, count) in count_0 { - output_rev.push(event_map[id]); - - for auth_event in event_map[id].auth_events { - incoming_edges[auth_event.event_id] -= 1; - } - - incoming_edges.remove(id); - } - } -} From 8bcfff276652e20fee14e13109d80a514e1a107d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 17 Sep 2020 19:58:19 +0200 Subject: [PATCH 0304/1727] fix: no notification counts for fast /syncs --- src/database/rooms.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5958626..18881dd 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -447,6 +447,11 @@ impl Rooms { // This is also the next_batch/since value let index = globals.next_count()?; + // Mark as read first so the sending client doesn't get a notification even if appending + // fails + self.edus + .private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&index.to_be_bytes()); @@ -503,9 +508,6 @@ impl Rooms { _ => {} } - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?; - Ok(pdu_id) } @@ -520,7 +522,7 @@ impl Rooms { // Store state for event. The state does not include the event itself. // Instead it's the state before the pdu, so the room's old state. 
self.pduid_statehash - .insert(dbg!(new_pdu_id), &old_state_hash)?; + .insert(new_pdu_id, &old_state_hash)?; if new_pdu.state_key.is_none() { return Ok(old_state_hash); } From 267c7216162932c9f6e35d69be4c6303d5134952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 17 Sep 2020 22:41:43 +0200 Subject: [PATCH 0305/1727] fix: encryption and sync spam --- src/client_server/sync.rs | 106 +++++++++++++++++++++++--------------- src/database/rooms.rs | 3 +- src/database/users.rs | 2 +- 3 files changed, 67 insertions(+), 44 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 0e40bfb..2f2c8ea 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -100,47 +100,61 @@ pub async fn sync_events_route( limited = true; } + // Database queries: let encrypted_room = db .rooms .room_state_get(&room_id, &EventType::RoomEncryption, "")? .is_some(); - // Database queries: + // These type is Option>. The outer Option is None when there is no event between + // since and the current room state, meaning there should be no updates. + // The inner Option is None when there is an event, but there is no state hash associated + // with it. This can happen for the RoomCreate event, so all updates should arrive. let since_state_hash = db .rooms - .pdus_until(sender_id, &room_id, since) + .pdus_after(sender_id, &room_id, since) // - 1 So we can get the event at since .next() - .and_then(|pdu| pdu.ok()) - .and_then(|pdu| db.rooms.pdu_state_hash(&pdu.0).ok()?); + .map(|pdu| db.rooms.pdu_state_hash(&pdu.ok()?.0).ok()?); - let since_members = since_state_hash - .as_ref() - .and_then(|state_hash| db.rooms.state_type(state_hash, &EventType::RoomMember).ok()); + let since_members = since_state_hash.as_ref().map(|state_hash| { + state_hash.as_ref().and_then(|state_hash| { + db.rooms + .state_type(&state_hash, &EventType::RoomMember) + .ok() + }) + }); - let since_encryption = since_state_hash.as_ref().and_then(|state_hash| { - db.rooms - .state_get(&state_hash, &EventType::RoomEncryption, "") - .ok() + let since_encryption = since_state_hash.as_ref().map(|state_hash| { + state_hash.as_ref().and_then(|state_hash| { + db.rooms + .state_get(&state_hash, &EventType::RoomEncryption, "") + .ok() + }) }); let current_members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); + let new_encrypted_room = + encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); - let send_member_count = since_members.as_ref().map_or(true, |since_members| { - current_members.len() != since_members.len() + let send_member_count = since_members.as_ref().map_or(false, |since_members| { + since_members.as_ref().map_or(true, |since_members| { + current_members.len() != since_members.len() + }) }); - let since_sender_member = since_members.as_ref().and_then(|members| { - members.get(sender_id.as_str()).and_then(|pdu| { - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() + let since_sender_member = since_members.as_ref().map(|since_members| { + since_members.as_ref().and_then(|members| { + members.get(sender_id.as_str()).and_then(|pdu| { + serde_json::from_value::>( + pdu.content.clone(), + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }) 
}) }); @@ -154,20 +168,29 @@ pub async fn sync_events_route( .map_err(|_| Error::bad_database("Invalid PDU in database."))? .membership; - let since_membership = since_members - .as_ref() - .and_then(|members| { - members.get(&user_id).and_then(|since_member| { - serde_json::from_value::< - Raw, - >(since_member.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }) - }) - .map_or(MembershipState::Leave, |member| member.membership); + let since_membership = + since_members + .as_ref() + .map_or(MembershipState::Join, |members| { + members + .as_ref() + .and_then(|members| { + members.get(&user_id).and_then(|since_member| { + serde_json::from_value::< + Raw, + >( + since_member.content.clone() + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + }) + .ok() + }) + }) + .map_or(MembershipState::Leave, |member| member.membership) + }); let user_id = UserId::try_from(user_id) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; @@ -188,8 +211,9 @@ pub async fn sync_events_route( } } - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + let joined_since_last_sync = since_sender_member.map_or(false, |member| { + member.map_or(true, |member| member.membership != MembershipState::Join) + }); if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users @@ -434,7 +458,7 @@ pub async fn sync_events_route( let since_member = db .rooms - .pdus_until(sender_id, &room_id, since) + .pdus_after(sender_id, &room_id, since) .next() .and_then(|pdu| pdu.ok()) .and_then(|pdu| { @@ -581,7 +605,7 @@ pub async fn sync_events_route( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since { + device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since || since == 0 { db.users.count_one_time_keys(sender_id, device_id)? } else { BTreeMap::new() diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 18881dd..108edb5 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -521,8 +521,7 @@ impl Rooms { if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { // Store state for event. The state does not include the event itself. // Instead it's the state before the pdu, so the room's old state. - self.pduid_statehash - .insert(new_pdu_id, &old_state_hash)?; + self.pduid_statehash.insert(new_pdu_id, &old_state_hash)?; if new_pdu.state_key.is_none() { return Ok(old_state_hash); } diff --git a/src/database/users.rs b/src/database/users.rs index 10e1ef3..2e26c1e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -603,7 +603,7 @@ impl Users { .room_state_get(&room_id, &EventType::RoomEncryption, "")? 
.is_none() { - return Ok(()); + continue; } let mut key = room_id.to_string().as_bytes().to_vec(); From 19207845bc2ec7db6d0ede2ff2b345162bc817ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 20 Sep 2020 13:49:13 +0200 Subject: [PATCH 0306/1727] Fix ruma dependency --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98dbac9..9ea58b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1554,7 +1554,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "ruma-api", "ruma-client-api", @@ -1568,7 +1568,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "http", "percent-encoding", @@ -1583,7 +1583,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1594,7 +1594,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "assign", "http", @@ -1612,7 +1612,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-identifiers", @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-common", @@ -1640,7 +1640,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1651,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "js_int", "ruma-api", @@ -1666,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" 
version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1678,7 +1678,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "proc-macro2", "quote", @@ -1689,7 +1689,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "ruma-serde", "serde", @@ -1700,7 +1700,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "form_urlencoded", "itoa", @@ -1712,7 +1712,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-old-fixes#195b15be25ba1f2d4e0b520f01ecb77143c01eb0" dependencies = [ "base64", "ring", diff --git a/Cargo.toml b/Cargo.toml index 4945e3c..db3c685 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", features = ["tls"] } #ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "987d48666cf166cf12100b5dbc61b5e3385c4014" } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fixes" } # Used for matrix spec type definitions and helpers +ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-old-fixes" } # Used for matrix spec type definitions and helpers #ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } tokio = "0.2.22" # Used for long polling sled = "0.32.0" # Used for storing data permanently From e08dfd982b87b632590bf125cdc48ee83bad9ebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Sep 2020 12:03:08 +0200 Subject: [PATCH 0307/1727] improvement: look at SRV record when sending requests --- Cargo.lock | 239 +++++++++++++++++++++++++++++++++----- Cargo.toml | 2 + src/client_server/sync.rs | 4 +- src/server_server.rs | 49 ++++++-- 4 files changed, 253 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e142d72..4cce666 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,20 @@ # This file is automatically @generated by 
Cargo. # It is not intended for manual editing. +[[package]] +name = "addr2line" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + [[package]] name = "adler32" version = "1.2.0" @@ -73,6 +88,20 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "backtrace" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide 0.4.2", + "object", + "rustc-demangle", +] + [[package]] name = "base-x" version = "0.2.6" @@ -192,6 +221,7 @@ dependencies = [ "state-res", "thiserror", "tokio", + "trust-dns-resolver", ] [[package]] @@ -213,7 +243,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" dependencies = [ "percent-encoding", - "time 0.2.20", + "time 0.2.21", "version_check", ] @@ -355,6 +385,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "fnv" version = "1.0.7" @@ -537,6 +579,12 @@ dependencies = [ "lzw", ] +[[package]] +name = "gimli" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" + [[package]] name = "glob" version = "0.3.0" @@ -586,6 +634,17 @@ dependencies = [ "libc", ] +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi 0.3.9", +] + [[package]] name = "http" version = "0.2.1" @@ -614,10 +673,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] -name = "hyper" -version = "0.13.7" +name = "httpdate" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + +[[package]] +name = "hyper" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835" dependencies = [ "bytes", "futures-channel", @@ -627,10 +692,10 @@ dependencies = [ "http", "http-body", "httparse", + "httpdate", "itoa", "pin-project", "socket2", - "time 0.1.44", "tokio", "tower-service", "tracing", @@ -695,9 +760,12 @@ checksum = "cb6ee2a7da03bfc3b66ca47c92c2e392fcc053ea040a85561749b026f7aad09a" [[package]] name = "instant" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" +checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66" +dependencies = [ + "cfg-if", +] [[package]] name = "iovec" @@ -708,6 +776,18 @@ dependencies = [ "libc", ] +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2", + "widestring", + "winapi 0.3.9", + "winreg 0.6.2", +] + [[package]] name = "ipnet" version = "2.3.0" @@ -778,6 +858,12 @@ version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235" +[[package]] +name = "linked-hash-map" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" + [[package]] name = "lock_api" version = "0.4.1" @@ -796,6 +882,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lzw" version = "0.10.0" @@ -808,6 +903,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -837,9 +938,9 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memoffset" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ "autocfg", ] @@ -869,6 +970,16 @@ dependencies = [ "adler32", ] +[[package]] +name = "miniz_oxide" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9" +dependencies = [ + "adler", + "autocfg", +] + [[package]] name = "mio" version = "0.6.22" @@ -991,6 +1102,12 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + [[package]] name = "once_cell" version = "1.4.1" @@ -1130,7 +1247,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide", + "miniz_oxide 0.3.7", ] [[package]] @@ -1181,6 +1298,12 @@ dependencies = [ "yansi", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.7" @@ -1334,7 +1457,17 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.7.0", +] + +[[package]] +name = "resolv-conf" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"11834e137f3b14e309437a8276714eed3a80d1ef894869e510f2c0c0b98b9f4a" +dependencies = [ + "hostname", + "quick-error", ] [[package]] @@ -1370,7 +1503,7 @@ dependencies = [ "rocket_codegen", "rocket_http", "state", - "time 0.2.20", + "time 0.2.21", "tokio", "toml", "version_check", @@ -1405,7 +1538,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.20", + "time 0.2.21", "tokio", "tokio-rustls", "unicode-xid", @@ -1414,7 +1547,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1430,7 +1562,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "http", "percent-encoding", @@ -1445,7 +1576,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1456,7 +1586,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "ruma-api", "ruma-common", @@ -1469,7 +1598,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "assign", "http", @@ -1488,7 +1616,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-api", @@ -1502,7 +1629,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-common", @@ -1517,7 +1643,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1528,7 +1653,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "js_int", "ruma-api", @@ -1543,7 +1667,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1555,7 +1678,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "proc-macro2", "quote", @@ -1566,7 +1688,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "serde", "strum", @@ -1575,7 +1696,6 
@@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "form_urlencoded", "itoa", @@ -1587,7 +1707,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#425d34d4cfb5aefe5bab6957d71bc9389384c1e5" dependencies = [ "base64", "ring", @@ -1607,6 +1726,12 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + [[package]] name = "rustc_version" version = "0.2.3" @@ -1831,7 +1956,6 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#d11a3feb5307715ab5d86af8f25d4bccfee6264b" dependencies = [ "itertools", "js_int", @@ -1981,9 +2105,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4953c513c9bf1b97e9cdd83f11d60c4b0a83462880a360d80d96953a953fee" +checksum = "2c2e31fb28e2a9f01f5ed6901b066c1ba2333c04b64dc61254142bafcb3feb2c" dependencies = [ "const_fn", "libc", @@ -2182,6 +2306,46 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "trust-dns-proto" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdd7061ba6f4d4d9721afedffbfd403f20f39a4301fee1b70d6fcd09cca69f28" +dependencies = [ + "async-trait", + "backtrace", + "enum-as-inner", + "futures", + "idna", + "lazy_static", + "log", + "rand", + "smallvec", + "thiserror", + "tokio", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f23cdfdc3d8300b3c50c9e84302d3bd6d860fb9529af84ace6cf9665f181b77" +dependencies = [ + "backtrace", + "cfg-if", + "futures", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -2366,6 +2530,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "widestring" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a763e303c0e0f23b0da40888724762e802a8ffefbc22de4127ef42493c2ea68c" + [[package]] name = "winapi" version = "0.2.8" @@ -2400,6 +2570,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "winreg" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index 60296a2..2126e42 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,8 @@ image = { version = "0.23.9", default-features = false, features = ["jpeg", "png base64 = "0.12.3" # Used when hashing the state ring = "0.16.15" +# Used when querying the SRV record of other servers +trust-dns-resolver = "0.19.5" [features] default = ["conduit_bin"] diff --git 
a/src/client_server/sync.rs b/src/client_server/sync.rs index 2f2c8ea..aec03af 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -605,7 +605,9 @@ pub async fn sync_events_route( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since || since == 0 { + device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since + || since == 0 + { db.users.count_one_time_keys(sender_id, device_id)? } else { BTreeMap::new() diff --git a/src/server_server.rs b/src/server_server.rs index 6f2b179..3ebbeac 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,5 @@ use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use http::header::{HeaderValue, AUTHORIZATION}; +use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::warn; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ @@ -23,6 +23,7 @@ use std::{ fmt::Debug, time::{Duration, SystemTime}, }; +use trust_dns_resolver::AsyncResolver; pub async fn request_well_known( globals: &crate::database::globals::Globals, @@ -54,16 +55,36 @@ pub async fn send_request( where T: Debug, { + let resolver = AsyncResolver::tokio_from_system_conf() + .await + .map_err(|_| Error::BadConfig("Failed to set up trust dns resolver with system config."))?; + + let mut host = None; + let actual_destination = "https://".to_owned() - + &request_well_known(globals, &destination.as_str()) - .await - .unwrap_or_else(|| { - let mut destination = destination.as_str().to_owned(); - if destination.find(':').is_none() { - destination += ":8448"; + + &if let Some(mut delegated_hostname) = + request_well_known(globals, &destination.as_str()).await + { + if let Ok(Some(srv)) = resolver + .srv_lookup(format!("_matrix._tcp.{}", delegated_hostname)) + .await + .map(|srv| srv.iter().next().map(|result| result.target().to_string())) + { + host = Some(delegated_hostname); + srv.trim_end_matches('.').to_owned() + } else { + if delegated_hostname.find(':').is_none() { + delegated_hostname += ":8448"; } - destination - }); + delegated_hostname + } + } else { + let mut destination = destination.as_str().to_owned(); + if destination.find(':').is_none() { + destination += ":8448"; + } + destination + }; let mut http_request = request .try_into_http_request(&actual_destination, Some("")) @@ -129,9 +150,17 @@ where } } - let reqwest_request = reqwest::Request::try_from(http_request) + if let Some(host) = host { + http_request + .headers_mut() + .insert(HOST, HeaderValue::from_str(&host).unwrap()); + } + + let mut reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); + *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: From 26e200e290133e0e2424da0d1e354eaa764c6d4c Mon Sep 17 00:00:00 2001 From: miruka Date: Fri, 25 Sep 2020 14:18:36 -0400 Subject: [PATCH 0308/1727] Reduce media ID length from 256 to 32 Most common filesystems limit paths to 255 bytes. This change brings down the media ID length to be similar to Synapse servers (25), and makes it possible for clients to download media with the ID included in the filename. 
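(Editor's note, not part of the patch: a minimal, self-contained sketch of the size arithmetic behind this change. It is illustrative only; the server name, the hard-coded example ID, and the helper function below are placeholders, not Conduit's actual media code, which generates the ID with a random-string helper.)

    // Why 32 characters is enough: the full mxc URI (and any file name a
    // client derives from it) stays far below the ~255-byte limit most
    // filesystems impose on a single path component.
    const MXC_LENGTH: usize = 32; // was 256 before this patch

    fn new_media_mxc(server_name: &str, media_id: &str) -> String {
        assert_eq!(media_id.len(), MXC_LENGTH);
        format!("mxc://{}/{}", server_name, media_id)
    }

    fn main() {
        // Hypothetical pre-generated 32-char identifier (randomly generated
        // in the real code; fixed here so the example is deterministic).
        let media_id = "abcdefghijklmnopqrstuvwxyz012345";
        let mxc = new_media_mxc("conduit.rs", media_id);
        // "mxc://conduit.rs/<32 chars>" is 49 bytes, comfortably below 255.
        println!("{} ({} bytes)", mxc, mxc.len());
    }
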
--- src/client_server/media.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index efcb3a6..394cf74 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -9,7 +9,7 @@ use ruma::api::client::{ use rocket::{get, post}; use std::convert::TryInto; -const MXC_LENGTH: usize = 256; +const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] pub fn get_media_config_route( From ab332363ce3af716694cde7bf35cf421e91707d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Sep 2020 15:23:29 +0200 Subject: [PATCH 0309/1727] fix: don't send new requests to servers if we are already waiting --- Cargo.lock | 15 ++++ src/database/sending.rs | 149 ++++++++++++++++++++++++++-------------- src/server_server.rs | 6 +- 3 files changed, 117 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4cce666..5f6b21c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1547,6 +1547,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1562,6 +1563,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "http", "percent-encoding", @@ -1576,6 +1578,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1586,6 +1589,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "ruma-api", "ruma-common", @@ -1598,6 +1602,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "assign", "http", @@ -1616,6 +1621,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "js_int", "ruma-api", @@ -1629,6 +1635,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "js_int", "ruma-common", @@ -1643,6 +1650,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1653,6 +1661,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "js_int", "ruma-api", @@ -1667,6 +1676,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1678,6 +1688,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "proc-macro2", "quote", @@ -1688,6 +1699,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "serde", "strum", @@ -1696,6 +1708,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "form_urlencoded", "itoa", @@ -1707,6 +1720,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" dependencies = [ "base64", "ring", @@ -1956,6 +1970,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#d11a3feb5307715ab5d86af8f25d4bccfee6264b" dependencies = [ "itertools", "js_int", diff --git a/src/database/sending.rs b/src/database/sending.rs index a3f1574..d3c7fc6 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,8 +1,11 @@ -use std::{convert::TryFrom, time::SystemTime}; +use std::{collections::HashSet, convert::TryFrom, time::SystemTime}; use crate::{server_server, utils, Error, Result}; +use federation::transactions::send_transaction_message; +use log::warn; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{api::federation, Raw, ServerName}; +use sled::IVec; use tokio::select; pub struct Sending { @@ -18,66 +21,49 @@ impl Sending { tokio::spawn(async move { let mut futures = FuturesUnordered::new(); + let mut waiting_servers = HashSet::new(); + let mut subscriber = serverpduids.watch_prefix(b""); loop { select! { - Some(_) = futures.next() => {}, - Some(event) = &mut subscriber => { - let serverpduid = if let sled::Event::Insert {key, ..} = event { - key - } else - { return Err::<(), Error>(Error::bad_database("")); }; - let mut parts = serverpduid.splitn(2, |&b| b == 0xff); - let server = Box::::try_from( - utils::string_from_bytes(parts.next().expect("splitn will always return 1 or more elements")) - .map_err(|_| Error::bad_database("ServerName in serverpduid bytes are invalid."))? 
- ).map_err(|_| Error::bad_database("ServerName in serverpduid is invalid."))?; - - let pdu_id = parts.next().ok_or_else(|| Error::bad_database("Invalid serverpduid in db."))?; - let mut pdu_json = rooms.get_pdu_json_from_id(&pdu_id.into())?.ok_or_else(|| Error::bad_database("Event in serverpduids not found in db."))?; - - if let Some(unsigned) = pdu_json - .as_object_mut() - .expect("json is object") - .get_mut("unsigned") { - unsigned.as_object_mut().expect("unsigned is object").remove("transaction_id"); - } - - pdu_json - .as_object_mut() - .expect("json is object") - .remove("event_id"); - - let raw_json = - serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); - - let globals = &globals; - - futures.push( - async move { - let pdus = vec![raw_json]; - let transaction_id = utils::random_string(16); - - server_server::send_request( - &globals, - server, - federation::transactions::send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus: &pdus, - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &transaction_id, - }, - ).await + Some(server) = futures.next() => { + warn!("response: {:?}", &server); + match server { + Ok((server, _response)) => { + waiting_servers.remove(&server) } - ); + Err((server, _e)) => { + waiting_servers.remove(&server) + } + }; }, + Some(event) = &mut subscriber => { + if let sled::Event::Insert { key, .. } = event { + let serverpduid = key.clone(); + let mut parts = serverpduid.splitn(2, |&b| b == 0xff); + + if let Some((server, pdu_id)) = utils::string_from_bytes( + parts + .next() + .expect("splitn will always return 1 or more elements"), + ) + .map_err(|_| Error::bad_database("ServerName in serverpduid bytes are invalid.")) + .and_then(|server_str|Box::::try_from(server_str) + .map_err(|_| Error::bad_database("ServerName in serverpduid is invalid."))) + .ok() + .filter(|server| waiting_servers.insert(server.clone())) + .and_then(|server| parts + .next() + .ok_or_else(|| Error::bad_database("Invalid serverpduid in db.")).ok().map(|pdu_id| (server, pdu_id))) + { + futures.push(Self::handle_event(server, pdu_id.into(), &globals, &rooms)); + } + } + } } } }); } - /* - */ pub fn send_pdu(&self, server: Box, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); @@ -87,4 +73,63 @@ impl Sending { Ok(()) } + + async fn handle_event( + server: Box, + pdu_id: IVec, + globals: &super::globals::Globals, + rooms: &super::rooms::Rooms, + ) -> std::result::Result< + (Box, send_transaction_message::v1::Response), + (Box, Error), + > { + let mut pdu_json = rooms + .get_pdu_json_from_id(&pdu_id) + .map_err(|e| (server.clone(), e))? 
+ .ok_or_else(|| { + ( + server.clone(), + Error::bad_database("Event in serverpduids not found in db."), + ) + })?; + + if let Some(unsigned) = pdu_json + .as_object_mut() + .expect("json is object") + .get_mut("unsigned") + { + unsigned + .as_object_mut() + .expect("unsigned is object") + .remove("transaction_id"); + } + + pdu_json + .as_object_mut() + .expect("json is object") + .remove("event_id"); + + let raw_json = + serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); + + let globals = &globals; + + let pdus = vec![raw_json]; + let transaction_id = utils::random_string(16); + + server_server::send_request( + &globals, + server.clone(), + send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &pdus, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &transaction_id, + }, + ) + .await + .map(|response| (server.clone(), response)) + .map_err(|e| (server, e)) + } } diff --git a/src/server_server.rs b/src/server_server.rs index 3ebbeac..d67b0b6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -161,6 +161,7 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); + let url = reqwest_request.url().clone(); let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: @@ -189,7 +190,10 @@ where .expect("reqwest body is valid http body"), ); response.map_err(|e| { - warn!("Server returned bad response: {:?}", e); + warn!( + "Server returned bad response {} ({}): {:?}", + destination, url, e + ); Error::BadServerResponse("Server returned bad response.") }) } From 0d6159c2dafc87685dc2e6fd79b034e4b3f21c7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 25 Sep 2020 12:26:29 +0200 Subject: [PATCH 0310/1727] improvement: get_missing_events route and cleanup --- Cargo.lock | 32 ++++++++++++------------- src/database/sending.rs | 52 ++++++++++++----------------------------- src/main.rs | 1 + src/pdu.rs | 43 ++++++++++++++-------------------- src/server_server.rs | 47 +++++++++++++++++++++++++++++++++++-- 5 files changed, 95 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f6b21c..ed20ca3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -627,9 +627,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "4c30f6d0bc6b00693347368a67d41b58f2fb851215ff1da49e90fe2c5c667151" dependencies = [ "libc", ] @@ -1547,7 +1547,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1563,7 +1563,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "http", "percent-encoding", @@ -1578,7 +1578,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1589,7 +1589,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "ruma-api", "ruma-common", @@ -1602,7 +1602,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "assign", "http", @@ -1621,7 +1621,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-api", @@ -1635,7 +1635,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-common", @@ -1650,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1661,7 +1661,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-api", @@ -1676,7 +1676,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1688,7 +1688,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro2", "quote", @@ -1699,7 +1699,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "serde", "strum", @@ -1708,7 +1708,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "form_urlencoded", "itoa", @@ -1720,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#a6486e7a00183b4578650528d65e83318da53b23" +source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "base64", "ring", diff --git a/src/database/sending.rs b/src/database/sending.rs index d3c7fc6..1ed94cc 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,10 +1,10 @@ use std::{collections::HashSet, convert::TryFrom, time::SystemTime}; -use crate::{server_server, utils, Error, Result}; +use crate::{server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::warn; use rocket::futures::stream::{FuturesUnordered, StreamExt}; -use ruma::{api::federation, Raw, ServerName}; +use ruma::{api::federation, ServerName}; use sled::IVec; use tokio::select; @@ -83,49 +83,27 @@ impl Sending { (Box, send_transaction_message::v1::Response), (Box, Error), > { - let mut pdu_json = rooms - .get_pdu_json_from_id(&pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database("Event in serverpduids not found in db."), - ) - })?; - - if let Some(unsigned) = pdu_json - .as_object_mut() - .expect("json is object") - .get_mut("unsigned") - { - unsigned - .as_object_mut() - .expect("unsigned is object") - .remove("transaction_id"); - } - - pdu_json - .as_object_mut() - .expect("json is object") - .remove("event_id"); - - let raw_json = - serde_json::from_value::>(pdu_json).expect("Raw::from_value always works"); - - let globals = &globals; - - let pdus = vec![raw_json]; - let transaction_id = utils::random_string(16); + let pdu_json = PduEvent::to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(&pdu_id) + .map_err(|e| (server.clone(), e))? 
+ .ok_or_else(|| { + ( + server.clone(), + Error::bad_database("Event in serverpduids not found in db."), + ) + })?, + ); server_server::send_request( &globals, server.clone(), send_transaction_message::v1::Request { origin: globals.server_name(), - pdus: &pdus, + pdus: &[pdu_json], edus: &[], origin_server_ts: SystemTime::now(), - transaction_id: &transaction_id, + transaction_id: &utils::random_string(16), }, ) .await diff --git a/src/main.rs b/src/main.rs index 06fda59..fa1cc5c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -125,6 +125,7 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_public_rooms_route, server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, + server_server::get_missing_events_route, ], ) .attach(AdHoc::on_attach("Config", |mut rocket| async { diff --git a/src/pdu.rs b/src/pdu.rs index d5b5415..4b1df4b 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,7 +1,6 @@ use crate::Error; use js_int::UInt; use ruma::{ - events::pdu::PduStub, events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, @@ -200,32 +199,26 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } - pub fn to_outgoing_federation_event(&self) -> Raw { - let mut unsigned = self.unsigned.clone(); - unsigned.remove("transaction_id"); - - let mut json = json!({ - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "unsigned": unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }); - - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &self.redacts { - json["redacts"] = json!(redacts); + pub fn to_outgoing_federation_event( + mut pdu_json: serde_json::Value, + ) -> Raw { + if let Some(unsigned) = pdu_json + .as_object_mut() + .expect("json is object") + .get_mut("unsigned") + { + unsigned + .as_object_mut() + .expect("unsigned is object") + .remove("transaction_id"); } - serde_json::from_value(json).expect("Raw::from_value always works") + pdu_json + .as_object_mut() + .expect("json is object") + .remove("event_id"); + + serde_json::from_value::>(pdu_json).expect("Raw::from_value always works") } } diff --git a/src/server_server.rs b/src/server_server.rs index d67b0b6..2d52c4a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3,13 +3,13 @@ use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::warn; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ - api::federation::directory::get_public_rooms_filtered, api::{ federation::{ - directory::get_public_rooms, + directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, }, + event::get_missing_events, transactions::send_transaction_message, }, OutgoingRequest, @@ -373,3 +373,46 @@ pub fn send_transaction_message_route<'a>( } .into()) } + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/federation/v1/get_missing_events/<_>", data = "") +)] +pub fn get_missing_events_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + let mut queued_events = body.latest_events.clone(); + let mut events = Vec::new(); + + let mut i = 0; + while i < 
queued_events.len() && events.len() < u64::from(body.limit) as usize { + if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { + if body.earliest_events.contains( + &serde_json::from_value( + pdu.get("event_id") + .cloned() + .ok_or_else(|| Error::bad_database("Event in db has no event_id field."))?, + ) + .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?, + ) { + i += 1; + continue; + } + queued_events.extend_from_slice( + &serde_json::from_value::>( + pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Invalid prev_events field of pdu in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + ); + events.push(PduEvent::to_outgoing_federation_event(pdu)); + } + i += 1; + } + + dbg!(&events); + + Ok(get_missing_events::v1::Response { events }.into()) +} From bcd1fe18561cbda0f26f53464325fccd177e42ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 5 Oct 2020 22:19:22 +0200 Subject: [PATCH 0311/1727] feat: admin room --- Cargo.lock | 15 - src/client_server/account.rs | 301 +++++++++++++++-- src/client_server/membership.rs | 196 ++++++----- src/client_server/message.rs | 43 ++- src/client_server/profile.rs | 140 ++++---- src/client_server/redact.rs | 37 +-- src/client_server/room.rs | 556 +++++++++++++++----------------- src/client_server/state.rs | 31 +- src/database/rooms.rs | 49 ++- src/database/users.rs | 5 + src/main.rs | 1 + src/server_server.rs | 64 +++- 12 files changed, 864 insertions(+), 574 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed20ca3..6571f7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1547,7 +1547,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "ruma-api", "ruma-appservice-api", @@ -1563,7 +1562,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "http", "percent-encoding", @@ -1578,7 +1576,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1589,7 +1586,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "ruma-api", "ruma-common", @@ -1602,7 +1598,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "assign", "http", @@ -1621,7 +1616,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-api", @@ -1635,7 +1629,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-common", @@ -1650,7 +1643,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = 
"0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1661,7 +1653,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "js_int", "ruma-api", @@ -1676,7 +1667,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "rand", "ruma-identifiers-macros", @@ -1688,7 +1678,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "proc-macro2", "quote", @@ -1699,7 +1688,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "serde", "strum", @@ -1708,7 +1696,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "form_urlencoded", "itoa", @@ -1720,7 +1707,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" dependencies = [ "base64", "ring", @@ -1970,7 +1956,6 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#d11a3feb5307715ab5d86af8f25d4bccfee6264b" dependencies = [ "itertools", "js_int", diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 7e0f942..66b4a62 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,3 +1,5 @@ +use std::{collections::BTreeMap, convert::TryInto}; + use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; use ruma::{ @@ -11,8 +13,11 @@ use ruma::{ uiaa::{AuthFlow, UiaaInfo}, }, }, - events::{room::member, EventType}, - UserId, + events::{ + room::canonical_alias, room::guest_access, room::history_visibility, room::join_rules, + room::member, room::name, room::topic, EventType, + }, + RoomAliasId, RoomId, RoomVersionId, UserId, }; use register::RegistrationKind; @@ -73,7 +78,7 @@ pub fn get_register_available_route( feature = "conduit_bin", post("/_matrix/client/r0/register", data = "") )] -pub fn register_route( +pub async fn register_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -202,6 +207,265 @@ pub fn register_route( body.initial_device_display_name.clone(), )?; + // If this is the first user on this server, create the admins room + if db.users.count() == 1 { + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is valid"); + + db.users.create(&conduit_user, "")?; + + let room_id = RoomId::new(db.globals.server_name()); + + let mut content = 
ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = RoomVersionId::Version6; + + // 1. The room create event + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: serde_json::to_value(content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 2. Make conduit bot join + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 3. Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + users.insert(user_id.clone(), 100.into()); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: serde_json::to_value( + ruma::events::room::power_levels::PowerLevelsEventContent { + ban: 50.into(), + events: BTreeMap::new(), + events_default: 0.into(), + invite: 50.into(), + kick: 50.into(), + redact: 50.into(), + state_default: 50.into(), + users, + users_default: 0.into(), + notifications: ruma::events::room::power_levels::NotificationPowerLevels { + room: 50.into(), + }, + }, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 4.1 Join Rules + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomJoinRules, + content: serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 4.2 History Visibility + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomHistoryVisibility, + content: serde_json::to_value( + history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + ), + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 4.3 Guest Access + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomGuestAccess, + content: serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::Forbidden, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 6. 
Events implied by name and topic + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomName, + content: serde_json::to_value( + name::NameEventContent::new("Admin Room".to_owned()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") + })?, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTopic, + content: serde_json::to_value(topic::TopicEventContent { + topic: format!("Manage {}", db.globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // Room alias + let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCanonicalAlias, + content: serde_json::to_value(canonical_alias::CanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + + // Invite and join the real user + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &user_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + } + Ok(register::Response { access_token: Some(token), user_id, @@ -354,23 +618,20 @@ pub async fn deactivate_route( third_party_invite: None, }; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } // Remove devices and mark account as deactivated diff --git a/src/client_server/membership.rs 
b/src/client_server/membership.rs index 628045d..526e82f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -108,22 +108,20 @@ pub async fn leave_room_route( event.membership = member::MembershipState::Leave; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(leave_room::Response::new().into()) } @@ -139,29 +137,27 @@ pub async fn invite_user_route( let sender_id = body.sender_id.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(invite_user::Response.into()) } else { @@ -199,22 +195,20 @@ pub async fn kick_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(kick_user::Response::new().into()) } @@ -257,22 +251,20 @@ pub async fn ban_user_route( }, )?; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - 
&sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(ban_user::Response::new().into()) } @@ -306,22 +298,20 @@ pub async fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(unban_user::Response::new().into()) } @@ -640,6 +630,7 @@ async fn join_room_by_id_helper( &serde_json::to_value(&**pdu).expect("PDU is valid value"), &db.globals, &db.account_data, + &db.sending, )?; if state_events.contains(ev_id) { @@ -657,23 +648,20 @@ async fn join_room_by_id_helper( third_party_invite: None, }; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } Ok(join_room_by_id::Response::new(room_id.clone()).into()) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 5a4488f..c32bd68 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -49,29 +49,26 @@ pub async fn send_message_event_route( let mut unsigned = serde_json::Map::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - let event_id = db - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: body.content.event_type().into(), - content: serde_json::from_str( - body.json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - unsigned: Some(unsigned), - state_key: None, - redacts: None, - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + let event_id = db.rooms.build_and_append_pdu( + PduBuilder { + event_type: body.content.event_type().into(), + content: serde_json::from_str( + body.json_body + .as_ref() + .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
+ .get(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; db.transaction_ids .add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 686d4c3..9c6bd51 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -31,43 +31,41 @@ pub async fn set_displayname_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( "Tried to send displayname update for user not in the room.", ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // Presence update db.rooms.edus.update_presence( @@ -125,43 +123,41 @@ pub async fn set_avatar_url_route( // Send a new membership event and presence update into all joined rooms for room_id in db.rooms.rooms_joined(&sender_id) { let room_id = room_id?; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_id.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( - "Tried to send avatar url update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
- }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_id.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send avatar url update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 24df8dd..b13cd80 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -18,26 +18,23 @@ pub async fn redact_event_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let event_id = db - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomRedaction, - content: serde_json::to_value(redaction::RedactionEventContent { - reason: body.reason.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: Some(body.event_id.clone()), - }, - &sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + let event_id = db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomRedaction, + content: serde_json::to_value(redaction::RedactionEventContent { + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.clone()), + }, + &sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index d21148b..28d30e2 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -53,47 +53,43 @@ pub async fn create_room_route( content.room_version = RoomVersionId::Version6; // 1. The room create event - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: serde_json::to_value(content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: serde_json::to_value(content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // 2. 
Let the room creator join - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // 3. Power levels let mut users = BTreeMap::new(); @@ -123,22 +119,20 @@ pub async fn create_room_route( }) .expect("event is valid, we just created it") }; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: power_levels_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: power_levels_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // 4. 
Events set by preset @@ -149,84 +143,76 @@ pub async fn create_room_route( }); // 4.1 Join Rules - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomJoinRules, - content: match preset { - create_room::RoomPreset::PublicChat => serde_json::to_value( - join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), - ) - .expect("event is valid, we just created it"), - // according to spec "invite" is the default - _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), - }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; - - // 4.2 History Visibility - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomHistoryVisibility, - content: serde_json::to_value( - history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, - ), + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomJoinRules, + content: match preset { + create_room::RoomPreset::PublicChat => serde_json::to_value( + join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), ) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, + // according to spec "invite" is the default + _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( + join_rules::JoinRule::Invite, + )) + .expect("event is valid, we just created it"), }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; + + // 4.2 History Visibility + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomHistoryVisibility, + content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( + history_visibility::HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // 4.3 Guest Access - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomGuestAccess, - content: match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::Forbidden, - )) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::CanJoin, + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomGuestAccess, + content: match preset { + create_room::RoomPreset::PublicChat => { + serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::Forbidden, )) - .expect("event is valid, we just created it"), - }, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, + .expect("event is valid, we just created it") + } + _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( + guest_access::GuestAccess::CanJoin, + )) + .expect("event is valid, we just created it"), }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + unsigned: None, + state_key: Some("".to_owned()), + 
redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // 5. Events listed in initial_state for event in &body.initial_state { @@ -240,90 +226,82 @@ pub async fn create_room_route( continue; } - db.rooms - .build_and_append_pdu( - pdu_builder, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + pdu_builder, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } // 6. Events implied by name and topic if let Some(name) = &body.name { - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomName, - content: serde_json::to_value( - name::NameEventContent::new(name.clone()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") - })?, - ) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomName, + content: serde_json::to_value( + name::NameEventContent::new(name.clone()).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") + })?, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } if let Some(topic) = &body.topic { - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTopic, - content: serde_json::to_value(topic::TopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTopic, + content: serde_json::to_value(topic::TopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } // 7. 
Events implied by invite (and TODO: invite_3pid) for user in &body.invite { - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user)?, - avatar_url: db.users.avatar_url(&user)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user.to_string()), - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user)?, + avatar_url: db.users.avatar_url(&user)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user.to_string()), + redacts: None, + }, + &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } // Homeserver specific stuff @@ -395,29 +373,24 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = db - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTombstone, - content: serde_json::to_value( - ruma::events::room::tombstone::TombstoneEventContent { - body: "This room has been replaced".to_string(), - replacement_room: replacement_room.clone(), - }, - ) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + let tombstone_event_id = db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTombstone, + content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent { + body: "This room has been replaced".to_string(), + replacement_room: replacement_room.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // Get the old room federations status let federate = serde_json::from_value::>( @@ -444,48 +417,44 @@ pub async fn upgrade_room_route( create_event_content.room_version = new_version; create_event_content.predecessor = predecessor; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: serde_json::to_value(create_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: serde_json::to_value(create_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + )?; // Join the new room - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: 
EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_id.to_string()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_id.to_string()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + )?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ @@ -507,22 +476,20 @@ pub async fn upgrade_room_route( None => continue, // Skipping missing events. }; - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type, - content: event_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &replacement_room, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type, + content: event_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &replacement_room, + &db.globals, + &db.sending, + &db.account_data, + )?; } // Moves any local aliases to the new room @@ -552,24 +519,21 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = db - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: serde_json::to_value(power_levels_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_id, - &body.room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await; + let _ = db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: serde_json::to_value(power_levels_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_id, + &body.room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; // Return the replacement room id Ok(upgrade_room::Response { replacement_room }.into()) diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 46182a1..1e13b42 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -213,23 +213,20 @@ pub async fn send_state_event_for_key_helper( } } - let event_id = db - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: content.event_type().into(), - content: json, - unsigned: None, - state_key, - redacts: None, - }, - &sender_id, - &room_id, - &db.globals, - &db.sending, - &db.account_data, - ) - .await?; + let event_id = db.rooms.build_and_append_pdu( + PduBuilder { + event_type: content.event_type().into(), + content: json, + unsigned: None, + state_key, + redacts: None, + }, 
+ &sender_id, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; Ok(event_id) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 108edb5..ab05b39 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -10,7 +10,7 @@ use ruma::{ events::{ ignored_user_list, room::{ - member, + member, message, power_levels::{self, PowerLevelsEventContent}, }, EventType, @@ -440,6 +440,7 @@ impl Rooms { pdu_json: &serde_json::Value, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, + sending: &super::sending::Sending, ) -> Result> { self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; @@ -452,7 +453,8 @@ impl Rooms { self.edus .private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + let room_id = pdu.room_id.clone(); + let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&index.to_be_bytes()); @@ -503,6 +505,45 @@ impl Rooms { key.extend_from_slice(&pdu_id); self.tokenids.insert(key, &[])?; } + + if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + && self + .id_from_alias( + &format!("#admins:{}", globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid room alias"), + )? + .as_ref() + == Some(&pdu.room_id) + { + let mut parts = body.split_whitespace().skip(1); + if let Some(command) = parts.next() { + let args = parts.collect::>(); + + self.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value( + message::TextMessageEventContent { + body: format!("Command: {}, Args: {:?}", command, args), + formatted: None, + relates_to: None, + }, + ) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &UserId::try_from(format!("@conduit:{}", globals.server_name())) + .expect("@conduit:server_name is valid"), + &room_id, + &globals, + &sending, + &account_data, + )?; + } + } } } _ => {} @@ -570,7 +611,7 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. - pub async fn build_and_append_pdu( + pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, @@ -793,7 +834,7 @@ impl Rooms { .expect("json is object") .insert("event_id".to_owned(), pdu.event_id.to_string().into()); - let pdu_id = self.append_pdu(&pdu, &pdu_json, globals, account_data)?; + let pdu_id = self.append_pdu(&pdu, &pdu_json, globals, account_data, sending)?; self.append_to_state(&pdu_id, &pdu)?; diff --git a/src/database/users.rs b/src/database/users.rs index 2e26c1e..0d35e36 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -57,6 +57,11 @@ impl Users { Ok(()) } + /// Returns the number of users registered on this server. + pub fn count(&self) -> usize { + self.userid_password.iter().count() + } + /// Find out which user an access token belongs to. 
pub fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid diff --git a/src/main.rs b/src/main.rs index fa1cc5c..8fb5fda 100644 --- a/src/main.rs +++ b/src/main.rs @@ -126,6 +126,7 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, server_server::get_missing_events_route, + server_server::get_profile_information_route, ], ) .attach(AdHoc::on_attach("Config", |mut rocket| async { diff --git a/src/server_server.rs b/src/server_server.rs index 2d52c4a..0c175bf 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::warn; use rocket::{get, post, put, response::content::Json, State}; @@ -10,6 +11,7 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, }, event::get_missing_events, + query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, @@ -362,9 +364,9 @@ pub fn send_transaction_message_route<'a>( let pdu = serde_json::from_value::(value.clone()) .expect("all ruma pdus are conduit pdus"); if db.rooms.exists(&pdu.room_id)? { - let pdu_id = db - .rooms - .append_pdu(&pdu, &value, &db.globals, &db.account_data)?; + let pdu_id = + db.rooms + .append_pdu(&pdu, &value, &db.globals, &db.account_data, &db.sending)?; db.rooms.append_to_state(&pdu_id, &pdu)?; } } @@ -416,3 +418,59 @@ pub fn get_missing_events_route<'a>( Ok(get_missing_events::v1::Response { events }.into()) } + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/query/profile", data = "") +)] +pub fn get_profile_information_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + let mut displayname = None; + let mut avatar_url = None; + + match body.field { + Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, + None => { + displayname = db.users.displayname(&body.user_id)?; + avatar_url = db.users.avatar_url(&body.user_id)?; + } + } + + Ok(get_profile_information::v1::Response { + displayname, + avatar_url, + } + .into()) +} + +/* +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v2/invite/<_>/<_>", data = "") +)] +pub fn get_user_devices_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + let mut displayname = None; + let mut avatar_url = None; + + match body.field { + Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, + None => { + displayname = db.users.displayname(&body.user_id)?; + avatar_url = db.users.avatar_url(&body.user_id)?; + } + } + + Ok(get_profile_information::v1::Response { + displayname, + avatar_url, + } + .into()) +} +*/ From c15ae3c126020c3826f15e5f50ca70a669fc1402 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 6 Oct 2020 20:43:35 +0200 Subject: [PATCH 0312/1727] fix: invalid typing bytes because of 0xff in numbers --- src/database/rooms/edus.rs | 9 ++++++--- src/database/sending.rs | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index a794c69..29f5407 100644 --- a/src/database/rooms/edus.rs +++ 
b/src/database/rooms/edus.rs @@ -11,6 +11,7 @@ use ruma::{ use std::{ collections::HashMap, convert::{TryFrom, TryInto}, + mem, }; #[derive(Clone)] @@ -228,9 +229,11 @@ impl RoomEdus { let key = key?; Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?) + utils::u64_from_bytes( + &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") + })?[0..mem::size_of::()], + ) .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, )) }) diff --git a/src/database/sending.rs b/src/database/sending.rs index 1ed94cc..c818cbf 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -28,6 +28,7 @@ impl Sending { select! { Some(server) = futures.next() => { warn!("response: {:?}", &server); + warn!("futures left: {}", &futures.len()); match server { Ok((server, _response)) => { waiting_servers.remove(&server) From 6afc4c9b3e066f2d071e8420c9e4111d0dc65d96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 6 Oct 2020 21:04:51 +0200 Subject: [PATCH 0313/1727] feat: federation disabled by default It can be enable in the Rocket.toml config or using ROCKET_FEDERATION_ENABLED=true --- DEPLOY_FROM_SOURCE.md | 3 +++ Rocket-example.toml | 2 ++ docker-compose.yml | 1 + src/database/globals.rs | 6 ++++++ src/server_server.rs | 39 ++++++++++++++++++++++++++++++++++++++- 5 files changed, 50 insertions(+), 1 deletion(-) diff --git a/DEPLOY_FROM_SOURCE.md b/DEPLOY_FROM_SOURCE.md index 4d685f6..456fe6e 100644 --- a/DEPLOY_FROM_SOURCE.md +++ b/DEPLOY_FROM_SOURCE.md @@ -27,7 +27,10 @@ Environment="ROCKET_SERVER_NAME=YOURSERVERNAME.HERE" # EDIT THIS Environment="ROCKET_PORT=14004" # Reverse proxy port +#Environment="ROCKET_MAX_REQUEST_SIZE=20000000" # in bytes #Environment="ROCKET_REGISTRATION_DISABLED=true" +#Environment="ROCKET_ENCRYPTION_DISABLED=true" +#Environment="ROCKET_FEDERATION_ENABLED=true" #Environment="ROCKET_LOG=normal" # Detailed logging Environment="ROCKET_ENV=production" diff --git a/Rocket-example.toml b/Rocket-example.toml index 41b36d3..8eb48e9 100644 --- a/Rocket-example.toml +++ b/Rocket-example.toml @@ -16,6 +16,8 @@ port = 14004 # Note: existing rooms will continue to work #encryption_disabled = true +#federation_enabled = true + # Default path is in this user's data #database_path = "/home/timo/MyConduitServer" diff --git a/docker-compose.yml b/docker-compose.yml index f06eaca..7d19762 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,6 +31,7 @@ services: # ROCKET_PORT: 8000 # ROCKET_REGISTRATION_DISABLED: 'true' # ROCKET_ENCRYPTION_DISABLED: 'true' + # ROCKET_FEDERATION_ENABLED: 'true' # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit # ROCKET_WORKERS: 10 # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB diff --git a/src/database/globals.rs b/src/database/globals.rs index 8ce9c01..37f10ee 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -14,6 +14,7 @@ pub struct Globals { max_request_size: u32, registration_disabled: bool, encryption_disabled: bool, + federation_enabled: bool, } impl Globals { @@ -69,6 +70,7 @@ impl Globals { .map_err(|_| Error::BadConfig("Invalid max_request_size."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), encryption_disabled: config.get_bool("encryption_disabled").unwrap_or(false), + federation_enabled: 
config.get_bool("federation_enabled").unwrap_or(false), }) } @@ -114,4 +116,8 @@ impl Globals { pub fn encryption_disabled(&self) -> bool { self.encryption_disabled } + + pub fn federation_enabled(&self) -> bool { + self.federation_enabled + } } diff --git a/src/server_server.rs b/src/server_server.rs index 0c175bf..79976c0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -57,6 +57,10 @@ pub async fn send_request( where T: Debug, { + if !globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let resolver = AsyncResolver::tokio_from_system_conf() .await .map_err(|_| Error::BadConfig("Failed to set up trust dns resolver with system config."))?; @@ -204,7 +208,11 @@ where } #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] -pub fn get_server_version() -> ConduitResult { +pub fn get_server_version(db: State<'_, Database>) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + Ok(get_server_version::Response { server: Some(get_server_version::Server { name: Some("Conduit".to_owned()), @@ -216,6 +224,11 @@ pub fn get_server_version() -> ConduitResult { #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] pub fn get_server_keys(db: State<'_, Database>) -> Json { + if !db.globals.federation_enabled() { + // TODO: Use proper types + return Json("Federation is disabled.".to_owned()); + } + let mut verify_keys = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", db.globals.keypair().version()), @@ -259,6 +272,10 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let response = client_server::get_public_rooms_filtered_helper( &db, None, @@ -302,6 +319,10 @@ pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let response = client_server::get_public_rooms_filtered_helper( &db, None, @@ -345,6 +366,10 @@ pub fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + //dbg!(&*body); for pdu in &body.pdus { let mut value = serde_json::from_str(pdu.json().get()) @@ -384,6 +409,10 @@ pub fn get_missing_events_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); @@ -427,6 +456,10 @@ pub fn get_profile_information_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let mut displayname = None; let mut avatar_url = None; @@ -455,6 +488,10 @@ pub fn get_user_devices_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { + if !db.globals.federation_enabled() { + return Err(Error::BadConfig("Federation is disabled.")); + } + let mut displayname = None; let mut avatar_url = None; From 304c53c4f53397a251e567a891c54ece7835173f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Oct 2020 12:29:19 +0200 Subject: [PATCH 0314/1727] style: make 
clippy happier --- src/client_server/room.rs | 8 ++------ src/client_server/sync.rs | 2 +- src/database/sending.rs | 2 +- src/pdu.rs | 4 ++-- src/server_server.rs | 2 +- 5 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 28d30e2..744d949 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -354,12 +354,8 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - // Validate the room version requested - let new_version = - RoomVersionId::try_from(body.new_version.clone()).expect("invalid room version id"); - if !matches!( - new_version, + body.new_version, RoomVersionId::Version5 | RoomVersionId::Version6 ) { return Err(Error::BadRequest( @@ -414,7 +410,7 @@ pub async fn upgrade_room_route( let mut create_event_content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); create_event_content.federate = federate; - create_event_content.room_version = new_version; + create_event_content.room_version = body.new_version.clone(); create_event_content.predecessor = predecessor; db.rooms.build_and_append_pdu( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index aec03af..6f41160 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -477,7 +477,7 @@ pub async fn sync_events_route( }) .and_then(|pdu| { serde_json::from_value::>( - pdu.content.clone(), + pdu.content, ) .expect("Raw::from_value always works") .deserialize() diff --git a/src/database/sending.rs b/src/database/sending.rs index c818cbf..24a783b 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -84,7 +84,7 @@ impl Sending { (Box, send_transaction_message::v1::Response), (Box, Error), > { - let pdu_json = PduEvent::to_outgoing_federation_event( + let pdu_json = PduEvent::convert_to_outgoing_federation_event( rooms .get_pdu_json_from_id(&pdu_id) .map_err(|e| (server.clone(), e))? 
diff --git a/src/pdu.rs b/src/pdu.rs index 4b1df4b..7118bfc 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -199,7 +199,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } - pub fn to_outgoing_federation_event( + pub fn convert_to_outgoing_federation_event( mut pdu_json: serde_json::Value, ) -> Raw { if let Some(unsigned) = pdu_json @@ -239,7 +239,7 @@ impl From<&state_res::StateEvent> for PduEvent { content: pdu.content().clone(), state_key: Some(pdu.state_key()), prev_events: pdu.prev_event_ids(), - depth: pdu.depth().clone(), + depth: *pdu.depth(), auth_events: pdu.auth_events(), redacts: pdu.redacts().cloned(), unsigned: pdu.unsigned().clone().into_iter().collect(), diff --git a/src/server_server.rs b/src/server_server.rs index 79976c0..462d636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -438,7 +438,7 @@ pub fn get_missing_events_route<'a>( ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(PduEvent::to_outgoing_federation_event(pdu)); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); } i += 1; } From 9d1387954f0fcb59ddf805b972a7cbef489a7367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 16 Oct 2020 14:04:29 +0200 Subject: [PATCH 0315/1727] Update dependencies, remove dbgs --- Cargo.lock | 119 +++++++++++++++++++++++-------------------- rust-toolchain | 2 +- src/server_server.rs | 2 - 3 files changed, 64 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 857635d..a658ee2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,14 +90,14 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.51" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec1931848a574faa8f7c71a12ea00453ff5effbb5f51afe7f77d7a48cace6ac1" +checksum = "707b586e0e2f247cbde68cdd2c3ce69ea7b7be43e1c5b426e37c9319c4b9838e" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 1.0.0", "libc", - "miniz_oxide 0.4.2", + "miniz_oxide 0.4.3", "object", "rustc-demangle", ] @@ -163,9 +163,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c" +checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" [[package]] name = "cfg-if" @@ -173,6 +173,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chrono" version = "0.4.19" @@ -197,9 +203,9 @@ dependencies = [ [[package]] name = "color_quant" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" @@ -271,7 +277,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" dependencies = [ - 
"cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -281,7 +287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg", - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "lazy_static", "maybe-uninit", @@ -296,7 +302,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", ] @@ -384,7 +390,7 @@ version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -566,7 +572,7 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -766,7 +772,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -881,7 +887,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -968,9 +974,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" dependencies = [ "adler", "autocfg", @@ -982,7 +988,7 @@ version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", @@ -1042,7 +1048,7 @@ version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -1100,9 +1106,9 @@ dependencies = [ [[package]] name = "object" -version = "0.20.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693" [[package]] name = "once_cell" @@ -1117,7 +1123,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 0.1.10", "foreign-types", "lazy_static", "libc", @@ -1160,7 +1166,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi", "instant", "libc", @@ -1198,18 +1204,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.26" +version = "0.4.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "13fbdfd6bdee3dc9be46452f86af4a4072975899cf8592466668620bebfbcc17" +checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82fb1329f632c3552cf352d14427d57a511b1cf41db93b3a7d77906a82dcc8e" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2", "quote", @@ -1230,9 +1236,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" @@ -1389,9 +1395,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.9" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "8963b85b8ce3074fecffde43b4b0dded83ce2f367dc8d363afc56679f3ee820b" dependencies = [ "regex-syntax", ] @@ -1408,9 +1414,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "8cab7a364d15cde1e505267766a2d3c4e22a843e1a601f0fa7564c0f82ced11c" [[package]] name = "remove_dir_all" @@ -1738,9 +1744,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "b2610b7f643d18c87dff3b489950269617e6601a51f1f05aa5daefee36f64f0b" [[package]] name = "rustc_version" @@ -1836,18 +1842,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" +checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" +checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2", "quote", @@ -1856,9 +1862,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ "itoa", "ryu", @@ -1936,7 +1942,7 @@ version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "redox_syscall", "winapi 0.3.9", @@ -1950,9 
+1956,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6" +checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" dependencies = [ "version_check", ] @@ -1966,7 +1972,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#d11a3feb5307715ab5d86af8f25d4bccfee6264b" +source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#a7d76935f12757aecfee305838069c9bcbe7d34a" dependencies = [ "itertools", "js_int", @@ -2051,9 +2057,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.42" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c51d92969d209b54a98397e1b91c8ae82d8c87a7bb87df0b29aa2ad81454228" +checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" dependencies = [ "proc-macro2", "quote", @@ -2066,7 +2072,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand", "redox_syscall", @@ -2230,9 +2236,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" dependencies = [ "serde", ] @@ -2249,7 +2255,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "log", "pin-project-lite", "tracing-attributes", @@ -2299,9 +2305,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82bb5079aa76438620837198db8a5c529fb9878c730bc2b28179b0241cf04c10" +checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" dependencies = [ "ansi_term", "chrono", @@ -2313,6 +2319,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", "tracing-serde", @@ -2345,7 +2352,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f23cdfdc3d8300b3c50c9e84302d3bd6d860fb9529af84ace6cf9665f181b77" dependencies = [ "backtrace", - "cfg-if", + "cfg-if 0.1.10", "futures", "ipconfig", "lazy_static", @@ -2460,7 +2467,7 @@ version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "serde", "serde_json", "wasm-bindgen-macro", @@ -2487,7 +2494,7 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "js-sys", "wasm-bindgen", "web-sys", diff --git a/rust-toolchain b/rust-toolchain index 50aceaa..21998d3 100644 --- a/rust-toolchain +++ 
b/rust-toolchain @@ -1 +1 @@ -1.45.0 +1.47.0 diff --git a/src/server_server.rs b/src/server_server.rs index 462d636..b8b575e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -443,8 +443,6 @@ pub fn get_missing_events_route<'a>( i += 1; } - dbg!(&events); - Ok(get_missing_events::v1::Response { events }.into()) } From 9109cb492f170d8066ba5229726945fdc7896d91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 17 Oct 2020 11:24:57 +0200 Subject: [PATCH 0316/1727] fix: double join over federation --- src/client_server/membership.rs | 27 ++++++++++++++++++++++----- src/database/rooms.rs | 2 +- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 526e82f..9c1e7c6 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -22,9 +22,7 @@ use ruma::{ EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; -use std::{ - collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, sync::Arc, -}; +use std::{collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, iter, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -474,6 +472,17 @@ async fn join_room_by_id_helper( "origin_server_ts".to_owned(), utils::millis_since_unix_epoch().into(), ); + join_event_stub.insert( + "content".to_owned(), + serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Join, + displayname: db.users.displayname(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + ); // Generate event id let event_id = EventId::try_from(&*format!( @@ -494,14 +503,20 @@ async fn join_room_by_id_helper( ) .expect("event is valid, we just created it"); + // Add event_id back + let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); + join_event_stub.insert("event_id".to_owned(), event_id.to_string().into()); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub_value; + let send_join_response = server_server::send_request( &db.globals, remote_server.clone(), federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, - pdu_stub: serde_json::from_value(join_event_stub_value) - .expect("we just created this event"), + pdu_stub: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; @@ -529,6 +544,7 @@ async fn join_room_by_id_helper( let state_events = room_state .clone() .map(|pdu: Result<(EventId, serde_json::Value)>| Ok(pdu?.0)) + .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created .collect::>>()?; let auth_chain = send_join_response @@ -539,6 +555,7 @@ async fn join_room_by_id_helper( let mut event_map = room_state .chain(auth_chain) + .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; serde_json::from_value::(value) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ab05b39..db473ff 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -483,7 +483,7 @@ impl Rooms { .map_err(|_| { Error::BadRequest( ErrorKind::InvalidParam, - "Invalid redaction event content.", + "Invalid member event content.", ) })?, &pdu.sender, From 12b0efac8bc6b4045e2c6d6bfb648de95f2ec3f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: 
Sun, 18 Oct 2020 08:56:21 +0200 Subject: [PATCH 0317/1727] fix: random timeline reloads --- src/client_server/membership.rs | 13 +++++++++-- src/client_server/sync.rs | 10 ++++----- src/database/rooms.rs | 38 +++++++++++++++++++++------------ src/server_server.rs | 16 +++++++++++--- 4 files changed, 53 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 9c1e7c6..06e5adf 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -22,7 +22,10 @@ use ruma::{ EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; -use std::{collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, iter, sync::Arc}; +use std::{ + collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, iter, + sync::Arc, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -642,9 +645,15 @@ async fn join_room_by_id_helper( .expect("Found event_id in sorted events that is not in resolved state"); // We do not rebuild the PDU in this case only insert to DB - let pdu_id = db.rooms.append_pdu( + let count = db.globals.next_count()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_pdu( &PduEvent::from(&**pdu), &serde_json::to_value(&**pdu).expect("PDU is valid value"), + count, + pdu_id.clone().into(), &db.globals, &db.account_data, &db.sending, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6f41160..688d304 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -110,11 +110,11 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. 
- let since_state_hash = db - .rooms - .pdus_after(sender_id, &room_id, since) // - 1 So we can get the event at since - .next() - .map(|pdu| db.rooms.pdu_state_hash(&pdu.ok()?.0).ok()?); + let first_pdu_after_since = db.rooms.pdus_after(sender_id, &room_id, since).next(); + + let since_state_hash = first_pdu_after_since + .as_ref() + .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); let since_members = since_state_hash.as_ref().map(|state_hash| { state_hash.as_ref().and_then(|state_hash| { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index db473ff..35c3eac 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -438,25 +438,18 @@ impl Rooms { &self, pdu: &PduEvent, pdu_json: &serde_json::Value, + count: u64, + pdu_id: IVec, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, sending: &super::sending::Sending, - ) -> Result> { + ) -> Result<()> { self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; - // Increment the last index and use that - // This is also the next_batch/since value - let index = globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?; - - let room_id = pdu.room_id.clone(); - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&index.to_be_bytes()); + .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; @@ -537,7 +530,7 @@ impl Rooms { }, &UserId::try_from(format!("@conduit:{}", globals.server_name())) .expect("@conduit:server_name is valid"), - &room_id, + &pdu.room_id, &globals, &sending, &account_data, @@ -549,7 +542,7 @@ impl Rooms { _ => {} } - Ok(pdu_id) + Ok(()) } /// Generates a new StateHash and associates it with the incoming event. @@ -834,10 +827,27 @@ impl Rooms { .expect("json is object") .insert("event_id".to_owned(), pdu.event_id.to_string().into()); - let pdu_id = self.append_pdu(&pdu, &pdu_json, globals, account_data, sending)?; + // Increment the last index and use that + // This is also the next_batch/since value + let count = globals.next_count()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. self.append_to_state(&pdu_id, &pdu)?; + self.append_pdu( + &pdu, + &pdu_json, + count, + pdu_id.clone().into(), + globals, + account_data, + sending, + )?; + for server in self .room_servers(room_id) .filter_map(|r| r.ok()) diff --git a/src/server_server.rs b/src/server_server.rs index b8b575e..3fefbd5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -389,9 +389,19 @@ pub fn send_transaction_message_route<'a>( let pdu = serde_json::from_value::(value.clone()) .expect("all ruma pdus are conduit pdus"); if db.rooms.exists(&pdu.room_id)? 
{ - let pdu_id = - db.rooms - .append_pdu(&pdu, &value, &db.globals, &db.account_data, &db.sending)?; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.sending, + )?; db.rooms.append_to_state(&pdu_id, &pdu)?; } } From 151ef07fb307666cceca856c5d4c31dcbb43e3a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Oct 2020 08:48:31 +0200 Subject: [PATCH 0318/1727] docs: cross compile --- CROSS_COMPILE.md | 13 +++++++++++++ Cargo.lock | 10 ++++++++++ Cargo.toml | 1 + 3 files changed, 24 insertions(+) create mode 100644 CROSS_COMPILE.md diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md new file mode 100644 index 0000000..3c7bbcb --- /dev/null +++ b/CROSS_COMPILE.md @@ -0,0 +1,13 @@ +Install docker: + +$ sudo apt install docker +$ sudo usermod -aG docker $USER + +Then log out and back in. + +$ sudo systemctl start docker + +$ cargo install cross +$ cross build --release --features tls_vendored --target armv7-unknown-linux-musleabihf + +The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit diff --git a/Cargo.lock b/Cargo.lock index a658ee2..6fed2af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1136,6 +1136,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +[[package]] +name = "openssl-src" +version = "111.12.0+1.1.1h" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.58" @@ -1145,6 +1154,7 @@ dependencies = [ "autocfg", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index 2126e42..acab8d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ trust-dns-resolver = "0.19.5" [features] default = ["conduit_bin"] conduit_bin = [] # TODO: add rocket to this when it is optional +tls_vendored = ["reqwest/native-tls-vendored"] [[bin]] name = "conduit" From ce94ad05a7a1bcc6e861ca4d8e51395e4ee4e5af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Oct 2020 13:23:06 +0200 Subject: [PATCH 0319/1727] Update README.md --- README.md | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 44ab0d6..48b51ae 100644 --- a/README.md +++ b/README.md @@ -6,22 +6,22 @@ #### What is the goal? -A fast Matrix homeserver that's easy to set up and just works. You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. - -#### Is it fast? - -See it in action: +A fast Matrix homeserver that's easy to set up and just works. You can install +it on a mini-computer like the Raspberry Pi to host Matrix for your family, +friends or company. #### Can I try it out? -Yes! Just open a Matrix client ( or Element Android for example) and register on the `https://conduit.koesters.xyz` homeserver. +Yes! Just open a Matrix client ( or Element Android for +example) and register on the `https://conduit.koesters.xyz` homeserver. #### How can I deploy my own? 
##### From source Clone the repo, build it with `cargo build --release` and call the binary -(target/release/conduit) from somewhere like a systemd script. [Read more](DEPLOY_FROM_SOURCE.md) +(target/release/conduit) from somewhere like a systemd script. [Read +more](DEPLOY_FROM_SOURCE.md) ##### Using Docker @@ -36,14 +36,16 @@ Or build and run it with docker or docker-compose. [Read more](docker/README.md) #### What is it build on? -- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and responses that can be (de)serialized -- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with good performance +- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and + responses that can be (de)serialized +- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with + good performance - [Rocket](https://rocket.rs): A flexible web framework #### What are the biggest things still missing? -- Federation (Talk to other Matrix servers) - Appservices (Bridges and Bots) +- Most federation features (invites, e2ee) - Push notifications on mobile - Notification settings - Lots of testing @@ -52,8 +54,10 @@ Also check out the [milestones](https://git.koesters.xyz/timo/conduit/milestones #### How can I contribute? -1. Look for an issue you would like to work on and make sure it's not assigned to other users -2. Ask someone to assign the issue to you (comment on the issue or chat in #conduit:matrix.org) +1. Look for an issue you would like to work on and make sure it's not assigned + to other users +2. Ask someone to assign the issue to you (comment on the issue or chat in + #conduit:matrix.org) 3. Fork the repo and work on the issue. #conduit:matrix.org is happy to help :) 4. Submit a PR From a2dbc6fe6de27e3241c71cf63c2b6d35efe8da67 Mon Sep 17 00:00:00 2001 From: Timo Date: Mon, 24 Aug 2020 11:32:15 +0200 Subject: [PATCH 0320/1727] Change license to Apache-2.0 --- .gitea/PULL_REQUEST_TEMPLATE.md | 2 +- Cargo.toml | 2 +- Dockerfile | 2 +- LICENSE | 837 +++++++------------------------- 4 files changed, 179 insertions(+), 664 deletions(-) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 38cef90..0e4e01b 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -1 +1 @@ -- [ ] I agree to release my code and all other changes of this PR under the AGPL-3.0-only license +- [ ] I agree to release my code and all other changes of this PR under the Apache-2.0 license diff --git a/Cargo.toml b/Cargo.toml index acab8d0..9e23c36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "conduit" description = "A Matrix homeserver written in Rust" -license = "AGPL-3.0-only" +license = "Apache-2.0" authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://git.koesters.xyz/timo/conduit" diff --git a/Dockerfile b/Dockerfile index ff84ac6..3ed7371 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,7 +53,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.url="https://conduit.rs/" \ org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ - org.opencontainers.image.licenses="AGPL-3.0-only" \ + org.opencontainers.image.licenses="Apache-2.0" \ org.opencontainers.image.documentation="" \ org.opencontainers.image.ref.name="" \ org.label-schema.docker.build="docker build . 
-t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ diff --git a/LICENSE b/LICENSE index be3f7b2..d9a10c0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,661 +1,176 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -<https://www.gnu.org/licenses/>. 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS From 243126d3930a23e115f7282357fda09528fb2e39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Oct 2020 16:19:14 +0200 Subject: [PATCH 0321/1727] Allow reading state if history_visibility is world readable See https://matrix.org/docs/spec/client_server/r0.6.1#id87 --- src/client_server/state.rs | 81 ++++++++++++++++++++++++++++++++------ 1 file changed, 68 insertions(+), 13 deletions(-) diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 1e13b42..0d46d18 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -8,7 +8,11 @@ use ruma::{ send_state_event_for_empty_key, send_state_event_for_key, }, }, - events::{AnyStateEventContent, EventContent}, + events::{ + room::history_visibility::HistoryVisibility, + room::history_visibility::HistoryVisibilityEventContent, AnyStateEventContent, + EventContent, EventType, + }, EventId, RoomId, UserId, }; @@ -97,11 +101,28 @@ pub fn get_state_events_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); + if !matches!( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .map(|event| { + serde_json::from_value::(event.content) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + .map(|e| e.history_visibility) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } } Ok(get_state_events::Response { @@ -125,11 +146,28 @@ pub fn get_state_events_for_key_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); + if !matches!( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .map(|event| { + serde_json::from_value::(event.content) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + .map(|e| e.history_visibility) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } } let event = db @@ -157,11 +195,28 @@ pub fn get_state_events_for_empty_key_route( ) -> ConduitResult { let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !db.rooms.is_joined(sender_id, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); + if !matches!( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? 
+ .map(|event| { + serde_json::from_value::(event.content) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + .map(|e| e.history_visibility) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); + } } let event = db From f6f15d93965acdb2eda31d910c95044ff541314c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Oct 2020 16:40:02 +0200 Subject: [PATCH 0322/1727] Use conduit.rs server in the README shield --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 48b51ae..6c9fa74 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ### A Matrix homeserver written in Rust [![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) -[![Matrix](https://img.shields.io/matrix/conduit:koesters.xyz?server_fqdn=matrix.koesters.xyz&logo=matrix)](https://matrix.to/#/#conduit:koesters.xyz) +[![Matrix](https://img.shields.io/matrix/conduit:conduit.rs?server_fqdn=conduit.koesters.xyz)](https://matrix.to/#/#conduit:matrix.org) #### What is the goal? From f0a21b61655687bb039bac088d6215f73714f7d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Oct 2020 20:33:12 +0200 Subject: [PATCH 0323/1727] fix: use device_id when registering TIL body.device_id != (*body).device_id, which is pretty bad, so I renamed body.device_id to body.sender_device --- src/client_server/account.rs | 42 +++++++-------- src/client_server/backup.rs | 89 ++++++++++++++++---------------- src/client_server/config.rs | 8 +-- src/client_server/context.rs | 8 +-- src/client_server/device.rs | 47 ++++++++--------- src/client_server/keys.rs | 64 ++++++++++++++--------- src/client_server/membership.rs | 62 +++++++++++----------- src/client_server/message.rs | 28 +++++----- src/client_server/presence.rs | 12 ++--- src/client_server/profile.rs | 40 +++++++------- src/client_server/push.rs | 4 +- src/client_server/read_marker.rs | 10 ++-- src/client_server/redact.rs | 4 +- src/client_server/room.rs | 56 ++++++++++---------- src/client_server/search.rs | 4 +- src/client_server/session.rs | 12 ++--- src/client_server/state.rs | 26 +++++----- src/client_server/sync.rs | 70 ++++++++++++------------- src/client_server/tag.rs | 16 +++--- src/client_server/to_device.rs | 12 ++--- src/client_server/typing.rs | 6 +-- src/ruma_wrapper.rs | 10 ++-- tests/sytest/sytest-whitelist | 2 + 23 files changed, 325 insertions(+), 307 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 66b4a62..09d9f18 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -489,8 +489,8 @@ pub fn change_password_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { @@ -504,8 +504,8 @@ pub fn change_password_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - device_id, + &sender_user, + sender_device, auth, &uiaainfo, &db.users, @@ -517,22 +517,22 @@ pub fn 
change_password_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } - db.users.set_password(&sender_id, &body.new_password)?; + db.users.set_password(&sender_user, &body.new_password)?; // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma // See: https://github.com/ruma/ruma/issues/107 // Logout all devices except the current one for id in db .users - .all_device_ids(&sender_id) + .all_device_ids(&sender_user) .filter_map(|id| id.ok()) - .filter(|id| id != device_id) + .filter(|id| id != sender_device) { - db.users.remove_device(&sender_id, &id)?; + db.users.remove_device(&sender_user, &id)?; } Ok(change_password::Response.into()) @@ -548,9 +548,9 @@ pub fn change_password_route( get("/_matrix/client/r0/account/whoami", data = "") )] pub fn whoami_route(body: Ruma) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { - user_id: sender_id.clone(), + user_id: sender_user.clone(), } .into()) } @@ -571,8 +571,8 @@ pub async fn deactivate_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { @@ -586,8 +586,8 @@ pub async fn deactivate_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, + &sender_user, + &sender_device, auth, &uiaainfo, &db.users, @@ -599,15 +599,15 @@ pub async fn deactivate_route( // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } // Leave all joined rooms and reject all invitations for room_id in db .rooms - .rooms_joined(&sender_id) - .chain(db.rooms.rooms_invited(&sender_id)) + .rooms_joined(&sender_user) + .chain(db.rooms.rooms_invited(&sender_user)) { let room_id = room_id?; let event = member::MemberEventContent { @@ -623,10 +623,10 @@ pub async fn deactivate_route( event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -635,7 +635,7 @@ pub async fn deactivate_route( } // Remove devices and mark account as deactivated - db.users.deactivate_account(&sender_id)?; + db.users.deactivate_account(&sender_user)?; Ok(deactivate::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 5d9a925..6e02198 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -21,10 +21,10 @@ pub fn create_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups - .create_backup(&sender_id, &body.algorithm, &db.globals)?; + .create_backup(&sender_user, &body.algorithm, &db.globals)?; Ok(create_backup::Response { version }.into()) } @@ -37,9 +37,9 @@ pub fn update_backup_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .update_backup(&sender_id, &body.version, &body.algorithm, &db.globals)?; + .update_backup(&sender_user, &body.version, &body.algorithm, &db.globals)?; Ok(update_backup::Response.into()) } @@ -52,11 +52,11 @@ pub fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = db.key_backups - .get_latest_backup(&sender_id)? + .get_latest_backup(&sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -64,8 +64,8 @@ pub fn get_latest_backup_route( Ok(get_latest_backup::Response { algorithm, - count: (db.key_backups.count_keys(sender_id, &version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &version)?, + count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &version)?, version, } .into()) @@ -79,10 +79,10 @@ pub fn get_backup_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups - .get_backup(&sender_id, &body.version)? + .get_backup(&sender_user, &body.version)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -90,8 +90,8 @@ pub fn get_backup_route( Ok(get_backup::Response { algorithm, - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, version: body.version.to_owned(), } .into()) @@ -105,9 +105,9 @@ pub fn delete_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_backup(&sender_id, &body.version)?; + db.key_backups.delete_backup(&sender_user, &body.version)?; Ok(delete_backup::Response.into()) } @@ -121,12 +121,12 @@ pub fn add_backup_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { db.key_backups.add_key( - &sender_id, + &sender_user, &body.version, &room_id, &session_id, @@ -137,8 +137,8 @@ pub fn add_backup_keys_route( } Ok(add_backup_keys::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } @@ -152,11 +152,11 @@ pub fn add_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (session_id, key_data) in &body.sessions { db.key_backups.add_key( - &sender_id, + &sender_user, &body.version, &body.room_id, &session_id, @@ -166,8 +166,8 @@ pub fn add_backup_key_sessions_route( } Ok(add_backup_key_sessions::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } @@ -181,10 +181,10 @@ pub fn add_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.add_key( - &sender_id, + &sender_user, &body.version, &body.room_id, &body.session_id, @@ -193,8 +193,8 @@ pub fn add_backup_key_session_route( )?; Ok(add_backup_key_session::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } @@ -207,9 +207,9 @@ pub fn get_backup_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = db.key_backups.get_all(&sender_id, &body.version)?; + let rooms = db.key_backups.get_all(&sender_user, &body.version)?; Ok(get_backup_keys::Response { rooms }.into()) } @@ -222,11 +222,11 @@ pub fn get_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups - .get_room(&sender_id, &body.version, &body.room_id); + .get_room(&sender_user, &body.version, &body.room_id); Ok(get_backup_key_sessions::Response { sessions }.into()) } @@ -239,11 +239,11 @@ pub fn get_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db.key_backups - .get_session(&sender_id, &body.version, &body.room_id, &body.session_id)?; + .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)?; Ok(get_backup_key_session::Response { key_data }.into()) } @@ -256,13 +256,14 @@ pub fn delete_backup_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_all_keys(&sender_id, &body.version)?; + db.key_backups + .delete_all_keys(&sender_user, &body.version)?; Ok(delete_backup_keys::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } @@ -275,14 +276,14 @@ pub fn delete_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .delete_room_keys(&sender_id, &body.version, &body.room_id)?; + .delete_room_keys(&sender_user, &body.version, &body.room_id)?; Ok(delete_backup_key_sessions::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } @@ -295,14 +296,14 @@ pub fn delete_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .delete_room_key(&sender_id, &body.version, &body.room_id, &body.session_id)?; + .delete_room_key(&sender_user, &body.version, &body.room_id, &body.session_id)?; Ok(delete_backup_key_session::Response { - count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_id, &body.version)?, + count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: db.key_backups.get_etag(sender_user, &body.version)?, } .into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 515ad16..adff05a 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -20,7 +20,7 @@ pub fn set_global_account_data_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let content = serde_json::from_str::(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; @@ -29,7 +29,7 @@ pub fn set_global_account_data_route( db.account_data.update( None, - sender_id, + sender_user, event_type.clone().into(), &BasicEvent { content: CustomEventContent { @@ -51,11 +51,11 @@ pub fn get_global_account_data_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data = db .account_data - .get::>(None, sender_id, body.event_type.clone().into())? + .get::>(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; Ok(get_global_account_data::Response { account_data: data }.into()) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 4c9be20..a1b848a 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -14,9 +14,9 @@ pub fn get_context_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_id, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -39,7 +39,7 @@ pub fn get_context_route( let events_before = db .rooms - .pdus_until(&sender_id, &body.room_id, base_token) + .pdus_until(&sender_user, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -61,7 +61,7 @@ pub fn get_context_route( let events_after = db .rooms - .pdus_after(&sender_id, &body.room_id, base_token) + .pdus_after(&sender_user, &body.room_id, base_token) .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 6352d0d..233d233 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -20,11 +20,11 @@ pub fn get_devices_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices = db .users - .all_devices_metadata(sender_id) + .all_devices_metadata(sender_user) .filter_map(|r| r.ok()) // Filter out buggy devices .collect::>(); @@ -33,18 +33,17 @@ pub fn get_devices_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/devices/<_device_id>", data = "") + get("/_matrix/client/r0/devices/<_>", data = "") )] pub fn get_device_route( db: State<'_, Database>, body: Ruma>, - _device_id: String, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = db .users - .get_device_metadata(&sender_id, &body.body.device_id)? + .get_device_metadata(&sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; Ok(get_device::Response { device }.into()) @@ -52,39 +51,37 @@ pub fn get_device_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/devices/<_device_id>", data = "") + put("/_matrix/client/r0/devices/<_>", data = "") )] pub fn update_device_route( db: State<'_, Database>, body: Ruma>, - _device_id: String, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = db .users - .get_device_metadata(&sender_id, &body.body.device_id)? + .get_device_metadata(&sender_user, &body.device_id)? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); db.users - .update_device_metadata(&sender_id, &body.body.device_id, &device)?; + .update_device_metadata(&sender_user, &body.device_id, &device)?; Ok(update_device::Response.into()) } #[cfg_attr( feature = "conduit_bin", - delete("/_matrix/client/r0/devices/<_device_id>", data = "") + delete("/_matrix/client/r0/devices/<_>", data = "") )] pub fn delete_device_route( db: State<'_, Database>, body: Ruma>, - _device_id: String, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -99,8 +96,8 @@ pub fn delete_device_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, + &sender_user, + &sender_device, auth, &uiaainfo, &db.users, @@ -112,11 +109,11 @@ pub fn delete_device_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } - db.users.remove_device(&sender_id, &body.body.device_id)?; + db.users.remove_device(&sender_user, &body.device_id)?; Ok(delete_device::Response.into()) } @@ -129,8 +126,8 @@ pub fn delete_devices_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -145,8 +142,8 @@ pub fn delete_devices_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, + &sender_user, + &sender_device, auth, &uiaainfo, &db.users, @@ -158,12 +155,12 @@ pub fn delete_devices_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } for device_id in &body.devices { - db.users.remove_device(&sender_id, &device_id)? + db.users.remove_device(&sender_user, &device_id)? 
} Ok(delete_devices::Response.into()) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 0e7b1ef..2af88cf 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -26,26 +26,40 @@ pub fn upload_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); if let Some(one_time_keys) = &body.one_time_keys { for (key_key, key_value) in one_time_keys { - db.users - .add_one_time_key(sender_id, device_id, key_key, key_value, &db.globals)?; + db.users.add_one_time_key( + sender_user, + sender_device, + key_key, + key_value, + &db.globals, + )?; } } if let Some(device_keys) = &body.device_keys { // This check is needed to assure that signatures are kept - if db.users.get_device_keys(sender_id, device_id)?.is_none() { - db.users - .add_device_keys(sender_id, device_id, device_keys, &db.rooms, &db.globals)?; + if db + .users + .get_device_keys(sender_user, sender_device)? + .is_none() + { + db.users.add_device_keys( + sender_user, + sender_device, + device_keys, + &db.rooms, + &db.globals, + )?; } } Ok(upload_keys::Response { - one_time_key_counts: db.users.count_one_time_keys(sender_id, device_id)?, + one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, } .into()) } @@ -58,7 +72,7 @@ pub fn get_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); @@ -107,14 +121,14 @@ pub fn get_keys_route( } } - if let Some(master_key) = db.users.get_master_key(user_id, sender_id)? { + if let Some(master_key) = db.users.get_master_key(user_id, sender_user)? { master_keys.insert(user_id.clone(), master_key); } - if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_id)? { + if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_user)? { self_signing_keys.insert(user_id.clone(), self_signing_key); } - if user_id == sender_id { - if let Some(user_signing_key) = db.users.get_user_signing_key(sender_id)? { + if user_id == sender_user { + if let Some(user_signing_key) = db.users.get_user_signing_key(sender_user)? { user_signing_keys.insert(user_id.clone(), user_signing_key); } } @@ -169,8 +183,8 @@ pub fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -185,8 +199,8 @@ pub fn upload_signing_keys_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_id, - &device_id, + &sender_user, + &sender_device, auth, &uiaainfo, &db.users, @@ -198,13 +212,13 @@ pub fn upload_signing_keys_route( // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_id, &device_id, &uiaainfo)?; + db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } if let Some(master_key) = &body.master_key { db.users.add_cross_signing_keys( - sender_id, + sender_user, &master_key, &body.self_signing_key, &body.user_signing_key, @@ -224,7 +238,7 @@ pub fn upload_signatures_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { for (key_id, signed_key) in signed_keys { @@ -234,7 +248,7 @@ pub fn upload_signatures_route( ErrorKind::InvalidParam, "Missing signatures field.", ))? - .get(sender_id.to_string()) + .get(sender_user.to_string()) .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Invalid user in signatures field.", @@ -263,7 +277,7 @@ pub fn upload_signatures_route( &user_id, &key_id, signature, - &sender_id, + &sender_user, &db.rooms, &db.globals, )?; @@ -282,14 +296,14 @@ pub fn get_key_changes_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); device_list_updates.extend( db.users .keys_changed( - &sender_id.to_string(), + &sender_user.to_string(), body.from .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, @@ -302,7 +316,7 @@ pub fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(sender_id).filter_map(|r| r.ok()) { + for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { device_list_updates.extend( db.users .keys_changed( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 06e5adf..f99ff56 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -40,7 +40,7 @@ pub async fn join_room_by_id_route( ) -> ConduitResult { join_room_by_id_helper( &db, - body.sender_id.as_ref(), + body.sender_user.as_ref(), &body.room_id, &[body.room_id.server_name().to_owned()], body.third_party_signed.as_ref(), @@ -68,7 +68,7 @@ pub async fn join_room_by_id_or_alias_route( Ok(join_room_by_id_or_alias::Response { room_id: join_room_by_id_helper( &db, - body.sender_id.as_ref(), + body.sender_user.as_ref(), &room_id, &servers, body.third_party_signed.as_ref(), @@ -88,14 +88,14 @@ pub async fn leave_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms .room_state_get( &body.room_id, &EventType::RoomMember, - &sender_id.to_string(), + &sender_user.to_string(), )? 
.ok_or(Error::BadRequest( ErrorKind::BadState, @@ -114,10 +114,10 @@ pub async fn leave_room_route( event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, @@ -135,7 +135,7 @@ pub async fn invite_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { db.rooms.build_and_append_pdu( @@ -153,7 +153,7 @@ pub async fn invite_user_route( state_key: Some(user_id.to_string()), redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, @@ -174,7 +174,7 @@ pub async fn kick_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms @@ -204,7 +204,7 @@ pub async fn kick_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, @@ -222,7 +222,7 @@ pub async fn ban_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -260,7 +260,7 @@ pub async fn ban_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, @@ -278,7 +278,7 @@ pub async fn unban_user_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event = serde_json::from_value::>( db.rooms @@ -307,7 +307,7 @@ pub async fn unban_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, @@ -325,9 +325,9 @@ pub fn forget_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, &sender_id)?; + db.rooms.forget(&body.room_id, &sender_user)?; Ok(forget_room::Response::new().into()) } @@ -340,12 +340,12 @@ pub fn joined_rooms_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(joined_rooms::Response { joined_rooms: db .rooms - .rooms_joined(&sender_id) + .rooms_joined(&sender_user) .filter_map(|r| r.ok()) .collect(), } @@ -360,9 +360,9 @@ pub fn get_member_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_id, &body.room_id)? 
{ + if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -388,11 +388,11 @@ pub fn joined_members_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db .rooms - .is_joined(&sender_id, &body.room_id) + .is_joined(&sender_user, &body.room_id) .unwrap_or(false) { return Err(Error::BadRequest( @@ -420,12 +420,12 @@ pub fn joined_members_route( async fn join_room_by_id_helper( db: &Database, - sender_id: Option<&UserId>, + sender_user: Option<&UserId>, room_id: &RoomId, servers: &[Box], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> ConduitResult { - let sender_id = sender_id.expect("user is authenticated"); + let sender_user = sender_user.expect("user is authenticated"); // Ask a remote server if we don't have this room if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { @@ -439,7 +439,7 @@ async fn join_room_by_id_helper( remote_server.clone(), federation::membership::create_join_event_template::v1::Request { room_id, - user_id: sender_id, + user_id: sender_user, ver: &[RoomVersionId::Version5, RoomVersionId::Version6], }, ) @@ -479,8 +479,8 @@ async fn join_room_by_id_helper( "content".to_owned(), serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, + avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, }) @@ -668,8 +668,8 @@ async fn join_room_by_id_helper( } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, + avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, }; @@ -679,10 +679,10 @@ async fn join_room_by_id_helper( event_type: EventType::RoomMember, content: serde_json::to_value(event).expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c32bd68..9b038bf 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -21,13 +21,13 @@ pub async fn send_message_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // Check if this is a new transaction id - if let Some(response) = db - .transaction_ids - .existing_txnid(sender_id, device_id, &body.txn_id)? + if let Some(response) = + db.transaction_ids + .existing_txnid(sender_user, sender_device, &body.txn_id)? 
{ // The client might have sent a txnid of the /sendToDevice endpoint // This txnid has no response associated with it @@ -63,15 +63,19 @@ pub async fn send_message_event_route( state_key: None, redacts: None, }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, &db.account_data, )?; - db.transaction_ids - .add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?; + db.transaction_ids.add_txnid( + sender_user, + sender_device, + &body.txn_id, + event_id.as_bytes(), + )?; Ok(send_message_event::Response::new(event_id).into()) } @@ -84,9 +88,9 @@ pub fn get_message_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_id, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -111,7 +115,7 @@ pub fn get_message_events_route( get_message_events::Direction::Forward => { let events_after = db .rooms - .pdus_after(&sender_id, &body.room_id, from) + .pdus_after(&sender_user, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { @@ -141,7 +145,7 @@ pub fn get_message_events_route( get_message_events::Direction::Backward => { let events_before = db .rooms - .pdus_until(&sender_id, &body.room_id, from) + .pdus_until(&sender_user, &body.room_id, from) .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index d105eb6..c529932 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -14,19 +14,19 @@ pub fn set_presence_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in db.rooms.rooms_joined(&sender_id) { + for room_id in db.rooms.rooms_joined(&sender_user) { let room_id = room_id?; db.rooms.edus.update_presence( - &sender_id, + &sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -35,7 +35,7 @@ pub fn set_presence_route( presence: body.presence, status_msg: body.status_msg.clone(), }, - sender: sender_id.clone(), + sender: sender_user.clone(), }, &db.globals, )?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 9c6bd51..d6b9212 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -23,13 +23,13 @@ pub async fn set_displayname_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users - .set_displayname(&sender_id, body.displayname.clone())?; + .set_displayname(&sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - for room_id in 
db.rooms.rooms_joined(&sender_id) { + for room_id in db.rooms.rooms_joined(&sender_user) { let room_id = room_id?; db.rooms.build_and_append_pdu( PduBuilder { @@ -41,7 +41,7 @@ pub async fn set_displayname_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_id.to_string(), + &sender_user.to_string(), )? .ok_or_else(|| { Error::bad_database( @@ -57,10 +57,10 @@ pub async fn set_displayname_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -69,13 +69,13 @@ pub async fn set_displayname_route( // Presence update db.rooms.edus.update_presence( - &sender_id, + &sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -84,7 +84,7 @@ pub async fn set_displayname_route( presence: ruma::presence::PresenceState::Online, status_msg: None, }, - sender: sender_id.clone(), + sender: sender_user.clone(), }, &db.globals, )?; @@ -115,13 +115,13 @@ pub async fn set_avatar_url_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users - .set_avatar_url(&sender_id, body.avatar_url.clone())?; + .set_avatar_url(&sender_user, body.avatar_url.clone())?; // Send a new membership event and presence update into all joined rooms - for room_id in db.rooms.rooms_joined(&sender_id) { + for room_id in db.rooms.rooms_joined(&sender_user) { let room_id = room_id?; db.rooms.build_and_append_pdu( PduBuilder { @@ -133,7 +133,7 @@ pub async fn set_avatar_url_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_id.to_string(), + &sender_user.to_string(), )? 
.ok_or_else(|| { Error::bad_database( @@ -149,10 +149,10 @@ pub async fn set_avatar_url_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -161,13 +161,13 @@ pub async fn set_avatar_url_route( // Presence update db.rooms.edus.update_presence( - &sender_id, + &sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_id)?, + avatar_url: db.users.avatar_url(&sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -176,7 +176,7 @@ pub async fn set_avatar_url_route( presence: ruma::presence::PresenceState::Online, status_msg: None, }, - sender: sender_id.clone(), + sender: sender_user.clone(), }, &db.globals, )?; diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 906f4a7..568d30c 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -20,11 +20,11 @@ pub fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event = db .account_data - .get::(None, &sender_id, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 34d1ccc..77b4141 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -17,7 +17,7 @@ pub fn set_read_marker_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { content: ruma::events::fully_read::FullyReadEventContent { @@ -27,7 +27,7 @@ pub fn set_read_marker_route( }; db.account_data.update( Some(&body.room_id), - &sender_id, + &sender_user, EventType::FullyRead, &fully_read_event, &db.globals, @@ -36,7 +36,7 @@ pub fn set_read_marker_route( if let Some(event) = &body.read_receipt { db.rooms.edus.private_read_set( &body.room_id, - &sender_id, + &sender_user, db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", @@ -46,7 +46,7 @@ pub fn set_read_marker_route( let mut user_receipts = BTreeMap::new(); user_receipts.insert( - sender_id.clone(), + sender_user.clone(), ruma::events::receipt::Receipt { ts: Some(SystemTime::now()), }, @@ -60,7 +60,7 @@ pub fn set_read_marker_route( ); db.rooms.edus.readreceipt_update( - &sender_id, + &sender_user, &body.room_id, AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( ruma::events::receipt::ReceiptEvent { diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index b13cd80..b4fc4bb 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -16,7 +16,7 @@ pub async fn redact_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); let event_id = db.rooms.build_and_append_pdu( PduBuilder { @@ -29,7 +29,7 @@ pub async fn redact_event_route( state_key: None, redacts: Some(body.event_id.clone()), }, - &sender_id, + &sender_user, &body.room_id, &db.globals, &db.sending, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 744d949..92d8b8e 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -24,7 +24,7 @@ pub async fn create_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -47,7 +47,7 @@ pub async fn create_room_route( } })?; - let mut content = ruma::events::room::create::CreateEventContent::new(sender_id.clone()); + let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone()); content.federate = body.creation_content.federate; content.predecessor = body.creation_content.predecessor.clone(); content.room_version = RoomVersionId::Version6; @@ -61,7 +61,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -74,17 +74,17 @@ pub async fn create_room_route( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, + avatar_url: db.users.avatar_url(&sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -93,7 +93,7 @@ pub async fn create_room_route( // 3. 
Power levels let mut users = BTreeMap::new(); - users.insert(sender_id.clone(), 100.into()); + users.insert(sender_user.clone(), 100.into()); for invite_ in &body.invite { users.insert(invite_.clone(), 100.into()); } @@ -127,7 +127,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -161,7 +161,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -180,7 +180,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -207,7 +207,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -228,7 +228,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( pdu_builder, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -251,7 +251,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -271,7 +271,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -296,7 +296,7 @@ pub async fn create_room_route( state_key: Some(user.to_string()), redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, @@ -324,9 +324,9 @@ pub fn get_room_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_id, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -352,7 +352,7 @@ pub async fn upgrade_room_route( body: Ruma>, _room_id: String, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !matches!( body.new_version, @@ -381,7 +381,7 @@ pub async fn upgrade_room_route( state_key: Some("".to_owned()), redacts: None, }, - sender_id, + sender_user, &body.room_id, &db.globals, &db.sending, @@ -408,7 +408,7 @@ pub async fn upgrade_room_route( // Send a m.room.create event containing a predecessor field and the applicable room_version let mut create_event_content = - ruma::events::room::create::CreateEventContent::new(sender_id.clone()); + ruma::events::room::create::CreateEventContent::new(sender_user.clone()); create_event_content.federate = federate; create_event_content.room_version = body.new_version.clone(); create_event_content.predecessor = predecessor; @@ -422,7 +422,7 @@ pub async fn upgrade_room_route( state_key: Some("".to_owned()), redacts: None, }, - sender_id, + sender_user, &replacement_room, &db.globals, &db.sending, @@ -435,17 +435,17 @@ pub async fn upgrade_room_route( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_id)?, - avatar_url: db.users.avatar_url(&sender_id)?, + displayname: db.users.displayname(&sender_user)?, + avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_id.to_string()), + state_key: Some(sender_user.to_string()), redacts: None, }, - sender_id, + sender_user, &replacement_room, &db.globals, &db.sending, @@ -480,7 +480,7 @@ pub async fn upgrade_room_route( state_key: Some("".to_owned()), redacts: None, }, - sender_id, + sender_user, &replacement_room, &db.globals, &db.sending, @@ -524,7 +524,7 @@ pub async fn upgrade_room_route( state_key: Some("".to_owned()), redacts: None, }, - sender_id, + sender_user, &body.room_id, &db.globals, &db.sending, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 3b03e7a..6e2b7ff 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -15,7 +15,7 @@ pub fn search_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); let filter = search_criteria.filter.as_ref().unwrap(); @@ -24,7 +24,7 @@ pub fn search_events_route( let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); - if !db.rooms.is_joined(sender_id, &room_id)? { + if !db.rooms.is_joined(sender_user, &room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 9cd051c..f10bf71 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -117,10 +117,10 @@ pub fn logout_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - db.users.remove_device(&sender_id, device_id)?; + db.users.remove_device(&sender_user, sender_device)?; Ok(logout::Response::new().into()) } @@ -142,11 +142,11 @@ pub fn logout_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(sender_id) { + for device_id in db.users.all_device_ids(sender_user) { if let Ok(device_id) = device_id { - db.users.remove_device(&sender_id, &device_id)?; + db.users.remove_device(&sender_user, &device_id)?; } } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 0d46d18..90abac7 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -27,7 +27,7 @@ pub async fn send_state_event_for_key_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let content = serde_json::from_str::( body.json_body @@ -40,7 +40,7 @@ pub async fn send_state_event_for_key_route( Ok(send_state_event_for_key::Response::new( send_state_event_for_key_helper( &db, - sender_id, + sender_user, &body.content, content, &body.room_id, @@ -62,8 +62,8 @@ pub async fn send_state_event_for_empty_key_route( // This just calls send_state_event_for_key_route let Ruma { body, - sender_id, - device_id: _, + sender_user, + sender_device: _, json_body, } = body; @@ -78,7 +78,7 @@ pub async fn send_state_event_for_empty_key_route( Ok(send_state_event_for_empty_key::Response::new( send_state_event_for_key_helper( &db, - sender_id + sender_user .as_ref() .expect("no user for send state empty key rout"), &body.content, @@ -99,11 +99,11 @@ pub fn get_state_events_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_id, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? { if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? @@ -144,11 +144,11 @@ pub fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_id, &body.room_id)? 
{ + if !db.rooms.is_joined(sender_user, &body.room_id)? { if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? @@ -193,11 +193,11 @@ pub fn get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_id, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? { if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? @@ -242,7 +242,7 @@ pub async fn send_state_event_for_key_helper( room_id: &RoomId, state_key: Option, ) -> Result { - let sender_id = sender; + let sender_user = sender; if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = content { let mut aliases = canonical_alias.alt_aliases.clone(); @@ -276,7 +276,7 @@ pub async fn send_state_event_for_key_helper( state_key, redacts: None, }, - &sender_id, + &sender_user, &room_id, &db.globals, &db.sending, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 688d304..caab9ea 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -33,14 +33,14 @@ pub async fn sync_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // TODO: match body.set_presence { - db.rooms.edus.ping_presence(&sender_id)?; + db.rooms.edus.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(sender_id, device_id); + let watcher = db.watch(sender_user, sender_device); let next_batch = db.globals.current_count()?.to_string(); @@ -59,16 +59,16 @@ pub async fn sync_events_route( // Look for device list updates of this account device_list_updates.extend( db.users - .keys_changed(&sender_id.to_string(), since, None) + .keys_changed(&sender_user.to_string(), since, None) .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(&sender_id) { + for room_id in db.rooms.rooms_joined(&sender_user) { let room_id = room_id?; let mut non_timeline_pdus = db .rooms - .pdus_since(&sender_id, &room_id, since)? + .pdus_since(&sender_user, &room_id, since)? .filter_map(|r| r.ok()); // Filter out buggy events // Take the last 10 events for the timeline @@ -85,7 +85,7 @@ pub async fn sync_events_route( || db .rooms .edus - .last_privateread_update(&sender_id, &room_id)? + .last_privateread_update(&sender_user, &room_id)? > since; // They /sync response doesn't always return all messages, so we say the output is @@ -110,7 +110,7 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. 
- let first_pdu_after_since = db.rooms.pdus_after(sender_id, &room_id, since).next(); + let first_pdu_after_since = db.rooms.pdus_after(sender_user, &room_id, since).next(); let since_state_hash = first_pdu_after_since .as_ref() @@ -146,7 +146,7 @@ pub async fn sync_events_route( let since_sender_member = since_members.as_ref().map(|since_members| { since_members.as_ref().and_then(|members| { - members.get(sender_id.as_str()).and_then(|pdu| { + members.get(sender_user.as_str()).and_then(|pdu| { serde_json::from_value::>( pdu.content.clone(), ) @@ -198,7 +198,7 @@ pub async fn sync_events_route( match (since_membership, current_membership) { (MembershipState::Leave, MembershipState::Join) => { // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_id, &user_id, &room_id) { + if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { device_list_updates.insert(user_id); } } @@ -223,11 +223,11 @@ pub async fn sync_events_route( .filter_map(|user_id| Some(user_id.ok()?)) .filter(|user_id| { // Don't send key updates from the sender to the sender - sender_id != user_id + sender_user != user_id }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, sender_id, user_id, &room_id) + !share_encrypted_room(&db, sender_user, user_id, &room_id) }), ); } @@ -252,7 +252,7 @@ pub async fn sync_events_route( for hero in db .rooms - .all_pdus(&sender_id, &room_id)? + .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { @@ -287,7 +287,7 @@ pub async fn sync_events_route( // Filter for possible heroes .filter_map(|u| u) { - if heroes.contains(&hero) || hero == sender_id.as_str() { + if heroes.contains(&hero) || hero == sender_user.as_str() { continue; } @@ -305,10 +305,10 @@ pub async fn sync_events_route( }; let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_id)? { + if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_user)? { Some( (db.rooms - .pdus_since(&sender_id, &room_id, last_read)? + .pdus_since(&sender_user, &room_id, last_read)? .filter_map(|pdu| pdu.ok()) // Filter out buggy events .filter(|(_, pdu)| { matches!( @@ -360,7 +360,7 @@ pub async fn sync_events_route( account_data: sync_events::AccountData { events: db .account_data - .changes_since(Some(&room_id), &sender_id, since)? + .changes_since(Some(&room_id), &sender_user, since)? 
.into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -438,9 +438,9 @@ pub async fn sync_events_route( } let mut left_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_left(&sender_id) { + for room_id in db.rooms.rooms_left(&sender_user) { let room_id = room_id?; - let pdus = db.rooms.pdus_since(&sender_id, &room_id, since)?; + let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; let room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events .map(|(_, pdu)| pdu.to_sync_room_event()) @@ -458,7 +458,7 @@ pub async fn sync_events_route( let since_member = db .rooms - .pdus_after(sender_id, &room_id, since) + .pdus_after(sender_user, &room_id, since) .next() .and_then(|pdu| pdu.ok()) .and_then(|pdu| { @@ -470,7 +470,7 @@ pub async fn sync_events_route( }) .and_then(|state_hash| { db.rooms - .state_get(&state_hash, &EventType::RoomMember, sender_id.as_str()) + .state_get(&state_hash, &EventType::RoomMember, sender_user.as_str()) .ok()? .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() @@ -495,12 +495,12 @@ pub async fn sync_events_route( .filter_map(|user_id| Some(user_id.ok()?)) .filter(|user_id| { // Don't send key updates from the sender to the sender - sender_id != user_id + sender_user != user_id }) .filter(|user_id| { // Only send if the sender doesn't share any encrypted room with the target // anymore - !share_encrypted_room(&db, sender_id, user_id, &room_id) + !share_encrypted_room(&db, sender_user, user_id, &room_id) }), ); } @@ -511,12 +511,12 @@ pub async fn sync_events_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_invited(&sender_id) { + for room_id in db.rooms.rooms_invited(&sender_user) { let room_id = room_id?; let mut invited_since_last_sync = false; - for pdu in db.rooms.pdus_since(&sender_id, &room_id, since)? { + for pdu in db.rooms.pdus_since(&sender_user, &room_id, since)? { let (_, pdu) = pdu?; - if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_id.to_string()) { + if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_user.to_string()) { let content = serde_json::from_value::< Raw, >(pdu.content.clone()) @@ -554,7 +554,7 @@ pub async fn sync_events_route( for user_id in left_encrypted_users { let still_share_encrypted_room = db .rooms - .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( @@ -574,7 +574,7 @@ pub async fn sync_events_route( // Remove all to-device events the device received *last time* db.users - .remove_to_device_events(sender_id, device_id, since)?; + .remove_to_device_events(sender_user, sender_device, since)?; let response = sync_events::Response { next_batch, @@ -592,7 +592,7 @@ pub async fn sync_events_route( account_data: sync_events::AccountData { events: db .account_data - .changes_since(None, &sender_id, since)? + .changes_since(None, &sender_user, since)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -605,15 +605,15 @@ pub async fn sync_events_route( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_id)? > since + device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_user)? > since || since == 0 { - db.users.count_one_time_keys(sender_id, device_id)? 
+ db.users.count_one_time_keys(sender_user, sender_device)? } else { BTreeMap::new() }, to_device: sync_events::ToDevice { - events: db.users.get_to_device_events(sender_id, device_id)?, + events: db.users.get_to_device_events(sender_user, sender_device)?, }, }; @@ -644,12 +644,12 @@ pub async fn sync_events_route( fn share_encrypted_room( db: &Database, - sender_id: &UserId, + sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId, ) -> bool { db.rooms - .get_shared_rooms(vec![sender_id.clone(), user_id.clone()]) + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index d04dd3a..c605313 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -17,11 +17,11 @@ pub fn update_tag_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_user, EventType::Tag)? .unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), @@ -34,7 +34,7 @@ pub fn update_tag_route( db.account_data.update( Some(&body.room_id), - sender_id, + sender_user, EventType::Tag, &tags_event, &db.globals, @@ -51,11 +51,11 @@ pub fn delete_tag_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_user, EventType::Tag)? .unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), @@ -65,7 +65,7 @@ pub fn delete_tag_route( db.account_data.update( Some(&body.room_id), - sender_id, + sender_user, EventType::Tag, &tags_event, &db.globals, @@ -82,12 +82,12 @@ pub fn get_tags_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::Response { tags: db .account_data - .get::(Some(&body.room_id), sender_id, EventType::Tag)? + .get::(Some(&body.room_id), sender_user, EventType::Tag)? .unwrap_or_else(|| ruma::events::tag::TagEvent { content: ruma::events::tag::TagEventContent { tags: BTreeMap::new(), diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e736388..6719dae 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -16,13 +16,13 @@ pub fn send_event_to_device_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); - let device_id = body.device_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // Check if this is a new transaction id if db .transaction_ids - .existing_txnid(sender_id, device_id, &body.txn_id)? + .existing_txnid(sender_user, sender_device, &body.txn_id)? 
.is_some() { return Ok(send_event_to_device::Response.into()); @@ -33,7 +33,7 @@ pub fn send_event_to_device_route( match target_device_id_maybe { to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { db.users.add_to_device_event( - sender_id, + sender_user, &target_user_id, &target_device_id, &body.event_type, @@ -47,7 +47,7 @@ pub fn send_event_to_device_route( to_device::DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { db.users.add_to_device_event( - sender_id, + sender_user, &target_user_id, &target_device_id?, &body.event_type, @@ -64,7 +64,7 @@ pub fn send_event_to_device_route( // Save transaction id with empty data db.transaction_ids - .add_txnid(sender_id, device_id, &body.txn_id, &[])?; + .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; Ok(send_event_to_device::Response.into()) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index b019769..e90746e 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -14,11 +14,11 @@ pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - let sender_id = body.sender_id.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Typing::Yes(duration) = body.state { db.rooms.edus.typing_add( - &sender_id, + &sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), &db.globals, @@ -26,7 +26,7 @@ pub fn create_typing_event_route( } else { db.rooms .edus - .typing_remove(&sender_id, &body.room_id, &db.globals)?; + .typing_remove(&sender_user, &body.room_id, &db.globals)?; } Ok(create_typing_event::Response.into()) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 734d214..7722a42 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -26,8 +26,8 @@ use { /// first. pub struct Ruma { pub body: T::Incoming, - pub sender_id: Option, - pub device_id: Option>, + pub sender_user: Option, + pub sender_device: Option>, pub json_body: Option>, // This is None when body is not a valid string } @@ -61,7 +61,7 @@ where .await .expect("database was loaded"); - let (user_id, device_id) = if T::METADATA.requires_authentication { + let (sender_user, sender_device) = if T::METADATA.requires_authentication { // Get token from header or query value let token = match request .headers() @@ -102,8 +102,8 @@ where match ::Incoming::try_from(http_request) { Ok(t) => Success(Ruma { body: t, - sender_id: user_id, - device_id, + sender_user, + sender_device, // TODO: Can we avoid parsing it again? 
(We only need this for append_pdu) json_body: utils::string_from_bytes(&body) .ok() diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index e1f4e5c..d3271dd 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -83,6 +83,7 @@ POST /register rejects registration of usernames with '|' POST /register rejects registration of usernames with '}' POST /register rejects registration of usernames with '£' POST /register rejects registration of usernames with 'é' +POST /register returns the same device_id as that in the request POST /rooms/:room_id/ban can ban a user POST /rooms/:room_id/invite can send an invite POST /rooms/:room_id/join can join a room @@ -116,6 +117,7 @@ Typing events appear in incremental sync Typing events appear in initial sync Uninvited users cannot join the room User appears in user directory +User directory correctly update on display name change User in dir while user still shares private rooms User in shared private room does appear in user directory User is offline if they set_presence=offline in their sync From 2d6b7750fd98fef867e84097445f74496d1b3b3f Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sun, 18 Oct 2020 23:06:08 +0200 Subject: [PATCH 0324/1727] Fix order of docker command arguments and change repository link to... ...github.com repo, to relieve some pressure from git.koesters.xyz. --- Dockerfile | 4 ++-- README.md | 2 +- docker/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3ed7371..a97f4cf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ FROM alpine:3.12 as builder # from the official git repository. Defaults to the git repo. ARG LOCAL=false # Specifies which revision/commit is build. Defaults to HEAD -ARG GIT_REF=HEAD +ARG GIT_REF=origin/master # Add 'edge'-repository to get Rust 1.45 RUN sed -i \ @@ -30,7 +30,7 @@ COPY . . RUN if [[ $LOCAL == "true" ]]; then \ cargo install --path . ; \ else \ - cargo install --git "https://git.koesters.xyz/timo/conduit.git" --rev ${GIT_REF}; \ + cargo install --git "https://github.com/timokoesters/conduit.git" --rev ${GIT_REF}; \ fi ########################## RUNTIME IMAGE ########################## diff --git a/README.md b/README.md index 6c9fa74..c839775 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ Pull and run the docker image with ``` bash docker pull matrixconduit/matrix-conduit:latest -docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit +docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` Or build and run it with docker or docker-compose. [Read more](docker/README.md) diff --git a/docker/README.md b/docker/README.md index c569c5f..f90b9a4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -40,7 +40,7 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ``` bash -docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" +docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" matrixconduit/matrix-conduit:latest ``` For detached mode, you also need to use the `-d` flag. You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. 
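As background for the argument-order fix above: `docker run` treats only the flags placed *before* the image name as options for Docker; anything after the image name is passed as arguments to the container's entrypoint, i.e. to Conduit itself. A minimal before/after sketch, reusing the same port and volume values shown above:

``` bash
# Wrong: everything after the image name is handed to Conduit, not to Docker
docker run matrixconduit/matrix-conduit:latest -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit

# Right: all Docker options come before the image name
docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
```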
From 6bb8284fc01774c026485f25bf239efea42d2d42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 19 Oct 2020 15:29:36 +0200 Subject: [PATCH 0325/1727] improvement: correct thumbnailing algorithm --- src/client_server/membership.rs | 4 +- src/database/media.rs | 79 ++++++++++++++++++++++++++++++++- src/server_server.rs | 2 +- 3 files changed, 81 insertions(+), 4 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f99ff56..5d028d9 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -561,10 +561,10 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - serde_json::from_value::(value) + serde_json::from_value::(value.clone()) .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { - warn!("{}", e); + warn!("{}: {}", value, e); Error::BadServerResponse("Invalid PDU bytes in send_join response.") }) }) diff --git a/src/database/media.rs b/src/database/media.rs index 869d5d8..3ecf4bd 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,3 +1,5 @@ +use image::{imageops::FilterType, GenericImageView}; + use crate::{utils, Error, Result}; use std::mem; @@ -99,8 +101,34 @@ impl Media { } } + /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when + /// the server should send the original file. + pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { + match (width, height) { + (0..=32, 0..=32) => Some((32, 32, true)), + (0..=96, 0..=96) => Some((96, 96, true)), + (0..=320, 0..=240) => Some((320, 240, false)), + (0..=640, 0..=480) => Some((640, 480, false)), + (0..=800, 0..=600) => Some((800, 600, false)), + _ => None, + } + } + /// Downloads a file's thumbnail. + /// + /// Here's an example on how it works: + /// + /// - Client requests an image with width=567, height=567 + /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails + /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) + /// - Server creates the thumbnail and sends it to the user + /// + /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
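+    ///
+    /// A few more illustrative requests (sizes chosen only as examples of the buckets above):
+    /// a 64x64 request resolves to the cropped (96, 96) bucket, a 500x300 request resolves to
+    /// the uncropped (640, 480) bucket and is scaled while keeping the aspect ratio, and a
+    /// request wider than 800 or taller than 600 matches no bucket, so the original file is
+    /// sent back unchanged.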
pub fn get_thumbnail(&self, mxc: String, width: u32, height: u32) -> Result> { + let (width, height, crop) = self + .thumbnail_properties(width, height) + .unwrap_or((0, 0, false)); // 0, 0 because that's the original file + let mut main_prefix = mxc.as_bytes().to_vec(); main_prefix.push(0xff); @@ -146,6 +174,7 @@ impl Media { })) } else if let Some(r) = self.mediaid_file.scan_prefix(&original_prefix).next() { // Generate a thumbnail + let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -169,7 +198,55 @@ impl Media { }; if let Ok(image) = image::load_from_memory(&file) { - let thumbnail = image.thumbnail(width, height); + let original_width = image.width(); + let original_height = image.height(); + if width > original_width || height > original_height { + return Ok(Some(FileMeta { + filename, + content_type, + file: file.to_vec(), + })); + } + + let thumbnail = if crop { + image.resize_to_fill(width, height, FilterType::Triangle) + } else { + let (exact_width, exact_height) = { + // Copied from image::dynimage::resize_dimensions + let ratio = u64::from(original_width) * u64::from(height); + let nratio = u64::from(width) * u64::from(original_height); + + let use_width = nratio > ratio; + let intermediate = if use_width { + u64::from(original_height) * u64::from(width) / u64::from(width) + } else { + u64::from(original_width) * u64::from(height) + / u64::from(original_height) + }; + if use_width { + if intermediate <= u64::from(::std::u32::MAX) { + (width, intermediate as u32) + } else { + ( + (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) + as u32, + ::std::u32::MAX, + ) + } + } else if intermediate <= u64::from(::std::u32::MAX) { + (intermediate as u32, height) + } else { + ( + ::std::u32::MAX, + (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) + as u32, + ) + } + }; + + image.thumbnail_exact(exact_width, exact_height) + }; + let mut thumbnail_bytes = Vec::new(); thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; diff --git a/src/server_server.rs b/src/server_server.rs index 3fefbd5..184f333 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -393,6 +393,7 @@ pub fn send_transaction_message_route<'a>( let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_to_state(&pdu_id, &pdu)?; db.rooms.append_pdu( &pdu, &value, @@ -402,7 +403,6 @@ pub fn send_transaction_message_route<'a>( &db.account_data, &db.sending, )?; - db.rooms.append_to_state(&pdu_id, &pdu)?; } } Ok(send_transaction_message::v1::Response { From 6dbe1956954ce611dd4ba2213a2df56ee0454910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Oct 2020 21:28:02 +0200 Subject: [PATCH 0326/1727] improvement: flush after every request that manipulates the db --- src/client_server/account.rs | 12 +++++-- src/client_server/alias.rs | 8 +++-- src/client_server/backup.rs | 46 ++++++++++++++++-------- src/client_server/capabilities.rs | 2 +- src/client_server/config.rs | 8 +++-- src/client_server/context.rs | 2 +- src/client_server/device.rs | 16 ++++++--- src/client_server/directory.rs | 2 ++ src/client_server/filter.rs | 4 +-- src/client_server/keys.rs | 20 +++++++---- src/client_server/media.rs | 6 ++-- src/client_server/membership.rs | 42 ++++++++++++++-------- src/client_server/message.rs | 4 ++- src/client_server/mod.rs | 2 +- src/client_server/presence.rs | 4 ++- src/client_server/profile.rs | 10 ++++-- src/client_server/push.rs | 21 ++++++++--- 
src/client_server/read_marker.rs | 5 ++- src/client_server/redact.rs | 2 ++ src/client_server/room.rs | 6 +++- src/client_server/search.rs | 2 +- src/client_server/session.rs | 14 +++++--- src/client_server/state.rs | 56 +++++++++++++++-------------- src/client_server/tag.rs | 10 ++++-- src/client_server/thirdparty.rs | 2 +- src/client_server/to_device.rs | 4 ++- src/client_server/unversioned.rs | 2 +- src/client_server/user_directory.rs | 2 +- src/client_server/voip.rs | 2 +- src/database.rs | 5 +++ 30 files changed, 216 insertions(+), 105 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 09d9f18..74f862c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -36,7 +36,7 @@ const GUEST_NAME_LENGTH: usize = 10; feature = "conduit_bin", get("/_matrix/client/r0/register/available", data = "") )] -pub fn get_register_available_route( +pub async fn get_register_available_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -466,6 +466,8 @@ pub async fn register_route( )?; } + db.flush().await?; + Ok(register::Response { access_token: Some(token), user_id, @@ -485,7 +487,7 @@ pub async fn register_route( feature = "conduit_bin", post("/_matrix/client/r0/account/password", data = "") )] -pub fn change_password_route( +pub async fn change_password_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -535,6 +537,8 @@ pub fn change_password_route( db.users.remove_device(&sender_user, &id)?; } + db.flush().await?; + Ok(change_password::Response.into()) } @@ -547,7 +551,7 @@ pub fn change_password_route( feature = "conduit_bin", get("/_matrix/client/r0/account/whoami", data = "") )] -pub fn whoami_route(body: Ruma) -> ConduitResult { +pub async fn whoami_route(body: Ruma) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { user_id: sender_user.clone(), @@ -637,6 +641,8 @@ pub async fn deactivate_route( // Remove devices and mark account as deactivated db.users.deactivate_account(&sender_user)?; + db.flush().await?; + Ok(deactivate::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index c2c3eb9..094e70a 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -18,7 +18,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/directory/room/<_>", data = "") )] -pub fn create_alias_route( +pub async fn create_alias_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -29,6 +29,8 @@ pub fn create_alias_route( db.rooms .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; + db.flush().await?; + Ok(create_alias::Response::new().into()) } @@ -36,12 +38,14 @@ pub fn create_alias_route( feature = "conduit_bin", delete("/_matrix/client/r0/directory/room/<_>", data = "") )] -pub fn delete_alias_route( +pub async fn delete_alias_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + db.flush().await?; + Ok(delete_alias::Response::new().into()) } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 6e02198..c84af0a 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -17,7 +17,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", post("/_matrix/client/unstable/room_keys/version", data = "") )] -pub fn create_backup_route( +pub async fn 
create_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -26,6 +26,8 @@ pub fn create_backup_route( .key_backups .create_backup(&sender_user, &body.algorithm, &db.globals)?; + db.flush().await?; + Ok(create_backup::Response { version }.into()) } @@ -33,7 +35,7 @@ pub fn create_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] -pub fn update_backup_route( +pub async fn update_backup_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -41,6 +43,8 @@ pub fn update_backup_route( db.key_backups .update_backup(&sender_user, &body.version, &body.algorithm, &db.globals)?; + db.flush().await?; + Ok(update_backup::Response.into()) } @@ -48,7 +52,7 @@ pub fn update_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version", data = "") )] -pub fn get_latest_backup_route( +pub async fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -75,7 +79,7 @@ pub fn get_latest_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] -pub fn get_backup_route( +pub async fn get_backup_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -101,7 +105,7 @@ pub fn get_backup_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] -pub fn delete_backup_route( +pub async fn delete_backup_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -109,6 +113,8 @@ pub fn delete_backup_route( db.key_backups.delete_backup(&sender_user, &body.version)?; + db.flush().await?; + Ok(delete_backup::Response.into()) } @@ -117,7 +123,7 @@ pub fn delete_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys", data = "") )] -pub fn add_backup_keys_route( +pub async fn add_backup_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -136,6 +142,8 @@ pub fn add_backup_keys_route( } } + db.flush().await?; + Ok(add_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -148,7 +156,7 @@ pub fn add_backup_keys_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] -pub fn add_backup_key_sessions_route( +pub async fn add_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -165,6 +173,8 @@ pub fn add_backup_key_sessions_route( )? } + db.flush().await?; + Ok(add_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -177,7 +187,7 @@ pub fn add_backup_key_sessions_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] -pub fn add_backup_key_session_route( +pub async fn add_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -192,6 +202,8 @@ pub fn add_backup_key_session_route( &db.globals, )?; + db.flush().await?; + Ok(add_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -203,7 +215,7 @@ pub fn add_backup_key_session_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys", data = "") )] -pub fn get_backup_keys_route( +pub async fn get_backup_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -218,7 +230,7 @@ pub fn get_backup_keys_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] -pub fn get_backup_key_sessions_route( +pub async fn get_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -235,7 +247,7 @@ pub fn get_backup_key_sessions_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] -pub fn get_backup_key_session_route( +pub async fn get_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -252,7 +264,7 @@ pub fn get_backup_key_session_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys", data = "") )] -pub fn delete_backup_keys_route( +pub async fn delete_backup_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -261,6 +273,8 @@ pub fn delete_backup_keys_route( db.key_backups .delete_all_keys(&sender_user, &body.version)?; + db.flush().await?; + Ok(delete_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -272,7 +286,7 @@ pub fn delete_backup_keys_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] -pub fn delete_backup_key_sessions_route( +pub async fn delete_backup_key_sessions_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -281,6 +295,8 @@ pub fn delete_backup_key_sessions_route( db.key_backups .delete_room_keys(&sender_user, &body.version, &body.room_id)?; + db.flush().await?; + Ok(delete_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -292,7 +308,7 @@ pub fn delete_backup_key_sessions_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] -pub fn delete_backup_key_session_route( +pub async fn delete_backup_key_session_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -301,6 +317,8 @@ pub fn delete_backup_key_session_route( db.key_backups .delete_room_key(&sender_user, &body.version, &body.room_id, &body.session_id)?; + db.flush().await?; + Ok(delete_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index ddf90f8..54c08ba 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -9,7 +9,7 @@ use rocket::get; /// /// Get information on this server's supported feature set and other relevent capabilities. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] -pub fn get_capabilities_route() -> ConduitResult { +pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( RoomVersionId::Version5, diff --git a/src/client_server/config.rs b/src/client_server/config.rs index adff05a..dd8de64 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,7 +16,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] -pub fn set_global_account_data_route( +pub async fn set_global_account_data_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -40,6 +40,8 @@ pub fn set_global_account_data_route( &db.globals, )?; + db.flush().await?; + Ok(set_global_account_data::Response.into()) } @@ -47,7 +49,7 @@ pub fn set_global_account_data_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] -pub fn get_global_account_data_route( +pub async fn get_global_account_data_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -58,5 +60,7 @@ pub fn get_global_account_data_route( .get::>(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + db.flush().await?; + Ok(get_global_account_data::Response { account_data: data }.into()) } diff --git a/src/client_server/context.rs b/src/client_server/context.rs index a1b848a..f2a8cd4 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -10,7 +10,7 @@ use rocket::get; feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") )] -pub fn get_context_route( +pub async fn get_context_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 233d233..86ac511 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -16,7 +16,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/devices", data = "") )] -pub fn get_devices_route( +pub async fn get_devices_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -35,7 +35,7 @@ pub fn get_devices_route( feature = "conduit_bin", get("/_matrix/client/r0/devices/<_>", data = "") )] -pub fn get_device_route( +pub async fn get_device_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -53,7 +53,7 @@ pub fn get_device_route( feature = "conduit_bin", put("/_matrix/client/r0/devices/<_>", data = "") )] -pub fn update_device_route( +pub async fn update_device_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -69,6 +69,8 @@ pub fn update_device_route( db.users .update_device_metadata(&sender_user, &body.device_id, &device)?; + db.flush().await?; + Ok(update_device::Response.into()) } @@ -76,7 +78,7 @@ pub fn update_device_route( feature = "conduit_bin", delete("/_matrix/client/r0/devices/<_>", data = "") )] -pub fn delete_device_route( +pub async fn delete_device_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -115,6 +117,8 @@ pub fn delete_device_route( db.users.remove_device(&sender_user, &body.device_id)?; + db.flush().await?; + Ok(delete_device::Response.into()) } @@ -122,7 +126,7 @@ pub fn delete_device_route( feature = "conduit_bin", post("/_matrix/client/r0/delete_devices", data = "") )] -pub fn delete_devices_route( +pub async fn delete_devices_route( db: State<'_, Database>, body: 
Ruma>, ) -> ConduitResult { @@ -163,5 +167,7 @@ pub fn delete_devices_route( db.users.remove_device(&sender_user, &device_id)? } + db.flush().await?; + Ok(delete_devices::Response.into()) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index c82a15f..202417e 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -87,6 +87,8 @@ pub async fn set_room_visibility_route( room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, } + db.flush().await?; + Ok(set_room_visibility::Response.into()) } diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 4b1c3a0..b6dc583 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -5,7 +5,7 @@ use ruma::api::client::r0::filter::{self, create_filter, get_filter}; use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] -pub fn get_filter_route() -> ConduitResult { +pub async fn get_filter_route() -> ConduitResult { // TODO Ok(get_filter::Response::new(filter::IncomingFilterDefinition { event_fields: None, @@ -18,7 +18,7 @@ pub fn get_filter_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] -pub fn create_filter_route() -> ConduitResult { +pub async fn create_filter_route() -> ConduitResult { // TODO Ok(create_filter::Response::new(utils::random_string(10)).into()) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 2af88cf..58c79da 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -22,7 +22,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/keys/upload", data = "") )] -pub fn upload_keys_route( +pub async fn upload_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -58,6 +58,8 @@ pub fn upload_keys_route( } } + db.flush().await?; + Ok(upload_keys::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, } @@ -68,7 +70,7 @@ pub fn upload_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/query", data = "") )] -pub fn get_keys_route( +pub async fn get_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -148,7 +150,7 @@ pub fn get_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/claim", data = "") )] -pub fn claim_keys_route( +pub async fn claim_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -168,6 +170,8 @@ pub fn claim_keys_route( one_time_keys.insert(user_id.clone(), container); } + db.flush().await?; + Ok(claim_keys::Response { failures: BTreeMap::new(), one_time_keys, @@ -179,7 +183,7 @@ pub fn claim_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/device_signing/upload", data = "") )] -pub fn upload_signing_keys_route( +pub async fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -227,6 +231,8 @@ pub fn upload_signing_keys_route( )?; } + db.flush().await?; + Ok(upload_signing_keys::Response.into()) } @@ -234,7 +240,7 @@ pub fn upload_signing_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/signatures/upload", data = "") )] -pub fn upload_signatures_route( +pub async fn upload_signatures_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -285,6 +291,8 @@ pub fn upload_signatures_route( } } + db.flush().await?; + Ok(upload_signatures::Response.into()) } @@ -292,7 +300,7 @@ pub fn upload_signatures_route( feature = 
"conduit_bin", get("/_matrix/client/r0/keys/changes", data = "") )] -pub fn get_key_changes_route( +pub async fn get_key_changes_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 551546b..96874cc 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -14,7 +14,7 @@ use std::convert::TryInto; const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] -pub fn get_media_config_route( +pub async fn get_media_config_route( db: State<'_, Database>, ) -> ConduitResult { Ok(get_media_config::Response { @@ -27,7 +27,7 @@ pub fn get_media_config_route( feature = "conduit_bin", post("/_matrix/media/r0/upload", data = "") )] -pub fn create_content_route( +pub async fn create_content_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -43,6 +43,8 @@ pub fn create_content_route( &body.file, )?; + db.flush().await?; + Ok(create_content::Response { content_uri: mxc }.into()) } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 5d028d9..3380601 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -65,17 +65,19 @@ pub async fn join_room_by_id_or_alias_route( } }; + let join_room_response = join_room_by_id_helper( + &db, + body.sender_user.as_ref(), + &room_id, + &servers, + body.third_party_signed.as_ref(), + ) + .await?; + + db.flush().await?; + Ok(join_room_by_id_or_alias::Response { - room_id: join_room_by_id_helper( - &db, - body.sender_user.as_ref(), - &room_id, - &servers, - body.third_party_signed.as_ref(), - ) - .await? - .0 - .room_id, + room_id: join_room_response.0.room_id, } .into()) } @@ -124,6 +126,8 @@ pub async fn leave_room_route( &db.account_data, )?; + db.flush().await?; + Ok(leave_room::Response::new().into()) } @@ -160,6 +164,8 @@ pub async fn invite_user_route( &db.account_data, )?; + db.flush().await?; + Ok(invite_user::Response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -211,6 +217,8 @@ pub async fn kick_user_route( &db.account_data, )?; + db.flush().await?; + Ok(kick_user::Response::new().into()) } @@ -267,6 +275,8 @@ pub async fn ban_user_route( &db.account_data, )?; + db.flush().await?; + Ok(ban_user::Response::new().into()) } @@ -314,6 +324,8 @@ pub async fn unban_user_route( &db.account_data, )?; + db.flush().await?; + Ok(unban_user::Response::new().into()) } @@ -321,7 +333,7 @@ pub async fn unban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/forget", data = "") )] -pub fn forget_room_route( +pub async fn forget_room_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -329,6 +341,8 @@ pub fn forget_room_route( db.rooms.forget(&body.room_id, &sender_user)?; + db.flush().await?; + Ok(forget_room::Response::new().into()) } @@ -336,7 +350,7 @@ pub fn forget_room_route( feature = "conduit_bin", get("/_matrix/client/r0/joined_rooms", data = "") )] -pub fn joined_rooms_route( +pub async fn joined_rooms_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -356,7 +370,7 @@ pub fn joined_rooms_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/members", data = "") )] -pub fn get_member_events_route( +pub async fn get_member_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -384,7 +398,7 @@ pub fn get_member_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") )] -pub fn 
joined_members_route( +pub async fn joined_members_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 9b038bf..f9c8ba1 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -77,6 +77,8 @@ pub async fn send_message_event_route( event_id.as_bytes(), )?; + db.flush().await?; + Ok(send_message_event::Response::new(event_id).into()) } @@ -84,7 +86,7 @@ pub async fn send_message_event_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/messages", data = "") )] -pub fn get_message_events_route( +pub async fn get_message_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index e5a36f3..672957b 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -75,6 +75,6 @@ const SESSION_ID_LENGTH: usize = 256; #[cfg(feature = "conduit_bin")] #[options("/<_..>")] -pub fn options_route() -> ConduitResult { +pub async fn options_route() -> ConduitResult { Ok(send_event_to_device::Response.into()) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index c529932..e597c69 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -10,7 +10,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/presence/<_>/status", data = "") )] -pub fn set_presence_route( +pub async fn set_presence_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -41,5 +41,7 @@ pub fn set_presence_route( )?; } + db.flush().await?; + Ok(set_presence::Response.into()) } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index d6b9212..d754ace 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -90,6 +90,8 @@ pub async fn set_displayname_route( )?; } + db.flush().await?; + Ok(set_display_name::Response.into()) } @@ -97,7 +99,7 @@ pub async fn set_displayname_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/displayname", data = "") )] -pub fn get_displayname_route( +pub async fn get_displayname_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -182,6 +184,8 @@ pub async fn set_avatar_url_route( )?; } + db.flush().await?; + Ok(set_avatar_url::Response.into()) } @@ -189,7 +193,7 @@ pub async fn set_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] -pub fn get_avatar_url_route( +pub async fn get_avatar_url_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -203,7 +207,7 @@ pub fn get_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>", data = "") )] -pub fn get_profile_route( +pub async fn get_profile_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 568d30c..05ba8d0 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -16,7 +16,7 @@ use rocket::{get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/pushrules", data = "") )] -pub fn get_pushrules_all_route( +pub async fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -40,11 +40,15 @@ pub fn get_pushrules_all_route( "/_matrix/client/r0/pushrules/<_>/<_>/<_>", //data = "" ))] -pub fn set_pushrule_route(//db: State<'_, Database>, +pub async fn set_pushrule_route( + db: State<'_, Database>, //body: Ruma, ) -> ConduitResult { // TODO warn!("TODO: 
set_pushrule_route"); + + db.flush().await?; + Ok(set_pushrule::Response.into()) } @@ -52,14 +56,19 @@ pub fn set_pushrule_route(//db: State<'_, Database>, feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") )] -pub fn set_pushrule_enabled_route() -> ConduitResult { +pub async fn set_pushrule_enabled_route( + db: State<'_, Database>, +) -> ConduitResult { // TODO warn!("TODO: set_pushrule_enabled_route"); + + db.flush().await?; + Ok(set_pushrule_enabled::Response.into()) } #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] -pub fn get_pushers_route() -> ConduitResult { +pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { pushers: Vec::new(), } @@ -67,7 +76,9 @@ pub fn get_pushers_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] -pub fn set_pushers_route() -> ConduitResult { +pub async fn set_pushers_route(db: State<'_, Database>) -> ConduitResult { + db.flush().await?; + Ok(get_pushers::Response { pushers: Vec::new(), } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 77b4141..f3e7211 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -13,7 +13,7 @@ use std::{collections::BTreeMap, time::SystemTime}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") )] -pub fn set_read_marker_route( +pub async fn set_read_marker_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -71,5 +71,8 @@ pub fn set_read_marker_route( &db.globals, )?; } + + db.flush().await?; + Ok(set_read_marker::Response.into()) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index b4fc4bb..486eb6c 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -36,5 +36,7 @@ pub async fn redact_event_route( &db.account_data, )?; + db.flush().await?; + Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 92d8b8e..d1d051f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -313,6 +313,8 @@ pub async fn create_room_route( db.rooms.set_public(&room_id, true)?; } + db.flush().await?; + Ok(create_room::Response::new(room_id).into()) } @@ -320,7 +322,7 @@ pub async fn create_room_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") )] -pub fn get_room_event_route( +pub async fn get_room_event_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -531,6 +533,8 @@ pub async fn upgrade_room_route( &db.account_data, )?; + db.flush().await?; + // Return the replacement room id Ok(upgrade_room::Response { replacement_room }.into()) } diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 6e2b7ff..0950b25 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -11,7 +11,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", post("/_matrix/client/r0/search", data = "") )] -pub fn search_events_route( +pub async fn search_events_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/session.rs b/src/client_server/session.rs index f10bf71..c8775ef 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -16,7 +16,7 @@ use rocket::{get, post}; /// Get the homeserver's supported login types. One of these should be used as the `type` field /// when logging in. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -pub fn get_login_types_route() -> ConduitResult { +pub async fn get_login_types_route() -> ConduitResult { Ok(get_login_types::Response::new(vec![get_login_types::LoginType::Password]).into()) } @@ -34,7 +34,7 @@ pub fn get_login_types_route() -> ConduitResult { feature = "conduit_bin", post("/_matrix/client/r0/login", data = "") )] -pub fn login_route( +pub async fn login_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -93,6 +93,8 @@ pub fn login_route( body.initial_device_display_name.clone(), )?; + db.flush().await?; + Ok(login::Response { user_id, access_token: token, @@ -113,7 +115,7 @@ pub fn login_route( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") )] -pub fn logout_route( +pub async fn logout_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -122,6 +124,8 @@ pub fn logout_route( db.users.remove_device(&sender_user, sender_device)?; + db.flush().await?; + Ok(logout::Response::new().into()) } @@ -138,7 +142,7 @@ pub fn logout_route( feature = "conduit_bin", post("/_matrix/client/r0/logout/all", data = "") )] -pub fn logout_all_route( +pub async fn logout_all_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -150,5 +154,7 @@ pub fn logout_all_route( } } + db.flush().await?; + Ok(logout_all::Response::new().into()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 90abac7..eae96b5 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -37,18 +37,19 @@ pub async fn send_state_event_for_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - Ok(send_state_event_for_key::Response::new( - send_state_event_for_key_helper( - &db, - sender_user, - &body.content, - content, - &body.room_id, - Some(body.state_key.to_owned()), - ) - .await?, + let event_id = send_state_event_for_key_helper( + &db, + sender_user, + &body.content, + content, + &body.room_id, + Some(body.state_key.to_owned()), ) - .into()) + .await?; + + db.flush().await?; + + Ok(send_state_event_for_key::Response { event_id }.into()) } #[cfg_attr( @@ -75,27 +76,28 @@ pub async fn send_state_event_for_empty_key_route( ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - Ok(send_state_event_for_empty_key::Response::new( - send_state_event_for_key_helper( - &db, - sender_user - .as_ref() - .expect("no user for send state empty key rout"), - &body.content, - json, - &body.room_id, - Some("".into()), - ) - .await?, + let event_id = send_state_event_for_key_helper( + &db, + sender_user + .as_ref() + .expect("no user for send state empty key rout"), + &body.content, + json, + &body.room_id, + Some("".into()), ) - .into()) + .await?; + + db.flush().await?; + + Ok(send_state_event_for_empty_key::Response { event_id }.into()) } #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state", data = "") )] -pub fn get_state_events_route( +pub async fn get_state_events_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -140,7 +142,7 @@ pub fn get_state_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] -pub fn get_state_events_for_key_route( +pub async fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { @@ -189,7 +191,7 @@ pub fn get_state_events_for_key_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] -pub fn 
get_state_events_for_empty_key_route( +pub async fn get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index c605313..7bbf9e8 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -13,7 +13,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] -pub fn update_tag_route( +pub async fn update_tag_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -40,6 +40,8 @@ pub fn update_tag_route( &db.globals, )?; + db.flush().await?; + Ok(create_tag::Response.into()) } @@ -47,7 +49,7 @@ pub fn update_tag_route( feature = "conduit_bin", delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] -pub fn delete_tag_route( +pub async fn delete_tag_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -71,6 +73,8 @@ pub fn delete_tag_route( &db.globals, )?; + db.flush().await?; + Ok(delete_tag::Response.into()) } @@ -78,7 +82,7 @@ pub fn delete_tag_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") )] -pub fn get_tags_route( +pub async fn get_tags_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index d9b540b..c775e9b 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -10,7 +10,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", get("/_matrix/client/r0/thirdparty/protocols") )] -pub fn get_protocols_route() -> ConduitResult { +pub async fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { protocols: BTreeMap::new(), diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 6719dae..8cc3e29 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -12,7 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") )] -pub fn send_event_to_device_route( +pub async fn send_event_to_device_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { @@ -66,5 +66,7 @@ pub fn send_event_to_device_route( db.transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; + db.flush().await?; + Ok(send_event_to_device::Response.into()) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index ea7f633..e51ed56 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -15,7 +15,7 @@ use rocket::get; /// Note: Unstable features are used while developing new features. 
Clients should avoid using /// unstable features in their stable releases #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] -pub fn get_supported_versions_route() -> ConduitResult { +pub async fn get_supported_versions_route() -> ConduitResult { let mut resp = get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index dcf48fe..5829364 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -9,7 +9,7 @@ use rocket::post; feature = "conduit_bin", post("/_matrix/client/r0/user_directory/search", data = "") )] -pub fn search_users_route( +pub async fn search_users_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 33080ea..a8db62a 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -5,7 +5,7 @@ use ruma::api::client::{error::ErrorKind, r0::message::send_message_event}; use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -pub fn turn_server_route() -> ConduitResult { +pub async fn turn_server_route() -> ConduitResult { Err(Error::BadRequest( ErrorKind::NotFound, "There is no turn server yet.", diff --git a/src/database.rs b/src/database.rs index 4b2cba1..6bdc32a 100644 --- a/src/database.rs +++ b/src/database.rs @@ -227,4 +227,9 @@ impl Database { // Wait until one of them finds something futures.next().await; } + + pub async fn flush(&self) -> Result<()> { + self._db.flush_async().await?; + Ok(()) + } } From 6b3934e31da101873369984af0a4ca1ec6ac574d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Oct 2020 21:43:59 +0200 Subject: [PATCH 0327/1727] feat: configurable cache capacity --- Cargo.toml | 2 +- src/database.rs | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9e23c36..8b29be8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec- # Used for long polling tokio = "0.2.22" # Used for storing data permanently -sled = "0.34.4" +sled = { version = "0.34.4", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions diff --git a/src/database.rs b/src/database.rs index 6bdc32a..883ef85 100644 --- a/src/database.rs +++ b/src/database.rs @@ -10,12 +10,11 @@ pub mod users; use crate::{Error, Result}; use directories::ProjectDirs; -use log::info; -use std::fs::remove_dir_all; - use futures::StreamExt; +use log::info; use rocket::{futures, Config}; use ruma::{DeviceId, UserId}; +use std::{convert::TryFrom, fs::remove_dir_all}; pub struct Database { pub globals: globals::Globals, @@ -66,7 +65,19 @@ impl Database { .to_owned()) })?; - let db = sled::open(&path)?; + let db = sled::Config::default() + .path(&path) + .cache_capacity( + u64::try_from( + config + .get_int("cache_capacity") + .unwrap_or(1024 * 1024 * 1024), + ) + .map_err(|_| Error::BadConfig("Cache capacity needs to be a u64."))?, + ) + .print_profile_on_drop(false) + .open()?; + info!("Opened sled database at {}", path); Ok(Self { From df82314440c711d90168584f57ce42ad1e122f65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 23 Oct 2020 14:35:41 +0200 Subject: [PATCH 0328/1727] improvement: welcome message --- src/client_server/account.rs | 36 
++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 74f862c..fad59c3 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -14,8 +14,14 @@ use ruma::{ }, }, events::{ - room::canonical_alias, room::guest_access, room::history_visibility, room::join_rules, - room::member, room::name, room::topic, EventType, + room::canonical_alias, + room::guest_access, + room::history_visibility, + room::join_rules, + room::member, + room::name, + room::{message, topic}, + EventType, }, RoomAliasId, RoomId, RoomVersionId, UserId, }; @@ -464,6 +470,32 @@ pub async fn register_route( &db.sending, &db.account_data, )?; + + // Send welcome message + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message::MessageEventContent::Text( + message::TextMessageEventContent { + body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + formatted: Some(message::FormattedBody { + format: message::MessageFormat::Html, + body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + }), + relates_to: None, + }, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &db.globals, + &db.sending, + &db.account_data, + )?; } db.flush().await?; From 2f5df4aac97d22f28b43b64272f2e75ca4272c22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Oct 2020 16:08:54 +0200 Subject: [PATCH 0329/1727] improvement: more reliable federation sending --- src/database.rs | 3 +- src/database/rooms.rs | 2 +- src/database/sending.rs | 163 ++++++++++++++++++++++++++++++++-------- src/server_server.rs | 5 +- 4 files changed, 137 insertions(+), 36 deletions(-) diff --git a/src/database.rs b/src/database.rs index 883ef85..3b0bd6f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -149,7 +149,8 @@ impl Database { userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, }, sending: sending::Sending { - serverpduids: db.open_tree("serverpduids")?, + servernamepduids: db.open_tree("servernamepduids")?, + servercurrentpdus: db.open_tree("servercurrentpdus")?, }, _db: db, }) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 35c3eac..1cc20a4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -367,7 +367,7 @@ impl Rooms { } /// Returns the pdu. 
- pub fn get_pdu_json_from_id(&self, pdu_id: &IVec) -> Result> { + pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) diff --git a/src/database/sending.rs b/src/database/sending.rs index 24a783b..33ee530 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,8 +1,8 @@ -use std::{collections::HashSet, convert::TryFrom, time::SystemTime}; +use std::{collections::HashMap, convert::TryFrom, time::SystemTime}; use crate::{server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::warn; +use log::debug; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{api::federation, ServerName}; use sled::IVec; @@ -10,54 +10,145 @@ use tokio::select; pub struct Sending { /// The state for a given state hash. - pub(super) serverpduids: sled::Tree, // ServerPduId = ServerName + PduId + pub(super) servernamepduids: sled::Tree, // ServernamePduId = ServerName + PduId + pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = ServerName + PduId (pduid can be empty for reservation) } impl Sending { pub fn start_handler(&self, globals: &super::globals::Globals, rooms: &super::rooms::Rooms) { - let serverpduids = self.serverpduids.clone(); + let servernamepduids = self.servernamepduids.clone(); + let servercurrentpdus = self.servercurrentpdus.clone(); let rooms = rooms.clone(); let globals = globals.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); - let mut waiting_servers = HashSet::new(); - let mut subscriber = serverpduids.watch_prefix(b""); + // Retry requests we could not finish yet + let mut current_transactions = HashMap::new(); + + for (server, pdu) in servercurrentpdus + .iter() + .filter_map(|r| r.ok()) + .map(|(key, _)| { + let mut parts = key.splitn(2, |&b| b == 0xff); + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts.next().ok_or_else(|| { + Error::bad_database("Invalid bytes in servercurrentpdus.") + })?; + + Ok::<_, Error>(( + Box::::try_from(utils::string_from_bytes(&server).map_err( + |_| { + Error::bad_database( + "Invalid server bytes in server_currenttransaction", + ) + }, + )?) + .map_err(|_| { + Error::bad_database( + "Invalid server string in server_currenttransaction", + ) + })?, + IVec::from(pdu), + )) + }) + .filter_map(|r| r.ok()) + { + if !pdu.is_empty() { + current_transactions + .entry(server) + .or_insert_with(Vec::new) + .push(pdu); + } + } + + for (server, pdus) in current_transactions { + futures.push(Self::handle_event(server, pdus, &globals, &rooms)); + } + + let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! 
{ Some(server) = futures.next() => { - warn!("response: {:?}", &server); - warn!("futures left: {}", &futures.len()); + debug!("response: {:?}", &server); match server { Ok((server, _response)) => { - waiting_servers.remove(&server) + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xff); + + for key in servercurrentpdus + .scan_prefix(&prefix) + .keys() + .filter_map(|r| r.ok()) + { + // Don't remove reservation yet + if prefix.len() != key.len() { + servercurrentpdus.remove(key).unwrap(); + } + } + + // Find events that have been added since starting the last request + let new_pdus = servernamepduids + .scan_prefix(&prefix) + .keys() + .filter_map(|r| r.ok()) + .map(|k| { + k.subslice(prefix.len(), k.len() - prefix.len()) + }).collect::>(); + + if !new_pdus.is_empty() { + for pdu_id in &new_pdus { + let mut current_key = prefix.clone(); + current_key.extend_from_slice(pdu_id); + servercurrentpdus.insert(¤t_key, &[]).unwrap(); + servernamepduids.remove(¤t_key).unwrap(); + } + + futures.push(Self::handle_event(server, new_pdus, &globals, &rooms)); + } else { + servercurrentpdus.remove(&prefix).unwrap(); + } } - Err((server, _e)) => { - waiting_servers.remove(&server) + Err((_server, _e)) => { + // TODO: exponential backoff } }; }, Some(event) = &mut subscriber => { if let sled::Event::Insert { key, .. } = event { - let serverpduid = key.clone(); - let mut parts = serverpduid.splitn(2, |&b| b == 0xff); + let servernamepduid = key.clone(); + let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); if let Some((server, pdu_id)) = utils::string_from_bytes( parts .next() .expect("splitn will always return 1 or more elements"), ) - .map_err(|_| Error::bad_database("ServerName in serverpduid bytes are invalid.")) + .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .and_then(|server_str|Box::::try_from(server_str) - .map_err(|_| Error::bad_database("ServerName in serverpduid is invalid."))) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))) .ok() - .filter(|server| waiting_servers.insert(server.clone())) .and_then(|server| parts - .next() - .ok_or_else(|| Error::bad_database("Invalid serverpduid in db.")).ok().map(|pdu_id| (server, pdu_id))) + .next() + .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) + .ok() + .map(|pdu_id| (server, pdu_id)) + ) + // TODO: exponential backoff + .filter(|(server, _)| { + let mut prefix = server.to_string().as_bytes().to_vec(); + prefix.push(0xff); + + servercurrentpdus + .compare_and_swap(prefix, Option::<&[u8]>::None, Some(&[])) // Try to reserve + == Ok(Ok(())) + }) { - futures.push(Self::handle_event(server, pdu_id.into(), &globals, &rooms)); + servercurrentpdus.insert(&key, &[]).unwrap(); + servernamepduids.remove(&key).unwrap(); + + futures.push(Self::handle_event(server, vec![pdu_id.into()], &globals, &rooms)); } } } @@ -70,38 +161,44 @@ impl Sending { let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); - self.serverpduids.insert(key, b"")?; + self.servernamepduids.insert(key, b"")?; Ok(()) } async fn handle_event( server: Box, - pdu_id: IVec, + pdu_ids: Vec, globals: &super::globals::Globals, rooms: &super::rooms::Rooms, ) -> std::result::Result< (Box, send_transaction_message::v1::Response), (Box, Error), > { - let pdu_json = PduEvent::convert_to_outgoing_federation_event( - rooms - .get_pdu_json_from_id(&pdu_id) - .map_err(|e| (server.clone(), e))? 
- .ok_or_else(|| { - ( - server.clone(), - Error::bad_database("Event in serverpduids not found in db."), - ) - })?, - ); + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Box, Error)>(PduEvent::convert_to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database("Event in servernamepduids not found in db."), + ) + })?, + )) + }) + .filter_map(|r| r.ok()) + .collect::>(); server_server::send_request( &globals, server.clone(), send_transaction_message::v1::Request { origin: globals.server_name(), - pdus: &[pdu_json], + pdus: &pdu_jsons, edus: &[], origin_server_ts: SystemTime::now(), transaction_id: &utils::random_string(16), diff --git a/src/server_server.rs b/src/server_server.rs index 184f333..ccb1399 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -186,7 +186,10 @@ where let body = reqwest_response .bytes() .await - .unwrap() + .unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }) // TODO: handle timeout .into_iter() .collect(); From 07621969638d3da6b638d161f4feb86a9affb511 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 27 Oct 2020 20:25:43 +0100 Subject: [PATCH 0330/1727] fix: don't send new events from left rooms --- src/client_server/membership.rs | 5 ++- src/client_server/profile.rs | 2 ++ src/client_server/room.rs | 4 ++- src/client_server/state.rs | 12 ++++--- src/client_server/sync.rs | 64 +++++++++++++++++++++------------ src/database/rooms.rs | 40 +++++++++++---------- 6 files changed, 79 insertions(+), 48 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3380601..d79079d 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -103,6 +103,7 @@ pub async fn leave_room_route( ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? + .1 .content, ) .expect("from_value::> can never fail") @@ -193,6 +194,7 @@ pub async fn kick_user_route( ErrorKind::BadState, "Cannot kick member that's not in the room.", ))? + .1 .content, ) .expect("Raw::from_value always works") @@ -249,7 +251,7 @@ pub async fn ban_user_route( is_direct: None, third_party_invite: None, }), - |event| { + |(_, event)| { let mut event = serde_json::from_value::>(event.content) .expect("Raw::from_value always works") @@ -301,6 +303,7 @@ pub async fn unban_user_route( ErrorKind::BadState, "Cannot unban a user who is not banned.", ))? + .1 .content, ) .expect("from_value::> can never fail") diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index d754ace..3fa1da6 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -48,6 +48,7 @@ pub async fn set_displayname_route( "Tried to send displayname update for user not in the room.", ) })? + .1 .content .clone(), ) @@ -142,6 +143,7 @@ pub async fn set_avatar_url_route( "Tried to send avatar url update for user not in the room.", ) })? + .1 .content .clone(), ) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index d1d051f..eeab68b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -395,6 +395,7 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
+ .1 .content, ) .expect("Raw::from_value always works") @@ -470,7 +471,7 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { - Some(v) => v.content.clone(), + Some((_, v)) => v.content.clone(), None => continue, // Skipping missing events. }; @@ -502,6 +503,7 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .1 .content, ) .expect("database contains invalid PDU") diff --git a/src/client_server/state.rs b/src/client_server/state.rs index eae96b5..dbc7fdd 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -109,7 +109,7 @@ pub async fn get_state_events_route( if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|event| { + .map(|(_, event)| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -154,7 +154,7 @@ pub async fn get_state_events_for_key_route( if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|event| { + .map(|(_, event)| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -178,7 +178,8 @@ pub async fn get_state_events_for_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))?; + ))? + .1; Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) @@ -203,7 +204,7 @@ pub async fn get_state_events_for_empty_key_route( if !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|event| { + .map(|(_, event)| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -227,7 +228,8 @@ pub async fn get_state_events_for_empty_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))?; + ))? 
+ .1; Ok(get_state_events_for_empty_key::Response { content: serde_json::value::to_raw_value(&event) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index caab9ea..360691a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -440,23 +440,8 @@ pub async fn sync_events_route( let mut left_rooms = BTreeMap::new(); for room_id in db.rooms.rooms_left(&sender_user) { let room_id = room_id?; - let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; - let room_events = pdus - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - let left_room = sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - timeline: sync_events::Timeline { - limited: false, - prev_batch: Some(next_batch.clone()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, - }; - - let since_member = db + let since_member = if let Some(since_member) = db .rooms .pdus_after(sender_user, &room_id, since) .next() @@ -475,20 +460,25 @@ pub async fn sync_events_route( .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() }) - .and_then(|pdu| { + .and_then(|(pdu_id, pdu)| { serde_json::from_value::>( - pdu.content, + pdu.content.clone(), ) .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .map(|content| (pdu_id, pdu, content)) .ok() - }); + }) { + since_member + } else { + // We couldn't find the since_member event. This is very weird - we better abort + continue; + }; - let left_since_last_sync = - since_member.map_or(false, |member| member.membership == MembershipState::Join); + let left_since_last_sync = since_member.2.membership == MembershipState::Join; - if left_since_last_sync { + let left_room = if left_since_last_sync { device_list_left.extend( db.rooms .room_members(&room_id) @@ -503,7 +493,35 @@ pub async fn sync_events_route( !share_encrypted_room(&db, sender_user, user_id, &room_id) }), ); - } + + let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; + let mut room_events = pdus + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .take_while(|(pdu_id, _)| since_member.0 != pdu_id) + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect::>(); + room_events.push(since_member.1.to_sync_room_event()); + + sync_events::LeftRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + timeline: sync_events::Timeline { + limited: false, + prev_batch: Some(next_batch.clone()), + events: room_events, + }, + state: sync_events::State { events: Vec::new() }, + } + } else { + sync_events::LeftRoom { + account_data: sync_events::AccountData { events: Vec::new() }, + timeline: sync_events::Timeline { + limited: false, + prev_batch: Some(next_batch.clone()), + events: Vec::new(), + }, + state: sync_events::State { events: Vec::new() }, + } + }; if !left_room.is_empty() { left_rooms.insert(room_id.clone(), left_room); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1cc20a4..05abe03 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -169,7 +169,7 @@ impl Rooms { state_hash: &StateHashId, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { let mut key = state_hash.to_vec(); key.push(0xff); key.extend_from_slice(&event_type.to_string().as_bytes()); @@ -177,14 +177,15 @@ impl Rooms { key.extend_from_slice(&state_key.as_bytes()); self.stateid_pduid.get(&key)?.map_or(Ok(None), |pdu_id| { - Ok::<_, 
Error>(Some( + Ok::<_, Error>(Some(( + pdu_id.clone(), serde_json::from_slice::( - &self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { + &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { Error::bad_database("PDU in state not found in database.") })?, ) .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, - )) + ))) }) } @@ -216,7 +217,7 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { events.insert((event_type, state_key), pdu); } } @@ -299,7 +300,7 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_get(¤t_state_hash, event_type, state_key) } else { @@ -653,7 +654,7 @@ impl Rooms { }, }) }, - |power_levels| { + |(_, power_levels)| { Ok(serde_json::from_value::>( power_levels.content, ) @@ -664,15 +665,18 @@ impl Rooms { )?; let sender_membership = self .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, - ) - })?; + .map_or( + Ok::<_, Error>(member::MembershipState::Leave), + |(_, pdu)| { + Ok( + serde_json::from_value::>(pdu.content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership, + ) + }, + )?; let sender_power = power_levels.users.get(&sender).map_or_else( || { @@ -759,7 +763,7 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { + if let Some((_, prev_pdu)) = self.room_state_get(&room_id, &event_type, &state_key)? { unsigned.insert("prev_content".to_owned(), prev_pdu.content); unsigned.insert( "prev_sender".to_owned(), @@ -1017,7 +1021,7 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self .room_state_get(&room_id, &EventType::RoomCreate, "")? 
- .and_then(|create| { + .and_then(|(_, create)| { serde_json::from_value::< Raw, >(create.content) From 335a33c901b5ed13b665d3256c6616b4ca7b2c03 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Fri, 30 Oct 2020 02:57:22 +0100 Subject: [PATCH 0331/1727] Fix and Improve Complement testing Dockerfile --- tests/Complement.Dockerfile | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index f32f878..f3cbd99 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -14,17 +14,25 @@ RUN cargo build FROM valkum/docker-rust-ci:latest WORKDIR /workdir -RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz" -RUN tar xzf caddy_2.1.1_linux_amd64.tar.gz +RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_amd64.tar.gz" +RUN tar xzf caddy_2.2.1_linux_amd64.tar.gz COPY --from=builder /workdir/target/debug/conduit /workdir/conduit COPY Rocket-example.toml Rocket.toml ENV SERVER_NAME=localhost +ENV ROCKET_LOG=normal -RUN sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml RUN sed -i "s/port = 14004/port = 8008/g" Rocket.toml +RUN echo "federation_enabled = true" >> Rocket.toml +# Enabled Caddy auto cert generation for complement provided CA. +RUN echo '{"apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"localhost:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json + EXPOSE 8008 8448 -CMD /workdir/caddy reverse-proxy --from ${SERVER_NAME}:8448 --to localhost:8008 > /dev/null 2>&1 & /workdir/conduit \ No newline at end of file + +CMD sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ + sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ + /workdir/caddy start --config caddy.json > /dev/null && \ + /workdir/conduit \ No newline at end of file From 16b22bb432b3424000c76870eaf967873047dfd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Nov 2020 21:20:35 +0100 Subject: [PATCH 0332/1727] fix: don't allow more than 50 PDUs in a transaction --- src/database/sending.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 33ee530..597778f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -54,13 +54,14 @@ impl Sending { )) }) .filter_map(|r| r.ok()) + .filter(|pdu| !pdu.is_empty()) // Skip reservation key + .take(50) + // This should not contain more than 50 anyway { - if !pdu.is_empty() { - current_transactions - .entry(server) - .or_insert_with(Vec::new) - .push(pdu); - } + current_transactions + .entry(server) + .or_insert_with(Vec::new) + .push(pdu); } for (server, pdus) in current_transactions { @@ -95,7 +96,9 @@ impl Sending { .filter_map(|r| r.ok()) .map(|k| { 
k.subslice(prefix.len(), k.len() - prefix.len()) - }).collect::>(); + }) + .take(50) + .collect::>(); if !new_pdus.is_empty() { for pdu_id in &new_pdus { @@ -108,6 +111,7 @@ impl Sending { futures.push(Self::handle_event(server, new_pdus, &globals, &rooms)); } else { servercurrentpdus.remove(&prefix).unwrap(); + // servercurrentpdus with the prefix should be empty now } } Err((_server, _e)) => { From 9f8cffcd22dcf4ca27a82c262aeb25c7ec05e257 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 9 Nov 2020 12:21:04 +0100 Subject: [PATCH 0333/1727] Admin room improvements --- src/client_server/account.rs | 13 ++++++ src/client_server/membership.rs | 8 +++- src/client_server/message.rs | 1 + src/client_server/profile.rs | 2 + src/client_server/redact.rs | 1 + src/client_server/room.rs | 15 +++++++ src/client_server/state.rs | 1 + src/database.rs | 21 ++++++++-- src/database/account_data.rs | 1 + src/database/admin.rs | 74 +++++++++++++++++++++++++++++++++ src/database/key_backups.rs | 1 + src/database/media.rs | 1 + src/database/rooms.rs | 34 +++++---------- src/database/sending.rs | 3 +- src/database/transaction_ids.rs | 1 + src/database/uiaa.rs | 1 + src/database/users.rs | 1 + src/server_server.rs | 2 +- 18 files changed, 152 insertions(+), 29 deletions(-) create mode 100644 src/database/admin.rs diff --git a/src/client_server/account.rs b/src/client_server/account.rs index fad59c3..81119ba 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -241,6 +241,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -264,6 +265,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -300,6 +302,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -319,6 +322,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -340,6 +344,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -359,6 +364,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -380,6 +386,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -398,6 +405,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -422,6 +430,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -447,6 +456,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; db.rooms.build_and_append_pdu( @@ -468,6 +478,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -494,6 +505,7 @@ pub async fn register_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -666,6 +678,7 @@ pub async fn deactivate_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d79079d..25cad85 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,6 +124,7 @@ pub async fn leave_room_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -162,6 +163,7 @@ pub async fn invite_user_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ 
-216,6 +218,7 @@ pub async fn kick_user_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -274,6 +277,7 @@ pub async fn ban_user_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -324,6 +328,7 @@ pub async fn unban_user_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -673,7 +678,7 @@ async fn join_room_by_id_helper( pdu_id.clone().into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; if state_events.contains(ev_id) { @@ -703,6 +708,7 @@ async fn join_room_by_id_helper( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index f9c8ba1..327b9ab 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -67,6 +67,7 @@ pub async fn send_message_event_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 3fa1da6..22d13cb 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -65,6 +65,7 @@ pub async fn set_displayname_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -160,6 +161,7 @@ pub async fn set_avatar_url_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 486eb6c..6f7728a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -33,6 +33,7 @@ pub async fn redact_event_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index eeab68b..fdc9529 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,6 +65,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -88,6 +89,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -131,6 +133,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -165,6 +168,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -184,6 +188,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -211,6 +216,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -232,6 +238,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -255,6 +262,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -275,6 +283,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -300,6 +309,7 @@ pub async fn create_room_route( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -387,6 +397,7 @@ pub async fn upgrade_room_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -429,6 +440,7 @@ pub async fn upgrade_room_route( &replacement_room, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -452,6 +464,7 @@ pub async fn upgrade_room_route( &replacement_room, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; @@ -487,6 +500,7 @@ pub async fn upgrade_room_route( 
&replacement_room, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; } @@ -532,6 +546,7 @@ pub async fn upgrade_room_route( &body.room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index dbc7fdd..ca6bdf7 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,6 +284,7 @@ pub async fn send_state_event_for_key_helper( &room_id, &db.globals, &db.sending, + &db.admin, &db.account_data, )?; diff --git a/src/database.rs b/src/database.rs index 3b0bd6f..51c3895 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,4 +1,5 @@ pub mod account_data; +pub mod admin; pub mod globals; pub mod key_backups; pub mod media; @@ -12,10 +13,14 @@ use crate::{Error, Result}; use directories::ProjectDirs; use futures::StreamExt; use log::info; -use rocket::{futures, Config}; +use rocket::{ + futures::{self, channel::mpsc}, + Config, +}; use ruma::{DeviceId, UserId}; use std::{convert::TryFrom, fs::remove_dir_all}; +#[derive(Clone)] pub struct Database { pub globals: globals::Globals, pub users: users::Users, @@ -26,6 +31,7 @@ pub struct Database { pub key_backups: key_backups::KeyBackups, pub transaction_ids: transaction_ids::TransactionIds, pub sending: sending::Sending, + pub admin: admin::Admin, pub _db: sled::Db, } @@ -80,7 +86,9 @@ impl Database { info!("Opened sled database at {}", path); - Ok(Self { + let (admin_sender, admin_receiver) = mpsc::unbounded(); + + let db = Self { globals: globals::Globals::load(db.open_tree("global")?, config)?, users: users::Users { userid_password: db.open_tree("userid_password")?, @@ -152,8 +160,15 @@ impl Database { servernamepduids: db.open_tree("servernamepduids")?, servercurrentpdus: db.open_tree("servercurrentpdus")?, }, + admin: admin::Admin { + sender: admin_sender, + }, _db: db, - }) + }; + + db.admin.start_handler(db.clone(), admin_receiver); + + Ok(db) } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { diff --git a/src/database/account_data.rs b/src/database/account_data.rs index a917123..9a6a050 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -8,6 +8,7 @@ use serde::{de::DeserializeOwned, Serialize}; use sled::IVec; use std::{collections::HashMap, convert::TryFrom}; +#[derive(Clone)] pub struct AccountData { pub(super) roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + Type } diff --git a/src/database/admin.rs b/src/database/admin.rs new file mode 100644 index 0000000..f8b2385 --- /dev/null +++ b/src/database/admin.rs @@ -0,0 +1,74 @@ +use std::convert::{TryFrom, TryInto}; + +use crate::{pdu::PduBuilder, Error}; +use rocket::futures::{channel::mpsc, stream::StreamExt}; +use ruma::{events::room::message, events::EventType, UserId}; +use tokio::select; + +pub enum AdminCommand { + SendTextMessage(message::TextMessageEventContent), +} + +#[derive(Clone)] +pub struct Admin { + pub sender: mpsc::UnboundedSender, +} + +impl Admin { + pub fn start_handler( + &self, + db: super::Database, + mut receiver: mpsc::UnboundedReceiver, + ) { + tokio::spawn(async move { + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); + + let conduit_user = UserId::try_from(format!("@conduit:{}", db.globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = db + .rooms + .id_from_alias( + &format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid room alias"), 
+ ) + .unwrap() + .ok_or_else(|| Error::BadConfig("Conduit instance does not have an #admins room.")) + .unwrap(); + + loop { + select! { + Some(event) = receiver.next() => { + match event { + AdminCommand::SendTextMessage(message) => { + println!("{:?}", message); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message).expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &db.globals, + &db.sending, + &db.admin, + &db.account_data, + ).unwrap(); + } + } + } + } + } + }); + } + + pub fn send(&self, command: AdminCommand) { + self.sender.unbounded_send(command).unwrap() + } +} diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 1ce7595..a50e45e 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -8,6 +8,7 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryFrom}; +#[derive(Clone)] pub struct KeyBackups { pub(super) backupid_algorithm: sled::Tree, // BackupId = UserId + Version(Count) pub(super) backupid_etag: sled::Tree, // BackupId = UserId + Version(Count) diff --git a/src/database/media.rs b/src/database/media.rs index 3ecf4bd..8c59aa4 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -9,6 +9,7 @@ pub struct FileMeta { pub file: Vec, } +#[derive(Clone)] pub struct Media { pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + Filename + ContentType } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 05abe03..8ab900f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,6 +27,8 @@ use std::{ sync::Arc, }; +use super::admin::AdminCommand; + /// The unique identifier of each state group. 
/// /// This is created when a state group is added to the database by @@ -443,7 +445,7 @@ impl Rooms { pdu_id: IVec, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, - sending: &super::sending::Sending, + admin: &super::admin::Admin, ) -> Result<()> { self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; @@ -514,28 +516,13 @@ impl Rooms { if let Some(command) = parts.next() { let args = parts.collect::>(); - self.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: serde_json::to_value( - message::TextMessageEventContent { - body: format!("Command: {}, Args: {:?}", command, args), - formatted: None, - relates_to: None, - }, - ) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, + admin.send(AdminCommand::SendTextMessage( + message::TextMessageEventContent { + body: format!("Command: {}, Args: {:?}", command, args), + formatted: None, + relates_to: None, }, - &UserId::try_from(format!("@conduit:{}", globals.server_name())) - .expect("@conduit:server_name is valid"), - &pdu.room_id, - &globals, - &sending, - &account_data, - )?; + )); } } } @@ -612,6 +599,7 @@ impl Rooms { room_id: &RoomId, globals: &super::globals::Globals, sending: &super::sending::Sending, + admin: &super::admin::Admin, account_data: &super::account_data::AccountData, ) -> Result { let PduBuilder { @@ -849,7 +837,7 @@ impl Rooms { pdu_id.clone().into(), globals, account_data, - sending, + admin, )?; for server in self diff --git a/src/database/sending.rs b/src/database/sending.rs index 597778f..e3fca4f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,6 +8,7 @@ use ruma::{api::federation, ServerName}; use sled::IVec; use tokio::select; +#[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
pub(super) servernamepduids: sled::Tree, // ServernamePduId = ServerName + PduId @@ -54,7 +55,7 @@ impl Sending { )) }) .filter_map(|r| r.ok()) - .filter(|pdu| !pdu.is_empty()) // Skip reservation key + .filter(|(_, pdu)| !pdu.is_empty()) // Skip reservation key .take(50) // This should not contain more than 50 anyway { diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 9485b36..7c0eb98 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -2,6 +2,7 @@ use crate::Result; use ruma::{DeviceId, UserId}; use sled::IVec; +#[derive(Clone)] pub struct TransactionIds { pub(super) userdevicetxnid_response: sled::Tree, // Response can be empty (/sendToDevice) or the event id (/send) } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index e318f43..381a701 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -7,6 +7,7 @@ use ruma::{ DeviceId, UserId, }; +#[derive(Clone)] pub struct Uiaa { pub(super) userdeviceid_uiaainfo: sled::Tree, // User-interactive authentication } diff --git a/src/database/users.rs b/src/database/users.rs index 0d35e36..2a03960 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -14,6 +14,7 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; +#[derive(Clone)] pub struct Users { pub(super) userid_password: sled::Tree, pub(super) userid_displayname: sled::Tree, diff --git a/src/server_server.rs b/src/server_server.rs index ccb1399..0f24e15 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -404,7 +404,7 @@ pub fn send_transaction_message_route<'a>( pdu_id.clone().into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; } } From e4c5ed96a9612b0e10a42bb108cf2ae58a776d3a Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Mon, 9 Nov 2020 17:21:35 +0100 Subject: [PATCH 0334/1727] Sync with newest complement changes --- tests/Complement.Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index f3cbd99..21c3105 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -32,7 +32,9 @@ RUN echo '{"apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448 EXPOSE 8008 8448 -CMD sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ +CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement CA support" && true) || \ + sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ /workdir/caddy start --config caddy.json > /dev/null && \ - /workdir/conduit \ No newline at end of file + /workdir/conduit + \ No newline at end of file From fc08b13db1987064c3a9532a9cda0e4d7d0bd25a Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Mon, 9 Nov 2020 17:32:04 +0100 Subject: [PATCH 0335/1727] Change CA to PKI per naming in Complement --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 21c3105..306105a 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -32,7 +32,7 @@ RUN echo '{"apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448 EXPOSE 8008 8448 -CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement CA support" && true) || \ +CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ sed -i 
"s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ /workdir/caddy start --config caddy.json > /dev/null && \ From 79692db45dfe2fec6d0fec280bfba2be740d58a5 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 31 May 2020 22:49:07 +0200 Subject: [PATCH 0336/1727] First version of cargo-deb packaging setup --- Cargo.toml | 22 ++++++++++++++++ debian/config | 23 +++++++++++++++++ debian/env | 48 +++++++++++++++++++++++++++++++++++ debian/matrix-conduit.service | 21 +++++++++++++++ debian/postinst | 26 +++++++++++++++++++ debian/postrm | 22 ++++++++++++++++ debian/templates | 14 ++++++++++ 7 files changed, 176 insertions(+) create mode 100644 debian/config create mode 100644 debian/env create mode 100644 debian/matrix-conduit.service create mode 100644 debian/postinst create mode 100644 debian/postrm create mode 100644 debian/templates diff --git a/Cargo.toml b/Cargo.toml index 8b29be8..1feb7ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,3 +72,25 @@ required-features = ["conduit_bin"] [lib] name = "conduit" path = "src/lib.rs" + +[package.metadata.deb] +name = "matrix-conduit" +maintainer = "Paul van Tilburg " +copyright = "2020, Timo Kösters " +license-file = ["LICENSE", "3"] +depends = "$auto, ca-certificates" +extended-description = """\ +A fast Matrix homeserver that is optimized for smaller, personal servers, \ +instead of a server that has high scalability.""" +section = "net" +priority = "optional" +assets = [ + ["debian/env", "etc/matrix-conduit/env", "644"], + ["README.md", "usr/share/doc/matrix-conduit/", "644"], + ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], +] +conf-files = [ + "/etc/matrix-conduit/env" +] +maintainer-scripts = "debian/" +systemd-units = { unit-name = "matrix-conduit" } diff --git a/debian/config b/debian/config new file mode 100644 index 0000000..a9ad498 --- /dev/null +++ b/debian/config @@ -0,0 +1,23 @@ +#!/bin/sh +set -e + +# Source debconf library. +. /usr/share/debconf/confmodule + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/env" + +# Ask for the Matrix homeserver name and port. +db_input high matrix-conduit/hostname || true +db_go + +db_input medium matrix-conduit/port || true +db_go + +# Update the values in the config. +db_get matrix-conduit/hostname +sed -i -e "s/^ROCKET_SERVER_NAME=.*/ROCKET_SERVER_NAME=\"$RET\"/" $CONDUIT_CONFIG_FILE +db_get matrix-conduit/port +sed -i -e "s/^ROCKET_PORT=.*/ROCKET_PORT=\"$RET\"/" $CONDUIT_CONFIG_FILE + +exit 0 diff --git a/debian/env b/debian/env new file mode 100644 index 0000000..3f72c5b --- /dev/null +++ b/debian/env @@ -0,0 +1,48 @@ +# Conduit homeserver configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit can happen either via a `Rocket.toml` file that +# is placed in /var/lib/matrix-conduit or via setting the environment +# variables below. + +# The server (host)name of the Matrix homeserver. +# +# This is the hostname the homeserver will be reachable at via a client. +ROCKET_SERVER_NAME="YOURSERVERNAME.HERE" + +# The address the Matrix homeserver listens on. +# +# By default the server listens on 0.0.0.0. Change this for example to +# 127.0.0.1 to only listen on the localhost when using a reverse proxy. +#ROCKET_ADDRESS="0.0.0.0" + +# The port of the Matrix homeserver. +# +# This port is often accessed by a reverse proxy. 
+ROCKET_PORT="14004" + +# The maximum size of a Matrix HTTP requests in bytes. +# +# This mostly affects the size of files that can be downloaded/uploaded. +ROCKET_MAX_REQUEST_SIZE=20000000 + +# Whether user registration is allowed. +# +# User registration is allowed by default. +#ROCKET_REGISTRATION_DISABLED=true + +# Whether encryption is enabled. +# +# (End-to-end) encryption is enabled by default. +#ROCKET_ENCRYPTION_DISABLED=true + +# Whether federation with other Matrix servers is enabled. +# +# Federation is disabled by default; it is still experimental. +#ROCKET_FEDERATION_ENABLED=true + +# The log level of the homeserver. +# +# The log level is "critical" by default. +# Allowed values are: "off", "normal", "debug", "critical" +#ROCKET_LOG="normal" diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service new file mode 100644 index 0000000..96c4856 --- /dev/null +++ b/debian/matrix-conduit.service @@ -0,0 +1,21 @@ +[Unit] +Description=Conduit Matrix homeserver +After=network.target + +[Service] +User=_matrix-conduit +Group=_matrix-conduit +Type=simple + +Environment="ROCKET_ENV=production" +Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" +EnvironmentFile=/etc/matrix-conduit/env + +ExecStart=/usr/sbin/matrix-conduit +Restart=on-failure +RestartSec=10 +StartLimitInterval=1m +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/debian/postinst b/debian/postinst new file mode 100644 index 0000000..ee684da --- /dev/null +++ b/debian/postinst @@ -0,0 +1,26 @@ +#!/bin/sh +set -e + +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit + +case "$1" in + configure) + # Create the `_matrix-conduit` user if it does not exist yet. + if ! getent passwd _matrix-conduit > /dev/null ; then + echo 'Adding system user for the Conduit Matrix homeserver' 1>&2 + adduser --system --group --quiet \ + --home $CONDUIT_DATABASE_PATH \ + --disabled-login \ + --force-badname \ + _matrix-conduit + fi + + # Create the database path if it does not exist yet. + if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then + mkdir -p "$CONDUIT_DATABASE_PATH" + chown _matrix-conduit "$CONDUIT_DATABASE_PATH" + fi + ;; +esac + +#DEBHELPER# diff --git a/debian/postrm b/debian/postrm new file mode 100644 index 0000000..04ca325 --- /dev/null +++ b/debian/postrm @@ -0,0 +1,22 @@ +#!/bin/sh +set -e + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit + +case $1 in + purge) + # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior + # "configuration files must be preserved when the package is removed, and + # only deleted when the package is purged." + if [ -d "$CONDUIT_CONFIG_PATH" ]; then + rm -r "$CONDUIT_CONFIG_PATH" + fi + + if [ -d "$CONDUIT_DATABASE_PATH" ]; then + rm -r "$CONDUIT_DATABASE_PATH" + fi + ;; +esac + +#DEBHELPER# diff --git a/debian/templates b/debian/templates new file mode 100644 index 0000000..66bf55c --- /dev/null +++ b/debian/templates @@ -0,0 +1,14 @@ +Template: matrix-conduit/hostname +Type: string +Default: localhost +Description: The server (host)name of the Matrix homeserver. + This is the hostname the homeserver will be reachable at via a client. + . + If set to "localhost", you can connect with a client locally and clients + from other hosts and also other servers will not be able to reach you! + +Template: matrix-conduit/port +Type: string +Default: 14004 +Description: The port of the Matrix homeserver + This port is often accessed by a reverse proxy. 
From f72554de1014eea6b5c224548e9d0aeb15553cf7 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 20:35:22 +0100 Subject: [PATCH 0337/1727] Split config into a Debian and local part * The Debian part will be generated and managed by Debconf and configure homeserver name, address and port * The local part will just be a config file that shows the other configuration options Added the address configuration and moved the config generation from the config to the postinst script. --- Cargo.toml | 4 +-- debian/config | 14 +++------- debian/env | 48 ----------------------------------- debian/env.local | 33 ++++++++++++++++++++++++ debian/matrix-conduit.service | 3 ++- debian/postinst | 47 ++++++++++++++++++++++++++++++++++ debian/templates | 13 +++++++--- 7 files changed, 98 insertions(+), 64 deletions(-) delete mode 100644 debian/env create mode 100644 debian/env.local diff --git a/Cargo.toml b/Cargo.toml index 1feb7ca..d0dfcf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,12 +85,12 @@ instead of a server that has high scalability.""" section = "net" priority = "optional" assets = [ - ["debian/env", "etc/matrix-conduit/env", "644"], + ["debian/env.local", "etc/matrix-conduit/local", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] conf-files = [ - "/etc/matrix-conduit/env" + "/etc/matrix-conduit/local" ] maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } diff --git a/debian/config b/debian/config index a9ad498..8710ef9 100644 --- a/debian/config +++ b/debian/config @@ -4,20 +4,14 @@ set -e # Source debconf library. . /usr/share/debconf/confmodule -CONDUIT_CONFIG_PATH=/etc/matrix-conduit -CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/env" - -# Ask for the Matrix homeserver name and port. +# Ask for the Matrix homeserver name, address and port. db_input high matrix-conduit/hostname || true db_go +db_input low matrix-conduit/address || true +db_go + db_input medium matrix-conduit/port || true db_go -# Update the values in the config. -db_get matrix-conduit/hostname -sed -i -e "s/^ROCKET_SERVER_NAME=.*/ROCKET_SERVER_NAME=\"$RET\"/" $CONDUIT_CONFIG_FILE -db_get matrix-conduit/port -sed -i -e "s/^ROCKET_PORT=.*/ROCKET_PORT=\"$RET\"/" $CONDUIT_CONFIG_FILE - exit 0 diff --git a/debian/env b/debian/env deleted file mode 100644 index 3f72c5b..0000000 --- a/debian/env +++ /dev/null @@ -1,48 +0,0 @@ -# Conduit homeserver configuration -# -# Conduit is an application based on the Rocket web framework. -# Configuration of Conduit can happen either via a `Rocket.toml` file that -# is placed in /var/lib/matrix-conduit or via setting the environment -# variables below. - -# The server (host)name of the Matrix homeserver. -# -# This is the hostname the homeserver will be reachable at via a client. -ROCKET_SERVER_NAME="YOURSERVERNAME.HERE" - -# The address the Matrix homeserver listens on. -# -# By default the server listens on 0.0.0.0. Change this for example to -# 127.0.0.1 to only listen on the localhost when using a reverse proxy. -#ROCKET_ADDRESS="0.0.0.0" - -# The port of the Matrix homeserver. -# -# This port is often accessed by a reverse proxy. -ROCKET_PORT="14004" - -# The maximum size of a Matrix HTTP requests in bytes. -# -# This mostly affects the size of files that can be downloaded/uploaded. -ROCKET_MAX_REQUEST_SIZE=20000000 - -# Whether user registration is allowed. -# -# User registration is allowed by default. 
-#ROCKET_REGISTRATION_DISABLED=true - -# Whether encryption is enabled. -# -# (End-to-end) encryption is enabled by default. -#ROCKET_ENCRYPTION_DISABLED=true - -# Whether federation with other Matrix servers is enabled. -# -# Federation is disabled by default; it is still experimental. -#ROCKET_FEDERATION_ENABLED=true - -# The log level of the homeserver. -# -# The log level is "critical" by default. -# Allowed values are: "off", "normal", "debug", "critical" -#ROCKET_LOG="normal" diff --git a/debian/env.local b/debian/env.local new file mode 100644 index 0000000..cd552de --- /dev/null +++ b/debian/env.local @@ -0,0 +1,33 @@ +# Conduit homeserver local configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit happens via Debconf (see the resulting config in +# `/etc/matrix-conduit/debian`) and optionally by uncommenting and tweaking the +# variables in this file below. + +# The maximum size of a Matrix HTTP requests in bytes. +# +# This mostly affects the size of files that can be downloaded/uploaded. +# It defaults to 20971520 (20MB). +#ROCKET_MAX_REQUEST_SIZE=20971520 + +# Whether user registration is allowed. +# +# User registration is not disabled by default. +#ROCKET_REGISTRATION_DISABLED=false + +# Whether encryption is enabled. +# +# (End-to-end) encryption is not disabled by default. +#ROCKET_ENCRYPTION_DISABLED=false + +# Whether federation with other Matrix servers is enabled. +# +# Federation is not enabled by default; it is still experimental. +#ROCKET_FEDERATION_ENABLED=false + +# The log level of the homeserver. +# +# The log level is "critical" by default. +# Allowed values are: "off", "normal", "debug", "critical" +#ROCKET_LOG="critical" diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 96c4856..42969c1 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -9,7 +9,8 @@ Type=simple Environment="ROCKET_ENV=production" Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" -EnvironmentFile=/etc/matrix-conduit/env +EnvironmentFile=/etc/matrix-conduit/debian +EnvironmentFile=/etc/matrix-conduit/local ExecStart=/usr/sbin/matrix-conduit Restart=on-failure diff --git a/debian/postinst b/debian/postinst index ee684da..bd7fb85 100644 --- a/debian/postinst +++ b/debian/postinst @@ -1,6 +1,10 @@ #!/bin/sh set -e +. /usr/share/debconf/confmodule + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/debian" CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit case "$1" in @@ -20,6 +24,49 @@ case "$1" in mkdir -p "$CONDUIT_DATABASE_PATH" chown _matrix-conduit "$CONDUIT_DATABASE_PATH" fi + + # Write the debconf values in the config. + db_get matrix-conduit/hostname + ROCKET_SERVER_NAME="$RET" + db_get matrix-conduit/address + ROCKET_ADDRESS="$RET" + db_get matrix-conduit/port + ROCKET_PORT="$RET" + cat >"$CONDUIT_CONFIG_FILE" << EOF +# Conduit homeserver Debian configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit happens via Debconf (of which the resulting config +# is in this file) and optionally by uncommenting and tweaking the variables in +# /etc/matrix-conduit/local. + +# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! +# +# Please make changes by running: +# +# \$ dpkg-reconfigure matrix-conduit +# +# or by providing overriding changes in /etc/matrix-conduit/local. + +# The server (host)name of the Matrix homeserver. 
+# +# This is the hostname the homeserver will be reachable at via a client. +ROCKET_SERVER_NAME="$ROCKET_SERVER_NAME" + +# The address the Matrix homeserver listens on. +# +# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to +# only listen on the localhost when using a reverse proxy. +ROCKET_ADDRESS="$ROCKET_ADDRESS" + +# The port of the Matrix homeserver. +# +# This port is could be any available port if accessed by a reverse proxy. +# By default the server listens on port 8000. +ROCKET_PORT="$ROCKET_PORT" + +# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! +EOF ;; esac diff --git a/debian/templates b/debian/templates index 66bf55c..a408f84 100644 --- a/debian/templates +++ b/debian/templates @@ -1,14 +1,21 @@ Template: matrix-conduit/hostname Type: string Default: localhost -Description: The server (host)name of the Matrix homeserver. +Description: The server (host)name of the Matrix homeserver This is the hostname the homeserver will be reachable at via a client. . If set to "localhost", you can connect with a client locally and clients - from other hosts and also other servers will not be able to reach you! + from other hosts and also other homeservers will not be able to reach you! + +Template: matrix-conduit/address +Type: string +Default: 127.0.0.1 +Description: The listen address of the Matrix homeserver + This is the address the homeserver will listen on. Leave it set to 127.0.0.1 + when using a reverse proxy. Template: matrix-conduit/port Type: string Default: 14004 Description: The port of the Matrix homeserver - This port is often accessed by a reverse proxy. + This port is most often just accessed by a reverse proxy. From 1b4a79d47c7f91b6d3562520637d28f6f00ec6c9 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 20:50:58 +0100 Subject: [PATCH 0338/1727] Add and install README.Debian This file documents how the packaging is organized and how to configure and use it. It also details what the default deployment is like. --- Cargo.toml | 1 + debian/README.Debian | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 debian/README.Debian diff --git a/Cargo.toml b/Cargo.toml index d0dfcf4..76c52e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,6 +86,7 @@ section = "net" priority = "optional" assets = [ ["debian/env.local", "etc/matrix-conduit/local", "644"], + ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] diff --git a/debian/README.Debian b/debian/README.Debian new file mode 100644 index 0000000..69fb975 --- /dev/null +++ b/debian/README.Debian @@ -0,0 +1,29 @@ +Conduit for Debian +================== + +Configuration +------------- + +When installed, Debconf handles the configuration of the homeserver (host)name, +the address and port it listens on. These configuration variables end up in +/etc/matrix-conduit/debian. + +You can tweak more detailed settings by uncommenting and setting the variables +in /etc/matrix-conduit/local. This involves settings such as the maximum file +size for download/upload, enabling federation, etc. + +Running +------- + +The package uses the matrix-conduit.service systemd unit file to start and +stop Conduit. It loads the configuration files mentioned above to set up the +environment before running the server. + +This package assumes by default that Conduit is placed behind a reverse proxy +such as Apache or nginx. 
This default deployment entails just listening on +127.0.0.1 and the free port 14004 and is reachable via a client using the URL +http://localhost:14004. + +At a later stage this packaging may support also setting up TLS and running +stand-alone. In this case, however, you need to set up some certificates and +renewal, for it to work properly. From 1a341543ba155e3b4416368344cbb705690aceca Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 21:37:22 +0100 Subject: [PATCH 0339/1727] Lock down the Conduit process in the systemd unit This will secure the service more and allow only what is necessary. --- debian/matrix-conduit.service | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 42969c1..5ab7917 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -7,6 +7,33 @@ User=_matrix-conduit Group=_matrix-conduit Type=simple +AmbientCapabilities= +CapabilityBoundingSet= +LockPersonality=yes +MemoryDenyWriteExecute=yes +NoNewPrivileges=yes +ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes +ProtectHostname=yes +ProtectKernelLogs=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +PrivateDevices=yes +PrivateMounts=yes +PrivateTmp=yes +PrivateUsers=yes +RemoveIPC=yes +RestrictAddressFamilies=AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +SystemCallArchitectures=native +SystemCallFilter=@system-service +SystemCallErrorNumber=EPERM +StateDirectory=matrix-conduit + Environment="ROCKET_ENV=production" Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" EnvironmentFile=/etc/matrix-conduit/debian From e691e880e071b59d6baa13908dd0d75654572add Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 2 Dec 2020 15:49:50 +0100 Subject: [PATCH 0340/1727] fix: logging thread crash when admin room does not exist --- src/database/admin.rs | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index f8b2385..87a60a1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,6 +1,7 @@ use std::convert::{TryFrom, TryInto}; -use crate::{pdu::PduBuilder, Error}; +use crate::pdu::PduBuilder; +use log::warn; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{events::room::message, events::EventType, UserId}; use tokio::select; @@ -34,10 +35,12 @@ impl Admin { .try_into() .expect("#admins:server_name is a valid room alias"), ) - .unwrap() - .ok_or_else(|| Error::BadConfig("Conduit instance does not have an #admins room.")) .unwrap(); + if conduit_room.is_none() { + warn!("Conduit instance does not have an #admins room. Logging to that room will not work."); + } + loop { select! 
{ Some(event) = receiver.next() => { @@ -45,21 +48,23 @@ impl Admin { AdminCommand::SendTextMessage(message) => { println!("{:?}", message); - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: serde_json::to_value(message).expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - ).unwrap(); + if let Some(conduit_room) = &conduit_room { + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message).expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &db.globals, + &db.sending, + &db.admin, + &db.account_data, + ).unwrap(); + } } } } From 9439f2c18388dcca213623f4f7cc9f6df887f400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Nov 2020 23:13:06 +0100 Subject: [PATCH 0341/1727] feat: send logs into admin room Log entries will automatically be deduplicated, so a message won't be sent if the same line has already been sent in the last 30 mins --- src/database.rs | 8 +++--- src/database/globals.rs | 4 +-- src/error.rs | 61 ++++++++++++++++++++++++++++++++++++++++- src/main.rs | 17 ++++++++---- src/ruma_wrapper.rs | 2 +- src/server_server.rs | 18 ++++++------ 6 files changed, 87 insertions(+), 23 deletions(-) diff --git a/src/database.rs b/src/database.rs index 51c3895..3f860c9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -39,7 +39,7 @@ impl Database { /// Tries to remove the old database but ignores all errors. pub fn try_remove(server_name: &str) -> Result<()> { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or(Error::BadConfig( + .ok_or_else(|| Error::bad_config( "The OS didn't return a valid home directory path.", ))? .data_dir() @@ -59,7 +59,7 @@ impl Database { .map(|x| Ok::<_, Error>(x.to_owned())) .unwrap_or_else(|_| { let path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or(Error::BadConfig( + .ok_or_else(|| Error::bad_config( "The OS didn't return a valid home directory path.", ))? .data_dir() @@ -67,7 +67,7 @@ impl Database { Ok(path .to_str() - .ok_or(Error::BadConfig("Database path contains invalid unicode."))? + .ok_or_else(|| Error::bad_config("Database path contains invalid unicode."))? 
.to_owned()) })?; @@ -79,7 +79,7 @@ impl Database { .get_int("cache_capacity") .unwrap_or(1024 * 1024 * 1024), ) - .map_err(|_| Error::BadConfig("Cache capacity needs to be a u64."))?, + .map_err(|_| Error::bad_config("Cache capacity needs to be a u64."))?, ) .print_profile_on_drop(false) .open()?; diff --git a/src/database/globals.rs b/src/database/globals.rs index 37f10ee..359d064 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -62,12 +62,12 @@ impl Globals { .unwrap_or("localhost") .to_string() .try_into() - .map_err(|_| Error::BadConfig("Invalid server_name."))?, + .map_err(|_| Error::bad_config("Invalid server_name."))?, max_request_size: config .get_int("max_request_size") .unwrap_or(20 * 1024 * 1024) // Default to 20 MB .try_into() - .map_err(|_| Error::BadConfig("Invalid max_request_size."))?, + .map_err(|_| Error::bad_config("Invalid max_request_size."))?, registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), encryption_disabled: config.get_bool("encryption_disabled").unwrap_or(false), federation_enabled: config.get_bool("federation_enabled").unwrap_or(false), diff --git a/src/error.rs b/src/error.rs index f521da4..d54b3fa 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,7 +1,14 @@ +use std::{time::Duration, collections::HashMap, sync::RwLock, time::Instant}; + use log::error; -use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; +use ruma::{ + api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}, + events::room::message, +}; use thiserror::Error; +use crate::{database::admin::AdminCommand, Database}; + #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, @@ -53,6 +60,11 @@ impl Error { error!("BadDatabase: {}", message); Self::BadDatabase(message) } + + pub fn bad_config(message: &'static str) -> Self { + error!("BadConfig: {}", message); + Self::BadConfig(message) + } } #[cfg(feature = "conduit_bin")] @@ -95,3 +107,50 @@ where .respond_to(r) } } + +pub struct ConduitLogger { + pub db: Database, + pub last_logs: RwLock>, +} + +impl log::Log for ConduitLogger { + fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { + true + } + + fn log(&self, record: &log::Record<'_>) { + let output = format!("{} - {}", record.level(), record.args()); + + println!("{}", output); + + if self.enabled(record.metadata()) + && record + .module_path() + .map_or(false, |path| path.starts_with("conduit::")) + { + if self + .last_logs + .read() + .unwrap() + .get(&output) + .map_or(false, |i| i.elapsed() < Duration::from_secs(60 * 30)) + { + return; + } + + if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { + mut_last_logs.insert(output.clone(), Instant::now()); + } + + self.db.admin.send(AdminCommand::SendTextMessage( + message::TextMessageEventContent { + body: output, + formatted: None, + relates_to: None, + }, + )); + } + } + + fn flush(&self) {} +} diff --git a/src/main.rs b/src/main.rs index 8fb5fda..f2edc13 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,7 +11,8 @@ mod ruma_wrapper; mod utils; pub use database::Database; -pub use error::{Error, Result}; +pub use error::{ConduitLogger, Error, Result}; +use log::LevelFilter; pub use pdu::PduEvent; pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; @@ -19,6 +20,9 @@ pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use rocket::{fairing::AdHoc, routes}; fn setup_rocket() -> rocket::Rocket { + // Force log level off, so we can use our own logger + std::env::set_var("ROCKET_LOG", "off"); + rocket::ignite() .mount( "/", @@ -133,6 
+137,12 @@ fn setup_rocket() -> rocket::Rocket { let data = Database::load_or_create(rocket.config().await).expect("valid config"); data.sending.start_handler(&data.globals, &data.rooms); + log::set_boxed_logger(Box::new(ConduitLogger { + db: data.clone(), + last_logs: Default::default(), + })) + .unwrap(); + log::set_max_level(LevelFilter::Info); Ok(rocket.manage(data)) })) @@ -140,10 +150,5 @@ fn setup_rocket() -> rocket::Rocket { #[rocket::main] async fn main() { - // Default log level - if std::env::var("ROCKET_LOG").is_err() { - std::env::set_var("ROCKET_LOG", "critical"); - } - setup_rocket().launch().await.unwrap(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 7722a42..8da3e17 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -97,7 +97,7 @@ where handle.read_to_end(&mut body).await.unwrap(); let http_request = http_request.body(body.clone()).unwrap(); - log::info!("{:?}", http_request); + log::debug!("{:?}", http_request); match ::Incoming::try_from(http_request) { Ok(t) => Success(Ruma { diff --git a/src/server_server.rs b/src/server_server.rs index 0f24e15..0649bed 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -58,12 +58,12 @@ where T: Debug, { if !globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let resolver = AsyncResolver::tokio_from_system_conf() .await - .map_err(|_| Error::BadConfig("Failed to set up trust dns resolver with system config."))?; + .map_err(|_| Error::bad_config("Failed to set up trust dns resolver with system config."))?; let mut host = None; @@ -213,7 +213,7 @@ where #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] pub fn get_server_version(db: State<'_, Database>) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } Ok(get_server_version::Response { @@ -276,7 +276,7 @@ pub async fn get_public_rooms_filtered_route( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( @@ -323,7 +323,7 @@ pub async fn get_public_rooms_route( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( @@ -370,7 +370,7 @@ pub fn send_transaction_message_route<'a>( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } //dbg!(&*body); @@ -423,7 +423,7 @@ pub fn get_missing_events_route<'a>( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let mut queued_events = body.latest_events.clone(); @@ -468,7 +468,7 @@ pub fn get_profile_information_route<'a>( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let mut displayname = None; @@ -500,7 +500,7 @@ pub fn 
get_user_devices_route<'a>( body: Ruma>, ) -> ConduitResult { if !db.globals.federation_enabled() { - return Err(Error::BadConfig("Federation is disabled.")); + return Err(Error::bad_config("Federation is disabled.")); } let mut displayname = None; From 6e3608157301c726d137574f0407b1aeaaa07da3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 15 Nov 2020 12:17:21 +0100 Subject: [PATCH 0342/1727] improvement: more logging --- src/client_server/account.rs | 5 +++++ src/client_server/directory.rs | 8 +++++++- src/client_server/room.rs | 3 +++ src/client_server/session.rs | 3 +++ src/database.rs | 10 ++++------ src/error.rs | 2 +- src/server_server.rs | 6 +++--- 7 files changed, 26 insertions(+), 11 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 81119ba..f48543e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -2,6 +2,7 @@ use std::{collections::BTreeMap, convert::TryInto}; use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -510,6 +511,8 @@ pub async fn register_route( )?; } + info!("{} registered on this server", user_id); + db.flush().await?; Ok(register::Response { @@ -686,6 +689,8 @@ pub async fn deactivate_route( // Remove devices and mark account as deactivated db.users.deactivate_account(&sender_user)?; + info!("{} deactivated their account", sender_user); + db.flush().await?; Ok(deactivate::Response { diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 202417e..048b410 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,5 +1,6 @@ use super::State; use crate::{server_server, ConduitResult, Database, Error, Result, Ruma}; +use log::info; use ruma::{ api::{ client::{ @@ -82,8 +83,13 @@ pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + match body.visibility { - room::Visibility::Public => db.rooms.set_public(&body.room_id, true)?, + room::Visibility::Public => { + db.rooms.set_public(&body.room_id, true)?; + info!("{} made {} public", sender_user, body.room_id); + } room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index fdc9529..a50f69c 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,5 +1,6 @@ use super::State; use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -323,6 +324,8 @@ pub async fn create_room_route( db.rooms.set_public(&room_id, true)?; } + info!("{} created a room", sender_user); + db.flush().await?; Ok(create_room::Response::new(room_id).into()) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c8775ef..da3d8d8 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,5 +1,6 @@ use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, ConduitResult, Database, Error, Ruma}; +use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -93,6 +94,8 @@ pub async fn login_route( body.initial_device_display_name.clone(), )?; + info!("{} logged in", user_id); + db.flush().await?; Ok(login::Response { diff --git a/src/database.rs b/src/database.rs index 3f860c9..576a250 
100644 --- a/src/database.rs +++ b/src/database.rs @@ -39,9 +39,7 @@ impl Database { /// Tries to remove the old database but ignores all errors. pub fn try_remove(server_name: &str) -> Result<()> { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or_else(|| Error::bad_config( - "The OS didn't return a valid home directory path.", - ))? + .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))? .data_dir() .to_path_buf(); path.push(server_name); @@ -59,9 +57,9 @@ impl Database { .map(|x| Ok::<_, Error>(x.to_owned())) .unwrap_or_else(|_| { let path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or_else(|| Error::bad_config( - "The OS didn't return a valid home directory path.", - ))? + .ok_or_else(|| { + Error::bad_config("The OS didn't return a valid home directory path.") + })? .data_dir() .join(server_name); diff --git a/src/error.rs b/src/error.rs index d54b3fa..4c24fd7 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use std::{time::Duration, collections::HashMap, sync::RwLock, time::Instant}; +use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; use log::error; use ruma::{ diff --git a/src/server_server.rs b/src/server_server.rs index 0649bed..d8d0fa5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -61,9 +61,9 @@ where return Err(Error::bad_config("Federation is disabled.")); } - let resolver = AsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| Error::bad_config("Failed to set up trust dns resolver with system config."))?; + let resolver = AsyncResolver::tokio_from_system_conf().await.map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?; let mut host = None; From 96dd3b2880fedd218d7faa052e2f38febb90c690 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 27 Oct 2020 19:10:09 -0400 Subject: [PATCH 0343/1727] Update ruma to latest, fix unstable origin feature in ruma --- Cargo.lock | 487 +++++++++++++++++++++++------- Cargo.toml | 17 +- src/client_server/backup.rs | 23 +- src/client_server/capabilities.rs | 8 +- src/client_server/filter.rs | 8 +- src/client_server/keys.rs | 8 +- src/client_server/media.rs | 24 +- src/client_server/membership.rs | 58 ++-- src/client_server/state.rs | 54 ++-- src/database/rooms.rs | 48 +-- src/database/users.rs | 6 +- src/ruma_wrapper.rs | 47 +-- src/server_server.rs | 23 +- 13 files changed, 577 insertions(+), 234 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6fed2af..461972b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -44,9 +44,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" @@ -221,7 +221,7 @@ dependencies = [ "reqwest", "ring", "rocket", - "ruma", + "ruma 0.0.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "rust-argon2", "serde", "serde_json", @@ -273,11 +273,11 @@ checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = 
"81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -464,9 +464,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8e3078b7b2a8a671cb7a3d17b4760e4181ea243227776ba83fd043b4ca034e" +checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" dependencies = [ "futures-channel", "futures-core", @@ -479,9 +479,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a4d35f7401e948629c9c3d6638fb9bf94e0b2121e96c3b428cc4e631f3eb74" +checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" dependencies = [ "futures-core", "futures-sink", @@ -489,15 +489,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d674eaa0056896d5ada519900dbf97ead2e46a7b6621e8160d79e2f2e1e2784b" +checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" [[package]] name = "futures-executor" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc709ca1da6f66143b8c9bec8e6260181869893714e9b5a490b169b0414144ab" +checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" dependencies = [ "futures-core", "futures-task", @@ -506,15 +506,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc94b64bb39543b4e432f1790b6bf18e3ee3b74653c5449f63310e9a74b123c" +checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" [[package]] name = "futures-macro" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f57ed14da4603b2554682e9f2ff3c65d7567b53188db96cb71538217fc64581b" +checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -524,24 +524,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d8764258ed64ebc5d9ed185cf86a95db5cac810269c5d20ececb32e0088abbd" +checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" [[package]] name = "futures-task" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd26820a9f3637f1302da8bceba3ff33adbe53464b54ca24d4e2d4f1db30f94" +checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a894a0acddba51a2d49a6f4263b1e64b8c579ece8af50fa86503d52cd1eea34" +checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" dependencies = [ "futures-channel", "futures-core", @@ -550,7 +550,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project 1.0.1", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -566,6 +566,19 @@ dependencies = [ "byteorder", ] 
+[[package]] +name = "generator" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + [[package]] name = "getrandom" version = "0.1.15" @@ -601,9 +614,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ "bytes", "fnv", @@ -616,6 +629,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "tracing-futures", ] [[package]] @@ -702,7 +716,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", + "pin-project 0.4.27", "socket2", "tokio", "tower-service", @@ -736,12 +750,13 @@ dependencies = [ [[package]] name = "image" -version = "0.23.10" +version = "0.23.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985fc06b1304d19c28d5c562ed78ef5316183f2b0053b46763a0b94862373c34" +checksum = "b4f0a8345b33b082aedec2f4d7d4a926b845cee184cbe78b703413066564431b" dependencies = [ "bytemuck", "byteorder", + "color_quant", "gif", "jpeg-decoder", "num-iter", @@ -768,11 +783,11 @@ checksum = "cb6ee2a7da03bfc3b66ca47c92c2e392fcc053ea040a85561749b026f7aad09a" [[package]] name = "instant" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66" +checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -862,9 +877,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" [[package]] name = "linked-hash-map" @@ -890,6 +905,19 @@ dependencies = [ "cfg-if 0.1.10", ] +[[package]] +name = "loom" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +dependencies = [ + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -1218,7 +1246,16 @@ version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ - "pin-project-internal", + "pin-project-internal 0.4.27", +] + +[[package]] +name = "pin-project" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +dependencies = [ + "pin-project-internal 1.0.1", ] [[package]] @@ -1233,10 +1270,21 @@ dependencies = [ ] [[package]] -name = "pin-project-lite" -version = "0.1.10" +name = "pin-project-internal" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e555d9e657502182ac97b539fb3dae8b79cda19e3e4f8ffb5e8de4f18df93c95" +checksum = 
"81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-utils" @@ -1559,38 +1607,88 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ - "ruma-api", - "ruma-appservice-api", - "ruma-client-api", - "ruma-common", - "ruma-events", - "ruma-federation-api", - "ruma-identifiers", - "ruma-serde", - "ruma-signatures", + "assign", + "js_int", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-appservice-api 0.2.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-client-api 0.10.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-federation-api 0.0.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-signatures 0.6.0-dev.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", +] + +[[package]] +name = "ruma" +version = "0.0.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "assign", + "js_int", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-appservice-api 0.2.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-client-api 0.10.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-federation-api 0.0.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-signatures 0.6.0-dev.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", ] [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "http", "percent-encoding", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", + "ruma-api-macros 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-common 0.2.0 
(git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", "serde", "serde_json", "strum", + "thiserror", +] + +[[package]] +name = "ruma-api" +version = "0.17.0-alpha.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "http", + "percent-encoding", + "ruma-api-macros 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "serde", + "serde_json", + "strum", + "thiserror", ] [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-api-macros" +version = "0.17.0-alpha.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1601,12 +1699,25 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-appservice-api" +version = "0.2.0-alpha.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde", "serde_json", ] @@ -1614,17 +1725,38 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "assign", "http", "js_int", + "maplit", "percent-encoding", - "ruma-api", - "ruma-common", - 
"ruma-events", - "ruma-identifiers", - "ruma-serde", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-client-api" +version = "0.10.0-alpha.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "assign", + "http", + "js_int", + "maplit", + "percent-encoding", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde", "serde_json", "strum", @@ -1633,12 +1765,63 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "js_int", - "ruma-api", - "ruma-identifiers", - "ruma-serde", + "ruma-common-macros 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-common" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "js_int", + "ruma-common-macros 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "serde", + "serde_json", + "strum", +] + +[[package]] +name = "ruma-common-macros" +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-common-macros" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-events" +version = "0.22.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "js_int", + "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-events-macros 0.22.0-alpha.1 
(git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", "serde", "serde_json", "strum", @@ -1647,13 +1830,13 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "js_int", - "ruma-common", - "ruma-events-macros", - "ruma-identifiers", - "ruma-serde", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-events-macros 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde", "serde_json", "strum", @@ -1662,7 +1845,18 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-events-macros" +version = "0.22.0-alpha.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1673,14 +1867,29 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "js_int", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-federation-api" +version = "0.0.3" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "js_int", + "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde", "serde_json", ] @@ -1688,11 +1897,24 @@ dependencies = 
[ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "ruma-identifiers-macros 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-identifiers-validation 0.1.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde", + "strum", +] + +[[package]] +name = "ruma-identifiers" +version = "0.17.4" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "rand", - "ruma-identifiers-macros", - "ruma-identifiers-validation", + "ruma-identifiers-macros 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers-validation 0.1.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde", "strum", ] @@ -1700,18 +1922,38 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "proc-macro2", "quote", - "ruma-identifiers-validation", + "ruma-identifiers-validation 0.1.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "syn", +] + +[[package]] +name = "ruma-identifiers-macros" +version = "0.17.4" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "proc-macro2", + "quote", + "ruma-identifiers-validation 0.1.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "syn", ] [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "serde", + "strum", +] + +[[package]] +name = "ruma-identifiers-validation" +version = "0.1.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "serde", "strum", @@ -1720,7 +1962,19 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +dependencies = [ + "form_urlencoded", + "itoa", + "js_int", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-serde" +version = "0.2.3" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "form_urlencoded", "itoa", @@ -1732,10 +1986,25 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = 
"git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#47fab87325b71b7f6c2fb3cd276d1f813e42abf7" +source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "base64", "ring", + "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "serde_json", + "untrusted", +] + +[[package]] +name = "ruma-signatures" +version = "0.6.0-dev.1" +source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" +dependencies = [ + "base64", + "ring", + "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", "serde_json", "untrusted", ] @@ -1754,9 +2023,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2610b7f643d18c87dff3b489950269617e6601a51f1f05aa5daefee36f64f0b" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" [[package]] name = "rustc_version" @@ -1796,6 +2065,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + [[package]] name = "scopeguard" version = "1.1.0" @@ -1901,11 +2176,12 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" dependencies = [ "lazy_static", + "loom", ] [[package]] @@ -1982,12 +2258,11 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=spec-comp#a7d76935f12757aecfee305838069c9bcbe7d34a" dependencies = [ "itertools", "js_int", "maplit", - "ruma", + "ruma 0.0.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", "serde", "serde_json", "thiserror", @@ -2067,9 +2342,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" dependencies = [ "proc-macro2", "quote", @@ -2292,6 +2567,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-futures" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +dependencies = [ + "pin-project 0.4.27", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.1" @@ -2315,9 +2600,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = 
"2810660b9d5b18895d140caba6401765749a6a162e5d0736cfc44ea50db9d79d" dependencies = [ "ansi_term", "chrono", @@ -2561,9 +2846,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e26e7a4d998e3d7949c69444b8b4916bac810da0d3a82ae612c89e952782f4" +checksum = "8795d6e0e17485803cc10ef126bb8c0d59b7c61b219d66cfe0b3216dd0e8580a" [[package]] name = "widestring" diff --git a/Cargo.toml b/Cargo.toml index 8b29be8..a6f89b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,12 +19,13 @@ rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_p # Used for matrix spec type definitions and helpers #ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } -ruma = { git = "https://github.com/timokoesters/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "timo-fed-fixes" } -#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +# ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec"] } +# state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec"] } # Used for long polling tokio = "0.2.22" @@ -72,3 +73,11 @@ required-features = ["conduit_bin"] [lib] name = "conduit" path = "src/lib.rs" + +# [patch."https://github.com/timokoesters/ruma"] +# ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"] } +# # ruma = { git = "https://github.com/ruma/ruma", rev = "64b9c646d15a359d62ab464a95176ff94adb2554", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"]} + +[patch."https://github.com/ruma/state-res"] +state-res = { path = "../../state-res", features = ["unstable-pre-spec"] } +# state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp" } \ No newline at end of file diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index c84af0a..607fa39 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -107,7 +107,7 @@ pub async fn get_backup_route( )] pub async fn delete_backup_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -158,7 +158,7 @@ pub async fn add_backup_keys_route( )] pub async fn 
add_backup_key_sessions_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -189,7 +189,7 @@ pub async fn add_backup_key_sessions_route( )] pub async fn add_backup_key_session_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -232,7 +232,7 @@ pub async fn get_backup_keys_route( )] pub async fn get_backup_key_sessions_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -249,13 +249,14 @@ pub async fn get_backup_key_sessions_route( )] pub async fn get_backup_key_session_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let key_data = - db.key_backups - .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)?; + let key_data = db + .key_backups + .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? + .ok_or_else(|| Error::BadDatabase("Backup key not found for this user's session"))?; Ok(get_backup_key_session::Response { key_data }.into()) } @@ -266,7 +267,7 @@ pub async fn get_backup_key_session_route( )] pub async fn delete_backup_keys_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -288,7 +289,7 @@ pub async fn delete_backup_keys_route( )] pub async fn delete_backup_key_sessions_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -310,7 +311,7 @@ pub async fn delete_backup_key_sessions_route( )] pub async fn delete_backup_key_session_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 54c08ba..fa12a08 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -22,11 +22,11 @@ pub async fn get_capabilities_route() -> ConduitResult ConduitResult { // TODO Ok(get_filter::Response::new(filter::IncomingFilterDefinition { event_fields: None, - event_format: None, - account_data: None, - room: None, - presence: None, + event_format: filter::EventFormat::default(), + account_data: filter::IncomingFilter::default(), + room: filter::IncomingRoomFilter::default(), + presence: filter::IncomingFilter::default(), }) .into()) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 58c79da..8426518 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -11,7 +11,7 @@ use ruma::{ uiaa::{AuthFlow, UiaaInfo}, }, }, - encryption::IncomingUnsignedDeviceInfo, + encryption::UnsignedDeviceInfo, }; use std::collections::{BTreeMap, HashSet}; @@ -24,7 +24,7 @@ use rocket::{get, post}; )] pub async fn upload_keys_route( db: State<'_, Database>, - body: Ruma>, + body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -94,7 +94,7 @@ pub async fn get_keys_route( Error::bad_database("all_device_keys contained nonexistent device.") 
})?; - keys.unsigned = IncomingUnsignedDeviceInfo { + keys.unsigned = UnsignedDeviceInfo { device_display_name: metadata.display_name, }; @@ -113,7 +113,7 @@ pub async fn get_keys_route( ), )?; - keys.unsigned = IncomingUnsignedDeviceInfo { + keys.unsigned = UnsignedDeviceInfo { device_display_name: metadata.display_name, }; diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 96874cc..af7880c 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -39,7 +39,7 @@ pub async fn create_content_route( db.media.create( mxc.clone(), &body.filename.as_deref(), - &body.content_type, + body.content_type.as_deref().unwrap_or("img"), // TODO this is now optional handle &body.file, )?; @@ -66,8 +66,8 @@ pub async fn get_content_route( { Ok(get_content::Response { file, - content_type, - content_disposition: filename.unwrap_or_default(), // TODO: Spec says this should be optional + content_type: Some(content_type), + content_disposition: filename, } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { @@ -84,8 +84,11 @@ pub async fn get_content_route( db.media.create( mxc, - &Some(&get_content_response.content_disposition), - &get_content_response.content_type, + &get_content_response.content_disposition.as_deref(), + get_content_response // TODO this is now optional handle + .content_type + .as_deref() + .unwrap_or("img"), &get_content_response.file, )?; @@ -116,7 +119,11 @@ pub async fn get_content_thumbnail_route( .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, )? { - Ok(get_content_thumbnail::Response { file, content_type }.into()) + Ok(get_content_thumbnail::Response { + file, + content_type: Some(content_type), + } + .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = server_server::send_request( &db.globals, @@ -135,7 +142,10 @@ pub async fn get_content_thumbnail_route( db.media.upload_thumbnail( mxc, &None, - &get_thumbnail_response.content_type, + get_thumbnail_response + .content_type + .as_deref() + .unwrap_or("img"), // TODO now optional, deal with it somehow body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 25cad85..50a8cca 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -23,7 +23,11 @@ use ruma::{ }; use state_res::StateEvent; use std::{ - collections::BTreeMap, collections::HashMap, collections::HashSet, convert::TryFrom, iter, + collections::BTreeMap, + collections::HashMap, + collections::HashSet, + convert::{TryFrom, TryInto}, + iter, sync::Arc, }; @@ -509,31 +513,42 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"), ); + // TODO fixup CanonicalJsonValue + // use that instead of serde_json::Map... maybe? 
+ let mut canon_json_stub = + serde_json::from_value(join_event_stub_value).expect("json Value is canonical JSON"); // Generate event id let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&join_event_stub_value) + ruma::signatures::reference_hash(&canon_json_stub, &RoomVersionId::Version6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms - let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); - join_event_stub.remove("event_id"); + // let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); + // join_event_stub.remove("event_id"); + + canon_json_stub.remove("event_id"); ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), - &mut join_event_stub_value, + &mut canon_json_stub, + &RoomVersionId::Version6, ) .expect("event is valid, we just created it"); // Add event_id back - let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); - join_event_stub.insert("event_id".to_owned(), event_id.to_string().into()); + canon_json_stub.insert( + "event_id".to_owned(), + serde_json::json!(event_id) + .try_into() + .expect("EventId is a valid CanonicalJsonValue"), + ); // It has enough fields to be called a proper event now - let join_event = join_event_stub_value; + let join_event = canon_json_stub; let send_join_response = server_server::send_request( &db.globals, @@ -541,27 +556,31 @@ async fn join_room_by_id_helper( federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, - pdu_stub: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + pdu_stub: PduEvent::convert_to_outgoing_federation_event( + serde_json::to_value(&join_event) + .expect("we just validated and ser/de this event"), + ), }, ) .await?; - let add_event_id = |pdu: &Raw| { + let add_event_id = |pdu: &Raw| -> Result<(EventId, serde_json::Value)> { let mut value = serde_json::from_str(pdu.json().get()) .expect("converting raw jsons to values always works"); let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&value) + ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); - value - .as_object_mut() - .ok_or_else(|| Error::BadServerResponse("PDU is not an object."))? - .insert("event_id".to_owned(), event_id.to_string().into()); + value.insert( + "event_id".to_owned(), + serde_json::from_value(serde_json::json!(event_id)) + .expect("a valid EventId can be converted to CanonicalJsonValue"), + ); - Ok((event_id, value)) + Ok((event_id, serde_json::json!(value))) // TODO CanonicalJsonValue fixup? 
}; let room_state = send_join_response.room_state.state.iter().map(add_event_id); @@ -580,7 +599,10 @@ async fn join_room_by_id_helper( let mut event_map = room_state .chain(auth_chain) - .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created + .chain(iter::once(Ok(( + event_id, + serde_json::to_value(join_event).unwrap(), + )))) // Add join event we just created .map(|r| { let (event_id, value) = r?; serde_json::from_value::(value.clone()) @@ -595,7 +617,7 @@ async fn join_room_by_id_helper( let control_events = event_map .values() .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id().clone()) + .map(|pdu| pdu.event_id()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec diff --git a/src/client_server/state.rs b/src/client_server/state.rs index ca6bdf7..3777862 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -99,14 +99,14 @@ pub async fn send_state_event_for_empty_key_route( )] pub async fn get_state_events_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? { - if !matches!( + if !db.rooms.is_joined(sender_user, &body.room_id)? + && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|(_, event)| { @@ -119,12 +119,12 @@ pub async fn get_state_events_route( .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) - ) { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); - } + ) + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } Ok(get_state_events::Response { @@ -144,14 +144,14 @@ pub async fn get_state_events_route( )] pub async fn get_state_events_for_key_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? { - if !matches!( + if !db.rooms.is_joined(sender_user, &body.room_id)? + && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|(_, event)| { @@ -164,12 +164,12 @@ pub async fn get_state_events_for_key_route( .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) - ) { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); - } + ) + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } let event = db @@ -194,14 +194,14 @@ pub async fn get_state_events_for_key_route( )] pub async fn get_state_events_for_empty_key_route( db: State<'_, Database>, - body: Ruma, + body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? { - if !matches!( + if !db.rooms.is_joined(sender_user, &body.room_id)? 
+ && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|(_, event)| { @@ -214,12 +214,12 @@ pub async fn get_state_events_for_empty_key_route( .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) - ) { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view the room state.", - )); - } + ) + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view the room state.", + )); } let event = db diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8ab900f..d9238a9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -15,7 +15,7 @@ use ruma::{ }, EventType, }, - EventId, Raw, RoomAliasId, RoomId, ServerName, UserId, + EventId, Raw, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; @@ -196,7 +196,7 @@ impl Rooms { Ok(self.pduid_statehash.get(pdu_id)?) } - /// Returns the last state hash key added to the db. + /// Returns the last state hash key added to the db for the given room. pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { Ok(self.roomid_statehash.get(room_id.as_bytes())?) } @@ -249,7 +249,7 @@ impl Rooms { .is_some()) } - /// Returns the full room state. + /// Force the creation of a new StateHash and insert it into the db. pub fn force_state( &self, room_id: &RoomId, @@ -436,6 +436,7 @@ impl Rooms { Ok(()) } + #[allow(clippy::too_many_arguments)] /// Creates a new persisted data unit and adds it to a room. pub fn append_pdu( &self, @@ -687,7 +688,7 @@ impl Rooms { } EventType::RoomMember => { let prev_event = self - .get_pdu(prev_events.iter().next().ok_or(Error::BadRequest( + .get_pdu(prev_events.get(0).ok_or(Error::BadRequest( ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
@@ -703,7 +704,7 @@ impl Rooms { sender: &sender, }, prev_event, - None, + None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { @@ -761,7 +762,7 @@ impl Rooms { } let mut pdu = PduEvent { - event_id: EventId::try_from("$thiswillbefilledinlater").expect("we know this is valid"), + event_id: ruma::event_id!("$thiswillbefilledinlater"), room_id: room_id.clone(), sender: sender.clone(), origin_server_ts: utils::millis_since_unix_epoch() @@ -787,37 +788,42 @@ impl Rooms { }; // Hash and sign - let mut pdu_json = serde_json::to_value(&pdu).expect("event is valid, we just created it"); - pdu_json - .as_object_mut() - .expect("json is object") - .remove("event_id"); + let mut pdu_json: BTreeMap = + serde_json::from_value(serde_json::json!(&pdu)) + .expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); // Add origin because synapse likes that (and it's required in the spec) - pdu_json - .as_object_mut() - .expect("json is object") - .insert("origin".to_owned(), globals.server_name().as_str().into()); + pdu_json.insert( + "origin".to_owned(), + serde_json::json!(globals.server_name()) + .try_into() + .expect("server name is a valid CanonicalJsonValue"), + ); ruma::signatures::hash_and_sign_event( globals.server_name().as_str(), globals.keypair(), &mut pdu_json, + &RoomVersionId::Version6, ) .expect("event is valid, we just created it"); // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&pdu_json) + ruma::signatures::reference_hash(&pdu_json, &RoomVersionId::Version6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); - pdu_json - .as_object_mut() - .expect("json is object") - .insert("event_id".to_owned(), pdu.event_id.to_string().into()); + pdu_json.insert( + "event_id".to_owned(), + serde_json::json!(pdu.event_id) + .try_into() + .expect("EventId is a valid CanonicalJsonValue"), + ); // Increment the last index and use that // This is also the next_batch/since value @@ -832,7 +838,7 @@ impl Rooms { self.append_pdu( &pdu, - &pdu_json, + &serde_json::json!(pdu_json), // TODO fixup CanonicalJsonValue count, pdu_id.clone().into(), globals, diff --git a/src/database/users.rs b/src/database/users.rs index 2a03960..885c041 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ use ruma::{ keys::{CrossSigningKey, OneTimeKey}, }, }, - encryption::IncomingDeviceKeys, + encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, Raw, UserId, }; @@ -401,7 +401,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - device_keys: &IncomingDeviceKeys, + device_keys: &DeviceKeys, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -631,7 +631,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8da3e17..8dfd208 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,7 +1,8 @@ use crate::Error; use ruma::{ - api::{Outgoing, OutgoingRequest}, + api::{AuthScheme, OutgoingRequest}, identifiers::{DeviceId, UserId}, + Outgoing, }; use std::{convert::TryFrom, convert::TryInto, ops::Deref}; @@ -61,28 +62,30 @@ where .await .expect("database was loaded"); - let (sender_user, sender_device) = if 
T::METADATA.requires_authentication { - // Get token from header or query value - let token = match request - .headers() - .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) - { - // TODO: M_MISSING_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some(token) => token, - }; + let (sender_user, sender_device) = + // TODO: Do we need to matches! anything else here? ServerSignatures + if matches!(T::METADATA.authentication, AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken) { + // Get token from header or query value + let token = match request + .headers() + .get_one("Authorization") + .map(|s| s[7..].to_owned()) // Split off "Bearer " + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) + { + // TODO: M_MISSING_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(token) => token, + }; - // Check if token is valid - match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), - } - } else { - (None, None) - }; + // Check if token is valid + match db.users.find_from_token(&token).unwrap() { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), + } + } else { + (None, None) + }; let mut http_request = http::Request::builder() .uri(request.uri().to_string()) diff --git a/src/server_server.rs b/src/server_server.rs index d8d0fa5..5b70780 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -17,11 +17,11 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, ServerName, + EventId, RoomVersionId, ServerName, }; use std::{ collections::BTreeMap, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, time::{Duration, SystemTime}, }; @@ -95,7 +95,7 @@ where let mut http_request = request .try_into_http_request(&actual_destination, Some("")) .map_err(|e| { - warn!("{}: {}", actual_destination, e); + warn!("failed to find destination {}: {}", actual_destination, e); Error::BadServerResponse("Invalid destination") })?; @@ -122,7 +122,9 @@ where request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); request_map.insert("destination".to_owned(), destination.as_str().into()); - let mut request_json = request_map.into(); + let mut request_json = + serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + ruma::signatures::sign_json( globals.server_name().as_str(), globals.keypair(), @@ -130,6 +132,9 @@ where ) .expect("our request json is what ruma expects"); + let request_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + let signatures = request_json["signatures"] .as_object() .unwrap() @@ -234,7 +239,9 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()), + format!("ed25519:{}", db.globals.keypair().version()) + .try_into() + .expect("DB stores valid ServerKeyId's"), VerifyKey { key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, @@ -259,7 +266,7 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { &mut response, ) .unwrap(); - Json(response.to_string()) + 
Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical")) } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] @@ -365,7 +372,7 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") )] -pub fn send_transaction_message_route<'a>( +pub async fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { @@ -451,7 +458,7 @@ pub fn get_missing_events_route<'a>( ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); + events.push(serde_json::from_value(pdu).expect("Raw<..> is always valid")); } i += 1; } From db8a0c5d69ada292879a8356e10eb96c6f961c8f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 14:44:02 -0500 Subject: [PATCH 0344/1727] Add closest_parent method to Rooms Db insert in order /send pdus --- Cargo.lock | 1 + Cargo.toml | 8 +- src/database/rooms.rs | 57 +++++++- src/server_server.rs | 296 ++++++++++++++++++++++++++++++++++++++---- 4 files changed, 334 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 461972b..3f8703e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2258,6 +2258,7 @@ checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" [[package]] name = "state-res" version = "0.1.0" +source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#1cd1a16cdefabb126a781a50b3d5eb1fdb3d3afb" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index a6f89b9..b72b92f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client- # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec"] } # Used for long polling @@ -78,6 +78,6 @@ path = "src/lib.rs" # ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"] } # # ruma = { git = "https://github.com/ruma/ruma", rev = "64b9c646d15a359d62ab464a95176ff94adb2554", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"]} -[patch."https://github.com/ruma/state-res"] -state-res = { path = "../../state-res", features = ["unstable-pre-spec"] } -# state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp" } \ No newline at end of file +# [patch."https://github.com/ruma/state-res"] +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +# # state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp" } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d9238a9..28e1f60 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -35,6 +35,11 @@ use super::admin::AdminCommand; /// hashing the entire state. 
pub type StateHashId = IVec; +pub enum ClosestParent { + Append, + Insert(u64), +} + #[derive(Clone)] pub struct Rooms { pub edus: edus::RoomEdus, @@ -74,7 +79,10 @@ impl StateStore for Rooms { .get_pdu_id(event_id) .map_err(StateError::custom)? .ok_or_else(|| { - StateError::NotFound("PDU via room_id and event_id not found in the db.".into()) + StateError::NotFound(format!( + "PDU via room_id and event_id not found in the db.\n{}", + event_id.as_str() + )) })?; serde_json::from_slice( @@ -395,6 +403,47 @@ impl Rooms { } } + pub fn get_closest_parent( + &self, + incoming_prev_ids: &[EventId], + their_state: &BTreeMap>, + ) -> Result> { + match self.pduid_pdu.last()? { + Some(val) + if incoming_prev_ids.contains( + &serde_json::from_slice::(&val.1) + .map_err(|_| { + Error::bad_database("last DB entry contains invalid PDU bytes") + })? + .event_id, + ) => + { + Ok(Some(ClosestParent::Append)) + } + _ => { + let mut prev_ids = incoming_prev_ids.to_vec(); + while let Some(id) = prev_ids.pop() { + match self.get_pdu_id(&id)? { + Some(pdu_id) => { + return Ok(Some(ClosestParent::Insert(self.pdu_count(&pdu_id)?))); + } + None => { + prev_ids.extend(their_state.get(&id).map_or( + Err(Error::BadServerResponse( + "Failed to find previous event for PDU in state", + )), + // `prev_event_ids` will return an empty Vec instead of failing + // so it works perfect for our use here + |pdu| Ok(pdu.prev_event_ids()), + )?); + } + } + } + Ok(None) + } + } + } + /// Returns the leaf pdus of a room. pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); @@ -438,6 +487,9 @@ impl Rooms { #[allow(clippy::too_many_arguments)] /// Creates a new persisted data unit and adds it to a room. + /// + /// By this point the incoming event should be fully authenticated, no auth happens + /// in `append_pdu`. pub fn append_pdu( &self, pdu: &PduEvent, @@ -554,6 +606,7 @@ impl Rooms { self.stateid_pduid .scan_prefix(&prefix) .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) + // Chop the old state_hash out leaving behind the (EventType, StateKey) .map(|(k, v)| (k.subslice(prefix.len(), k.len() - prefix.len()), v)) .collect::>() } else { @@ -851,7 +904,7 @@ impl Rooms { .filter_map(|r| r.ok()) .filter(|server| &**server != globals.server_name()) { - sending.send_pdu(server, &pdu_id)?; + sending.send_pdu(&server, &pdu_id)?; } Ok(pdu.event_id) diff --git a/src/server_server.rs b/src/server_server.rs index 5b70780..41520fb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,7 @@ -use crate::{client_server, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{ + client_server, database::rooms::ClosestParent, utils, ConduitResult, Database, Error, PduEvent, + Result, Ruma, +}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::warn; @@ -381,44 +384,266 @@ pub async fn send_transaction_message_route<'a>( } //dbg!(&*body); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? + // SPEC: + // Servers MUST strictly enforce the JSON format specified in the appendices. + // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of + // events over federation. For example, the Federation API's /send endpoint would + // discard the event whereas the Client Server API's /send/{eventType} endpoint + // would return a M_BAD_JSON error. 
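// ---------------------------------------------------------------------------
// Editor's aside, not part of this patch: the ClosestParent handling added
// above works because `pduid_pdu` is an ordered, byte-keyed tree. A PDU id is
// `room_id ++ 0xff ++ count (big-endian u64)`, and when an out-of-order event
// has to be slotted in behind a known ancestor, a trailing 0x01 byte makes the
// new key sort after the ancestor but before the next count. A minimal sketch
// of that key construction (the helper name is ours; the layout is copied from
// the append_pdu call sites below):
fn make_pdu_id(room_id: &ruma::RoomId, count: u64, insert_after_parent: bool) -> Vec<u8> {
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(0xff);
    pdu_id.extend_from_slice(&count.to_be_bytes());
    if insert_after_parent {
        // Repeated inserts behind the same ancestor would push further bytes.
        pdu_id.push(1);
    }
    pdu_id
}
// ---------------------------------------------------------------------------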
+ let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - let mut value = serde_json::from_str(pdu.json().get()) - .expect("converting raw jsons to values always works"); - - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value).expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - value - .as_object_mut() - .expect("ruma pdus are json objects") - .insert("event_id".to_owned(), event_id.to_string().into()); - + println!("LOOP"); + let (event_id, value) = process_incoming_pdu(pdu); let pdu = serde_json::from_value::(value.clone()) .expect("all ruma pdus are conduit pdus"); - if db.rooms.exists(&pdu.room_id)? { + let room_id = &pdu.room_id; + + if value.get("state_key").is_none() { + if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { + // TODO: auth rules apply to all events, not only those with a state key + log::error!("Unauthorized {}", pdu.kind); + return Err(Error::BadRequest( + ruma::api::client::error::ErrorKind::Forbidden, + "Event is not authorized", + )); + } + + // TODO: We should be doing the same get_closest_parent thing here too? + // same as for state events ~100 lines down let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, &pdu)?; db.rooms.append_pdu( &pdu, &value, count, - pdu_id.clone().into(), + pdu_id.into(), &db.globals, &db.account_data, &db.admin, )?; + + resolved_map.insert(event_id, Ok::<(), String>(())); + continue; } + + let now = std::time::Instant::now(); + let get_state_response = match send_request( + &db.globals, + body.body.origin.clone(), + ruma::api::federation::event::get_room_state::v1::Request { + room_id, + event_id: &event_id, + }, + ) + .await + { + Ok(res) => res, + // We can't hard fail because there are some valid errors, just + // keep checking PDU's + // + // As an example a possible error + // {"errcode":"M_FORBIDDEN","error":"Host not in room."} + Err(err) => { + log::error!("Request failed: {}", err); + resolved_map.insert(event_id, Err(err.to_string())); + dbg!(now.elapsed()); + continue; + } + }; + dbg!(now.elapsed()); + + let their_current_state = get_state_response + .pdus + .iter() + .chain(get_state_response.auth_chain.iter()) // add auth events + .map(|pdu| { + let (event_id, json) = process_incoming_pdu(pdu); + ( + event_id.clone(), + std::sync::Arc::new( + // When creating a StateEvent the event_id arg will be used + // over any found in the json and it will not use ruma::reference_hash + // to generate one + state_res::StateEvent::from_id_value(event_id, json) + .expect("valid pdu json"), + ), + ) + }) + .collect::>(); + + if value.get("state_key").is_none() { + if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { + // TODO: auth rules apply to all events, not only those with a state key + log::error!("Sender is not joined {}", pdu.kind); + + resolved_map.insert(event_id, Err("User is not in this room".into())); + continue; + } + + // // TODO: We should be doing the same get_closest_parent thing here too? 
+ // // same as for state events ~100 lines down + // let count = db.globals.next_count()?; + // let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + // pdu_id.push(0xff); + // pdu_id.extend_from_slice(&count.to_be_bytes()); + // db.rooms.append_pdu( + // &pdu, + // &value, + // count, + // pdu_id.into(), + // &db.globals, + // &db.account_data, + // &db.sending, + // )?; + + // If the event is older than the last event in pduid_pdu Tree then find the + // closest ancestor we know of and insert after the known ancestor by + // altering the known events pduid to = same roomID + same count bytes + 0x1 + // pushing a single byte every time a simple append cannot be done. + match db + .rooms + .get_closest_parent(&pdu.prev_events, &their_current_state)? + { + Some(ClosestParent::Append) => { + let count = db.globals.next_count()?; + dbg!(&count); + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.into(), + &db.globals, + &db.account_data, + &db.sending, + )?; + } + Some(ClosestParent::Insert(old_count)) => { + let count = old_count; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + // Create a new count that is after old_count but before + // the pdu appended after + pdu_id.push(1); + + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.into(), + &db.globals, + &db.account_data, + &db.sending, + )?; + } + _ => panic!("Not a sequential event or no parents found"), + }; + resolved_map.insert(event_id, Ok::<(), String>(())); + continue; + } + + let our_current_state = db.rooms.room_state_full(room_id)?; + match state_res::StateResolution::resolve( + room_id, + &ruma::RoomVersionId::Version6, + &[ + our_current_state + .iter() + .map(|((ev, sk), v)| ((ev.clone(), sk.to_owned()), v.event_id.clone())) + .collect::>(), + // TODO we may not want the auth events chained in here for resolution? + their_current_state + .iter() + .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id().clone())) + .collect::>(), + ], + Some( + our_current_state + .iter() + .map(|(_k, v)| (v.event_id.clone(), v.convert_for_state_res())) + .chain( + their_current_state + .iter() + .map(|(id, ev)| (id.clone(), ev.clone())), + ) + .collect::>(), + ), + &db.rooms, + ) { + Ok(resolved) if resolved.values().any(|id| &event_id == id) => { + // If the event is older than the last event in pduid_pdu Tree then find the + // closest ancestor we know of and insert after the known ancestor by + // altering the known events pduid to = same roomID + same count bytes + 0x1 + // pushing a single byte every time a simple append cannot be done. + match db + .rooms + .get_closest_parent(&pdu.prev_events, &their_current_state)? 
+ { + Some(ClosestParent::Append) => { + let count = db.globals.next_count()?; + dbg!(&count); + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.into(), + &db.globals, + &db.account_data, + &db.sending, + )?; + } + Some(ClosestParent::Insert(old_count)) => { + println!("INSERT PDU FOUND {}", old_count); + let count = old_count; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + // Create a new count that is after old_count but before + // the pdu appended after + pdu_id.push(1); + + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.into(), + &db.globals, + &db.account_data, + &db.sending, + )?; + } + _ => panic!("Not a sequential event or no parents found"), + } + + resolved_map.insert(event_id, Ok::<(), String>(())); + } + // If the eventId is not found in the resolved state auth has failed + Ok(_) => { + // TODO have state_res give the actual auth error in this case + resolved_map.insert( + event_id, + Err("This event failed authentication, not found in resolved set".into()), + ); + } + Err(e) => { + resolved_map.insert(event_id, Err(e.to_string())); + } + }; } - Ok(send_transaction_message::v1::Response { - pdus: BTreeMap::new(), - } - .into()) + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } #[cfg_attr( @@ -529,3 +754,30 @@ pub fn get_user_devices_route<'a>( .into()) } */ + +/// Generates a correct eventId for the incoming pdu. +/// +/// Returns a `state_res::StateEvent` which can be converted freely and has accessor methods. +fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, serde_json::Value) { + let mut value = serde_json::from_str(pdu.json().get()) + .expect("converting raw jsons to values always works"); + + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value.insert( + "event_id".to_owned(), + serde_json::json!(event_id) + .try_into() + .expect("EventId is a valid CanonicalJsonValue"), + ); + + ( + event_id, + serde_json::to_value(value).expect("JSON Value is a CanonicalJsonValue"), + ) +} From 0d69ebdc6c7259a544dc83958eaba1d50312e128 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 14:45:52 -0500 Subject: [PATCH 0345/1727] Reformat imports and fix clippy warnings --- src/client_server/account.rs | 11 ++++------- src/client_server/directory.rs | 4 +--- src/client_server/membership.rs | 7 ++----- src/client_server/state.rs | 8 +++++--- src/database/sending.rs | 5 +++-- src/ruma_wrapper.rs | 5 ++++- src/utils.rs | 16 +++++++++++++++- 7 files changed, 34 insertions(+), 22 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index f48543e..ab90de5 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -15,13 +15,10 @@ use ruma::{ }, }, events::{ - room::canonical_alias, - room::guest_access, - room::history_visibility, - room::join_rules, - room::member, - room::name, - room::{message, topic}, + room::{ + canonical_alias, guest_access, history_visibility, join_rules, member, message, name, + topic, + }, EventType, }, RoomAliasId, RoomId, RoomVersionId, UserId, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 048b410..d8af2e3 100644 --- 
a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -15,9 +15,7 @@ use ruma::{ }, federation, }, - directory::Filter, - directory::RoomNetwork, - directory::{IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk}, + directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, events::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 50a8cca..0e6235f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -17,15 +17,12 @@ use ruma::{ }, federation, }, - events::pdu::Pdu, - events::{room::member, EventType}, + events::{pdu::Pdu, room::member, EventType}, EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; use std::{ - collections::BTreeMap, - collections::HashMap, - collections::HashSet, + collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, iter, sync::Arc, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 3777862..010b20d 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -9,9 +9,8 @@ use ruma::{ }, }, events::{ - room::history_visibility::HistoryVisibility, - room::history_visibility::HistoryVisibilityEventContent, AnyStateEventContent, - EventContent, EventType, + room::history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, + AnyStateEventContent, EventContent, EventType, }, EventId, RoomId, UserId, }; @@ -103,6 +102,7 @@ pub async fn get_state_events_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable if !db.rooms.is_joined(sender_user, &body.room_id)? @@ -148,6 +148,7 @@ pub async fn get_state_events_for_key_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable if !db.rooms.is_joined(sender_user, &body.room_id)? @@ -198,6 +199,7 @@ pub async fn get_state_events_for_empty_key_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable if !db.rooms.is_joined(sender_user, &body.room_id)? 
diff --git a/src/database/sending.rs b/src/database/sending.rs index e3fca4f..c108e7e 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -116,6 +116,7 @@ impl Sending { } } Err((_server, _e)) => { + log::error!("server: {}\nerror: {}", _server, _e) // TODO: exponential backoff } }; @@ -131,7 +132,7 @@ impl Sending { .expect("splitn will always return 1 or more elements"), ) .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) - .and_then(|server_str|Box::::try_from(server_str) + .and_then(|server_str| Box::::try_from(server_str) .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))) .ok() .and_then(|server| parts @@ -162,7 +163,7 @@ impl Sending { }); } - pub fn send_pdu(&self, server: Box, pdu_id: &[u8]) -> Result<()> { + pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8dfd208..1c5529a 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -4,7 +4,10 @@ use ruma::{ identifiers::{DeviceId, UserId}, Outgoing, }; -use std::{convert::TryFrom, convert::TryInto, ops::Deref}; +use std::{ + convert::{TryFrom, TryInto}, + ops::Deref, +}; #[cfg(feature = "conduit_bin")] use { diff --git a/src/utils.rs b/src/utils.rs index 452b7c5..e65ec86 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -89,9 +89,23 @@ pub fn common_elements( } } } - false }) .all(|b| b) })) } + +#[test] +fn sled_tests() { + let db = sled::Config::new().temporary(true).open().unwrap(); + + db.insert(1_u64.to_be_bytes(), vec![10]).unwrap(); + db.insert(2_u64.to_be_bytes(), vec![20]).unwrap(); + db.insert(3_u64.to_be_bytes(), vec![30]).unwrap(); + + let mut key = 1_u64.to_be_bytes().to_vec(); + key.push(1); + db.insert(key, vec![40]).unwrap(); + + println!("{:?}", db.iter().collect::, _>>().unwrap()) +} From c9a6ce54cb9d54009779878b821e82e76e6e1b6a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 14:46:26 -0500 Subject: [PATCH 0346/1727] Add basic handling of EDUs for /send/txn --- src/server_server.rs | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index 41520fb..6550b8f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -383,7 +383,41 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - //dbg!(&*body); + for edu in &body.edus { + match serde_json::from_str::(edu.json().get()) { + Ok(edu) => match edu.edu_type.as_str() { + "m.typing" => { + if let Some(typing) = edu.content.get("typing") { + if typing.as_bool().unwrap_or_default() { + db.rooms.edus.typing_add( + &UserId::try_from(edu.content["user_id"].as_str().unwrap()) + .unwrap(), + &RoomId::try_from(edu.content["room_id"].as_str().unwrap()) + .unwrap(), + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms.edus.typing_remove( + &UserId::try_from(edu.content["user_id"].as_str().unwrap()) + .unwrap(), + &RoomId::try_from(edu.content["room_id"].as_str().unwrap()) + .unwrap(), + &db.globals, + )?; + } + } + } + "m.presence" => {} + "m.receipt" => {} + _ => {} + }, + Err(_err) => { + log::error!("{}", _err); + continue; + } + } + } // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. 
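The typing branch in the EDU loop above pulls `user_id` and `room_id` out of the raw content with `as_str().unwrap()`, so a malformed EDU from a remote server would panic the handler. A small, hedged sketch of a more defensive variant follows; the `TypingContent` struct and helper are illustrations only, not part of this commit, though the field names match the ones the loop reads:

use ruma::{RoomId, UserId};
use serde::Deserialize;

// Hypothetical typed view of an m.typing EDU's `content` object.
#[derive(Deserialize)]
struct TypingContent {
    user_id: UserId,
    room_id: RoomId,
    typing: bool,
}

// Returns None instead of panicking when the JSON does not have the expected shape.
fn parse_typing_edu(content: &serde_json::Value) -> Option<TypingContent> {
    serde_json::from_value(content.clone()).ok()
}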
From dcd1163806e8736dd758cdef77c862b8b823088b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 3 Nov 2020 21:20:35 +0100 Subject: [PATCH 0347/1727] All outgoing pdus in Sending must be PduStubs --- src/database/sending.rs | 36 ++++++++++++++++++++++++------------ src/server_server.rs | 25 +++++++------------------ 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index c108e7e..14558e3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -73,7 +73,7 @@ impl Sending { loop { select! { Some(server) = futures.next() => { - debug!("response: {:?}", &server); + debug!("sending response: {:?}", &server); match server { Ok((server, _response)) => { let mut prefix = server.as_bytes().to_vec(); @@ -184,17 +184,29 @@ impl Sending { let pdu_jsons = pdu_ids .iter() .map(|pdu_id| { - Ok::<_, (Box, Error)>(PduEvent::convert_to_outgoing_federation_event( - rooms - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database("Event in servernamepduids not found in db."), - ) - })?, - )) + Ok::<_, (Box, Error)>( + // TODO: this was a PduStub + // In order for sending to work these actually do have to be + // PduStub but, since they are Raw<..> we can fake it. + serde_json::from_str( + PduEvent::convert_to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database( + "Event in servernamepduids not found in db.", + ), + ) + })?, + ) + .json() + .get(), + ) + .expect("Raw<..> is always valid"), + ) }) .filter_map(|r| r.ok()) .collect::>(); diff --git a/src/server_server.rs b/src/server_server.rs index 6550b8f..de24c6d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,7 +20,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomVersionId, ServerName, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ collections::BTreeMap, @@ -412,19 +412,10 @@ pub async fn send_transaction_message_route<'a>( "m.receipt" => {} _ => {} }, - Err(_err) => { - log::error!("{}", _err); - continue; - } + Err(_err) => continue, } } - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? - // SPEC: - // Servers MUST strictly enforce the JSON format specified in the appendices. - // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of - // events over federation. For example, the Federation API's /send endpoint would - // discard the event whereas the Client Server API's /send/{eventType} endpoint - // would return a M_BAD_JSON error. + let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { println!("LOOP"); @@ -437,16 +428,14 @@ pub async fn send_transaction_message_route<'a>( if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { // TODO: auth rules apply to all events, not only those with a state key log::error!("Unauthorized {}", pdu.kind); - return Err(Error::BadRequest( - ruma::api::client::error::ErrorKind::Forbidden, - "Event is not authorized", - )); + + resolved_map.insert(event_id, Err("User is not in this room".into())); + continue; } // TODO: We should be doing the same get_closest_parent thing here too? 
// same as for state events ~100 lines down let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); @@ -677,7 +666,7 @@ pub async fn send_transaction_message_route<'a>( }; } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } #[cfg_attr( From eca0bbb35a9c062c21392c60455f624374c5cd17 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 13:49:02 -0500 Subject: [PATCH 0348/1727] Fix federated join miss hashing the join event created --- src/server_server.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index de24c6d..ccdeb74 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -412,10 +412,19 @@ pub async fn send_transaction_message_route<'a>( "m.receipt" => {} _ => {} }, - Err(_err) => continue, + Err(_err) => { + log::error!("{}", _err); + continue; + } } } - + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? + // SPEC: + // Servers MUST strictly enforce the JSON format specified in the appendices. + // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of + // events over federation. For example, the Federation API's /send endpoint would + // discard the event whereas the Client Server API's /send/{eventType} endpoint + // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { println!("LOOP"); From b13049a6fac23a41f726d7f0173554e1a40394af Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 13:49:02 -0500 Subject: [PATCH 0349/1727] Fix federated join miss hashing the join event created --- src/client_server/membership.rs | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0e6235f..5d621fa 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -512,22 +512,16 @@ async fn join_room_by_id_helper( // TODO fixup CanonicalJsonValue // use that instead of serde_json::Map... maybe? - let mut canon_json_stub = + let mut canon_json_stub: BTreeMap<_, ruma::signatures::CanonicalJsonValue> = serde_json::from_value(join_event_stub_value).expect("json Value is canonical JSON"); - // Generate event id - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&canon_json_stub, &RoomVersionId::Version6) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms // let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); // join_event_stub.remove("event_id"); - canon_json_stub.remove("event_id"); + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + // who the hell knew... 
ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), @@ -536,6 +530,14 @@ async fn join_room_by_id_helper( ) .expect("event is valid, we just created it"); + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&canon_json_stub, &RoomVersionId::Version6) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + // Add event_id back canon_json_stub.insert( "event_id".to_owned(), @@ -545,7 +547,7 @@ async fn join_room_by_id_helper( ); // It has enough fields to be called a proper event now - let join_event = canon_json_stub; + let join_event = dbg!(canon_json_stub); let send_join_response = server_server::send_request( &db.globals, @@ -606,7 +608,7 @@ async fn join_room_by_id_helper( .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{}: {}", value, e); - Error::BadServerResponse("Invalid PDU bytes in send_join response.") + Error::BadServerResponse("Invalid PDU in send_join response.") }) }) .collect::>>>()?; From acd144e93413dd509e2d75bdb381243d5d8187b8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 8 Nov 2020 13:54:59 -0500 Subject: [PATCH 0350/1727] Fix get_closest_parent and cleanup federation/send/:txn --- Cargo.lock | 286 +++++--------------------------- Cargo.toml | 12 +- src/client_server/membership.rs | 4 +- src/database/rooms.rs | 5 +- src/server_server.rs | 81 +++------ src/utils.rs | 15 -- 6 files changed, 69 insertions(+), 334 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f8703e..d2eef80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -221,7 +221,7 @@ dependencies = [ "reqwest", "ring", "rocket", - "ruma 0.0.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma", "rust-argon2", "serde", "serde_json", @@ -1611,33 +1611,15 @@ source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a dependencies = [ "assign", "js_int", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-appservice-api 0.2.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-client-api 0.10.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-federation-api 0.0.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-signatures 0.6.0-dev.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", -] - -[[package]] -name = "ruma" -version = "0.0.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "assign", - "js_int", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-appservice-api 0.2.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-client-api 0.10.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - 
"ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-federation-api 0.0.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-signatures 0.6.0-dev.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-api", + "ruma-appservice-api", + "ruma-client-api", + "ruma-common", + "ruma-events", + "ruma-federation-api", + "ruma-identifiers", + "ruma-serde", + "ruma-signatures", ] [[package]] @@ -1647,27 +1629,10 @@ source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a dependencies = [ "http", "percent-encoding", - "ruma-api-macros 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", - "strum", - "thiserror", -] - -[[package]] -name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "http", - "percent-encoding", - "ruma-api-macros 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-api-macros", + "ruma-common", + "ruma-identifiers", + "ruma-serde", "serde", "serde_json", "strum", @@ -1685,39 +1650,15 @@ dependencies = [ "syn", ] -[[package]] -name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ - "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-api", + "ruma-common", + 
"ruma-events", + "ruma-identifiers", "serde", "serde_json", ] @@ -1732,31 +1673,11 @@ dependencies = [ "js_int", "maplit", "percent-encoding", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "assign", - "http", - "js_int", - "maplit", - "percent-encoding", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", "serde", "serde_json", "strum", @@ -1768,23 +1689,9 @@ version = "0.2.0" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "js_int", - "ruma-common-macros 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "js_int", - "ruma-common-macros 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common-macros", + "ruma-identifiers", + "ruma-serde", "serde", "serde_json", "strum", @@ -1801,42 +1708,16 @@ dependencies = [ "syn", ] -[[package]] -name = "ruma-common-macros" -version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-events" version = "0.22.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "js_int", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-events-macros 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 
(git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", - "strum", -] - -[[package]] -name = "ruma-events" -version = "0.22.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "js_int", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-events-macros 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-common", + "ruma-events-macros", + "ruma-identifiers", + "ruma-serde", "serde", "serde_json", "strum", @@ -1853,43 +1734,17 @@ dependencies = [ "syn", ] -[[package]] -name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-federation-api" version = "0.0.3" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ "js_int", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-common 0.2.0 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-federation-api" -version = "0.0.3" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "js_int", - "ruma-api 0.17.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-common 0.2.0 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-events 0.22.0-alpha.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", "serde", "serde_json", ] @@ -1898,23 +1753,11 @@ dependencies = [ name = "ruma-identifiers" version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" -dependencies = [ - "ruma-identifiers-macros 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-identifiers-validation 0.1.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde", - "strum", -] - -[[package]] -name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" dependencies = [ "rand", - "ruma-identifiers-macros 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - 
"ruma-identifiers-validation 0.1.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers-macros", + "ruma-identifiers-validation", + "ruma-serde", "serde", "strum", ] @@ -1926,18 +1769,7 @@ source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a dependencies = [ "proc-macro2", "quote", - "ruma-identifiers-validation 0.1.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "syn", -] - -[[package]] -name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "proc-macro2", - "quote", - "ruma-identifiers-validation 0.1.1 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers-validation", "syn", ] @@ -1950,15 +1782,6 @@ dependencies = [ "strum", ] -[[package]] -name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "serde", - "strum", -] - [[package]] name = "ruma-serde" version = "0.2.3" @@ -1971,18 +1794,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "form_urlencoded", - "itoa", - "js_int", - "serde", - "serde_json", -] - [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" @@ -1990,21 +1801,8 @@ source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a dependencies = [ "base64", "ring", - "ruma-identifiers 0.17.4 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "ruma-serde 0.2.3 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", - "serde_json", - "untrusted", -] - -[[package]] -name = "ruma-signatures" -version = "0.6.0-dev.1" -source = "git+https://github.com/DevinR528/ruma?branch=unstable-join#424b138d84ccc47c0b212708a54f66bf88f7d57a" -dependencies = [ - "base64", - "ring", - "ruma-identifiers 0.17.4 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", - "ruma-serde 0.2.3 (git+https://github.com/DevinR528/ruma?branch=unstable-join)", + "ruma-identifiers", + "ruma-serde", "serde_json", "untrusted", ] @@ -2263,7 +2061,7 @@ dependencies = [ "itertools", "js_int", "maplit", - "ruma 0.0.1 (git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889)", + "ruma", "serde", "serde_json", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index b72b92f..e7b87fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"], rev = "aff914050eb297bd82b8aafb12158c88a9e480e1" } -ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", 
"unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "c15382ca41262058302959eac4029ab4a1ea5889" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution @@ -73,11 +73,3 @@ required-features = ["conduit_bin"] [lib] name = "conduit" path = "src/lib.rs" - -# [patch."https://github.com/timokoesters/ruma"] -# ruma = { path = "../ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"] } -# # ruma = { git = "https://github.com/ruma/ruma", rev = "64b9c646d15a359d62ab464a95176ff94adb2554", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"]} - -# [patch."https://github.com/ruma/state-res"] -# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } -# # state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp" } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 5d621fa..a07fe72 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -515,9 +515,7 @@ async fn join_room_by_id_helper( let mut canon_json_stub: BTreeMap<_, ruma::signatures::CanonicalJsonValue> = serde_json::from_value(join_event_stub_value).expect("json Value is canonical JSON"); - // We don't leave the event id into the pdu because that's only allowed in v1 or v2 rooms - // let join_event_stub = join_event_stub_value.as_object_mut().unwrap(); - // join_event_stub.remove("event_id"); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms canon_json_stub.remove("event_id"); // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 28e1f60..08734b1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -405,11 +405,12 @@ impl Rooms { pub fn get_closest_parent( &self, + room: &RoomId, incoming_prev_ids: &[EventId], their_state: &BTreeMap>, ) -> Result> { - match self.pduid_pdu.last()? { - Some(val) + match self.pduid_pdu.scan_prefix(room.as_bytes()).last() { + Some(Ok(val)) if incoming_prev_ids.contains( &serde_json::from_slice::(&val.1) .map_err(|_| { diff --git a/src/server_server.rs b/src/server_server.rs index ccdeb74..c865b02 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -418,6 +418,7 @@ pub async fn send_transaction_message_route<'a>( } } } + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. @@ -427,42 +428,20 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - println!("LOOP"); let (event_id, value) = process_incoming_pdu(pdu); let pdu = serde_json::from_value::(value.clone()) .expect("all ruma pdus are conduit pdus"); let room_id = &pdu.room_id; - if value.get("state_key").is_none() { - if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? 
{ - // TODO: auth rules apply to all events, not only those with a state key - log::error!("Unauthorized {}", pdu.kind); - - resolved_map.insert(event_id, Err("User is not in this room".into())); - continue; - } - - // TODO: We should be doing the same get_closest_parent thing here too? - // same as for state events ~100 lines down - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - resolved_map.insert(event_id, Ok::<(), String>(())); + // If we have no idea about this room + // TODO: Does a server only send us events that we should know about or + // when everyone on this server leaves a room can we ignore those events? + if !db.rooms.exists(&pdu.room_id)? { + log::error!("Room does not exist on this server"); + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let now = std::time::Instant::now(); let get_state_response = match send_request( &db.globals, body.body.origin.clone(), @@ -482,11 +461,9 @@ pub async fn send_transaction_message_route<'a>( Err(err) => { log::error!("Request failed: {}", err); resolved_map.insert(event_id, Err(err.to_string())); - dbg!(now.elapsed()); continue; } }; - dbg!(now.elapsed()); let their_current_state = get_state_response .pdus @@ -509,40 +486,21 @@ pub async fn send_transaction_message_route<'a>( if value.get("state_key").is_none() { if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { - // TODO: auth rules apply to all events, not only those with a state key log::error!("Sender is not joined {}", pdu.kind); - resolved_map.insert(event_id, Err("User is not in this room".into())); continue; } - // // TODO: We should be doing the same get_closest_parent thing here too? - // // same as for state events ~100 lines down - // let count = db.globals.next_count()?; - // let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - // pdu_id.push(0xff); - // pdu_id.extend_from_slice(&count.to_be_bytes()); - // db.rooms.append_pdu( - // &pdu, - // &value, - // count, - // pdu_id.into(), - // &db.globals, - // &db.account_data, - // &db.sending, - // )?; - // If the event is older than the last event in pduid_pdu Tree then find the // closest ancestor we know of and insert after the known ancestor by // altering the known events pduid to = same roomID + same count bytes + 0x1 // pushing a single byte every time a simple append cannot be done. match db .rooms - .get_closest_parent(&pdu.prev_events, &their_current_state)? + .get_closest_parent(room_id, &pdu.prev_events, &their_current_state)? 
{ Some(ClosestParent::Append) => { let count = db.globals.next_count()?; - dbg!(&count); let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); @@ -554,10 +512,12 @@ pub async fn send_transaction_message_route<'a>( pdu_id.into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; } Some(ClosestParent::Insert(old_count)) => { + println!("INSERT PDU FOUND {}", old_count); + let count = old_count; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -573,7 +533,7 @@ pub async fn send_transaction_message_route<'a>( pdu_id.into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; } _ => panic!("Not a sequential event or no parents found"), @@ -615,13 +575,13 @@ pub async fn send_transaction_message_route<'a>( // closest ancestor we know of and insert after the known ancestor by // altering the known events pduid to = same roomID + same count bytes + 0x1 // pushing a single byte every time a simple append cannot be done. - match db - .rooms - .get_closest_parent(&pdu.prev_events, &their_current_state)? - { + match db.rooms.get_closest_parent( + room_id, + &pdu.prev_events, + &their_current_state, + )? { Some(ClosestParent::Append) => { let count = db.globals.next_count()?; - dbg!(&count); let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); @@ -633,11 +593,12 @@ pub async fn send_transaction_message_route<'a>( pdu_id.into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; } Some(ClosestParent::Insert(old_count)) => { - println!("INSERT PDU FOUND {}", old_count); + println!("INSERT STATE PDU FOUND {}", old_count); + let count = old_count; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -653,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( pdu_id.into(), &db.globals, &db.account_data, - &db.sending, + &db.admin, )?; } _ => panic!("Not a sequential event or no parents found"), diff --git a/src/utils.rs b/src/utils.rs index e65ec86..edcf48a 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -94,18 +94,3 @@ pub fn common_elements( .all(|b| b) })) } - -#[test] -fn sled_tests() { - let db = sled::Config::new().temporary(true).open().unwrap(); - - db.insert(1_u64.to_be_bytes(), vec![10]).unwrap(); - db.insert(2_u64.to_be_bytes(), vec![20]).unwrap(); - db.insert(3_u64.to_be_bytes(), vec![30]).unwrap(); - - let mut key = 1_u64.to_be_bytes().to_vec(); - key.push(1); - db.insert(key, vec![40]).unwrap(); - - println!("{:?}", db.iter().collect::, _>>().unwrap()) -} From 234b2264680a7c8e514e51672503519a08b05257 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 11 Nov 2020 14:30:12 -0500 Subject: [PATCH 0351/1727] Fix review issues, Remove EventHash's in prev/auth_events in StateEvent The latest state-res crate uses ruma's PduRoomV3 PDU's which don't have tuples of (EventId, EventHashs) like previous versions did (this was left from rebasing onto master). The Media DB now takes an optional content_type like the updated ruma structs. 
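For context on the first point above: in room versions 1 and 2 every entry of `prev_events`/`auth_events` is an `[event_id, hashes]` pair, while room version 3 and later use bare, hash-derived event IDs, which is why the placeholder `EventHash { sha256: "hello" }` tuples below can simply be dropped. A rough sketch of the two shapes (illustrative JSON only, not taken from this patch):

fn prev_events_shapes() -> (serde_json::Value, serde_json::Value) {
    // Room v1/v2: each entry is an [event_id, hashes] pair.
    let v1 = serde_json::json!([["$event:example.org", { "sha256": "<base64 hash>" }]]);
    // Room v3+ (what state-res / ruma's newer PDU types expect): bare event ids.
    let v3 = serde_json::json!(["$<base64 reference hash>"]);
    (v1, v3)
}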
--- src/client_server/media.rs | 12 +++--------- src/database/media.rs | 18 ++++++++++++++---- src/database/rooms.rs | 1 + src/pdu.rs | 14 +++----------- src/server_server.rs | 4 ++-- 5 files changed, 23 insertions(+), 26 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index af7880c..6d72107 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -39,7 +39,7 @@ pub async fn create_content_route( db.media.create( mxc.clone(), &body.filename.as_deref(), - body.content_type.as_deref().unwrap_or("img"), // TODO this is now optional handle + &body.content_type.as_deref(), // TODO this is now optional handle &body.file, )?; @@ -85,10 +85,7 @@ pub async fn get_content_route( db.media.create( mxc, &get_content_response.content_disposition.as_deref(), - get_content_response // TODO this is now optional handle - .content_type - .as_deref() - .unwrap_or("img"), + &get_content_response.content_type.as_deref(), &get_content_response.file, )?; @@ -142,10 +139,7 @@ pub async fn get_content_thumbnail_route( db.media.upload_thumbnail( mxc, &None, - get_thumbnail_response - .content_type - .as_deref() - .unwrap_or("img"), // TODO now optional, deal with it somehow + &get_thumbnail_response.content_type, body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/database/media.rs b/src/database/media.rs index 8c59aa4..bfc6207 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -20,7 +20,7 @@ impl Media { &self, mxc: String, filename: &Option<&str>, - content_type: &str, + content_type: &Option<&str>, file: &[u8], ) -> Result<()> { let mut key = mxc.as_bytes().to_vec(); @@ -30,7 +30,12 @@ impl Media { key.push(0xff); key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); key.push(0xff); - key.extend_from_slice(content_type.as_bytes()); + key.extend_from_slice( + content_type + .as_ref() + .map(|c| c.as_bytes()) + .unwrap_or_default(), + ); self.mediaid_file.insert(key, file)?; @@ -42,7 +47,7 @@ impl Media { &self, mxc: String, filename: &Option, - content_type: &str, + content_type: &Option, width: u32, height: u32, file: &[u8], @@ -54,7 +59,12 @@ impl Media { key.push(0xff); key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); key.push(0xff); - key.extend_from_slice(content_type.as_bytes()); + key.extend_from_slice( + content_type + .as_ref() + .map(|c| c.as_bytes()) + .unwrap_or_default(), + ); self.mediaid_file.insert(key, file)?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 08734b1..d8e6131 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -647,6 +647,7 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
+ #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, diff --git a/src/pdu.rs b/src/pdu.rs index 7118bfc..a213d2c 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -225,7 +225,7 @@ impl PduEvent { impl From<&state_res::StateEvent> for PduEvent { fn from(pdu: &state_res::StateEvent) -> Self { Self { - event_id: pdu.event_id().clone(), + event_id: pdu.event_id(), room_id: pdu.room_id().unwrap().clone(), sender: pdu.sender().clone(), origin_server_ts: (pdu @@ -260,17 +260,9 @@ impl PduEvent { "type": self.kind, "content": self.content, "state_key": self.state_key, - "prev_events": self.prev_events - .iter() - // TODO How do we create one of these - .map(|id| (id, EventHash { sha256: "hello".into() })) - .collect::>(), + "prev_events": self.prev_events, "depth": self.depth, - "auth_events": self.auth_events - .iter() - // TODO How do we create one of these - .map(|id| (id, EventHash { sha256: "hello".into() })) - .collect::>(), + "auth_events": self.auth_events, "redacts": self.redacts, "unsigned": self.unsigned, "hashes": self.hashes, diff --git a/src/server_server.rs b/src/server_server.rs index c865b02..19a73f9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -419,7 +419,7 @@ pub async fn send_transaction_message_route<'a>( } } - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of @@ -554,7 +554,7 @@ pub async fn send_transaction_message_route<'a>( // TODO we may not want the auth events chained in here for resolution? their_current_state .iter() - .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id().clone())) + .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id())) .collect::>(), ], Some( From 86bb93f8cf8eddee51ecf9188c8358ddfd7f175a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 14 Nov 2020 16:18:15 -0500 Subject: [PATCH 0352/1727] Remove outdated TODOs, use StateEvent::from_id_value consistently --- src/client_server/membership.rs | 11 ++++++---- src/pdu.rs | 37 +++++++++++++++++++-------------- src/server_server.rs | 8 +++---- 3 files changed, 32 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index a07fe72..39d69cd 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -545,7 +545,7 @@ async fn join_room_by_id_helper( ); // It has enough fields to be called a proper event now - let join_event = dbg!(canon_json_stub); + let join_event = canon_json_stub; let send_join_response = server_server::send_request( &db.globals, @@ -577,7 +577,7 @@ async fn join_room_by_id_helper( .expect("a valid EventId can be converted to CanonicalJsonValue"), ); - Ok((event_id, serde_json::json!(value))) // TODO CanonicalJsonValue fixup? 
+ Ok((event_id, serde_json::json!(value))) }; let room_state = send_join_response.room_state.state.iter().map(add_event_id); @@ -602,7 +602,8 @@ async fn join_room_by_id_helper( )))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - serde_json::from_value::(value.clone()) + // TODO remove .clone when I remove debug logging + state_res::StateEvent::from_id_value(event_id.clone(), value.clone()) .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{}: {}", value, e); @@ -642,7 +643,9 @@ async fn join_room_by_id_helper( .expect("iterative auth check failed on resolved events"); // This removes the control events that failed auth, leaving the resolved - // to be mainline sorted + // to be mainline sorted. In the actual `state_res::StateResolution::resolve` + // function both are removed since these are all events we don't know of + // we must keep track of everything to add to our DB. let events_to_sort = event_map .keys() .filter(|id| { diff --git a/src/pdu.rs b/src/pdu.rs index a213d2c..effbc5d 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -252,22 +252,27 @@ impl From<&state_res::StateEvent> for PduEvent { impl PduEvent { pub fn convert_for_state_res(&self) -> Arc { Arc::new( - serde_json::from_value(json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - })) + // For consistency of eventId (just in case) we use the one + // generated by conduit for everything. + state_res::StateEvent::from_id_value( + self.event_id.clone(), + json!({ + "event_id": self.event_id, + "room_id": self.room_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "type": self.kind, + "content": self.content, + "state_key": self.state_key, + "prev_events": self.prev_events, + "depth": self.depth, + "auth_events": self.auth_events, + "redacts": self.redacts, + "unsigned": self.unsigned, + "hashes": self.hashes, + "signatures": self.signatures, + }), + ) .expect("all conduit PDUs are state events"), ) } diff --git a/src/server_server.rs b/src/server_server.rs index 19a73f9..b9d26fd 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -26,6 +26,7 @@ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, fmt::Debug, + sync::Arc, time::{Duration, SystemTime}, }; use trust_dns_resolver::AsyncResolver; @@ -551,7 +552,6 @@ pub async fn send_transaction_message_route<'a>( .iter() .map(|((ev, sk), v)| ((ev.clone(), sk.to_owned()), v.event_id.clone())) .collect::>(), - // TODO we may not want the auth events chained in here for resolution? their_current_state .iter() .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id())) @@ -750,10 +750,10 @@ pub fn get_user_devices_route<'a>( /// Generates a correct eventId for the incoming pdu. /// -/// Returns a `state_res::StateEvent` which can be converted freely and has accessor methods. +/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. 
fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, serde_json::Value) { - let mut value = serde_json::from_str(pdu.json().get()) - .expect("converting raw jsons to values always works"); + let mut value = + serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", From bb24f6ad90513326582a908672543749571a2054 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 15 Nov 2020 16:48:43 -0500 Subject: [PATCH 0353/1727] Address some review issues fmt, errors, comments --- src/client_server/backup.rs | 2 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 2 -- src/database/rooms.rs | 15 +++++++++++- src/database/sending.rs | 6 ++--- src/ruma_wrapper.rs | 41 +++++++++++++++++---------------- src/server_server.rs | 21 ++++++++--------- 7 files changed, 50 insertions(+), 39 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 607fa39..676b5a3 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -256,7 +256,7 @@ pub async fn get_backup_key_session_route( let key_data = db .key_backups .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? - .ok_or_else(|| Error::BadDatabase("Backup key not found for this user's session"))?; + .ok_or_else(|| Error::bad_database("Backup key not found for this user's session."))?; Ok(get_backup_key_session::Response { key_data }.into()) } diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 6d72107..0c23488 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -39,7 +39,7 @@ pub async fn create_content_route( db.media.create( mxc.clone(), &body.filename.as_deref(), - &body.content_type.as_deref(), // TODO this is now optional handle + &body.content_type.as_deref(), &body.file, )?; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 39d69cd..849fb7e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -519,7 +519,6 @@ async fn join_room_by_id_helper( canon_json_stub.remove("event_id"); // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - // who the hell knew... ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), @@ -602,7 +601,6 @@ async fn join_room_by_id_helper( )))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - // TODO remove .clone when I remove debug logging state_res::StateEvent::from_id_value(event_id.clone(), value.clone()) .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d8e6131..3d5b890 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -35,6 +35,11 @@ use super::admin::AdminCommand; /// hashing the entire state. pub type StateHashId = IVec; +/// An enum that represents the two valid states when searching +/// for an events "parent". +/// +/// An events parent is any event we are aware of that is part of +/// the events `prev_events` array. pub enum ClosestParent { Append, Insert(u64), @@ -80,7 +85,7 @@ impl StateStore for Rooms { .map_err(StateError::custom)? .ok_or_else(|| { StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db.\n{}", + "PDU via room_id and event_id not found in the db: {}", event_id.as_str() )) })?; @@ -258,6 +263,8 @@ impl Rooms { } /// Force the creation of a new StateHash and insert it into the db. 
+ /// + /// Whatever `state` is supplied to `force_state` __is__ the current room state snapshot. pub fn force_state( &self, room_id: &RoomId, @@ -403,6 +410,12 @@ impl Rooms { } } + /// Recursively search for a PDU from our DB that is also in the + /// `prev_events` field of the incoming PDU. + /// + /// First we check if the last PDU inserted to the given room is a parent + /// if not we recursively check older `prev_events` to insert the incoming + /// event after. pub fn get_closest_parent( &self, room: &RoomId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 14558e3..6b9e0fe 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, convert::TryFrom, time::SystemTime}; use crate::{server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::debug; +use log::{debug, error}; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{api::federation, ServerName}; use sled::IVec; @@ -115,8 +115,8 @@ impl Sending { // servercurrentpdus with the prefix should be empty now } } - Err((_server, _e)) => { - log::error!("server: {}\nerror: {}", _server, _e) + Err((server, e)) => { + error!("server: {}\nerror: {}", server, e) // TODO: exponential backoff } }; diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1c5529a..a68b09d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -67,27 +67,28 @@ where let (sender_user, sender_device) = // TODO: Do we need to matches! anything else here? ServerSignatures - if matches!(T::METADATA.authentication, AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken) { - // Get token from header or query value - let token = match request - .headers() - .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) - { - // TODO: M_MISSING_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some(token) => token, - }; - - // Check if token is valid - match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), + match T::METADATA.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + // Get token from header or query value + let token = match request + .headers() + .get_one("Authorization") + .map(|s| s[7..].to_owned()) // Split off "Bearer " + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) + { + // TODO: M_MISSING_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(token) => token, + }; + + // Check if token is valid + match db.users.find_from_token(&token).unwrap() { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), + } } - } else { - (None, None) + _ => (None, None) }; let mut http_request = http::Request::builder() diff --git a/src/server_server.rs b/src/server_server.rs index b9d26fd..89d8eb1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,7 +4,7 @@ use crate::{ }; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::warn; +use log::{error, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -26,7 +26,6 @@ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, 
fmt::Debug, - sync::Arc, time::{Duration, SystemTime}, }; use trust_dns_resolver::AsyncResolver; @@ -99,7 +98,7 @@ where let mut http_request = request .try_into_http_request(&actual_destination, Some("")) .map_err(|e| { - warn!("failed to find destination {}: {}", actual_destination, e); + warn!("Failed to find destination {}: {}", actual_destination, e); Error::BadServerResponse("Invalid destination") })?; @@ -264,12 +263,14 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { .body(), ) .unwrap(); + ruma::signatures::sign_json( db.globals.server_name().as_str(), db.globals.keypair(), &mut response, ) .unwrap(); + Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical")) } @@ -413,8 +414,8 @@ pub async fn send_transaction_message_route<'a>( "m.receipt" => {} _ => {} }, - Err(_err) => { - log::error!("{}", _err); + Err(err) => { + error!("{}", err); continue; } } @@ -434,11 +435,9 @@ pub async fn send_transaction_message_route<'a>( .expect("all ruma pdus are conduit pdus"); let room_id = &pdu.room_id; - // If we have no idea about this room - // TODO: Does a server only send us events that we should know about or - // when everyone on this server leaves a room can we ignore those events? + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { - log::error!("Room does not exist on this server"); + error!("Room does not exist on this server."); resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } @@ -460,7 +459,7 @@ pub async fn send_transaction_message_route<'a>( // As an example a possible error // {"errcode":"M_FORBIDDEN","error":"Host not in room."} Err(err) => { - log::error!("Request failed: {}", err); + error!("Request failed: {}", err); resolved_map.insert(event_id, Err(err.to_string())); continue; } @@ -487,7 +486,7 @@ pub async fn send_transaction_message_route<'a>( if value.get("state_key").is_none() { if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { - log::error!("Sender is not joined {}", pdu.kind); + error!("Sender is not joined {}", pdu.kind); resolved_map.insert(event_id, Err("User is not in this room".into())); continue; } From b6d721374f970cca912477a3972bb857758d983d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 18 Nov 2020 08:36:12 -0500 Subject: [PATCH 0354/1727] Have Media db return optional content_type, conversion fixes --- src/client_server/media.rs | 8 ++---- src/client_server/membership.rs | 3 +-- src/database/media.rs | 44 +++++++++++++++++++-------------- src/database/rooms.rs | 6 ++--- src/utils.rs | 17 +++++++++++++ 5 files changed, 47 insertions(+), 31 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0c23488..e6bd182 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -66,7 +66,7 @@ pub async fn get_content_route( { Ok(get_content::Response { file, - content_type: Some(content_type), + content_type, content_disposition: filename, } .into()) @@ -116,11 +116,7 @@ pub async fn get_content_thumbnail_route( .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, )? 
{ - Ok(get_content_thumbnail::Response { - file, - content_type: Some(content_type), - } - .into()) + Ok(get_content_thumbnail::Response { file, content_type }.into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = server_server::send_request( &db.globals, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 849fb7e..47fcde1 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -510,8 +510,7 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"), ); - // TODO fixup CanonicalJsonValue - // use that instead of serde_json::Map... maybe? + // Convert `serde_json;:Value` to `CanonicalJsonObj` for hashing/signing let mut canon_json_stub: BTreeMap<_, ruma::signatures::CanonicalJsonValue> = serde_json::from_value(join_event_stub_value).expect("json Value is canonical JSON"); diff --git a/src/database/media.rs b/src/database/media.rs index bfc6207..89d48e1 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -5,7 +5,7 @@ use std::mem; pub struct FileMeta { pub filename: Option, - pub content_type: String, + pub content_type: Option, pub file: Vec, } @@ -83,12 +83,14 @@ impl Media { let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); - let content_type = utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; + let content_type = parts + .next() + .map(|bytes| { + Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") + })?) + }) + .transpose()?; let filename_bytes = parts .next() @@ -158,12 +160,14 @@ impl Media { let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); - let content_type = utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("Invalid Media ID in db"))?, - ) - .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; + let content_type = parts + .next() + .map(|bytes| { + Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") + })?) + }) + .transpose()?; let filename_bytes = parts .next() @@ -189,12 +193,14 @@ impl Media { let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); - let content_type = utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))?; + let content_type = parts + .next() + .map(|bytes| { + Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") + })?) + }) + .transpose()?; let filename_bytes = parts .next() diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3d5b890..a95587c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -499,7 +499,6 @@ impl Rooms { Ok(()) } - #[allow(clippy::too_many_arguments)] /// Creates a new persisted data unit and adds it to a room. 
/// /// By this point the incoming event should be fully authenticated, no auth happens @@ -856,9 +855,8 @@ impl Rooms { }; // Hash and sign - let mut pdu_json: BTreeMap = - serde_json::from_value(serde_json::json!(&pdu)) - .expect("event is valid, we just created it"); + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); pdu_json.remove("event_id"); diff --git a/src/utils.rs b/src/utils.rs index edcf48a..c82e6fe 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,6 +1,7 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; +use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use sled::IVec; use std::{ cmp, @@ -94,3 +95,19 @@ pub fn common_elements( .all(|b| b) })) } + +/// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. +/// +/// `value` must serialize to an `serde_json::Value::Object`. +pub fn to_canonical_object( + value: T, +) -> Result { + use serde::ser::Error; + + match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? { + serde_json::Value::Object(map) => try_from_json_map(map), + _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom( + "Value must be an object", + ))), + } +} From 27e686f9ff03718c166b8e9af3536bc36d22083e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 30 Nov 2020 12:10:33 -0500 Subject: [PATCH 0355/1727] Convert uses of serde_json::Value to CanonicalJsonObject --- Cargo.lock | 431 +++++++++++++++++--------------- src/client_server/membership.rs | 59 ++--- src/database/rooms.rs | 18 +- src/database/sending.rs | 4 +- src/server_server.rs | 21 +- 5 files changed, 277 insertions(+), 256 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2eef80..afb3abf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. 
[[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" dependencies = [ "gimli", ] @@ -30,12 +30,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" - [[package]] name = "arrayref" version = "0.3.6" @@ -56,9 +50,9 @@ checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" [[package]] name = "async-trait" -version = "0.1.41" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ "proc-macro2", "quote", @@ -90,9 +84,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.53" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707b586e0e2f247cbde68cdd2c3ce69ea7b7be43e1c5b426e37c9319c4b9838e" +checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -104,9 +98,9 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base64" @@ -114,6 +108,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + [[package]] name = "binascii" version = "0.1.4" @@ -128,9 +128,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec", @@ -163,9 +163,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.61" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" +checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15" [[package]] name = "cfg-if" @@ -211,7 +211,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.12.3", "directories", "http", "image", @@ -233,10 +233,20 @@ dependencies = [ ] [[package]] -name = "const_fn" -version = "0.4.2" +name = "console_error_panic_hook" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "ce90df4c658c62f12d78f7508cf92f9173e5184a539c10bfe54a3107b3ffd0f2" +checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" +dependencies = [ + "cfg-if 0.1.10", + "wasm-bindgen", +] + +[[package]] +name = "const_fn" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" [[package]] name = "constant_time_eq" @@ -246,20 +256,20 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1373a16a4937bc34efec7b391f9c1500c30b8478a701a4f44c9165cc0475a6e0" +checksum = "784ad0fbab4f3e9cef09f20e0aea6000ae08d2cb98ac4c0abc53df18803d702f" dependencies = [ "percent-encoding", - "time 0.2.22", + "time 0.2.23", "version_check", ] [[package]] name = "core-foundation" -version = "0.7.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" dependencies = [ "core-foundation-sys", "libc", @@ -267,9 +277,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "crc32fast" @@ -282,27 +292,26 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if 1.0.0", + "const_fn", "crossbeam-utils", "lazy_static", - "maybe-uninit", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "lazy_static", ] @@ -372,12 +381,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "dtoa" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" - [[package]] name = "either" version = "1.6.1" @@ -386,11 +389,11 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" +checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -464,9 +467,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.7" 
+version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" +checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", @@ -479,9 +482,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", @@ -489,15 +492,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-executor" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", @@ -506,15 +509,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = "futures-macro" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -524,24 +527,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" +checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ "futures-channel", "futures-core", @@ -550,7 +553,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.1", + "pin-project 1.0.2", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -602,9 +605,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" 
[[package]] name = "glob" @@ -702,9 +705,9 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.8" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835" +checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ "bytes", "futures-channel", @@ -716,7 +719,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 0.4.27", + "pin-project 1.0.2", "socket2", "tokio", "tower-service", @@ -750,9 +753,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.11" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0a8345b33b082aedec2f4d7d4a926b845cee184cbe78b703413066564431b" +checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" dependencies = [ "bytemuck", "byteorder", @@ -777,15 +780,15 @@ dependencies = [ [[package]] name = "inlinable_string" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6ee2a7da03bfc3b66ca47c92c2e392fcc053ea040a85561749b026f7aad09a" +checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" [[package]] name = "instant" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ "cfg-if 1.0.0", ] @@ -889,9 +892,9 @@ checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" [[package]] name = "lock_api" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" dependencies = [ "scopeguard", ] @@ -954,23 +957,17 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" dependencies = [ "autocfg", ] @@ -1042,9 +1039,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -1054,9 +1051,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.4" +version = "0.2.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" dependencies = [ "lazy_static", "libc", @@ -1072,9 +1069,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.35" +version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" +checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1083,9 +1080,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", "num-traits", @@ -1093,9 +1090,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.41" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f" +checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" dependencies = [ "autocfg", "num-integer", @@ -1104,9 +1101,9 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b4d7360f362cfb50dde8143501e6940b22f644be75a4cc90b2d81968908138" +checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" dependencies = [ "autocfg", "num-integer", @@ -1115,9 +1112,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] @@ -1134,15 +1131,15 @@ dependencies = [ [[package]] name = "object" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" [[package]] name = "once_cell" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" @@ -1189,9 +1186,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api", @@ -1251,11 +1248,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" dependencies = [ - "pin-project-internal 1.0.1", + "pin-project-internal 
1.0.2", ] [[package]] @@ -1271,9 +1268,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" dependencies = [ "proc-macro2", "quote", @@ -1286,6 +1283,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +[[package]] +name = "pin-project-lite" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" + [[package]] name = "pin-utils" version = "0.1.0" @@ -1312,9 +1315,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro-crate" @@ -1327,9 +1330,9 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" @@ -1433,18 +1436,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" dependencies = [ "proc-macro2", "quote", @@ -1453,9 +1456,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8963b85b8ce3074fecffde43b4b0dded83ce2f367dc8d363afc56679f3ee820b" +checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "regex-syntax", ] @@ -1472,9 +1475,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cab7a364d15cde1e505267766a2d3c4e22a843e1a601f0fa7564c0f82ced11c" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "remove_dir_all" @@ -1487,11 +1490,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" +checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" dependencies = [ - "base64", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", @@ -1508,7 +1511,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - 
"pin-project-lite", + "pin-project-lite 0.2.0", "serde", "serde_urlencoded", "tokio", @@ -1516,15 +1519,16 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] [[package]] name = "resolv-conf" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11834e137f3b14e309437a8276714eed3a80d1ef894869e510f2c0c0b98b9f4a" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", "quick-error", @@ -1532,9 +1536,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.15" +version = "0.16.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "70017ed5c555d79ee3538fc63ca09c70ad8f317dcadc1adc2c496b60c22bb24f" dependencies = [ "cc", "libc", @@ -1563,7 +1567,7 @@ dependencies = [ "rocket_codegen", "rocket_http", "state", - "time 0.2.22", + "time 0.2.23", "tokio", "toml", "version_check", @@ -1598,7 +1602,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.22", + "time 0.2.23", "tokio", "tokio-rustls", "unicode-xid", @@ -1799,7 +1803,7 @@ name = "ruma-signatures" version = "0.6.0-dev.1" source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" dependencies = [ - "base64", + "base64 0.12.3", "ring", "ruma-identifiers", "ruma-serde", @@ -1809,11 +1813,11 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1840,7 +1844,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ - "base64", + "base64 0.12.3", "log", "ring", "sct", @@ -1887,9 +1891,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" dependencies = [ "bitflags", "core-foundation", @@ -1900,9 +1904,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.3" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" dependencies = [ "core-foundation-sys", "libc", @@ -1956,14 +1960,14 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ - "dtoa", + "form_urlencoded", "itoa", + "ryu", "serde", - "url", ] [[package]] @@ -1984,11 +1988,10 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.1" +version = "1.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" +checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" dependencies = [ - "arc-swap", "libc", ] @@ -2000,9 +2003,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" -version = "0.34.4" +version = "0.34.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72c064e63fbca3138ad07f3588c58093f1684f3a99f60dcfa6d46b87e60fde7" +checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" dependencies = [ "crc32fast", "crossbeam-epoch", @@ -2016,17 +2019,17 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" +checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" [[package]] name = "socket2" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" +checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "redox_syscall", "winapi 0.3.9", @@ -2040,23 +2043,23 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" +checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" dependencies = [ "version_check", ] [[package]] name = "state" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345c971d1ef21ffdbd103a75990a15eb03604fc8b8852ca8cb418ee1a099028" +checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#1cd1a16cdefabb126a781a50b3d5eb1fdb3d3afb" +source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#d2a85669cc6056679ce6ca0fde4658a879ad2b08" dependencies = [ "itertools", "js_int", @@ -2141,9 +2144,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.48" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" +checksum = "8833e20724c24de12bbaba5ad230ea61c3eafb05b881c7c9d3cfe8638b187e68" dependencies = [ "proc-macro2", "quote", @@ -2166,18 +2169,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" dependencies = [ "proc-macro2", "quote", @@ -2206,9 +2209,9 @@ 
dependencies = [ [[package]] name = "time" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" +checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" dependencies = [ "const_fn", "libc", @@ -2244,15 +2247,24 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" dependencies = [ "bytes", "fnv", @@ -2264,7 +2276,7 @@ dependencies = [ "mio", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.11", "signal-hook-registry", "slab", "tokio-macros", @@ -2273,9 +2285,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -2314,7 +2326,7 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite", + "pin-project-lite 0.1.11", "tokio", ] @@ -2335,13 +2347,13 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log", - "pin-project-lite", + "pin-project-lite 0.2.0", "tracing-attributes", "tracing-core", ] @@ -2399,9 +2411,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2810660b9d5b18895d140caba6401765749a6a162e5d0736cfc44ea50db9d79d" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" dependencies = [ "ansi_term", "chrono", @@ -2421,9 +2433,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd7061ba6f4d4d9721afedffbfd403f20f39a4301fee1b70d6fcd09cca69f28" +checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" dependencies = [ "async-trait", "backtrace", @@ -2441,9 +2453,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f23cdfdc3d8300b3c50c9e84302d3bd6d860fb9529af84ace6cf9665f181b77" +checksum = 
"6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" dependencies = [ "backtrace", "cfg-if 0.1.10", @@ -2485,18 +2497,18 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-xid" @@ -2512,10 +2524,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" dependencies = [ + "form_urlencoded", "idna", "matches", "percent-encoding", @@ -2623,6 +2636,30 @@ version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" +[[package]] +name = "wasm-bindgen-test" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34d1cdc8b98a557f24733d50a1199c4b0635e465eecba9c45b214544da197f64" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fb9c67be7439ee8ab1b7db502a49c05e51e2835b66796c705134d9b8e1a585" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "web-sys" version = "0.3.45" @@ -2645,9 +2682,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8795d6e0e17485803cc10ef126bb8c0d59b7c61b219d66cfe0b3216dd0e8580a" +checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" [[package]] name = "widestring" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 47fcde1..d5c50e5 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -18,12 +18,13 @@ use ruma::{ federation, }, events::{pdu::Pdu, room::member, EventType}, + serde::{to_canonical_value, CanonicalJsonObject}, EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, iter, sync::Arc, }; @@ -477,30 +478,25 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; - let mut join_event_stub_value = - serde_json::from_str::(make_join_response.event.json().get()) + let mut join_event_stub = + serde_json::from_str::(make_join_response.event.json().get()) .map_err(|_| { Error::BadServerResponse("Invalid make_join event json received from server.") })?; - let join_event_stub = - join_event_stub_value - .as_object_mut() - 
.ok_or(Error::BadServerResponse( - "Invalid make join event object received from server.", - ))?; - join_event_stub.insert( "origin".to_owned(), - db.globals.server_name().to_owned().to_string().into(), + to_canonical_value(db.globals.server_name()) + .map_err(|_| Error::bad_database("Invalid server name found"))?, ); join_event_stub.insert( "origin_server_ts".to_owned(), - utils::millis_since_unix_epoch().into(), + to_canonical_value(utils::millis_since_unix_epoch()) + .expect("Timestamp is valid js_int value"), ); join_event_stub.insert( "content".to_owned(), - serde_json::to_value(member::MemberEventContent { + to_canonical_value(member::MemberEventContent { membership: member::MembershipState::Join, displayname: db.users.displayname(&sender_user)?, avatar_url: db.users.avatar_url(&sender_user)?, @@ -510,18 +506,14 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"), ); - // Convert `serde_json;:Value` to `CanonicalJsonObj` for hashing/signing - let mut canon_json_stub: BTreeMap<_, ruma::signatures::CanonicalJsonValue> = - serde_json::from_value(join_event_stub_value).expect("json Value is canonical JSON"); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - canon_json_stub.remove("event_id"); + join_event_stub.remove("event_id"); // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), - &mut canon_json_stub, + &mut join_event_stub, &RoomVersionId::Version6, ) .expect("event is valid, we just created it"); @@ -529,21 +521,19 @@ async fn join_room_by_id_helper( // Generate event id let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&canon_json_stub, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&join_event_stub, &RoomVersionId::Version6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); // Add event_id back - canon_json_stub.insert( + join_event_stub.insert( "event_id".to_owned(), - serde_json::json!(event_id) - .try_into() - .expect("EventId is a valid CanonicalJsonValue"), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); // It has enough fields to be called a proper event now - let join_event = canon_json_stub; + let join_event = join_event_stub; let send_join_response = server_server::send_request( &db.globals, @@ -559,7 +549,7 @@ async fn join_room_by_id_helper( ) .await?; - let add_event_id = |pdu: &Raw| -> Result<(EventId, serde_json::Value)> { + let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { let mut value = serde_json::from_str(pdu.json().get()) .expect("converting raw jsons to values always works"); let event_id = EventId::try_from(&*format!( @@ -571,18 +561,18 @@ async fn join_room_by_id_helper( value.insert( "event_id".to_owned(), - serde_json::from_value(serde_json::json!(event_id)) + to_canonical_value(&event_id) .expect("a valid EventId can be converted to CanonicalJsonValue"), ); - Ok((event_id, serde_json::json!(value))) + Ok((event_id, value)) }; let room_state = send_join_response.room_state.state.iter().map(add_event_id); let state_events = room_state .clone() - .map(|pdu: Result<(EventId, serde_json::Value)>| Ok(pdu?.0)) + .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created .collect::>>()?; @@ -594,16 +584,13 @@ async 
fn join_room_by_id_helper( let mut event_map = room_state .chain(auth_chain) - .chain(iter::once(Ok(( - event_id, - serde_json::to_value(join_event).unwrap(), - )))) // Add join event we just created + .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_value(event_id.clone(), value.clone()) + state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { - warn!("{}: {}", value, e); + warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) @@ -692,7 +679,7 @@ async fn join_room_by_id_helper( pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( &PduEvent::from(&**pdu), - &serde_json::to_value(&**pdu).expect("PDU is valid value"), + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a95587c..651a596 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -15,6 +15,7 @@ use ruma::{ }, EventType, }, + serde::{to_canonical_value, CanonicalJsonObject}, EventId, Raw, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; @@ -506,7 +507,7 @@ impl Rooms { pub fn append_pdu( &self, pdu: &PduEvent, - pdu_json: &serde_json::Value, + pdu_json: &CanonicalJsonObject, count: u64, pdu_id: IVec, globals: &super::globals::Globals, @@ -520,7 +521,11 @@ impl Rooms { self.edus .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; - self.pduid_pdu.insert(&pdu_id, &*pdu_json.to_string())?; + self.pduid_pdu.insert( + &pdu_id, + &*serde_json::to_string(pdu_json) + .expect("CanonicalJsonObject is always a valid String"), + )?; self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; @@ -863,8 +868,7 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - serde_json::json!(globals.server_name()) - .try_into() + to_canonical_value(globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); @@ -886,9 +890,7 @@ impl Rooms { pdu_json.insert( "event_id".to_owned(), - serde_json::json!(pdu.event_id) - .try_into() - .expect("EventId is a valid CanonicalJsonValue"), + to_canonical_value(&pdu.event_id).expect("EventId is a valid CanonicalJsonValue"), ); // Increment the last index and use that @@ -904,7 +906,7 @@ impl Rooms { self.append_pdu( &pdu, - &serde_json::json!(pdu_json), // TODO fixup CanonicalJsonValue + &pdu_json, count, pdu_id.clone().into(), globals, diff --git a/src/database/sending.rs b/src/database/sending.rs index 6b9e0fe..0be14f8 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -185,9 +185,7 @@ impl Sending { .iter() .map(|pdu_id| { Ok::<_, (Box, Error)>( - // TODO: this was a PduStub - // In order for sending to work these actually do have to be - // PduStub but, since they are Raw<..> we can fake it. 
+ // TODO: check room version and remove event_id if needed serde_json::from_str( PduEvent::convert_to_outgoing_federation_event( rooms diff --git a/src/server_server.rs b/src/server_server.rs index 89d8eb1..8d3de1e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,6 +20,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::{to_canonical_value, CanonicalJsonObject}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ @@ -431,12 +432,13 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { let (event_id, value) = process_incoming_pdu(pdu); - let pdu = serde_json::from_value::(value.clone()) + // TODO: this is an unfortunate conversion dance... + let pdu = serde_json::from_value::(serde_json::to_value(&value).expect("msg")) .expect("all ruma pdus are conduit pdus"); let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { + if !db.rooms.exists(room_id)? { error!("Room does not exist on this server."); resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; @@ -477,7 +479,7 @@ pub async fn send_transaction_message_route<'a>( // When creating a StateEvent the event_id arg will be used // over any found in the json and it will not use ruma::reference_hash // to generate one - state_res::StateEvent::from_id_value(event_id, json) + state_res::StateEvent::from_id_canon_obj(event_id, json) .expect("valid pdu json"), ), ) @@ -485,7 +487,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>(); if value.get("state_key").is_none() { - if !db.rooms.is_joined(&pdu.sender, &pdu.room_id)? { + if !db.rooms.is_joined(&pdu.sender, room_id)? { error!("Sender is not joined {}", pdu.kind); resolved_map.insert(event_id, Err("User is not in this room".into())); continue; @@ -750,7 +752,7 @@ pub fn get_user_devices_route<'a>( /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. 
-fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, serde_json::Value) { +fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, CanonicalJsonObject) { let mut value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); @@ -763,13 +765,8 @@ fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, se value.insert( "event_id".to_owned(), - serde_json::json!(event_id) - .try_into() - .expect("EventId is a valid CanonicalJsonValue"), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - ( - event_id, - serde_json::to_value(value).expect("JSON Value is a CanonicalJsonValue"), - ) + (event_id, value) } From c173ce43a5e6ca6abe0b23a590c124e85173593c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 30 Nov 2020 14:46:47 -0500 Subject: [PATCH 0356/1727] convert_to_outgoing_federation_event takes CanonicalJsonObj --- src/client_server/membership.rs | 5 +---- src/database/rooms.rs | 4 ++-- src/pdu.rs | 31 ++++++++++++++++--------------- 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d5c50e5..b5e4042 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -541,10 +541,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, - pdu_stub: PduEvent::convert_to_outgoing_federation_event( - serde_json::to_value(&join_event) - .expect("we just validated and ser/de this event"), - ), + pdu_stub: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 651a596..e7b6eaa 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -385,8 +385,8 @@ impl Rooms { }) } - /// Returns the pdu. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + /// Returns the pdu as a `BTreeMap`. 
+ pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) diff --git a/src/pdu.rs b/src/pdu.rs index effbc5d..cffd4a3 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -5,6 +5,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, + serde::{CanonicalJsonObject, CanonicalJsonValue}, EventId, Raw, RoomId, ServerKeyId, ServerName, UserId, }; use serde::{Deserialize, Serialize}; @@ -200,25 +201,25 @@ impl PduEvent { } pub fn convert_to_outgoing_federation_event( - mut pdu_json: serde_json::Value, + mut pdu_json: CanonicalJsonObject, ) -> Raw { - if let Some(unsigned) = pdu_json - .as_object_mut() - .expect("json is object") - .get_mut("unsigned") - { - unsigned - .as_object_mut() - .expect("unsigned is object") - .remove("transaction_id"); + if let Some(CanonicalJsonValue::Object(unsigned)) = pdu_json.get_mut("unsigned") { + unsigned.remove("transaction_id"); } - pdu_json - .as_object_mut() - .expect("json is object") - .remove("event_id"); + pdu_json.remove("event_id"); - serde_json::from_value::>(pdu_json).expect("Raw::from_value always works") + // TODO: another option would be to convert it to a canonical string to validate size + // and return a Result> + // serde_json::from_str::>( + // ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is valid serde_json::Value"), + // ) + // .expect("Raw::from_value always works") + + serde_json::from_value::>( + serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"), + ) + .expect("Raw::from_value always works") } } From b869aab5d03b0ec606d664939782f42ea57ce624 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 4 Dec 2020 17:16:29 -0500 Subject: [PATCH 0357/1727] Cleanup removing debug printing and logging, append non state events --- src/client_server/backup.rs | 7 +- src/database/rooms.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 35 ++++++++- src/ruma_wrapper.rs | 46 ++++++----- src/server_server.rs | 153 +++++++++++++----------------------- 6 files changed, 119 insertions(+), 130 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 676b5a3..0f34ba7 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -256,7 +256,12 @@ pub async fn get_backup_key_session_route( let key_data = db .key_backups .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? - .ok_or_else(|| Error::bad_database("Backup key not found for this user's session."))?; + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::NotFound, + "Backup key not found for this user's session.", + ) + })?; Ok(get_backup_key_session::Response { key_data }.into()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e7b6eaa..0618fd6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -41,7 +41,7 @@ pub type StateHashId = IVec; /// /// An events parent is any event we are aware of that is part of /// the events `prev_events` array. -pub enum ClosestParent { +pub(crate) enum ClosestParent { Append, Insert(u64), } @@ -417,7 +417,7 @@ impl Rooms { /// First we check if the last PDU inserted to the given room is a parent /// if not we recursively check older `prev_events` to insert the incoming /// event after. 
- pub fn get_closest_parent( + pub(crate) fn get_latest_pduid_before( &self, room: &RoomId, incoming_prev_ids: &[EventId], diff --git a/src/database/sending.rs b/src/database/sending.rs index 0be14f8..cd88e08 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, convert::TryFrom, time::SystemTime}; use crate::{server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::{debug, error}; +use log::{debug, warn}; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{api::federation, ServerName}; use sled::IVec; @@ -116,7 +116,7 @@ impl Sending { } } Err((server, e)) => { - error!("server: {}\nerror: {}", server, e) + warn!("Couldn't send transaction to {}: {}", server, e) // TODO: exponential backoff } }; diff --git a/src/pdu.rs b/src/pdu.rs index cffd4a3..e56e81a 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -5,12 +5,17 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{CanonicalJsonObject, CanonicalJsonValue}, - EventId, Raw, RoomId, ServerKeyId, ServerName, UserId, + serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, + EventId, Raw, RoomId, RoomVersionId, ServerKeyId, ServerName, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::UNIX_EPOCH}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, + time::UNIX_EPOCH, +}; #[derive(Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -279,6 +284,30 @@ impl PduEvent { } } +/// Generates a correct eventId for the incoming pdu. +/// +/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +pub(crate) fn process_incoming_pdu( + pdu: &ruma::Raw, +) -> (EventId, CanonicalJsonObject) { + let mut value = + serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + + (event_id, value) +} + /// Build the start of a PDU in order to add it to the `Database`. #[derive(Debug, Deserialize)] pub struct PduBuilder { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index a68b09d..4b3d08d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -65,31 +65,29 @@ where .await .expect("database was loaded"); - let (sender_user, sender_device) = - // TODO: Do we need to matches! anything else here? 
ServerSignatures - match T::METADATA.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - // Get token from header or query value - let token = match request - .headers() - .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) - { - // TODO: M_MISSING_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some(token) => token, - }; - - // Check if token is valid - match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), - } + let (sender_user, sender_device) = match T::METADATA.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + // Get token from header or query value + let token = match request + .headers() + .get_one("Authorization") + .map(|s| s[7..].to_owned()) // Split off "Bearer " + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) + { + // TODO: M_MISSING_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some(token) => token, + }; + + // Check if token is valid + match db.users.find_from_token(&token).unwrap() { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), } - _ => (None, None) - }; + } + _ => (None, None), + }; let mut http_request = http::Request::builder() .uri(request.uri().to_string()) diff --git a/src/server_server.rs b/src/server_server.rs index 8d3de1e..a7f6391 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,13 +20,13 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::{to_canonical_value, CanonicalJsonObject}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + EventId, RoomId, ServerName, UserId, }; use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, fmt::Debug, + sync::Arc, time::{Duration, SystemTime}, }; use trust_dns_resolver::AsyncResolver; @@ -415,8 +415,7 @@ pub async fn send_transaction_message_route<'a>( "m.receipt" => {} _ => {} }, - Err(err) => { - error!("{}", err); + Err(_err) => { continue; } } @@ -431,19 +430,53 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - let (event_id, value) = process_incoming_pdu(pdu); - // TODO: this is an unfortunate conversion dance... - let pdu = serde_json::from_value::(serde_json::to_value(&value).expect("msg")) - .expect("all ruma pdus are conduit pdus"); + // Ruma/PduEvent/StateEvent satifies - 1. Is a valid event, otherwise it is dropped. + + // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. + + // 3. Passes hash checks, otherwise it is redacted before being processed further. + // TODO: redact event if hashing fails + let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + + let pdu = serde_json::from_value::( + serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("all ruma pdus are conduit pdus"); let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU if !db.rooms.exists(room_id)? 
{ - error!("Room does not exist on this server."); resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } + // If it is not a state event, we can skip state-res + if value.get("state_key").is_none() { + if !db.rooms.is_joined(&pdu.sender, room_id)? { + warn!("Sender is not joined {}", pdu.kind); + resolved_map.insert(event_id, Err("User is not in this room".into())); + continue; + } + + let count = db.globals.next_count()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + resolved_map.insert(event_id, Ok::<(), String>(())); + continue; + } + + // We have a state event so we need info for state-res let get_state_response = match send_request( &db.globals, body.body.origin.clone(), @@ -461,7 +494,6 @@ pub async fn send_transaction_message_route<'a>( // As an example a possible error // {"errcode":"M_FORBIDDEN","error":"Host not in room."} Err(err) => { - error!("Request failed: {}", err); resolved_map.insert(event_id, Err(err.to_string())); continue; } @@ -472,10 +504,10 @@ pub async fn send_transaction_message_route<'a>( .iter() .chain(get_state_response.auth_chain.iter()) // add auth events .map(|pdu| { - let (event_id, json) = process_incoming_pdu(pdu); + let (event_id, json) = crate::pdu::process_incoming_pdu(pdu); ( event_id.clone(), - std::sync::Arc::new( + Arc::new( // When creating a StateEvent the event_id arg will be used // over any found in the json and it will not use ruma::reference_hash // to generate one @@ -486,65 +518,12 @@ pub async fn send_transaction_message_route<'a>( }) .collect::>(); - if value.get("state_key").is_none() { - if !db.rooms.is_joined(&pdu.sender, room_id)? { - error!("Sender is not joined {}", pdu.kind); - resolved_map.insert(event_id, Err("User is not in this room".into())); - continue; - } - - // If the event is older than the last event in pduid_pdu Tree then find the - // closest ancestor we know of and insert after the known ancestor by - // altering the known events pduid to = same roomID + same count bytes + 0x1 - // pushing a single byte every time a simple append cannot be done. - match db - .rooms - .get_closest_parent(room_id, &pdu.prev_events, &their_current_state)? - { - Some(ClosestParent::Append) => { - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - } - Some(ClosestParent::Insert(old_count)) => { - println!("INSERT PDU FOUND {}", old_count); - - let count = old_count; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - // Create a new count that is after old_count but before - // the pdu appended after - pdu_id.push(1); - - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - } - _ => panic!("Not a sequential event or no parents found"), - }; - resolved_map.insert(event_id, Ok::<(), String>(())); - continue; - } - let our_current_state = db.rooms.room_state_full(room_id)?; + // State resolution takes care of these checks + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // 5. 
Passes authorization rules based on the state at the event, otherwise it is rejected. + + // TODO: 6. Passes authorization rules based on the current state of the room, otherwise it is "soft failed". match state_res::StateResolution::resolve( room_id, &ruma::RoomVersionId::Version6, @@ -576,7 +555,7 @@ pub async fn send_transaction_message_route<'a>( // closest ancestor we know of and insert after the known ancestor by // altering the known events pduid to = same roomID + same count bytes + 0x1 // pushing a single byte every time a simple append cannot be done. - match db.rooms.get_closest_parent( + match db.rooms.get_latest_pduid_before( room_id, &pdu.prev_events, &their_current_state, @@ -598,8 +577,6 @@ pub async fn send_transaction_message_route<'a>( )?; } Some(ClosestParent::Insert(old_count)) => { - println!("INSERT STATE PDU FOUND {}", old_count); - let count = old_count; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -618,14 +595,16 @@ pub async fn send_transaction_message_route<'a>( &db.admin, )?; } - _ => panic!("Not a sequential event or no parents found"), + _ => { + error!("Not a sequential event or no parents found"); + continue; + } } resolved_map.insert(event_id, Ok::<(), String>(())); } // If the eventId is not found in the resolved state auth has failed Ok(_) => { - // TODO have state_res give the actual auth error in this case resolved_map.insert( event_id, Err("This event failed authentication, not found in resolved set".into()), @@ -637,7 +616,7 @@ pub async fn send_transaction_message_route<'a>( }; } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } #[cfg_attr( @@ -748,25 +727,3 @@ pub fn get_user_devices_route<'a>( .into()) } */ - -/// Generates a correct eventId for the incoming pdu. -/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. 
-fn process_incoming_pdu(pdu: &ruma::Raw) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); - - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - - (event_id, value) -} From 164b1633d8b9bfe0dfba03dca8703adf422d586b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 4 Dec 2020 18:16:17 -0500 Subject: [PATCH 0358/1727] Update ruma to latest, renamed server keys and removed PduStub --- Cargo.lock | 69 +++++++++++++++++---------------- Cargo.toml | 4 +- src/client_server/account.rs | 1 + src/client_server/config.rs | 2 +- src/client_server/directory.rs | 15 +++++-- src/client_server/membership.rs | 6 +-- src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 2 +- src/client_server/room.rs | 15 ++++--- src/client_server/sync.rs | 3 +- src/database/account_data.rs | 3 +- src/database/admin.rs | 5 ++- src/database/rooms.rs | 11 +++--- src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/error.rs | 6 +-- src/pdu.rs | 13 ++++--- src/push_rules.rs | 23 +++++++---- src/server_server.rs | 24 +++++++----- 19 files changed, 123 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afb3abf..efc34e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1210,6 +1210,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "paste" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" + [[package]] name = "pear" version = "0.2.0-dev" @@ -1611,7 +1617,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "assign", "js_int", @@ -1629,24 +1635,22 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "http", "percent-encoding", "ruma-api-macros", - "ruma-common", "ruma-identifiers", "ruma-serde", "serde", "serde_json", - "strum", "thiserror", ] [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1657,12 +1661,13 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" 
dependencies = [ "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", + "ruma-serde", "serde", "serde_json", ] @@ -1670,7 +1675,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "assign", "http", @@ -1684,38 +1689,25 @@ dependencies = [ "ruma-serde", "serde", "serde_json", - "strum", ] [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "js_int", - "ruma-common-macros", + "maplit", "ruma-identifiers", "ruma-serde", "serde", "serde_json", - "strum", -] - -[[package]] -name = "ruma-common-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", ] [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "js_int", "ruma-common", @@ -1724,13 +1716,12 @@ dependencies = [ "ruma-serde", "serde", "serde_json", - "strum", ] [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1741,7 +1732,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "js_int", "ruma-api", @@ -1756,8 +1747,9 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ + "paste", "rand", "ruma-identifiers-macros", "ruma-identifiers-validation", @@ -1769,7 +1761,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "proc-macro2", "quote", @@ -1780,28 +1772,39 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = 
"git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "serde", - "strum", ] [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "form_urlencoded", "itoa", "js_int", + "ruma-serde-macros", "serde", "serde_json", ] +[[package]] +name = "ruma-serde-macros" +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=c15382ca41262058302959eac4029ab4a1ea5889#c15382ca41262058302959eac4029ab4a1ea5889" +source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" dependencies = [ "base64 0.12.3", "ring", @@ -2059,7 +2062,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#d2a85669cc6056679ce6ca0fde4658a879ad2b08" +source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#99214e6fa6b9843b0d9e1f6ef0698d7fdb234fb2" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index e7b87fa..4902c7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ edition = "2018" rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "c15382ca41262058302959eac4029ab4a1ea5889" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "e8882fe8142d7b55ed4c8ccc6150946945f9e237" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { path = "../../state-res", features = ["unstable-pre-spec"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling tokio = "0.2.22" diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ab90de5..76bbebb 100644 --- 
a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -492,6 +492,7 @@ pub async fn register_route( body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), }), relates_to: None, + new_content: None, }, )) .expect("event is valid, we just created it"), diff --git a/src/client_server/config.rs b/src/client_server/config.rs index dd8de64..f1d233a 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -6,7 +6,7 @@ use ruma::{ r0::config::{get_global_account_data, set_global_account_data}, }, events::{custom::CustomEventContent, BasicEvent}, - Raw, + serde::Raw, }; #[cfg(feature = "conduit_bin")] diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index d8af2e3..559071a 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -20,7 +20,8 @@ use ruma::{ room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, EventType, }, - Raw, ServerName, + serde::Raw, + ServerName, }; #[cfg(feature = "conduit_bin")] @@ -83,7 +84,13 @@ pub async fn set_room_visibility_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - match body.visibility { + match &body.visibility { + room::Visibility::_Custom(_s) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room visibility type is not supported.", + )); + } room::Visibility::Public => { db.rooms.set_public(&body.room_id, true)?; info!("{} made {} public", sender_user, body.room_id); @@ -294,7 +301,9 @@ pub async fn get_public_rooms_filtered_helper( .url, ) }) - .transpose()?, + .transpose()? 
+ // url is now an Option so we must flatten + .flatten(), }; Ok(chunk) }) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b5e4042..6d3a690 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -18,8 +18,8 @@ use ruma::{ federation, }, events::{pdu::Pdu, room::member, EventType}, - serde::{to_canonical_value, CanonicalJsonObject}, - EventId, Raw, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, CanonicalJsonObject, Raw}, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::StateEvent; use std::{ @@ -541,7 +541,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, - pdu_stub: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index e597c69..15c746e 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -32,7 +32,7 @@ pub async fn set_presence_route( .try_into() .expect("time is valid"), ), - presence: body.presence, + presence: body.presence.clone(), status_msg: body.status_msg.clone(), }, sender: sender_user.clone(), diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 22d13cb..828d259 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -8,7 +8,7 @@ use ruma::{ }, }, events::EventType, - Raw, + serde::Raw, }; #[cfg(feature = "conduit_bin")] diff --git a/src/client_server/room.rs b/src/client_server/room.rs index a50f69c..f92fc8d 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -10,7 +10,8 @@ use ruma::{ room::{guest_access, history_visibility, join_rules, member, name, topic}, EventType, }, - Raw, RoomAliasId, RoomId, RoomVersionId, + serde::Raw, + RoomAliasId, RoomId, RoomVersionId, }; use std::{cmp::max, collections::BTreeMap, convert::TryFrom}; @@ -141,10 +142,14 @@ pub async fn create_room_route( // 4. Events set by preset // Figure out preset. 
We need it for preset specific events - let preset = body.preset.unwrap_or_else(|| match body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - }); + let preset = body + .preset + .clone() + .unwrap_or_else(|| match &body.visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + room::Visibility::_Custom(s) => create_room::RoomPreset::_Custom(s.into()), + }); // 4.1 Join Rules db.rooms.build_and_append_pdu( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 360691a..d7c24dc 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -3,7 +3,8 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, - Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; #[cfg(feature = "conduit_bin")] diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 9a6a050..855ebfe 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -2,7 +2,8 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, events::{AnyEvent as EduEvent, EventType}, - Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; use sled::IVec; diff --git a/src/database/admin.rs b/src/database/admin.rs index 87a60a1..10b6221 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -3,7 +3,10 @@ use std::convert::{TryFrom, TryInto}; use crate::pdu::PduBuilder; use log::warn; use rocket::futures::{channel::mpsc, stream::StreamExt}; -use ruma::{events::room::message, events::EventType, UserId}; +use ruma::{ + events::{room::message, EventType}, + UserId, +}; use tokio::select; pub enum AdminCommand { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0618fd6..fb139a6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -15,8 +15,8 @@ use ruma::{ }, EventType, }, - serde::{to_canonical_value, CanonicalJsonObject}, - EventId, Raw, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, CanonicalJsonObject, Raw}, + EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; @@ -102,7 +102,7 @@ impl StateStore for Rooms { .and_then(|pdu: StateEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option - if pdu.room_id() == Some(room_id) { + if pdu.room_id() == room_id { Ok(Arc::new(pdu)) } else { Err(StateError::NotFound( @@ -278,7 +278,7 @@ impl Rooms { for ((event_type, state_key), pdu_id) in state { let mut state_id = prefix.clone(); - state_id.extend_from_slice(&event_type.as_str().as_bytes()); + state_id.extend_from_slice(&event_type.as_ref().as_bytes()); state_id.push(0xff); state_id.extend_from_slice(&state_key.as_bytes()); self.stateid_pduid.insert(state_id, pdu_id)?; @@ -592,6 +592,7 @@ impl Rooms { body: format!("Command: {}, Args: {:?}", command, args), formatted: None, relates_to: None, + new_content: None, }, )); } @@ -633,7 +634,7 @@ impl Rooms { if let Some(state_key) = &new_pdu.state_key { let mut new_state = old_state; - let mut pdu_key = new_pdu.kind.as_str().as_bytes().to_vec(); + let mut pdu_key = 
new_pdu.kind.as_ref().as_bytes().to_vec(); pdu_key.push(0xff); pdu_key.extend_from_slice(state_key.as_bytes()); new_state.insert(pdu_key.into(), new_pdu_id.into()); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29f5407..bf0cdfc 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -6,7 +6,8 @@ use ruma::{ AnyEvent as EduEvent, SyncEphemeralRoomEvent, }, presence::PresenceState, - Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 885c041..0421ae2 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -10,7 +10,8 @@ use ruma::{ }, encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, Raw, UserId, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/error.rs b/src/error.rs index 4c24fd7..316ca74 100644 --- a/src/error.rs +++ b/src/error.rs @@ -143,11 +143,7 @@ impl log::Log for ConduitLogger { } self.db.admin.send(AdminCommand::SendTextMessage( - message::TextMessageEventContent { - body: output, - formatted: None, - relates_to: None, - }, + message::TextMessageEventContent::plain(output), )); } } diff --git a/src/pdu.rs b/src/pdu.rs index e56e81a..75ef492 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -5,8 +5,8 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, - EventId, Raw, RoomId, RoomVersionId, ServerKeyId, ServerName, UserId, + serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -36,7 +36,7 @@ pub struct PduEvent { #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] pub unsigned: serde_json::Map, pub hashes: EventHash, - pub signatures: BTreeMap, BTreeMap>, + pub signatures: BTreeMap, BTreeMap>, } impl PduEvent { @@ -205,9 +205,10 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + /// This does not return a full `Pdu` it is only to satisfy ruma's types. pub fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, - ) -> Raw { + ) -> Raw { if let Some(CanonicalJsonValue::Object(unsigned)) = pdu_json.get_mut("unsigned") { unsigned.remove("transaction_id"); } @@ -232,7 +233,7 @@ impl From<&state_res::StateEvent> for PduEvent { fn from(pdu: &state_res::StateEvent) -> Self { Self { event_id: pdu.event_id(), - room_id: pdu.room_id().unwrap().clone(), + room_id: pdu.room_id().clone(), sender: pdu.sender().clone(), origin_server_ts: (pdu .origin_server_ts() @@ -288,7 +289,7 @@ impl PduEvent { /// /// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. 
pub(crate) fn process_incoming_pdu( - pdu: &ruma::Raw, + pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let mut value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/push_rules.rs b/src/push_rules.rs index 32c709e..76a1a61 100644 --- a/src/push_rules.rs +++ b/src/push_rules.rs @@ -1,15 +1,18 @@ use ruma::{ push::{ - Action, ConditionalPushRule, ConditionalPushRuleInit, PatternedPushRule, - PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, + Action, ConditionalPushRule, ConditionalPushRuleInit, ContentPushRule, OverridePushRule, + PatternedPushRule, PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, + UnderridePushRule, }, UserId, }; pub fn default_pushrules(user_id: &UserId) -> Ruleset { let mut rules = Ruleset::default(); - rules.content = vec![contains_user_name_rule(&user_id)]; - rules.override_ = vec![ + + rules.add(ContentPushRule(contains_user_name_rule(&user_id))); + + for rule in vec![ master_rule(), suppress_notices_rule(), invite_for_me_rule(), @@ -17,14 +20,20 @@ pub fn default_pushrules(user_id: &UserId) -> Ruleset { contains_display_name_rule(), tombstone_rule(), roomnotif_rule(), - ]; - rules.underride = vec![ + ] { + rules.add(OverridePushRule(rule)); + } + + for rule in vec![ call_rule(), encrypted_room_one_to_one_rule(), room_one_to_one_rule(), message_rule(), encrypted_rule(), - ]; + ] { + rules.add(UnderridePushRule(rule)); + } + rules } diff --git a/src/server_server.rs b/src/server_server.rs index a7f6391..fe36e7c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -11,7 +11,8 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerKey, VerifyKey, + get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, + VerifyKey, }, event::get_missing_events, query::get_profile_information, @@ -20,11 +21,11 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, UserId, + EventId, RoomId, ServerName, ServerSigningKeyId, UserId, }; use std::{ collections::BTreeMap, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, sync::Arc, time::{Duration, SystemTime}, @@ -243,16 +244,17 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()) - .try_into() - .expect("DB stores valid ServerKeyId's"), + ServerSigningKeyId::try_from( + format!("ed25519:{}", db.globals.keypair().version()).as_str(), + ) + .expect("found invalid server signing keys in DB"), VerifyKey { key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, ); let mut response = serde_json::from_slice( http::Response::try_from(get_server_keys::v2::Response { - server_key: ServerKey { + server_key: ServerSigningKeys { server_name: db.globals.server_name().to_owned(), verify_keys, old_verify_keys: BTreeMap::new(), @@ -430,7 +432,7 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satifies - 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. 
@@ -450,7 +452,7 @@ pub async fn send_transaction_message_route<'a>( continue; } - // If it is not a state event, we can skip state-res + // If it is not a state event, we can skip state-res... maybe if value.get("state_key").is_none() { if !db.rooms.is_joined(&pdu.sender, room_id)? { warn!("Sender is not joined {}", pdu.kind); @@ -679,7 +681,9 @@ pub fn get_profile_information_route<'a>( let mut displayname = None; let mut avatar_url = None; - match body.field { + match &body.field { + // TODO: what to do with custom + Some(ProfileField::_Custom(_s)) => {} Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, None => { From 45086b54b35de7ec22beb6fbe5cdfed31465bc0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 5 Dec 2020 21:03:43 +0100 Subject: [PATCH 0359/1727] improvement: upgrade dependencies, fix timeline reload bug --- Cargo.lock | 168 ++++++++++++++++++++++++---------------- Cargo.toml | 28 +++---- src/database.rs | 63 +++++++++------ src/database/admin.rs | 2 - src/database/globals.rs | 37 +++------ src/main.rs | 14 ++-- src/ruma_wrapper.rs | 5 +- src/server_server.rs | 33 ++++++-- 8 files changed, 202 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efc34e4..5062b8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,9 +61,12 @@ dependencies = [ [[package]] name = "atomic" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" +checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +dependencies = [ + "autocfg", +] [[package]] name = "atty" @@ -163,9 +166,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15" +checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" [[package]] name = "cfg-if" @@ -192,15 +195,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "cloudabi" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" -dependencies = [ - "bitflags", -] - [[package]] name = "color_quant" version = "1.1.0" @@ -211,7 +205,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "directories", "http", "image", @@ -256,9 +250,8 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cookie" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784ad0fbab4f3e9cef09f20e0aea6000ae08d2cb98ac4c0abc53df18803d702f" +version = "0.15.0-dev" +source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", "time 0.2.23", @@ -328,7 +321,7 @@ dependencies = [ [[package]] name = "devise" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" +source = 
"git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" dependencies = [ "devise_codegen", "devise_core", @@ -337,7 +330,7 @@ dependencies = [ [[package]] name = "devise_codegen" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" dependencies = [ "devise_core", "quote", @@ -346,7 +339,7 @@ dependencies = [ [[package]] name = "devise_core" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=1e42a2691#1e42a2691ef9934a446b8ed0ca1c4c8cf283f8bf" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" dependencies = [ "bitflags", "proc-macro2", @@ -408,6 +401,19 @@ dependencies = [ "syn", ] +[[package]] +name = "figment" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +dependencies = [ + "pear", + "serde", + "toml", + "uncased", + "version_check", +] + [[package]] name = "fnv" version = "1.0.7" @@ -846,9 +852,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.45" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" +checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" dependencies = [ "wasm-bindgen", ] @@ -1009,9 +1015,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ "cfg-if 0.1.10", "fuchsia-zircon", @@ -1197,12 +1203,11 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", + "cfg-if 1.0.0", "instant", "libc", "redox_syscall", @@ -1218,8 +1223,9 @@ checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" [[package]] name = "pear" -version = "0.2.0-dev" -source = "git+https://github.com/SergioBenitez/Pear.git?rev=4b68055#4b680556063568a42fcd4328335cdfdf7608be49" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f612cbd0f9dd03f5dd28a191c48e4148c3b027e41207b32eee130373c6c941" dependencies = [ "inlinable_string", "pear_codegen", @@ -1228,8 +1234,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.0-dev" -source = "git+https://github.com/SergioBenitez/Pear.git?rev=4b68055#4b680556063568a42fcd4328335cdfdf7608be49" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602cf1780ee9bbca663ea75769e05643e16fe87d7c8ac9f4f385a2ed8940a75c" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1357,8 +1364,9 @@ dependencies = [ [[package]] name = "proc-macro2-diagnostics" -version = "0.1.0" -source = 
"git+https://github.com/SergioBenitez/proc-macro2-diagnostics.git?rev=13fbb43#13fbb43db72034b6f9660a9b00e338cebd8dcf44" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" dependencies = [ "proc-macro2", "quote", @@ -1542,9 +1550,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.18" +version = "0.16.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70017ed5c555d79ee3538fc63ca09c70ad8f317dcadc1adc2c496b60c22bb24f" +checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" dependencies = [ "cc", "libc", @@ -1558,24 +1566,28 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" dependencies = [ "async-trait", "atomic", "atty", "binascii", + "either", + "figment", "futures", "log", "memchr", "num_cpus", - "pear", + "parking_lot", + "rand", "ref-cast", "rocket_codegen", "rocket_http", + "serde", "state", "time 0.2.23", "tokio", - "toml", + "ubyte", "version_check", "yansi", ] @@ -1583,7 +1595,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" dependencies = [ "devise", "glob", @@ -1595,14 +1607,16 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/timokoesters/Rocket.git?branch=empty_parameters#f6d40ecd5d871d97837b3116eb670fb3c06d95b9" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" dependencies = [ "cookie", + "either", "http", "hyper", "indexmap", "log", "mime", + "parking_lot", "pear", "percent-encoding", "ref-cast", @@ -1611,7 +1625,9 @@ dependencies = [ "time 0.2.23", "tokio", "tokio-rustls", + "uncased", "unicode-xid", + "version_check", ] [[package]] @@ -1952,9 +1968,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" dependencies = [ "itoa", "ryu", @@ -2022,9 +2038,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" +checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" @@ -2480,6 +2496,24 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "ubyte" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" +dependencies = [ + "serde", +] + +[[package]] +name = 
"uncased" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "369fa7fd7969c5373541d3c9a40dc1b76ce676fc87aba30d87c0ad3b97fad179" +dependencies = [ + "version_check", +] + [[package]] name = "unicase" version = "2.6.0" @@ -2573,11 +2607,11 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" +checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -2585,9 +2619,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" +checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" dependencies = [ "bumpalo", "lazy_static", @@ -2600,11 +2634,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" +checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -2612,9 +2646,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" +checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2622,9 +2656,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" +checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" dependencies = [ "proc-macro2", "quote", @@ -2635,15 +2669,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" +checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" [[package]] name = "wasm-bindgen-test" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d1cdc8b98a557f24733d50a1199c4b0635e465eecba9c45b214544da197f64" +checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" dependencies = [ "console_error_panic_hook", "js-sys", @@ -2655,9 +2689,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fb9c67be7439ee8ab1b7db502a49c05e51e2835b66796c705134d9b8e1a585" +checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" dependencies = [ "proc-macro2", "quote", @@ -2665,9 +2699,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.45" 
+version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" +checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2675,9 +2709,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", diff --git a/Cargo.toml b/Cargo.toml index 4902c7c..b1dec17 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,8 +14,8 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -#rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "8d779caa22c63b15a6c3ceb75d8f6d4971b2eb67", default-features = false, features = ["tls"] } # Used to handle requests -rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +#rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "e8882fe8142d7b55ed4c8ccc6150946945f9e237" } @@ -27,10 +27,10 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } -# Used for long polling -tokio = "0.2.22" +# Used for long polling and federation sender, should be the same as rocket::tokio +tokio = { version = "0.2.23" } # Used for storing data permanently -sled = { version = "0.34.4", default-features = false } +sled = { version = "0.34.6", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions @@ -40,25 +40,25 @@ directories = "3.0.1" # Used for number types for ruma js_int = "0.1.9" # Used for ruma wrapper -serde_json = { version = "1.0.57", features = ["raw_value"] } +serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for pdu definition -serde = "1.0.116" +serde = "1.0.117" # Used for secure identifiers rand = "0.7.3" # Used to hash passwords -rust-argon2 = "0.8.2" +rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.8" +reqwest = "0.10.9" # Used for conduit::Error type -thiserror = "1.0.20" +thiserror = "1.0.22" # Used to generate thumbnails for images -image = { version = "0.23.9", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.23.12", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key -base64 = "0.12.3" +base64 = "0.13.0" # Used when hashing the state -ring = "0.16.15" +ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.5" 
+trust-dns-resolver = "0.19.6" [features] default = ["conduit_bin"] diff --git a/src/database.rs b/src/database.rs index 576a250..406ce77 100644 --- a/src/database.rs +++ b/src/database.rs @@ -13,12 +13,39 @@ use crate::{Error, Result}; use directories::ProjectDirs; use futures::StreamExt; use log::info; -use rocket::{ - futures::{self, channel::mpsc}, - Config, -}; -use ruma::{DeviceId, UserId}; -use std::{convert::TryFrom, fs::remove_dir_all}; +use rocket::futures::{self, channel::mpsc}; +use ruma::{DeviceId, ServerName, UserId}; +use serde::Deserialize; +use std::{convert::TryInto, fs::remove_dir_all}; + +#[derive(Clone, Deserialize)] +pub struct Config { + #[serde(default = "default_server_name")] + server_name: Box, + database_path: Option, + #[serde(default = "default_cache_capacity")] + cache_capacity: u64, + #[serde(default = "default_max_request_size")] + max_request_size: u32, + #[serde(default)] + registration_disabled: bool, + #[serde(default)] + encryption_disabled: bool, + #[serde(default)] + federation_enabled: bool, +} + +fn default_server_name() -> Box { + "localhost".try_into().expect("") +} + +fn default_cache_capacity() -> u64 { + 1024 * 1024 * 1024 +} + +fn default_max_request_size() -> u32 { + 20 * 1024 * 1024 // Default to 20 MB +} #[derive(Clone)] pub struct Database { @@ -49,19 +76,18 @@ impl Database { } /// Load an existing database or create a new one. - pub fn load_or_create(config: &Config) -> Result { - let server_name = config.get_str("server_name").unwrap_or("localhost"); - + pub fn load_or_create(config: Config) -> Result { let path = config - .get_str("database_path") - .map(|x| Ok::<_, Error>(x.to_owned())) - .unwrap_or_else(|_| { + .database_path + .clone() + .map(Ok::<_, Error>) + .unwrap_or_else(|| { let path = ProjectDirs::from("xyz", "koesters", "conduit") .ok_or_else(|| { Error::bad_config("The OS didn't return a valid home directory path.") })? 
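// Editor's note: illustrative sketch, not part of the patch. It demonstrates how
// the new serde-based `Config` above fills in missing values through its
// `#[serde(default = "...")]` functions; in the patch the struct is extracted from
// Rocket's Figment (`rocket.figment().extract()`), while this sketch feeds it a
// hypothetical TOML string and simplifies `Box<ServerName>` to `String`.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct SketchConfig {
    #[serde(default = "default_server_name")]
    server_name: String,
    #[serde(default = "default_max_request_size")]
    max_request_size: u32,
    #[serde(default)]
    federation_enabled: bool,
}

fn default_server_name() -> String {
    "localhost".to_owned()
}

fn default_max_request_size() -> u32 {
    20 * 1024 * 1024 // 20 MB, mirroring the default in the patch
}

fn main() {
    // Only `server_name` is provided; the other fields fall back to their defaults.
    let config: SketchConfig = toml::from_str(r#"server_name = "example.com""#).unwrap();
    assert_eq!(config.max_request_size, 20 * 1024 * 1024);
    assert!(!config.federation_enabled);
    println!("{:?}", config);
}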
.data_dir() - .join(server_name); + .join(config.server_name.as_str()); Ok(path .to_str() @@ -71,15 +97,8 @@ impl Database { let db = sled::Config::default() .path(&path) - .cache_capacity( - u64::try_from( - config - .get_int("cache_capacity") - .unwrap_or(1024 * 1024 * 1024), - ) - .map_err(|_| Error::bad_config("Cache capacity needs to be a u64."))?, - ) - .print_profile_on_drop(false) + .cache_capacity(config.cache_capacity) + .print_profile_on_drop(true) .open()?; info!("Opened sled database at {}", path); diff --git a/src/database/admin.rs b/src/database/admin.rs index 10b6221..778796f 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -49,8 +49,6 @@ impl Admin { Some(event) = receiver.next() => { match event { AdminCommand::SendTextMessage(message) => { - println!("{:?}", message); - if let Some(conduit_room) = &conduit_room { db.rooms.build_and_append_pdu( PduBuilder { diff --git a/src/database/globals.rs b/src/database/globals.rs index 359d064..403fadd 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,7 @@ -use crate::{utils, Error, Result}; +use crate::{database::Config, utils, Error, Result}; use log::error; use ruma::ServerName; -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; pub const COUNTER: &str = "c"; @@ -10,15 +10,11 @@ pub struct Globals { pub(super) globals: sled::Tree, keypair: Arc, reqwest_client: reqwest::Client, - server_name: Box, - max_request_size: u32, - registration_disabled: bool, - encryption_disabled: bool, - federation_enabled: bool, + config: Config, } impl Globals { - pub fn load(globals: sled::Tree, config: &rocket::Config) -> Result { + pub fn load(globals: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
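// Editor's note: illustrative sketch, not part of the patch. It spells out the
// "create once, then reuse" idiom behind the `update_and_fetch("keypair", ...)`
// call in the surrounding context: the closure only produces a new value while
// the key is still empty, so the stored keypair survives restarts. The generated
// bytes here are a placeholder, not real key material.
fn load_or_init_keypair(tree: &sled::Tree) -> sled::Result<sled::IVec> {
    let value = tree.update_and_fetch("keypair", |old| match old {
        Some(existing) => Some(existing.to_vec()),          // keep what is already stored
        None => Some(b"generated keypair bytes".to_vec()),  // first run only
    })?;
    Ok(value.expect("the closure above always returns Some"))
}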
.expect("utils::generate_keypair always returns Some"); @@ -57,20 +53,7 @@ impl Globals { globals, keypair: Arc::new(keypair), reqwest_client: reqwest::Client::new(), - server_name: config - .get_str("server_name") - .unwrap_or("localhost") - .to_string() - .try_into() - .map_err(|_| Error::bad_config("Invalid server_name."))?, - max_request_size: config - .get_int("max_request_size") - .unwrap_or(20 * 1024 * 1024) // Default to 20 MB - .try_into() - .map_err(|_| Error::bad_config("Invalid max_request_size."))?, - registration_disabled: config.get_bool("registration_disabled").unwrap_or(false), - encryption_disabled: config.get_bool("encryption_disabled").unwrap_or(false), - federation_enabled: config.get_bool("federation_enabled").unwrap_or(false), + config, }) } @@ -102,22 +85,22 @@ impl Globals { } pub fn server_name(&self) -> &ServerName { - self.server_name.as_ref() + self.config.server_name.as_ref() } pub fn max_request_size(&self) -> u32 { - self.max_request_size + self.config.max_request_size } pub fn registration_disabled(&self) -> bool { - self.registration_disabled + self.config.registration_disabled } pub fn encryption_disabled(&self) -> bool { - self.encryption_disabled + self.config.encryption_disabled } pub fn federation_enabled(&self) -> bool { - self.federation_enabled + self.config.federation_enabled } } diff --git a/src/main.rs b/src/main.rs index f2edc13..75b74cc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -21,7 +21,7 @@ use rocket::{fairing::AdHoc, routes}; fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger - std::env::set_var("ROCKET_LOG", "off"); + std::env::set_var("ROCKET_LOG_LEVEL", "off"); rocket::ignite() .mount( @@ -123,9 +123,9 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_pushers_route, client_server::set_pushers_route, client_server::upgrade_room_route, - server_server::get_server_version, - server_server::get_server_keys, - server_server::get_server_keys_deprecated, + server_server::get_server_version_route, + server_server::get_server_keys_route, + server_server::get_server_keys_deprecated_route, server_server::get_public_rooms_route, server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, @@ -133,8 +133,10 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_profile_information_route, ], ) - .attach(AdHoc::on_attach("Config", |mut rocket| async { - let data = Database::load_or_create(rocket.config().await).expect("valid config"); + .attach(AdHoc::on_attach("Config", |rocket| async { + let data = + Database::load_or_create(rocket.figment().extract().expect("config is valid")) + .expect("config is valid"); data.sending.start_handler(&data.globals, &data.rooms); log::set_boxed_logger(Box::new(ConduitLogger { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4b3d08d..9597ac8 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -15,7 +15,8 @@ use { log::warn, rocket::{ data::{ - Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, Transformed, + ByteUnit, Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, + Transformed, }, http::Status, outcome::Outcome::*, @@ -97,7 +98,7 @@ where } let limit = db.globals.max_request_size(); - let mut handle = data.open().take(limit.into()); + let mut handle = data.open(ByteUnit::Byte(limit.into())); let mut body = Vec::new(); handle.read_to_end(&mut body).await.unwrap(); diff --git a/src/server_server.rs b/src/server_server.rs index fe36e7c..da046d3 100644 --- 
a/src/server_server.rs +++ b/src/server_server.rs @@ -193,6 +193,8 @@ where } } + let status = reqwest_response.status(); + let body = reqwest_response .bytes() .await @@ -201,17 +203,27 @@ where Vec::new().into() }) // TODO: handle timeout .into_iter() - .collect(); + .collect::>(); + + if status != 200 { + warn!( + "Server returned bad response {} ({}): {} {:?}", + destination, + url, + status, + utils::string_from_bytes(&body) + ); + } let response = T::IncomingResponse::try_from( http_response .body(body) .expect("reqwest body is valid http body"), ); - response.map_err(|e| { + response.map_err(|_| { warn!( - "Server returned bad response {} ({}): {:?}", - destination, url, e + "Server returned invalid response bytes {} ({})", + destination, url ); Error::BadServerResponse("Server returned bad response.") }) @@ -221,7 +233,9 @@ where } #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] -pub fn get_server_version(db: State<'_, Database>) -> ConduitResult { +pub fn get_server_version_route( + db: State<'_, Database>, +) -> ConduitResult { if !db.globals.federation_enabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -236,7 +250,7 @@ pub fn get_server_version(db: State<'_, Database>) -> ConduitResult) -> Json { +pub fn get_server_keys_route(db: State<'_, Database>) -> Json { if !db.globals.federation_enabled() { // TODO: Use proper types return Json("Federation is disabled.".to_owned()); @@ -278,8 +292,8 @@ pub fn get_server_keys(db: State<'_, Database>) -> Json { } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] -pub fn get_server_keys_deprecated(db: State<'_, Database>) -> Json { - get_server_keys(db) +pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json { + get_server_keys_route(db) } #[cfg_attr( @@ -464,6 +478,9 @@ pub async fn send_transaction_message_route<'a>( let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, &pdu)?; + db.rooms.append_pdu( &pdu, &value, From d62f17a91a6647fd7b5bd503ddcd08e6a7e3e951 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 6 Dec 2020 11:05:51 +0100 Subject: [PATCH 0360/1727] improvement: cache actual destination --- src/database.rs | 4 +- src/database/globals.rs | 19 +++++++-- src/main.rs | 1 + src/server_server.rs | 85 ++++++++++++++++++++++++++--------------- 4 files changed, 74 insertions(+), 35 deletions(-) diff --git a/src/database.rs b/src/database.rs index 406ce77..4905070 100644 --- a/src/database.rs +++ b/src/database.rs @@ -76,7 +76,7 @@ impl Database { } /// Load an existing database or create a new one. 
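// Editor's note: illustrative sketch, not part of the patch. It isolates the
// caching pattern this commit ("cache actual destination") introduces around
// server discovery: look the destination up under a read lock first, and only
// resolve and insert under a write lock on a miss. The real code keys the map by
// `Box<ServerName>` and stores it in `Globals::actual_destination_cache`; this
// sketch uses `String` keys and a dummy resolver step.
use std::collections::HashMap;
use std::sync::RwLock;

type Destination = (String, Option<String>); // (actual_destination, host header)

fn cached_destination(cache: &RwLock<HashMap<String, Destination>>, server: &str) -> Destination {
    // Fast path: clone a cached entry while holding only the read lock.
    if let Some(hit) = cache.read().unwrap().get(server).cloned() {
        return hit;
    }
    // Slow path: resolve (well-known delegation + SRV lookup in the real code), then cache it.
    let resolved: Destination = (format!("https://{}:8448", server), None);
    cache.write().unwrap().insert(server.to_owned(), resolved.clone());
    resolved
}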
- pub fn load_or_create(config: Config) -> Result { + pub async fn load_or_create(config: Config) -> Result { let path = config .database_path .clone() @@ -106,7 +106,7 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config)?, + globals: globals::Globals::load(db.open_tree("global")?, config).await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 403fadd..1221609 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,20 +1,25 @@ use crate::{database::Config, utils, Error, Result}; +use trust_dns_resolver::TokioAsyncResolver; +use std::collections::HashMap; use log::error; use ruma::ServerName; use std::sync::Arc; +use std::sync::RwLock; pub const COUNTER: &str = "c"; #[derive(Clone)] pub struct Globals { pub(super) globals: sled::Tree, + config: Config, keypair: Arc, reqwest_client: reqwest::Client, - config: Config, + pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + dns_resolver: TokioAsyncResolver, } impl Globals { - pub fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load(globals: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -51,9 +56,13 @@ impl Globals { Ok(Self { globals, + config, keypair: Arc::new(keypair), reqwest_client: reqwest::Client::new(), - config, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().await.map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, + actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), }) } @@ -103,4 +112,8 @@ impl Globals { pub fn federation_enabled(&self) -> bool { self.config.federation_enabled } + + pub fn dns_resolver(&self) -> &TokioAsyncResolver { + &self.dns_resolver + } } diff --git a/src/main.rs b/src/main.rs index 75b74cc..58d3427 100644 --- a/src/main.rs +++ b/src/main.rs @@ -136,6 +136,7 @@ fn setup_rocket() -> rocket::Rocket { .attach(AdHoc::on_attach("Config", |rocket| async { let data = Database::load_or_create(rocket.figment().extract().expect("config is valid")) + .await .expect("config is valid"); data.sending.start_handler(&data.globals, &data.rooms); diff --git a/src/server_server.rs b/src/server_server.rs index da046d3..58dd872 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -30,7 +30,6 @@ use std::{ sync::Arc, time::{Duration, SystemTime}, }; -use trust_dns_resolver::AsyncResolver; pub async fn request_well_known( globals: &crate::database::globals::Globals, @@ -66,36 +65,26 @@ where return Err(Error::bad_config("Federation is disabled.")); } - let resolver = AsyncResolver::tokio_from_system_conf().await.map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?; + let maybe_result = globals + .actual_destination_cache + .read() + .unwrap() + .get(&destination) + .cloned(); - let mut host = None; - - let actual_destination = "https://".to_owned() - + &if let Some(mut delegated_hostname) = - request_well_known(globals, &destination.as_str()).await - { - if let Ok(Some(srv)) = resolver - .srv_lookup(format!("_matrix._tcp.{}", delegated_hostname)) - .await - .map(|srv| srv.iter().next().map(|result| result.target().to_string())) - { - host = 
Some(delegated_hostname); - srv.trim_end_matches('.').to_owned() - } else { - if delegated_hostname.find(':').is_none() { - delegated_hostname += ":8448"; - } - delegated_hostname - } - } else { - let mut destination = destination.as_str().to_owned(); - if destination.find(':').is_none() { - destination += ":8448"; - } - destination - }; + let (actual_destination, host) = if let Some(result) = maybe_result { + println!("Loaded {} -> {:?}", destination, result); + result + } else { + let result = find_actual_destination(globals, &destination).await; + globals + .actual_destination_cache + .write() + .unwrap() + .insert(destination.clone(), result.clone()); + println!("Saving {} -> {:?}", destination, result); + result + }; let mut http_request = request .try_into_http_request(&actual_destination, Some("")) @@ -232,6 +221,42 @@ where } } +/// Returns: actual_destination, host header +async fn find_actual_destination( + globals: &crate::database::globals::Globals, + destination: &Box, +) -> (String, Option) { + let mut host = None; + + let actual_destination = "https://".to_owned() + + &if let Some(mut delegated_hostname) = + request_well_known(globals, destination.as_str()).await + { + if let Ok(Some(srv)) = globals + .dns_resolver() + .srv_lookup(format!("_matrix._tcp.{}", delegated_hostname)) + .await + .map(|srv| srv.iter().next().map(|result| result.target().to_string())) + { + host = Some(delegated_hostname); + srv.trim_end_matches('.').to_owned() + } else { + if delegated_hostname.find(':').is_none() { + delegated_hostname += ":8448"; + } + delegated_hostname + } + } else { + let mut destination = destination.as_str().to_owned(); + if destination.find(':').is_none() { + destination += ":8448"; + } + destination + }; + + (actual_destination, host) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] pub fn get_server_version_route( db: State<'_, Database>, From 6e5b35ea92075cdc9fc62db0f7f946ae6b80d76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 8 Dec 2020 10:33:44 +0100 Subject: [PATCH 0361/1727] feat: implement appservices this also reverts some stateres changes --- Cargo.lock | 247 ++++++++------------------------ Cargo.toml | 8 +- src/appservice_server.rs | 104 ++++++++++++++ src/client_server/account.rs | 35 +++-- src/client_server/alias.rs | 41 ++++-- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 6 + src/client_server/message.rs | 3 +- src/client_server/profile.rs | 2 + src/client_server/redact.rs | 1 + src/client_server/room.rs | 15 ++ src/client_server/state.rs | 3 +- src/client_server/to_device.rs | 2 +- src/database.rs | 8 ++ src/database/admin.rs | 61 +++++--- src/database/appservice.rs | 67 +++++++++ src/database/globals.rs | 12 +- src/database/media.rs | 7 +- src/database/rooms.rs | 122 +++++++--------- src/database/sending.rs | 240 +++++++++++++++++++++---------- src/database/transaction_ids.rs | 8 +- src/error.rs | 4 +- src/lib.rs | 1 + src/main.rs | 4 +- src/ruma_wrapper.rs | 88 +++++++++--- src/server_server.rs | 189 +++--------------------- 26 files changed, 696 insertions(+), 584 deletions(-) create mode 100644 src/appservice_server.rs create mode 100644 src/database/appservice.rs diff --git a/Cargo.lock b/Cargo.lock index 5062b8c..6566b10 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,15 +21,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "ansi_term" 
-version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "arrayref" version = "0.3.6" @@ -182,19 +173,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time 0.1.44", - "winapi 0.3.9", -] - [[package]] name = "color_quant" version = "1.1.0" @@ -219,6 +197,7 @@ dependencies = [ "rust-argon2", "serde", "serde_json", + "serde_yaml", "sled", "state-res", "thiserror", @@ -254,7 +233,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time", "version_check", ] @@ -374,6 +353,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "dtoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" + [[package]] name = "either" version = "1.6.1" @@ -575,19 +560,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generator" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - [[package]] name = "getrandom" version = "0.1.15" @@ -596,7 +568,7 @@ checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if 0.1.10", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -886,9 +858,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" +checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] name = "linked-hash-map" @@ -914,19 +886,6 @@ dependencies = [ "cfg-if 0.1.10", ] -[[package]] -name = "loom" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" -dependencies = [ - "cfg-if 0.1.10", - "generator", - "scoped-tls", - "serde", - "serde_json", -] - [[package]] name = "lru-cache" version = "0.1.2" @@ -948,15 +907,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - [[package]] name = "matches" version = "0.1.8" @@ -1468,31 +1418,6 @@ dependencies = [ "syn", ] -[[package]] -name = "regex" -version = "1.4.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies = [ - "byteorder", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" - [[package]] name = "remove_dir_all" version = "0.5.3" @@ -1585,7 +1510,7 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.23", + "time", "tokio", "ubyte", "version_check", @@ -1622,7 +1547,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.23", + "time", "tokio", "tokio-rustls", "uncased", @@ -1633,7 +1558,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "assign", "js_int", @@ -1651,7 +1576,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "http", "percent-encoding", @@ -1666,7 +1591,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1677,7 +1602,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "ruma-api", "ruma-common", @@ -1691,7 +1616,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "assign", "http", @@ -1710,7 +1635,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "js_int", "maplit", @@ -1723,7 +1648,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "js_int", "ruma-common", @@ -1737,7 +1662,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1748,7 +1673,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "js_int", "ruma-api", @@ -1763,7 +1688,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "paste", "rand", @@ -1777,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "proc-macro2", "quote", @@ -1788,7 +1713,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "serde", ] @@ -1796,7 +1721,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "form_urlencoded", "itoa", @@ -1809,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1820,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=e8882fe8142d7b55ed4c8ccc6150946945f9e237#e8882fe8142d7b55ed4c8ccc6150946945f9e237" +source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "base64 0.12.3", "ring", @@ -1948,18 +1873,18 @@ checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" dependencies = [ "proc-macro2", "quote", @@ -1989,22 +1914,24 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +dependencies = [ + "dtoa", + "linked-hash-map", + "serde", + "yaml-rust", +] + [[package]] name = "sha1" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" -[[package]] -name = "sharded-slab" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" -dependencies = [ - "lazy_static", - "loom", -] - [[package]] name = "signal-hook-registry" version = "1.2.2" @@ -2078,17 +2005,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#99214e6fa6b9843b0d9e1f6ef0698d7fdb234fb2" +source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" dependencies = [ "itertools", - "js_int", "maplit", "ruma", "serde", "serde_json", "thiserror", "tracing", - "tracing-subscriber", ] [[package]] @@ -2163,9 +2088,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.53" +version = "1.0.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8833e20724c24de12bbaba5ad230ea61c3eafb05b881c7c9d3cfe8638b187e68" +checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" dependencies = [ "proc-macro2", "quote", @@ -2206,26 +2131,6 @@ dependencies = [ "syn", ] -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi 0.3.9", -] - [[package]] name = "time" version = "0.2.23" @@ -2407,49 +2312,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tracing-log" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - [[package]] name = "trust-dns-proto" version = "0.19.6" @@ -2599,12 +2461,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasm-bindgen" version = "0.2.69" @@ -2791,6 +2647,15 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "yaml-rust" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "yansi" version = "0.5.0" diff --git a/Cargo.toml b/Cargo.toml index b1dec17..1e4afe2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "e8882fe8142d7b55ed4c8ccc6150946945f9e237" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -# state-res = { git = "https://github.com/timokoesters/state-res", branch = "spec-comp", features = ["unstable-pre-spec"] } +# state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } @@ -41,6 +41,8 @@ directories = "3.0.1" js_int = "0.1.9" # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } +# Used for appservice 
registration files +serde_yaml = "0.8.14" # Used for pdu definition serde = "1.0.117" # Used for secure identifiers diff --git a/src/appservice_server.rs b/src/appservice_server.rs new file mode 100644 index 0000000..f1436e0 --- /dev/null +++ b/src/appservice_server.rs @@ -0,0 +1,104 @@ +use crate::{utils, Error, Result}; +use http::header::{HeaderValue, CONTENT_TYPE}; +use log::warn; +use ruma::api::OutgoingRequest; +use std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + time::Duration, +}; + +pub async fn send_request( + globals: &crate::database::globals::Globals, + registration: serde_yaml::Value, + request: T, +) -> Result +where + T: Debug, +{ + let destination = registration.get("url").unwrap().as_str().unwrap(); + let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); + + let mut http_request = request + .try_into_http_request(&destination, Some("")) + .unwrap(); + + let mut parts = http_request.uri().clone().into_parts(); + let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); + let symbol = if old_path_and_query.contains("?") { + "&" + } else { + "?" + }; + + parts.path_and_query = Some( + (old_path_and_query + symbol + "access_token=" + hs_token) + .parse() + .unwrap(), + ); + *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); + + http_request.headers_mut().insert( + CONTENT_TYPE, + HeaderValue::from_str("application/json").unwrap(), + ); + + let mut reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); + + let url = reqwest_request.url().clone(); + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + + // Because reqwest::Response -> http::Response is complicated: + match reqwest_response { + Ok(mut reqwest_response) => { + let status = reqwest_response.status(); + let mut http_response = http::Response::builder().status(status); + let headers = http_response.headers_mut().unwrap(); + + for (k, v) in reqwest_response.headers_mut().drain() { + if let Some(key) = k { + headers.insert(key, v); + } + } + + let status = reqwest_response.status(); + + let body = reqwest_response + .bytes() + .await + .unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }) // TODO: handle timeout + .into_iter() + .collect::>(); + + if status != 200 { + warn!( + "Server returned bad response {} ({}): {} {:?}", + destination, + url, + status, + utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from( + http_response + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + warn!( + "Server returned invalid response bytes {} ({})", + destination, url + ); + Error::BadServerResponse("Server returned bad response.") + }) + } + Err(e) => Err(e.into()), + } +} diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 76bbebb..8fb926e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -139,18 +139,20 @@ pub async fn register_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - db.uiaa - .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; - if !worked { + if !body.from_appservice { + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = + db.uiaa + .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); 
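// Editor's note: illustrative sketch, not part of the patch. It isolates the URL
// handling from the new `appservice_server::send_request` above: the homeserver
// authenticates to an appservice by appending the registration's `hs_token` as an
// `access_token` query parameter, choosing `?` or `&` depending on whether the
// request already carries a query string. The token value below is hypothetical.
fn with_hs_token(path_and_query: &str, hs_token: &str) -> String {
    let symbol = if path_and_query.contains('?') { "&" } else { "?" };
    format!("{}{}access_token={}", path_and_query, symbol, hs_token)
}

// For example:
//   with_hs_token("/_matrix/app/v1/transactions/1", "secret")
//     == "/_matrix/app/v1/transactions/1?access_token=secret"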
+ } + // Success! + } else { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create(&user_id, "".into(), &uiaainfo)?; return Err(Error::Uiaa(uiaainfo)); } - // Success! - } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, "".into(), &uiaainfo)?; - return Err(Error::Uiaa(uiaainfo)); } if missing_username { @@ -241,6 +243,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 2. Make conduit bot join @@ -265,6 +268,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 3. Power levels @@ -302,6 +306,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4.1 Join Rules @@ -322,6 +327,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4.2 History Visibility @@ -344,6 +350,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4.3 Guest Access @@ -364,6 +371,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 6. Events implied by name and topic @@ -386,6 +394,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.rooms.build_and_append_pdu( @@ -405,6 +414,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Room alias @@ -430,6 +440,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -456,6 +467,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -478,6 +490,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Send welcome message @@ -506,6 +519,7 @@ pub async fn register_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -681,6 +695,7 @@ pub async fn deactivate_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 094e70a..ec73ffc 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,7 +1,8 @@ use super::State; -use crate::{server_server, ConduitResult, Database, Error, Ruma}; +use crate::{appservice_server, server_server, ConduitResult, Database, Error, Ruma}; use ruma::{ api::{ + appservice, client::{ error::ErrorKind, r0::alias::{create_alias, delete_alias, get_alias}, @@ -75,13 +76,37 @@ pub async fn get_alias_helper( return Ok(get_alias::Response::new(response.room_id, response.servers).into()); } - let room_id = db - .rooms - .id_from_alias(&room_alias)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room with alias not found.", - ))?; + let mut room_id = None; + match db.rooms.id_from_alias(&room_alias)? { + Some(r) => room_id = Some(r), + None => { + for (_id, registration) in db.appservice.iter_all().filter_map(|r| r.ok()) { + if appservice_server::send_request( + &db.globals, + registration, + appservice::query::query_room_alias::v1::Request { room_alias }, + ) + .await + .is_ok() + { + room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| { + Error::bad_config("Appservice lied to us. 
Room does not exist.") + })?); + break; + } + } + } + }; + + let room_id = match room_id { + Some(room_id) => room_id, + None => { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room with alias not found.", + )) + } + }; Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into()) } diff --git a/src/client_server/media.rs b/src/client_server/media.rs index e6bd182..0776c9e 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -45,7 +45,7 @@ pub async fn create_content_route( db.flush().await?; - Ok(create_content::Response { content_uri: mxc }.into()) + Ok(create_content::Response { content_uri: mxc, blurhash: None }.into()) } #[cfg_attr( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6d3a690..46548d5 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -128,6 +128,7 @@ pub async fn leave_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; @@ -167,6 +168,7 @@ pub async fn invite_user_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; @@ -222,6 +224,7 @@ pub async fn kick_user_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; @@ -281,6 +284,7 @@ pub async fn ban_user_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; @@ -332,6 +336,7 @@ pub async fn unban_user_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; @@ -713,6 +718,7 @@ async fn join_room_by_id_helper( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 327b9ab..3640730 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -22,7 +22,7 @@ pub async fn send_message_event_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_deref(); // Check if this is a new transaction id if let Some(response) = @@ -69,6 +69,7 @@ pub async fn send_message_event_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 828d259..761443d 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -67,6 +67,7 @@ pub async fn set_displayname_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Presence update @@ -163,6 +164,7 @@ pub async fn set_avatar_url_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 6f7728a..212e751 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -35,6 +35,7 @@ pub async fn redact_event_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f92fc8d..e473e6e 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -69,6 +69,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 2. Let the room creator join @@ -93,6 +94,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 3. 
Power levels @@ -137,6 +139,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4. Events set by preset @@ -176,6 +179,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4.2 History Visibility @@ -196,6 +200,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 4.3 Guest Access @@ -224,6 +229,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // 5. Events listed in initial_state @@ -246,6 +252,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -270,6 +277,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -291,6 +299,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -317,6 +326,7 @@ pub async fn create_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -407,6 +417,7 @@ pub async fn upgrade_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Get the old room federations status @@ -450,6 +461,7 @@ pub async fn upgrade_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Join the new room @@ -474,6 +486,7 @@ pub async fn upgrade_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; // Recommended transferable state events list from the specs @@ -510,6 +523,7 @@ pub async fn upgrade_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; } @@ -556,6 +570,7 @@ pub async fn upgrade_room_route( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 010b20d..cecb79d 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -63,8 +63,8 @@ pub async fn send_state_event_for_empty_key_route( let Ruma { body, sender_user, - sender_device: _, json_body, + .. 
} = body; let json = serde_json::from_str::( @@ -288,6 +288,7 @@ pub async fn send_state_event_for_key_helper( &db.sending, &db.admin, &db.account_data, + &db.appservice, )?; Ok(event_id) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 8cc3e29..5bc001e 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -17,7 +17,7 @@ pub async fn send_event_to_device_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_deref(); // Check if this is a new transaction id if db diff --git a/src/database.rs b/src/database.rs index 4905070..5150517 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,5 +1,6 @@ pub mod account_data; pub mod admin; +pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; @@ -16,6 +17,8 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; use std::{convert::TryInto, fs::remove_dir_all}; #[derive(Clone, Deserialize)] @@ -59,6 +62,7 @@ pub struct Database { pub transaction_ids: transaction_ids::TransactionIds, pub sending: sending::Sending, pub admin: admin::Admin, + pub appservice: appservice::Appservice, pub _db: sled::Db, } @@ -180,6 +184,10 @@ impl Database { admin: admin::Admin { sender: admin_sender, }, + appservice: appservice::Appservice { + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, + }, _db: db, }; diff --git a/src/database/admin.rs b/src/database/admin.rs index 778796f..7de6bf9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -10,7 +10,9 @@ use ruma::{ use tokio::select; pub enum AdminCommand { - SendTextMessage(message::TextMessageEventContent), + RegisterAppservice(serde_yaml::Value), + ListAppservices, + SendMessage(message::MessageEventContent), } #[derive(Clone)] @@ -44,28 +46,49 @@ impl Admin { warn!("Conduit instance does not have an #admins room. Logging to that room will not work."); } + let send_message = |message: message::MessageEventContent| { + if let Some(conduit_room) = &conduit_room { + db.rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &db.globals, + &db.sending, + &db.admin, + &db.account_data, + &db.appservice, + ) + .unwrap(); + } + }; + loop { select! 
{ Some(event) = receiver.next() => { match event { - AdminCommand::SendTextMessage(message) => { - if let Some(conduit_room) = &conduit_room { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: serde_json::to_value(message).expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - ).unwrap(); - } + AdminCommand::RegisterAppservice(yaml) => { + db.appservice.register_appservice(yaml).unwrap(); // TODO handle error + } + AdminCommand::ListAppservices => { + let appservices = db.appservice.iter_ids().collect::>(); + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") + ); + send_message(message::MessageEventContent::text_plain(output)); + } + AdminCommand::SendMessage(message) => { + send_message(message); } } } diff --git a/src/database/appservice.rs b/src/database/appservice.rs new file mode 100644 index 0000000..26ea5b9 --- /dev/null +++ b/src/database/appservice.rs @@ -0,0 +1,67 @@ +use crate::{utils, Error, Result}; +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; + +#[derive(Clone)] +pub struct Appservice { + pub(super) cached_registrations: Arc>>, + pub(super) id_appserviceregistrations: sled::Tree, +} + +impl Appservice { + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { + // TODO: Rumaify + let id = yaml.get("id").unwrap().as_str().unwrap(); + self.id_appserviceregistrations + .insert(id, serde_yaml::to_string(&yaml).unwrap().as_bytes())?; + self.cached_registrations + .write() + .unwrap() + .insert(id.to_owned(), yaml); + + Ok(()) + } + + pub fn get_registration(&self, id: &str) -> Result> { + self.cached_registrations + .read() + .unwrap() + .get(id) + .map_or_else( + || { + Ok(self + .id_appserviceregistrations + .get(id)? + .map(|bytes| { + Ok::<_, Error>(serde_yaml::from_slice(&bytes).map_err(|_| { + Error::bad_database( + "Invalid registration bytes in id_appserviceregistrations.", + ) + })?) + }) + .transpose()?) + }, + |r| Ok(Some(r.clone())), + ) + } + + pub fn iter_ids(&self) -> impl Iterator> { + self.id_appserviceregistrations.iter().keys().map(|id| { + Ok(utils::string_from_bytes(&id?).map_err(|_| { + Error::bad_database("Invalid id bytes in id_appserviceregistrations.") + })?) + }) + } + + pub fn iter_all<'a>( + &'a self, + ) -> impl Iterator> + 'a { + self.iter_ids().filter_map(|id| id.ok()).map(move |id| { + Ok(( + id.clone(), + self.get_registration(&id)? 
+ .expect("iter_ids only returns appservices that exist"), + )) + }) + } +} diff --git a/src/database/globals.rs b/src/database/globals.rs index 1221609..e913c0f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,10 +1,10 @@ use crate::{database::Config, utils, Error, Result}; -use trust_dns_resolver::TokioAsyncResolver; -use std::collections::HashMap; use log::error; use ruma::ServerName; +use std::collections::HashMap; use std::sync::Arc; use std::sync::RwLock; +use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; @@ -59,9 +59,11 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client: reqwest::Client::new(), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().await.map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf() + .await + .map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), }) } diff --git a/src/database/media.rs b/src/database/media.rs index 89d48e1..448d071 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -290,7 +290,12 @@ impl Media { file: thumbnail_bytes.to_vec(), })) } else { - Ok(None) + // Couldn't parse file to generate thumbnail, send original + Ok(Some(FileMeta { + filename, + content_type, + file: file.to_vec(), + })) } } else { Ok(None) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fb139a6..3e2a17f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -36,16 +36,6 @@ use super::admin::AdminCommand; /// hashing the entire state. pub type StateHashId = IVec; -/// An enum that represents the two valid states when searching -/// for an events "parent". -/// -/// An events parent is any event we are aware of that is part of -/// the events `prev_events` array. -pub(crate) enum ClosestParent { - Append, - Insert(u64), -} - #[derive(Clone)] pub struct Rooms { pub edus: edus::RoomEdus, @@ -411,54 +401,6 @@ impl Rooms { } } - /// Recursively search for a PDU from our DB that is also in the - /// `prev_events` field of the incoming PDU. - /// - /// First we check if the last PDU inserted to the given room is a parent - /// if not we recursively check older `prev_events` to insert the incoming - /// event after. - pub(crate) fn get_latest_pduid_before( - &self, - room: &RoomId, - incoming_prev_ids: &[EventId], - their_state: &BTreeMap>, - ) -> Result> { - match self.pduid_pdu.scan_prefix(room.as_bytes()).last() { - Some(Ok(val)) - if incoming_prev_ids.contains( - &serde_json::from_slice::(&val.1) - .map_err(|_| { - Error::bad_database("last DB entry contains invalid PDU bytes") - })? - .event_id, - ) => - { - Ok(Some(ClosestParent::Append)) - } - _ => { - let mut prev_ids = incoming_prev_ids.to_vec(); - while let Some(id) = prev_ids.pop() { - match self.get_pdu_id(&id)? { - Some(pdu_id) => { - return Ok(Some(ClosestParent::Insert(self.pdu_count(&pdu_id)?))); - } - None => { - prev_ids.extend(their_state.get(&id).map_or( - Err(Error::BadServerResponse( - "Failed to find previous event for PDU in state", - )), - // `prev_event_ids` will return an empty Vec instead of failing - // so it works perfect for our use here - |pdu| Ok(pdu.prev_event_ids()), - )?); - } - } - } - Ok(None) - } - } - } - /// Returns the leaf pdus of a room. 
pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); @@ -583,18 +525,59 @@ impl Rooms { .as_ref() == Some(&pdu.room_id) { - let mut parts = body.split_whitespace().skip(1); + let mut lines = body.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body = lines.collect::>(); + + let mut parts = command_line.split_whitespace().skip(1); if let Some(command) = parts.next() { let args = parts.collect::>(); - admin.send(AdminCommand::SendTextMessage( - message::TextMessageEventContent { - body: format!("Command: {}, Args: {:?}", command, args), - formatted: None, - relates_to: None, - new_content: None, - }, - )); + match command { + "register_appservice" => { + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" + { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::( + &appservice_config, + ); + match parsed_config { + Ok(yaml) => { + admin.send(AdminCommand::RegisterAppservice(yaml)); + } + Err(e) => { + admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + format!( + "Could not parse appservice config: {}", + e + ), + ), + )); + } + } + } else { + admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + "Expected code block in command body.", + ), + )); + } + } + "list_appservices" => { + admin.send(AdminCommand::ListAppservices); + } + _ => { + admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain(format!( + "Command: {}, Args: {:?}", + command, args + )), + )); + } + } } } } @@ -675,6 +658,7 @@ impl Rooms { sending: &super::sending::Sending, admin: &super::admin::Admin, account_data: &super::account_data::AccountData, + appservice: &super::appservice::Appservice, ) -> Result { let PduBuilder { event_type, @@ -923,6 +907,10 @@ impl Rooms { sending.send_pdu(&server, &pdu_id)?; } + for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + Ok(pdu.event_id) } diff --git a/src/database/sending.rs b/src/database/sending.rs index cd88e08..7ce7d63 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,26 +1,35 @@ use std::{collections::HashMap, convert::TryFrom, time::SystemTime}; -use crate::{server_server, utils, Error, PduEvent, Result}; +use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::{debug, warn}; +use log::warn; use rocket::futures::stream::{FuturesUnordered, StreamExt}; -use ruma::{api::federation, ServerName}; +use ruma::{ + api::{appservice, federation}, + ServerName, +}; use sled::IVec; use tokio::select; #[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
- pub(super) servernamepduids: sled::Tree, // ServernamePduId = ServerName + PduId - pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = ServerName + PduId (pduid can be empty for reservation) + pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+)ServerName + PduId + pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+)ServerName + PduId (pduid can be empty for reservation) } impl Sending { - pub fn start_handler(&self, globals: &super::globals::Globals, rooms: &super::rooms::Rooms) { + pub fn start_handler( + &self, + globals: &super::globals::Globals, + rooms: &super::rooms::Rooms, + appservice: &super::appservice::Appservice, + ) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); let rooms = rooms.clone(); let globals = globals.clone(); + let appservice = appservice.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -28,7 +37,7 @@ impl Sending { // Retry requests we could not finish yet let mut current_transactions = HashMap::new(); - for (server, pdu) in servercurrentpdus + for (server, pdu, is_appservice) in servercurrentpdus .iter() .filter_map(|r| r.ok()) .map(|(key, _)| { @@ -38,45 +47,61 @@ impl Sending { Error::bad_database("Invalid bytes in servercurrentpdus.") })?; + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + // Appservices start with a plus + let (server, is_appservice) = if server.starts_with("+") { + (&server[1..], true) + } else { + (&*server, false) + }; + Ok::<_, Error>(( - Box::::try_from(utils::string_from_bytes(&server).map_err( - |_| { - Error::bad_database( - "Invalid server bytes in server_currenttransaction", - ) - }, - )?) - .map_err(|_| { + Box::::try_from(server).map_err(|_| { Error::bad_database( "Invalid server string in server_currenttransaction", ) })?, IVec::from(pdu), + is_appservice, )) }) .filter_map(|r| r.ok()) - .filter(|(_, pdu)| !pdu.is_empty()) // Skip reservation key + .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key .take(50) // This should not contain more than 50 anyway { current_transactions - .entry(server) + .entry((server, is_appservice)) .or_insert_with(Vec::new) .push(pdu); } - for (server, pdus) in current_transactions { - futures.push(Self::handle_event(server, pdus, &globals, &rooms)); + for ((server, is_appservice), pdus) in current_transactions { + futures.push(Self::handle_event( + server, + is_appservice, + pdus, + &globals, + &rooms, + &appservice, + )); } let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! 
{ - Some(server) = futures.next() => { - debug!("sending response: {:?}", &server); - match server { - Ok((server, _response)) => { - let mut prefix = server.as_bytes().to_vec(); + Some(response) = futures.next() => { + match response { + Ok((server, is_appservice)) => { + let mut prefix = if is_appservice { + "+".as_bytes().to_vec() + } else { + Vec::new() + }; + prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); for key in servercurrentpdus @@ -109,13 +134,13 @@ impl Sending { servernamepduids.remove(¤t_key).unwrap(); } - futures.push(Self::handle_event(server, new_pdus, &globals, &rooms)); + futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice)); } else { servercurrentpdus.remove(&prefix).unwrap(); // servercurrentpdus with the prefix should be empty now } } - Err((server, e)) => { + Err((server, _is_appservice, e)) => { warn!("Couldn't send transaction to {}: {}", server, e) // TODO: exponential backoff } @@ -126,24 +151,37 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); - if let Some((server, pdu_id)) = utils::string_from_bytes( + if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() .expect("splitn will always return 1 or more elements"), ) .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) - .and_then(|server_str| Box::::try_from(server_str) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))) + .map(|server_str| { + // Appservices start with a plus + if server_str.starts_with("+") { + (server_str[1..].to_owned(), true) + } else { + (server_str, false) + } + }) + .and_then(|(server_str, is_appservice)| Box::::try_from(server_str) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid.")).map(|s| (s, is_appservice))) .ok() - .and_then(|server| parts + .and_then(|(server, is_appservice)| parts .next() .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) .ok() - .map(|pdu_id| (server, pdu_id)) + .map(|pdu_id| (server, is_appservice, pdu_id)) ) // TODO: exponential backoff - .filter(|(server, _)| { - let mut prefix = server.to_string().as_bytes().to_vec(); + .filter(|(server, is_appservice, _)| { + let mut prefix = if *is_appservice { + "+".as_bytes().to_vec() + } else { + Vec::new() + }; + prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); servercurrentpdus @@ -154,7 +192,7 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - futures.push(Self::handle_event(server, vec![pdu_id.into()], &globals, &rooms)); + futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice)); } } } @@ -172,56 +210,102 @@ impl Sending { Ok(()) } + pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { + let mut key = "+".as_bytes().to_vec(); + key.extend_from_slice(appservice_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(pdu_id); + self.servernamepduids.insert(key, b"")?; + + Ok(()) + } + async fn handle_event( server: Box, + is_appservice: bool, pdu_ids: Vec, globals: &super::globals::Globals, rooms: &super::rooms::Rooms, - ) -> std::result::Result< - (Box, send_transaction_message::v1::Response), - (Box, Error), - > { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - // TODO: check room version and remove event_id if needed - serde_json::from_str( - 
PduEvent::convert_to_outgoing_federation_event( - rooms - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database( - "Event in servernamepduids not found in db.", - ), - ) - })?, - ) - .json() - .get(), + appservice: &super::appservice::Appservice, + ) -> std::result::Result<(Box, bool), (Box, bool, Error)> { + if is_appservice { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Box, Error)>( + rooms + .get_pdu_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database( + "Event in servernamepduids not found in db.", + ), + ) + })? + .to_any_event(), ) - .expect("Raw<..> is always valid"), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + }) + .filter_map(|r| r.ok()) + .collect::>(); + appservice_server::send_request( + &globals, + appservice + .get_registration(server.as_str()) + .unwrap() + .unwrap(), // TODO: handle error + appservice::event::push_events::v1::Request { + events: &pdu_jsons, + txn_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| (server.clone(), is_appservice)) + .map_err(|e| (server, is_appservice, e)) + } else { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Box, Error)>( + // TODO: check room version and remove event_id if needed + serde_json::from_str( + PduEvent::convert_to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database( + "Event in servernamepduids not found in db.", + ), + ) + })?, + ) + .json() + .get(), + ) + .expect("Raw<..> is always valid"), + ) + }) + .filter_map(|r| r.ok()) + .collect::>(); - server_server::send_request( - &globals, - server.clone(), - send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus: &pdu_jsons, - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), - }, - ) - .await - .map(|response| (server.clone(), response)) - .map_err(|e| (server, e)) + server_server::send_request( + &globals, + server.clone(), + send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &pdu_jsons, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| (server.clone(), is_appservice)) + .map_err(|e| (server, is_appservice, e)) + } } } diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 7c0eb98..1f8ba7d 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -11,13 +11,13 @@ impl TransactionIds { pub fn add_txnid( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: Option<&DeviceId>, txn_id: &str, data: &[u8], ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); key.push(0xff); key.extend_from_slice(txn_id.as_bytes()); @@ -29,12 +29,12 @@ impl TransactionIds { pub fn existing_txnid( &self, user_id: &UserId, - device_id: &DeviceId, + device_id: Option<&DeviceId>, txn_id: &str, ) -> Result> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); key.push(0xff); key.extend_from_slice(txn_id.as_bytes()); diff --git a/src/error.rs b/src/error.rs index 
316ca74..7d4a751 100644 --- a/src/error.rs +++ b/src/error.rs @@ -142,8 +142,8 @@ impl log::Log for ConduitLogger { mut_last_logs.insert(output.clone(), Instant::now()); } - self.db.admin.send(AdminCommand::SendTextMessage( - message::TextMessageEventContent::plain(output), + self.db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain(output), )); } } diff --git a/src/lib.rs b/src/lib.rs index eea32c7..aed129f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +pub mod appservice_server; pub mod client_server; mod database; mod error; diff --git a/src/main.rs b/src/main.rs index 58d3427..9574894 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] +pub mod appservice_server; pub mod client_server; pub mod server_server; @@ -139,7 +140,8 @@ fn setup_rocket() -> rocket::Rocket { .await .expect("config is valid"); - data.sending.start_handler(&data.globals, &data.rooms); + data.sending + .start_handler(&data.globals, &data.rooms, &data.appservice); log::set_boxed_logger(Box::new(ConduitLogger { db: data.clone(), last_logs: Default::default(), diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 9597ac8..0fdca74 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -12,7 +12,7 @@ use std::{ #[cfg(feature = "conduit_bin")] use { crate::utils, - log::warn, + log::{debug, warn}, rocket::{ data::{ ByteUnit, Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, @@ -34,6 +34,7 @@ pub struct Ruma { pub sender_user: Option, pub sender_device: Option>, pub json_body: Option>, // This is None when body is not a valid string + pub from_appservice: bool, } #[cfg(feature = "conduit_bin")] @@ -66,28 +67,72 @@ where .await .expect("database was loaded"); - let (sender_user, sender_device) = match T::METADATA.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - // Get token from header or query value - let token = match request - .headers() - .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())) - { - // TODO: M_MISSING_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some(token) => token, - }; + // Get token from header or query value + let token = request + .headers() + .get_one("Authorization") + .map(|s| s[7..].to_owned()) // Split off "Bearer " + .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())); - // Check if token is valid - match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), - Some((user_id, device_id)) => (Some(user_id), Some(device_id.into())), + let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = + db.appservice + .iter_all() + .filter_map(|r| r.ok()) + .find(|(_id, registration)| { + registration + .get("as_token") + .and_then(|as_token| as_token.as_str()) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) + }) { + match T::METADATA.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let user_id = request.get_query_value::("user_id").map_or_else( + || { + UserId::parse_with_server_name( + registration + .get("sender_localpart") + .unwrap() + .as_str() + .unwrap(), + db.globals.server_name(), + ) + .unwrap() + }, + |string| { + UserId::try_from(string.expect("parsing to string always works")) + .unwrap() + }, + ); + + if !db.users.exists(&user_id).unwrap() { + return 
Failure((Status::Unauthorized, ())); + } + + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, true) } + AuthScheme::ServerSignatures => (None, None, true), + AuthScheme::None => (None, None, true), + } + } else { + match T::METADATA.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + if let Some(token) = token { + match db.users.find_from_token(&token).unwrap() { + // TODO: M_UNKNOWN_TOKEN + None => return Failure((Status::Unauthorized, ())), + Some((user_id, device_id)) => { + (Some(user_id), Some(device_id.into()), false) + } + } + } else { + // TODO: M_MISSING_TOKEN + return Failure((Status::Unauthorized, ())); + } + } + AuthScheme::ServerSignatures => (None, None, false), + AuthScheme::None => (None, None, false), } - _ => (None, None), }; let mut http_request = http::Request::builder() @@ -103,7 +148,7 @@ where handle.read_to_end(&mut body).await.unwrap(); let http_request = http_request.body(body.clone()).unwrap(); - log::debug!("{:?}", http_request); + debug!("{:?}", http_request); match ::Incoming::try_from(http_request) { Ok(t) => Success(Ruma { @@ -114,6 +159,7 @@ where json_body: utils::string_from_bytes(&body) .ok() .and_then(|s| serde_json::value::RawValue::from_string(s).ok()), + from_appservice, }), Err(e) => { warn!("{:?}", e); diff --git a/src/server_server.rs b/src/server_server.rs index 58dd872..7d12c54 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,10 +1,7 @@ -use crate::{ - client_server, database::rooms::ClosestParent, utils, ConduitResult, Database, Error, PduEvent, - Result, Ruma, -}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{error, warn}; +use log::warn; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -27,7 +24,6 @@ use std::{ collections::BTreeMap, convert::TryFrom, fmt::Debug, - sync::Arc, time::{Duration, SystemTime}, }; @@ -73,7 +69,6 @@ where .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { - println!("Loaded {} -> {:?}", destination, result); result } else { let result = find_actual_destination(globals, &destination).await; @@ -82,7 +77,6 @@ where .write() .unwrap() .insert(destination.clone(), result.clone()); - println!("Saving {} -> {:?}", destination, result); result }; @@ -491,173 +485,28 @@ pub async fn send_transaction_message_route<'a>( continue; } - // If it is not a state event, we can skip state-res... maybe - if value.get("state_key").is_none() { - if !db.rooms.is_joined(&pdu.sender, room_id)? 
{ - warn!("Sender is not joined {}", pdu.kind); - resolved_map.insert(event_id, Err("User is not in this room".into())); - continue; - } + let count = db.globals.next_count()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_to_state(&pdu_id, &pdu)?; - db.rooms.append_to_state(&pdu_id, &pdu)?; + db.rooms.append_pdu( + &pdu, + &value, + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - resolved_map.insert(event_id, Ok::<(), String>(())); - continue; + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } - // We have a state event so we need info for state-res - let get_state_response = match send_request( - &db.globals, - body.body.origin.clone(), - ruma::api::federation::event::get_room_state::v1::Request { - room_id, - event_id: &event_id, - }, - ) - .await - { - Ok(res) => res, - // We can't hard fail because there are some valid errors, just - // keep checking PDU's - // - // As an example a possible error - // {"errcode":"M_FORBIDDEN","error":"Host not in room."} - Err(err) => { - resolved_map.insert(event_id, Err(err.to_string())); - continue; - } - }; - - let their_current_state = get_state_response - .pdus - .iter() - .chain(get_state_response.auth_chain.iter()) // add auth events - .map(|pdu| { - let (event_id, json) = crate::pdu::process_incoming_pdu(pdu); - ( - event_id.clone(), - Arc::new( - // When creating a StateEvent the event_id arg will be used - // over any found in the json and it will not use ruma::reference_hash - // to generate one - state_res::StateEvent::from_id_canon_obj(event_id, json) - .expect("valid pdu json"), - ), - ) - }) - .collect::>(); - - let our_current_state = db.rooms.room_state_full(room_id)?; - // State resolution takes care of these checks - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. - - // TODO: 6. Passes authorization rules based on the current state of the room, otherwise it is "soft failed". - match state_res::StateResolution::resolve( - room_id, - &ruma::RoomVersionId::Version6, - &[ - our_current_state - .iter() - .map(|((ev, sk), v)| ((ev.clone(), sk.to_owned()), v.event_id.clone())) - .collect::>(), - their_current_state - .iter() - .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id())) - .collect::>(), - ], - Some( - our_current_state - .iter() - .map(|(_k, v)| (v.event_id.clone(), v.convert_for_state_res())) - .chain( - their_current_state - .iter() - .map(|(id, ev)| (id.clone(), ev.clone())), - ) - .collect::>(), - ), - &db.rooms, - ) { - Ok(resolved) if resolved.values().any(|id| &event_id == id) => { - // If the event is older than the last event in pduid_pdu Tree then find the - // closest ancestor we know of and insert after the known ancestor by - // altering the known events pduid to = same roomID + same count bytes + 0x1 - // pushing a single byte every time a simple append cannot be done. - match db.rooms.get_latest_pduid_before( - room_id, - &pdu.prev_events, - &their_current_state, - )? 
{ - Some(ClosestParent::Append) => { - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - } - Some(ClosestParent::Insert(old_count)) => { - let count = old_count; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - // Create a new count that is after old_count but before - // the pdu appended after - pdu_id.push(1); - - db.rooms.append_pdu( - &pdu, - &value, - count, - pdu_id.into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - } - _ => { - error!("Not a sequential event or no parents found"); - continue; - } - } - - resolved_map.insert(event_id, Ok::<(), String>(())); - } - // If the eventId is not found in the resolved state auth has failed - Ok(_) => { - resolved_map.insert( - event_id, - Err("This event failed authentication, not found in resolved set".into()), - ); - } - Err(e) => { - resolved_map.insert(event_id, Err(e.to_string())); - } - }; + resolved_map.insert(event_id, Ok::<(), String>(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) From 6606e41dde413af64278e52ee2a376377c8c035e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 19 Dec 2020 16:00:11 +0100 Subject: [PATCH 0362/1727] feat: improved state store --- src/client_server/alias.rs | 32 +++--- src/client_server/directory.rs | 28 ++--- src/client_server/media.rs | 60 ++++++----- src/client_server/membership.rs | 55 +++++----- src/client_server/read_marker.rs | 19 +++- src/client_server/sync.rs | 111 ++++++++++--------- src/database.rs | 13 ++- src/database/globals.rs | 10 +- src/database/rooms.rs | 178 +++++++++++++++++-------------- src/database/sending.rs | 137 ++++++++++++++++++------ src/error.rs | 2 +- src/main.rs | 9 +- src/server_server.rs | 2 +- 13 files changed, 405 insertions(+), 251 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index ec73ffc..498e882 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,5 +1,5 @@ use super::State; -use crate::{appservice_server, server_server, ConduitResult, Database, Error, Ruma}; +use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::{ appservice, @@ -66,12 +66,14 @@ pub async fn get_alias_helper( room_alias: &RoomAliasId, ) -> ConduitResult { if room_alias.server_name() != db.globals.server_name() { - let response = server_server::send_request( - &db.globals, - room_alias.server_name().to_owned(), - federation::query::get_room_information::v1::Request { room_alias }, - ) - .await?; + let response = db + .sending + .send_federation_request( + &db.globals, + room_alias.server_name().to_owned(), + federation::query::get_room_information::v1::Request { room_alias }, + ) + .await?; return Ok(get_alias::Response::new(response.room_id, response.servers).into()); } @@ -81,13 +83,15 @@ pub async fn get_alias_helper( Some(r) => room_id = Some(r), None => { for (_id, registration) in db.appservice.iter_all().filter_map(|r| r.ok()) { - if appservice_server::send_request( - &db.globals, - registration, - appservice::query::query_room_alias::v1::Request { room_alias }, - ) - .await - .is_ok() + if db + .sending + .send_appservice_request( + &db.globals, + registration, + appservice::query::query_room_alias::v1::Request { room_alias }, + ) + .await + 
.is_ok() { room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 559071a..fa5db3a 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,5 +1,5 @@ use super::State; -use crate::{server_server, ConduitResult, Database, Error, Result, Ruma}; +use crate::{ConduitResult, Database, Error, Result, Ruma}; use log::info; use ruma::{ api::{ @@ -133,19 +133,21 @@ pub async fn get_public_rooms_filtered_helper( .clone() .filter(|server| *server != db.globals.server_name().as_str()) { - let response = server_server::send_request( - &db.globals, - other_server.to_owned(), - federation::directory::get_public_rooms_filtered::v1::Request { - limit, - since: since.as_deref(), - filter: Filter { - generic_search_term: filter.generic_search_term.as_deref(), + let response = db + .sending + .send_federation_request( + &db.globals, + other_server.to_owned(), + federation::directory::get_public_rooms_filtered::v1::Request { + limit, + since: since.as_deref(), + filter: Filter { + generic_search_term: filter.generic_search_term.as_deref(), + }, + room_network: RoomNetwork::Matrix, }, - room_network: RoomNetwork::Matrix, - }, - ) - .await?; + ) + .await?; return Ok(get_public_rooms_filtered::Response { chunk: response diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0776c9e..156040b 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,7 +1,5 @@ use super::State; -use crate::{ - database::media::FileMeta, server_server, utils, ConduitResult, Database, Error, Ruma, -}; +use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, @@ -45,7 +43,11 @@ pub async fn create_content_route( db.flush().await?; - Ok(create_content::Response { content_uri: mxc, blurhash: None }.into()) + Ok(create_content::Response { + content_uri: mxc, + blurhash: None, + } + .into()) } #[cfg_attr( @@ -71,16 +73,18 @@ pub async fn get_content_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = server_server::send_request( - &db.globals, - body.server_name.clone(), - get_content::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; + let get_content_response = db + .sending + .send_federation_request( + &db.globals, + body.server_name.clone(), + get_content::Request { + allow_remote: false, + server_name: &body.server_name, + media_id: &body.media_id, + }, + ) + .await?; db.media.create( mxc, @@ -118,19 +122,21 @@ pub async fn get_content_thumbnail_route( )? 
{ Ok(get_content_thumbnail::Response { file, content_type }.into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_thumbnail_response = server_server::send_request( - &db.globals, - body.server_name.clone(), - get_content_thumbnail::Request { - allow_remote: false, - height: body.height, - width: body.width, - method: body.method, - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; + let get_thumbnail_response = db + .sending + .send_federation_request( + &db.globals, + body.server_name.clone(), + get_content_thumbnail::Request { + allow_remote: false, + height: body.height, + width: body.width, + method: body.method, + server_name: &body.server_name, + media_id: &body.media_id, + }, + ) + .await?; db.media.upload_thumbnail( mxc, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 46548d5..e8d57bc 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use super::State; use crate::{ client_server, pdu::{PduBuilder, PduEvent}, - server_server, utils, ConduitResult, Database, Error, Result, Ruma, + utils, ConduitResult, Database, Error, Result, Ruma, }; use log::warn; use ruma::{ @@ -401,9 +401,10 @@ pub async fn get_member_events_route( Ok(get_member_events::Response { chunk: db .rooms - .room_state_type(&body.room_id, &EventType::RoomMember)? - .values() - .map(|pdu| pdu.to_member_event()) + .room_state_full(&body.room_id)? + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .map(|(_, pdu)| pdu.to_member_event()) .collect(), } .into()) @@ -463,16 +464,18 @@ async fn join_room_by_id_helper( )); for remote_server in servers { - let make_join_response = server_server::send_request( - &db.globals, - remote_server.clone(), - federation::membership::create_join_event_template::v1::Request { - room_id, - user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], - }, - ) - .await; + let make_join_response = db + .sending + .send_federation_request( + &db.globals, + remote_server.clone(), + federation::membership::create_join_event_template::v1::Request { + room_id, + user_id: sender_user, + ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + }, + ) + .await; make_join_response_and_server = make_join_response.map(|r| (r, remote_server)); @@ -540,16 +543,18 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let join_event = join_event_stub; - let send_join_response = server_server::send_request( - &db.globals, - remote_server.clone(), - federation::membership::create_join_event::v2::Request { - room_id, - event_id: &event_id, - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), - }, - ) - .await?; + let send_join_response = db + .sending + .send_federation_request( + &db.globals, + remote_server.clone(), + federation::membership::create_join_event::v2::Request { + room_id, + event_id: &event_id, + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + }, + ) + .await?; let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { let mut value = serde_json::from_str(pdu.json().get()) @@ -694,7 +699,7 @@ async fn join_room_by_id_helper( } } - db.rooms.force_state(room_id, state)?; + db.rooms.force_state(room_id, state, &db.globals)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 
f3e7211..0c4ec1a 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,7 +1,9 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::read_marker::set_read_marker}, + api::client::{ + error::ErrorKind, r0::capabilities::get_capabilities, r0::read_marker::set_read_marker, + }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; @@ -76,3 +78,18 @@ pub async fn set_read_marker_route( Ok(set_read_marker::Response.into()) } + +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") +)] +pub async fn set_receipt_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + db.flush().await?; + + Ok(set_read_marker::Response.into()) +} diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d7c24dc..8213651 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -102,9 +102,15 @@ pub async fn sync_events_route( } // Database queries: - let encrypted_room = db - .rooms - .room_state_get(&room_id, &EventType::RoomEncryption, "")? + + let current_state = db.rooms.room_state_full(&room_id)?; + let current_members = current_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .map(|(key, value)| (&key.1, value)) // Only keep state key + .collect::>(); + let encrypted_room = current_state + .get(&(EventType::RoomEncryption, "".to_owned())) .is_some(); // These type is Option>. The outer Option is None when there is no event between @@ -117,45 +123,45 @@ pub async fn sync_events_route( .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); - let since_members = since_state_hash.as_ref().map(|state_hash| { - state_hash.as_ref().and_then(|state_hash| { - db.rooms - .state_type(&state_hash, &EventType::RoomMember) - .ok() - }) + let since_state = since_state_hash.as_ref().map(|state_hash| { + state_hash + .as_ref() + .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) }); - let since_encryption = since_state_hash.as_ref().map(|state_hash| { - state_hash.as_ref().and_then(|state_hash| { - db.rooms - .state_get(&state_hash, &EventType::RoomEncryption, "") - .ok() - }) + let since_encryption = since_state.as_ref().map(|state| { + state + .as_ref() + .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) }); - let current_members = db.rooms.room_state_type(&room_id, &EventType::RoomMember)?; - // Calculations: let new_encrypted_room = encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); - let send_member_count = since_members.as_ref().map_or(false, |since_members| { - since_members.as_ref().map_or(true, |since_members| { - current_members.len() != since_members.len() + let send_member_count = since_state.as_ref().map_or(false, |since_state| { + since_state.as_ref().map_or(true, |since_state| { + current_members.len() + != since_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .count() }) }); - let since_sender_member = since_members.as_ref().map(|since_members| { - since_members.as_ref().and_then(|members| { - members.get(sender_user.as_str()).and_then(|pdu| { - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }) + let since_sender_member = 
since_state.as_ref().map(|since_state| { + since_state.as_ref().and_then(|state| { + state + .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) + .and_then(|pdu| { + serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }) }) }); @@ -170,30 +176,32 @@ pub async fn sync_events_route( .membership; let since_membership = - since_members + since_state .as_ref() - .map_or(MembershipState::Join, |members| { - members + .map_or(MembershipState::Join, |since_state| { + since_state .as_ref() - .and_then(|members| { - members.get(&user_id).and_then(|since_member| { - serde_json::from_value::< - Raw, - >( - since_member.content.clone() - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid PDU in database.") + .and_then(|since_state| { + since_state + .get(&(EventType::RoomMember, user_id.clone())) + .and_then(|since_member| { + serde_json::from_value::< + Raw, + >( + since_member.content.clone() + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + }) + .ok() }) - .ok() - }) }) .map_or(MembershipState::Leave, |member| member.membership) }); - let user_id = UserId::try_from(user_id) + let user_id = UserId::try_from(user_id.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; match (since_membership, current_membership) { @@ -456,7 +464,12 @@ pub async fn sync_events_route( }) .and_then(|state_hash| { db.rooms - .state_get(&state_hash, &EventType::RoomMember, sender_user.as_str()) + .state_get( + &room_id, + &state_hash, + &EventType::RoomMember, + sender_user.as_str(), + ) .ok()? 
.ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() diff --git a/src/database.rs b/src/database.rs index 5150517..99bba83 100644 --- a/src/database.rs +++ b/src/database.rs @@ -20,6 +20,7 @@ use serde::Deserialize; use std::collections::HashMap; use std::sync::{Arc, RwLock}; use std::{convert::TryInto, fs::remove_dir_all}; +use tokio::sync::Semaphore; #[derive(Clone, Deserialize)] pub struct Config { @@ -30,6 +31,8 @@ pub struct Config { cache_capacity: u64, #[serde(default = "default_max_request_size")] max_request_size: u32, + #[serde(default = "default_max_concurrent_requests")] + max_concurrent_requests: u16, #[serde(default)] registration_disabled: bool, #[serde(default)] @@ -39,7 +42,9 @@ pub struct Config { } fn default_server_name() -> Box { - "localhost".try_into().expect("") + "localhost" + .try_into() + .expect("localhost is valid servername") } fn default_cache_capacity() -> u64 { @@ -50,6 +55,10 @@ fn default_max_request_size() -> u32 { 20 * 1024 * 1024 // Default to 20 MB } +fn default_max_concurrent_requests() -> u16 { + 4 +} + #[derive(Clone)] pub struct Database { pub globals: globals::Globals, @@ -159,6 +168,7 @@ impl Database { roomuserid_invited: db.open_tree("roomuserid_invited")?, userroomid_left: db.open_tree("userroomid_left")?, + statekey_short: db.open_tree("statekey_short")?, stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, @@ -180,6 +190,7 @@ impl Database { sending: sending::Sending { servernamepduids: db.open_tree("servernamepduids")?, servercurrentpdus: db.open_tree("servercurrentpdus")?, + maximum_requests: Arc::new(Semaphore::new(10)), }, admin: admin::Admin { sender: admin_sender, diff --git a/src/database/globals.rs b/src/database/globals.rs index e913c0f..485650f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,6 +4,7 @@ use ruma::ServerName; use std::collections::HashMap; use std::sync::Arc; use std::sync::RwLock; +use std::time::Duration; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; @@ -54,11 +55,18 @@ impl Globals { } }; + let reqwest_client = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)) + .pool_max_idle_per_host(1) + .build() + .unwrap(); + Ok(Self { globals, config, keypair: Arc::new(keypair), - reqwest_client: reqwest::Client::new(), + reqwest_client, dns_resolver: TokioAsyncResolver::tokio_from_system_conf() .await .map_err(|_| { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3e2a17f..3f096a9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -62,7 +62,8 @@ pub struct Rooms { /// Remember the state hash at events in the past. pub(super) pduid_statehash: sled::Tree, /// The state for a given state hash. - pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + EventType + StateKey + pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count + pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } impl StateStore for Rooms { @@ -106,21 +107,28 @@ impl StateStore for Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
- pub fn state_full(&self, state_hash: &StateHashId) -> Result> { + pub fn state_full( + &self, + room_id: &RoomId, + state_hash: &StateHashId, + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() - .map(|pduid| { - self.pduid_pdu.get(&pduid?)?.map_or_else( - || Err(Error::bad_database("Failed to find StateMap.")), + .map(|pduid_short| { + let mut pduid = room_id.as_bytes().to_vec(); + pduid.push(0xff); + pduid.extend_from_slice(&pduid_short?); + self.pduid_pdu.get(&pduid)?.map_or_else( + || Err(Error::bad_database("Failed to find PDU in state snapshot.")), |b| { serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")) }, ) }) + .filter_map(|r| r.ok()) .map(|pdu| { - let pdu = pdu?; Ok(( ( pdu.kind.clone(), @@ -135,64 +143,45 @@ impl Rooms { .collect::>>() } - /// Returns all state entries for this type. - pub fn state_type( - &self, - state_hash: &StateHashId, - event_type: &EventType, - ) -> Result> { - let mut prefix = state_hash.to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&event_type.to_string().as_bytes()); - prefix.push(0xff); - - let mut hashmap = HashMap::new(); - for pdu in self - .stateid_pduid - .scan_prefix(&prefix) - .values() - .map(|pdu_id| { - Ok::<_, Error>( - serde_json::from_slice::(&self.pduid_pdu.get(pdu_id?)?.ok_or_else( - || Error::bad_database("PDU in state not found in database."), - )?) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, - ) - }) - { - let pdu = pdu?; - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Room state contains event without state_key.") - })?; - hashmap.insert(state_key, pdu); - } - Ok(hashmap) - } - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). pub fn state_get( &self, + room_id: &RoomId, state_hash: &StateHashId, event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = state_hash.to_vec(); - key.push(0xff); - key.extend_from_slice(&event_type.to_string().as_bytes()); + let mut key = event_type.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - self.stateid_pduid.get(&key)?.map_or(Ok(None), |pdu_id| { - Ok::<_, Error>(Some(( - pdu_id.clone(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, - ))) - }) + let short = self.statekey_short.get(&key)?; + + if let Some(short) = short { + let mut stateid = state_hash.to_vec(); + stateid.push(0xff); + stateid.extend_from_slice(&short); + + self.stateid_pduid + .get(&stateid)? + .map_or(Ok(None), |pdu_id_short| { + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&pdu_id_short); + + Ok::<_, Error>(Some(( + pdu_id.clone().into(), + serde_json::from_slice::( + &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { + Error::bad_database("PDU in state not found in database.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + ))) + }) + } else { + return Ok(None); + } } /// Returns the last state hash key added to the db. 
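For reference, a minimal sketch (not part of the patch; helper names are hypothetical) of the key layout the surrounding hunks rely on: `statekey_short` maps `EventType + 0xff + state_key` to a u64 "short" allocated from `globals.next_count()`, and `stateid_pduid` maps `state_hash + 0xff + short` (big-endian) to a PDU id with the room id prefix stripped:

    // Illustrative only; layout follows the comments and code in this diff.
    fn statekey_key(event_type: &str, state_key: &str) -> Vec<u8> {
        let mut key = event_type.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(state_key.as_bytes());
        key // statekey_short: this key -> u64 "short", stored as big-endian bytes
    }

    fn stateid_key(state_hash: &[u8], short: u64) -> Vec<u8> {
        let mut key = state_hash.to_vec();
        key.push(0xff);
        key.extend_from_slice(&short.to_be_bytes());
        key // stateid_pduid: this key -> pdu id without the room id prefix
    }

    fn full_pdu_id(room_id: &[u8], pdu_id_short: &[u8]) -> Vec<u8> {
        let mut pdu_id = room_id.to_vec();
        pdu_id.push(0xff);
        pdu_id.extend_from_slice(pdu_id_short);
        pdu_id // what pduid_pdu is actually keyed by
    }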
@@ -260,6 +249,7 @@ impl Rooms { &self, room_id: &RoomId, state: HashMap<(EventType, String), Vec>, + globals: &super::globals::Globals, ) -> Result<()> { let state_hash = self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; @@ -267,11 +257,29 @@ impl Rooms { prefix.push(0xff); for ((event_type, state_key), pdu_id) in state { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + let short = match self.statekey_short.get(&statekey)? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + None => { + let short = globals.next_count()?; + self.statekey_short + .insert(&statekey, &short.to_be_bytes())?; + short + } + }; + + let pdu_id_short = pdu_id + .splitn(2, |&b| b == 0xff) + .nth(1) + .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; + let mut state_id = prefix.clone(); - state_id.extend_from_slice(&event_type.as_ref().as_bytes()); - state_id.push(0xff); - state_id.extend_from_slice(&state_key.as_bytes()); - self.stateid_pduid.insert(state_id, pdu_id)?; + state_id.extend_from_slice(&short.to_be_bytes()); + self.stateid_pduid.insert(state_id, pdu_id_short)?; } self.roomid_statehash @@ -283,25 +291,12 @@ impl Rooms { /// Returns the full room state. pub fn room_state_full(&self, room_id: &RoomId) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_full(¤t_state_hash) + self.state_full(&room_id, ¤t_state_hash) } else { Ok(BTreeMap::new()) } } - /// Returns all state entries for this type. - pub fn room_state_type( - &self, - room_id: &RoomId, - event_type: &EventType, - ) -> Result> { - if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_type(¤t_state_hash, event_type) - } else { - Ok(HashMap::new()) - } - } - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). pub fn room_state_get( &self, @@ -310,7 +305,7 @@ impl Rooms { state_key: &str, ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_get(¤t_state_hash, event_type, state_key) + self.state_get(&room_id, ¤t_state_hash, event_type, state_key) } else { Ok(None) } @@ -593,7 +588,12 @@ impl Rooms { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. /// The incoming event is the `pdu_id` passed to this method. - pub fn append_to_state(&self, new_pdu_id: &[u8], new_pdu: &PduEvent) -> Result { + pub fn append_to_state( + &self, + new_pdu_id: &[u8], + new_pdu: &PduEvent, + globals: &super::globals::Globals, + ) -> Result { let old_state = if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { // Store state for event. The state does not include the event itself. 
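The complementary operation, sketched below with hypothetical names and not part of the patch: a full pdu id is `room_id + 0xff + count` (big-endian), and the state snapshot keeps only the part after the first 0xff separator, which is what the `splitn(2, |&b| b == 0xff).nth(1)` calls in these hunks compute:

    // Illustrative sketch of the pdu id handling used above.
    fn pdu_id_for(room_id: &[u8], count: u64) -> Vec<u8> {
        let mut pdu_id = room_id.to_vec();
        pdu_id.push(0xff);
        pdu_id.extend_from_slice(&count.to_be_bytes());
        pdu_id
    }

    // Strip the room id prefix; returns None if no 0xff separator is present,
    // mirroring the ok_or_else(...) error path in the diff.
    fn pdu_id_short(pdu_id: &[u8]) -> Option<&[u8]> {
        pdu_id.splitn(2, |&b| b == 0xff).nth(1)
    }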
@@ -608,7 +608,7 @@ impl Rooms { self.stateid_pduid .scan_prefix(&prefix) .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) - // Chop the old state_hash out leaving behind the (EventType, StateKey) + // Chop the old state_hash out leaving behind the short key (u64) .map(|(k, v)| (k.subslice(prefix.len(), k.len() - prefix.len()), v)) .collect::>() } else { @@ -620,7 +620,23 @@ impl Rooms { let mut pdu_key = new_pdu.kind.as_ref().as_bytes().to_vec(); pdu_key.push(0xff); pdu_key.extend_from_slice(state_key.as_bytes()); - new_state.insert(pdu_key.into(), new_pdu_id.into()); + + let short = match self.statekey_short.get(&pdu_key)? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + None => { + let short = globals.next_count()?; + self.statekey_short.insert(&pdu_key, &short.to_be_bytes())?; + short + } + }; + + let new_pdu_id_short = new_pdu_id + .splitn(2, |&b| b == 0xff) + .nth(1) + .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; + + new_state.insert((&short.to_be_bytes()).into(), new_pdu_id_short.into()); let new_state_hash = self.calculate_hash(&new_state.values().map(|b| &**b).collect::>())?; @@ -628,12 +644,10 @@ impl Rooms { let mut key = new_state_hash.to_vec(); key.push(0xff); - // TODO: we could avoid writing to the DB on every state event by keeping - // track of the delta and write that every so often - for (key_without_prefix, pdu_id) in new_state { + for (short, short_pdu_id) in new_state { let mut state_id = key.clone(); - state_id.extend_from_slice(&key_without_prefix); - self.stateid_pduid.insert(&state_id, &pdu_id)?; + state_id.extend_from_slice(&short); + self.stateid_pduid.insert(&state_id, &short_pdu_id)?; } self.roomid_statehash @@ -887,7 +901,7 @@ impl Rooms { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - self.append_to_state(&pdu_id, &pdu)?; + self.append_to_state(&pdu_id, &pdu, &globals)?; self.append_pdu( &pdu, diff --git a/src/database/sending.rs b/src/database/sending.rs index 7ce7d63..f21b154 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,21 +1,29 @@ -use std::{collections::HashMap, convert::TryFrom, time::SystemTime}; +use std::{ + collections::HashMap, + convert::TryFrom, + fmt::Debug, + sync::Arc, + time::{Duration, Instant, SystemTime}, +}; use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::warn; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, federation}, + api::{appservice, federation, OutgoingRequest}, ServerName, }; use sled::IVec; use tokio::select; +use tokio::sync::Semaphore; #[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+)ServerName + PduId pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+)ServerName + PduId (pduid can be empty for reservation) + pub(super) maximum_requests: Arc, } impl Sending { @@ -40,35 +48,7 @@ impl Sending { for (server, pdu, is_appservice) in servercurrentpdus .iter() .filter_map(|r| r.ok()) - .map(|(key, _)| { - let mut parts = key.splitn(2, |&b| b == 0xff); - let server = parts.next().expect("splitn always returns one element"); - let pdu = parts.next().ok_or_else(|| { - Error::bad_database("Invalid bytes in servercurrentpdus.") - })?; - - let server = utils::string_from_bytes(&server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { - (&server[1..], true) - } else { - (&*server, false) - }; - - Ok::<_, Error>(( - Box::::try_from(server).map_err(|_| { - Error::bad_database( - "Invalid server string in server_currenttransaction", - ) - })?, - IVec::from(pdu), - is_appservice, - )) - }) - .filter_map(|r| r.ok()) + .filter_map(|(key, _)| Self::parse_servercurrentpdus(key).ok()) .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key .take(50) // This should not contain more than 50 anyway @@ -90,6 +70,8 @@ impl Sending { )); } + let mut last_failed_try: HashMap, (u32, Instant)> = HashMap::new(); + let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! { @@ -140,9 +122,24 @@ impl Sending { // servercurrentpdus with the prefix should be empty now } } - Err((server, _is_appservice, e)) => { - warn!("Couldn't send transaction to {}: {}", server, e) - // TODO: exponential backoff + Err((server, is_appservice, e)) => { + warn!("Couldn't send transaction to {}: {}", server, e); + let mut prefix = if is_appservice { + "+".as_bytes().to_vec() + } else { + Vec::new() + }; + prefix.extend_from_slice(server.as_bytes()); + prefix.push(0xff); + last_failed_try.insert(server.clone(), match last_failed_try.get(&server) { + Some(last_failed) => { + (last_failed.0+1, Instant::now()) + }, + None => { + (1, Instant::now()) + } + }); + servercurrentpdus.remove(&prefix).unwrap(); } }; }, @@ -174,8 +171,19 @@ impl Sending { .ok() .map(|pdu_id| (server, is_appservice, pdu_id)) ) - // TODO: exponential backoff .filter(|(server, is_appservice, _)| { + if last_failed_try.get(server).map_or(false, |(tries, instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }) { + return false; + } + let mut prefix = if *is_appservice { "+".as_bytes().to_vec() } else { @@ -308,4 +316,63 @@ impl Sending { .map_err(|e| (server, is_appservice, e)) } } + + fn parse_servercurrentpdus(key: IVec) -> Result<(Box, IVec, bool)> { + let mut parts = key.splitn(2, |&b| b == 0xff); + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + // Appservices start with a plus + let (server, is_appservice) = if server.starts_with("+") { + (&server[1..], true) + } else { + 
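For reference, the backoff filter above skips a destination while less than `60s * tries * tries` has elapsed since its last failure, capped at one day. A standalone sketch of the same arithmetic (the function names here are illustrative, not from the source):

```rust
use std::time::{Duration, Instant};

/// Minimum time to wait after `tries` consecutive failures:
/// 60s * tries^2, capped at 24 hours (same arithmetic as the filter above).
fn min_backoff(tries: u32) -> Duration {
    let d = Duration::from_secs(60) * tries * tries;
    d.min(Duration::from_secs(60 * 60 * 24))
}

/// Returns true if the destination should still be skipped.
fn should_skip(last_failed: Option<(u32, Instant)>) -> bool {
    match last_failed {
        Some((tries, instant)) => instant.elapsed() < min_backoff(tries),
        None => false,
    }
}

fn main() {
    assert_eq!(min_backoff(1), Duration::from_secs(60)); // 1 minute
    assert_eq!(min_backoff(5), Duration::from_secs(25 * 60)); // 25 minutes
    assert_eq!(min_backoff(60), Duration::from_secs(60 * 60 * 24)); // capped at a day
    assert!(!should_skip(None)); // never failed -> never skipped
}
```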
(&*server, false) + }; + + Ok::<_, Error>(( + Box::::try_from(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?, + IVec::from(pdu), + is_appservice, + )) + } + + pub async fn send_federation_request( + &self, + globals: &crate::database::globals::Globals, + destination: Box, + request: T, + ) -> Result + where + T: Debug, + { + let permit = self.maximum_requests.acquire().await; + let response = server_server::send_request(globals, destination, request).await; + drop(permit); + + response + } + + pub async fn send_appservice_request( + &self, + globals: &crate::database::globals::Globals, + registration: serde_yaml::Value, + request: T, + ) -> Result + where + T: Debug, + { + let permit = self.maximum_requests.acquire().await; + let response = appservice_server::send_request(globals, registration, request).await; + drop(permit); + + response + } } diff --git a/src/error.rs b/src/error.rs index 7d4a751..d8e9d02 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,7 +121,7 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); - println!("{}", output); + eprintln!("{}", output); if self.enabled(record.metadata()) && record diff --git a/src/main.rs b/src/main.rs index 9574894..38a2ec9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -18,7 +18,7 @@ pub use pdu::PduEvent; pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use rocket::{fairing::AdHoc, routes}; +use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger @@ -70,6 +70,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_backup_key_sessions_route, client_server::get_backup_keys_route, client_server::set_read_marker_route, + client_server::set_receipt_route, client_server::create_typing_event_route, client_server::create_room_route, client_server::redact_event_route, @@ -134,6 +135,7 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_profile_information_route, ], ) + .register(catchers![not_found_catcher]) .attach(AdHoc::on_attach("Config", |rocket| async { let data = Database::load_or_create(rocket.figment().extract().expect("config is valid")) @@ -157,3 +159,8 @@ fn setup_rocket() -> rocket::Rocket { async fn main() { setup_rocket().launch().await.unwrap(); } + +#[catch(404)] +fn not_found_catcher(_req: &'_ Request<'_>) -> String { + "404 Not Found".to_owned() +} diff --git a/src/server_server.rs b/src/server_server.rs index 7d12c54..0653959 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -490,7 +490,7 @@ pub async fn send_transaction_message_route<'a>( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, &pdu)?; + db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( &pdu, From f12fbca3c559ad9f441c497f9659fdfcce6208db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 22 Dec 2020 20:08:20 +0100 Subject: [PATCH 0363/1727] fix: send state in /sync, element displays wrong membership changes --- src/client_server/membership.rs | 2 +- src/client_server/sync.rs | 34 +++++++++++++++++++++----------- src/database/rooms.rs | 35 +++++++++++++++++++++++++++++---- src/server_server.rs | 2 +- 4 files changed, 55 insertions(+), 18 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e8d57bc..b459d37 100644 --- 
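The `send_federation_request`/`send_appservice_request` helpers above gate every outgoing request on a permit from a shared `tokio::sync::Semaphore`, which is how `maximum_requests` limits concurrency. A minimal sketch of that pattern, with a hypothetical `do_request` standing in for the real federation call:

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Semaphore;

// Hypothetical stand-in for the real federation/appservice request.
async fn do_request(id: u32) -> u32 {
    tokio::time::sleep(Duration::from_millis(10)).await;
    id
}

async fn send_limited(limit: Arc<Semaphore>, id: u32) -> u32 {
    // At most `max_concurrent_requests` callers get past this line at once;
    // the permit is released when it goes out of scope. (On current tokio,
    // acquire() returns a Result because the semaphore can be closed.)
    let _permit = limit.acquire().await.expect("semaphore is never closed");
    do_request(id).await
}

#[tokio::main]
async fn main() {
    let limit = Arc::new(Semaphore::new(4)); // e.g. max_concurrent_requests = 4
    let handles: Vec<_> = (0..16)
        .map(|id| tokio::spawn(send_limited(limit.clone(), id)))
        .collect();

    for handle in handles {
        handle.await.unwrap();
    }
}
```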
a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -686,7 +686,7 @@ async fn join_room_by_id_helper( pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( &PduEvent::from(&**pdu), - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 8213651..3136116 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -91,15 +91,7 @@ pub async fn sync_events_route( // They /sync response doesn't always return all messages, so we say the output is // limited unless there are events in non_timeline_pdus - let mut limited = false; - - let mut state_pdus = Vec::new(); - for (_, pdu) in non_timeline_pdus { - if pdu.state_key.is_some() { - state_pdus.push(pdu); - } - limited = true; - } + let limited = non_timeline_pdus.next().is_some(); // Database queries: @@ -342,7 +334,7 @@ pub async fn sync_events_route( })?; let room_events = timeline_pdus - .into_iter() + .iter() .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); @@ -392,7 +384,6 @@ pub async fn sync_events_route( prev_batch, events: room_events, }, - // TODO: state before timeline state: sync_events::State { events: if joined_since_last_sync { db.rooms @@ -401,7 +392,26 @@ pub async fn sync_events_route( .map(|(_, pdu)| pdu.to_sync_state_event()) .collect() } else { - Vec::new() + match since_state { + None => Vec::new(), + Some(Some(since_state)) => current_state + .iter() + .filter(|(key, value)| { + since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) + }) + .filter(|(_, value)| { + !timeline_pdus.iter().any(|(_, timeline_pdu)| { + timeline_pdu.kind == value.kind + && timeline_pdu.state_key == value.state_key + }) + }) + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + Some(None) => current_state + .iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + } }, }, ephemeral: sync_events::Ephemeral { events: edus }, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3f096a9..e59c77f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -15,7 +15,7 @@ use ruma::{ }, EventType, }, - serde::{to_canonical_value, CanonicalJsonObject, Raw}, + serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; @@ -444,13 +444,40 @@ impl Rooms { pub fn append_pdu( &self, pdu: &PduEvent, - pdu_json: &CanonicalJsonObject, + mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, globals: &super::globals::Globals, account_data: &super::account_data::AccountData, admin: &super::admin::Admin, ) -> Result<()> { + // Make unsigned fields correct. 
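The new `state` block in `/sync` above sends only the entries of `current_state` whose `event_id` changed relative to `since_state`, and drops entries already covered by a timeline event with the same `kind` and `state_key`. The same delta computed on plain maps, with a hypothetical `SimplePdu` standing in for `PduEvent`:

```rust
use std::collections::BTreeMap;

// Hypothetical stand-in for PduEvent with just the fields the delta needs.
#[derive(Clone, Debug, PartialEq)]
struct SimplePdu {
    event_id: String,
    kind: String,
    state_key: String,
}

/// State to put in the /sync response: entries whose event_id changed since
/// `since_state`, minus entries already covered by a timeline event with the
/// same kind and state_key (mirroring the filters above).
fn state_delta(
    current_state: &BTreeMap<(String, String), SimplePdu>,
    since_state: &BTreeMap<(String, String), SimplePdu>,
    timeline: &[SimplePdu],
) -> Vec<SimplePdu> {
    current_state
        .iter()
        .filter(|(key, value)| {
            since_state.get(*key).map(|e| &e.event_id) != Some(&value.event_id)
        })
        .filter(|(_, value)| {
            !timeline
                .iter()
                .any(|t| t.kind == value.kind && t.state_key == value.state_key)
        })
        .map(|(_, pdu)| pdu.clone())
        .collect()
}

fn main() {
    let key = ("m.room.member".to_owned(), "@alice:example.com".to_owned());
    let old = SimplePdu {
        event_id: "$old".to_owned(),
        kind: key.0.clone(),
        state_key: key.1.clone(),
    };
    let new = SimplePdu {
        event_id: "$new".to_owned(),
        ..old.clone()
    };

    let since_state = BTreeMap::from([(key.clone(), old)]);
    let current_state = BTreeMap::from([(key, new.clone())]);

    // Changed since the last sync and not in the timeline -> included.
    assert_eq!(state_delta(&current_state, &since_state, &[]), vec![new.clone()]);
    // Already visible through the timeline -> excluded.
    assert!(state_delta(&current_state, &since_state, &[new]).is_empty());
}
```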
This is not properly documented in the spec, but state + // events need to have previous content in the unsigned field, so clients can easily + // interpret things like membership changes + if let Some(state_key) = &pdu.state_key { + if let CanonicalJsonValue::Object(unsigned) = pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) + { + if let Some(prev_state_hash) = self.pdu_state_hash(&pdu_id).unwrap() { + if let Some(prev_state) = self + .state_get(&pdu.room_id, &prev_state_hash, &pdu.kind, &state_key) + .unwrap() + { + unsigned.insert( + "prev_content".to_owned(), + CanonicalJsonValue::Object( + utils::to_canonical_object(prev_state.1.content) + .expect("event is valid, we just created it"), + ), + ); + } + } + } else { + error!("Invalid unsigned type in pdu."); + } + } + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending @@ -460,7 +487,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(pdu_json) + &*serde_json::to_string(&pdu_json) .expect("CanonicalJsonObject is always a valid String"), )?; @@ -905,7 +932,7 @@ impl Rooms { self.append_pdu( &pdu, - &pdu_json, + pdu_json, count, pdu_id.clone().into(), globals, diff --git a/src/server_server.rs b/src/server_server.rs index 0653959..7abce5a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -494,7 +494,7 @@ pub async fn send_transaction_message_route<'a>( db.rooms.append_pdu( &pdu, - &value, + value, count, pdu_id.clone().into(), &db.globals, From 8dcc1dfe56f1ac9ce3b0b98e8876478898ebb3ef Mon Sep 17 00:00:00 2001 From: Leonhard Kuboschek Date: Tue, 8 Dec 2020 12:34:46 +0100 Subject: [PATCH 0364/1727] improvement: always use port from SRV lookups Also query SRV records when well-known is not found, fixes #29 --- src/server_server.rs | 199 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 157 insertions(+), 42 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 7abce5a..c47afab 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,30 +24,12 @@ use std::{ collections::BTreeMap, convert::TryFrom, fmt::Debug, + net::{IpAddr, SocketAddr}, time::{Duration, SystemTime}, }; -pub async fn request_well_known( - globals: &crate::database::globals::Globals, - destination: &str, -) -> Option { - let body: serde_json::Value = serde_json::from_str( - &globals - .reqwest_client() - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) - .send() - .await - .ok()? 
- .text() - .await - .ok()?, - ) - .ok()?; - Some(body.get("m.server")?.as_str()?.to_owned()) -} + + pub async fn send_request( globals: &crate::database::globals::Globals, @@ -215,42 +197,130 @@ where } } +fn get_ip_with_port(destination_str: String) -> Option { + if destination_str.parse::().is_ok() { + Some(destination_str) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(SocketAddr::new(ip_addr, 8448).to_string()) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: String) -> String { + match destination_str.find(':') { + None => destination_str.to_owned() + ":8448", + Some(_) => destination_str.to_string(), + } +} + /// Returns: actual_destination, host header +/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &Box, ) -> (String, Option) { let mut host = None; + let destination_str = destination.as_str().to_owned(); let actual_destination = "https://".to_owned() - + &if let Some(mut delegated_hostname) = - request_well_known(globals, destination.as_str()).await - { - if let Ok(Some(srv)) = globals - .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{}", delegated_hostname)) - .await - .map(|srv| srv.iter().next().map(|result| result.target().to_string())) - { - host = Some(delegated_hostname); - srv.trim_end_matches('.').to_owned() - } else { - if delegated_hostname.find(':').is_none() { - delegated_hostname += ":8448"; + + &match get_ip_with_port(destination_str.clone()) { + Some(host_port) => { + // 1: IP literal with provided or default port + host_port + } + None => { + if destination_str.find(':').is_some() { + // 2: Hostname with included port + destination_str + } else { + match request_well_known(globals, &destination.as_str()).await { + // 3: A .well-known file is available + Some(delegated_hostname) => { + match get_ip_with_port(delegated_hostname.clone()) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if destination_str.find(':').is_some() { + // 3.2: Hostname with port in .well-known file + destination_str + } else { + match query_srv_record(globals, &delegated_hostname).await { + // 3.3: SRV lookup successful + Some(hostname) => hostname, + // 3.4: No SRV records, just use the hostname from .well-known + None => add_port_to_hostname(delegated_hostname), + } + } + } + } + } + // 4: No .well-known or an error occured + None => { + match query_srv_record(globals, &destination_str).await { + // 4: SRV record found + Some(hostname) => { + host = Some(destination_str.to_owned()); + hostname + } + // 5: No SRV record found + None => add_port_to_hostname(destination_str.to_string()), + } + } + } } - delegated_hostname } - } else { - let mut destination = destination.as_str().to_owned(); - if destination.find(':').is_none() { - destination += ":8448"; - } - destination }; (actual_destination, host) } +async fn query_srv_record<'a>( + globals: &crate::database::globals::Globals, + hostname: &'a str, +) -> Option { + if let Ok(Some(host_port)) = globals + .dns_resolver() + .srv_lookup(format!("_matrix._tcp.{}", hostname)) + .await + .map(|srv| { + srv.iter().next().map(|result| { + format!( + "{}:{}", + result.target().to_string().trim_end_matches('.'), + result.port().to_string() + ) + }) + }) + { + Some(host_port) + } else { + None + } +} + +pub 
async fn request_well_known( + globals: &crate::database::globals::Globals, + destination: &str, +) -> Option { + let body: serde_json::Value = serde_json::from_str( + &globals + .reqwest_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? + .text() + .await + .ok()?, + ) + .ok()?; + Some(body.get("m.server")?.as_str()?.to_owned()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] pub fn get_server_version_route( db: State<'_, Database>, @@ -622,3 +692,48 @@ pub fn get_user_devices_route<'a>( .into()) } */ + +#[cfg(test)] +mod tests { + use super::{add_port_to_hostname, get_ip_with_port}; + + #[test] + fn ips_get_default_ports() { + assert_eq!( + get_ip_with_port(String::from("1.1.1.1")), + Some(String::from("1.1.1.1:8448")) + ); + assert_eq!( + get_ip_with_port(String::from("dead:beef::")), + Some(String::from("[dead:beef::]:8448")) + ); + } + + #[test] + fn ips_keep_custom_ports() { + assert_eq!( + get_ip_with_port(String::from("1.1.1.1:1234")), + Some(String::from("1.1.1.1:1234")) + ); + assert_eq!( + get_ip_with_port(String::from("[dead::beef]:8933")), + Some(String::from("[dead::beef]:8933")) + ); + } + + #[test] + fn hostnames_get_default_ports() { + assert_eq!( + add_port_to_hostname(String::from("example.com")), + "example.com:8448" + ) + } + + #[test] + fn hostnames_keep_custom_ports() { + assert_eq!( + add_port_to_hostname(String::from("example.com:1337")), + "example.com:1337" + ) + } +} From fb9bd34696d78f02e1d8cf65e53f52ce2782ff0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Dec 2020 15:53:41 +0100 Subject: [PATCH 0365/1727] improvement: better warnings when server is unreachable --- src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index d8e9d02..a5f2be4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -34,7 +34,7 @@ pub enum Error { #[from] source: image::error::ImageError, }, - #[error("Could not connect to server.")] + #[error("Could not connect to server: {source}")] ReqwestError { #[from] source: reqwest::Error, From 2cf6fd57b7da57b116ca782130d97027a8eacb79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Dec 2020 19:41:54 +0100 Subject: [PATCH 0366/1727] improvement: don't send pdus to appservices if it isn't interested TODO: we need to send pdus if a user of the appservice is in the room but not the appservice user itself --- Cargo.lock | 37 +++++++++++++++++++++++++++ Cargo.toml | 2 ++ src/database/rooms.rs | 59 ++++++++++++++++++++++++++++++++++++++++++- src/server_server.rs | 3 --- 4 files changed, 97 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6566b10..b5be6aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,6 +21,15 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + [[package]] name = "arrayref" version = "0.3.6" @@ -190,6 +199,7 @@ dependencies = [ "js_int", "log", "rand", + "regex", "reqwest", "ring", "rocket", @@ -1418,6 +1428,24 @@ dependencies = [ "syn", ] +[[package]] +name = "regex" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-syntax" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -2131,6 +2159,15 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + [[package]] name = "time" version = "0.2.23" diff --git a/Cargo.toml b/Cargo.toml index 1e4afe2..4b87199 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,8 @@ base64 = "0.13.0" ring = "0.16.19" # Used when querying the SRV record of other servers trust-dns-resolver = "0.19.6" +# Used to find matching events for appservices +regex = "1.4.2" [features] default = ["conduit_bin"] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e59c77f..a6d8fea 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -4,6 +4,7 @@ pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; use log::error; +use regex::Regex; use ring::digest; use ruma::{ api::client::error::ErrorKind, @@ -949,7 +950,63 @@ impl Rooms { } for appservice in appservice.iter_all().filter_map(|r| r.ok()) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else( + || Vec::new(), + |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }, + ); + let aliases = namespaces + .get("aliases") + .and_then(|users| users.get("regex")) + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + + let room_aliases = self.room_aliases(&room_id); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, globals.server_name()).ok() + }); + + if bridge_user_id.map_or(false, |bridge_user_id| { + self.is_joined(&bridge_user_id, room_id).unwrap_or(false) + }) || users.iter().any(|users| { + dbg!( + users.is_match(pdu.sender.as_str()) + || pdu.kind == EventType::RoomMember + && pdu.state_key.as_ref().map_or(false, |state_key| dbg!( + users.is_match(dbg!(&state_key)) + )) + ) + }) || aliases.map_or(false, |aliases| { + room_aliases + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + { + sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } } Ok(pdu.event_id) diff --git a/src/server_server.rs b/src/server_server.rs index c47afab..eb6b237 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -28,9 +28,6 @@ use std::{ time::{Duration, SystemTime}, }; - - - pub async fn send_request( globals: &crate::database::globals::Globals, destination: Box, From df16b2ba9817bf56d7b125db33c9539cfacf56d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 31 Dec 2020 
14:52:08 +0100 Subject: [PATCH 0367/1727] fix: rare state races --- src/database/rooms.rs | 29 ++++++++++++++++++----------- src/server_server.rs | 2 +- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a6d8fea..ab3dd3f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -678,9 +678,6 @@ impl Rooms { self.stateid_pduid.insert(&state_id, &short_pdu_id)?; } - self.roomid_statehash - .insert(new_pdu.room_id.as_bytes(), &*new_state_hash)?; - Ok(new_state_hash) } else { Err(Error::bad_database( @@ -689,6 +686,13 @@ impl Rooms { } } + pub fn set_room_state(&self, room_id: &RoomId, state_hash: &StateHashId) -> Result<()> { + self.roomid_statehash + .insert(room_id.as_bytes(), state_hash)?; + + Ok(()) + } + /// Creates a new persisted data unit and adds it to a room. #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( @@ -929,7 +933,7 @@ impl Rooms { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; self.append_pdu( &pdu, @@ -941,6 +945,10 @@ impl Rooms { admin, )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + self.set_room_state(&room_id, &statehashid)?; + for server in self .room_servers(room_id) .filter_map(|r| r.ok()) @@ -991,13 +999,12 @@ impl Rooms { if bridge_user_id.map_or(false, |bridge_user_id| { self.is_joined(&bridge_user_id, room_id).unwrap_or(false) }) || users.iter().any(|users| { - dbg!( - users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember - && pdu.state_key.as_ref().map_or(false, |state_key| dbg!( - users.is_match(dbg!(&state_key)) - )) - ) + users.is_match(pdu.sender.as_str()) + || pdu.kind == EventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(&state_key)) }) || aliases.map_or(false, |aliases| { room_aliases .filter_map(|r| r.ok()) diff --git a/src/server_server.rs b/src/server_server.rs index eb6b237..7b980e3 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -95,7 +95,7 @@ where ruma::signatures::sign_json( globals.server_name().as_str(), globals.keypair(), - &mut request_json, + dbg!(&mut request_json), ) .expect("our request json is what ruma expects"); From edfd3c1f34aae8ec29461cf8a5f529a54291c714 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 31 Dec 2020 21:07:05 +0100 Subject: [PATCH 0368/1727] improvement: better config, better logs --- src/appservice_server.rs | 6 +++--- src/client_server/search.rs | 4 ++-- src/database.rs | 35 +++++------------------------------ src/database/admin.rs | 2 +- src/database/sending.rs | 4 ++-- src/error.rs | 30 +++++++++++++++++++++++------- src/main.rs | 31 ++++++++++++++++++++++++------- src/server_server.rs | 16 ++++++++-------- 8 files changed, 68 insertions(+), 60 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index f1436e0..986909b 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -78,10 +78,10 @@ where if status != 200 { warn!( - "Server returned bad response {} ({}): {} {:?}", + "Appservice returned bad response {} {}\n{}\n{:?}", destination, - url, status, + url, utils::string_from_bytes(&body) ); } @@ -93,7 +93,7 @@ where ); response.map_err(|_| { 
warn!( - "Server returned invalid response bytes {} ({})", + "Appservice returned invalid response bytes {}\n{}", destination, url ); Error::BadServerResponse("Server returned bad response.") diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 0950b25..5fb87f0 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -77,8 +77,8 @@ pub async fn search_events_route( Ok(search_events::Response::new(ResultCategories { room_events: ResultRoomEvents { - count: None, // TODO? maybe not - groups: BTreeMap::new(), // TODO + count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it + groups: BTreeMap::new(), // TODO next_batch, results, state: BTreeMap::new(), // TODO diff --git a/src/database.rs b/src/database.rs index 99bba83..84be578 100644 --- a/src/database.rs +++ b/src/database.rs @@ -18,15 +18,14 @@ use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; use std::collections::HashMap; +use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; -use std::{convert::TryInto, fs::remove_dir_all}; use tokio::sync::Semaphore; #[derive(Clone, Deserialize)] pub struct Config { - #[serde(default = "default_server_name")] server_name: Box, - database_path: Option, + database_path: String, #[serde(default = "default_cache_capacity")] cache_capacity: u64, #[serde(default = "default_max_request_size")] @@ -41,12 +40,6 @@ pub struct Config { federation_enabled: bool, } -fn default_server_name() -> Box { - "localhost" - .try_into() - .expect("localhost is valid servername") -} - fn default_cache_capacity() -> u64 { 1024 * 1024 * 1024 } @@ -90,31 +83,13 @@ impl Database { /// Load an existing database or create a new one. pub async fn load_or_create(config: Config) -> Result { - let path = config - .database_path - .clone() - .map(Ok::<_, Error>) - .unwrap_or_else(|| { - let path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or_else(|| { - Error::bad_config("The OS didn't return a valid home directory path.") - })? - .data_dir() - .join(config.server_name.as_str()); - - Ok(path - .to_str() - .ok_or_else(|| Error::bad_config("Database path contains invalid unicode."))? - .to_owned()) - })?; - let db = sled::Config::default() - .path(&path) + .path(&config.database_path) .cache_capacity(config.cache_capacity) - .print_profile_on_drop(true) + .print_profile_on_drop(false) .open()?; - info!("Opened sled database at {}", path); + info!("Opened sled database at {}", config.database_path); let (admin_sender, admin_receiver) = mpsc::unbounded(); diff --git a/src/database/admin.rs b/src/database/admin.rs index 7de6bf9..1fb1983 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -43,7 +43,7 @@ impl Admin { .unwrap(); if conduit_room.is_none() { - warn!("Conduit instance does not have an #admins room. Logging to that room will not work."); + warn!("Conduit instance does not have an #admins room. Logging to that room will not work. 
Restart Conduit after creating a user to fix this."); } let send_message = |message: message::MessageEventContent| { diff --git a/src/database/sending.rs b/src/database/sending.rs index f21b154..74aad32 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::warn; +use log::info; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, @@ -123,7 +123,7 @@ impl Sending { } } Err((server, is_appservice, e)) => { - warn!("Couldn't send transaction to {}: {}", server, e); + info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { "+".as_bytes().to_vec() } else { diff --git a/src/error.rs b/src/error.rs index a5f2be4..c57843c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,29 +121,45 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); - eprintln!("{}", output); - if self.enabled(record.metadata()) - && record + && (record .module_path() .map_or(false, |path| path.starts_with("conduit::")) + || record + .module_path() + .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying + && record.metadata().level() <= log::Level::Warn) { + let first_line = output + .lines() + .next() + .expect("lines always returns one item"); + + eprintln!("{}", output); + + let mute_duration = match record.metadata().level() { + log::Level::Error => Duration::from_secs(60 * 5), // 5 minutes + log::Level::Warn => Duration::from_secs(60 * 60 * 24), // A day + _ => Duration::from_secs(60 * 60 * 24 * 7), // A week + }; + if self .last_logs .read() .unwrap() - .get(&output) - .map_or(false, |i| i.elapsed() < Duration::from_secs(60 * 30)) + .get(first_line) + .map_or(false, |i| i.elapsed() < mute_duration) + // Don't post this log again for some time { return; } if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { - mut_last_logs.insert(output.clone(), Instant::now()); + mut_last_logs.insert(first_line.to_owned(), Instant::now()); } self.db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain(output), + message::MessageEventContent::notice_plain(output), )); } } diff --git a/src/main.rs b/src/main.rs index 38a2ec9..9c0eab6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -13,18 +13,32 @@ mod utils; pub use database::Database; pub use error::{ConduitLogger, Error, Result}; -use log::LevelFilter; pub use pdu::PduEvent; pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; +use log::LevelFilter; +use rocket::figment::{ + providers::{Env, Format, Toml}, + Figment, +}; use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger - std::env::set_var("ROCKET_LOG_LEVEL", "off"); + std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - rocket::ignite() + let config = + Figment::from(rocket::Config::release_default()) + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. 
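The `ConduitLogger` change above rate-limits messages sent to the admin room by remembering the first line of each message together with a per-level mute window. A standalone sketch of that dedup, with a hand-rolled `Level` enum in place of `log::Level` and no Rocket or admin channel:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

#[derive(Clone, Copy)]
enum Level {
    Error,
    Warn,
    Info,
}

/// Mute window per level (same values as above: 5 minutes, a day, a week).
fn mute_duration(level: Level) -> Duration {
    match level {
        Level::Error => Duration::from_secs(60 * 5),
        Level::Warn => Duration::from_secs(60 * 60 * 24),
        Level::Info => Duration::from_secs(60 * 60 * 24 * 7),
    }
}

struct AdminLogDedup {
    last_logs: HashMap<String, Instant>,
}

impl AdminLogDedup {
    /// Returns true if the message should be forwarded to the admin room,
    /// keyed on its first line so multi-line variants of the same error
    /// don't defeat the dedup.
    fn should_forward(&mut self, level: Level, message: &str) -> bool {
        let first_line = message.lines().next().unwrap_or("").to_owned();

        if self
            .last_logs
            .get(&first_line)
            .map_or(false, |sent| sent.elapsed() < mute_duration(level))
        {
            return false; // the same first line was forwarded recently
        }

        self.last_logs.insert(first_line, Instant::now());
        true
    }
}

fn main() {
    let mut dedup = AdminLogDedup {
        last_logs: HashMap::new(),
    };
    assert!(dedup.should_forward(Level::Warn, "Couldn't send transaction to example.com"));
    assert!(!dedup.should_forward(Level::Warn, "Couldn't send transaction to example.com"));
}
```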
Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); + + rocket::custom(config) .mount( "/", routes![ @@ -137,10 +151,13 @@ fn setup_rocket() -> rocket::Rocket { ) .register(catchers![not_found_catcher]) .attach(AdHoc::on_attach("Config", |rocket| async { - let data = - Database::load_or_create(rocket.figment().extract().expect("config is valid")) - .await - .expect("config is valid"); + let config = rocket + .figment() + .extract() + .expect("It looks like your config is invalid. Please take a look at the error"); + let data = Database::load_or_create(config) + .await + .expect("config is valid"); data.sending .start_handler(&data.globals, &data.rooms, &data.appservice); diff --git a/src/server_server.rs b/src/server_server.rs index 7b980e3..87e986d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::warn; +use log::{info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -95,7 +95,7 @@ where ruma::signatures::sign_json( globals.server_name().as_str(), globals.keypair(), - dbg!(&mut request_json), + &mut request_json, ) .expect("our request json is what ruma expects"); @@ -161,18 +161,18 @@ where .bytes() .await .unwrap_or_else(|e| { - warn!("server error: {}", e); + warn!("server error {}", e); Vec::new().into() }) // TODO: handle timeout .into_iter() .collect::>(); if status != 200 { - warn!( - "Server returned bad response {} ({}): {} {:?}", + info!( + "Server returned bad response {} {}\n{}\n{:?}", destination, - url, status, + url, utils::string_from_bytes(&body) ); } @@ -183,8 +183,8 @@ where .expect("reqwest body is valid http body"), ); response.map_err(|_| { - warn!( - "Server returned invalid response bytes {} ({})", + info!( + "Server returned invalid response bytes {}\n{}", destination, url ); Error::BadServerResponse("Server returned bad response.") From 85364a9c27b950a8de821726bc5406bde04713c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 1 Jan 2021 10:52:43 +0100 Subject: [PATCH 0369/1727] improvement: change federation_enabled to federation_disabled This enables federation by default --- src/database.rs | 2 +- src/database/globals.rs | 4 ++-- src/server_server.rs | 18 +++++++++--------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/database.rs b/src/database.rs index 84be578..99ea9f5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -37,7 +37,7 @@ pub struct Config { #[serde(default)] encryption_disabled: bool, #[serde(default)] - federation_enabled: bool, + federation_disabled: bool, } fn default_cache_capacity() -> u64 { diff --git a/src/database/globals.rs b/src/database/globals.rs index 485650f..5444d6e 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -119,8 +119,8 @@ impl Globals { self.config.encryption_disabled } - pub fn federation_enabled(&self) -> bool { - self.config.federation_enabled + pub fn federation_disabled(&self) -> bool { + self.config.federation_disabled } pub fn dns_resolver(&self) -> &TokioAsyncResolver { diff --git a/src/server_server.rs b/src/server_server.rs index 87e986d..88b6a01 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -36,7 +36,7 @@ pub async fn send_request( where T: Debug, { - if !globals.federation_enabled() { + if 
globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -322,7 +322,7 @@ pub async fn request_well_known( pub fn get_server_version_route( db: State<'_, Database>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -337,7 +337,7 @@ pub fn get_server_version_route( #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { // TODO: Use proper types return Json("Federation is disabled.".to_owned()); } @@ -390,7 +390,7 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -437,7 +437,7 @@ pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -484,7 +484,7 @@ pub async fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -587,7 +587,7 @@ pub fn get_missing_events_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -632,7 +632,7 @@ pub fn get_profile_information_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } @@ -666,7 +666,7 @@ pub fn get_user_devices_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if !db.globals.federation_enabled() { + if db.globals.federation_disabled() { return Err(Error::bad_config("Federation is disabled.")); } From 3bdaf6e79e5ba0f893055c4744d0c107fbfbef77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 1 Jan 2021 11:24:09 +0100 Subject: [PATCH 0370/1727] improvement: better default config --- .gitignore | 1 + Rocket-example.toml | 31 ------------------------------- conduit-example.toml | 37 +++++++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 31 deletions(-) delete mode 100644 Rocket-example.toml create mode 100644 conduit-example.toml diff --git a/.gitignore b/.gitignore index ee48b11..efbc1d9 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ **/*.rs.bk Rocket.toml +conduit.toml diff --git a/Rocket-example.toml b/Rocket-example.toml deleted file mode 100644 index 8eb48e9..0000000 --- a/Rocket-example.toml +++ /dev/null @@ -1,31 +0,0 @@ -[global] -# The name of this server -# Note: If server name != hostname, you need a .well-known file for federation -# to work -server_name = "your.server.name" - -port = 14004 - -# Max size for uploads -#max_request_size = 20_000_000 # in bytes, ~20 MB - -# Disable registration. 
No new users will be able to register on this server -#registration_disabled = true - -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -#encryption_disabled = true - -#federation_enabled = true - -# Default path is in this user's data -#database_path = "/home/timo/MyConduitServer" - -# You should probably leave this at 0.0.0.0 -address = "0.0.0.0" - -# TLS support -# Note: Not necessary when using a reverse proxy: -#[global.tls] -#certs = "/etc/letsencrypt/live/your.server.name/fullchain.pem" -#key = "/etc/letsencrypt/live/your.server.name/privkey.pem" diff --git a/conduit-example.toml b/conduit-example.toml new file mode 100644 index 0000000..70d3ce4 --- /dev/null +++ b/conduit-example.toml @@ -0,0 +1,37 @@ +[global] +# The server_name is the name of this server. It is used as a suffix for user +# and room ids. Examples: matrix.org, conduit.rs +# The Conduit server needs to be reachable at https://your.server.name/ on port +# 443 (client-server) and 8448 (federation) OR you can create /.well-known +# files to redirect requests. See +# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client +# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information + +# YOU NEED TO EDIT THIS +#server_name = "your.server.name" + +# This is the only directly where Conduit will save its data +database_path = "/var/lib/conduit/conduit.db" + +# The port Conduit will be running on. You need to set up a reverse proxy in +# your web server (e.g. apache or nginx), so all requests to /_matrix on port +# 443 and 8448 will be forwarded to the Conduit instance running on this port +port = 6167 + +# Max size for uploads +max_request_size = 20_000_000 # in bytes + +# Disable registration. No new users will be able to register on this server +#registration_disabled = false + +# Disable encryption, so no new encrypted rooms can be created +# Note: existing rooms will continue to work +#encryption_disabled = false +#federation_disabled = false + +#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 +#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#workers = 4 # default: cpu core count * 2 + +address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy From d7e56dbfa0b9695458864983f07fe2aa1c36f11f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Oct 2020 14:18:20 +0200 Subject: [PATCH 0371/1727] docs: recommend using binaries --- DEPLOY_FROM_SOURCE.md => DEPLOY.md | 19 ++++++++++++++----- README.md | 9 ++++----- 2 files changed, 18 insertions(+), 10 deletions(-) rename DEPLOY_FROM_SOURCE.md => DEPLOY.md (72%) diff --git a/DEPLOY_FROM_SOURCE.md b/DEPLOY.md similarity index 72% rename from DEPLOY_FROM_SOURCE.md rename to DEPLOY.md index 456fe6e..443fac8 100644 --- a/DEPLOY_FROM_SOURCE.md +++ b/DEPLOY.md @@ -7,15 +7,24 @@ Make sure you have `libssl-dev` and `pkg-config` installed and the [rust toolcha ## Install Conduit +You have to download the binary that fits your machine. 
Run `uname -m` to see what you need: +- x84_64: `https://conduit.rs/master/x86_64/conduit-bin` +- armv7: `https://conduit.rs/master/armv7/conduit-bin` +- armv8: `https://conduit.rs/master/armv8/conduit-bin` +- arm: `https://conduit.rs/master/arm/conduit-bin` + ```bash $ sudo useradd -m conduit -$ sudo -u conduit cargo install --git "https://git.koesters.xyz/timo/conduit.git" +$ sudo -u conduit wget -O /home/conduit/conduit-bin && chmod +x /home/conduit/conduit-bin ``` ## Setup systemd service -In this guide, we set up a systemd service for Conduit, so it's easy to start, stop Conduit and set it to autostart when your server reboots. Paste the default systemd service below and configure it to fit your setup (in /etc/systemd/system/conduit.service). +In this guide, we set up a systemd service for Conduit, so it's easy to +start/stop Conduit and set it to autostart when your server reboots. Paste the +default systemd service you can find below into +`/etc/systemd/system/conduit.service` and configure it to fit your setup. ```systemd [Unit] @@ -38,7 +47,7 @@ User=conduit Group=conduit Type=simple Restart=always -ExecStart=/home/conduit/.cargo/bin/conduit +ExecStart=/home/conduit/conduit-bin [Install] WantedBy=multi-user.target @@ -92,12 +101,12 @@ $ sudo certbot -d conduit.koesters.xyz ## You're done! -Now you can start Conduit with +Now you can start Conduit with: ```bash $ sudo systemctl start conduit ``` -and set it to start automatically when your system boots with +Set it to start automatically when your system boots with: ```bash $ sudo systemctl enable conduit ``` diff --git a/README.md b/README.md index c839775..8e4b1b7 100644 --- a/README.md +++ b/README.md @@ -17,13 +17,12 @@ example) and register on the `https://conduit.koesters.xyz` homeserver. #### How can I deploy my own? -##### From source +##### Deploy -Clone the repo, build it with `cargo build --release` and call the binary -(target/release/conduit) from somewhere like a systemd script. [Read -more](DEPLOY_FROM_SOURCE.md) +Download or compile a conduit binary and call it from somewhere like a systemd script. [Read +more](DEPLOY.md) -##### Using Docker +##### Deploy using Docker Pull and run the docker image with From b4818716b880c147b34fdef078f0f0a9610d858d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 1 Jan 2021 13:47:53 +0100 Subject: [PATCH 0372/1727] improvement: better deploy guide --- DEPLOY.md | 146 ++++++++++++++++++++++++----------- src/client_server/account.rs | 2 +- src/client_server/room.rs | 2 +- src/database.rs | 18 +++-- src/database/globals.rs | 12 +-- src/database/rooms.rs | 4 +- src/server_server.rs | 18 ++--- 7 files changed, 131 insertions(+), 71 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 443fac8..f101539 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -1,53 +1,42 @@ -# Deploy from source +# Deploying Conduit -## Prerequisites +## Getting help -Make sure you have `libssl-dev` and `pkg-config` installed and the [rust toolchain](https://rustup.rs) is available on at least on user. +If you run into any problems while setting up Conduit, write an email to `support@conduit.rs`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +## Installing Conduit -## Install Conduit - -You have to download the binary that fits your machine. Run `uname -m` to see what you need: +You have to download the binary that fits your machine. Run `uname -m` to see +what you need. 
Now copy the right url: - x84_64: `https://conduit.rs/master/x86_64/conduit-bin` - armv7: `https://conduit.rs/master/armv7/conduit-bin` - armv8: `https://conduit.rs/master/armv8/conduit-bin` - arm: `https://conduit.rs/master/arm/conduit-bin` ```bash -$ sudo useradd -m conduit -$ sudo -u conduit wget -O /home/conduit/conduit-bin && chmod +x /home/conduit/conduit-bin +$ sudo wget -O /usr/local/bin/conduit +$ sudo chmod +x /usr/local/bin/conduit ``` -## Setup systemd service +## Setting up a systemd service -In this guide, we set up a systemd service for Conduit, so it's easy to -start/stop Conduit and set it to autostart when your server reboots. Paste the +Now we'll set up a systemd service for Conduit, so it's easy to start/stop +Conduit and set it to autostart when your server reboots. Simply paste the default systemd service you can find below into -`/etc/systemd/system/conduit.service` and configure it to fit your setup. +`/etc/systemd/system/conduit.service`. ```systemd [Unit] -Description=Conduit +Description=Conduit Matrix Server After=network.target [Service] -Environment="ROCKET_SERVER_NAME=YOURSERVERNAME.HERE" # EDIT THIS - -Environment="ROCKET_PORT=14004" # Reverse proxy port - -#Environment="ROCKET_MAX_REQUEST_SIZE=20000000" # in bytes -#Environment="ROCKET_REGISTRATION_DISABLED=true" -#Environment="ROCKET_ENCRYPTION_DISABLED=true" -#Environment="ROCKET_FEDERATION_ENABLED=true" -#Environment="ROCKET_LOG=normal" # Detailed logging - -Environment="ROCKET_ENV=production" -User=conduit -Group=conduit -Type=simple +Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" +User=root +Group=root Restart=always -ExecStart=/home/conduit/conduit-bin +ExecStart=/usr/local/bin/matrix-conduit [Install] WantedBy=multi-user.target @@ -59,43 +48,106 @@ $ sudo systemctl daemon-reload ``` -## Setup Reverse Proxy +## Creating the Conduit configuration file -This depends on whether you use Apache, Nginx or something else. For Apache it looks like this (in /etc/apache2/sites-enabled/050-conduit.conf): +Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** +```toml +[global] +# The server_name is the name of this server. It is used as a suffix for user +# and room ids. Examples: matrix.org, conduit.rs +# The Conduit server needs to be reachable at https://your.server.name/ on port +# 443 (client-server) and 8448 (federation) OR you can create /.well-known +# files to redirect requests. See +# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client +# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information + +# YOU NEED TO EDIT THIS +#server_name = "your.server.name" + +# This is the only directory where Conduit will save its data +database_path = "/var/lib/matrix-conduit/conduit_db" + +# The port Conduit will be running on. You need to set up a reverse proxy in +# your web server (e.g. 
apache or nginx), so all requests to /_matrix on port +# 443 and 8448 will be forwarded to the Conduit instance running on this port +port = 6167 + +# Max size for uploads +max_request_size = 20_000_000 # in bytes + +# Disabling registration means no new users will be able to register on this server +allow_registration = false + +# Disable encryption, so no new encrypted rooms can be created +# Note: existing rooms will continue to work +allow_encryption = true +allow_federation = true + +#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 +#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#workers = 4 # default: cpu core count * 2 + +address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy ``` - -ServerName conduit.koesters.xyz # EDIT THIS + +## Setting up the Reverse Proxy + +This depends on whether you use Apache, Nginx or another web server. + +### Apache + +Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: +``` +Listen 8448 + + + +ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode - -ServerAlias conduit.koesters.xyz # EDIT THIS - -ProxyPreserveHost On -ProxyRequests off -AllowEncodedSlashes NoDecode -ProxyPass / http://localhost:14004/ nocanon -ProxyPassReverse / http://localhost:14004/ nocanon +ProxyPass /_matrix/ http://localhost:6167/ +ProxyPassReverse /_matrix/ http://localhost:6167/ Include /etc/letsencrypt/options-ssl-apache.conf - -# EDIT THESE: -SSLCertificateFile /etc/letsencrypt/live/conduit.koesters.xyz/fullchain.pem -SSLCertificateKeyFile /etc/letsencrypt/live/conduit.koesters.xyz/privkey.pem +SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS +SSLCertificateKeyFile /etc/letsencrypt/live/your.server.name/privkey.pem # EDIT THIS ``` -Then run +**You need to make some edits again.** When you are done, run ```bash $ sudo systemctl reload apache2 ``` +### Nginx + +If you use Nginx and not Apache, add the following server section inside the +http section of `/etc/nginx/nginx.conf` +``` +server { + listen 443; + listen 8448; + server_name your.server.name; # EDIT THIS + + location /_matrix/ { + proxy_pass http://localhost:6167/_matrix/; + } +} +``` +**You need to make some edits again.** When you are done, run +```bash +$ sudo systemctl reload nginx +``` + + ## SSL Certificate -The easiest way to get an SSL certificate for the domain is to install `certbot` and run this: +The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: ```bash -$ sudo certbot -d conduit.koesters.xyz +$ sudo certbot -d your.server.name ``` diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 8fb926e..12c7f7e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -86,7 +86,7 @@ pub async fn register_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.registration_disabled() { + if !db.globals.allow_registration() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", diff --git a/src/client_server/room.rs b/src/client_server/room.rs index e473e6e..092e083 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -240,7 +240,7 @@ pub async fn create_room_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?; // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == 
EventType::RoomEncryption && db.globals.encryption_disabled() { + if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { continue; } diff --git a/src/database.rs b/src/database.rs index 99ea9f5..607e1be 100644 --- a/src/database.rs +++ b/src/database.rs @@ -33,11 +33,19 @@ pub struct Config { #[serde(default = "default_max_concurrent_requests")] max_concurrent_requests: u16, #[serde(default)] - registration_disabled: bool, - #[serde(default)] - encryption_disabled: bool, - #[serde(default)] - federation_disabled: bool, + allow_registration: bool, + #[serde(default = "true_fn")] + allow_encryption: bool, + #[serde(default = "false_fn")] + allow_federation: bool, +} + +fn false_fn() -> bool { + false +} + +fn true_fn() -> bool { + true } fn default_cache_capacity() -> u64 { diff --git a/src/database/globals.rs b/src/database/globals.rs index 5444d6e..3e24d82 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -111,16 +111,16 @@ impl Globals { self.config.max_request_size } - pub fn registration_disabled(&self) -> bool { - self.config.registration_disabled + pub fn allow_registration(&self) -> bool { + self.config.allow_registration } - pub fn encryption_disabled(&self) -> bool { - self.config.encryption_disabled + pub fn allow_encryption(&self) -> bool { + self.config.allow_encryption } - pub fn federation_disabled(&self) -> bool { - self.config.federation_disabled + pub fn allow_federation(&self) -> bool { + self.config.allow_federation } pub fn dns_resolver(&self) -> &TokioAsyncResolver { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ab3dd3f..4081944 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -786,8 +786,8 @@ impl Rooms { #[allow(clippy::blocks_in_if_conditions)] if !match event_type { EventType::RoomEncryption => { - // Don't allow encryption events when it's disabled - !globals.encryption_disabled() + // Only allow encryption events if it's allowed in the config + globals.allow_encryption() } EventType::RoomMember => { let prev_event = self diff --git a/src/server_server.rs b/src/server_server.rs index 88b6a01..7ff9e3f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -36,7 +36,7 @@ pub async fn send_request( where T: Debug, { - if globals.federation_disabled() { + if !globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -322,7 +322,7 @@ pub async fn request_well_known( pub fn get_server_version_route( db: State<'_, Database>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -337,7 +337,7 @@ pub fn get_server_version_route( #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { // TODO: Use proper types return Json("Federation is disabled.".to_owned()); } @@ -390,7 +390,7 @@ pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -437,7 +437,7 @@ pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -484,7 +484,7 
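Because the `allow_*` fields above use serde default functions, a config file only needs to contain the keys it actually changes. A trimmed-down sketch of that pattern, assuming the `serde` (with derive) and `toml` crates and showing only the three boolean flags:

```rust
use serde::Deserialize;

fn true_fn() -> bool {
    true
}

fn false_fn() -> bool {
    false
}

#[derive(Debug, Deserialize)]
struct Config {
    server_name: String,
    #[serde(default)] // missing key -> false
    allow_registration: bool,
    #[serde(default = "true_fn")] // missing key -> true
    allow_encryption: bool,
    #[serde(default = "false_fn")] // missing key -> false
    allow_federation: bool,
}

fn main() {
    // Only server_name is set; every other field falls back to its default.
    let config: Config = toml::from_str(r#"server_name = "example.com""#).unwrap();

    assert_eq!(config.server_name, "example.com");
    assert!(!config.allow_registration);
    assert!(config.allow_encryption);
    assert!(!config.allow_federation);
}
```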
@@ pub async fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -587,7 +587,7 @@ pub fn get_missing_events_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -632,7 +632,7 @@ pub fn get_profile_information_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -666,7 +666,7 @@ pub fn get_user_devices_route<'a>( db: State<'a, Database>, body: Ruma>, ) -> ConduitResult { - if db.globals.federation_disabled() { + if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } From d45d033bf1a676cbb3ef7900a415003fdde2a66a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jan 2021 17:12:16 +0100 Subject: [PATCH 0373/1727] fix: send presence updates when going offline remaining bug: conduit sends presence updates every 5 minutes even if the user is already offline --- src/database/rooms/edus.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index bf0cdfc..29edc2a 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -386,8 +386,6 @@ impl RoomEdus { .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) // 5 Minutes { - self.userid_lastpresenceupdate.remove(&user_id_bytes)?; - // Send new presence events to set the user offline let count = globals.next_count()?.to_be_bytes(); let user_id = utils::string_from_bytes(&user_id_bytes) @@ -421,6 +419,11 @@ impl RoomEdus { .expect("PresenceEvent can be serialized"), )?; } + + self.userid_lastpresenceupdate.insert( + &user_id.to_string().as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; } Ok(()) From ad7b3f184b340f5b23bc2612192e1550fcc43169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 11 Jan 2021 17:26:47 +0100 Subject: [PATCH 0374/1727] improvement: send 200 response for turn server info We didn't implement it, but this will stop clients from retrying the endpoint every minute --- src/client_server/voip.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index a8db62a..9216f1a 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,13 +1,17 @@ -use crate::{ConduitResult, Error}; -use ruma::api::client::{error::ErrorKind, r0::message::send_message_event}; +use crate::ConduitResult; +use ruma::api::client::r0::voip::get_turn_server_info; +use std::time::Duration; #[cfg(feature = "conduit_bin")] use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -pub async fn turn_server_route() -> ConduitResult { - Err(Error::BadRequest( - ErrorKind::NotFound, - "There is no turn server yet.", - )) +pub async fn turn_server_route() -> ConduitResult { + Ok(get_turn_server_info::Response { + username: "".to_owned(), + password: "".to_owned(), + uris: Vec::new(), + ttl: Duration::from_secs(60 * 60 * 24), + } + .into()) } From c263e130008b815ba7a752b3ea58841e2927e311 Mon Sep 17 00:00:00 2001 From: Paul 
van Tilburg Date: Mon, 11 Jan 2021 20:28:47 +0100 Subject: [PATCH 0375/1727] fix: update binary file name in docs for consistency --- DEPLOY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index f101539..b547b64 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -14,8 +14,8 @@ what you need. Now copy the right url: - arm: `https://conduit.rs/master/arm/conduit-bin` ```bash -$ sudo wget -O /usr/local/bin/conduit -$ sudo chmod +x /usr/local/bin/conduit +$ sudo wget -O /usr/local/bin/matrix-conduit +$ sudo chmod +x /usr/local/bin/matrix-conduit ``` From 690c0660648147b04956f60d723ffb1843793c72 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 0376/1727] State resolution outline for /send --- Cargo.lock | 152 +++++++------------ Cargo.toml | 8 +- src/main.rs | 2 +- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 347 +++++++++++++++++++++++++++++++++++++++---- 6 files changed, 384 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5be6aa..4734f80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,21 +215,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" [[package]] name = "constant_time_eq" @@ -631,9 +621,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -660,9 +650,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -758,9 +748,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -1035,9 +1025,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1109,12 +1099,12 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1129,18 +1119,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.13.0+1.1.1i" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "045e4dc48af57aad93d665885789b43222ae26f4886494da12d1ed58d309dcb6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" dependencies = [ "autocfg", "cc", @@ -1163,9 +1153,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1177,9 +1167,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" @@ -1276,9 +1266,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1343,9 +1333,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1457,9 +1447,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", "bytes", @@ -1486,7 +1476,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1586,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ 
"assign", "js_int", @@ -1604,7 +1593,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "http", "percent-encoding", @@ -1619,7 +1608,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1630,7 +1619,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "ruma-api", "ruma-common", @@ -1644,7 +1633,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "http", @@ -1663,7 +1652,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "maplit", @@ -1676,7 +1665,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-common", @@ -1690,7 +1679,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1701,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-api", @@ -1716,7 +1705,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "paste", "rand", @@ -1730,7 +1719,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro2", "quote", @@ -1741,7 +1730,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "serde", ] @@ -1749,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "form_urlencoded", "itoa", @@ -1762,7 +1751,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1773,7 +1762,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "base64 0.12.3", "ring", @@ -1839,12 +1828,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -1962,9 +1945,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -1999,13 +1982,12 @@ checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2033,7 +2015,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = 
"git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" +source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" dependencies = [ "itertools", "maplit", @@ -2116,9 +2098,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" dependencies = [ "proc-macro2", "quote", @@ -2223,9 +2205,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2293,9 +2275,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2472,9 +2454,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2566,30 +2548,6 @@ version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "web-sys" version = "0.3.46" diff --git a/Cargo.toml b/Cargo.toml index 4b87199..0ed8fb0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", 
"unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } diff --git a/src/main.rs b/src/main.rs index 9c0eab6..fe7ab0d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -178,6 +178,6 @@ async fn main() { } #[catch(404)] -fn not_found_catcher(_req: &'_ Request<'_>) -> String { +fn not_found_catcher(_: &Request<'_>) -> String { "404 Not Found".to_owned() } diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..f6ec415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -17,7 +17,7 @@ use std::{ time::UNIX_EPOCH, }; -#[derive(Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0fdca74..ce0cc74 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, + api::{AuthScheme, IncomingRequest, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -29,7 +29,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. 
-pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -40,10 +40,7 @@ pub struct Ruma { #[cfg(feature = "conduit_bin")] impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma where - ::Incoming: TryFrom>> + std::fmt::Debug, - <::Incoming as std::convert::TryFrom< - http::request::Request>, - >>::Error: std::fmt::Debug, + T::Incoming: IncomingRequest, { type Error = (); // TODO: Better error handling type Owned = Data; @@ -149,8 +146,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); debug!("{:?}", http_request); - - match ::Incoming::try_from(http_request) { + match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { body: t, sender_user, @@ -170,7 +166,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index 7ff9e3f..d68e9fa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -11,17 +11,18 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::get_missing_events, + event::{get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; +use state_res::StateMap; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, @@ -476,6 +477,34 @@ pub async fn get_public_rooms_route( .into()) } +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum PrevEvents { + Sequential(T), + Fork(Vec), +} + +impl IntoIterator for PrevEvents { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + match self { + Self::Sequential(item) => vec![item].into_iter(), + Self::Fork(list) => list.into_iter(), + } + } +} + +impl PrevEvents { + pub fn new(id: &[T]) -> Self { + match id { + [] => panic!("All events must have previous event"), + [single_id] => Self::Sequential(single_id.clone()), + rest => Self::Fork(rest.to_vec()), + } + } +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -532,53 +561,313 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this - // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. - - // 3. Passes hash checks, otherwise it is redacted before being processed further. - // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // 2. Passes signature checks, otherwise event is dropped. + // 3. 
Passes hash checks, otherwise it is redacted before being processed further. + let keys = db.globals.keypair(); + let mut pub_key_set = BTreeMap::new(); + pub_key_set.insert( + "ed25519:1".to_string(), + String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), + ); + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert("domain".to_string(), pub_key_set); + + let value = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + } + } else { + value + } + } + Err(_e) => { + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + }; + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) .expect("all ruma pdus are conduit pdus"); - let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(room_id)? { + if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + // TODO: remove the need to convert to state_res + let event = pdu.convert_for_state_res(); + let previous = pdu + .prev_events + .first() + .map(|id| { + db.rooms + .get_pdu(id) + .expect("todo") + .map(|ev| ev.convert_for_state_res()) + }) + .flatten(); - db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - value, - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + // 4. + let auth_events = db.rooms.get_auth_events( + &pdu.room_id, + &pdu.kind, + &pdu.sender, + pdu.state_key.as_deref(), + pdu.content.clone(), )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + auth_events + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with auth events".into()), + ); + continue; } - resolved_map.insert(event_id, Ok::<(), String>(())); + let mut previous_states = vec![]; + for id in &pdu.prev_events { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + previous_states.push(state); + } else { + // fetch the state + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
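+        // The state at the event is reconstructed from the room state at each of its
+        // prev_events: an empty set falls back to an empty state, a single prev_event's
+        // state is used as-is, and several prev_events (a fork in the event graph) are
+        // first merged with state resolution, then the event is auth-checked against
+        // the merged result below.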
+ let state_at_event = if previous_states.is_empty() { + // State is empty + Default::default() + } else if previous_states.len() == 1 { + previous_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &previous_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + state_at_event + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Event failed auth with state_at + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with state at the event".into()), + ); + continue; + } + + // The event could still be soft failed + append_state_soft(&db, &pdu)?; + + // Gather the forward extremities and resolve + let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; + let mut fork_states = vec![]; + for id in &forward_extrems { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + fork_states.push(state); + } else { + // This is probably an error?? + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 6. + let state_at_forks = if fork_states.is_empty() { + // State is empty + Default::default() + } else if fork_states.len() == 1 { + fork_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous, + state_at_forks + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? 
+ { + // Soft fail + resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + } else { + append_state(&db, &pdu)?; + // Event has passed all auth/stateres checks + resolved_map.insert(event.event_id(), Ok(())); + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 9e83d2b2d570e1d0addc5a2cd64f34bf262a9fb1 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 31 Dec 2020 08:40:49 -0500 Subject: [PATCH 0377/1727] Update state-res, use the new Event trait This also bumps ruma to latest and removes js_int infavor of the ruma re-export --- Cargo.lock | 141 +++++++++++++++++++------------- Cargo.toml | 8 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 22 +++-- src/client_server/message.rs | 7 +- src/database/rooms.rs | 68 +++++++++------ src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 116 +++++++++++++------------- src/server_server.rs | 64 ++++++++------- 11 files changed, 252 insertions(+), 184 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4734f80..609226a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -196,10 +196,9 @@ dependencies = [ "directories", "http", "image", - "js_int", "log", - "rand", "regex", + "rand 0.7.3", "reqwest", "ring", "rocket", @@ -571,6 +570,17 @@ dependencies = [ "wasi", ] +[[package]] +name = "getrandom" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi", +] + [[package]] name = "gif" version = "0.11.1" @@ -833,9 +843,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.9" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" dependencies = [ "serde", 
] @@ -1346,11 +1356,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.0", + "rand_hc 0.3.0", ] [[package]] @@ -1360,7 +1382,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.0", ] [[package]] @@ -1369,7 +1401,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +dependencies = [ + "getrandom 0.2.0", ] [[package]] @@ -1378,7 +1419,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.0", ] [[package]] @@ -1393,7 +1443,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1521,7 +1571,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1575,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "js_int", @@ -1593,7 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "http", "percent-encoding", @@ -1608,7 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1619,7 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "ruma-api", "ruma-common", @@ -1633,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "http", @@ -1652,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "maplit", @@ -1665,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-common", @@ -1679,7 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1690,7 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-api", @@ -1705,21 +1755,21 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand", + "rand 0.8.0", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", + "ruma-serde-macros", "serde", - "strum", ] [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro2", "quote", @@ -1730,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "serde", ] @@ -1738,7 +1788,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "form_urlencoded", "itoa", @@ -1750,8 +1800,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1762,9 +1812,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -2015,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" +source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" dependencies = [ "itertools", "maplit", @@ -2075,27 +2125,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" -[[package]] -name = "strum" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syn" version = "1.0.55" @@ -2115,7 +2144,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2344,7 +2373,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 0ed8fb0..44df254 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# TODO: remove the gen-eventid feature +state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -37,8 +38,7 @@ log = "0.4.11" http = "0.2.1" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" + # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for appservice registration files diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..2bff20c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -124,7 +124,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..f792062 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -131,7 +131,7 @@ pub async fn get_content_thumbnail_route( allow_remote: false, height: body.height, width: body.width, - method: body.method, + method: body.method.clone(), server_name: &body.server_name, media_id: &body.media_id, }, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..eb44085 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::StateEvent; +use state_res::Event; use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, @@ -594,19 +594,19 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) + PduEvent::from_id_val(&event_id, value.clone()) 
.map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; + .collect::>>>()?; let control_events = event_map .values() - .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .filter(|pdu| state_res::is_power_event(pdu)) + .map(|pdu| pdu.event_id.clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -646,7 +646,8 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -685,8 +686,13 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( +<<<<<<< HEAD &PduEvent::from(&**pdu), utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +======= + &pdu, + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +>>>>>>> 6232d1f (Update state-res, use the new Event trait) count, pdu_id.clone().into(), &db.globals, @@ -695,7 +701,9 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert((pdu.kind(), pdu.state_key()), pdu_id); + if let Some(key) = &pdu.state_key { + state.insert((pdu.kind(), key.to_string()), pdu_id); + } } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..c56cc94 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,10 @@ use ruma::{ events::EventContent, EventId, }; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -46,7 +49,7 @@ pub async fn send_message_event_route( return Ok(send_message_event::Response { event_id }.into()); } - let mut unsigned = serde_json::Map::new(); + let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.build_and_append_pdu( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4081944..87829a3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, @@ -67,12 +67,8 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> state_res::Result> { +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { let pid = self .get_pdu_id(event_id) .map_err(StateError::custom)? 
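The `StateStore<PduEvent>` impl above is what lets the `state-res` crate read events out of Conduit's database while it resolves forked room state; the `StateResolution::resolve` calls added to src/server_server.rs later in this patch pass `&db.rooms` as that store. A minimal sketch of that interaction, where `room_id` and `fork_states` are hypothetical stand-ins for the room and the per-extremity state maps built in the /send handler:

    // Sketch only: mirrors the StateResolution::resolve calls in this patch.
    // `fork_states` is assumed to be a Vec<StateMap<EventId>>, one map per fork.
    let resolved = state_res::StateResolution::resolve(
        &room_id,
        &RoomVersionId::Version6,
        &fork_states,
        None,        // no pre-fetched event map
        &db.rooms,   // Rooms implements StateStore<PduEvent>, so any missing
                     // events are loaded on demand through get_event()
    )
    .map_err(|_| Error::Conflict("state resolution failed"))?;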
@@ -91,7 +87,7 @@ impl StateStore for Rooms { .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) .map_err(Into::into) - .and_then(|pdu: StateEvent| { + .and_then(|pdu: PduEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == room_id { @@ -112,7 +108,7 @@ impl Rooms { &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -141,7 +137,7 @@ impl Rooms { pdu, )) }) - .collect::>>() + .collect() } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -181,7 +177,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -205,7 +201,7 @@ impl Rooms { content: serde_json::Value, ) -> Result> { let auth_events = state_res::auth_types_for_event( - kind.clone(), + kind, sender, state_key.map(|s| s.to_string()), content, @@ -213,7 +209,13 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get( + room_id, + &event_type, + &state_key + .as_deref() + .expect("found a non state event in auth events"), + )? { events.insert((event_type, state_key), pdu); } } @@ -290,7 +292,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) } else { @@ -795,23 +800,40 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
- .map(|pdu| pdu.convert_for_state_res()); + .map(Arc::new); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, + &Arc::new(PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key: Some(state_key.clone()), + prev_events, + depth: (prev_events.len() as u32).into(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), + redacts, + unsigned: unsigned + .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }), prev_event, None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) + Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) .collect::>>()?, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..2b1b03d 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 0421ae2..05fd6d6 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index f6ec415..c764700 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -33,8 +32,8 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] - pub unsigned: serde_json::Map, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub unsigned: BTreeMap, pub hashes: EventHash, pub signatures: BTreeMap, BTreeMap>, } @@ -227,61 +226,66 @@ impl PduEvent { ) .expect("Raw::from_value always works") } -} -impl From<&state_res::StateEvent> for PduEvent { - fn from(pdu: &state_res::StateEvent) -> Self { - Self { - event_id: pdu.event_id(), - room_id: 
pdu.room_id().clone(), - sender: pdu.sender().clone(), - origin_server_ts: (pdu - .origin_server_ts() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .try_into() - .expect("time is valid"), - kind: pdu.kind(), - content: pdu.content().clone(), - state_key: Some(pdu.state_key()), - prev_events: pdu.prev_event_ids(), - depth: *pdu.depth(), - auth_events: pdu.auth_events(), - redacts: pdu.redacts().cloned(), - unsigned: pdu.unsigned().clone().into_iter().collect(), - hashes: pdu.hashes().clone(), - signatures: pdu.signatures(), - } + pub fn from_id_val( + event_id: &EventId, + json: CanonicalJsonObject, + ) -> Result { + json.insert( + "event_id".to_string(), + ruma::serde::to_canonical_value(event_id).expect("event_id is a valid Value"), + ); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) } } -impl PduEvent { - pub fn convert_for_state_res(&self) -> Arc { - Arc::new( - // For consistency of eventId (just in case) we use the one - // generated by conduit for everything. - state_res::StateEvent::from_id_value( - self.event_id.clone(), - json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }), - ) - .expect("all conduit PDUs are state events"), - ) +impl state_res::Event for PduEvent { + fn event_id(&self) -> &EventId { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + &self.room_id + } + + fn sender(&self) -> &UserId { + &self.sender + } + fn kind(&self) -> EventType { + self.kind.clone() + } + + fn content(&self) -> serde_json::Value { + self.content.clone() + } + fn origin_server_ts(&self) -> std::time::SystemTime { + UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + } + + fn state_key(&self) -> Option { + self.state_key.clone() + } + fn prev_events(&self) -> Vec { + self.prev_events.to_vec() + } + fn depth(&self) -> &UInt { + &self.depth + } + fn auth_events(&self) -> Vec { + self.auth_events.to_vec() + } + fn redacts(&self) -> Option<&EventId> { + self.redacts.as_ref() + } + fn hashes(&self) -> &EventHash { + &self.hashes + } + fn signatures(&self) -> BTreeMap, BTreeMap> { + self.signatures.clone() + } + fn unsigned(&self) -> &BTreeMap { + &self.unsigned } } @@ -315,7 +319,7 @@ pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, - pub unsigned: Option>, + pub unsigned: Option>, pub state_key: Option, pub redacts: Option, } diff --git a/src/server_server.rs b/src/server_server.rs index d68e9fa..58d85b1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,12 +20,13 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::StateMap; +use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, + sync::Arc, time::{Duration, SystemTime}, }; @@ -610,17 +611,12 @@ pub async fn send_transaction_message_route<'a>( continue; } - // TODO: remove the need to convert to state_res - let event = pdu.convert_for_state_res(); + let event = Arc::new(pdu.clone()); + let previous = pdu .prev_events .first() - 
.map(|id| { - db.rooms - .get_pdu(id) - .expect("todo") - .map(|ev| ev.convert_for_state_res()) - }) + .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); // 4. @@ -637,27 +633,32 @@ pub async fn send_transaction_message_route<'a>( previous.clone(), auth_events .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) + .map(|(k, v)| (k, Arc::new(v))) .collect(), None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with auth events".into()), ); continue; } - let mut previous_states = vec![]; + let mut previous_states: Vec>> = vec![]; for id in &pdu.prev_events { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .collect(); previous_states.push(state); } else { // fetch the state @@ -693,7 +694,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -702,7 +703,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -712,17 +713,14 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_event, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { // Event failed auth with state_at resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with state at the event".into()), ); continue; @@ -733,14 +731,20 @@ pub async fn send_transaction_message_route<'a>( // Gather the forward extremities and resolve let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states = vec![]; + let mut fork_states: Vec>> = vec![]; for id in &forward_extrems { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + fork_states.push(state); } else { // This is probably an error?? @@ -776,7 +780,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -785,7 +789,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -795,20 +799,20 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
{ // Soft fail - resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + resolved_map.insert( + event.event_id().clone(), + Err("Event has been soft failed".into()), + ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id(), Ok(())); + resolved_map.insert(event.event_id().clone(), Ok(())); } } From 0ee239c9d78a4b02fa58d018db28cecdd8f9bd78 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 3 Jan 2021 17:26:17 -0500 Subject: [PATCH 0378/1727] Step 5 in /send just fetches state from incoming server --- Cargo.lock | 78 ++++++------- src/database/rooms.rs | 29 +---- src/pdu.rs | 2 +- src/server_server.rs | 264 +++++++++++++++++++++++------------------- 4 files changed, 186 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 609226a..033c15b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -44,9 +44,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -354,9 +354,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -561,11 +561,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi", ] @@ -819,9 +819,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" @@ -1017,9 +1017,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1109,9 +1109,9 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.31" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -1138,9 +1138,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.59" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1356,7 +1356,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1401,7 +1401,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1443,25 +1443,25 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -1954,9 +1954,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -2026,9 +2026,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" [[package]] name = "socket2" @@ -2049,9 +2049,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" +source = 
"git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" dependencies = [ "itertools", "maplit", @@ -2127,9 +2127,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.55" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" +checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" dependencies = [ "proc-macro2", "quote", @@ -2152,18 +2152,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -2673,9 +2673,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 87829a3..48e7c14 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -802,32 +802,9 @@ impl Rooms { ))?)? 
.map(Arc::new); event_auth::valid_membership_change( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - &Arc::new(PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key: Some(state_key.clone()), - prev_events, - depth: (prev_events.len() as u32).into(), - auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) - .collect(), - redacts, - unsigned: unsigned - .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), - }), + Some(state_key.as_str()), + &sender, + content.clone(), prev_event, None, // TODO: third party invite &auth_events diff --git a/src/pdu.rs b/src/pdu.rs index c764700..2997317 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -229,7 +229,7 @@ impl PduEvent { pub fn from_id_val( event_id: &EventId, - json: CanonicalJsonObject, + mut json: CanonicalJsonObject, ) -> Result { json.insert( "event_id".to_string(), diff --git a/src/server_server.rs b/src/server_server.rs index 58d85b1..3c4308c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,13 +11,15 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::Raw, + signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; @@ -578,32 +580,13 @@ pub async fn send_transaction_message_route<'a>( let mut pub_key_map = BTreeMap::new(); pub_key_map.insert("domain".to_string(), pub_key_set); - let value = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - } - } else { - value - } - } - Err(_e) => { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - }; - - let pdu = serde_json::from_value::( - serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("all ruma pdus are conduit pdus"); + let pdu = match signature_and_hash_check(&pub_key_map, value) { + Ok(pdu) => pdu, + Err(e) => { + resolved_map.insert(event_id, Err(e)); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? 
{ @@ -619,7 +602,10 @@ pub async fn send_transaction_message_route<'a>( .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); - // 4. + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // the auth events that would be correct for this pdu. Put another way we should use the auth events + // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( &pdu.room_id, &pdu.kind, @@ -627,6 +613,12 @@ pub async fn send_transaction_message_route<'a>( pdu.state_key.as_deref(), pdu.content.clone(), )?; + + let mut event_map: state_res::EventMap> = auth_events + .iter() + .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .collect(); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, @@ -635,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), - None, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -646,66 +638,38 @@ pub async fn send_transaction_message_route<'a>( continue; } - let mut previous_states: Vec>> = vec![]; - for id in &pdu.prev_events { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? + let server_name = body.body.origin.clone(); + let (state_at_event, incoming_auth_events): (StateMap>, _) = match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) + .await? .into_iter() - .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); - previous_states.push(state); - } else { - // fetch the state - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } - } - } - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
- let state_at_event = if previous_states.is_empty() { - // State is empty - Default::default() - } else if previous_states.len() == 1 { - previous_states[0].clone() - } else { - match state_res::StateResolution::resolve( - &pdu.room_id, - &RoomVersionId::Version6, - &previous_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - None, - &db.rooms, - ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), - Err(e) => panic!("{:?}", e), + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; } }; @@ -713,8 +677,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event, - None, + state_at_event.clone(), // TODO: could this be &state avoid .clone + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -747,22 +711,7 @@ pub async fn send_transaction_message_route<'a>( fork_states.push(state); } else { - // This is probably an error?? - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } + todo!("we don't know of a pdu that is part of our known forks OOPS") } } @@ -773,6 +722,18 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // Add as much as we can to the `event_map` (less DB hits) + event_map.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + ); + event_map.extend( + state_at_event + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, @@ -784,7 +745,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - None, + &mut event_map, &db.rooms, ) { Ok(res) => res @@ -819,8 +780,74 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn signature_and_hash_check( + pub_key_map: &ruma::signatures::PublicKeyMap, + value: CanonicalJsonObject, +) -> std::result::Result { + let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".into()), + } + } else { + value + } + } + Err(_e) => return Err("Signature verification failed".into()), + }; + + serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// TODO: this needs to add events to the DB in a way that does not +/// effect the state of the room +async fn fetch_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + events: &[EventId], +) -> Result> { + let mut pdus = vec![]; + for id in events { + match db.rooms.get_pdu(id)? 
{ + Some(pdu) => pdus.push(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: id }, + ) + .await + { + Ok(res) => { + let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(pdu) => { + // TODO: add to our DB somehow? + pdus.push(pdu); + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + } + } + Ok(pdus) +} + fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() + db.rooms.get_pdu_leaves(room_id) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -854,20 +881,15 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + // db.rooms.append_pdu( + // pdu, + // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + // count, + // pdu_id.clone().into(), + // &db.globals, + // &db.account_data, + // &db.admin, + // )?; Ok(()) } From 7c4d74bf9ba04c70be602f9a8d34259024a34e6c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 5 Jan 2021 09:21:41 -0500 Subject: [PATCH 0379/1727] Fix clippy warnings remove unused imports --- Cargo.lock | 2 +- src/appservice_server.rs | 2 +- src/client_server/membership.rs | 1 - src/database/globals.rs | 4 ++- src/database/sending.rs | 12 ++++----- src/pdu.rs | 7 +---- src/server_server.rs | 45 ++++++++++++++++++++------------- 7 files changed, 39 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 033c15b..7ef5efb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" +source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" dependencies = [ "itertools", "maplit", diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" 
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index eb44085..4e093c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -675,7 +675,6 @@ async fn join_room_by_id_helper( .iter() .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) .expect("Found event_id in sorted events that is not in resolved state"); diff --git a/src/database/globals.rs b/src/database/globals.rs index 3e24d82..c8e3b23 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,13 +9,15 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +pub type DestinationCache = Arc, (String, Option)>>>; + #[derive(Clone)] pub struct Globals { pub(super) globals: sled::Tree, config: Config, keypair: Arc, reqwest_client: reqwest::Client, - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: DestinationCache, // actual_destination, host dns_resolver: TokioAsyncResolver, } diff --git a/src/database/sending.rs b/src/database/sending.rs index 74aad32..709fa53 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -79,7 +79,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -125,7 +125,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -156,7 +156,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -185,7 +185,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -219,7 +219,7 @@ impl Sending { } pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -329,7 +329,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/pdu.rs b/src/pdu.rs index 2997317..86fbc9f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,12 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::UNIX_EPOCH, -}; +use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 3c4308c..3de3636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use 
log::{error, info, warn}; @@ -11,14 +11,13 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::Raw, signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -220,7 +219,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &ServerName, ) -> (String, Option) { let mut host = None; @@ -594,13 +593,14 @@ pub async fn send_transaction_message_route<'a>( continue; } + let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - - let previous = pdu - .prev_events - .first() - .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) - .flatten(); + // Fetch any unknown events or retrieve them from the DB + let previous = + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { + mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not @@ -616,14 +616,14 @@ pub async fn send_transaction_message_route<'a>( let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - auth_events + &auth_events .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), @@ -638,7 +638,6 @@ pub async fn send_transaction_message_route<'a>( continue; } - let server_name = body.body.origin.clone(); let (state_at_event, incoming_auth_events): (StateMap>, _) = match db .sending .send_federation_request( @@ -652,8 +651,18 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) - .await? + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); @@ -677,8 +686,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event.clone(), // TODO: could this be &state avoid .clone - None, // TODO: third party invite + &state_at_event, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -760,7 +769,7 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks, + &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
From 8a035880f097d885baed6e9ee179ccbe3db16881 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 08:52:30 -0500 Subject: [PATCH 0380/1727] Remove StateStore trait from state-res collect events needed --- Cargo.lock | 70 +++++++++++--------- Cargo.toml | 2 +- src/client_server/membership.rs | 4 -- src/database/rooms.rs | 100 ++++++++++++++++++---------- src/server_server.rs | 111 +++++++++++++++++++------------- 5 files changed, 170 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ef5efb..f439e51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -543,7 +543,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -567,18 +567,18 @@ checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -707,7 +707,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -1221,11 +1221,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.3", ] [[package]] @@ -1241,9 +1241,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1258,9 +1258,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" [[package]] name = "pin-utils" @@ -1365,13 +1365,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" dependencies = [ "libc", "rand_chacha 0.3.0", - "rand_core 0.6.0", + "rand_core 0.6.1", 
"rand_hc 0.3.0", ] @@ -1392,7 +1392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1406,11 +1406,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.1", ] [[package]] @@ -1428,7 +1428,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1518,7 +1518,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "serde", "serde_urlencoded", "tokio", @@ -1758,7 +1758,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand 0.8.0", + "rand 0.8.1", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1977,9 +1977,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" dependencies = [ "dtoa", "linked-hash-map", @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" +source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" dependencies = [ "itertools", "maplit", @@ -2127,9 +2127,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2325,7 +2325,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "tracing-attributes", "tracing-core", ] @@ -2509,6 +2509,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.69" diff --git a/Cargo.toml b/Cargo.toml index 44df254..004cbfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", # Used when 
doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4e093c2..ea14268 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -618,7 +618,6 @@ async fn join_room_by_id_helper( &room_id, &control_events, &mut event_map, - &db.rooms, &event_ids, ); @@ -629,7 +628,6 @@ async fn join_room_by_id_helper( &sorted_control_events, &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); @@ -654,7 +652,6 @@ async fn join_room_by_id_helper( &events_to_sort, power_level, &mut event_map, - &db.rooms, ); let resolved_events = state_res::StateResolution::iterative_auth_check( @@ -663,7 +660,6 @@ async fn join_room_by_id_helper( &sorted_event_ids, &resolved_control_events, &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 48e7c14..b84d1f9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -67,40 +67,6 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { - let pid = self - .get_pdu_id(event_id) - .map_err(StateError::custom)? - .ok_or_else(|| { - StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db: {}", - event_id.as_str() - )) - })?; - - serde_json::from_slice( - &self - .pduid_pdu - .get(pid) - .map_err(StateError::custom)? - .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, - ) - .map_err(Into::into) - .and_then(|pdu: PduEvent| { - // conduit's PDU's always contain a room_id but some - // of ruma's do not so this must be an Option - if pdu.room_id() == room_id { - Ok(Arc::new(pdu)) - } else { - Err(StateError::NotFound( - "Found PDU for incorrect room in db.".into(), - )) - } - }) - } -} - impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -222,6 +188,72 @@ impl Rooms { Ok(events) } + /// Returns a Vec of the related auth events to the given `event`. + /// + /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. + pub fn auth_events_full( + &self, + room_id: &RoomId, + event_ids: &[EventId], + ) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + if let Some(ev) = self.get_pdu(&ev_id)? 
{ + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) + } + + /// Returns a Vec representing the difference in auth chains of the given `events`. + /// + /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). + pub fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + use std::collections::BTreeSet; + + let mut chains = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_events_full(room_id, &ids)? + .into_iter() + .map(|pdu| pdu.event_id) + .collect::>(); + chains.push(chain); + } + + if let Some(chain) = chains.first() { + let rest = chains.iter().skip(1).flatten().cloned().collect(); + let common = chain.intersection(&rest).collect::>(); + + Ok(chains + .iter() + .flatten() + .filter(|id| !common.contains(&id)) + .cloned() + .collect::>() + .into_iter() + .collect()) + } else { + Ok(vec![]) + } + } + /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. diff --git a/src/server_server.rs b/src/server_server.rs index 3de3636..f68475c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -603,7 +603,7 @@ pub async fn send_transaction_message_route<'a>( }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not // the auth events that would be correct for this pdu. Put another way we should use the auth events // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( @@ -637,50 +637,56 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 4. - let (state_at_event, incoming_auth_events): (StateMap>, _) = match db - .sending - .send_federation_request( - &db.globals, - server_name.clone(), - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) - .collect(); - - ( - state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) - .await?, + // Step 5. 
event passes auth based on state at the event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - } - Err(_) => { - resolved_map.insert( - event.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await + { + Ok(res) => { + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .collect(); + + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await? + .into_iter() + .map(Arc::new) + .collect(), + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; if !state_res::event_auth::auth_check( &RoomVersionId::Version6, @@ -698,6 +704,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 5. // The event could still be soft failed append_state_soft(&db, &pdu)?; @@ -724,18 +731,30 @@ pub async fn send_transaction_message_route<'a>( } } - // 6. + // Step 6. event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() } else if fork_states.len() == 1 { fork_states[0].clone() } else { + let auth_events = fork_states + .iter() + .map(|map| { + db.rooms.auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + }) + .collect(); + // Add as much as we can to the `event_map` (less DB hits) event_map.extend( incoming_auth_events .into_iter() - .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + .map(|pdu| (pdu.event_id().clone(), pdu)), ); event_map.extend( state_at_event @@ -754,8 +773,8 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), + &auth_events, &mut event_map, - &db.rooms, ) { Ok(res) => res .into_iter() From 88f3ee489b23536698fef8b97f79d4489dd9d547 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 15:05:09 -0500 Subject: [PATCH 0381/1727] Fill event_map with all events that will be needed for resolution --- Cargo.lock | 2 +- src/server_server.rs | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f439e51..d08de95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -197,8 +197,8 @@ dependencies = [ "http", "image", "log", - "regex", "rand 0.7.3", + "regex", "reqwest", "ring", "rocket", diff --git a/src/server_server.rs b/src/server_server.rs index f68475c..e87c05c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -565,7 +565,7 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this - + // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); // 2. Passes signature checks, otherwise event is dropped. 
@@ -741,16 +741,24 @@ pub async fn send_transaction_message_route<'a>( let auth_events = fork_states .iter() .map(|map| { - db.rooms.auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), - ) + db.rooms + .auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) }) - .collect(); + .collect::>>()?; - // Add as much as we can to the `event_map` (less DB hits) + // Add everything we will need to event_map + event_map.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); event_map.extend( incoming_auth_events .into_iter() @@ -773,7 +781,10 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - &auth_events, + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), &mut event_map, ) { Ok(res) => res From 27c4e9d9d5d362c174c6ca14df5cd1fe412a23a2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 12 Jan 2021 08:26:52 -0500 Subject: [PATCH 0382/1727] Fix signature/hash checks, fetch recursive auth events --- src/client_server/membership.rs | 7 +- src/database/rooms.rs | 4 +- src/pdu.rs | 12 +- src/server_server.rs | 240 +++++++++++++++++++++++++------- 4 files changed, 193 insertions(+), 70 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea14268..29b6c14 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -681,13 +681,8 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( -<<<<<<< HEAD - &PduEvent::from(&**pdu), - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), -======= &pdu, - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), ->>>>>>> 6232d1f (Update state-res, use the new Event trait) + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b84d1f9..6b51d58 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; +use state_res::{event_auth, Event, StateMap}; use std::{ collections::{BTreeMap, HashMap}, @@ -193,7 +193,7 @@ impl Rooms { /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. pub fn auth_events_full( &self, - room_id: &RoomId, + _room_id: &RoomId, event_ids: &[EventId], ) -> Result> { let mut result = BTreeMap::new(); diff --git a/src/pdu.rs b/src/pdu.rs index 86fbc9f..750f9cf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,7 +4,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -286,12 +286,11 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. 
/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn process_incoming_pdu( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", @@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu( )) .expect("ruma's reference hashes are valid event ids"); - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - (event_id, value) } diff --git a/src/server_server.rs b/src/server_server.rs index e87c05c..141d5bb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ + device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -18,13 +20,14 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - signatures::{CanonicalJsonObject, PublicKeyMap}, + serde::to_canonical_value, + signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, net::{IpAddr, SocketAddr}, sync::Arc, @@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } + dbg!(&*body); + for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>( } "m.presence" => {} "m.receipt" => {} + "m.device_list_update" => {} _ => {} }, Err(_err) => { @@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. // 3. Passes hash checks, otherwise it is redacted before being processed further. - let keys = db.globals.keypair(); - let mut pub_key_set = BTreeMap::new(); - pub_key_set.insert( - "ed25519:1".to_string(), - String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), - ); + let server_name = body.body.origin.clone(); let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert("domain".to_string(), pub_key_set); + if let Some(sig) = value.get("signatures") { + match sig { + CanonicalJsonValue::Object(entity) => { + for key in entity.keys() { + // TODO: save this in a DB maybe... 
+ // fetch the public signing key + let res = db + .sending + .send_federation_request( + &db.globals, + Box::::try_from(key.to_string()).unwrap(), + get_server_keys::v2::Request::new(), + ) + .await?; - let pdu = match signature_and_hash_check(&pub_key_map, value) { + pub_key_map.insert( + res.server_key.server_name.to_string(), + res.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); + } + } + _ => { + resolved_map.insert( + event_id, + Err("`signatures` is not a JSON object".to_string()), + ); + continue; + } + } + } else { + resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); + continue; + } + + let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>( } }; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - // Fetch any unknown events or retrieve them from the DB + dbg!(&*event); + // Fetch any unknown prev_events or retrieve them from the DB let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { - mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), _ => None, }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not - // the auth events that would be correct for this pdu. Put another way we should use the auth events - // the pdu claims are its auth events - let auth_events = db.rooms.get_auth_events( - &pdu.room_id, - &pdu.kind, - &pdu.sender, - pdu.state_key.as_deref(), - pdu.content.clone(), - )?; + // Recursively gather all auth events checking that the previous auth events are valid. 
+ let auth_events: Vec = + match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) + .await + { + Ok(events) => events, + Err(_) => { + resolved_map.insert( + pdu.event_id, + Err("Failed to recursively gather auth events".into()), + ); + continue; + } + }; let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); - if !state_res::event_auth::auth_check( + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - &auth_events - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + &pdu.auth_events + .iter() + .map(|id| { + event_map + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + Error::Conflict( + "Auth event not found, event failed recursive auth checks.", + ) + }) + }) + .collect::>>()?, None, // TODO: third party invite ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { + .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( pdu.event_id, Err("Event has failed auth check with auth events".into()), @@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, -) -> std::result::Result { - let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".into()), +) -> std::result::Result { + Ok( + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + error!("CONTENT HASH FAILED"); + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => return Err("Signature verification failed".into()), - }; - - serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }, ) - .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have missing events it fails. +async fn fetch_check_auth_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + event_ids: &[EventId], +) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + let ev = match db.rooms.get_pdu(&ev_id)? 
{ + Some(pdu) => pdu, + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: &ev_id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(mut val) => { + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map") + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }; + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) } /// TODO: this needs to add events to the DB in a way that does not @@ -865,10 +988,21 @@ async fn fetch_events( .await { Ok(res) => { - let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); match signature_and_hash_check(key_map, value) { - Ok(pdu) => { + Ok(mut val) => { // TODO: add to our DB somehow? + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + pdus.push(pdu); } Err(e) => { @@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; db.rooms.append_pdu( pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, From 7c4e116caab10db9613455f755072a9b83835117 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 0383/1727] State resolution outline for /send --- src/server_server.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index 141d5bb..31d6467 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1067,6 +1067,59 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { Ok(()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + 
pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 2ac3ffbb2405cdaddb308cdc6e8be87a9c635c61 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 14:39:56 -0500 Subject: [PATCH 0384/1727] Convert uses of Box to a ref --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 2 +- src/server_server.rs | 117 ++++++++++++++++++++++---------- 7 files changed, 92 insertions(+), 43 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..0dc40a9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -70,7 +70,7 @@ pub async fn get_alias_helper( .sending .send_federation_request( &db.globals, - room_alias.server_name().to_owned(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2bff20c..87d5fc8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -137,7 +137,7 @@ pub async fn get_public_rooms_filtered_helper( .sending .send_federation_request( &db.globals, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f792062..275038a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -77,7 +77,7 @@ pub async fn get_content_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -126,7 +126,7 @@ pub async fn get_content_thumbnail_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content_thumbnail::Request { allow_remote: false, height: body.height, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29b6c14..40e4183 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -468,7 +468,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, @@ -547,7 +547,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/sending.rs b/src/database/sending.rs index 709fa53..e827dad 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -302,7 +302,7 @@ impl Sending { server_server::send_request( &globals, - server.clone(), + &*server, send_transaction_message::v1::Request { origin: globals.server_name(), pdus: &pdu_jsons, @@ -347,7 +347,7 @@ impl Sending { pub 
async fn send_federation_request( &self, globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where diff --git a/src/pdu.rs b/src/pdu.rs index 750f9cf..340ddee 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -287,7 +287,7 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub(crate) fn process_incoming_pdu( +pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/server_server.rs b/src/server_server.rs index 31d6467..64e0a05 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,11 +20,12 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, StateMap}; +use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, @@ -36,7 +37,7 @@ use std::{ pub async fn send_request( globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where @@ -50,7 +51,7 @@ where .actual_destination_cache .read() .unwrap() - .get(&destination) + .get(destination) .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { @@ -61,7 +62,7 @@ where .actual_destination_cache .write() .unwrap() - .insert(destination.clone(), result.clone()); + .insert(Box::::from(destination), result.clone()); result }; @@ -278,9 +279,9 @@ async fn find_actual_destination( (actual_destination, host) } -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() @@ -572,11 +573,9 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then - let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let server_name = body.body.origin.clone(); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { match sig { @@ -588,7 +587,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - Box::::try_from(key.to_string()).unwrap(), + <&ServerName>::try_from(key.as_str()).unwrap(), get_server_keys::v2::Request::new(), ) .await?; @@ -616,6 +615,9 @@ pub async fn send_transaction_message_route<'a>( continue; } + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
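The `signature_and_hash_check` helper called just below boils down to one decision on the result of `ruma::signatures::verify_event`: `Verified::Signatures` means the signatures were valid but the content hash was not, so only a redacted copy of the event may be kept. A compact sketch of that decision, assuming the same ruma-signatures API the patch already uses:

use ruma::signatures::{self, CanonicalJsonObject, PublicKeyMap, Verified};
use ruma::RoomVersionId;

fn check_incoming(
    pub_key_map: &PublicKeyMap,
    value: CanonicalJsonObject,
) -> Result<CanonicalJsonObject, String> {
    match signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) {
        // Signatures valid, content hash mismatch: keep only the redacted copy.
        Ok(Verified::Signatures) => signatures::redact(&value, &RoomVersionId::Version6)
            .map_err(|_| "Redaction failed".to_owned()),
        // Everything checked out: keep the event as received.
        Ok(_) => Ok(value),
        Err(_) => Err("Signature verification failed".to_owned()),
    }
}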
let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { @@ -625,15 +627,20 @@ pub async fn send_transaction_message_route<'a>( }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // to our PduEvent type also finally verifying the first step listed above val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let pdu = match serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); + ) { + Ok(pdu) => pdu, + Err(_) => { + resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { @@ -644,18 +651,15 @@ pub async fn send_transaction_message_route<'a>( let event = Arc::new(pdu.clone()); dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // Recursively gather all auth events checking that the previous auth events are valid. let auth_events: Vec = - match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) - .await - { + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { Ok(events) => events, Err(_) => { resolved_map.insert( @@ -707,7 +711,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - server_name.clone(), + server_name, get_room_state_ids::v1::Request { room_id: pdu.room_id(), event_id: pdu.event_id(), @@ -716,8 +720,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -734,7 +737,7 @@ pub async fn send_transaction_message_route<'a>( ( state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) .await? .into_iter() .map(Arc::new) @@ -881,6 +884,52 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +async fn auth_each_event( + db: &Database, + value: CanonicalJsonObject, + event_id: EventId, + pub_key_map: &PublicKeyMap, + server_name: &ServerName, + auth_cache: EventMap>, +) -> std::result::Result { + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
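The event id is attached only after the signature and hash checks because it is not part of the signed JSON; once attached, the canonical object is round-tripped through serde_json into the crate's `PduEvent`. A sketch of that step as a free function, made generic over the target type so it does not depend on this crate's `PduEvent`:

use ruma::serde::to_canonical_value;
use ruma::signatures::CanonicalJsonObject;
use ruma::EventId;
use serde::de::DeserializeOwned;

fn finalize_event<T: DeserializeOwned>(
    mut val: CanonicalJsonObject,
    event_id: &EventId,
) -> Result<T, String> {
    // The id is derived from the reference hash and may only be added
    // after the signature and content-hash checks ran on the raw object.
    val.insert(
        "event_id".to_owned(),
        to_canonical_value(event_id).expect("EventId is a valid CanonicalJsonValue"),
    );
    serde_json::from_value(
        serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"),
    )
    .map_err(|_| "Event is not a valid PDU".to_owned())
}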
+ let mut val = signature_and_hash_check(&pub_key_map, value)?; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; + + // If we have no idea about this room skip the PDU + if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { + return Err("Room is unknown to this server".into()); + } + + // Fetch any unknown prev_events or retrieve them from the DB + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; + + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // Recursively gather all auth events checking that the previous auth events are valid. + let auth_events: Vec = + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(events) => events, + Err(_) => return Err("Failed to recursively gather auth events".into()), + }; + + Ok(pdu) +} + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -909,7 +958,7 @@ fn signature_and_hash_check( /// events `auth_events`. If the chain is found to have missing events it fails. async fn fetch_check_auth_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, event_ids: &[EventId], ) -> Result> { @@ -929,13 +978,13 @@ async fn fetch_check_auth_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: &ev_id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { val.insert( @@ -970,7 +1019,7 @@ async fn fetch_check_auth_events( /// effect the state of the room async fn fetch_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, events: &[EventId], ) -> Result> { @@ -982,13 +1031,13 @@ async fn fetch_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { // TODO: add to our DB somehow? From 851eb555b6bb656b7316515fdc865c163c9e1874 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 21:32:22 -0500 Subject: [PATCH 0385/1727] Abstract event validation/fetching, add outlier and signing key DB trees Fixed the miss named commented out keys in conduit-example.toml. 
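One detail worth calling out in the signing-key tree this commit adds (`servertimeout_signingkey`, below): the sled key packs the server name and the key set's expiry into one byte string, separated by 0xff, so a per-server prefix scan finds every stored key set and its validity can be read back without deserializing the value. A small sketch of that encoding, assuming millisecond timestamps as in the patch:

use std::convert::TryInto;

fn signing_key_db_key(server_name: &str, valid_until_ms: u64) -> Vec<u8> {
    let mut key = server_name.as_bytes().to_vec();
    key.push(0xff); // separator; 0xff cannot appear in an ASCII server name
    key.extend_from_slice(&valid_until_ms.to_be_bytes());
    key
}

fn valid_until_from_key(key: &[u8]) -> Option<u64> {
    // Everything after the first 0xff is the big-endian expiry timestamp.
    let ts = key.splitn(2, |&b| b == 0xff).nth(1)?;
    Some(u64::from_be_bytes(ts.try_into().ok()?))
}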
--- conduit-example.toml | 6 +- src/database.rs | 10 +- src/database/globals.rs | 77 ++++- src/database/rooms.rs | 97 ++---- src/error.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 632 +++++++++++++++++++--------------------- 7 files changed, 416 insertions(+), 414 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 70d3ce4..bb3ae33 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,12 +23,12 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#registration_disabled = false +#allow_registration = false # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#encryption_disabled = false -#federation_disabled = false +#allow_encryption = false +#allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time diff --git a/src/database.rs b/src/database.rs index 607e1be..e94a517 100644 --- a/src/database.rs +++ b/src/database.rs @@ -22,7 +22,7 @@ use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; use tokio::sync::Semaphore; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, database_path: String, @@ -102,7 +102,12 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config).await?, + globals: globals::Globals::load( + db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + ) + .await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -155,6 +160,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index c8e3b23..4670068 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,10 @@ use crate::{database::Config, utils, Error, Result}; use log::error; -use ruma::ServerName; -use std::collections::HashMap; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + ServerName, ServerSigningKeyId, +}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -19,10 +22,15 @@ pub struct Globals { reqwest_client: reqwest::Client, pub actual_destination_cache: DestinationCache, // actual_destination, host dns_resolver: TokioAsyncResolver, + pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey } impl Globals { - pub async fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load( + globals: sled::Tree, + server_keys: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -75,6 +83,7 @@ impl Globals { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), + servertimeout_signingkey: server_keys, }) } @@ -128,4 +137,66 @@ impl Globals { pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { + // Remove outdated keys + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, _) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + + if now > valid_until { + self.servertimeout_signingkey.remove(k)?; + } + } + + let mut key = origin.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice( + &(keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .to_be_bytes(), + ); + + self.servertimeout_signingkey.insert( + key, + serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, bytes) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + // If these keys are still valid use em! + if valid_until > now { + return serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + } + } + Ok(BTreeMap::default()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6b51d58..c37aa1a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,6 +65,9 @@ pub struct Rooms { /// The state for a given state hash. pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + + /// Any pdu that has passed the steps up to auth with auth_events. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { @@ -188,72 +191,6 @@ impl Rooms { Ok(events) } - /// Returns a Vec of the related auth events to the given `event`. - /// - /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. - pub fn auth_events_full( - &self, - _room_id: &RoomId, - event_ids: &[EventId], - ) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - if let Some(ev) = self.get_pdu(&ev_id)? 
{ - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) - } - - /// Returns a Vec representing the difference in auth chains of the given `events`. - /// - /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). - pub fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - use std::collections::BTreeSet; - - let mut chains = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_events_full(room_id, &ids)? - .into_iter() - .map(|pdu| pdu.event_id) - .collect::>(); - chains.push(chain); - } - - if let Some(chain) = chains.first() { - let rest = chains.iter().skip(1).flatten().cloned().collect(); - let common = chain.intersection(&rest).collect::>(); - - Ok(chains - .iter() - .flatten() - .filter(|id| !common.contains(&id)) - .cloned() - .collect::>() - .into_iter() - .collect()) - } else { - Ok(vec![]) - } - } - /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. @@ -475,6 +412,31 @@ impl Rooms { Ok(()) } + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns true if the event_id was previously inserted. + pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + let res = self + .eventid_outlierpdu + .insert( + event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + ) + .map(|op| op.is_some())?; + Ok(res) + } + /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -516,6 +478,9 @@ impl Rooms { } } + // We no longer keep this pdu as an outlier + self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending diff --git a/src/error.rs b/src/error.rs index c57843c..fed545c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -122,10 +122,9 @@ impl log::Log for ConduitLogger { let output = format!("{} - {}", record.level(), record.args()); if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record + && (record.module_path().map_or(false, |path| { + path.starts_with("conduit::") || path.starts_with("state") + }) || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/main.rs b/src/main.rs index fe7ab0d..9817c56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -155,6 +155,7 @@ fn setup_rocket() -> rocket::Rocket { .figment() .extract() .expect("It looks like your config is invalid. 
Please take a look at the error"); + let data = Database::load_or_create(config) .await .expect("config is valid"); diff --git a/src/server_server.rs b/src/server_server.rs index 64e0a05..6907e34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,4 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -7,7 +6,6 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ - device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -20,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -28,9 +25,12 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, + future::Future, net::{IpAddr, SocketAddr}, + pin::Pin, + result::Result as StdResult, sync::Arc, time::{Duration, SystemTime}, }; @@ -575,6 +575,26 @@ pub async fn send_transaction_message_route<'a>( // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + continue; + } + }; + if !db.rooms.exists(&room_id)? { + resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); + continue; + } + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { @@ -583,20 +603,12 @@ pub async fn send_transaction_message_route<'a>( for key in entity.keys() { // TODO: save this in a DB maybe... // fetch the public signing key - let res = db - .sending - .send_federation_request( - &db.globals, - <&ServerName>::try_from(key.as_str()).unwrap(), - get_server_keys::v2::Request::new(), - ) - .await?; + let origin = <&ServerName>::try_from(key.as_str()).unwrap(); + let keys = fetch_signing_keys(&db, origin).await?; pub_key_map.insert( - res.server_key.server_name.to_string(), - res.server_key - .verify_keys - .into_iter() + origin.to_string(), + keys.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(), ); @@ -615,10 +627,31 @@ pub async fn send_transaction_message_route<'a>( continue; } - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let mut val = match signature_and_hash_check(&pub_key_map, value) { + // TODO: make this persist but not a DB Tree... + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. 
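A std-only sketch of the caching pattern described in the comment above: hand out cheap `Arc` clones on a hit and only pay the fetch-and-deserialize cost on a miss. `String` stands in for `PduEvent` here; in state-res, `EventMap<T>` is essentially a `BTreeMap` keyed by `EventId`:

use std::collections::BTreeMap;
use std::sync::Arc;

fn cached_event(
    cache: &mut BTreeMap<String, Arc<String>>,
    id: &str,
    load: impl FnOnce() -> String,
) -> Arc<String> {
    // Hit: no re-fetch, no re-deserialization, just a reference-count bump.
    if let Some(ev) = cache.get(id) {
        return Arc::clone(ev);
    }
    // Miss: load once, remember it for every later auth check.
    let ev = Arc::new(load());
    cache.insert(id.to_owned(), Arc::clone(&ev));
    ev
}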
This could potentially also be some sort of trie (suffix tree) + // like structure so that once an auth event is known it would know (using indexes maybe) all of + // the auth events that it references. + let mut auth_cache = EventMap::new(); + + // 1. check the server is in the room (optional) + // 2. check content hash, redact if doesn't match + // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. persist this event as an outlier + // 7. if not timeline event: stop + let pdu = match validate_event( + &db, + value, + event_id.clone(), + &pub_key_map, + server_name, + // All the auth events gathered will be here + &mut auth_cache, + ) + .await + { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -626,59 +659,31 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = match serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - Ok(pdu) => pdu, - Err(_) => { - resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); - continue; - } - }; + let pdu = Arc::new(pdu.clone()); - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - - let event = Arc::new(pdu.clone()); - dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + let previous = match fetch_events( + &db, + server_name, + &pub_key_map, + &pdu.prev_events, + &mut auth_cache, + ) + .await + { + Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), _ => None, }; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. 
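Step 6 of the list above ("persist this event as an outlier") is what the `eventid_outlierpdu` tree added in this commit is for: the PDU is stored as JSON under its event id, outside the room timeline. A minimal sketch of that storage, with a stand-in `Pdu` struct instead of this crate's `PduEvent`:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Pdu {
    sender: String,
    // ... remaining PDU fields elided in this sketch
}

fn store_outlier(tree: &sled::Tree, event_id: &str, pdu: &Pdu) -> sled::Result<()> {
    // Keyed by event id, not by room + count, so it never touches the timeline.
    tree.insert(event_id.as_bytes(), serde_json::to_vec(pdu).expect("Pdu serializes"))?;
    Ok(())
}

fn load_outlier(tree: &sled::Tree, event_id: &str) -> sled::Result<Option<Pdu>> {
    Ok(tree
        .get(event_id.as_bytes())?
        .map(|bytes| serde_json::from_slice(&bytes).expect("only valid JSON is stored")))
}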
- let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => { - resolved_map.insert( - pdu.event_id, - Err("Failed to recursively gather auth events".into()), - ); - continue; - } - }; - - let mut event_map: state_res::EventMap> = auth_events + let mut event_map: state_res::EventMap> = auth_cache .iter() - .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); // Check that the event passes auth based on the auth_events let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &pdu.auth_events .iter() @@ -696,9 +701,10 @@ pub async fn send_transaction_message_route<'a>( None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( - pdu.event_id, + pdu.event_id().clone(), Err("Event has failed auth check with auth events".into()), ); continue; @@ -720,7 +726,14 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events( + &db, + server_name, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, + ) + .await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -732,21 +745,26 @@ pub async fn send_transaction_message_route<'a>( let state = state .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); ( state, - fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) - .await? - .into_iter() - .map(Arc::new) - .collect(), + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await? + .into_iter() + .collect(), ) } Err(_) => { resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Fetching state for event failed".into()), ); continue; @@ -755,7 +773,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &state_at_event, None, // TODO: third party invite @@ -764,37 +782,21 @@ pub async fn send_transaction_message_route<'a>( { // Event failed auth with state_at resolved_map.insert( - pdu.event_id, + event_id, Err("Event has failed auth check with state at the event".into()), ); continue; } // End of step 5. - // The event could still be soft failed - append_state_soft(&db, &pdu)?; - // Gather the forward extremities and resolve - let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states: Vec>> = vec![]; - for id in &forward_extrems { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - fork_states.push(state); - } else { - todo!("we don't know of a pdu that is part of our known forks OOPS") + let fork_states = match forward_extremity_ids(&db, &pdu) { + Ok(states) => states, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; } - } + }; // Step 6. 
event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { @@ -803,19 +805,47 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { - let auth_events = fork_states - .iter() - .map(|map| { - db.rooms - .auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), + let mut auth_events = vec![]; + // this keeps track if we error so we can break out of these inner loops + // to continue on with the incoming PDU's + let mut failed = false; + for map in &fork_states { + let mut state_auth = vec![]; + for pdu in map.values() { + let event = match auth_cache.get(pdu.event_id()) { + Some(aev) => aev.clone(), + // We should know about every event at this point but just incase... + None => match fetch_events( + &db, + server_name, + &pub_key_map, + &[pdu.event_id().clone()], + &mut auth_cache, ) - .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) - }) - .collect::>>()?; + .await + .map(|mut vec| vec.remove(0)) + { + Ok(aev) => aev.clone(), + Err(_) => { + resolved_map.insert( + event_id.clone(), + Err("Event has been soft failed".into()), + ); + failed = true; + break; + } + }, + }; + state_auth.push(event); + } + if failed { + break; + } + auth_events.push(state_auth); + } + if failed { + continue; + } // Add everything we will need to event_map event_map.extend( @@ -862,74 +892,163 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous, &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { - // Soft fail + // Soft fail, we add the event as an outlier. resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Event has been soft failed".into()), ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id().clone(), Ok(())); + resolved_map.insert(pdu.event_id().clone(), Ok(())); } } Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } -async fn auth_each_event( - db: &Database, +/// Validate any event that is given to us by another server. +/// +/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). +/// 2. Passes signature checks, otherwise event is dropped. +/// 3. Passes hash checks, otherwise it is redacted before being processed further. +/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). +/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +fn validate_event<'a>( + db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &PublicKeyMap, - server_name: &ServerName, - auth_cache: EventMap>, -) -> std::result::Result { - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. 
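The `validate_event` introduced in this hunk returns `Pin<Box<dyn Future ...>>` instead of being an `async fn` because it is indirectly recursive: it calls `fetch_check_auth_events`, which calls `fetch_events`, which calls `validate_event` again, and a directly recursive `async fn` would have an infinitely sized future type. A toy example of the same boxing pattern:

use std::future::Future;
use std::pin::Pin;

fn countdown(n: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
    Box::pin(async move {
        if n == 0 {
            return 0;
        }
        // The recursive call only sees the boxed, fixed-size future,
        // not the async body's own (otherwise self-referential) type.
        countdown(n - 1).await + 1
    })
}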
- let mut val = signature_and_hash_check(&pub_key_map, value)?; + pub_key_map: &'a PublicKeyMap, + origin: &'a ServerName, + auth_cache: &'a mut EventMap>, +) -> Pin> + 'a + Send>> { + Box::pin(async move { + let mut val = signature_and_hash_check(&pub_key_map, value)?; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { - return Err("Room is unknown to this server".into()); - } + fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + .await + .map_err(|_| "Event failed auth chain check".to_string())?; - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + db.rooms + .append_pdu_outlier(pdu.event_id(), &pdu) + .map_err(|e| e.to_string())?; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. - let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => return Err("Failed to recursively gather auth events".into()), - }; - - Ok(pdu) + Ok(pdu) + }) } +/// Find the event and auth it. +/// +/// 1. Look in the main timeline (pduid_pdu tree) +/// 2. Look at outlier pdu tree +/// 3. Ask origin server over federation +/// 4. TODO: Ask other servers over federation? +async fn fetch_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + events: &[EventId], + auth_cache: &mut EventMap>, +) -> Result>> { + let mut pdus = vec![]; + for id in events { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db.rooms.get_pdu_outlier(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; + + Arc::new(pdu) + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }, + }; + pdus.push(pdu); + } + Ok(pdus) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. 
If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, +) -> Result> { + match db.globals.signing_keys_for(origin)? { + keys if !keys.is_empty() => Ok(keys), + _ => { + let keys = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + } +} fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -954,122 +1073,29 @@ fn signature_and_hash_check( ) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have missing events it fails. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], -) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); +fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { + let mut fork_states = vec![]; + for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - let ev = match db.rooms.get_pdu(&ev_id)? 
{ - Some(pdu) => pdu, - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &ev_id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map") - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, - }; - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) -} - -/// TODO: this needs to add events to the DB in a way that does not -/// effect the state of the room -async fn fetch_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - events: &[EventId], -) -> Result> { - let mut pdus = vec![]; - for id in events { - match db.rooms.get_pdu(id)? { - Some(pdu) => pdus.push(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - // TODO: add to our DB somehow? - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); - - pdus.push(pdu); - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + fork_states.push(state); + } else { + return Err(Error::Conflict( + "we don't know of a pdu that is part of our known forks OOPS", + )); } } - Ok(pdus) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - db.rooms.get_pdu_leaves(room_id) + Ok(fork_states) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -1078,9 +1104,12 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
+ let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( - pdu, + &pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), @@ -1089,78 +1118,9 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { &db.admin, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // db.rooms.append_pdu( - // pdu, - // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - // count, - // pdu_id.clone().into(), - // &db.globals, - // &db.account_data, - // &db.admin, - // )?; - - Ok(()) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() -} - -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 5a8041969dd50a205bb3634f8c5905d7c7717bf4 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 11:05:57 -0500 Subject: [PATCH 0386/1727] Fix ALL clippy warnings --- src/client_server/account.rs | 78 ++++------------------- src/client_server/membership.rs | 40 ++---------- src/client_server/message.rs | 6 +- src/client_server/profile.rs | 12 +--- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 90 +++++--------------------- src/client_server/state.rs | 6 +- src/database/admin.rs | 6 +- src/database/rooms.rs | 109 ++++++++++++++------------------ src/database/sending.rs | 19 +++--- src/error.rs | 8 ++- src/server_server.rs | 4 +- 12 files changed, 106 insertions(+), 278 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 12c7f7e..76354b6 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -239,11 +239,7 @@ pub async fn 
register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Make conduit bot join @@ -264,11 +260,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -302,11 +294,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.1 Join Rules @@ -323,11 +311,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -346,11 +330,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -367,11 +347,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 6. Events implied by name and topic @@ -390,11 +366,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( @@ -410,11 +382,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Room alias @@ -436,11 +404,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -463,11 +427,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -486,11 +446,7 @@ pub async fn register_route( }, &user_id, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Send welcome message @@ -515,11 +471,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -691,11 +643,7 @@ pub async fn deactivate_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 40e4183..70bb480 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,11 +124,7 @@ pub async fn leave_room_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -164,11 +160,7 @@ pub async fn invite_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -220,11 +212,7 @@ pub async fn kick_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -280,11 +268,7 @@ pub async fn ban_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -332,11 +316,7 @@ pub async fn unban_user_route( }, &sender_user, &body.room_id, - 
&db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -685,9 +665,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; if state_events.contains(ev_id) { @@ -717,11 +695,7 @@ async fn join_room_by_id_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c56cc94..c64c390 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -68,11 +68,7 @@ pub async fn send_message_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..21759a8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -63,11 +63,7 @@ pub async fn set_displayname_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update @@ -160,11 +156,7 @@ pub async fn set_avatar_url_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..282c35a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -31,11 +31,7 @@ pub async fn redact_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..631d87b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,11 +65,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Let the room creator join @@ -90,11 +86,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -135,11 +127,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4. Events set by preset @@ -175,11 +163,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -196,11 +180,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -225,11 +205,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 5. 
Events listed in initial_state @@ -248,11 +224,7 @@ pub async fn create_room_route( pdu_builder, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -273,11 +245,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -295,11 +263,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -322,11 +286,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -413,11 +373,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Get the old room federations status @@ -457,11 +413,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Join the new room @@ -482,11 +434,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Recommended transferable state events list from the specs @@ -519,11 +467,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -566,11 +510,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index cecb79d..60e8363 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,11 +284,7 @@ pub async fn send_state_event_for_key_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; Ok(event_id) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..501722e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -60,11 +60,7 @@ impl Admin { }, &conduit_user, &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, ) .unwrap(); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c37aa1a..d62d4b0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::error; use regex::Regex; use ring::digest; @@ -447,9 +447,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, - globals: &super::globals::Globals, - account_data: &super::account_data::AccountData, - admin: &super::admin::Admin, + db: &Database, ) -> Result<()> { // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -486,7 +484,7 @@ impl Rooms { // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; self.pduid_pdu.insert( &pdu_id, @@ -521,8 +519,8 @@ impl Rooms { ) })?, &pdu.sender, - account_data, - globals, + &db.account_data, + &db.globals, )?; } } @@ -540,10 +538,10 @@ impl Rooms { self.tokenids.insert(key, &[])?; } - if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", globals.server_name()) + &format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), )? @@ -570,10 +568,11 @@ impl Rooms { ); match parsed_config { Ok(yaml) => { - admin.send(AdminCommand::RegisterAppservice(yaml)); + db.admin + .send(AdminCommand::RegisterAppservice(yaml)); } Err(e) => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( format!( "Could not parse appservice config: {}", @@ -584,7 +583,7 @@ impl Rooms { } } } else { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( "Expected code block in command body.", ), @@ -592,10 +591,10 @@ impl Rooms { } } "list_appservices" => { - admin.send(AdminCommand::ListAppservices); + db.admin.send(AdminCommand::ListAppservices); } _ => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( "Command: {}, Args: {:?}", command, args @@ -696,17 +695,12 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
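The admin-room branch above amounts to a small command dispatcher: strip the "@conduit:<server_name>: " prefix, split the remainder into a command word and arguments, and forward a matching AdminCommand over the admin channel. A minimal sketch of that shape, using plain strings and a simplified enum; the real handler additionally extracts a code block from the message body and parses it as YAML for register_appservice, which is omitted here:

// Illustrative only: the variant names mirror the AdminCommand values used
// above, but the payload types and parsing are reduced to plain strings.
#[derive(Debug)]
enum AdminCommand {
    RegisterAppservice(String),
    ListAppservices,
    SendMessage(String),
}

fn parse_admin_command(body: &str, server_name: &str) -> Option<AdminCommand> {
    // Only messages addressed to the conduit user are treated as commands.
    let prefix = format!("@conduit:{}: ", server_name);
    let rest = body.strip_prefix(&prefix)?;
    let mut words = rest.split_whitespace();
    let command = words.next()?;
    let args: Vec<&str> = words.collect();
    Some(match command {
        "register_appservice" => AdminCommand::RegisterAppservice(args.join(" ")),
        "list_appservices" => AdminCommand::ListAppservices,
        _ => AdminCommand::SendMessage(format!("Command: {}, Args: {:?}", command, args)),
    })
}
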
- #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - globals: &super::globals::Globals, - sending: &super::sending::Sending, - admin: &super::admin::Admin, - account_data: &super::account_data::AccountData, - appservice: &super::appservice::Appservice, + db: &Database, ) -> Result { let PduBuilder { event_type, @@ -789,7 +783,7 @@ impl Rooms { if !match event_type { EventType::RoomEncryption => { // Only allow encryption events if it's allowed in the config - globals.allow_encryption() + db.globals.allow_encryption() } EventType::RoomMember => { let prev_event = self @@ -895,13 +889,13 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(globals.server_name()) + to_canonical_value(db.globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), + db.globals.server_name().as_str(), + db.globals.keypair(), &mut pdu_json, &RoomVersionId::Version6, ) @@ -922,24 +916,16 @@ impl Rooms { // Increment the last index and use that // This is also the next_batch/since value - let count = globals.next_count()?; + let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu( - &pdu, - pdu_json, - count, - pdu_id.clone().into(), - globals, - account_data, - admin, - )?; + self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -948,31 +934,28 @@ impl Rooms { for server in self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) + .filter(|server| &**server != db.globals.server_name()) { - sending.send_pdu(&server, &pdu_id)?; + db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") .and_then(|users| users.as_sequence()) - .map_or_else( - || Vec::new(), - |users| { - users - .iter() - .map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) - .filter_map(|o| o) - .collect::>() - }, - ); + .map_or_else(Vec::new, |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let aliases = namespaces .get("aliases") .and_then(|users| users.get("regex")) @@ -989,25 +972,31 @@ impl Rooms { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, globals.server_name()).ok() + UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - if bridge_user_id.map_or(false, |bridge_user_id| { - self.is_joined(&bridge_user_id, 
room_id).unwrap_or(false) - }) || users.iter().any(|users| { + let user_is_joined = + |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { + }; + let matching_aliases = |aliases: Regex| { room_aliases .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + }; + + if bridge_user_id.map_or(false, user_is_joined) + || users.iter().any(matching_users) + || aliases.map_or(false, matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } diff --git a/src/database/sending.rs b/src/database/sending.rs index e827dad..4b0309f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -148,6 +148,15 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); + let exponential_backoff = |(tries, instant): &(u32, Instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }; if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() @@ -172,15 +181,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, |(tries, instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - instant.elapsed() < min_elapsed_duration - }) { + if last_failed_try.get(server).map_or(false, exponential_backoff) { return false; } diff --git a/src/error.rs b/src/error.rs index fed545c..13efce6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,10 +121,12 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); + let match_mod_path = + |path: &str| path.starts_with("conduit::") || path.starts_with("state"); + if self.enabled(record.metadata()) - && (record.module_path().map_or(false, |path| { - path.starts_with("conduit::") || path.starts_with("state") - }) || record + && (record.module_path().map_or(false, match_mod_path) + || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/server_server.rs b/src/server_server.rs index 6907e34..ae59583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1113,9 +1113,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time From 
96dc6be14b8065f8833b8372cfe1e3655d358f77 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 15:46:47 -0500 Subject: [PATCH 0387/1727] Use the auth_events for step 6, WIP forward_extremity_ids fn --- src/server_server.rs | 159 ++++++++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 55 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ae59583..77f0fa8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); - for pdu in &body.pdus { + 'main_pdu_loop: for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -660,7 +660,6 @@ pub async fn send_transaction_message_route<'a>( }; let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB let previous = match fetch_events( &db, @@ -675,6 +674,7 @@ pub async fn send_transaction_message_route<'a>( _ => None, }; + // [auth_cache] At this point we have the auth chain of the incoming event. let mut event_map: state_res::EventMap> = auth_cache .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -688,7 +688,7 @@ pub async fn send_transaction_message_route<'a>( &pdu.auth_events .iter() .map(|id| { - event_map + auth_cache .get(id) .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) .ok_or_else(|| { @@ -790,7 +790,15 @@ pub async fn send_transaction_message_route<'a>( // End of step 5. // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids(&db, &pdu) { + let fork_states = match forward_extremity_ids( + &db, + &pdu, + server_name, + &pub_key_map, + &mut auth_cache, + ) + .await + { Ok(states) => states, Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); @@ -805,47 +813,44 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // TODO: remove this is for current debugging Jan, 15 2021 + let mut number_fetches = 0_u32; let mut auth_events = vec![]; // this keeps track if we error so we can break out of these inner loops // to continue on with the incoming PDU's - let mut failed = false; for map in &fork_states { let mut state_auth = vec![]; - for pdu in map.values() { - let event = match auth_cache.get(pdu.event_id()) { + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), // We should know about every event at this point but just incase... 
None => match fetch_events( &db, server_name, &pub_key_map, - &[pdu.event_id().clone()], + &[auth_id.clone()], &mut auth_cache, ) .await - .map(|mut vec| vec.remove(0)) - { - Ok(aev) => aev.clone(), + .map(|mut vec| { + number_fetches += 1; + vec.remove(0) + }) { + Ok(aev) => aev, Err(_) => { resolved_map.insert( event_id.clone(), Err("Event has been soft failed".into()), ); - failed = true; - break; + continue 'main_pdu_loop; } }, }; state_auth.push(event); } - if failed { - break; - } auth_events.push(state_auth); } - if failed { - continue; - } + info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map event_map.extend( @@ -886,7 +891,13 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), - Err(e) => panic!("{:?}", e), + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("State resolution failed, either an event could not be found or deserialization".into()), + ); + continue 'main_pdu_loop; + } } }; @@ -914,6 +925,7 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). @@ -955,6 +967,37 @@ fn validate_event<'a>( }) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + /// Find the event and auth it. /// /// 1. Look in the main timeline (pduid_pdu tree) @@ -1000,36 +1043,6 @@ async fn fetch_events( Ok(pdus) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. -/// -/// The `auth_cache` is filled instead of returning a `Vec`. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| vec.remove(0))?; - - stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); - } - Ok(()) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. 
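fetch_check_auth_events above is a depth-first walk over the auth DAG: pop an event id off a stack, skip it if it is already in the auth_cache, otherwise fetch it and push its own auth_events back onto the stack, failing if any link in the chain cannot be retrieved. A self-contained sketch of that traversal, with a bare struct and a fetch closure standing in for PduEvent and fetch_events:

use std::collections::HashMap;

struct SketchEvent {
    auth_events: Vec<String>,
}

fn walk_auth_chain(
    start: &[String],
    cache: &mut HashMap<String, SketchEvent>,
    mut fetch: impl FnMut(&str) -> Option<SketchEvent>,
) -> Result<(), String> {
    // DFS: every event pulled in may reference further auth events, which go
    // back onto the same stack until the whole chain is cached.
    let mut stack: Vec<String> = start.to_vec();
    while let Some(id) = stack.pop() {
        if cache.contains_key(&id) {
            continue;
        }
        let ev = fetch(&id).ok_or_else(|| format!("auth event {} not found", id))?;
        stack.extend(ev.auth_events.iter().cloned());
        cache.insert(id, ev);
    }
    Ok(())
}
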
async fn fetch_signing_keys( @@ -1049,6 +1062,7 @@ async fn fetch_signing_keys( } } } + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -1073,9 +1087,23 @@ fn signature_and_hash_check( ) } -fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { +async fn forward_extremity_ids( + db: &Database, + pdu: &PduEvent, + origin: &ServerName, + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + + for incoming_leaf in &pdu.prev_events { + if !current_leaves.contains(incoming_leaf) { + current_leaves.push(incoming_leaf.clone()); + } + } + let mut fork_states = vec![]; - for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1090,11 +1118,32 @@ fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result Date: Sat, 16 Jan 2021 16:37:20 -0500 Subject: [PATCH 0388/1727] Fixing the incoming events algorithm (review with time) --- src/database/rooms.rs | 2 +- src/server_server.rs | 237 ++++++++++++++++++++++++------------------ 2 files changed, 138 insertions(+), 101 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d62d4b0..325a2e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -150,7 +150,7 @@ impl Rooms { } } - /// Returns the last state hash key added to the db. + /// Returns the state hash for this pdu. pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } diff --git a/src/server_server.rs b/src/server_server.rs index 77f0fa8..0eb7d6f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,6 +5,7 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -590,6 +591,8 @@ pub async fn send_transaction_message_route<'a>( continue; } }; + + // 1. check the server is in the room (optional) if !db.rooms.exists(&room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); continue; @@ -634,14 +637,13 @@ pub async fn send_transaction_message_route<'a>( // the auth events that it references. let mut auth_cache = EventMap::new(); - // 1. check the server is in the room (optional) // 2. check content hash, redact if doesn't match // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 6. persist this event as an outlier // 7. if not timeline event: stop - let pdu = match validate_event( + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let (pdu, previous) = match validate_event( &db, value, event_id.clone(), @@ -659,59 +661,16 @@ pub async fn send_transaction_message_route<'a>( } }; - let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events( - &db, - server_name, - &pub_key_map, - &pdu.prev_events, - &mut auth_cache, - ) - .await - { - Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), - _ => None, + let single_prev = if previous.len() == 1 { + previous.first().cloned() + } else { + None }; - // [auth_cache] At this point we have the auth chain of the incoming event. - let mut event_map: state_res::EventMap> = auth_cache - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); + // 6. persist the event as an outlier. + db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; - // Check that the event passes auth based on the auth_events - let is_authed = state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - Error::Conflict( - "Auth event not found, event failed recursive auth checks.", - ) - }) - }) - .collect::>>()?, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))?; - - if !is_authed { - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has failed auth check with auth events".into()), - ); - continue; - } - // End of step 4. - - // Step 5. event passes auth based on state at the event + // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -757,9 +716,7 @@ pub async fn send_transaction_message_route<'a>( &res.auth_chain_ids, &mut auth_cache, ) - .await? - .into_iter() - .collect(), + .await?, ) } Err(_) => { @@ -771,10 +728,11 @@ pub async fn send_transaction_message_route<'a>( } }; + // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous.clone(), + single_prev.clone(), &state_at_event, None, // TODO: third party invite ) @@ -787,10 +745,34 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - // End of step 5. + // End of step 10. + + // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(pdu.room_id())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + single_prev.clone(), + ¤t_state, + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail, we add the event as an outlier. + resolved_map.insert( + pdu.event_id().clone(), + Err("Event has been soft failed".into()), + ); + }; // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids( + let fork_states = match forward_extremities( &db, &pdu, server_name, @@ -806,7 +788,9 @@ pub async fn send_transaction_message_route<'a>( } }; - // Step 6. event passes auth based on state of all forks and current room state + // 13. 
start state-res with all previous forward extremities minus the ones that are in + // the prev_events of this event plus the new one created by this event and use + // the result as the new room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() @@ -852,6 +836,7 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); + let mut event_map = EventMap::new(); // Add everything we will need to event_map event_map.extend( auth_events @@ -904,7 +889,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous, + single_prev, &state_at_forks, None, ) @@ -925,14 +910,19 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// An async function that can recursively calls itself. +type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; + /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. Passes signature checks, otherwise event is dropped. -/// 3. Passes hash checks, otherwise it is redacted before being processed further. -/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). -/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +/// 2. check content hash, redact if doesn't match +/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events +/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" +/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. if not timeline event: stop +/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fn validate_event<'a>( db: &'a Database, value: CanonicalJsonObject, @@ -940,9 +930,24 @@ fn validate_event<'a>( pub_key_map: &'a PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> Pin> + 'a + Send>> { +) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { Box::pin(async move { - let mut val = signature_and_hash_check(&pub_key_map, value)?; + let mut val = + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value + } + } + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -959,11 +964,42 @@ fn validate_event<'a>( .await .map_err(|_| "Event failed auth chain check".to_string())?; - db.rooms - .append_pdu_outlier(pdu.event_id(), &pdu) + let pdu = Arc::new(pdu.clone()); + + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + .await .map_err(|e| e.to_string())?; - Ok(pdu) + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + if previous.len() == 1 { + previous.first().cloned() + } else { + None + }, + &pdu.auth_events + .iter() + .map(|id| { + auth_cache + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + }) + }) + .collect::, _>>()?, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed".to_string())?; + + if !is_authed { + return Err("Event has failed auth check with auth events".to_string()); + } + + Ok((pdu, previous)) }) } @@ -990,7 +1026,10 @@ async fn fetch_check_auth_events( let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await - .map(|mut vec| vec.remove(0))?; + .map(|mut vec| { + vec.pop() + .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) + })??; stack.extend(ev.auth_events()); auth_cache.insert(ev.event_id().clone(), ev); @@ -1028,11 +1067,12 @@ async fn fetch_events( { Ok(res) => { let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - Arc::new(pdu) + pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, @@ -1063,31 +1103,11 @@ async fn fetch_signing_keys( } } -fn signature_and_hash_check( - pub_key_map: &ruma::signatures::PublicKeyMap, - value: CanonicalJsonObject, -) -> std::result::Result { - Ok( - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - error!("CONTENT HASH FAILED"); - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } - Err(_e) => { - return Err("Signature verification failed".to_string()); - } - }, - ) -} - -async fn forward_extremity_ids( +/// Gather all state snapshots needed to resolve the current state of the room. +/// +/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) +async fn forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, @@ -1102,6 +1122,8 @@ async fn forward_extremity_ids( } } + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; let mut fork_states = vec![]; for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { @@ -1109,6 +1131,10 @@ async fn forward_extremity_ids( .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); + + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } let state = db .rooms .state_full(&pdu.room_id, &state_hash)? 
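For every forward extremity, the loop above resolves the state snapshot the room had at that leaf, and it records when one of those snapshots is already the room's current state so that the hunk below does not append the current state a second time. A simplified sketch of that gathering step, using strings for ids and hashes and closures for the two database lookups; the federation fallback for leaves we do not have locally is left out:

use std::collections::BTreeMap;

type StateSnapshot = BTreeMap<(String, String), String>;

fn gather_fork_states(
    leaves: &[String],
    current_hash: Option<&str>,
    state_hash_for: impl Fn(&str) -> Option<String>,
    state_for_hash: impl Fn(&str) -> StateSnapshot,
) -> (Vec<StateSnapshot>, bool) {
    let mut includes_current_state = false;
    let mut fork_states = Vec::new();
    for leaf in leaves {
        // Each known leaf contributes the state that was current at that event.
        if let Some(hash) = state_hash_for(leaf) {
            if current_hash == Some(hash.as_str()) {
                includes_current_state = true;
            }
            fork_states.push(state_for_hash(&hash));
        }
    }
    (fork_states, includes_current_state)
}
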
@@ -1144,6 +1170,17 @@ async fn forward_extremity_ids( } } + // This guarantees that our current room state is included + if !includes_current_state && current_hash.is_some() { + fork_states.push( + db.rooms + .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(), + ) + } + Ok(fork_states) } From ebb38cd33043004bf3a7d4c453e5e904406d8994 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 16 Jan 2021 14:48:24 -0700 Subject: [PATCH 0389/1727] improvement: respect logout_devices param on password change Move logout devices comment next to relevant loop remove unnecessary log --- src/client_server/account.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index f48543e..3d6498f 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -572,16 +572,16 @@ pub async fn change_password_route( db.users.set_password(&sender_user, &body.new_password)?; - // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma - // See: https://github.com/ruma/ruma/issues/107 - // Logout all devices except the current one - for id in db - .users - .all_device_ids(&sender_user) - .filter_map(|id| id.ok()) - .filter(|id| id != sender_device) - { - db.users.remove_device(&sender_user, &id)?; + if body.logout_devices { + // Logout all devices except the current one + for id in db + .users + .all_device_ids(&sender_user) + .filter_map(|id| id.ok()) + .filter(|id| id != sender_device) + { + db.users.remove_device(&sender_user, &id)?; + } } db.flush().await?; From 890187e00419602a85c85f18b87867d96ec1972c Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 16 Jan 2021 22:15:45 -0700 Subject: [PATCH 0390/1727] improvement: Handle optional device_id field during login remove debug logging --- src/client_server/session.rs | 35 ++++++++++++++++++++++++++--------- src/database/users.rs | 2 +- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index da3d8d8..173e823 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -77,7 +77,6 @@ pub async fn login_route( // Generate new device id if the user didn't specify one let device_id = body - .body .device_id .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); @@ -85,14 +84,32 @@ pub async fn login_route( // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); - // TODO: Don't always create a new device - // Add device - db.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - )?; + let mut create_new_device = true; + + // Only search db for existing device if one was provided in the request + match &body.device_id { + Some(_) => { + // Look to see if provided device_id already exists + if let Some(_) = db.users.all_device_ids(&user_id).find(|x| match x { + Ok(x) if **x == *device_id => true, + _ => false, + }) { + // Replace token for existing device + db.users.set_token(&user_id, &device_id, &token)?; + create_new_device = false; + } + } + _ => (), + }; + + if create_new_device { + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; + } info!("{} logged in", user_id); diff --git a/src/database/users.rs b/src/database/users.rs index 2a03960..d6a4ecf 100644 --- 
a/src/database/users.rs +++ b/src/database/users.rs @@ -251,7 +251,7 @@ impl Users { } /// Replaces the access token of one device. - fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); From 762255fa8d0ec7797ead205ac454479ff53fd860 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 17 Jan 2021 08:39:47 -0700 Subject: [PATCH 0391/1727] Simplify device creation logic during login --- src/client_server/session.rs | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 173e823..48fbea2 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -84,25 +84,17 @@ pub async fn login_route( // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); - let mut create_new_device = true; + // Determine if device_id was provided and exists in the db for this user + let device_exists = body.device_id.as_ref().map_or(false, |device_id| { + db.users + .all_device_ids(&user_id) + .find(|x| x.as_ref().map_or(false, |v| v == device_id)) + .is_some() + }); - // Only search db for existing device if one was provided in the request - match &body.device_id { - Some(_) => { - // Look to see if provided device_id already exists - if let Some(_) = db.users.all_device_ids(&user_id).find(|x| match x { - Ok(x) if **x == *device_id => true, - _ => false, - }) { - // Replace token for existing device - db.users.set_token(&user_id, &device_id, &token)?; - create_new_device = false; - } - } - _ => (), - }; - - if create_new_device { + if device_exists { + db.users.set_token(&user_id, &device_id, &token)?; + } else { db.users.create_device( &user_id, &device_id, From c65bde41e01d388a7601b9af860c0950838f65ce Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:08:59 -0500 Subject: [PATCH 0392/1727] WIP gather and update forward extremities --- src/database/rooms.rs | 18 ++++++ src/pdu.rs | 21 ++++++- src/server_server.rs | 138 ++++++++++++++++++++++++++++-------------- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 325a2e2..665e328 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,6 +397,24 @@ impl Rooms { Ok(events) } + /// Force an update to the leaves of a room. + pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + for event_id in event_ids.iter() { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + /// Replace the leaves of a room with a new event. 
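force_pdu_leaves above and the userdeviceid key in users.rs both use the same composite-key convention for sled trees: the parent id, a 0xff separator byte (0xff never occurs in valid UTF-8, so it cannot collide with the id itself), then the child part. That makes "every entry belonging to this parent" a simple prefix scan. A small sketch of the convention; the helper names are illustrative, not functions from the codebase:

fn composite_key(parent: &str, child: &[u8]) -> Vec<u8> {
    // parent bytes, 0xff separator, child bytes
    let mut key = parent.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(child);
    key
}

fn parent_prefix(parent: &str) -> Vec<u8> {
    // Everything stored under this parent starts with these bytes.
    let mut prefix = parent.as_bytes().to_vec();
    prefix.push(0xff);
    prefix
}
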
pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/pdu.rs b/src/pdu.rs index 340ddee..e38410f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -284,6 +284,25 @@ impl state_res::Event for PduEvent { } } +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { + self.event_id == other.event_id + } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_id.partial_cmp(&other.event_id) + } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { + self.event_id.cmp(&other.event_id) + } +} + /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. diff --git a/src/server_server.rs b/src/server_server.rs index 0eb7d6f..16a1a8e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ - client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -25,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -600,31 +599,21 @@ pub async fn send_transaction_message_route<'a>( let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(sig) = value.get("signatures") { - match sig { - CanonicalJsonValue::Object(entity) => { - for key in entity.keys() { - // TODO: save this in a DB maybe... - // fetch the public signing key - let origin = <&ServerName>::try_from(key.as_str()).unwrap(); - let keys = fetch_signing_keys(&db, origin).await?; - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } - } - _ => { - resolved_map.insert( - event_id, - Err("`signatures` is not a JSON object".to_string()), - ); - continue; - } - } + if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { + let sender = + UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); + let origin = sender.server_name(); + + // TODO: this could fail or the server not respond... + let keys = fetch_signing_keys(&db, origin).await?; + + pub_key_map.insert( + origin.to_string(), + keys.into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); } else { resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); continue; @@ -642,8 +631,9 @@ pub async fn send_transaction_message_route<'a>( // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events // 7. if not timeline event: stop - // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let (pdu, previous) = match validate_event( + // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // the events found in step 8 can be authed/resolved and appended to the DB + let (pdu, previous): (_, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -670,6 +660,9 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all + // the checks in this list starting at 1. These are not timeline events. + // // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db @@ -771,8 +764,12 @@ pub async fn send_transaction_message_route<'a>( ); }; - // Gather the forward extremities and resolve - let fork_states = match forward_extremities( + // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res + // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) + // + // calculate_forward_extremities takes care of adding the current state if not already in the state sets + // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. + let (mut fork_states, fork_ids) = match calculate_forward_extremities( &db, &pdu, server_name, @@ -788,6 +785,12 @@ pub async fn send_transaction_message_route<'a>( } }; + // add the incoming events to the mix of state snapshots + // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets + fork_states.insert(state_at_event.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -901,7 +904,9 @@ pub async fn send_transaction_message_route<'a>( Err("Event has been soft failed".into()), ); } else { - append_state(&db, &pdu)?; + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_state(&db, &pdu, &fork_ids)?; + // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } @@ -1106,25 +1111,52 @@ async fn fetch_signing_keys( /// Gather all state snapshots needed to resolve the current state of the room. /// /// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) -async fn forward_extremities( +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). +/// +/// The state snapshot of the incoming event __needs__ to be added to the resulting list. +async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, -) -> Result>>> { +) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut is_incoming_leaf = true; + // Make sure the incoming event is not already a forward extremity + // FIXME: I think this could happen if different servers send us the same event?? 
+ if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + + // If the incoming event is already referenced by an existing event + // then do nothing - it's not a candidate to be a new extremity if + // it has been referenced. + if already_referenced(db, pdu)? { + is_incoming_leaf = false; + // This event has been dealt with already?? + } + + // TODO: + // [dendrite] Checks if any other leaves have been referenced and removes them + // but as long as we update the pdu leaves here and for events on our server this + // should not be possible. + + // Remove any forward extremities that are referenced by this incoming events prev_events for incoming_leaf in &pdu.prev_events { - if !current_leaves.contains(incoming_leaf) { - current_leaves.push(incoming_leaf.clone()); + if current_leaves.contains(incoming_leaf) { + if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { + current_leaves.remove(pos); + } } } let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; - let mut fork_states = vec![]; + let mut fork_states = BTreeSet::new(); for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db @@ -1142,8 +1174,10 @@ async fn forward_extremities( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - fork_states.push(state); + fork_states.insert(state); } else { + error!("Forward extremity not found... {}", id); + let res = db .sending .send_federation_request( @@ -1166,25 +1200,37 @@ async fn forward_extremities( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - fork_states.push(state); + fork_states.insert(state); } } + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.push( + fork_states.insert( db.rooms .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(), - ) + ); } - Ok(fork_states) + Ok((fork_states, dbg!(current_leaves))) } -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { +/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) +fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { + Ok(false) +} + +fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1195,13 +1241,17 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, + pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db, )?; + // If we update the room leaves after calling append_pdu it will stick since append_pdu + // calls replace_pdu_leaves with only the given event. 
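Taken together, the leaf handling in calculate_forward_extremities and the force_pdu_leaves call below implement one rule: the new forward extremities are the old leaves minus everything the incoming event lists in prev_events, plus the incoming event itself when nothing references it yet. A compact sketch of that rule with plain strings for event ids (the real code also consults already_referenced, which is still a stub at this point):

fn update_leaves(
    mut leaves: Vec<String>,
    incoming: &str,
    prev_events: &[String],
    already_referenced: bool,
) -> Vec<String> {
    // Leaves referenced by the new event stop being forward extremities.
    leaves.retain(|leaf| !prev_events.contains(leaf));
    // The new event becomes a leaf unless something already references it.
    if !already_referenced && !leaves.iter().any(|l| l == incoming) {
        leaves.push(incoming.to_string());
    }
    leaves
}
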
+ db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; + // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 00436a32b0e6a85e6b77e530fb90df5fa4d1a958 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:36:44 -0500 Subject: [PATCH 0393/1727] Update ruma and rocket to latest git rev and tokio to 1.0 Ruma updated the event signing validation code and there was a dep resolving failure with serde rocket and tokio so I updated rocket latest and tokio 1.0 to fix. --- Cargo.lock | 454 ++++++++++++++++++++++++-------------- Cargo.toml | 6 +- src/client_server/sync.rs | 3 +- src/main.rs | 5 + 4 files changed, 297 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d08de95..f172072 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,12 +105,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -148,15 +142,15 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "5a4bad0c5981acc24bc09e532f35160f952e35422603f0563cd7a73c2c2e65a0" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -164,6 +158,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cc" version = "1.0.66" @@ -192,7 +192,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -210,7 +210,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.0.2", "trust-dns-resolver", ] @@ -387,9 +387,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "a3add2ec7727c9584a0ce75ee3c0f54f0ab692c7934450cc3a0287251e3a4f06" dependencies = [ "pear", "serde", @@ -457,9 +457,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = 
"da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -482,15 +482,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -499,15 +499,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -517,24 +517,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -543,7 +543,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -578,7 +578,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -609,7 +609,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", 
"futures-sink", @@ -617,8 +617,28 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.24", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.0.2", + "tokio-util 0.6.1", "tracing", "tracing-futures", ] @@ -640,9 +660,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -660,11 +680,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -675,7 +695,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] @@ -697,19 +727,43 @@ version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.4", "socket2", - "tokio", + "tokio 0.2.24", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.4", + "socket2", + "tokio 1.0.2", "tower-service", "tracing", "want", @@ -721,10 +775,10 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.9", "native-tls", - "tokio", + "tokio 0.2.24", "tokio-tls", ] @@ -868,15 +922,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = 
"0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -889,9 +943,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] @@ -986,21 +1040,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1015,6 +1071,16 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -1044,6 +1110,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1170,7 +1245,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi 0.3.9", ] @@ -1221,11 +1296,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.4", ] [[package]] @@ -1241,9 +1316,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -1258,9 +1333,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1309,9 +1384,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1365,9 +1440,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1437,6 +1512,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1444,7 +1528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] @@ -1470,9 +1554,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1482,9 +1566,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1501,14 +1585,14 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64 0.13.0", - "bytes", + "base64", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.9", "hyper-tls", "ipnet", "js-sys", @@ -1518,10 +1602,10 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.24", "tokio-tls", "url", "wasm-bindgen", @@ -1558,7 +1642,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1578,7 +1662,7 @@ dependencies = [ "serde", "state", "time", - "tokio", + "tokio 1.0.2", "ubyte", "version_check", "yansi", @@ -1587,7 +1671,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1599,23 +1683,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.2", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", "time", - "tokio", + "tokio 1.0.2", "tokio-rustls", "uncased", "unicode-xid", @@ -1624,8 +1709,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "js_int", @@ -1642,8 +1727,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "http", "percent-encoding", @@ -1657,8 +1742,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1668,8 +1753,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "ruma-api", "ruma-common", @@ -1682,8 +1767,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "http", @@ -1701,8 +1786,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "maplit", @@ -1714,8 +1799,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-common", @@ -1728,8 +1813,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1739,8 +1824,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-api", @@ -1754,11 +1839,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "paste", - "rand 0.8.1", + "rand 0.8.2", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1768,8 +1853,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro2", "quote", @@ -1779,16 +1864,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" -dependencies = [ - "serde", -] +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" [[package]] name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "form_urlencoded", "itoa", @@ -1800,8 +1882,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1811,10 +1893,10 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1828,7 +1910,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1851,11 +1933,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1934,18 +2016,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -2026,9 +2108,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" @@ -2065,7 +2147,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" +source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" dependencies = [ "itertools", "maplit", @@ -2138,14 +2220,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.2", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] @@ -2172,18 +2254,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" dependencies = [ "lazy_static", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" dependencies = [ "const_fn", "libc", @@ -2238,28 +2320,41 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.11", + "slab", +] + +[[package]] +name = "tokio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.7", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.4", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2268,14 +2363,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.0.2", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "rustls", - "tokio", - "webpki", + "pin-project-lite 0.2.4", + "tokio 1.0.2", ] [[package]] @@ -2285,7 +2390,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.24", ] [[package]] @@ -2294,12 +2399,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio", + "tokio 0.2.24", +] + +[[package]] +name = "tokio-util" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.0.2", + "tokio-stream", ] [[package]] @@ -2325,7 +2445,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -2376,7 +2496,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "url", ] @@ -2396,7 +2516,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "trust-dns-proto", ] @@ -2511,9 +2631,9 @@ checksum = 
"cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" diff --git a/Cargo.toml b/Cargo.toml index 004cbfd..eb594a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.0.2", features = ["macros", "time"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..be51aeb 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,7 +674,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! 
{ _ = &mut delay => {} _ = watcher => {} diff --git a/src/main.rs b/src/main.rs index 9817c56..7d5bc71 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,6 +28,11 @@ fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); + std::env::set_var( + "CONDUIT_CONFIG", + "/home/devinr/aprog/rust/__forks__/conduit/conduit.toml", + ); + let config = Figment::from(rocket::Config::release_default()) .merge( From 3a6f2644508db5f2e2e0a471a2b568d7a12b2d81 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:41:38 -0500 Subject: [PATCH 0394/1727] Add ability to update room leaves with multiple eventIds Tokio seems a bit broken with Rocket... --- src/client_server/membership.rs | 2 ++ src/database/rooms.rs | 37 ++++++++++++++++----------------- src/server_server.rs | 7 ++----- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70bb480..1159185 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -665,6 +665,8 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + // TODO: can we simplify the DAG or should we copy it exactly?? + &pdu.prev_events, &db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 665e328..a3f3aab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,8 +397,11 @@ impl Rooms { Ok(events) } - /// Force an update to the leaves of a room. - pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// `prev_events`. + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -415,21 +418,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room with a new event. - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; - } - - prefix.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; - - Ok(()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -465,6 +453,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, + leaves: &[EventId], db: &Database, ) -> Result<()> { // Make unsigned fields correct. This is not properly documented in the spec, but state @@ -497,7 +486,7 @@ impl Rooms { // We no longer keep this pdu as an outlier self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + self.replace_pdu_leaves(&pdu.room_id, leaves)?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -943,7 +932,17 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. 
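Not part of the patch itself — a minimal standalone sketch of the tokio 1.x timer pattern that the sync.rs hunk in patch 0393 above switches to. The helper name and the `watcher` argument are illustrative, not taken from the codebase; the point is that tokio 1.x replaces `delay_for` with `sleep`, whose `Sleep` future is not `Unpin` and therefore has to be pinned before it can be polled by reference inside `select!`:

    use std::time::Duration;

    // Illustrative helper; `watcher` stands in for the sync watcher future.
    async fn sleep_or_wake(watcher: impl std::future::Future<Output = ()>) {
        // tokio 1.x: `sleep` replaces 0.2's `delay_for`. The returned `Sleep`
        // is not `Unpin`, so pin it before polling it by reference in `select!`.
        let delay = tokio::time::sleep(Duration::from_secs(30));
        tokio::pin!(delay);

        tokio::select! {
            _ = &mut delay => {} // timed out
            _ = watcher => {}    // woken up early
        }
    }
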
let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; + // remove the + self.append_pdu( + &pdu, + pdu_json, + count, + pdu_id.clone().into(), + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + &[pdu.event_id.clone()], + db, + )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist diff --git a/src/server_server.rs b/src/server_server.rs index 16a1a8e..f782ad5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -1245,13 +1245,10 @@ fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> R utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + &new_room_leaves, &db, )?; - // If we update the room leaves after calling append_pdu it will stick since append_pdu - // calls replace_pdu_leaves with only the given event. - db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; - // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From a897608025a3a973f69eeeb43c233fc466375b20 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:59:08 -0500 Subject: [PATCH 0395/1727] Roll back tokio and rocket update since ruma's request is at 0.2 tokio --- Cargo.lock | 210 +++++++++----------------------------- Cargo.toml | 4 +- src/client_server/sync.rs | 3 +- src/main.rs | 5 - 4 files changed, 54 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f172072..ce17095 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,6 +105,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -192,7 +198,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "directories", "http", "image", @@ -210,7 +216,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.0.2", + "tokio", "trust-dns-resolver", ] @@ -617,28 +623,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 0.2.24", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - -[[package]] -name = "h2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.0.2", - "tokio-util 0.6.1", + "tokio", + "tokio-util", "tracing", "tracing-futures", ] @@ -699,16 +685,6 @@ dependencies = [ "http", ] -[[package]] -name = "http-body" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" -dependencies = [ - "bytes 1.0.1", - "http", -] - [[package]] name = "httparse" version = "1.3.4" @@ -731,39 +707,15 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", + "h2", "http", - "http-body 0.3.1", + "http-body", "httparse", "httpdate", "itoa", "pin-project 1.0.4", "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" -dependencies = [ - "bytes 1.0.1", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.0", - "http", - "http-body 0.4.0", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.4", - "socket2", - "tokio 1.0.2", + "tokio", "tower-service", "tracing", "want", @@ -776,9 +728,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ "bytes 0.5.6", - "hyper 0.13.9", + "hyper", "native-tls", - "tokio 0.2.24", + "tokio", "tokio-tls", ] @@ -1040,23 +992,21 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow 0.2.2", + "miow", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio" -version = "0.7.7" +name = "mio-uds" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ + "iovec", "libc", - "log", - "miow 0.3.6", - "ntapi", - "winapi 0.3.9", + "mio", ] [[package]] @@ -1071,16 +1021,6 @@ dependencies = [ "ws2_32-sys", ] -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - [[package]] name = "native-tls" version = "0.2.7" @@ -1110,15 +1050,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num-integer" version = "0.1.44" @@ -1585,14 +1516,14 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64", + "base64 0.13.0", "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.9", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", @@ -1605,7 +1536,7 @@ dependencies = [ "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio 0.2.24", + "tokio", "tokio-tls", "url", "wasm-bindgen", @@ -1642,7 +1573,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "async-trait", "atomic", @@ -1662,7 +1593,7 @@ dependencies = [ "serde", "state", "time", - "tokio 1.0.2", + "tokio", "ubyte", "version_check", 
"yansi", @@ -1671,7 +1602,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "devise", "glob", @@ -1683,24 +1614,23 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "cookie", "either", "http", - "hyper 0.14.2", + "hyper", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", "time", - "tokio 1.0.2", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -1896,7 +1826,7 @@ name = "ruma-signatures" version = "0.6.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ - "base64", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -1910,7 +1840,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1933,11 +1863,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ - "base64", + "base64 0.12.3", "log", "ring", "sct", @@ -2325,36 +2255,23 @@ dependencies = [ "futures-core", "iovec", "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.11", - "slab", -] - -[[package]] -name = "tokio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" -dependencies = [ - "autocfg", - "bytes 1.0.1", "libc", "memchr", - "mio 0.7.7", + "mio", + "mio-uds", "num_cpus", - "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite 0.1.11", "signal-hook-registry", + "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "1.0.0" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -2363,24 +2280,14 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls", - "tokio 1.0.2", - "webpki", -] - -[[package]] -name = "tokio-stream" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum 
= "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.0.2", + "rustls", + "tokio", + "webpki", ] [[package]] @@ -2390,7 +2297,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio 0.2.24", + "tokio", ] [[package]] @@ -2404,22 +2311,7 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "tokio-util" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" -dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.4", - "tokio 1.0.2", - "tokio-stream", + "tokio", ] [[package]] @@ -2496,7 +2388,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "url", ] @@ -2516,7 +2408,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "trust-dns-proto", ] diff --git a/Cargo.toml b/Cargo.toml index eb594a0..405c89f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "031948c1daaa146128d8a435be116476f2adde00", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.0.2", features = ["macros", "time"] } +tokio = { version = "0.2.24" } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index be51aeb..3136116 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,8 +674,7 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let delay = tokio::time::sleep(duration); - tokio::pin!(delay); + let mut delay = tokio::time::delay_for(duration); tokio::select! 
{ _ = &mut delay => {} _ = watcher => {} diff --git a/src/main.rs b/src/main.rs index 7d5bc71..9817c56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,11 +28,6 @@ fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - std::env::set_var( - "CONDUIT_CONFIG", - "/home/devinr/aprog/rust/__forks__/conduit/conduit.toml", - ); - let config = Figment::from(rocket::Config::release_default()) .merge( From a1e296374f2e42e4b22789cc1736f73bf5e85a1f Mon Sep 17 00:00:00 2001 From: Valkum Date: Fri, 22 Jan 2021 20:11:19 +0100 Subject: [PATCH 0396/1727] Allow the complement test image to use build artifacts --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 306105a..0ef8f90 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . -RUN cargo build +RUN test -e target/release/cond_test || cargo build --release --offline FROM valkum/docker-rust-ci:latest WORKDIR /workdir From a119d858f368d2f714efe8104895727117e02a90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 0397/1727] feature: push rule settings --- src/client_server/push.rs | 624 +++++++++++++++++++++++++++++++++++++- src/client_server/room.rs | 8 +- src/main.rs | 5 + 3 files changed, 618 insertions(+), 19 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 05ba8d0..667d667 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,16 +1,22 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::warn; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + r0::push::{ + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, + get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + }, }, events::EventType, + push::{ + ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, + RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, + }, }; #[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use rocket::{delete, get, post, put}; #[cfg_attr( feature = "conduit_bin", @@ -36,16 +42,201 @@ pub async fn get_pushrules_all_route( .into()) } -#[cfg_attr(feature = "conduit_bin", put( - "/_matrix/client/r0/pushrules/<_>/<_>/<_>", - //data = "" -))] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn get_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = event.content.global; + let rule = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::_Custom(_) => None, + }; + + if let Some(rule) = rule { + Ok(get_pushrule::Response { rule }.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + } +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] pub async fn set_pushrule_route( db: State<'_, Database>, - //body: Ruma, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + + global.override_.insert(OverridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + + global.underride.insert(UnderridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + + global.sender.insert(SenderPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + + global.room.insert(RoomPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + + global.content.insert(ContentPushRule( 
+ PatternedPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + pattern: body.pattern.clone().unwrap_or_default(), + } + .into(), + )); + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; @@ -54,19 +245,426 @@ pub async fn set_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn get_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let actions = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::_Custom(_) => None, + }; + + db.flush().await?; + + Ok(get_pushrule_actions::Response { + actions: actions.unwrap_or_default(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn set_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.actions = body.actions.clone(); + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.actions = body.actions.clone(); + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.actions = body.actions.clone(); + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.actions = body.actions.clone(); + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.actions = body.actions.clone(); + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + }; + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_pushrule_actions::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") +)] +pub async fn get_pushrule_enabled_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let enabled = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::_Custom(_) => false, + }; + + db.flush().await?; + + Ok(get_pushrule_enabled::Response { enabled }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_enabled_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.enabled = body.enabled; + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.enabled = body.enabled; + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.enabled = body.enabled; + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.enabled = body.enabled; + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.enabled = body.enabled; + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; Ok(set_pushrule_enabled::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn delete_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes 
other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(delete_pushrule::Response.into()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 631d87b..4adc335 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -220,12 +220,8 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_user, - &room_id, - &db, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; } // 6. 
Events implied by name and topic diff --git a/src/main.rs b/src/main.rs index 9817c56..054c859 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,7 +55,12 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, + client_server::get_pushrule_route, client_server::set_pushrule_enabled_route, + client_server::get_pushrule_enabled_route, + client_server::get_pushrule_actions_route, + client_server::set_pushrule_actions_route, + client_server::delete_pushrule_route, client_server::get_room_event_route, client_server::get_filter_route, client_server::create_filter_route, From 35c1904b37812b08576e8da84d9e4effd2f71fc8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 24 Jan 2021 20:18:40 -0500 Subject: [PATCH 0398/1727] Finish forward extremity gathering, use resolved state as new snapshot --- src/server_server.rs | 147 +++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 67 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f782ad5..e733d24 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,6 +18,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -483,34 +484,6 @@ pub async fn get_public_rooms_route( .into()) } -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum PrevEvents { - Sequential(T), - Fork(Vec), -} - -impl IntoIterator for PrevEvents { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Sequential(item) => vec![item].into_iter(), - Self::Fork(list) => list.into_iter(), - } - } -} - -impl PrevEvents { - pub fn new(id: &[T]) -> Self { - match id { - [] => panic!("All events must have previous event"), - [single_id] => Self::Sequential(single_id.clone()), - rest => Self::Fork(rest.to_vec()), - } - } -} - #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -605,8 +578,16 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - // TODO: this could fail or the server not respond... - let keys = fetch_signing_keys(&db, origin).await?; + let keys = match fetch_signing_keys(&db, origin).await { + Ok(keys) => keys, + Err(_) => { + resolved_map.insert( + event_id, + Err("Could not find signing keys for this server".to_string()), + ); + continue; + } + }; pub_key_map.insert( origin.to_string(), @@ -769,11 +750,12 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, fork_ids) = match calculate_forward_extremities( + let (mut fork_states, extremities) = match calculate_forward_extremities( &db, &pdu, server_name, &pub_key_map, + current_state, &mut auth_cache, ) .await @@ -791,6 +773,7 @@ pub async fn send_transaction_message_route<'a>( let fork_states = fork_states.into_iter().collect::>(); + let mut update_state = false; // 13. 
start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -800,11 +783,12 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // We do need to force an update to this rooms state + update_state = true; + // TODO: remove this is for current debugging Jan, 15 2021 let mut number_fetches = 0_u32; let mut auth_events = vec![]; - // this keeps track if we error so we can break out of these inner loops - // to continue on with the incoming PDU's for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { @@ -821,14 +805,12 @@ pub async fn send_transaction_message_route<'a>( .await .map(|mut vec| { number_fetches += 1; - vec.remove(0) + vec.pop() }) { - Ok(aev) => aev, - Err(_) => { - resolved_map.insert( - event_id.clone(), - Err("Event has been soft failed".into()), - ); + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); continue 'main_pdu_loop; } }, @@ -839,20 +821,19 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); - let mut event_map = EventMap::new(); // Add everything we will need to event_map - event_map.extend( + auth_cache.extend( auth_events .iter() .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) .flatten(), ); - event_map.extend( + auth_cache.extend( incoming_auth_events .into_iter() .map(|pdu| (pdu.event_id().clone(), pdu)), ); - event_map.extend( + auth_cache.extend( state_at_event .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), @@ -873,7 +854,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut event_map, + &mut auth_cache, ) { Ok(res) => res .into_iter() @@ -905,14 +886,23 @@ pub async fn send_transaction_message_route<'a>( ); } else { // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_state(&db, &pdu, &fork_ids)?; + append_incoming_pdu( + &db, + &pdu, + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1029,6 +1019,7 @@ async fn fetch_check_auth_events( continue; } + // TODO: Batch these async calls so we can wait on multiple at once let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await .map(|mut vec| { @@ -1119,6 +1110,7 @@ async fn calculate_forward_extremities( pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, + current_state: BTreeMap<(EventType, Option), Arc>, auth_cache: &mut EventMap>, ) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; @@ -1126,17 +1118,13 @@ async fn calculate_forward_extremities( let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
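// ---- Editor's note (illustration only, not part of the patch) ----------------
// calculate_forward_extremities maintains the room's forward extremities: events
// that nothing else references yet. Conceptually, every event named in the
// incoming PDU's prev_events stops being a leaf, and the incoming PDU becomes a
// leaf unless something already references it. A minimal standalone sketch of
// that bookkeeping, with plain strings standing in for the real EventId/PduEvent
// types and all names hypothetical:
fn update_leaves(
    current_leaves: &mut Vec<String>,
    prev_events: &[String],
    incoming: &str,
    already_referenced: bool,
) {
    // Everything the new event points back to is no longer an extremity.
    current_leaves.retain(|leaf| !prev_events.contains(leaf));
    // The new event is an extremity only if nothing references it yet.
    if !already_referenced && !current_leaves.iter().any(|l| l.as_str() == incoming) {
        current_leaves.push(incoming.to_owned());
    }
}
// -------------------------------------------------------------------------------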
- if current_leaves.contains(pdu.event_id()) { - is_incoming_leaf = false; - // Not sure what to do here - } - + // // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if already_referenced(db, pdu)? { + if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { is_incoming_leaf = false; - // This event has been dealt with already?? + // Not sure what to do here } // TODO: @@ -1213,29 +1201,54 @@ async fn calculate_forward_extremities( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.insert( - db.rooms - .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(), - ); + fork_states.insert(current_state); } Ok((fork_states, dbg!(current_leaves))) } -/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) -fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { - Ok(false) -} - -fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { +/// Update the room state to be the resolved state and add the fully auth'ed event +/// to the DB. +/// +/// TODO: If we force the state we need to validate all events in that state +/// any events we fetched from another server need to be fully verified? +fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: Option>>, +) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let new = state + .into_iter() + .map(|((ev, k), pdu)| { + Ok(( + ( + ev, + k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + db.rooms + .get_pdu_id(pdu.event_id()) + .ok() + .flatten() + .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? + .to_vec(), + )) + }) + .collect::>()?; + + info!("Force update of state for {:?}", pdu); + + db.rooms.force_state(pdu.room_id(), new, &db.globals)?; + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
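// (Editor's note, illustration only: the write order is the invariant here.
// force_state runs first, and only when state resolution produced a new snapshot;
// append_to_state then records this PDU's state hash, and the PDU row itself is
// written last. A reader that can see the PDU can therefore always resolve its
// state, whereas the reverse order could briefly expose an event with no state
// snapshot behind it.)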
let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 265fab843a42d6eaef7a777104a72d101a2e91f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 0399/1727] feature: push rule settings --- src/client_server/push.rs | 624 +++++++++++++++++++++++++++++++++++++- src/main.rs | 5 + 2 files changed, 616 insertions(+), 13 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 05ba8d0..667d667 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,16 +1,22 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::warn; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + r0::push::{ + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, + get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + }, }, events::EventType, + push::{ + ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, + RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, + }, }; #[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use rocket::{delete, get, post, put}; #[cfg_attr( feature = "conduit_bin", @@ -36,16 +42,201 @@ pub async fn get_pushrules_all_route( .into()) } -#[cfg_attr(feature = "conduit_bin", put( - "/_matrix/client/r0/pushrules/<_>/<_>/<_>", - //data = "" -))] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn get_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = event.content.global; + let rule = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::_Custom(_) => None, + }; + + if let Some(rule) = rule { + Ok(get_pushrule::Response { rule }.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + } +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] pub async fn set_pushrule_route( db: State<'_, Database>, - //body: Ruma, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + + global.override_.insert(OverridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + + global.underride.insert(UnderridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + + global.sender.insert(SenderPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + + global.room.insert(RoomPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + + global.content.insert(ContentPushRule( + PatternedPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + pattern: body.pattern.clone().unwrap_or_default(), + } + .into(), + )); + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; @@ -54,19 +245,426 @@ pub async fn set_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn get_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let actions = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::_Custom(_) => None, + }; + + db.flush().await?; + + Ok(get_pushrule_actions::Response { + actions: actions.unwrap_or_default(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn set_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.actions = body.actions.clone(); + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.actions = body.actions.clone(); + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.actions = body.actions.clone(); + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.actions = body.actions.clone(); + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.actions = body.actions.clone(); + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + }; + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_pushrule_actions::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") +)] +pub async fn get_pushrule_enabled_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + 
ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let enabled = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::_Custom(_) => false, + }; + + db.flush().await?; + + Ok(get_pushrule_enabled::Response { enabled }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_enabled_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.enabled = body.enabled; + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.enabled = body.enabled; + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.enabled = body.enabled; + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.enabled = body.enabled; + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.enabled = body.enabled; + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; Ok(set_pushrule_enabled::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn delete_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let 
sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(delete_pushrule::Response.into()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { diff --git a/src/main.rs b/src/main.rs index 9c0eab6..93ab560 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,7 +55,12 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, + client_server::get_pushrule_route, client_server::set_pushrule_enabled_route, + client_server::get_pushrule_enabled_route, + client_server::get_pushrule_actions_route, + client_server::set_pushrule_actions_route, + client_server::delete_pushrule_route, client_server::get_room_event_route, client_server::get_filter_route, client_server::create_filter_route, From 894b6ef0379946d26fa1f50f45daea50d739014f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:33:41 -0500 Subject: [PATCH 0400/1727] Resolved state is set as the current room state on incoming events --- src/server_server.rs | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e733d24..14a1d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -870,36 +870,20 @@ pub async fn send_transaction_message_route<'a>( } }; - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_incoming_pdu( + &db, &pdu, - single_prev, - &state_at_forks, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - } else { - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). 
- append_incoming_pdu( - &db, - &pdu, - &extremities, - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; - // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); - } + // Event has passed all auth/stateres checks + resolved_map.insert(pdu.event_id().clone(), Ok(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -1210,8 +1194,7 @@ async fn calculate_forward_extremities( /// Update the room state to be the resolved state and add the fully auth'ed event /// to the DB. /// -/// TODO: If we force the state we need to validate all events in that state -/// any events we fetched from another server need to be fully verified? +/// TODO: Since all these events passed state resolution can we trust them to add fn append_incoming_pdu( db: &Database, pdu: &PduEvent, From b8b40ce38b2b6ac14294293bbf7b50330f3b667d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:50:45 -0500 Subject: [PATCH 0401/1727] Cleanup dbg prints and error messages --- src/server_server.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 14a1d0c..20d76f1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -496,7 +496,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - dbg!(&*body); + // dbg!(&*body); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -1148,8 +1148,6 @@ async fn calculate_forward_extremities( fork_states.insert(state); } else { - error!("Forward extremity not found... {}", id); - let res = db .sending .send_federation_request( @@ -1188,7 +1186,7 @@ async fn calculate_forward_extremities( fork_states.insert(current_state); } - Ok((fork_states, dbg!(current_leaves))) + Ok((fork_states, current_leaves)) } /// Update the room state to be the resolved state and add the fully auth'ed event From cd0c5c0566251f882b05f97f53266e251f11c4af Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 11:20:33 -0500 Subject: [PATCH 0402/1727] Append state event that pass resolution to DB, update to tokio 1.1 --- Cargo.lock | 607 ++++++++++++++------------------------ Cargo.toml | 14 +- src/client_server/sync.rs | 5 +- src/database.rs | 3 +- src/database/globals.rs | 14 +- src/server_server.rs | 113 +++++-- 6 files changed, 322 insertions(+), 434 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce17095..66f624c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" @@ -48,6 +33,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.42" @@ -76,7 +82,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,32 +91,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.3", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -142,9 +128,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" [[package]] name = "bytemuck" @@ -158,12 +144,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -176,12 +156,6 @@ version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ 
-198,7 +172,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -264,7 +238,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -273,7 +247,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const_fn", "crossbeam-utils", "lazy_static", @@ -288,10 +262,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -349,7 +329,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -376,7 +356,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -442,25 +422,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.12" @@ -549,7 +513,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -571,20 +535,20 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.1+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -597,12 +561,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" 
version = "0.3.0" @@ -611,11 +569,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -661,7 +619,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -670,18 +628,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -699,11 +657,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -723,15 +681,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", + "bytes", "hyper", "native-tls", "tokio", - "tokio-tls", + "tokio-native-tls", ] [[package]] @@ -784,16 +742,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -804,7 +753,7 @@ checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -816,9 +765,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] @@ -831,18 +780,15 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -856,16 +802,6 @@ dependencies = [ "serde", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -874,9 +810,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "linked-hash-map" @@ -895,11 +831,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -950,16 +886,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -969,56 +895,27 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - [[package]] name = "mio" -version = "0.6.23" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", "log", "miow", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", + "ntapi", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + 
"socket2", + "winapi", ] [[package]] @@ -1040,14 +937,12 @@ dependencies = [ ] [[package]] -name = "net2" -version = "0.2.37" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1101,12 +996,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" - [[package]] name = "once_cell" version = "1.5.2" @@ -1120,7 +1009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1173,12 +1062,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.1.57", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1256,12 +1145,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1289,7 +1172,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1371,9 +1254,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1416,7 +1299,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -1507,17 +1390,17 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", @@ -1530,14 +1413,13 @@ dependencies = [ "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "serde", "serde_urlencoded", "tokio", - "tokio-tls", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1567,13 +1449,13 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] name = "rocket" version = "0.5.0-dev" -source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1602,7 +1484,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1614,7 +1496,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", @@ -1626,6 +1508,7 @@ dependencies = [ "parking_lot", "pear", "percent-encoding", + "pin-project-lite", "ref-cast", "smallvec", "state", @@ -1640,7 +1523,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "js_int", @@ -1658,7 +1541,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "http", "percent-encoding", @@ -1673,7 +1556,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1684,7 +1567,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "ruma-api", "ruma-common", @@ -1698,7 +1581,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "http", @@ -1717,7 +1600,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" 
+source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "maplit", @@ -1730,7 +1613,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-common", @@ -1744,7 +1627,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1755,7 +1638,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-api", @@ -1770,10 +1653,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "paste", - "rand 0.8.2", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1784,7 +1667,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro2", "quote", @@ -1795,12 +1678,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "form_urlencoded", "itoa", @@ -1813,7 +1696,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1824,9 +1707,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" 
-source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1840,18 +1723,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1863,11 +1740,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1887,7 +1764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1946,18 +1823,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2048,9 +1925,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2077,15 +1954,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" +source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" dependencies = [ "itertools", + "log", "maplit", "ruma", "serde", "serde_json", "thiserror", - "tracing", ] [[package]] @@ -2139,9 +2016,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2154,12 
+2031,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.2", + "rand 0.8.3", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2184,18 +2061,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2203,7 +2080,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2231,9 +2108,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2246,32 +2123,28 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" +checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "autocfg", + "bytes", "libc", "memchr", "mio", - "mio-uds", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite", "signal-hook-registry", - "slab", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2279,39 +2152,51 @@ dependencies = [ ] [[package]] -name = "tokio-rustls" -version = "0.14.1" +name = "tokio-native-tls" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" -dependencies = [ - "futures-core", - "rustls", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", "tokio", ] [[package]] -name = "tokio-util" -version = "0.3.1" +name = "tokio-rustls" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = 
"bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "bytes 0.5.6", + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" +dependencies = [ + "async-stream", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", + "pin-project-lite", "tokio", + "tokio-stream", ] [[package]] @@ -2325,9 +2210,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2335,24 +2220,11 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", + "cfg-if", + "pin-project-lite", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" version = "0.1.17" @@ -2374,18 +2246,22 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", "tokio", @@ -2394,17 +2270,17 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -2436,15 +2312,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2523,17 +2390,17 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2541,9 +2408,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2556,11 +2423,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2568,9 +2435,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2578,9 +2445,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2591,15 +2458,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2617,9 +2484,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2627,12 +2494,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2643,12 +2504,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2667,7 +2522,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2676,17 +2531,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 405c89f..2c6c741 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,28 +14,28 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "031948c1daaa146128d8a435be116476f2adde00", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.24" } 
+tokio = { version = "1.1.0", features = ["macros", "time", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" @@ -50,7 +50,7 @@ rand = "0.7.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.0" # Used for conduit::Error type thiserror = "1.0.22" # Used to generate thumbnails for images @@ -60,7 +60,7 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices regex = "1.4.2" diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..3bfff45 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,9 +674,10 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { - _ = &mut delay => {} + _ = &mut delay, if delay.is_elapsed() => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index e94a517..190f8be 100644 --- a/src/database.rs +++ b/src/database.rs @@ -106,8 +106,7 @@ impl Database { db.open_tree("global")?, db.open_tree("servertimeout_signingkey")?, config, - ) - .await?, + )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 4670068..7156a75 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -26,11 +26,7 @@ pub struct Globals { } impl Globals { - pub async fn load( - globals: sled::Tree, - server_keys: sled::Tree, - config: Config, - ) -> Result { + pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
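The sync.rs hunk above tracks the tokio 1.x timer API: `delay_for` is gone, and `sleep` returns a future that has to be pinned before it can be polled by reference inside `select!`. A minimal sketch of that pattern, assuming a caller that races a timeout against some wake-up future (`wait_for_change` and `watcher` are illustrative names, not code from this patch):

use std::time::Duration;

// Wait for either a wake-up from `watcher` or a 30 second timeout.
async fn wait_for_change(watcher: impl std::future::Future<Output = ()>) {
    let timeout = tokio::time::sleep(Duration::from_secs(30));
    // `Sleep` is !Unpin in tokio 1.x, so pin it before selecting on `&mut`.
    tokio::pin!(timeout);

    tokio::select! {
        _ = &mut timeout => { /* timed out */ }
        _ = watcher => { /* something changed */ }
    }
}

The `if delay.is_elapsed()` precondition in the hunk is specific to that call site and is omitted in this sketch.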
.expect("utils::generate_keypair always returns Some"); @@ -77,11 +73,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), servertimeout_signingkey: server_keys, }) diff --git a/src/server_server.rs b/src/server_server.rs index 20d76f1..adf3c58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -25,7 +25,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryFrom, fmt::Debug, future::Future, @@ -839,7 +839,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - match state_res::StateResolution::resolve( + let res = match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, &fork_states @@ -856,10 +856,7 @@ pub async fn send_transaction_message_route<'a>( .collect(), &mut auth_cache, ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), + Ok(res) => res, Err(_) => { resolved_map.insert( pdu.event_id().clone(), @@ -867,7 +864,29 @@ pub async fn send_transaction_message_route<'a>( ); continue 'main_pdu_loop; } + }; + let mut resolved = BTreeMap::new(); + for (k, id) in res { + // We should know of the event but just incase + let pdu = match auth_cache.get(&id) { + Some(pdu) => pdu.clone(), + None => { + match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) + .await + .map(|mut vec| vec.pop()) + { + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); + continue 'main_pdu_loop; + } + } + } + }; + resolved.insert(k, pdu); } + resolved }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). @@ -1199,37 +1218,67 @@ fn append_incoming_pdu( new_room_leaves: &[EventId], state: Option>>, ) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid.to_vec(), + ); + } + None => { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // TODO: can we use are current state if we just add this event to the end of our + // pduid_pdu tree?? + let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + + db.rooms.append_pdu( + &*pdu, + utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &new_room_leaves, + &db, + )?; + // TODO: is this ok... 
+ db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pdu_id.to_vec(), + ); + } + } + } + + info!("Force update of state for {:?}", pdu); + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + } + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let new = state - .into_iter() - .map(|((ev, k), pdu)| { - Ok(( - ( - ev, - k.ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - db.rooms - .get_pdu_id(pdu.event_id()) - .ok() - .flatten() - .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? - .to_vec(), - )) - }) - .collect::>()?; - - info!("Force update of state for {:?}", pdu); - - db.rooms.force_state(pdu.room_id(), new, &db.globals)?; - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 56b816a2be7a8286f8ec4e60e198e64e4a12227d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 21:45:33 -0500 Subject: [PATCH 0403/1727] Fix and integrate outlier tree, build forks after adding event to DB --- src/database.rs | 2 +- src/database/rooms.rs | 95 +++++++----- src/server_server.rs | 334 +++++++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 168 deletions(-) diff --git a/src/database.rs b/src/database.rs index 190f8be..7ad18cb 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3f3aab..d459aee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,9 +27,10 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::Duration, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, sending::Sending}; /// The unique identifier of each state group. /// @@ -67,7 +68,7 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) pduid_outlierpdu: sled::Tree, } impl Rooms { @@ -85,13 +86,20 @@ impl Rooms { let mut pduid = room_id.as_bytes().to_vec(); pduid.push(0xff); pduid.extend_from_slice(&pduid_short?); - self.pduid_pdu.get(&pduid)?.map_or_else( - || Err(Error::bad_database("Failed to find PDU in state snapshot.")), - |b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }, - ) + match self.pduid_pdu.get(&pduid)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")), + None => self + .pduid_outlierpdu + .get(pduid)? 
+ .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + } }) .filter_map(|r| r.ok()) .map(|pdu| { @@ -137,12 +145,20 @@ impl Rooms { Ok::<_, Error>(Some(( pdu_id.clone().into(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + match self.pduid_pdu.get(&pdu_id)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + None => self + .pduid_outlierpdu + .get(pdu_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + }, ))) }) } else { @@ -307,9 +323,12 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) @@ -328,13 +347,17 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } + /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { @@ -420,23 +443,27 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { + self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) + } else { + Ok(None) + } } /// Returns true if the event_id was previously inserted. 
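The rooms.rs hunks above all repeat one lookup pattern: try the timeline tree first, then fall back to the outlier tree under the same pdu id. A self-contained sketch of just that read path, with PDU deserialization elided (`get_pdu_bytes` is a made-up helper; the tree names mirror the diff):

// Fetch raw PDU bytes: timeline tree first, outlier tree second.
fn get_pdu_bytes(
    pduid_pdu: &sled::Tree,
    pduid_outlierpdu: &sled::Tree,
    pdu_id: &[u8],
) -> sled::Result<Option<sled::IVec>> {
    match pduid_pdu.get(pdu_id)? {
        Some(bytes) => Ok(Some(bytes)),       // event is part of the timeline
        None => pduid_outlierpdu.get(pdu_id), // otherwise it may be an outlier
    }
}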
- pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + + // we need to be able to find it by event_id + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let res = self - .eventid_outlierpdu + .pduid_outlierpdu .insert( - event_id.as_bytes(), + pdu_id, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -484,7 +511,9 @@ impl Rooms { } // We no longer keep this pdu as an outlier - self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? { + self.pduid_outlierpdu.remove(id)?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index adf3c58..ad0a1a4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -614,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (_, Vec>) = match validate_event( + let (pdu, previous): (Arc, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -638,69 +638,75 @@ pub async fn send_transaction_message_route<'a>( None }; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
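On the write side, `append_pdu_outlier` now performs two inserts: the event id to pdu id mapping, so the event stays findable by event id, and the serialized pdu in the outlier tree. A rough self-contained equivalent (`store_outlier` is a made-up name, the JSON value is treated as a plain string, and error handling is simplified):

// Store an outlier PDU and remember how to find it by event id.
// Returns true if this pdu id was already present in the outlier tree.
fn store_outlier(
    eventid_pduid: &sled::Tree,
    pduid_outlierpdu: &sled::Tree,
    event_id: &str,
    pdu_id: &[u8],
    pdu_json: &str,
) -> sled::Result<bool> {
    eventid_pduid.insert(event_id.as_bytes(), pdu_id)?;
    // `insert` returns the previous value, so `is_some` reports a re-insert.
    Ok(pduid_outlierpdu.insert(pdu_id, pdu_json.as_bytes())?.is_some())
}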
check the auth of the event passes based on the calculated state of the event - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, + let (mut state_at_event, incoming_auth_events): ( + StateMap>, + Vec>, + ) = match db + .sending + .send_federation_request( + &db.globals, + server_name, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events( + &db, server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, ) - .await - { - Ok(res) => { - let state = fetch_events( + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( &db, server_name, &pub_key_map, - &res.pdu_ids, + &res.auth_chain_ids, &mut auth_cache, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -750,12 +756,25 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, extremities) = match calculate_forward_extremities( + let extremities = match calculate_forward_extremities(&db, &pdu).await { + Ok(fork_ids) => fork_ids, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; + + // Now that the event has passed all auth it is added into the timeline, we do have to + // find the leaves otherwise we would do this sooner + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + let mut fork_states = match build_forward_extremity_snapshots( &db, - &pdu, + pdu.room_id(), server_name, - &pub_key_map, current_state, + &extremities, + &pub_key_map, &mut auth_cache, ) .await @@ -767,6 +786,9 @@ pub async fn send_transaction_message_route<'a>( } }; + // Make this the state after (since we appended_incoming_pdu this should agree with our servers + // current state). 
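The sanity check added above rests on the fact that valid room state holds at most one event per (event type, state key) pair, so a repeated key in the remote's response means the state is invalid. The core of that check, with the key type simplified to plain strings (`has_conflicting_state` is an illustrative name, not the patch's code):

use std::collections::BTreeSet;

// Returns true if two events in `state` share the same (kind, state_key).
fn has_conflicting_state<'a>(
    state: impl IntoIterator<Item = (&'a str, Option<&'a str>)>,
) -> bool {
    let mut seen = BTreeSet::new();
    // `insert` returns false when the key was already present.
    state.into_iter().any(|key| !seen.insert(key))
}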
+ state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // add the incoming events to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets fork_states.insert(state_at_event.clone()); @@ -840,7 +862,7 @@ pub async fn send_transaction_message_route<'a>( ); let res = match state_res::StateResolution::resolve( - &pdu.room_id, + pdu.room_id(), &RoomVersionId::Version6, &fork_states .into_iter() @@ -865,6 +887,7 @@ pub async fn send_transaction_message_route<'a>( continue 'main_pdu_loop; } }; + let mut resolved = BTreeMap::new(); for (k, id) in res { // We should know of the event but just incase @@ -890,10 +913,9 @@ pub async fn send_transaction_message_route<'a>( }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( + update_resolved_state( &db, - &pdu, - &extremities, + pdu.room_id(), if update_state { Some(state_at_forks) } else { @@ -905,7 +927,10 @@ pub async fn send_transaction_message_route<'a>( resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { + pdus: dbg!(resolved_map), + } + .into()) } /// An async function that can recursively calls itself. @@ -1036,13 +1061,14 @@ async fn fetch_check_auth_events( Ok(()) } -/// Find the event and auth it. +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. /// /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? -async fn fetch_events( +pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, key_map: &PublicKeyMap, @@ -1071,6 +1097,13 @@ async fn fetch_events( .await .map_err(|_| Error::Conflict("Authentication of event failed"))?; + // create the pduid for this event but stick it in the outliers DB + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1084,7 +1117,7 @@ async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -async fn fetch_signing_keys( +pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, ) -> Result> { @@ -1108,26 +1141,28 @@ async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. -async fn calculate_forward_extremities( +pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, - origin: &ServerName, - pub_key_map: &PublicKeyMap, - current_state: BTreeMap<(EventType, Option), Arc>, - auth_cache: &mut EventMap>, -) -> Result<(BTreeSet>>, Vec)> { +) -> Result> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- // + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = false; - // Not sure what to do here + // + // We first check if know of the event and then don't include it as a forward + // extremity if it is a timeline event + if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); } // TODO: @@ -1144,11 +1179,34 @@ async fn calculate_forward_extremities( } } - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + + Ok(current_leaves) +} + +/// This should always be called after the incoming event has been appended to the DB. +/// +/// This guarentees that the incoming event will be in the state sets (at least our servers +/// and the sending server). +pub(crate) async fn build_forward_extremity_snapshots( + db: &Database, + room_id: &RoomId, + origin: &ServerName, + current_state: StateMap>, + current_leaves: &[EventId], + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let current_hash = db.rooms.current_state_hash(room_id)?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); - for id in ¤t_leaves { + for id in current_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1158,14 +1216,21 @@ async fn calculate_forward_extremities( if current_hash.as_ref() == Some(&state_hash) { includes_current_state = true; } - let state = db + + let mut state_before = db .rooms - .state_full(&pdu.room_id, &state_hash)? + .state_full(room_id, &state_hash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(state); + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } else { let res = db .sending @@ -1173,7 +1238,7 @@ async fn calculate_forward_extremities( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: pdu.room_id(), + room_id, event_id: id, }, ) @@ -1181,41 +1246,38 @@ async fn calculate_forward_extremities( // TODO: This only adds events to the auth_cache, there is for sure a better way to // do this... - fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?; + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache) + let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) .await? .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); + .collect::>(); - fork_states.insert(state); + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? 
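Conceptually, the forward extremities tracked above are the events that nothing else in the room references yet: an existing leaf stops being one once an accepted event lists it as a predecessor, and the incoming event becomes a leaf only if nothing already known references it. A simplified, self-contained illustration of that bookkeeping (`update_leaves` and its parameters are illustrative; the real function also gathers state per leaf):

// Update a room's forward extremities for one incoming event.
fn update_leaves(
    mut leaves: Vec<String>,
    new_event_id: &str,
    prev_events: &[String],
    already_referenced: bool,
) -> Vec<String> {
    // Any current leaf that the new event points at is no longer a leaf.
    leaves.retain(|leaf| !prev_events.contains(leaf));
    // The new event is a leaf unless some known event already references it.
    if !already_referenced && !leaves.iter().any(|l| l == new_event_id) {
        leaves.push(new_event_id.to_owned());
    }
    leaves
}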
+ .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } } - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { fork_states.insert(current_state); } - Ok((fork_states, current_leaves)) + Ok(fork_states) } -/// Update the room state to be the resolved state and add the fully auth'ed event -/// to the DB. -/// -/// TODO: Since all these events passed state resolution can we trust them to add -fn append_incoming_pdu( +pub(crate) fn update_resolved_state( db: &Database, - pdu: &PduEvent, - new_room_leaves: &[EventId], + room_id: &RoomId, state: Option>>, ) -> Result<()> { // Update the state of the room if needed @@ -1236,44 +1298,50 @@ fn append_incoming_pdu( ); } None => { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // TODO: can we use are current state if we just add this event to the end of our - // pduid_pdu tree?? - let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &*pdu, - utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &new_room_leaves, - &db, - )?; - // TODO: is this ok... - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pdu_id.to_vec(), - ); + error!("We didn't append an event as an outlier\n{:?}", pdu); } } } - info!("Force update of state for {:?}", pdu); - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; + db.rooms.force_state(room_id, new_state, &db.globals)?; } + Ok(()) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +pub(crate) fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: &StateMap>, +) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + pduid.to_vec(), + ); + } + None => { + error!("We didn't append an event as an outlier\n{:?}", pdu); + } + } + } + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1281,7 +1349,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
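The sled keys built above follow one layout everywhere: the room id bytes, a 0xff separator, then the next value of the global counter in big-endian form, so entries within a room sort in insertion order. As a small sketch (`make_pdu_id` is a stand-in for the inline code; `count` stands in for `db.globals.next_count()`):

// Build the sled key for a timeline entry: room id + 0xff + big-endian count.
fn make_pdu_id(room_id: &str, count: u64) -> Vec<u8> {
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(0xff);
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

Using big-endian bytes is what keeps numeric order and lexicographic key order in agreement.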
- let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1292,9 +1360,7 @@ fn append_incoming_pdu( &db, )?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + db.rooms.set_room_state(pdu.room_id(), &state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From a0ecd76e210cc924884a5b1c1d2d81d3b608827a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 26 Jan 2021 21:53:03 -0500 Subject: [PATCH 0404/1727] Run cargo fmt using nightly --- src/client_server/push.rs | 21 +++++++++++---------- src/client_server/read_marker.rs | 3 ++- src/database/appservice.rs | 6 ++++-- src/database/globals.rs | 9 +++++---- src/error.rs | 6 +++++- src/main.rs | 16 ++++++++++------ 6 files changed, 37 insertions(+), 24 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..fd938c1 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -5,10 +5,11 @@ use ruma::{ error::ErrorKind, r0::push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, - get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, + set_pushrule_enabled, RuleKind, }, }, - events::EventType, + events::{push_rules, EventType}, push::{ ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, @@ -30,7 +31,7 @@ pub async fn get_pushrules_all_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -54,7 +55,7 @@ pub async fn get_pushrule_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -116,7 +117,7 @@ pub async fn set_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -262,7 +263,7 @@ pub async fn get_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -325,7 +326,7 @@ pub async fn set_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -428,7 +429,7 @@ pub async fn get_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -488,7 +489,7 @@ pub async fn set_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -591,7 +592,7 @@ pub async fn delete_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 0c4ec1a..bb76a44 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -2,7 +2,8 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, r0::capabilities::get_capabilities, r0::read_marker::set_read_marker, + error::ErrorKind, + r0::{capabilities::get_capabilities, read_marker::set_read_marker}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 26ea5b9..764291d 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; #[derive(Clone)] pub struct Appservice { diff --git a/src/database/globals.rs b/src/database/globals.rs index 7156a75..fc4adc3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,10 +4,11 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, + time::Duration, +}; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; diff --git a/src/error.rs b/src/error.rs index 13efce6..d8f10f4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; +use std::{ + collections::HashMap, + sync::RwLock, + time::{Duration, Instant}, +}; use log::error; use ruma::{ diff --git a/src/main.rs b/src/main.rs index 054c859..5aa0a19 100644 --- a/src/main.rs +++ b/src/main.rs @@ -18,11 +18,15 @@ pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use log::LevelFilter; -use rocket::figment::{ - providers::{Env, Format, Toml}, - Figment, +use rocket::{ + catch, catchers, + fairing::AdHoc, + figment::{ + providers::{Env, Format, Toml}, + Figment, + }, + routes, Request, }; -use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger @@ -143,6 +147,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_key_changes_route, client_server::get_pushers_route, client_server::set_pushers_route, + // client_server::third_party_route, client_server::upgrade_room_route, server_server::get_server_version_route, server_server::get_server_keys_route, @@ -165,8 +170,7 @@ fn setup_rocket() -> rocket::Rocket { .await .expect("config is valid"); - data.sending - .start_handler(&data.globals, &data.rooms, &data.appservice); + data.sending.start_handler(&data); 
log::set_boxed_logger(Box::new(ConduitLogger { db: data.clone(), last_logs: Default::default(), From 2d69e816997d9bf4f51b6e35c6a9c408fb1c144a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 26 Jan 2021 21:54:35 -0500 Subject: [PATCH 0405/1727] WIP: send out push notification, impl pusher routes It seems in order to test this I may also have to impl the email 3pid route? I need to call the set_pusher route somehow. --- src/client_server/account.rs | 14 ++ src/client_server/push.rs | 34 ++- src/database.rs | 11 +- src/database/pusher.rs | 148 +++++++++++ src/database/rooms.rs | 3 + src/database/sending.rs | 470 +++++++++++++++++++++++------------ 6 files changed, 514 insertions(+), 166 deletions(-) create mode 100644 src/database/pusher.rs diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 76354b6..9f6c576 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -659,3 +659,17 @@ pub async fn deactivate_route( } .into()) } + +/*/ +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/account/3pid", data = "") +)] +pub async fn third_party_route( + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(account::add_3pid::Response::default().into()) +} +*/ diff --git a/src/client_server/push.rs b/src/client_server/push.rs index fd938c1..3a81679 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -666,20 +666,36 @@ pub async fn delete_pushrule_route( Ok(delete_pushrule::Response.into()) } -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] -pub async fn get_pushers_route() -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushers", data = "") +)] +pub async fn get_pushers_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + Ok(get_pushers::Response { - pushers: Vec::new(), + pushers: db.pusher.get_pusher(sender)?, } .into()) } -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] -pub async fn set_pushers_route(db: State<'_, Database>) -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/pushers/set", data = "") +)] +pub async fn set_pushers_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let pusher = body.pusher.clone(); + + db.pusher.set_pusher(sender, pusher)?; + db.flush().await?; - Ok(get_pushers::Response { - pushers: Vec::new(), - } - .into()) + Ok(set_pusher::Response::default().into()) } diff --git a/src/database.rs b/src/database.rs index 7ad18cb..b8dc524 100644 --- a/src/database.rs +++ b/src/database.rs @@ -4,6 +4,7 @@ pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; +pub mod pusher; pub mod rooms; pub mod sending; pub mod transaction_ids; @@ -17,9 +18,11 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::collections::HashMap; -use std::fs::remove_dir_all; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + fs::remove_dir_all, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] @@ -73,6 +76,7 @@ pub struct Database { pub sending: sending::Sending, pub admin: admin::Admin, pub appservice: appservice::Appservice, + pub pusher: pusher::PushData, pub _db: sled::Db, } 
@@ -187,6 +191,7 @@ impl Database { cached_registrations: Arc::new(RwLock::new(HashMap::new())), id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, }, + pusher: pusher::PushData::new(&db)?, _db: db, }; diff --git a/src/database/pusher.rs b/src/database/pusher.rs new file mode 100644 index 0000000..041085d --- /dev/null +++ b/src/database/pusher.rs @@ -0,0 +1,148 @@ +use crate::{Error, PduEvent, Result}; +use ruma::{ + api::client::r0::push::{Pusher, PusherKind}, + events::{ + room::{ + member::MemberEventContent, + message::{MessageEventContent, TextMessageEventContent}, + }, + EventType, + }, + push::{PushCondition, Ruleset}, + UserId, +}; + +#[derive(Debug, Clone)] +pub struct PushData { + /// UserId + pushkey -> Pusher + pub(super) senderkey_pusher: sled::Tree, +} + +impl PushData { + pub fn new(db: &sled::Db) -> Result { + Ok(Self { + senderkey_pusher: db.open_tree("senderkey_pusher")?, + }) + } + + pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { + let mut key = sender.as_bytes().to_vec(); + key.extend_from_slice(pusher.pushkey.as_bytes()); + + self.senderkey_pusher.insert( + key, + &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), + )?; + + Ok(()) + } + + pub fn get_pusher(&self, sender: &UserId) -> Result> { + self.senderkey_pusher + .scan_prefix(sender.as_bytes()) + .values() + .map(|push: std::result::Result| { + let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + Ok(serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + }) + .collect::>>() + } +} + +pub async fn send_push_notice( + user: &UserId, + pusher: &Pusher, + ruleset: Ruleset, + pdu: &PduEvent, +) -> Result<()> { + for rule in ruleset.into_iter() { + // TODO: can actions contain contradictory Actions + if rule + .actions + .iter() + .any(|act| matches!(act, ruma::push::Action::DontNotify)) + || !rule.enabled + { + continue; + } + + match rule.rule_id.as_str() { + ".m.rule.master" => {} + ".m.rule.suppress_notices" => {} + ".m.rule.invite_for_me" => {} + ".m.rule.member_event" => { + if let EventType::RoomMember = &pdu.kind { + // TODO use this? + let _member = serde_json::from_value::(pdu.content.clone()) + .map_err(|_| Error::bad_database("PDU contained bad message content"))?; + if let Some(conditions) = rule.conditions { + if conditions.iter().any(|cond| match cond { + PushCondition::EventMatch { key, pattern } => { + let mut json = + serde_json::to_value(pdu).expect("PDU is valid JSON"); + for key in key.split('.') { + json = json[key].clone(); + } + // TODO: this is baddddd + json.to_string().contains(pattern) + } + _ => false, + }) {} + } + } + } + ".m.rule.contains_display_name" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. }) = + &msg_content + { + if body.contains(user.localpart()) { + send_notice(user, &pusher, &pdu).await?; + } + } + } + } + ".m.rule.tombstone" => {} + ".m.rule.roomnotif" => {} + ".m.rule.contains_user_name" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. 
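PushData stores each pusher as JSON under a key made of the user id bytes immediately followed by the pushkey bytes, which is why `get_pusher` can list all of a user's pushers with one prefix scan. A stripped-down sketch of that read path (`pushers_for` is a made-up helper and the stored value is kept as a raw string rather than a `Pusher`):

// List the raw pusher payloads stored for one user via a prefix scan.
fn pushers_for(senderkey_pusher: &sled::Tree, user_id: &str) -> sled::Result<Vec<String>> {
    senderkey_pusher
        .scan_prefix(user_id.as_bytes())
        .values()
        .map(|value| Ok(String::from_utf8_lossy(&value?).into_owned()))
        .collect()
}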
}) = + &msg_content + { + if body.contains(user.localpart()) { + send_notice(user, &pusher, &pdu).await?; + } + } + } + } + ".m.rule.call" => {} + ".m.rule.encrypted_room_one_to_one" => {} + ".m.rule.room_one_to_one" => {} + ".m.rule.message" => {} + ".m.rule.encrypted" => {} + _ => {} + } + } + Ok(()) +} + +async fn send_notice(_sender: &UserId, pusher: &Pusher, _event: &PduEvent) -> Result<()> { + if let Some(PusherKind::Http) = pusher.kind { + log::error!("YAHOOO"); + } else { + // EMAIL + todo!("send an email") + } + Ok(()) +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d459aee..19554f6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -531,6 +531,9 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + // See if the event matches any known pushers + db.sending.send_push_pdu(&*pdu_id)?; + match pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { diff --git a/src/database/sending.rs b/src/database/sending.rs index 4b0309f..a478501 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,43 +1,62 @@ use std::{ collections::HashMap, convert::TryFrom, - fmt::Debug, + fmt::{Debug, Display, Formatter}, sync::Arc, time::{Duration, Instant, SystemTime}, }; -use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; +use crate::{appservice_server, server_server, utils, Database, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::info; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, + events::{push_rules, EventType}, ServerName, }; use sled::IVec; -use tokio::select; -use tokio::sync::Semaphore; +use tokio::{select, sync::Semaphore}; + +use super::{ + account_data::AccountData, appservice::Appservice, globals::Globals, pusher::PushData, + rooms::Rooms, +}; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum OutgoingKind { + Appservice(Box), + Push(Vec), + Normal(Box), +} + +impl Display for OutgoingKind { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + OutgoingKind::Appservice(name) => f.write_str(name.as_str()), + OutgoingKind::Normal(name) => f.write_str(name.as_str()), + OutgoingKind::Push(_) => f.write_str("Push notification TODO"), + } + } +} #[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
- pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+)ServerName + PduId - pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+)ServerName + PduId (pduid can be empty for reservation) + pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)ServerName / UserId + PduId + pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId (pduid can be empty for reservation) pub(super) maximum_requests: Arc, } impl Sending { - pub fn start_handler( - &self, - globals: &super::globals::Globals, - rooms: &super::rooms::Rooms, - appservice: &super::appservice::Appservice, - ) { + pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); - let rooms = rooms.clone(); - let globals = globals.clone(); - let appservice = appservice.clone(); + let rooms = db.rooms.clone(); + let globals = db.globals.clone(); + let appservice = db.appservice.clone(); + let pusher = db.pusher.clone(); + let account_data = db.account_data.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -45,45 +64,57 @@ impl Sending { // Retry requests we could not finish yet let mut current_transactions = HashMap::new(); - for (server, pdu, is_appservice) in servercurrentpdus + for (outgoing_kind, pdu) in servercurrentpdus .iter() .filter_map(|r| r.ok()) .filter_map(|(key, _)| Self::parse_servercurrentpdus(key).ok()) - .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key + .filter(|(_, pdu)| !pdu.is_empty()) // Skip reservation key .take(50) // This should not contain more than 50 anyway { current_transactions - .entry((server, is_appservice)) + .entry(outgoing_kind) .or_insert_with(Vec::new) .push(pdu); } - for ((server, is_appservice), pdus) in current_transactions { + for (outgoing_kind, pdus) in current_transactions { futures.push(Self::handle_event( - server, - is_appservice, + outgoing_kind, pdus, - &globals, &rooms, + &globals, &appservice, + &pusher, + &account_data, )); } - let mut last_failed_try: HashMap, (u32, Instant)> = HashMap::new(); + let mut last_failed_try: HashMap = HashMap::new(); let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! 
{ Some(response) = futures.next() => { match response { - Ok((server, is_appservice)) => { - let mut prefix = if is_appservice { - b"+".to_vec() - } else { - Vec::new() + Ok(outgoing_kind) => { + let mut prefix = match &outgoing_kind { + OutgoingKind::Appservice(server) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(server.as_bytes()); + p + } + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(server) => { + let mut p = vec![]; + p.extend_from_slice(server.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); for key in servercurrentpdus @@ -116,22 +147,45 @@ impl Sending { servernamepduids.remove(¤t_key).unwrap(); } - futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice)); + futures.push( + Self::handle_event( + outgoing_kind.clone(), + new_pdus, + &rooms, + &globals, + &appservice, + &pusher, + &account_data + ) + ); } else { servercurrentpdus.remove(&prefix).unwrap(); // servercurrentpdus with the prefix should be empty now } } - Err((server, is_appservice, e)) => { - info!("Couldn't send transaction to {}\n{}", server, e); - let mut prefix = if is_appservice { - b"+".to_vec() - } else { - Vec::new() + Err((outgoing_kind, e)) => { + info!("Couldn't send transaction to {}\n{}", outgoing_kind, e); + let mut prefix = match &outgoing_kind { + OutgoingKind::Appservice(serv) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(serv.as_bytes()); + p + }, + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(serv) => { + let mut p = vec![]; + p.extend_from_slice(serv.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); + prefix.push(0xff); - last_failed_try.insert(server.clone(), match last_failed_try.get(&server) { + + last_failed_try.insert(outgoing_kind.clone(), match last_failed_try.get(&outgoing_kind) { Some(last_failed) => { (last_failed.0+1, Instant::now()) }, @@ -157,40 +211,56 @@ impl Sending { instant.elapsed() < min_elapsed_duration }; - if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( + if let Some((outgoing_kind, pdu_id)) = utils::string_from_bytes( parts .next() .expect("splitn will always return 1 or more elements"), ) - .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) - .map(|server_str| { + .map_err(|_| Error::bad_database("[Utf8] ServerName in servernamepduid bytes are invalid.")) + .and_then(|ident_str| { // Appservices start with a plus - if server_str.starts_with('+') { - (server_str[1..].to_owned(), true) + Ok(if ident_str.starts_with('+') { + OutgoingKind::Appservice( + Box::::try_from(&ident_str[1..]) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? + ) + } else if ident_str.starts_with('$') { + OutgoingKind::Push(ident_str[1..].as_bytes().to_vec()) } else { - (server_str, false) - } + OutgoingKind::Normal( + Box::::try_from(ident_str) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? 
+ ) + }) }) - .and_then(|(server_str, is_appservice)| Box::::try_from(server_str) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid.")).map(|s| (s, is_appservice))) - .ok() - .and_then(|(server, is_appservice)| parts + .and_then(|outgoing_kind| parts .next() .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) - .ok() - .map(|pdu_id| (server, is_appservice, pdu_id)) + .map(|pdu_id| (outgoing_kind, pdu_id)) ) - .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, exponential_backoff) { + .ok() + .filter(|(outgoing_kind, _)| { + if last_failed_try.get(outgoing_kind).map_or(false, exponential_backoff) { return false; } - let mut prefix = if *is_appservice { - b"+".to_vec() - } else { - Vec::new() + let mut prefix = match outgoing_kind { + OutgoingKind::Appservice(serv) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(serv.as_bytes()); + p + }, + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(serv) => { + let mut p = vec![]; + p.extend_from_slice(serv.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); servercurrentpdus @@ -201,7 +271,17 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice)); + futures.push( + Self::handle_event( + outgoing_kind, + vec![pdu_id.into()], + &rooms, + &globals, + &appservice, + &pusher, + &account_data + ) + ); } } } @@ -210,6 +290,22 @@ impl Sending { }); } + pub fn send_push_pdu(&self, pdu_id: &[u8]) -> Result<()> { + // Make sure we don't cause utf8 errors when parsing to a String... + let pduid = String::from_utf8_lossy(pdu_id).as_bytes().to_vec(); + + // these are valid ServerName chars + // (byte.is_ascii_alphanumeric() || byte == b'-' || byte == b'.') + let mut key = b"$".to_vec(); + // keep each pdu push unique + key.extend_from_slice(pduid.as_slice()); + key.push(0xff); + key.extend_from_slice(pdu_id); + self.servernamepduids.insert(key, b"")?; + + Ok(()) + } + pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -230,95 +326,154 @@ impl Sending { } async fn handle_event( - server: Box, - is_appservice: bool, + kind: OutgoingKind, pdu_ids: Vec, - globals: &super::globals::Globals, - rooms: &super::rooms::Rooms, - appservice: &super::appservice::Appservice, - ) -> std::result::Result<(Box, bool), (Box, bool, Error)> { - if is_appservice { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database( - "Event in servernamepduids not found in db.", - ), - ) - })? 
- .to_any_event(), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); - appservice_server::send_request( - &globals, - appservice - .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error - appservice::event::push_events::v1::Request { - events: &pdu_jsons, - txn_id: &utils::random_string(16), - }, - ) - .await - .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) - } else { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - // TODO: check room version and remove event_id if needed - serde_json::from_str( - PduEvent::convert_to_outgoing_federation_event( - rooms - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database( - "Event in servernamepduids not found in db.", - ), - ) - })?, - ) - .json() - .get(), + rooms: &Rooms, + globals: &Globals, + appservice: &Appservice, + pusher: &PushData, + account_data: &AccountData, + ) -> std::result::Result { + match kind { + OutgoingKind::Appservice(server) => { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Box, Error)>( + rooms + .get_pdu_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database( + "[Appservice] Event in servernamepduids not found in ", + ), + ) + })? + .to_any_event(), ) - .expect("Raw<..> is always valid"), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + }) + .filter_map(|r| r.ok()) + .collect::>(); + appservice_server::send_request( + &globals, + appservice + .get_registration(server.as_str()) + .unwrap() + .unwrap(), // TODO: handle error + appservice::event::push_events::v1::Request { + events: &pdu_jsons, + txn_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| OutgoingKind::Appservice(server.clone())) + .map_err(|e| (OutgoingKind::Appservice(server.clone()), e)) + } + OutgoingKind::Push(id) => { + let pdus = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Vec, Error)>( + rooms + .get_pdu_from_id(pdu_id) + .map_err(|e| (id.clone(), e))? + .ok_or_else(|| { + ( + id.clone(), + Error::bad_database( + "[Push] Event in servernamepduids not found in db.", + ), + ) + })?, + ) + }) + .filter_map(|r| r.ok()) + .collect::>(); + dbg!(&pdus); + for pdu in &pdus { + for user in rooms.room_members(&pdu.room_id) { + dbg!(&user); + let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + for pusher in pusher + .get_pusher(&user) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? + { + let rules_for_user = account_data + .get::( + None, + &user, + EventType::PushRules, + ) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? 
+ .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); + dbg!(&pusher); + dbg!(&rules_for_user); - server_server::send_request( - &globals, - &*server, - send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus: &pdu_jsons, - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), - }, - ) - .await - .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + crate::database::pusher::send_push_notice( + &user, + &pusher, + rules_for_user, + pdu, + ) + .await + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + } + } + } + + Ok(OutgoingKind::Push(id)) + } + OutgoingKind::Normal(server) => { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (OutgoingKind, Error)>( + // TODO: check room version and remove event_id if needed + serde_json::from_str( + PduEvent::convert_to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? + .ok_or_else(|| { + ( + OutgoingKind::Normal(server.clone()), + Error::bad_database( + "[Normal] Event in servernamepduids not found in db.", + ), + ) + })?, + ) + .json() + .get(), + ) + .expect("Raw<..> is always valid"), + ) + }) + .filter_map(|r| r.ok()) + .collect::>(); + + server_server::send_request( + &globals, + &*server, + send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &pdu_jsons, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| OutgoingKind::Normal(server.clone())) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e)) + } } } - fn parse_servercurrentpdus(key: IVec) -> Result<(Box, IVec, bool)> { + fn parse_servercurrentpdus(key: IVec) -> Result<(OutgoingKind, IVec)> { let mut parts = key.splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); let pdu = parts @@ -330,19 +485,26 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with('+') { - (&server[1..], true) + Ok::<_, Error>(if server.starts_with('+') { + ( + OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + IVec::from(pdu), + ) + } else if server.starts_with('$') { + ( + OutgoingKind::Push(server.as_bytes().to_vec()), + IVec::from(pdu), + ) } else { - (&*server, false) - }; - - Ok::<_, Error>(( - Box::::try_from(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?, - IVec::from(pdu), - is_appservice, - )) + ( + OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + IVec::from(pdu), + ) + }) } pub async fn send_federation_request( From 73124629b7d4cdbddef36c52bfe5e494bad1ac01 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 10:14:09 -0500 Subject: [PATCH 0406/1727] Add general rules matching for pusher, calc unread msgs --- Cargo.lock | 16 ++ Cargo.toml | 2 +- src/database/pusher.rs | 412 +++++++++++++++++++++++++++++++++++++--- src/database/sending.rs | 129 +++++++------ src/server_server.rs | 7 +- 5 files changed, 469 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66f624c..859d854 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1534,6 +1534,7 @@ dependencies = [ "ruma-events", 
"ruma-federation-api", "ruma-identifiers", + "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", ] @@ -1680,6 +1681,21 @@ name = "ruma-identifiers-validation" version = "0.2.0" source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +[[package]] +name = "ruma-push-gateway-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + [[package]] name = "ruma-serde" version = "0.3.0" diff --git a/Cargo.toml b/Cargo.toml index 2c6c741..a8760c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 041085d..a1a6130 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -1,17 +1,26 @@ -use crate::{Error, PduEvent, Result}; +use crate::{Database, Error, PduEvent, Result}; +use log::{error, info, warn}; use ruma::{ - api::client::r0::push::{Pusher, PusherKind}, - events::{ - room::{ - member::MemberEventContent, - message::{MessageEventContent, TextMessageEventContent}, + api::{ + client::r0::push::{Pusher, PusherKind}, + push_gateway::send_event_notification::{ + self, + v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - EventType, + OutgoingRequest, }, - push::{PushCondition, Ruleset}, - UserId, + events::room::{ + member::{MemberEventContent, MembershipState}, + message::{MessageEventContent, TextMessageEventContent}, + power_levels::PowerLevelsEventContent, + }, + events::EventType, + push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, + uint, UInt, UserId, }; +use std::{convert::TryFrom, fmt::Debug, time::Duration}; + #[derive(Debug, Clone)] pub struct PushData { /// UserId + pushkey -> Pusher @@ -29,6 +38,15 @@ impl PushData { let mut key = sender.as_bytes().to_vec(); key.extend_from_slice(pusher.pushkey.as_bytes()); + // There are 2 kinds of pushers but the spec says: null deletes the pusher. 
+ if pusher.kind.is_none() { + return self + .senderkey_pusher + .remove(key) + .map(|_| ()) + .map_err(Into::into); + } + self.senderkey_pusher.insert( key, &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), @@ -46,15 +64,95 @@ impl PushData { Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) - .collect::>>() + .collect() + } +} + +pub async fn send_request( + globals: &crate::database::globals::Globals, + destination: &str, + request: T, +) -> Result +where + T: Debug, +{ + let destination = destination.replace("/_matrix/push/v1/notify", ""); + + let http_request = request + .try_into_http_request(&destination, Some("")) + .map_err(|e| { + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + })?; + + let mut reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + // TODO: we could keep this very short and let expo backoff do it's thing... + *reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + + // Because reqwest::Response -> http::Response is complicated: + match reqwest_response { + Ok(mut reqwest_response) => { + let status = reqwest_response.status(); + let mut http_response = http::Response::builder().status(status); + let headers = http_response.headers_mut().unwrap(); + + for (k, v) in reqwest_response.headers_mut().drain() { + if let Some(key) = k { + headers.insert(key, v); + } + } + + let status = reqwest_response.status(); + + let body = reqwest_response + .bytes() + .await + .unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }) // TODO: handle timeout + .into_iter() + .collect::>(); + + if status != 200 { + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from( + http_response + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + info!( + "Push gateway returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Push gateway returned bad response.") + }) + } + Err(e) => Err(e.into()), } } pub async fn send_push_notice( user: &UserId, - pusher: &Pusher, + unread: UInt, + pushers: &[Pusher], ruleset: Ruleset, pdu: &PduEvent, + db: &Database, ) -> Result<()> { for rule in ruleset.into_iter() { // TODO: can actions contain contradictory Actions @@ -69,8 +167,44 @@ pub async fn send_push_notice( match rule.rule_id.as_str() { ".m.rule.master" => {} - ".m.rule.suppress_notices" => {} - ".m.rule.invite_for_me" => {} + ".m.rule.suppress_notices" => { + if pdu.kind == EventType::RoomMessage + && pdu + .content + .get("msgtype") + .map_or(false, |ty| ty == "m.notice") + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.invite_for_me" => { + if let EventType::RoomMember = &pdu.kind { + if pdu.state_key.as_deref() == Some(user.as_str()) + && serde_json::from_value::(pdu.content.clone()) + .map_err(|_| Error::bad_database("PDU contained bad message content"))? 
+ .membership + == MembershipState::Invite + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + } ".m.rule.member_event" => { if let EventType::RoomMember = &pdu.kind { // TODO use this? @@ -88,7 +222,17 @@ pub async fn send_push_notice( json.to_string().contains(pattern) } _ => false, - }) {} + }) { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } } } } @@ -103,13 +247,71 @@ pub async fn send_push_notice( &msg_content { if body.contains(user.localpart()) { - send_notice(user, &pusher, &pdu).await?; + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + } + } + ".m.rule.tombstone" => { + if pdu.kind == EventType::RoomTombstone && pdu.state_key.as_deref() == Some("") { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.roomnotif" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. }) = + &msg_content + { + let power_level_cmp = |pl: PowerLevelsEventContent| { + &pl.notifications.room + <= pl.users.get(&pdu.sender).unwrap_or(&ruma::int!(0)) + }; + let deserialize = |pl: PduEvent| { + serde_json::from_value::(pl.content).ok() + }; + if body.contains("@room") + && db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? 
+ .map(|(_, pl)| pl) + .map(deserialize) + .flatten() + .map_or(false, power_level_cmp) + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; } } } } - ".m.rule.tombstone" => {} - ".m.rule.roomnotif" => {} ".m.rule.contains_user_name" => { if let EventType::RoomMessage = &pdu.kind { let msg_content = @@ -121,28 +323,180 @@ pub async fn send_push_notice( &msg_content { if body.contains(user.localpart()) { - send_notice(user, &pusher, &pdu).await?; + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; } } } } - ".m.rule.call" => {} - ".m.rule.encrypted_room_one_to_one" => {} - ".m.rule.room_one_to_one" => {} - ".m.rule.message" => {} - ".m.rule.encrypted" => {} + ".m.rule.call" => { + if pdu.kind == EventType::CallInvite { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.encrypted_room_one_to_one" => { + if db.rooms.room_members(&pdu.room_id).count() == 2 + && pdu.kind == EventType::RoomEncrypted + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.room_one_to_one" => { + if db.rooms.room_members(&pdu.room_id).count() == 2 + && pdu.kind == EventType::RoomMessage + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.message" => { + if pdu.kind == EventType::RoomMessage { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.encrypted" => { + if pdu.kind == EventType::RoomEncrypted { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } _ => {} } } Ok(()) } -async fn send_notice(_sender: &UserId, pusher: &Pusher, _event: &PduEvent) -> Result<()> { - if let Some(PusherKind::Http) = pusher.kind { - log::error!("YAHOOO"); - } else { - // EMAIL - todo!("send an email") +async fn send_notice( + unread: UInt, + pushers: &[Pusher], + tweaks: Vec, + event: &PduEvent, + db: &Database, +) -> Result<()> { + let (http, _emails): (Vec<&Pusher>, _) = pushers + .iter() + .partition(|pusher| pusher.kind == Some(PusherKind::Http)); + + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. 
can pusher/devices have conflicting formats + for pusher in http { + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = pusher.data.url.as_ref() { + url + } else { + error!("Http Pusher must have URL specified."); + continue; + }; + + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + device.data = Some(pusher.data.clone()); + + // this is not done if "event_id_only" is the format + device.tweaks = tweaks.clone(); + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == EventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + + if event.kind == EventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + } + + let name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = name.as_deref(); + let room_name = db + .rooms + .room_state_get(&event.room_id, &EventType::RoomName, "")? + .map(|(_, pdu)| match pdu.content.get("name") { + Some(serde_json::Value::String(s)) => Some(s.to_string()), + _ => None, + }) + .flatten(); + notifi.room_name = room_name.as_deref(); + + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } } + + // TODO: email + // for email in emails {} + Ok(()) } diff --git a/src/database/sending.rs b/src/database/sending.rs index a478501..48e427e 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -13,16 +13,11 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - ServerName, + uint, ServerName, UInt, }; use sled::IVec; use tokio::{select, sync::Semaphore}; -use super::{ - account_data::AccountData, appservice::Appservice, globals::Globals, pusher::PushData, - rooms::Rooms, -}; - #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(Box), @@ -52,11 +47,7 @@ impl Sending { pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); - let rooms = db.rooms.clone(); - let globals = db.globals.clone(); - let appservice = db.appservice.clone(); - let pusher = db.pusher.clone(); - let account_data = db.account_data.clone(); + let db = db.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -79,15 +70,7 @@ impl Sending { } for (outgoing_kind, pdus) in current_transactions { - futures.push(Self::handle_event( - outgoing_kind, - pdus, - &rooms, - &globals, - &appservice, - &pusher, - &account_data, - )); + futures.push(Self::handle_event(outgoing_kind, pdus, &db)); } let mut last_failed_try: HashMap = HashMap::new(); @@ -151,11 +134,7 @@ impl Sending { Self::handle_event( outgoing_kind.clone(), new_pdus, - &rooms, - &globals, - &appservice, - &pusher, - &account_data + 
&db, ) ); } else { @@ -275,11 +254,7 @@ impl Sending { Self::handle_event( outgoing_kind, vec![pdu_id.into()], - &rooms, - &globals, - &appservice, - &pusher, - &account_data + &db, ) ); } @@ -325,14 +300,11 @@ impl Sending { Ok(()) } + // TODO this is the whole DB but is it better to clone smaller parts than the whole thing?? async fn handle_event( kind: OutgoingKind, pdu_ids: Vec, - rooms: &Rooms, - globals: &Globals, - appservice: &Appservice, - pusher: &PushData, - account_data: &AccountData, + db: &Database, ) -> std::result::Result { match kind { OutgoingKind::Appservice(server) => { @@ -340,7 +312,7 @@ impl Sending { .iter() .map(|pdu_id| { Ok::<_, (Box, Error)>( - rooms + db.rooms .get_pdu_from_id(pdu_id) .map_err(|e| (server.clone(), e))? .ok_or_else(|| { @@ -357,8 +329,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); appservice_server::send_request( - &globals, - appservice + &db.globals, + db.appservice .get_registration(server.as_str()) .unwrap() .unwrap(), // TODO: handle error @@ -376,7 +348,7 @@ impl Sending { .iter() .map(|pdu_id| { Ok::<_, (Vec, Error)>( - rooms + db.rooms .get_pdu_from_id(pdu_id) .map_err(|e| (id.clone(), e))? .ok_or_else(|| { @@ -391,36 +363,67 @@ impl Sending { }) .filter_map(|r| r.ok()) .collect::>(); + dbg!(&pdus); + for pdu in &pdus { - for user in rooms.room_members(&pdu.room_id) { + // Redacted events are not notification targets (we don't send push for them) + if pdu.unsigned.get("redacted_because").is_some() { + continue; + } + for user in db.rooms.room_members(&pdu.room_id) { dbg!(&user); + let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - for pusher in pusher + let pushers = db + .pusher .get_pusher(&user) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + + let rules_for_user = db + .account_data + .get::(None, &user, EventType::PushRules) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? + .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); + + let unread: UInt = if let Some(last_read) = db + .rooms + .edus + .private_read_get(&pdu.room_id, &user) .map_err(|e| (OutgoingKind::Push(id.clone()), e))? { - let rules_for_user = account_data - .get::( - None, - &user, - EventType::PushRules, - ) + (db.rooms + .pdus_since(&user, &pdu.room_id, last_read) .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); - dbg!(&pusher); - dbg!(&rules_for_user); + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + matches!( + pdu.kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) + .count() as u32) + .into() + } else { + // Just return zero unread messages + uint!(0) + }; - crate::database::pusher::send_push_notice( - &user, - &pusher, - rules_for_user, - pdu, - ) - .await - .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - } + dbg!(&pushers); + + // dbg!(&rules_for_user); + + crate::database::pusher::send_push_notice( + &user, + unread, + &pushers, + rules_for_user, + pdu, + db, + ) + .await + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; } } @@ -434,7 +437,7 @@ impl Sending { // TODO: check room version and remove event_id if needed serde_json::from_str( PduEvent::convert_to_outgoing_federation_event( - rooms + db.rooms .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
.ok_or_else(|| { @@ -456,10 +459,10 @@ impl Sending { .collect::>(); server_server::send_request( - &globals, + &db.globals, &*server, send_transaction_message::v1::Request { - origin: globals.server_name(), + origin: db.globals.server_name(), pdus: &pdu_jsons, edus: &[], origin_server_ts: SystemTime::now(), diff --git a/src/server_server.rs b/src/server_server.rs index ad0a1a4..03952eb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,6 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{error, info, warn}; +use log::{info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -12,7 +11,7 @@ use ruma::{ VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, - query::get_profile_information, + query::get_profile_information::{self, v1::ProfileField}, transactions::send_transaction_message, }, OutgoingRequest, @@ -222,7 +221,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -async fn find_actual_destination( +pub(crate) async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &ServerName, ) -> (String, Option) { From e4dc7ea8ac658f66fb6c3fc2c98b7fbb48f6dd5f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 14:19:56 -0500 Subject: [PATCH 0407/1727] Prevent admin room from recursively spamming itself and user -> user --- src/client_server/push.rs | 5 ++- src/database/pusher.rs | 84 +++++++++++++++++++++++++-------------- src/database/rooms.rs | 3 +- src/database/sending.rs | 28 +++++++++---- 4 files changed, 80 insertions(+), 40 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 3a81679..e648849 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -94,7 +94,10 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )) } } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index a1a6130..c4f5801 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -182,7 +182,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.invite_for_me" => { @@ -201,7 +202,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -231,7 +234,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -255,7 +260,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + 
.await?; + break; } } } @@ -270,7 +277,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.roomnotif" => { @@ -307,7 +315,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -331,7 +341,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -346,7 +358,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.encrypted_room_one_to_one" => { @@ -361,7 +374,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.room_one_to_one" => { @@ -376,7 +390,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.message" => { @@ -389,7 +404,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.encrypted" => { @@ -402,12 +418,14 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } _ => {} } } + Ok(()) } @@ -417,6 +435,7 @@ async fn send_notice( tweaks: Vec, event: &PduEvent, db: &Database, + name: &str, ) -> Result<()> { let (http, _emails): (Vec<&Pusher>, _) = pushers .iter() @@ -436,10 +455,15 @@ async fn send_notice( }; let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - device.data = Some(pusher.data.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = Some(data_minus_url); - // this is not done if "event_id_only" is the format - device.tweaks = tweaks.clone(); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } let d = &[device]; let mut notifi = Notification::new(d); @@ -459,12 +483,13 @@ async fn send_notice( } if event_id_only { - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + error!("SEND PUSH NOTICE `{}`", name); + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -474,8 +499,8 @@ async fn send_notice( notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = name.as_deref(); + let user_name = db.users.displayname(&event.sender)?; + 
notifi.sender_display_name = user_name.as_deref(); let room_name = db .rooms .room_state_get(&event.room_id, &EventType::RoomName, "")? @@ -486,12 +511,13 @@ async fn send_notice( .flatten(); notifi.room_name = room_name.as_deref(); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + error!("SEND PUSH NOTICE Full `{}`", name); + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 19554f6..ac7d27d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,10 +27,9 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::Duration, }; -use super::{admin::AdminCommand, sending::Sending}; +use super::admin::AdminCommand; /// The unique identifier of each state group. /// diff --git a/src/database/sending.rs b/src/database/sending.rs index 48e427e..ce81e8c 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -364,17 +364,33 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - dbg!(&pdus); - for pdu in &pdus { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - for user in db.rooms.room_members(&pdu.room_id) { - dbg!(&user); + // Skip events that came from the admin room + if db + .rooms + .room_aliases(&pdu.room_id) + .any(|alias| match alias { + Ok(a) => a.as_str().starts_with("#admins:"), + _ => false, + }) + || pdu.sender.as_str().starts_with("@conduit:") + { + continue; + } + + for user in db.rooms.room_members(&pdu.room_id) { let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + + // Don't notify the user of their own events + if user == pdu.sender { + continue; + } + let pushers = db .pusher .get_pusher(&user) @@ -410,10 +426,6 @@ impl Sending { uint!(0) }; - dbg!(&pushers); - - // dbg!(&rules_for_user); - crate::database::pusher::send_push_notice( &user, unread, From 6de5b3c2a06545556fac530905edd976f6c01d84 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Dec 2020 13:41:00 +0100 Subject: [PATCH 0408/1727] Update repository link in crate metadata --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 76c52e5..4bf9247 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ description = "A Matrix homeserver written in Rust" license = "Apache-2.0" authors = ["timokoesters "] homepage = "https://conduit.rs" -repository = "https://git.koesters.xyz/timo/conduit" +repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.1.0" edition = "2018" From a61b1cef5bfa311484d84f4359262b46dd5a0a3c Mon Sep 17 00:00:00 2001 From: Valkum Date: Thu, 4 Feb 2021 23:51:20 +0100 Subject: [PATCH 0409/1727] Fix binary name typo --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 0ef8f90..24ee9ea 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . 
-RUN test -e target/release/cond_test || cargo build --release --offline +RUN test -e target/release/conduit || cargo build --release --offline FROM valkum/docker-rust-ci:latest WORKDIR /workdir From 1d7207b39e15b249ce5b704c04d44cb442228168 Mon Sep 17 00:00:00 2001 From: Valkum Date: Fri, 5 Feb 2021 02:06:14 +0100 Subject: [PATCH 0410/1727] Sync are-we-synapse with dendrite --- tests/sytest/are-we-synapse-yet.list | 1258 +++++++++++++------------- tests/sytest/are-we-synapse-yet.py | 38 +- 2 files changed, 666 insertions(+), 630 deletions(-) diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list index cdc280a..9909198 100644 --- a/tests/sytest/are-we-synapse-yet.list +++ b/tests/sytest/are-we-synapse-yet.list @@ -17,17 +17,17 @@ reg POST /register rejects registration of usernames with '£' reg POST /register rejects registration of usernames with 'é' reg POST /register rejects registration of usernames with '\n' reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON +reg POST /r0/admin/register with shared secret +reg POST /r0/admin/register admin with shared secret +reg POST /r0/admin/register with shared secret downcases capitals +reg POST /r0/admin/register with shared secret disallows symbols +reg POST rejects invalid utf-8 in JSON log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected +log POST /login can log in as a user +log POST /login returns the same device_id as that in the request +log POST /login can log in as a user with just the local part of the id +log POST /login as non-existing user is rejected +log POST /login wrong password is rejected log Interactive authentication types include SSO log Can perform interactive authentication with SSO log The user must be consistent through an interactive authentication session with SSO @@ -39,18 +39,18 @@ pro PUT /profile/:user_id/displayname sets my name pro GET /profile/:user_id/displayname publicly accessible pro PUT /profile/:user_id/avatar_url sets my avatar pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} +dev GET /device/{deviceId} dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT /device/{deviceId} updates device fields +dev GET /devices +dev PUT /device/{deviceId} updates device fields dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session +dev DELETE /device/{deviceId} +dev DELETE /device/{deviceId} requires UI auth user to match device owner +dev DELETE /device/{deviceId} with no body gives a 401 +dev The deleted device must be consistent through an interactive auth session dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence +pre GET 
/presence/:user_id/status fetches initial status +pre PUT /presence/:user_id/status updates my presence crm POST /createRoom makes a public room crm POST /createRoom makes a private room crm POST /createRoom makes a private room with invites @@ -62,21 +62,21 @@ crm POST /createRoom rejects attempts to create rooms with numeric versions crm POST /createRoom rejects attempts to create rooms with unknown versions crm POST /createRoom ignores attempts to set the room version via creation_content mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room +mem GET /rooms/:room_id/joined_members fetches my membership +v1s GET /rooms/:room_id/initialSync fetches initial sync state +pub GET /publicRooms lists newly-created room ali GET /directory/room/:room_alias yields room ID mem GET /joined_rooms lists newly-created room rst POST /rooms/:room_id/state/m.room.name sets name rst GET /rooms/:room_id/state/m.room.name gets name rst POST /rooms/:room_id/state/m.room.topic sets topic rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state +rst GET /rooms/:room_id/state fetches entire room state crm POST /createRoom with creation content ali PUT /directory/room/:room_alias creates alias -nsp GET /rooms/:room_id/aliases lists aliases +nsp GET /rooms/:room_id/aliases lists aliases jon POST /rooms/:room_id/join can join a room jon POST /join/:room_alias can join a room jon POST /join/:room_id can join a room @@ -89,748 +89,778 @@ snd POST /rooms/:room_id/send/:event_type sends a message snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification +get GET /rooms/:room_id/messages lazy loads members correctly +typ PUT /rooms/:room_id/typing/:user_id sets typing notification +typ Typing notifications don't leak (3 subtests) rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels rst PUT /rooms/:room_id/state/m.room.power_levels can set levels rst PUT power_levels should not explode if the old power levels were empty rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts +rct POST /rooms/:room_id/receipt can create receipts red POST /rooms/:room_id/read_markers can create read marker -med POST /media/v1/upload can create an upload -med GET /media/v1/download can fetch the value again -cap GET /capabilities is present and well formed for registered user +med POST /media/r0/upload can create an upload +med GET /media/r0/download can fetch the value again +cap GET /capabilities is present and well formed for registered user cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with 
inhibit_login inhibits login +reg Register with a recaptcha +reg registration is idempotent, without username specified +reg registration is idempotent, with username specified +reg registration remembers parameters +reg registration accepts non-ascii passwords +reg registration with inhibit_login inhibits login reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can login with new user via CAS -lox Can logout current device -lox Can logout all devices +reg Can register using an email address +log Can login with 3pid and password using m.login.password +log login types include SSO +log /login/cas/redirect redirects if the old m.login.cas login type is listed +log Can login with new user via CAS +lox Can logout current device +lox Can logout all devices lox Request to logout with invalid an access token is rejected lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password +log After changing password, can't log in with old password +log After changing password, can log in with new password +log After changing password, existing session still works +log After changing password, a different session no longer works by default +log After changing password, different sessions can optionally be kept +psh Pushers created with a different access token are deleted on password change +psh Pushers created with a the same access token are not deleted on password change +acc Can deactivate account +acc Can't deactivate account with wrong password +acc After deactivating account, can't log in with password acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events +v1s initialSync sees my presence status +pre Presence change reports an event to myself +pre Friends presence changes reports events crm Room creation reports m.room.create to myself crm Room creation reports m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent +rst Setting room topic reports m.room.topic to myself +v1s Global initialSync +v1s Global initialSync with limit=0 gives no messages +v1s Room initialSync +v1s Room initialSync with limit=0 gives no messages +rst Setting state twice is idempotent +jon Joining room twice is idempotent syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync +v1s New room members see existing users' presence in room initialSync 
syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms +syn Existing members see new members' presence +v1s All room members see all room members' presence in global initialSync +f,jon Remote users can join room by alias +syn New room members see their own join event +v1s New room members see existing members' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new member's presence +v1s New room members see first user's profile information in global initialSync +v1s New room members see first user's profile information in per-room initialSync +f,jon Remote users may not join unfederated rooms syn Local room members see posted message events v1s Fetching eventstream a second time doesn't yield the message again syn Local non-members don't see posted message events -get Local room members can get room messages +get Local room members can get room messages f,syn Remote room members also see posted message events -f,get Remote room members can get room messages +f,get Remote room members can get room messages get Message history can be paginated f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired +eph Ephemeral messages received from clients are correctly expired ali Room aliases can contain Unicode f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases +ali Canonical alias can be set +ali Canonical alias can include alt_aliases ali Regular users can add and delete aliases in the default room configuration ali Regular users can add and delete aliases when m.room.aliases is restricted ali Deleting a non-existent alias should return a 404 ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel +ali Users with sufficient power-level can delete other's 
aliases +ali Can delete canonical alias +ali Alias creators can delete alias with no ops +ali Alias creators can delete canonical alias with no ops +ali Only room members can list aliases of a room +inv Can invite users to invite-only rooms +inv Uninvited users cannot join the room +inv Invited user can reject invite +f,inv Invited user can reject invite over federation +f,inv Invited user can reject invite over federation several times +inv Invited user can reject invite for empty room +f,inv Invited user can reject invite over federation for empty room +inv Invited user can reject local invite after originator leaves +inv Invited user can see room metadata +f,inv Remote invited user can see room metadata +inv Users cannot invite themselves to a room +inv Users cannot invite a user that is already in the room +ban Banned user is kicked and may not rejoin until unbanned +f,ban Remote banned user is kicked and may not rejoin until unbanned +ban 'ban' event respects room powerlevel +plv setting 'm.room.name' respects room powerlevel plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) plv Unprivileged users can set m.room.topic if it only needs level 0 plv Users cannot set ban powerlevel higher than their own (2 subtests) plv Users cannot set kick powerlevel higher than their own (2 subtests) plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) +v1s Check that event streams started after a client joined a room work (SYT-1) +v1s Event stream catches up fully after many messages +xxx POST /rooms/:room_id/redact/:event_id as power user redacts message +xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message +xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message +xxx POST /redact disallows redaction of event in different room +xxx Redaction of a redaction redacts the redaction reason +v1s A departed room is still included in /initialSync (SPEC-216) +v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) +rst Can get rooms/{roomId}/state for a departed room (SPEC-216) mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) +get Can get rooms/{roomId}/messages for a departed room (SPEC-216) +rst Can get 'm.room.name' state for a departed room (SPEC-216) syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a 
private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join +3pd Can invite existing 3pid +3pd Can invite existing 3pid with no ops into a private room +3pd Can invite existing 3pid in createRoom +3pd Can invite unbound 3pid +f,3pd Can invite unbound 3pid over federation +3pd Can invite unbound 3pid with no ops into a private room +f,3pd Can invite unbound 3pid over federation with no ops into a private room +f,3pd Can invite unbound 3pid over federation with users from both servers +3pd Can accept unbound 3pid invite after inviter leaves +3pd Can accept third party invite with /join 3pd 3pid invite join with wrong but valid signature are rejected 3pd 3pid invite join valid signature but revoked keys are rejected 3pd 3pid invite join valid signature but unreachable ID server are rejected gst Guest user cannot call /events globally gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access +gst Guest users can send messages to guest_access rooms if joined +gst Guest user calling /events doesn't tightloop +gst Guest users are kicked from guest_access rooms on revocation of guest_access gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user +gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation +gst Guest user can upgrade to fully featured user gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis +pub GET /publicRooms lists rooms +pub GET /publicRooms includes avatar URLs +gst Guest users can accept invites to private rooms over federation +gst Guest users denied access over federation if guest access prohibited +mem Room members can override their displayname on a room-specific basis mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events +mem Users cannot kick users from a room they are not in +mem Users cannot kick users who have already left a room +typ Typing notification sent to local room members +f,typ Typing notifications also sent to remote room members +typ Typing can be explicitly stopped +rct Read receipts are visible to /initialSync +rct Read receipts are sent as events +rct Receipts must be m.read +pro displayname updates affect room member events +pro avatar_url updates affect room member events gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" 
allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users gst Guest non-joined user cannot call /events on shared room gst Guest non-joined user cannot call /events on invited room gst Guest non-joined user cannot call /events on joined room gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room +gst Guest non-joined user can call /events on world_readable room gst Guest non-joined users can get state for world_readable rooms gst Guest non-joined users can get individual state for world_readable rooms gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms +gst Guest non-joined users can room initialSync for world_readable rooms gst Guest non-joined users can get individual state for world_readable rooms after leaving gst Guest non-joined users cannot send messages to guest_access rooms if not joined gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined +gst Guest users can sync from shared guest_access rooms if joined +gst Guest users can sync from invited guest_access rooms if joined +gst Guest users can sync from joined guest_access rooms if joined gst Guest users can sync from default guest_access rooms if joined ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users +ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users +ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users +ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users +ath m.room.history_visibility == "default" allows/forbids appropriately for Real users ath Real non-joined user cannot call /events on shared room ath Real non-joined user cannot call /events on invited room ath Real non-joined user cannot call /events on joined room ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on world_readable room +ath Real non-joined user can call /events on world_readable room ath Real non-joined users can get state for world_readable rooms ath Real non-joined users can get individual state for world_readable rooms ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can 
room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving +ath Real non-joined users can room initialSync for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms after leaving ath Real non-joined users cannot send messages to guest_access rooms if not joined ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined +ath Real users can sync from shared guest_access rooms if joined +ath Real users can sync from invited guest_access rooms if joined +ath Real users can sync from joined guest_access rooms if joined ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries +ath Only see history_visibility changes on boundaries f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from +fgt Forgotten room messages cannot be paginated +fgt Forgetting room does not show up in v2 /sync +fgt Can forget room you've been kicked from fgt Can't forget room you're still in -mem Can re-join room if re-invited -ath Only original members of the room can see messages from erased users +fgt Can re-join room if re-invited +ath Only original members of the room can see messages from erased users mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works +mem /joined_members return joined members +ctx /context/ on joined room works +ctx /context/ on non world readable room does not work +ctx /context/ returns correct number of events +ctx /context/ with lazy_load_members filter works get /event/ on joined room works get /event/ on non world readable room does not work get /event/ does not allow access to events before the user joined mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local 
and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct +mem Can get rooms/{roomId}/members at a given point +mem Can filter rooms/{roomId}/members +upg /upgrade creates a new room +upg /upgrade should preserve room visibility for public rooms +upg /upgrade should preserve room visibility for private rooms +upg /upgrade copies >100 power levels to the new room +upg /upgrade copies the power levels to the new room +upg /upgrade preserves the power level of the upgrading user in old and new rooms +upg /upgrade copies important state to the new room +upg /upgrade copies ban events to the new room +upg local user has push rules copied to upgraded room +f,upg remote user has push rules copied to upgraded room +upg /upgrade moves aliases to the new room +upg /upgrade moves remote aliases to the new room +upg /upgrade preserves direct room state +upg /upgrade preserves room federation ability +upg /upgrade restricts power levels in the old room +upg /upgrade restricts power levels in the old room when the old PLs are unusual +upg /upgrade to an unknown version is rejected +upg /upgrade is rejected if the user can't send state events +upg /upgrade of a bogus room fails gracefully +upg Cannot send tombstone event that points to the same room +f,upg Local and remote users' homeservers remove a room from their public directory on upgrade +rst Name/topic keys are correct f,pub Can get remote public room list pub Can paginate public room list -pub Can search public room list +pub Can search public room list syn Can create filter syn Can download filter syn Can sync syn Can sync a joined room syn Full state sync includes joined rooms syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id +syn Newly joined room has correct timeline in incremental sync +syn Newly joined room includes presence in incremental sync +syn Get presence for newly joined members in incremental sync +syn Can sync a room with a single message +syn Can sync a room with a message with a transaction id syn A message sent after an initial sync appears in the timeline of an incremental sync. -syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync +syn A filtered timeline reaches its limit +syn Syncing a new room with a large timeline limit isn't limited +syn A full_state incremental update returns only recent timeline +syn A prev_batch token can be used in the v1 messages API +syn A next_batch token can be used in the v1 messages API +syn User sees their own presence in a sync syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. -syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync +syn User sees updates to presence from other users in the incremental sync. 
+syn State is included in the timeline in the initial sync +f,syn State from remote users is included in the state in the initial sync syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync +syn Changes to state are included in an gapped incremental sync +f,syn State from remote users is included in the timeline in an incremental sync +syn A full_state incremental update returns all state +syn When user joins a room the state is included in the next sync +syn A change to displayname should not result in a full state sync syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync +syn When user joins a room the state is included in a gapped sync +syn When user joins and leaves a room in the same batch, the full state is still included in the next sync syn Current state appears in timeline in private history syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after +syn Current state appears in timeline in private history with many messages after syn Rooms a user is invited to appear in an initial sync syn Rooms a user is invited to appear in an incremental sync syn Newly joined room is included in an incremental sync after invite syn Sync can be polled for updates syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync +syn Left rooms appear in the leave section of sync syn Newly left rooms appear in the leave section of incremental sync syn We should see our own leave event, even if history_visibility is restricted (SYN-662) syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) syn Newly left rooms appear in the leave section of gapped sync syn Previously left rooms don't appear in the leave section of sync syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync +syn Archived rooms only contain history from before the user left +syn Banned rooms appear in the leave section of sync syn Newly banned rooms appear in the leave section of incremental sync syn Newly banned rooms appear in the leave section of incremental sync syn Typing events appear in initial sync syn Typing events appear in incremental sync syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated +syn Read receipts appear in initial v2 /sync +syn New read receipts appear in incremental v2 /sync +syn Can pass a JSON filter as a query parameter +syn Can request federation format via the filter +syn Read markers appear in incremental v2 /sync +syn Read markers appear in initial v2 /sync +syn 
Read markers can be updated syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes -rmv User can create and send/receive messages in a room with version 1 +syn The only membership state included in an initial sync is for all the senders in the timeline +syn The only membership state included in an incremental sync is for senders in the timeline +syn The only membership state included in a gapped incremental sync is for senders in the timeline +syn Gapped incremental syncs include all state changes +syn Old leaves are present in gapped incremental syncs +syn Leaves are present in non-gapped incremental syncs +syn Old members are included in gappy incr LL sync if they start speaking +syn Members from the gap are included in gappy incr LL sync +syn We don't send redundant membership state across incremental syncs by default +syn We do send redundant membership state across incremental syncs if asked +syn Unnamed room comes with a name summary +syn Named room comes with just joined member count summary +syn Room summary only has 5 heroes +syn Room summary counts change when membership changes +rmv User can create and send/receive messages in a room with version 1 rmv User can create and send/receive messages in a room with version 1 (2 subtests) rmv local user can join room with version 1 rmv User can invite local user to room with version 1 rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 +rmv User can invite remote user to room with version 1 rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 +rmv Can reject invites over federation for rooms with version 1 +rmv Can receive redactions from regular users over federation in room version 1 +rmv User can create and send/receive messages in a room with version 2 rmv User can create and send/receive messages in a room with version 2 (2 subtests) rmv local user can join room with version 2 rmv User can invite local user to room with version 2 rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 +rmv User can invite remote user to room with version 2 rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in 
a room with version 3 +rmv Can reject invites over federation for rooms with version 2 +rmv Can receive redactions from regular users over federation in room version 2 +rmv User can create and send/receive messages in a room with version 3 rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 +rmv local user can join room with version 3 +rmv User can invite local user to room with version 3 +rmv remote user can join room with version 3 +rmv User can invite remote user to room with version 3 +rmv Remote user can backfill in a room with version 3 +rmv Can reject invites over federation for rooms with version 3 +rmv Can receive redactions from regular users over federation in room version 3 +rmv User can create and send/receive messages in a room with version 4 rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 +rmv local user can join room with version 4 +rmv User can invite local user to room with version 4 +rmv remote user can join room with version 4 +rmv User can invite remote user to room with version 4 +rmv Remote user can backfill in a room with version 4 +rmv Can reject invites over federation for rooms with version 4 +rmv Can receive redactions from regular users over federation in room version 4 +rmv User can create and send/receive messages in a room with version 5 rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their own presence in /initialSync (SYT-34) -dvk Can upload device keys +rmv local user can join room with version 5 +rmv User can invite local user to room with version 5 +rmv remote user can join room with version 5 +rmv User can invite remote user to room with version 5 +rmv Remote user can backfill in a room with version 5 +rmv Can reject invites over federation for rooms with version 5 +rmv Can receive redactions from regular users over federation in 
room version 5 +rmv User can create and send/receive messages in a room with version 6 +rmv User can create and send/receive messages in a room with version 6 (2 subtests) +rmv local user can join room with version 6 +rmv User can invite local user to room with version 6 +rmv remote user can join room with version 6 +rmv User can invite remote user to room with version 6 +rmv Remote user can backfill in a room with version 6 +rmv Can reject invites over federation for rooms with version 6 +rmv Can receive redactions from regular users over federation in room version 6 +rmv Inbound federation rejects invites which include invalid JSON for room version 6 +rmv Outbound federation rejects invite response which include invalid JSON for room version 6 +rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 +rmv Server rejects invalid JSON in a version 6 room +pre Presence changes are reported to local room members +f,pre Presence changes are also reported to remote room members +pre Presence changes to UNAVAILABLE are reported to local room members +f,pre Presence changes to UNAVAILABLE are reported to remote room members +v1s Newly created users see their own presence in /initialSync (SYT-34) +dvk Can upload device keys dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dvk Can create backup version -dvk Can update backup version -dvk Responds correctly when backup is empty -dvk Can backup keys -dvk Can update keys with better versions -dvk Will not update keys with worse versions -dvk Will not back up to an old backup version -dvk Can delete backup -dvk Deleted & recreated backups are empty -dvk Can create more than 10 backup versions -dvk Can upload self-signing keys -dvk Fails to upload self-signing keys with no auth -dvk Fails to upload self-signing key without master key -dvk Changing master key notifies local users -dvk Changing user-signing key notifies local users -f,dvk can fetch self-signing keys over federation -f,dvk uploading self-signing key notifies over federation -f,dvk uploading signed devices gets propagated over federation +dvk Can query device keys using POST +dvk Can query specific device keys using POST +dvk query for user with no keys returns empty key dict +dvk Can claim one time key using POST +f,dvk Can query remote device keys using POST +f,dvk Can 
claim remote one time key using POST +dvk Local device key changes appear in v2 /sync +dvk Local new device changes appear in v2 /sync +dvk Local delete device changes appear in v2 /sync +dvk Local update device changes appear in v2 /sync +dvk Can query remote device keys using POST after notification +f,dev Device deletion propagates over federation +f,dev If remote user leaves room, changes device and rejoins we see update in sync +f,dev If remote user leaves room we no longer receive device updates +dvk Local device key changes appear in /keys/changes +dvk New users appear in /keys/changes +f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes +dvk Get left notifs in sync and /keys/changes when other user leaves +dvk Get left notifs for other users in sync and /keys/changes when user leaves +f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes +dkb Can create backup version +dkb Can update backup version +dkb Responds correctly when backup is empty +dkb Can backup keys +dkb Can update keys with better versions +dkb Will not update keys with worse versions +dkb Will not back up to an old backup version +dkb Can delete backup +dkb Deleted & recreated backups are empty +dkb Can create more than 10 backup versions +xsk Can upload self-signing keys +xsk Fails to upload self-signing keys with no auth +xsk Fails to upload self-signing key without master key +xsk Changing master key notifies local users +xsk Changing user-signing key notifies local users +f,xsk can fetch self-signing keys over federation +f,xsk uploading self-signing key notifies over federation +f,xsk uploading signed devices gets propagated over federation tag Can add tag tag Can remove tag tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync +v1s Tags appear in the v1 /events stream +v1s Tags appear in the v1 /initalSync +v1s Tags appear in the v1 room initial sync tag Tags appear in an initial v2 /sync tag Newly updated tags appear in an incremental v2 /sync tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events +tag local user has tags copied to the new room +f,tag remote user has tags copied to the new room +sch Can search for an event by body +sch Can get context around search results +sch Can back-paginate search results +sch Search works across an upgraded room and its predecessor +sch Search results with rank ordering do not include redacted events +sch Search results with recent ordering do not include redacted events acc Can add account data acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync +acc Can get account data without syncing +acc Can get room account data without syncing +v1s Latest account 
data comes down in /initialSync +v1s Latest account data comes down in room initialSync +v1s Account data appears in v1 /events stream +v1s Room account data appears in v1 /events stream +acc Latest account data appears in v2 /sync acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -std Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two devices -std Wildcard device messages wake up /sync -std Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync +oid Can generate a openid access_token that can be exchanged for information about a user +oid Invalid openid access tokens are rejected +oid Requests to userinfo without access tokens are rejected +std Can send a message directly to a device using PUT /sendToDevice +std Can recv a device message using /sync +std Can recv device messages until they are acknowledged +std Device messages with the same txn_id are deduplicated +std Device messages wake up /sync +std Can recv device messages over federation +fsd Device messages over federation wake up /sync +std Can send messages with a wildcard device id +std Can send messages with a wildcard device id to two devices +std Wildcard device messages wake up /sync +fsd Wildcard device messages over federation wake up /sync +adm /whois +nsp /purge_history +nsp /purge_history by ts +nsp Can backfill purged history +nsp Shutdown room +ign Ignore user in existing room +ign Ignore invite in full sync +ign Ignore invite in incremental sync fky Checking local federation server fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected +fky Federation key API can act as a notary server via a GET request +fky Federation key API can act as a notary server via a POST request +fky Key notary server should return an expired key if it can't find any others +fky Key notary server must not overwrite a valid key with a spurious result from the origin server +fqu Non-numeric ports in server names are rejected fqu Outbound federation can query profile data fqu Inbound federation can query profile data fqu Outbound federation can query room alias directory fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join +fsj Outbound federation can query v1 /send_join fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can 
receive v1 /send_join +fmj Outbound federation passes make_join failures through to the client +fsj Inbound federation can receive v1 /send_join fsj Inbound federation can receive v2 /send_join fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers +fsj Inbound /v1/send_join rejects incorrectly-signed joins +fsj Inbound /v1/send_join rejects joins from other servers fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support +frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support +frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail +frv Outbound federation correctly handles unsupported room versions +frv A pair of servers can establish a join in a v2 room +fsj Outbound federation rejects send_join responses with no m.room.create event +frv Outbound federation rejects m.room.create events with an unknown room version +fsj Event with an invalid signature in the send_join response should not cause room join to fail +fsj Inbound: send_join rejects invalid JSON for room version 6 fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive 
invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room +fed Inbound federation can receive events +fed Inbound federation can receive redacted events +fed Ephemeral messages received from servers are correctly expired +fed Events whose auth_events are in the wrong room do not mess up the room state +fed Inbound federation can return events +fed Inbound federation redacts events from erased users +fme Outbound federation can request missing events +fme Inbound federation can return missing events for world_readable visibility +fme Inbound federation can return missing events for shared visibility +fme Inbound federation can return missing events for invite visibility +fme Inbound federation can return missing events for joined visibility +fme outliers whose auth_events are in a different room are correctly rejected +fbk Outbound federation can backfill events +fbk Inbound federation can backfill events +fbk Backfill checks the events requested belong to the room +fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +fiv Outbound federation can send invites via v1 API +fiv Outbound federation can send invites via v2 API +fiv Inbound federation can receive invites via v1 API +fiv Inbound federation can receive invites via v2 API +fiv Inbound federation can receive invite and reject when remote replies with a 403 +fiv Inbound federation can receive invite and reject when remote replies with a 500 +fiv Inbound federation can receive invite and reject when remote is unreachable +fiv Inbound federation rejects invites which are not signed by the sender +fiv Inbound federation can receive invite rejections +fiv Inbound federation rejects incorrectly-signed invite rejections +fsl Inbound /v1/send_leave rejects leaves from other servers +fst Inbound federation can get state for a room fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room +fst Inbound federation can get state_ids for a room fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update 
-fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fed Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name +fst Federation rejects inbound events where the prev_events cannot be found +fst Room state at a rejected message event is the same as its predecessor +fst Room state at a rejected state event is the same as its predecessor +fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state +fst Federation handles empty auth_events in state_ids sanely +fst Getting state checks the events requested belong to the room +fst Getting state IDs checks the events requested belong to the room +fst Should not be able to take over the room by pretending there is no PL event +fpb Inbound federation can get public room list +fed Outbound federation sends receipts +fed Inbound federation rejects receipts from wrong remote +fed Inbound federation ignores redactions from invalid servers room > v3 +fed An event which redacts an event in a different room should be ignored +fed An event which redacts itself should be ignored +fed A pair of events which redact each other should be ignored +fdk Local device key changes get to remote servers +fdk Server correctly handles incoming m.device_list_update +fdk Server correctly resyncs when client query keys and there is no remote cache +fdk Server correctly resyncs when server leaves and rejoins a room +fdk Local device key changes get to remote servers with correct prev_id +fdk Device list doesn't change if remote server is down +fdk If a device list update goes missing, the server resyncs on the next one +fst Name/topic keys are correct +fau Remote servers cannot set power levels in rooms without existing powerlevels +fau Remote servers should reject attempts by non-creators to set the power levels +fau Inbound federation rejects typing notifications from wrong remote +fau Users cannot set notifications powerlevel higher than their own +fed Forward extremities remain so even after the next 
events are populated as outliers +fau Banned servers cannot send events +fau Banned servers cannot /make_join +fau Banned servers cannot /send_join +fau Banned servers cannot /make_leave +fau Banned servers cannot /send_leave +fau Banned servers cannot /invite +fau Banned servers cannot get room state +fau Banned servers cannot get room state ids +fau Banned servers cannot backfill +fau Banned servers cannot /event_auth +fau Banned servers cannot get missing events +fau Server correctly handles transactions that break edu limits +fau Inbound federation correctly soft fails events +fau Inbound federation accepts a second soft-failed event +fau Inbound federation correctly handles soft failed events as extremities +med Can upload with Unicode file name +med Can download with Unicode file name locally +f,med Can download with Unicode file name over federation +med Alternative server names do not cause a routing loop +med Can download specifying a different Unicode file name med Can upload without a file name med Can download without a file name locally -f,med Can download without a file name over federation +f,med Can download without a file name over federation med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name +med Can download file 'ascii' +med Can download file 'name with spaces' +med Can download file 'name;with;semicolons' +med Can download specifying a different ASCII file name med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add 
user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login +med Can fetch images in room +med POSTed media can be thumbnailed +f,med Remote media can be thumbnailed +med Test URL preview +med Can read configuration endpoint +nsp Can quarantine media in rooms +udr User appears in user directory +udr User in private room doesn't appear in user directory +udr User joining then leaving public room appears and dissappears from directory +udr Users appear/disappear from directory when join_rules are changed +udr Users appear/disappear from directory when history_visibility are changed +udr Users stay in directory when join_rules are changed but history_visibility is world_readable +f,udr User in remote room doesn't appear in user directory after server left room +udr User directory correctly update on display name change +udr User in shared private room does appear in user directory +udr User in shared private room does appear in user directory until leave +udr User in dir while user still shares private rooms +nsp Create group +nsp Add group rooms +nsp Remove group rooms +nsp Get local group profile +nsp Get local group users +nsp Add/remove local group rooms +nsp Get local group summary +nsp Get remote group profile +nsp Get remote group users +nsp Add/remove remote group rooms +nsp Get remote group summary +nsp Add local group users +nsp Remove self from local group +nsp Remove other from local group +nsp Add remote group users +nsp Remove self from remote group +nsp Listing invited users of a remote group when not a member returns a 403 +nsp Add group category +nsp Remove group category +nsp Get group categories +nsp Add group role +nsp Remove group role +nsp Get group roles +nsp Add room to group summary +nsp Adding room to group summary keeps room_id when fetching rooms in group +nsp Adding multiple rooms to group summary have correct order +nsp Remove room from group summary +nsp Add room to group summary with category +nsp Remove room from group summary with category +nsp Add user to group summary +nsp Adding multiple users to group summary have correct order +nsp Remove user from group summary +nsp Add user to group summary with role +nsp Remove user from group summary with role +nsp Local group invites come down sync +nsp Group creator sees group in sync +nsp Group creator sees group in initial sync +nsp Get/set local group publicity +nsp Bulk get group publicity +nsp Joinability comes down summary +nsp Set group joinable and join it +nsp Group is not joinable by default +nsp Group is 
joinable over federation +nsp Room is transitioned on local and remote groups upon room upgrade +3pd Can bind 3PID via home server +3pd Can bind and unbind 3PID via homeserver +3pd Can unbind 3PID via homeserver when bound out of band +3pd 3PIDs are unbound after account deactivation +3pd Can bind and unbind 3PID via /unbind by specifying the identity server +3pd Can bind and unbind 3PID via /unbind without specifying the identity server +app AS can create a user +app AS can create a user with an underscore +app AS can create a user with inhibit_login app AS cannot create users outside its own namespace app Regular users cannot register within the AS namespace -app AS can make room aliases +app AS can make room aliases app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users +app AS-ghosted users can use rooms via AS +app AS-ghosted users can use rooms themselves +app Ghost user must register before joining room +app AS can set avatar for ghosted users +app AS can set displayname for ghosted users app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering +app Inviting an AS-hosted user asks the AS server +app Accesing an AS-hosted room alias asks the AS server +app Events in rooms with AS-hosted room aliases are sent to AS server +app AS user (not ghost) can join room without registering app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules +app HS provides query metadata +app HS can provide query metadata on a single protocol +app HS will proxy request for 3PU mapping +app HS will proxy request for 3PL mapping +app AS can publish rooms in their own list +app AS and main public room lists are separate +app AS can deactivate a user +psh Test that a message is pushed +psh Invites are pushed +psh Rooms with names 
are correctly named in pushed +psh Rooms with canonical alias are correctly named in pushed +psh Rooms with many users are correctly pushed +psh Don't get pushed for rooms you've muted +psh Rejected events are not pushed +psh Can add global push rule for room +psh Can add global push rule for sender +psh Can add global push rule for content +psh Can add global push rule for override +psh Can add global push rule for underride +psh Can add global push rule for content +psh New rules appear before old rules by default +psh Can add global push rule before an existing rule +psh Can add global push rule after an existing rule +psh Can delete a push rule +psh Can disable a push rule +psh Adding the same push rule twice is idempotent +psh Messages that notify from another user increment unread notification count +psh Messages that highlight from another user increment unread highlight count +psh Can change the actions of default rules psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule +psh Can change the actions of a user specified rule psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers +psh Can fetch a user's pushers psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable default rules +psh Adding a push rule wakes up an incremental /sync +psh Disabling a push rule wakes up an incremental /sync +psh Enabling a push rule wakes up an incremental /sync +psh Setting actions for a push rule wakes up an incremental /sync +psh Can enable/disable default rules psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. -psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 +psh Test that rejected pushers are removed. 
+psh Notifications can be viewed with GET /notifications +psh Trying to add push rule with no scope fails with 400 +psh Trying to add push rule with invalid scope fails with 400 +psh Trying to add push rule with missing template fails with 400 +psh Trying to add push rule with missing rule_id fails with 400 +psh Trying to add push rule with empty rule_id fails with 400 +psh Trying to add push rule with invalid template fails with 400 +psh Trying to add push rule with rule_id with slashes fails with 400 +psh Trying to add push rule with override rule without conditions fails with 400 +psh Trying to add push rule with underride rule without conditions fails with 400 +psh Trying to add push rule with condition without kind fails with 400 +psh Trying to add push rule with content rule without pattern fails with 400 +psh Trying to add push rule with no actions fails with 400 +psh Trying to add push rule with invalid action fails with 400 +psh Trying to add push rule with invalid attr fails with 400 +psh Trying to add push rule with invalid value for enabled fails with 400 +psh Trying to get push rules with no trailing slash fails with 400 +psh Trying to get push rules with scope without trailing slash fails with 400 +psh Trying to get push rules with template without tailing slash fails with 400 +psh Trying to get push rules with unknown scope fails with 400 +psh Trying to get push rules with unknown template fails with 400 +psh Trying to get push rules with unknown attribute fails with 400 psh Trying to get push rules with unknown rule_id fails with 404 -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for presence -crm Rooms can be created with an initial invite list (SYN-205) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) +psh Rooms with names are correctly named in pushes +v1s GET /initialSync with non-numeric 'limit' +v1s GET /events with non-numeric 'limit' +v1s GET /events with negative 'limit' +v1s GET /events with non-numeric 'timeout' +ath Event size limits +syn Check creating invalid filters returns 4xx +f,pre New federated private chats get full presence information (SYN-115) +pre Left room members do not cause problems for presence +crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) +typ Typing notifications don't leak +ban Non-present room members cannot ban others +psh Getting push rules doesn't corrupt the cache SYN-390 +inv Test that we can be reinvited to a room we created +syn Multiple calls to /sync should not cause 500 errors +gst Guest user can call /events on another world_readable room (SYN-606) +gst Real user can call /events on another world_readable room (SYN-606) gst Events come down the correct room pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list std Can send a to-device message to two users which both receive it using /sync +fme 
Outbound federation will ignore a missing event with bad JSON for room version 6 +fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 +jso Invalid JSON integers +jso Invalid JSON floats +jso Invalid JSON special values +inv Can invite users to invite-only rooms (2 subtests) +plv setting 'm.room.name' respects room powerlevel (2 subtests) +psh Messages that notify from another user increment notification_count +psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count +dvk Can claim one time key using POST (2 subtests) +fdk Can query remote device keys using POST (1 subtests) +fdk Can claim remote one time key using POST (2 subtests) +fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py index 0b334ba..3d21fa4 100755 --- a/tests/sytest/are-we-synapse-yet.py +++ b/tests/sytest/are-we-synapse-yet.py @@ -11,7 +11,7 @@ import sys # The main complexity is grouping tests sensibly into features like 'Registration' # and 'Federation'. Then it just checks the ones which are passing and calculates # percentages for each group. Produces results like: -# +# # Client-Server APIs: 29% (196/666 tests) # ------------------- # Registration : 62% (20/32 tests) @@ -28,11 +28,13 @@ import sys # ✓ POST /register can create a user # ✓ POST /register downcases capitals in usernames # ... -# +# # You can also tack `-v` on to see exactly which tests each category falls under. test_mappings = { "nsp": "Non-Spec API", + "unk": "Unknown API (no group specified)", + "app": "Application Services API", "f": "Federation", # flag to mark test involves federation "federation_apis": { @@ -50,6 +52,7 @@ test_mappings = { "fpb": "Public Room API", "fdk": "Device Key APIs", "fed": "Federation API", + "fsd": "Send-to-Device APIs", }, "client_apis": { @@ -61,6 +64,8 @@ test_mappings = { "pro": "Profile", "dev": "Devices", "dvk": "Device Keys", + "dkb": "Device Key Backup", + "xsk": "Cross-signing Keys", "pre": "Presence", "crm": "Create Room", "syn": "Sync API", @@ -98,7 +103,7 @@ test_mappings = { "adm": "Server Admin API", "ign": "Ignore Users", "udr": "User Directory APIs", - "app": "Application Services API", + "jso": "Enforced canonical JSON", }, } @@ -156,20 +161,22 @@ def print_stats(header_name, gid_to_tests, gid_to_name, verbose): total_tests = 0 for gid, tests in gid_to_tests.items(): group_total = len(tests) + if group_total == 0: + continue group_passing = 0 test_names_and_marks = [] for name, passing in tests.items(): if passing: group_passing += 1 test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - + total_tests += group_total total_passing += group_passing pct = "{0:.0f}%".format(group_passing/group_total * 100) line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) subsections.append(line) subsection_test_names[line] = test_names_and_marks - + pct = "{0:.0f}%".format(total_passing/total_tests * 100) print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) print("-" * (len(header_name)+1)) @@ -186,7 +193,6 @@ def main(results_tap_path, verbose): test_name_to_group_id = {} fed_tests = set() client_tests = set() - groupless_tests = set() with open("./are-we-synapse-yet.list", "r") as f: for line in f.readlines(): test_name = " ".join(line.split(" ")[1:]).strip() @@ -212,8 +218,12 @@ def 
main(results_tap_path, verbose): # test_name: OK # } }, + "appservice": { + "app": {}, + }, "nonspec": { - "nsp": {} + "nsp": {}, + "unk": {} }, } with open(results_tap_path, "r") as f: @@ -224,10 +234,11 @@ def main(results_tap_path, verbose): name = test_result["name"] group_id = test_name_to_group_id.get(name) if not group_id: - groupless_tests.add(name) - # raise Exception("The test '%s' doesn't have a group" % (name,)) + summary["nonspec"]["unk"][name] = test_result["ok"] if group_id == "nsp": summary["nonspec"]["nsp"][name] = test_result["ok"] + elif group_id == "app": + summary["appservice"]["app"][name] = test_result["ok"] elif group_id in test_mappings["federation_apis"]: group = summary["federation"].get(group_id, {}) group[name] = test_result["ok"] @@ -243,12 +254,7 @@ def main(results_tap_path, verbose): print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - if verbose: - print("The following tests don't have a group:") - for name in groupless_tests: - print(" %s" % (name,)) - else: - print("%d tests don't have a group" % len(groupless_tests)) + print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) @@ -257,4 +263,4 @@ if __name__ == '__main__': parser.add_argument("tap_file", help="path to results.tap") parser.add_argument("-v", action="store_true", help="show individual test names in output") args = parser.parse_args() - main(args.tap_file, args.v) + main(args.tap_file, args.v) \ No newline at end of file From 6924dfc8ea56d8e8347b78364480ea2fce5a5905 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 6 Feb 2021 15:27:43 +0100 Subject: [PATCH 0411/1727] improvement: better appservice compatibility and optimizations --- src/appservice_server.rs | 2 +- src/client_server/state.rs | 2 +- src/client_server/sync.rs | 404 ++++++++++++++++++++----------------- src/database.rs | 7 +- src/database/rooms.rs | 4 + src/database/sending.rs | 3 +- src/main.rs | 32 ++- src/ruma_wrapper.rs | 19 +- src/server_server.rs | 4 +- 9 files changed, 270 insertions(+), 207 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..ec504b5 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; -use log::warn; +use log::{info, warn}; use ruma::api::OutgoingRequest; use std::{ convert::{TryFrom, TryInto}, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index cecb79d..faa415d 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -234,7 +234,7 @@ pub async fn get_state_events_for_empty_key_route( .1; Ok(get_state_events_for_empty_key::Response { - content: serde_json::value::to_raw_value(&event) + content: serde_json::value::to_raw_value(&event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..494c773 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -95,15 +95,7 @@ pub async fn sync_events_route( // Database queries: - let current_state = db.rooms.room_state_full(&room_id)?; - let current_members = current_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .map(|(key, value)| (&key.1, value)) // 
Only keep state key - .collect::>(); - let encrypted_room = current_state - .get(&(EventType::RoomEncryption, "".to_owned())) - .is_some(); + let current_state_hash = db.rooms.current_state_hash(&room_id)?; // These type is Option>. The outer Option is None when there is no event between // since and the current room state, meaning there should be no updates. @@ -115,69 +107,85 @@ pub async fn sync_events_route( .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); - let since_state = since_state_hash.as_ref().map(|state_hash| { - state_hash - .as_ref() - .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) - }); + let ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) = if since_state_hash != None && Some(¤t_state_hash) != since_state_hash.as_ref() { + let current_state = db.rooms.room_state_full(&room_id)?; + let current_members = current_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .map(|(key, value)| (&key.1, value)) // Only keep state key + .collect::>(); + let encrypted_room = current_state + .get(&(EventType::RoomEncryption, "".to_owned())) + .is_some(); + let since_state = since_state_hash.as_ref().map(|state_hash| { + state_hash + .as_ref() + .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) + }); - let since_encryption = since_state.as_ref().map(|state| { - state - .as_ref() - .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) - }); - - // Calculations: - let new_encrypted_room = - encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); - - let send_member_count = since_state.as_ref().map_or(false, |since_state| { - since_state.as_ref().map_or(true, |since_state| { - current_members.len() - != since_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .count() - }) - }); - - let since_sender_member = since_state.as_ref().map(|since_state| { - since_state.as_ref().and_then(|state| { + let since_encryption = since_state.as_ref().map(|state| { state - .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) - .and_then(|pdu| { - serde_json::from_value::< + .as_ref() + .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) + }); + + // Calculations: + let new_encrypted_room = + encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); + + let send_member_count = since_state.as_ref().map_or(false, |since_state| { + since_state.as_ref().map_or(true, |since_state| { + current_members.len() + != since_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .count() + }) + }); + + let since_sender_member = since_state.as_ref().map(|since_state| { + since_state.as_ref().and_then(|state| { + state + .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) + .and_then(|pdu| { + serde_json::from_value::< Raw, >(pdu.content.clone()) .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database.")) .ok() - }) - }) - }); + }) + }) + }); - if encrypted_room { - for (user_id, current_member) in current_members { - let current_membership = serde_json::from_value::< - Raw, - >(current_member.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
- .membership; + if encrypted_room { + for (user_id, current_member) in current_members { + let current_membership = serde_json::from_value::< + Raw, + >(current_member.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; - let since_membership = - since_state - .as_ref() - .map_or(MembershipState::Join, |since_state| { - since_state - .as_ref() - .and_then(|since_state| { - since_state - .get(&(EventType::RoomMember, user_id.clone())) - .and_then(|since_member| { - serde_json::from_value::< + let since_membership = + since_state + .as_ref() + .map_or(MembershipState::Join, |since_state| { + since_state + .as_ref() + .and_then(|since_state| { + since_state + .get(&(EventType::RoomMember, user_id.clone())) + .and_then(|since_member| { + serde_json::from_value::< Raw, >( since_member.content.clone() @@ -188,50 +196,158 @@ pub async fn sync_events_route( Error::bad_database("Invalid PDU in database.") }) .ok() - }) - }) - .map_or(MembershipState::Leave, |member| member.membership) - }); + }) + }) + .map_or(MembershipState::Leave, |member| member.membership) + }); - let user_id = UserId::try_from(user_id.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + let user_id = UserId::try_from(user_id.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - match (since_membership, current_membership) { - (MembershipState::Leave, MembershipState::Join) => { - // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { - device_list_updates.insert(user_id); + match (since_membership, current_membership) { + (MembershipState::Leave, MembershipState::Join) => { + // A new user joined an encrypted room + if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { + device_list_updates.insert(user_id); + } } + (MembershipState::Join, MembershipState::Leave) => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} } - (MembershipState::Join, MembershipState::Leave) => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} } } - } - let joined_since_last_sync = since_sender_member.map_or(false, |member| { - member.map_or(true, |member| member.membership != MembershipState::Join) - }); + let joined_since_last_sync = since_sender_member.map_or(false, |member| { + member.map_or(true, |member| member.membership != MembershipState::Join) + }); - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + db.rooms + .room_members(&room_id) + .filter_map(|user_id| Some(user_id.ok()?)) + .filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&db, sender_user, user_id, &room_id) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + let joined_member_count = db.rooms.room_members(&room_id).count(); + let invited_member_count = 
db.rooms.room_members_invited(&room_id).count(); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in db + .rooms + .all_pdus(&sender_user, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .map(|(_, pdu)| { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = + UserId::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (db.rooms.is_joined(&user_id, &room_id)? + || db.rooms.is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + .filter_map(|u| u.ok()) // Filter out buggy users + // Filter for possible heroes + .filter_map(|u| u) + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + ( + Some(joined_member_count), + Some(invited_member_count), + heroes, + ) + } else { + (None, None, Vec::new()) + }; + + let state_events = if joined_since_last_sync { db.rooms - .room_members(&room_id) - .filter_map(|user_id| Some(user_id.ok()?)) - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, sender_user, user_id, &room_id) - }), - ); - } + .room_state_full(&room_id)? + .into_iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect() + } else { + match since_state { + None => Vec::new(), + Some(Some(since_state)) => current_state + .iter() + .filter(|(key, value)| { + since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) + }) + .filter(|(_, value)| { + !timeline_pdus.iter().any(|(_, timeline_pdu)| { + timeline_pdu.kind == value.kind + && timeline_pdu.state_key == value.state_key + }) + }) + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + Some(None) => current_state + .iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + } + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + } else { + (Vec::new(), None, None, false, Vec::new()) + }; // Look for device list updates in this room device_list_updates.extend( @@ -240,71 +356,6 @@ pub async fn sync_events_route( .filter_map(|r| r.ok()), ); - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - let joined_member_count = db.rooms.room_members(&room_id).count(); - let invited_member_count = db.rooms.room_members_invited(&room_id).count(); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in db - .rooms - .all_pdus(&sender_user, &room_id)? 
- .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) - .map(|(_, pdu)| { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - .filter_map(|u| u.ok()) // Filter out buggy users - // Filter for possible heroes - .filter_map(|u| u) - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - ( - Some(joined_member_count), - Some(invited_member_count), - heroes, - ) - } else { - (None, None, Vec::new()) - }; - let notification_count = if send_notification_counts { if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_user)? { Some( @@ -385,34 +436,7 @@ pub async fn sync_events_route( events: room_events, }, state: sync_events::State { - events: if joined_since_last_sync { - db.rooms - .room_state_full(&room_id)? - .into_iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect() - } else { - match since_state { - None => Vec::new(), - Some(Some(since_state)) => current_state - .iter() - .filter(|(key, value)| { - since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) - }) - .filter(|(_, value)| { - !timeline_pdus.iter().any(|(_, timeline_pdu)| { - timeline_pdu.kind == value.kind - && timeline_pdu.state_key == value.state_key - }) - }) - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - Some(None) => current_state - .iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - } - }, + events: state_events, }, ephemeral: sync_events::Ephemeral { events: edus }, }; diff --git a/src/database.rs b/src/database.rs index 607e1be..afcd58f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -27,7 +27,7 @@ pub struct Config { server_name: Box, database_path: String, #[serde(default = "default_cache_capacity")] - cache_capacity: u64, + cache_capacity: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -48,7 +48,7 @@ fn true_fn() -> bool { true } -fn default_cache_capacity() -> u64 { +fn default_cache_capacity() -> u32 { 1024 * 1024 * 1024 } @@ -93,8 +93,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity) - .print_profile_on_drop(false) + .cache_capacity(config.cache_capacity as u64) .open()?; info!("Opened sled database at {}", config.database_path); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4081944..b35d006 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1010,6 +1010,10 @@ impl Rooms { .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || self + .room_members(&room_id) + .filter_map(|r| r.ok()) + .any(|member| 
users.iter().any(|regex| regex.is_match(member.as_str()))) { sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } diff --git a/src/database/sending.rs b/src/database/sending.rs index 74aad32..0a66f73 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::info; +use log::{error, info}; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, @@ -131,6 +131,7 @@ impl Sending { }; prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); + last_failed_try.insert(server.clone(), match last_failed_try.get(&server) { Some(last_failed) => { (last_failed.0+1, Instant::now()) diff --git a/src/main.rs b/src/main.rs index 93ab560..65434a5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -15,6 +15,7 @@ pub use database::Database; pub use error::{ConduitLogger, Error, Result}; pub use pdu::PduEvent; pub use rocket::State; +use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use log::LevelFilter; @@ -154,7 +155,13 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_profile_information_route, ], ) - .register(catchers![not_found_catcher]) + .register(catchers![ + not_found_catcher, + forbidden_catcher, + unknown_token_catcher, + missing_token_catcher, + bad_json_catcher + ]) .attach(AdHoc::on_attach("Config", |rocket| async { let config = rocket .figment() @@ -186,3 +193,26 @@ async fn main() { fn not_found_catcher(_req: &'_ Request<'_>) -> String { "404 Not Found".to_owned() } + +#[catch(580)] +fn forbidden_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::Forbidden, "Forbidden.")) +} + +#[catch(581)] +fn unknown_token_catcher() -> Result<()> { + Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown token.", + )) +} + +#[catch(582)] +fn missing_token_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::MissingToken, "Missing token.")) +} + +#[catch(583)] +fn bad_json_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) +} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0fdca74..45fcc7f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -45,7 +45,7 @@ where http::request::Request>, >>::Error: std::fmt::Debug, { - type Error = (); // TODO: Better error handling + type Error = (); type Owned = Data; type Borrowed = Self::Owned; @@ -82,7 +82,9 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) + .map_or(false, |as_token| { + dbg!(token.as_deref()) == dbg!(Some(as_token)) + }) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { @@ -105,7 +107,8 @@ where ); if !db.users.exists(&user_id).unwrap() { - return Failure((Status::Unauthorized, ())); + // Forbidden + return Failure((Status::raw(580), ())); } // TODO: Check if appservice is allowed to be that user @@ -119,15 +122,15 @@ where AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { if let Some(token) = token { match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), + // Unknown Token + None => return Failure((Status::raw(581), ())), Some((user_id, device_id)) => { (Some(user_id), Some(device_id.into()), false) } } } else { - 
// TODO: M_MISSING_TOKEN - return Failure((Status::Unauthorized, ())); + // Missing Token + return Failure((Status::raw(582), ())); } } AuthScheme::ServerSignatures => (None, None, false), @@ -163,7 +166,7 @@ where }), Err(e) => { warn!("{:?}", e); - Failure((Status::BadRequest, ())) + Failure((Status::raw(583), ())) } } }) diff --git a/src/server_server.rs b/src/server_server.rs index 7ff9e3f..3fea4da 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -557,7 +557,7 @@ pub async fn send_transaction_message_route<'a>( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let next_room_state = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( &pdu, @@ -569,6 +569,8 @@ pub async fn send_transaction_message_route<'a>( &db.admin, )?; + db.rooms.set_room_state(&room_id, &next_room_state)?; + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } From ea1e4625d1987183f0bb669cb20bbabd08c71d26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Feb 2021 13:20:00 +0100 Subject: [PATCH 0412/1727] fix: default config options --- conduit-example.toml | 6 +++--- src/database.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 70d3ce4..b82da2c 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,12 +23,12 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#registration_disabled = false +#allow_registration = true # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#encryption_disabled = false -#federation_disabled = false +#allow_encryption = true +#allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time diff --git a/src/database.rs b/src/database.rs index afcd58f..9fce293 100644 --- a/src/database.rs +++ b/src/database.rs @@ -32,7 +32,7 @@ pub struct Config { max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] max_concurrent_requests: u16, - #[serde(default)] + #[serde(default = "true_fn")] allow_registration: bool, #[serde(default = "true_fn")] allow_encryption: bool, From d49911c5e01ca1e1a6d14533bcf6ae47a146fe49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Feb 2021 17:38:45 +0100 Subject: [PATCH 0413/1727] Add 'm.login.token' authentication --- Cargo.lock | 86 ++++++++++++++++++++++++++++++++++-- Cargo.toml | 2 + src/appservice_server.rs | 2 +- src/client_server/session.rs | 73 +++++++++++++++++++++--------- src/database.rs | 1 + src/database/globals.rs | 13 +++++- src/database/sending.rs | 2 +- 7 files changed, 150 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5be6aa..78ff405 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,6 +182,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time 0.1.44", + 
"winapi 0.3.9", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -197,6 +210,7 @@ dependencies = [ "http", "image", "js_int", + "jsonwebtoken", "log", "rand", "regex", @@ -243,7 +257,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time", + "time 0.2.23", "version_check", ] @@ -578,7 +592,7 @@ checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -850,6 +864,20 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -1044,6 +1072,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1204,6 +1243,17 @@ dependencies = [ "syn", ] +[[package]] +name = "pem" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +dependencies = [ + "base64 0.13.0", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "2.1.0" @@ -1538,7 +1588,7 @@ dependencies = [ "rocket_http", "serde", "state", - "time", + "time 0.2.23", "tokio", "ubyte", "version_check", @@ -1575,7 +1625,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time", + "time 0.2.23", "tokio", "tokio-rustls", "uncased", @@ -1969,6 +2019,17 @@ dependencies = [ "libc", ] +[[package]] +name = "simple_asn1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +dependencies = [ + "chrono", + "num-bigint", + "num-traits", +] + [[package]] name = "slab" version = "0.4.2" @@ -2168,6 +2229,17 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "time" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi 0.3.9", +] + [[package]] name = "time" version = "0.2.23" @@ -2498,6 +2570,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.69" diff --git a/Cargo.toml b/Cargo.toml index 56a04e5..f7fbdc5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,8 @@ ring = "0.16.19" trust-dns-resolver = "0.19.6" # Used to find matching events for appservices regex = "1.4.2" +# jwt jsonwebtokens +jsonwebtoken = "7.2.0" [features] default 
= ["conduit_bin"] diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ec504b5..986909b 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; -use log::{info, warn}; +use log::warn; use ruma::api::OutgoingRequest; use std::{ convert::{TryFrom, TryInto}, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 48fbea2..1b2583c 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -8,6 +8,13 @@ use ruma::{ }, UserId, }; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +struct Claims { + sub: String, + exp: usize, +} #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -40,40 +47,62 @@ pub async fn login_route( body: Ruma>, ) -> ConduitResult { // Validate login method - let user_id = - // TODO: Other login methods - if let (login::IncomingUserInfo::MatrixId(username), login::IncomingLoginInfo::Password { password }) = - (&body.user, &body.login_info) - { - let user_id = UserId::parse_with_server_name(username.to_string(), db.globals.server_name()) - .map_err(|_| Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid." - ))?; - let hash = db.users.password_hash(&user_id)? - .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password." - ))?; + // TODO: Other login methods + let user_id = match &body.login_info { + login::IncomingLoginInfo::Password { password } => { + let username = if let login::IncomingUserInfo::MatrixId(matrix_id) = &body.user { + matrix_id + } else { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); + }; + let user_id = + UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + ))?; if hash.is_empty() { return Err(Error::BadRequest( ErrorKind::UserDeactivated, - "The user has been deactivated" + "The user has been deactivated", )); } - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if !hash_matches { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password.")); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + )); } user_id - } else { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; + } + login::IncomingLoginInfo::Token { token } => { + if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { + let token = jsonwebtoken::decode::( + &token, + &jwt_decoding_key, + &jsonwebtoken::Validation::default(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; + let username = token.claims.sub; + UserId::parse_with_server_name(username, db.globals.server_name()).map_err( + |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), + )? 
+ } else { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Token login is not supported (server has no jwt decoding key).", + )); + } + } + }; // Generate new device id if the user didn't specify one let device_id = body diff --git a/src/database.rs b/src/database.rs index 9fce293..8fcffd9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -38,6 +38,7 @@ pub struct Config { allow_encryption: bool, #[serde(default = "false_fn")] allow_federation: bool, + jwt_secret: Option, } fn false_fn() -> bool { diff --git a/src/database/globals.rs b/src/database/globals.rs index 3e24d82..ccd6284 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -11,12 +11,13 @@ pub const COUNTER: &str = "c"; #[derive(Clone)] pub struct Globals { + pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, reqwest_client: reqwest::Client, - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host dns_resolver: TokioAsyncResolver, + jwt_decoding_key: Option>, } impl Globals { @@ -62,6 +63,11 @@ impl Globals { .build() .unwrap(); + let jwt_decoding_key = config + .jwt_secret + .as_ref() + .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); + Ok(Self { globals, config, @@ -73,6 +79,7 @@ impl Globals { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), + jwt_decoding_key, }) } @@ -126,4 +133,8 @@ impl Globals { pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } + + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { + self.jwt_decoding_key.as_ref() + } } diff --git a/src/database/sending.rs b/src/database/sending.rs index 0a66f73..fd32793 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::{error, info}; +use log::info; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, From 4a92a29b566d27876ae85e5366272a695e17689b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 0414/1727] State resolution outline for /send --- Cargo.lock | 152 +++++++------------ Cargo.toml | 8 +- src/main.rs | 2 +- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 349 +++++++++++++++++++++++++++++++++++++++---- 6 files changed, 384 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff405..9ab184c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,21 +229,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" [[package]] name = "constant_time_eq" @@ -645,9 +635,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" 
[[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -674,9 +664,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -772,9 +762,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -1063,9 +1053,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1148,12 +1138,12 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1168,18 +1158,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.13.0+1.1.1i" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "045e4dc48af57aad93d665885789b43222ae26f4886494da12d1ed58d309dcb6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" dependencies = [ "autocfg", "cc", @@ -1202,9 +1192,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1216,9 +1206,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" @@ -1326,9 +1316,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = 
"png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1393,9 +1383,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1507,9 +1497,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", "bytes", @@ -1536,7 +1526,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1636,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "js_int", @@ -1654,7 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "http", "percent-encoding", @@ -1669,7 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1680,7 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "ruma-api", "ruma-common", @@ -1694,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "http", @@ -1713,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "maplit", @@ -1726,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-common", @@ -1740,7 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1751,7 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-api", @@ -1766,7 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "paste", "rand", @@ -1780,7 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro2", "quote", @@ -1791,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "serde", ] @@ -1799,7 +1788,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "form_urlencoded", "itoa", @@ -1812,7 +1801,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1823,7 +1812,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "base64 0.12.3", "ring", @@ -1889,12 +1878,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2012,9 +1995,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2060,13 +2043,12 @@ checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2094,7 +2076,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" +source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" dependencies = [ "itertools", "maplit", @@ -2177,9 +2159,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" dependencies = [ "proc-macro2", "quote", @@ -2295,9 +2277,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2365,9 +2347,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2544,9 +2526,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2644,30 +2626,6 @@ version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "web-sys" version = "0.3.46" diff --git a/Cargo.toml b/Cargo.toml index f7fbdc5..c2db3d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } diff --git a/src/main.rs b/src/main.rs index 65434a5..4cab764 100644 --- a/src/main.rs +++ b/src/main.rs @@ -190,7 +190,7 @@ async fn main() { } #[catch(404)] -fn not_found_catcher(_req: &'_ Request<'_>) -> String { +fn not_found_catcher(_: &Request<'_>) -> String { "404 Not Found".to_owned() } diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..f6ec415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -17,7 +17,7 @@ use std::{ time::UNIX_EPOCH, }; -#[derive(Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, diff --git a/src/ruma_wrapper.rs 
b/src/ruma_wrapper.rs index 45fcc7f..e2f44cd 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, + api::{AuthScheme, IncomingRequest, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -29,7 +29,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -40,10 +40,7 @@ pub struct Ruma { #[cfg(feature = "conduit_bin")] impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma where - ::Incoming: TryFrom>> + std::fmt::Debug, - <::Incoming as std::convert::TryFrom< - http::request::Request>, - >>::Error: std::fmt::Debug, + T::Incoming: IncomingRequest, { type Error = (); type Owned = Data; @@ -152,8 +149,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); debug!("{:?}", http_request); - - match ::Incoming::try_from(http_request) { + match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { body: t, sender_user, @@ -173,7 +169,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index 3fea4da..d68e9fa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -11,17 +11,18 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::get_missing_events, + event::{get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; +use state_res::StateMap; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, @@ -476,6 +477,34 @@ pub async fn get_public_rooms_route( .into()) } +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum PrevEvents { + Sequential(T), + Fork(Vec), +} + +impl IntoIterator for PrevEvents { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + match self { + Self::Sequential(item) => vec![item].into_iter(), + Self::Fork(list) => list.into_iter(), + } + } +} + +impl PrevEvents { + pub fn new(id: &[T]) -> Self { + match id { + [] => panic!("All events must have previous event"), + [single_id] => Self::Sequential(single_id.clone()), + rest => Self::Fork(rest.to_vec()), + } + } +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -532,55 +561,313 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 1. Is a valid event, otherwise it is dropped. 
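The `PrevEvents` helper added above models the two shapes a PDU's prev_events can take: one parent, meaning the event simply extends the timeline, or several parents, meaning it merges a fork whose states may need to be resolved first. A minimal usage sketch, assuming an item type that is `Clone`, such as `EventId` (the log lines are illustrative only):

    // `PrevEvents::new` panics on an empty slice, matching the helper above:
    // every event other than m.room.create must list at least one prev event.
    let prev = PrevEvents::new(&pdu.prev_events);
    match &prev {
        PrevEvents::Sequential(id) => log::info!("extends a single prev event: {}", id),
        PrevEvents::Fork(ids) => log::info!("merges a fork of {} prev events", ids.len()),
    }
    // IntoIterator flattens both cases into one iterator over the ids.
    for id in prev {
        let _ = id;
    }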
+ // Ruma/PduEvent/StateEvent satisfies this - // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. - - // 3. Passes hash checks, otherwise it is redacted before being processed further. - // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. + let keys = db.globals.keypair(); + let mut pub_key_set = BTreeMap::new(); + pub_key_set.insert( + "ed25519:1".to_string(), + String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), + ); + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert("domain".to_string(), pub_key_set); + + let value = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + } + } else { + value + } + } + Err(_e) => { + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + }; + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) .expect("all ruma pdus are conduit pdus"); - let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(room_id)? { + if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + // TODO: remove the need to convert to state_res + let event = pdu.convert_for_state_res(); + let previous = pdu + .prev_events + .first() + .map(|id| { + db.rooms + .get_pdu(id) + .expect("todo") + .map(|ev| ev.convert_for_state_res()) + }) + .flatten(); - let next_room_state = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - value, - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + // 4. + let auth_events = db.rooms.get_auth_events( + &pdu.room_id, + &pdu.kind, + &pdu.sender, + pdu.state_key.as_deref(), + pdu.content.clone(), )?; - - db.rooms.set_room_state(&room_id, &next_room_state)?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + auth_events + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with auth events".into()), + ); + continue; } - resolved_map.insert(event_id, Ok::<(), String>(())); + let mut previous_states = vec![]; + for id in &pdu.prev_events { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? 
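The signature and hash check above drives `ruma::signatures::verify_event` with a public key map: server name, then signing key id ("ed25519:<version>"), then the base64-encoded public key. The hunk fills it with the local keypair under the placeholder name "domain", which only covers events this server signed itself; a complete implementation would cache the origin server's published keys here. A hedged sketch of the map shape and of the verify-then-redact decision, written as if inside a helper returning `Result<_, String>`; the server name and `origin_public_key_b64` are placeholder assumptions:

    use std::collections::BTreeMap;
    use ruma::{signatures, RoomVersionId};

    // Server name -> (signing key id -> base64 public key).
    let mut server_keys = BTreeMap::new();
    server_keys.insert("ed25519:1".to_owned(), origin_public_key_b64.to_owned());
    let mut pub_key_map = BTreeMap::new();
    pub_key_map.insert("example.org".to_owned(), server_keys);

    let value = match signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) {
        // Signatures and content hashes all check out: keep the event as received.
        Ok(signatures::Verified::All) => value,
        // Signatures are valid but a content hash is not: redact before processing further.
        Ok(signatures::Verified::Signatures) => signatures::redact(&value, &RoomVersionId::Version6)
            .map_err(|_| "Redaction failed".to_owned())?,
        Err(_) => return Err("Signature verification failed".to_owned()),
    };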
+ .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + previous_states.push(state); + } else { + // fetch the state + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. + let state_at_event = if previous_states.is_empty() { + // State is empty + Default::default() + } else if previous_states.len() == 1 { + previous_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &previous_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + state_at_event + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Event failed auth with state_at + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with state at the event".into()), + ); + continue; + } + + // The event could still be soft failed + append_state_soft(&db, &pdu)?; + + // Gather the forward extremities and resolve + let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; + let mut fork_states = vec![]; + for id in &forward_extrems { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + fork_states.push(state); + } else { + // This is probably an error?? + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 6. + let state_at_forks = if fork_states.is_empty() { + // State is empty + Default::default() + } else if fork_states.len() == 1 { + fork_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous, + state_at_forks + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? 
+ { + // Soft fail + resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + } else { + append_state(&db, &pdu)?; + // Event has passed all auth/stateres checks + resolved_map.insert(event.event_id(), Ok(())); + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 4b9976aa743321a0c062fac9ffd2de737531b717 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 31 Dec 2020 08:40:49 -0500 Subject: [PATCH 0415/1727] Update state-res, use the new Event trait This also bumps ruma to latest and removes js_int infavor of the ruma re-export --- Cargo.lock | 140 +++++++++++++++++++------------- Cargo.toml | 8 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 22 +++-- src/client_server/message.rs | 7 +- src/database/rooms.rs | 68 ++++++++++------ src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 116 +++++++++++++------------- src/server_server.rs | 64 ++++++++------- 11 files changed, 252 insertions(+), 183 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ab184c..b05a3c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,8 +212,8 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "rand", "regex", + "rand 0.7.3", "reqwest", "ring", "rocket", @@ -585,6 +585,17 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi", +] + [[package]] name = "gif" version = "0.11.1" @@ -847,9 +858,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.9" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" dependencies = 
[ "serde", ] @@ -1396,11 +1407,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.0", + "rand_hc 0.3.0", ] [[package]] @@ -1410,7 +1433,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.0", ] [[package]] @@ -1419,7 +1452,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +dependencies = [ + "getrandom 0.2.0", ] [[package]] @@ -1428,7 +1470,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.0", ] [[package]] @@ -1443,7 +1494,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1571,7 +1622,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1625,7 +1676,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "js_int", @@ -1643,7 +1694,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1709,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "http", @@ -1702,7 +1753,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "maplit", @@ -1715,7 +1766,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1791,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-api", @@ -1755,21 +1806,21 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand", + "rand 0.8.0", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", + "ruma-serde-macros", "serde", - "strum", ] [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro2", "quote", @@ -1780,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "serde", ] @@ -1788,7 +1839,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "form_urlencoded", "itoa", @@ -1800,8 +1851,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1812,9 +1863,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -2076,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" +source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" dependencies = [ "itertools", "maplit", @@ -2136,27 +2187,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" -[[package]] -name = "strum" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syn" version = "1.0.55" @@ -2176,7 +2206,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2416,7 +2446,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index c2db3d9..bf74e8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# TODO: remove the gen-eventid feature +state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -37,8 +38,7 @@ log = "0.4.11" http = "0.2.1" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" + # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for appservice registration files diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..2bff20c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -124,7 +124,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..f792062 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -131,7 +131,7 @@ pub async fn get_content_thumbnail_route( allow_remote: false, height: body.height, width: body.width, - method: body.method, + method: body.method.clone(), server_name: &body.server_name, media_id: &body.media_id, }, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..eb44085 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::StateEvent; +use state_res::Event; use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, @@ -594,19 +594,19 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) + PduEvent::from_id_val(&event_id, value.clone()) 
.map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; + .collect::>>>()?; let control_events = event_map .values() - .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .filter(|pdu| state_res::is_power_event(pdu)) + .map(|pdu| pdu.event_id.clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -646,7 +646,8 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -685,8 +686,13 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( +<<<<<<< HEAD &PduEvent::from(&**pdu), utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +======= + &pdu, + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +>>>>>>> 6232d1f (Update state-res, use the new Event trait) count, pdu_id.clone().into(), &db.globals, @@ -695,7 +701,9 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert((pdu.kind(), pdu.state_key()), pdu_id); + if let Some(key) = &pdu.state_key { + state.insert((pdu.kind(), key.to_string()), pdu_id); + } } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..c56cc94 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,10 @@ use ruma::{ events::EventContent, EventId, }; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -46,7 +49,7 @@ pub async fn send_message_event_route( return Ok(send_message_event::Response { event_id }.into()); } - let mut unsigned = serde_json::Map::new(); + let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.build_and_append_pdu( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b35d006..f0129c6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, @@ -67,12 +67,8 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> state_res::Result> { +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { let pid = self .get_pdu_id(event_id) .map_err(StateError::custom)? 
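The join handling above does not assume the events in the send_join response arrive in a usable order. It first picks out the power events, i.e. the state events that can change who is authorised to do what (m.room.power_levels, m.room.join_rules, kicks and bans), resolves those, and only then orders the remaining events with a mainline sort anchored on the resolved m.room.power_levels event. A small sketch of that selection and of the lookup key, which now carries an `Option<String>` state key:

    // Power events are what state resolution has to settle first.
    let control_events = event_map
        .values()
        .filter(|pdu| state_res::is_power_event(pdu))
        .map(|pdu| pdu.event_id.clone())
        .collect::<Vec<_>>();

    // StateMap keys are (EventType, Option<String>); a room's power-levels event
    // always has the empty string as its state key.
    let power_level =
        resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string())));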
@@ -91,7 +87,7 @@ impl StateStore for Rooms { .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) .map_err(Into::into) - .and_then(|pdu: StateEvent| { + .and_then(|pdu: PduEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == room_id { @@ -112,7 +108,7 @@ impl Rooms { &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -141,7 +137,7 @@ impl Rooms { pdu, )) }) - .collect::>>() + .collect() } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -181,7 +177,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -205,7 +201,7 @@ impl Rooms { content: serde_json::Value, ) -> Result> { let auth_events = state_res::auth_types_for_event( - kind.clone(), + kind, sender, state_key.map(|s| s.to_string()), content, @@ -213,7 +209,13 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get( + room_id, + &event_type, + &state_key + .as_deref() + .expect("found a non state event in auth events"), + )? { events.insert((event_type, state_key), pdu); } } @@ -290,7 +292,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) } else { @@ -795,23 +800,40 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
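`get_auth_events` above asks state-res which state entries are relevant for authorising an event of a given kind, then looks each of them up in the room's current state. For an ordinary message event that boils down to m.room.create, the current m.room.power_levels and the sender's own m.room.member event; membership changes additionally pull in the target's member event and m.room.join_rules. A sketch of the lookup loop, with the typical auth types spelled out as comments (the list follows the Matrix auth rules and is illustrative, not exhaustive):

    // (EventType, Option<state_key>) pairs that matter for auth, e.g.
    //   (EventType::RoomCreate,      Some("".into()))
    //   (EventType::RoomPowerLevels, Some("".into()))
    //   (EventType::RoomMember,      Some(sender.to_string()))
    let auth_types =
        state_res::auth_types_for_event(kind, sender, state_key.map(|s| s.to_string()), content);

    let mut auth_events = StateMap::new();
    for (event_type, state_key) in auth_types {
        if let Some((_, pdu)) = self.room_state_get(
            room_id,
            &event_type,
            state_key.as_deref().expect("auth types are state events"),
        )? {
            auth_events.insert((event_type, state_key), pdu);
        }
    }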
- .map(|pdu| pdu.convert_for_state_res()); + .map(Arc::new); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, + &Arc::new(PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key: Some(state_key.clone()), + prev_events, + depth: (prev_events.len() as u32).into(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), + redacts, + unsigned: unsigned + .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }), prev_event, None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) + Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) .collect::>>()?, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..2b1b03d 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 9da0776..153dce9 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index f6ec415..c764700 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -33,8 +32,8 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] - pub unsigned: serde_json::Map, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub unsigned: BTreeMap, pub hashes: EventHash, pub signatures: BTreeMap, BTreeMap>, } @@ -227,61 +226,66 @@ impl PduEvent { ) .expect("Raw::from_value always works") } -} -impl From<&state_res::StateEvent> for PduEvent { - fn from(pdu: &state_res::StateEvent) -> Self { - Self { - event_id: pdu.event_id(), - room_id: 
pdu.room_id().clone(), - sender: pdu.sender().clone(), - origin_server_ts: (pdu - .origin_server_ts() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .try_into() - .expect("time is valid"), - kind: pdu.kind(), - content: pdu.content().clone(), - state_key: Some(pdu.state_key()), - prev_events: pdu.prev_event_ids(), - depth: *pdu.depth(), - auth_events: pdu.auth_events(), - redacts: pdu.redacts().cloned(), - unsigned: pdu.unsigned().clone().into_iter().collect(), - hashes: pdu.hashes().clone(), - signatures: pdu.signatures(), - } + pub fn from_id_val( + event_id: &EventId, + json: CanonicalJsonObject, + ) -> Result { + json.insert( + "event_id".to_string(), + ruma::serde::to_canonical_value(event_id).expect("event_id is a valid Value"), + ); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) } } -impl PduEvent { - pub fn convert_for_state_res(&self) -> Arc { - Arc::new( - // For consistency of eventId (just in case) we use the one - // generated by conduit for everything. - state_res::StateEvent::from_id_value( - self.event_id.clone(), - json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }), - ) - .expect("all conduit PDUs are state events"), - ) +impl state_res::Event for PduEvent { + fn event_id(&self) -> &EventId { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + &self.room_id + } + + fn sender(&self) -> &UserId { + &self.sender + } + fn kind(&self) -> EventType { + self.kind.clone() + } + + fn content(&self) -> serde_json::Value { + self.content.clone() + } + fn origin_server_ts(&self) -> std::time::SystemTime { + UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + } + + fn state_key(&self) -> Option { + self.state_key.clone() + } + fn prev_events(&self) -> Vec { + self.prev_events.to_vec() + } + fn depth(&self) -> &UInt { + &self.depth + } + fn auth_events(&self) -> Vec { + self.auth_events.to_vec() + } + fn redacts(&self) -> Option<&EventId> { + self.redacts.as_ref() + } + fn hashes(&self) -> &EventHash { + &self.hashes + } + fn signatures(&self) -> BTreeMap, BTreeMap> { + self.signatures.clone() + } + fn unsigned(&self) -> &BTreeMap { + &self.unsigned } } @@ -315,7 +319,7 @@ pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, - pub unsigned: Option>, + pub unsigned: Option>, pub state_key: Option, pub redacts: Option, } diff --git a/src/server_server.rs b/src/server_server.rs index d68e9fa..58d85b1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,12 +20,13 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::StateMap; +use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, + sync::Arc, time::{Duration, SystemTime}, }; @@ -610,17 +611,12 @@ pub async fn send_transaction_message_route<'a>( continue; } - // TODO: remove the need to convert to state_res - let event = pdu.convert_for_state_res(); + let event = Arc::new(pdu.clone()); + let previous = pdu .prev_events .first() - 
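With `impl state_res::Event for PduEvent` above, Conduit's own PDU type satisfies the interface state-res works against, so the earlier per-call conversion into `state_res::StateEvent` disappears: a PDU is wrapped in an `Arc` and handed over as-is. The trait also papers over representation details, e.g. `origin_server_ts` is stored as milliseconds (`UInt`) but reported as a `SystemTime`. A tiny usage sketch:

    use std::sync::Arc;
    use state_res::Event; // brings event_id(), kind(), prev_events(), ... into scope

    let event = Arc::new(pdu.clone());
    assert_eq!(event.event_id(), &pdu.event_id);
    assert_eq!(event.kind(), pdu.kind);
    // SystemTime derived from the stored millisecond timestamp.
    let _ts: std::time::SystemTime = event.origin_server_ts();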
.map(|id| { - db.rooms - .get_pdu(id) - .expect("todo") - .map(|ev| ev.convert_for_state_res()) - }) + .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); // 4. @@ -637,27 +633,32 @@ pub async fn send_transaction_message_route<'a>( previous.clone(), auth_events .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) + .map(|(k, v)| (k, Arc::new(v))) .collect(), None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with auth events".into()), ); continue; } - let mut previous_states = vec![]; + let mut previous_states: Vec>> = vec![]; for id in &pdu.prev_events { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .collect(); previous_states.push(state); } else { // fetch the state @@ -693,7 +694,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -702,7 +703,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -712,17 +713,14 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_event, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { // Event failed auth with state_at resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with state at the event".into()), ); continue; @@ -733,14 +731,20 @@ pub async fn send_transaction_message_route<'a>( // Gather the forward extremities and resolve let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states = vec![]; + let mut fork_states: Vec>> = vec![]; for id in &forward_extrems { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + fork_states.push(state); } else { // This is probably an error?? @@ -776,7 +780,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -785,7 +789,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -795,20 +799,20 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
{ // Soft fail - resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + resolved_map.insert( + event.event_id().clone(), + Err("Event has been soft failed".into()), + ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id(), Ok(())); + resolved_map.insert(event.event_id().clone(), Ok(())); } } From 63af3d3da06d1fdb4d8e8be3637d2814efba799d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 3 Jan 2021 17:26:17 -0500 Subject: [PATCH 0416/1727] Step 5 in /send just fetches state from incoming server --- Cargo.lock | 78 ++++++------- src/database/rooms.rs | 29 +---- src/pdu.rs | 2 +- src/server_server.rs | 264 +++++++++++++++++++++++------------------- 4 files changed, 186 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b05a3c4..227e822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -44,9 +44,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -369,9 +369,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -576,11 +576,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -834,9 +834,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" @@ -1046,9 +1046,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1149,9 +1149,9 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.31" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
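The verdict branch above implements soft failure: an event that passed the auth check against its own claimed auth events and against the state at the event, but fails against the resolved current state of the room (the state across all forward extremities), has already been stored by `append_state_soft` yet is reported as failed and, per the federation rules, should neither be sent to clients nor extend the room's current state (the helper's TODO notes that the latter is not honoured yet). A condensed sketch of the branch, where `passes_current_state_auth` stands in for the auth_check result computed just above:

    if !passes_current_state_auth {
        // Soft fail: the event is kept (append_state_soft ran earlier),
        // but it is reported as failed and must not advance the room state.
        resolved_map.insert(event.event_id().clone(), Err("Event has been soft failed".into()));
    } else {
        // Fully accepted: append the event and advance the current room state.
        append_state(&db, &pdu)?;
        resolved_map.insert(event.event_id().clone(), Ok(()));
    }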
"8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -1178,9 +1178,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.59" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1407,7 +1407,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1452,7 +1452,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1494,25 +1494,25 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -2005,9 +2005,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -2088,9 +2088,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" [[package]] name = "socket2" @@ -2111,9 +2111,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" +source = 
"git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.55" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" +checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" dependencies = [ "proc-macro2", "quote", @@ -2214,18 +2214,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -2752,9 +2752,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f0129c6..ef76c39 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -802,32 +802,9 @@ impl Rooms { ))?)? 
.map(Arc::new); event_auth::valid_membership_change( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - &Arc::new(PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key: Some(state_key.clone()), - prev_events, - depth: (prev_events.len() as u32).into(), - auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) - .collect(), - redacts, - unsigned: unsigned - .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), - }), + Some(state_key.as_str()), + &sender, + content.clone(), prev_event, None, // TODO: third party invite &auth_events diff --git a/src/pdu.rs b/src/pdu.rs index c764700..2997317 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -229,7 +229,7 @@ impl PduEvent { pub fn from_id_val( event_id: &EventId, - json: CanonicalJsonObject, + mut json: CanonicalJsonObject, ) -> Result { json.insert( "event_id".to_string(), diff --git a/src/server_server.rs b/src/server_server.rs index 58d85b1..3c4308c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,13 +11,15 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::Raw, + signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; @@ -578,32 +580,13 @@ pub async fn send_transaction_message_route<'a>( let mut pub_key_map = BTreeMap::new(); pub_key_map.insert("domain".to_string(), pub_key_set); - let value = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - } - } else { - value - } - } - Err(_e) => { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - }; - - let pdu = serde_json::from_value::( - serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("all ruma pdus are conduit pdus"); + let pdu = match signature_and_hash_check(&pub_key_map, value) { + Ok(pdu) => pdu, + Err(e) => { + resolved_map.insert(event_id, Err(e)); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? 
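Extracting the verification into `signature_and_hash_check` (defined further down) gives the /send handler one `Result<PduEvent, String>` per incoming event, so a single bad PDU no longer derails the whole transaction: its failure reason is recorded under its event id and the loop moves on. The accumulated map is exactly what the endpoint returns to the sending server. A condensed sketch of that skeleton:

    let mut resolved_map = BTreeMap::new();
    for pdu in &body.pdus {
        let (event_id, value) = crate::pdu::process_incoming_pdu(pdu);
        match signature_and_hash_check(&pub_key_map, value) {
            Ok(_pdu) => {
                // ... auth and state checks go here; on success:
                resolved_map.insert(event_id, Ok::<(), String>(()));
            }
            Err(reason) => {
                resolved_map.insert(event_id, Err(reason));
            }
        }
    }
    // One verdict per event id, sent back in the response body.
    Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())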
{ @@ -619,7 +602,10 @@ pub async fn send_transaction_message_route<'a>( .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); - // 4. + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // the auth events that would be correct for this pdu. Put another way we should use the auth events + // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( &pdu.room_id, &pdu.kind, @@ -627,6 +613,12 @@ pub async fn send_transaction_message_route<'a>( pdu.state_key.as_deref(), pdu.content.clone(), )?; + + let mut event_map: state_res::EventMap> = auth_events + .iter() + .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .collect(); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, @@ -635,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), - None, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -646,66 +638,38 @@ pub async fn send_transaction_message_route<'a>( continue; } - let mut previous_states: Vec>> = vec![]; - for id in &pdu.prev_events { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? + let server_name = body.body.origin.clone(); + let (state_at_event, incoming_auth_events): (StateMap>, _) = match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) + .await? .into_iter() - .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); - previous_states.push(state); - } else { - // fetch the state - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } - } - } - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
- let state_at_event = if previous_states.is_empty() { - // State is empty - Default::default() - } else if previous_states.len() == 1 { - previous_states[0].clone() - } else { - match state_res::StateResolution::resolve( - &pdu.room_id, - &RoomVersionId::Version6, - &previous_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - None, - &db.rooms, - ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), - Err(e) => panic!("{:?}", e), + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; } }; @@ -713,8 +677,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event, - None, + state_at_event.clone(), // TODO: could this be &state avoid .clone + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -747,22 +711,7 @@ pub async fn send_transaction_message_route<'a>( fork_states.push(state); } else { - // This is probably an error?? - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } + todo!("we don't know of a pdu that is part of our known forks OOPS") } } @@ -773,6 +722,18 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // Add as much as we can to the `event_map` (less DB hits) + event_map.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + ); + event_map.extend( + state_at_event + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, @@ -784,7 +745,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - None, + &mut event_map, &db.rooms, ) { Ok(res) => res @@ -819,8 +780,74 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn signature_and_hash_check( + pub_key_map: &ruma::signatures::PublicKeyMap, + value: CanonicalJsonObject, +) -> std::result::Result { + let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".into()), + } + } else { + value + } + } + Err(_e) => return Err("Signature verification failed".into()), + }; + + serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// TODO: this needs to add events to the DB in a way that does not +/// effect the state of the room +async fn fetch_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + events: &[EventId], +) -> Result> { + let mut pdus = vec![]; + for id in events { + match db.rooms.get_pdu(id)? 
{ + Some(pdu) => pdus.push(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: id }, + ) + .await + { + Ok(res) => { + let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(pdu) => { + // TODO: add to our DB somehow? + pdus.push(pdu); + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + } + } + Ok(pdus) +} + fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() + db.rooms.get_pdu_leaves(room_id) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -854,20 +881,15 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + // db.rooms.append_pdu( + // pdu, + // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + // count, + // pdu_id.clone().into(), + // &db.globals, + // &db.account_data, + // &db.admin, + // )?; Ok(()) } From cdeb1236a20e39d0f9635e1c0e7ed40882effd59 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 5 Jan 2021 09:21:41 -0500 Subject: [PATCH 0417/1727] Fix clippy warnings remove unused imports --- Cargo.lock | 2 +- src/appservice_server.rs | 2 +- src/client_server/membership.rs | 1 - src/database/globals.rs | 4 ++- src/database/sending.rs | 12 ++++----- src/pdu.rs | 7 +---- src/server_server.rs | 45 ++++++++++++++++++++------------- 7 files changed, 39 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 227e822..9dc0b38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" +source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" dependencies = [ "itertools", "maplit", diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" 
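(The clippy fixes in this patch mostly apply two idioms: pass a char rather than a one-character &str to contains/starts_with, and write a byte-string literal instead of "...".as_bytes(). A tiny standalone illustration of both, separate from the patch itself — the path and prefix values below are made up:)

    fn main() {
        let path = "/_matrix/client/r0/sync?since=abc";

        // clippy::single_char_pattern: a `char` pattern is cheaper than a one-byte `&str`.
        let symbol = if path.contains('?') { "&" } else { "?" };
        assert_eq!(symbol, "&");

        // clippy also prefers the byte-string literal over `"+".as_bytes()`.
        let mut prefix = b"+".to_vec();
        prefix.extend_from_slice(b"some_appservice_id");
        assert_eq!(prefix[0], b'+');
    }
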
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index eb44085..4e093c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -675,7 +675,6 @@ async fn join_room_by_id_helper( .iter() .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) .expect("Found event_id in sorted events that is not in resolved state"); diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..beb7de5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,9 +9,11 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +pub type DestinationCache = Arc, (String, Option)>>>; + #[derive(Clone)] pub struct Globals { - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: DestinationCache, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, diff --git a/src/database/sending.rs b/src/database/sending.rs index fd32793..d99c4f3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -79,7 +79,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -125,7 +125,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -157,7 +157,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -186,7 +186,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -220,7 +220,7 @@ impl Sending { } pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -330,7 +330,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/pdu.rs b/src/pdu.rs index 2997317..86fbc9f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,12 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::UNIX_EPOCH, -}; +use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 3c4308c..3de3636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,14 +11,13 @@ use ruma::{ 
get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::Raw, signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -220,7 +219,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &ServerName, ) -> (String, Option) { let mut host = None; @@ -594,13 +593,14 @@ pub async fn send_transaction_message_route<'a>( continue; } + let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - - let previous = pdu - .prev_events - .first() - .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) - .flatten(); + // Fetch any unknown events or retrieve them from the DB + let previous = + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { + mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not @@ -616,14 +616,14 @@ pub async fn send_transaction_message_route<'a>( let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - auth_events + &auth_events .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), @@ -638,7 +638,6 @@ pub async fn send_transaction_message_route<'a>( continue; } - let server_name = body.body.origin.clone(); let (state_at_event, incoming_auth_events): (StateMap>, _) = match db .sending .send_federation_request( @@ -652,8 +651,18 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) - .await? + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); @@ -677,8 +686,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event.clone(), // TODO: could this be &state avoid .clone - None, // TODO: third party invite + &state_at_event, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -760,7 +769,7 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks, + &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
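(The /send changes in this patch also add a sanity check that a remote server's state response never contains two events for the same (event type, state key) pair. A standalone sketch of that check, with a hypothetical stripped-down Pdu type standing in for Conduit's PduEvent:)

    use std::collections::BTreeSet;

    // Hypothetical, simplified stand-in for conduit's PduEvent.
    struct Pdu {
        kind: String,
        state_key: Option<String>,
    }

    /// A state set is well formed only if every (kind, state_key) pair is unique;
    /// a server sending two events for the same pair is handing us invalid state.
    fn state_is_well_formed(state: &[Pdu]) -> bool {
        let mut seen = BTreeSet::new();
        state.iter().all(|ev| seen.insert((&ev.kind, &ev.state_key)))
    }

    fn main() {
        let good = vec![
            Pdu { kind: "m.room.create".into(), state_key: Some(String::new()) },
            Pdu { kind: "m.room.member".into(), state_key: Some("@alice:example.org".into()) },
        ];
        let bad = vec![
            Pdu { kind: "m.room.name".into(), state_key: Some(String::new()) },
            Pdu { kind: "m.room.name".into(), state_key: Some(String::new()) },
        ];
        assert!(state_is_well_formed(&good));
        assert!(!state_is_well_formed(&bad));
    }
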
From 8de0d9f9ced7c11d24bd38d20f871bae11ed863e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 08:52:30 -0500 Subject: [PATCH 0418/1727] Remove StateStore trait from state-res collect events needed --- Cargo.lock | 62 +++++++++--------- Cargo.toml | 2 +- src/client_server/membership.rs | 4 -- src/database/rooms.rs | 100 ++++++++++++++++++---------- src/server_server.rs | 111 +++++++++++++++++++------------- 5 files changed, 163 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dc0b38..f621d16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -558,7 +558,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -587,13 +587,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -722,7 +722,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -1272,11 +1272,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.3", ] [[package]] @@ -1292,9 +1292,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1309,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" [[package]] name = "pin-utils" @@ -1416,13 +1416,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" dependencies = [ "libc", "rand_chacha 0.3.0", - "rand_core 0.6.0", + "rand_core 0.6.1", "rand_hc 0.3.0", ] @@ -1443,7 +1443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1457,11 +1457,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.1", ] [[package]] @@ -1479,7 +1479,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1569,7 +1569,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "serde", "serde_urlencoded", "tokio", @@ -1809,7 +1809,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand 0.8.0", + "rand 0.8.1", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2028,9 +2028,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" dependencies = [ "dtoa", "linked-hash-map", @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" +source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2398,7 +2398,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "tracing-attributes", "tracing-core", ] diff --git a/Cargo.toml b/Cargo.toml index bf74e8a..fdcc4ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be 
the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4e093c2..ea14268 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -618,7 +618,6 @@ async fn join_room_by_id_helper( &room_id, &control_events, &mut event_map, - &db.rooms, &event_ids, ); @@ -629,7 +628,6 @@ async fn join_room_by_id_helper( &sorted_control_events, &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); @@ -654,7 +652,6 @@ async fn join_room_by_id_helper( &events_to_sort, power_level, &mut event_map, - &db.rooms, ); let resolved_events = state_res::StateResolution::iterative_auth_check( @@ -663,7 +660,6 @@ async fn join_room_by_id_helper( &sorted_event_ids, &resolved_control_events, &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ef76c39..fe4f23c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -67,40 +67,6 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { - let pid = self - .get_pdu_id(event_id) - .map_err(StateError::custom)? - .ok_or_else(|| { - StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db: {}", - event_id.as_str() - )) - })?; - - serde_json::from_slice( - &self - .pduid_pdu - .get(pid) - .map_err(StateError::custom)? - .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, - ) - .map_err(Into::into) - .and_then(|pdu: PduEvent| { - // conduit's PDU's always contain a room_id but some - // of ruma's do not so this must be an Option - if pdu.room_id() == room_id { - Ok(Arc::new(pdu)) - } else { - Err(StateError::NotFound( - "Found PDU for incorrect room in db.".into(), - )) - } - }) - } -} - impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -222,6 +188,72 @@ impl Rooms { Ok(events) } + /// Returns a Vec of the related auth events to the given `event`. + /// + /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. + pub fn auth_events_full( + &self, + room_id: &RoomId, + event_ids: &[EventId], + ) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + if let Some(ev) = self.get_pdu(&ev_id)? { + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) + } + + /// Returns a Vec representing the difference in auth chains of the given `events`. + /// + /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). + pub fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + use std::collections::BTreeSet; + + let mut chains = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_events_full(room_id, &ids)? 
+ .into_iter() + .map(|pdu| pdu.event_id) + .collect::>(); + chains.push(chain); + } + + if let Some(chain) = chains.first() { + let rest = chains.iter().skip(1).flatten().cloned().collect(); + let common = chain.intersection(&rest).collect::>(); + + Ok(chains + .iter() + .flatten() + .filter(|id| !common.contains(&id)) + .cloned() + .collect::>() + .into_iter() + .collect()) + } else { + Ok(vec![]) + } + } + /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. diff --git a/src/server_server.rs b/src/server_server.rs index 3de3636..f68475c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -603,7 +603,7 @@ pub async fn send_transaction_message_route<'a>( }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not // the auth events that would be correct for this pdu. Put another way we should use the auth events // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( @@ -637,50 +637,56 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 4. - let (state_at_event, incoming_auth_events): (StateMap>, _) = match db - .sending - .send_federation_request( - &db.globals, - server_name.clone(), - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) - .collect(); - - ( - state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) - .await?, + // Step 5. event passes auth based on state at the event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - } - Err(_) => { - resolved_map.insert( - event.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await + { + Ok(res) => { + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .collect(); + + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await? 
+ .into_iter() + .map(Arc::new) + .collect(), + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; if !state_res::event_auth::auth_check( &RoomVersionId::Version6, @@ -698,6 +704,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 5. // The event could still be soft failed append_state_soft(&db, &pdu)?; @@ -724,18 +731,30 @@ pub async fn send_transaction_message_route<'a>( } } - // 6. + // Step 6. event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() } else if fork_states.len() == 1 { fork_states[0].clone() } else { + let auth_events = fork_states + .iter() + .map(|map| { + db.rooms.auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + }) + .collect(); + // Add as much as we can to the `event_map` (less DB hits) event_map.extend( incoming_auth_events .into_iter() - .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + .map(|pdu| (pdu.event_id().clone(), pdu)), ); event_map.extend( state_at_event @@ -754,8 +773,8 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), + &auth_events, &mut event_map, - &db.rooms, ) { Ok(res) => res .into_iter() From 168ae8dca00d480ad28d80e65918853f1802091b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 15:05:09 -0500 Subject: [PATCH 0419/1727] Fill event_map with all events that will be needed for resolution --- Cargo.lock | 2 +- src/server_server.rs | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f621d16..7a79dbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,8 +212,8 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "regex", "rand 0.7.3", + "regex", "reqwest", "ring", "rocket", diff --git a/src/server_server.rs b/src/server_server.rs index f68475c..e87c05c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -565,7 +565,7 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this - + // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); // 2. Passes signature checks, otherwise event is dropped. 
@@ -741,16 +741,24 @@ pub async fn send_transaction_message_route<'a>( let auth_events = fork_states .iter() .map(|map| { - db.rooms.auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), - ) + db.rooms + .auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) }) - .collect(); + .collect::>>()?; - // Add as much as we can to the `event_map` (less DB hits) + // Add everything we will need to event_map + event_map.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); event_map.extend( incoming_auth_events .into_iter() @@ -773,7 +781,10 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - &auth_events, + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), &mut event_map, ) { Ok(res) => res From d0b8d0f5fdaf2ee62b6d14702cda5d2a154c241b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 12 Jan 2021 08:26:52 -0500 Subject: [PATCH 0420/1727] Fix signature/hash checks, fetch recursive auth events --- src/client_server/membership.rs | 7 +- src/database/rooms.rs | 4 +- src/pdu.rs | 12 +- src/server_server.rs | 240 +++++++++++++++++++++++++------- 4 files changed, 193 insertions(+), 70 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea14268..29b6c14 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -681,13 +681,8 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( -<<<<<<< HEAD - &PduEvent::from(&**pdu), - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), -======= &pdu, - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), ->>>>>>> 6232d1f (Update state-res, use the new Event trait) + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fe4f23c..88a772b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; +use state_res::{event_auth, Event, StateMap}; use std::{ collections::{BTreeMap, HashMap}, @@ -193,7 +193,7 @@ impl Rooms { /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. pub fn auth_events_full( &self, - room_id: &RoomId, + _room_id: &RoomId, event_ids: &[EventId], ) -> Result> { let mut result = BTreeMap::new(); diff --git a/src/pdu.rs b/src/pdu.rs index 86fbc9f..750f9cf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,7 +4,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -286,12 +286,11 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. 
/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn process_incoming_pdu( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", @@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu( )) .expect("ruma's reference hashes are valid event ids"); - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - (event_id, value) } diff --git a/src/server_server.rs b/src/server_server.rs index e87c05c..141d5bb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ + device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -18,13 +20,14 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - signatures::{CanonicalJsonObject, PublicKeyMap}, + serde::to_canonical_value, + signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, net::{IpAddr, SocketAddr}, sync::Arc, @@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } + dbg!(&*body); + for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>( } "m.presence" => {} "m.receipt" => {} + "m.device_list_update" => {} _ => {} }, Err(_err) => { @@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. // 3. Passes hash checks, otherwise it is redacted before being processed further. - let keys = db.globals.keypair(); - let mut pub_key_set = BTreeMap::new(); - pub_key_set.insert( - "ed25519:1".to_string(), - String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), - ); + let server_name = body.body.origin.clone(); let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert("domain".to_string(), pub_key_set); + if let Some(sig) = value.get("signatures") { + match sig { + CanonicalJsonValue::Object(entity) => { + for key in entity.keys() { + // TODO: save this in a DB maybe... 
+ // fetch the public signing key + let res = db + .sending + .send_federation_request( + &db.globals, + Box::::try_from(key.to_string()).unwrap(), + get_server_keys::v2::Request::new(), + ) + .await?; - let pdu = match signature_and_hash_check(&pub_key_map, value) { + pub_key_map.insert( + res.server_key.server_name.to_string(), + res.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); + } + } + _ => { + resolved_map.insert( + event_id, + Err("`signatures` is not a JSON object".to_string()), + ); + continue; + } + } + } else { + resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); + continue; + } + + let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>( } }; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - // Fetch any unknown events or retrieve them from the DB + dbg!(&*event); + // Fetch any unknown prev_events or retrieve them from the DB let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { - mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), _ => None, }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not - // the auth events that would be correct for this pdu. Put another way we should use the auth events - // the pdu claims are its auth events - let auth_events = db.rooms.get_auth_events( - &pdu.room_id, - &pdu.kind, - &pdu.sender, - pdu.state_key.as_deref(), - pdu.content.clone(), - )?; + // Recursively gather all auth events checking that the previous auth events are valid. 
+ let auth_events: Vec = + match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) + .await + { + Ok(events) => events, + Err(_) => { + resolved_map.insert( + pdu.event_id, + Err("Failed to recursively gather auth events".into()), + ); + continue; + } + }; let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); - if !state_res::event_auth::auth_check( + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - &auth_events - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + &pdu.auth_events + .iter() + .map(|id| { + event_map + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + Error::Conflict( + "Auth event not found, event failed recursive auth checks.", + ) + }) + }) + .collect::>>()?, None, // TODO: third party invite ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { + .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( pdu.event_id, Err("Event has failed auth check with auth events".into()), @@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, -) -> std::result::Result { - let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".into()), +) -> std::result::Result { + Ok( + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + error!("CONTENT HASH FAILED"); + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => return Err("Signature verification failed".into()), - }; - - serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }, ) - .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have missing events it fails. +async fn fetch_check_auth_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + event_ids: &[EventId], +) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + let ev = match db.rooms.get_pdu(&ev_id)? 
{ + Some(pdu) => pdu, + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: &ev_id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(mut val) => { + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map") + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }; + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) } /// TODO: this needs to add events to the DB in a way that does not @@ -865,10 +988,21 @@ async fn fetch_events( .await { Ok(res) => { - let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); match signature_and_hash_check(key_map, value) { - Ok(pdu) => { + Ok(mut val) => { // TODO: add to our DB somehow? + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + pdus.push(pdu); } Err(e) => { @@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; db.rooms.append_pdu( pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, From d108a735a45e6fe9b0fcda00b6d0ebbeff043f4a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 0421/1727] State resolution outline for /send --- src/server_server.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index 141d5bb..31d6467 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1067,6 +1067,59 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { Ok(()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + 
pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 52392628e9eff02ed0db65481c551840ba879405 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 14:39:56 -0500 Subject: [PATCH 0422/1727] Convert uses of Box to a ref --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 2 +- src/server_server.rs | 117 ++++++++++++++++++++++---------- 7 files changed, 92 insertions(+), 43 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..0dc40a9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -70,7 +70,7 @@ pub async fn get_alias_helper( .sending .send_federation_request( &db.globals, - room_alias.server_name().to_owned(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2bff20c..87d5fc8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -137,7 +137,7 @@ pub async fn get_public_rooms_filtered_helper( .sending .send_federation_request( &db.globals, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f792062..275038a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -77,7 +77,7 @@ pub async fn get_content_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -126,7 +126,7 @@ pub async fn get_content_thumbnail_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content_thumbnail::Request { allow_remote: false, height: body.height, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29b6c14..40e4183 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -468,7 +468,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, @@ -547,7 +547,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/sending.rs b/src/database/sending.rs index d99c4f3..e6cdc76 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -303,7 +303,7 @@ impl Sending { server_server::send_request( &globals, - server.clone(), + &*server, send_transaction_message::v1::Request { origin: globals.server_name(), pdus: &pdu_jsons, @@ -348,7 +348,7 @@ impl Sending { pub 
async fn send_federation_request( &self, globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where diff --git a/src/pdu.rs b/src/pdu.rs index 750f9cf..340ddee 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -287,7 +287,7 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub(crate) fn process_incoming_pdu( +pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/server_server.rs b/src/server_server.rs index 31d6467..64e0a05 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,11 +20,12 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, StateMap}; +use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, @@ -36,7 +37,7 @@ use std::{ pub async fn send_request( globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where @@ -50,7 +51,7 @@ where .actual_destination_cache .read() .unwrap() - .get(&destination) + .get(destination) .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { @@ -61,7 +62,7 @@ where .actual_destination_cache .write() .unwrap() - .insert(destination.clone(), result.clone()); + .insert(Box::::from(destination), result.clone()); result }; @@ -278,9 +279,9 @@ async fn find_actual_destination( (actual_destination, host) } -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() @@ -572,11 +573,9 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then - let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let server_name = body.body.origin.clone(); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { match sig { @@ -588,7 +587,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - Box::::try_from(key.to_string()).unwrap(), + <&ServerName>::try_from(key.as_str()).unwrap(), get_server_keys::v2::Request::new(), ) .await?; @@ -616,6 +615,9 @@ pub async fn send_transaction_message_route<'a>( continue; } + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { @@ -625,15 +627,20 @@ pub async fn send_transaction_message_route<'a>( }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // to our PduEvent type also finally verifying the first step listed above val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let pdu = match serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); + ) { + Ok(pdu) => pdu, + Err(_) => { + resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { @@ -644,18 +651,15 @@ pub async fn send_transaction_message_route<'a>( let event = Arc::new(pdu.clone()); dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // Recursively gather all auth events checking that the previous auth events are valid. let auth_events: Vec = - match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) - .await - { + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { Ok(events) => events, Err(_) => { resolved_map.insert( @@ -707,7 +711,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - server_name.clone(), + server_name, get_room_state_ids::v1::Request { room_id: pdu.room_id(), event_id: pdu.event_id(), @@ -716,8 +720,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -734,7 +737,7 @@ pub async fn send_transaction_message_route<'a>( ( state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) .await? .into_iter() .map(Arc::new) @@ -881,6 +884,52 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +async fn auth_each_event( + db: &Database, + value: CanonicalJsonObject, + event_id: EventId, + pub_key_map: &PublicKeyMap, + server_name: &ServerName, + auth_cache: EventMap>, +) -> std::result::Result { + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
+ let mut val = signature_and_hash_check(&pub_key_map, value)?; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; + + // If we have no idea about this room skip the PDU + if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { + return Err("Room is unknown to this server".into()); + } + + // Fetch any unknown prev_events or retrieve them from the DB + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; + + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // Recursively gather all auth events checking that the previous auth events are valid. + let auth_events: Vec = + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(events) => events, + Err(_) => return Err("Failed to recursively gather auth events".into()), + }; + + Ok(pdu) +} + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -909,7 +958,7 @@ fn signature_and_hash_check( /// events `auth_events`. If the chain is found to have missing events it fails. async fn fetch_check_auth_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, event_ids: &[EventId], ) -> Result> { @@ -929,13 +978,13 @@ async fn fetch_check_auth_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: &ev_id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { val.insert( @@ -970,7 +1019,7 @@ async fn fetch_check_auth_events( /// effect the state of the room async fn fetch_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, events: &[EventId], ) -> Result> { @@ -982,13 +1031,13 @@ async fn fetch_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { // TODO: add to our DB somehow? From 4cf530c55b32c494f1dde191fc07c2bcfed4ceac Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 21:32:22 -0500 Subject: [PATCH 0423/1727] Abstract event validation/fetching, add outlier and signing key DB trees Fixed the miss named commented out keys in conduit-example.toml. 
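Among other things this patch adds the servertimeout_signingkey tree, whose keys are laid out as the origin server name, a 0xff separator, and the key set's valid_until timestamp in big-endian milliseconds, so one server's keys share a common prefix and expired entries can be pruned by timestamp. A small std-only sketch of building and parsing such a key (helper names are invented; the real code goes through crate::utils::u64_from_bytes):

use std::convert::TryInto;

// Sketch of the key layout used by the new servertimeout_signingkey tree:
// server name bytes, a 0xff separator, then the valid_until timestamp in
// big-endian milliseconds.
fn make_signing_key_key(origin: &str, valid_until_ms: u64) -> Vec<u8> {
    let mut key = origin.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&valid_until_ms.to_be_bytes());
    key
}

// Recover the timestamp the same way the patch does: split on the first 0xff
// and read the remaining eight bytes as a big-endian u64.
fn valid_until_from_key(key: &[u8]) -> Option<u64> {
    let ts = key.splitn(2, |&b| b == 0xff).nth(1)?;
    Some(u64::from_be_bytes(ts.try_into().ok()?))
}

fn main() {
    let key = make_signing_key_key("example.org", 1_610_000_000_000);
    assert_eq!(valid_until_from_key(&key), Some(1_610_000_000_000));
}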
--- conduit-example.toml | 4 +- src/database.rs | 10 +- src/database/globals.rs | 77 ++++- src/database/rooms.rs | 97 ++---- src/error.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 632 +++++++++++++++++++--------------------- 7 files changed, 415 insertions(+), 413 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index b82da2c..bb3ae33 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,11 +23,11 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#allow_registration = true +#allow_registration = false # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#allow_encryption = true +#allow_encryption = false #allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 diff --git a/src/database.rs b/src/database.rs index 8fcffd9..ea65d6f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -22,7 +22,7 @@ use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; use tokio::sync::Semaphore; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, database_path: String, @@ -102,7 +102,12 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config).await?, + globals: globals::Globals::load( + db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + ) + .await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -155,6 +160,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index beb7de5..7eb162b 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,10 @@ use crate::{database::Config, utils, Error, Result}; use log::error; -use ruma::ServerName; -use std::collections::HashMap; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + ServerName, ServerSigningKeyId, +}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -20,10 +23,15 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey } impl Globals { - pub async fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load( + globals: sled::Tree, + server_keys: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -82,6 +90,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, + servertimeout_signingkey: server_keys, }) } @@ -139,4 +148,66 @@ impl Globals { pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { self.jwt_decoding_key.as_ref() } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { + // Remove outdated keys + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, _) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + + if now > valid_until { + self.servertimeout_signingkey.remove(k)?; + } + } + + let mut key = origin.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice( + &(keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .to_be_bytes(), + ); + + self.servertimeout_signingkey.insert( + key, + serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, bytes) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + // If these keys are still valid use em! + if valid_until > now { + return serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + } + } + Ok(BTreeMap::default()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a772b..81abd62 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,6 +65,9 @@ pub struct Rooms { /// The state for a given state hash. pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + + /// Any pdu that has passed the steps up to auth with auth_events. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { @@ -188,72 +191,6 @@ impl Rooms { Ok(events) } - /// Returns a Vec of the related auth events to the given `event`. - /// - /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. - pub fn auth_events_full( - &self, - _room_id: &RoomId, - event_ids: &[EventId], - ) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - if let Some(ev) = self.get_pdu(&ev_id)? 
{ - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) - } - - /// Returns a Vec representing the difference in auth chains of the given `events`. - /// - /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). - pub fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - use std::collections::BTreeSet; - - let mut chains = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_events_full(room_id, &ids)? - .into_iter() - .map(|pdu| pdu.event_id) - .collect::>(); - chains.push(chain); - } - - if let Some(chain) = chains.first() { - let rest = chains.iter().skip(1).flatten().cloned().collect(); - let common = chain.intersection(&rest).collect::>(); - - Ok(chains - .iter() - .flatten() - .filter(|id| !common.contains(&id)) - .cloned() - .collect::>() - .into_iter() - .collect()) - } else { - Ok(vec![]) - } - } - /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. @@ -475,6 +412,31 @@ impl Rooms { Ok(()) } + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns true if the event_id was previously inserted. + pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + let res = self + .eventid_outlierpdu + .insert( + event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + ) + .map(|op| op.is_some())?; + Ok(res) + } + /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -516,6 +478,9 @@ impl Rooms { } } + // We no longer keep this pdu as an outlier + self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending diff --git a/src/error.rs b/src/error.rs index c57843c..fed545c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -122,10 +122,9 @@ impl log::Log for ConduitLogger { let output = format!("{} - {}", record.level(), record.args()); if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record + && (record.module_path().map_or(false, |path| { + path.starts_with("conduit::") || path.starts_with("state") + }) || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/main.rs b/src/main.rs index 4cab764..e5c0399 100644 --- a/src/main.rs +++ b/src/main.rs @@ -167,6 +167,7 @@ fn setup_rocket() -> rocket::Rocket { .figment() .extract() .expect("It looks like your config is invalid. 
Please take a look at the error"); + let data = Database::load_or_create(config) .await .expect("config is valid"); diff --git a/src/server_server.rs b/src/server_server.rs index 64e0a05..6907e34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,4 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -7,7 +6,6 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ - device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -20,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -28,9 +25,12 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, + future::Future, net::{IpAddr, SocketAddr}, + pin::Pin, + result::Result as StdResult, sync::Arc, time::{Duration, SystemTime}, }; @@ -575,6 +575,26 @@ pub async fn send_transaction_message_route<'a>( // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + continue; + } + }; + if !db.rooms.exists(&room_id)? { + resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); + continue; + } + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { @@ -583,20 +603,12 @@ pub async fn send_transaction_message_route<'a>( for key in entity.keys() { // TODO: save this in a DB maybe... // fetch the public signing key - let res = db - .sending - .send_federation_request( - &db.globals, - <&ServerName>::try_from(key.as_str()).unwrap(), - get_server_keys::v2::Request::new(), - ) - .await?; + let origin = <&ServerName>::try_from(key.as_str()).unwrap(); + let keys = fetch_signing_keys(&db, origin).await?; pub_key_map.insert( - res.server_key.server_name.to_string(), - res.server_key - .verify_keys - .into_iter() + origin.to_string(), + keys.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(), ); @@ -615,10 +627,31 @@ pub async fn send_transaction_message_route<'a>( continue; } - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let mut val = match signature_and_hash_check(&pub_key_map, value) { + // TODO: make this persist but not a DB Tree... + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. 
This could potentially also be some sort of trie (suffix tree) + // like structure so that once an auth event is known it would know (using indexes maybe) all of + // the auth events that it references. + let mut auth_cache = EventMap::new(); + + // 1. check the server is in the room (optional) + // 2. check content hash, redact if doesn't match + // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. persist this event as an outlier + // 7. if not timeline event: stop + let pdu = match validate_event( + &db, + value, + event_id.clone(), + &pub_key_map, + server_name, + // All the auth events gathered will be here + &mut auth_cache, + ) + .await + { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -626,59 +659,31 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = match serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - Ok(pdu) => pdu, - Err(_) => { - resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); - continue; - } - }; + let pdu = Arc::new(pdu.clone()); - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - - let event = Arc::new(pdu.clone()); - dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + let previous = match fetch_events( + &db, + server_name, + &pub_key_map, + &pdu.prev_events, + &mut auth_cache, + ) + .await + { + Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), _ => None, }; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. 
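Before any signature work, the hunk above now pulls room_id straight out of the incoming canonical JSON and drops the PDU early when the room is unknown to this server. A minimal sketch of that early extraction, using serde_json::Value in place of CanonicalJsonValue (the function name is invented for illustration):

use serde_json::{json, Value};

// Read `room_id` as a string from the incoming JSON object and bail out
// before doing any heavier work if it is missing or not a string.
fn room_id_of(pdu: &Value) -> Result<&str, String> {
    pdu.get("room_id")
        .and_then(Value::as_str)
        .ok_or_else(|| "Event needs a valid RoomId".to_owned())
}

fn main() {
    let pdu = json!({ "room_id": "!somewhere:example.org", "type": "m.room.message" });
    assert_eq!(room_id_of(&pdu).unwrap(), "!somewhere:example.org");
    assert!(room_id_of(&json!({ "type": "m.room.message" })).is_err());
}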
- let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => { - resolved_map.insert( - pdu.event_id, - Err("Failed to recursively gather auth events".into()), - ); - continue; - } - }; - - let mut event_map: state_res::EventMap> = auth_events + let mut event_map: state_res::EventMap> = auth_cache .iter() - .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); // Check that the event passes auth based on the auth_events let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &pdu.auth_events .iter() @@ -696,9 +701,10 @@ pub async fn send_transaction_message_route<'a>( None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( - pdu.event_id, + pdu.event_id().clone(), Err("Event has failed auth check with auth events".into()), ); continue; @@ -720,7 +726,14 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events( + &db, + server_name, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, + ) + .await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -732,21 +745,26 @@ pub async fn send_transaction_message_route<'a>( let state = state .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); ( state, - fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) - .await? - .into_iter() - .map(Arc::new) - .collect(), + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await? + .into_iter() + .collect(), ) } Err(_) => { resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Fetching state for event failed".into()), ); continue; @@ -755,7 +773,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &state_at_event, None, // TODO: third party invite @@ -764,37 +782,21 @@ pub async fn send_transaction_message_route<'a>( { // Event failed auth with state_at resolved_map.insert( - pdu.event_id, + event_id, Err("Event has failed auth check with state at the event".into()), ); continue; } // End of step 5. - // The event could still be soft failed - append_state_soft(&db, &pdu)?; - // Gather the forward extremities and resolve - let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states: Vec>> = vec![]; - for id in &forward_extrems { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - fork_states.push(state); - } else { - todo!("we don't know of a pdu that is part of our known forks OOPS") + let fork_states = match forward_extremity_ids(&db, &pdu) { + Ok(states) => states, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; } - } + }; // Step 6. 
event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { @@ -803,19 +805,47 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { - let auth_events = fork_states - .iter() - .map(|map| { - db.rooms - .auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), + let mut auth_events = vec![]; + // this keeps track if we error so we can break out of these inner loops + // to continue on with the incoming PDU's + let mut failed = false; + for map in &fork_states { + let mut state_auth = vec![]; + for pdu in map.values() { + let event = match auth_cache.get(pdu.event_id()) { + Some(aev) => aev.clone(), + // We should know about every event at this point but just incase... + None => match fetch_events( + &db, + server_name, + &pub_key_map, + &[pdu.event_id().clone()], + &mut auth_cache, ) - .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) - }) - .collect::>>()?; + .await + .map(|mut vec| vec.remove(0)) + { + Ok(aev) => aev.clone(), + Err(_) => { + resolved_map.insert( + event_id.clone(), + Err("Event has been soft failed".into()), + ); + failed = true; + break; + } + }, + }; + state_auth.push(event); + } + if failed { + break; + } + auth_events.push(state_auth); + } + if failed { + continue; + } // Add everything we will need to event_map event_map.extend( @@ -862,74 +892,163 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous, &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { - // Soft fail + // Soft fail, we add the event as an outlier. resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Event has been soft failed".into()), ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id().clone(), Ok(())); + resolved_map.insert(pdu.event_id().clone(), Ok(())); } } Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } -async fn auth_each_event( - db: &Database, +/// Validate any event that is given to us by another server. +/// +/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). +/// 2. Passes signature checks, otherwise event is dropped. +/// 3. Passes hash checks, otherwise it is redacted before being processed further. +/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). +/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +fn validate_event<'a>( + db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &PublicKeyMap, - server_name: &ServerName, - auth_cache: EventMap>, -) -> std::result::Result { - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. 
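validate_event above returns a boxed, pinned future instead of being a plain async fn because it and fetch_events call each other recursively, and a directly recursive async fn would need an anonymous future type that contains itself. A self-contained toy showing the same Box::pin pattern (assuming the crate's existing tokio runtime to drive it):

use std::future::Future;
use std::pin::Pin;

// Returning a boxed, pinned trait object breaks the type cycle that direct
// async recursion would create.
fn countdown(n: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
    Box::pin(async move {
        if n == 0 {
            0
        } else {
            // The recursive call goes through the boxed future, so the
            // compiler can size the type.
            countdown(n - 1).await + 1
        }
    })
}

#[tokio::main]
async fn main() {
    assert_eq!(countdown(5).await, 5);
}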
- let mut val = signature_and_hash_check(&pub_key_map, value)?; + pub_key_map: &'a PublicKeyMap, + origin: &'a ServerName, + auth_cache: &'a mut EventMap>, +) -> Pin> + 'a + Send>> { + Box::pin(async move { + let mut val = signature_and_hash_check(&pub_key_map, value)?; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { - return Err("Room is unknown to this server".into()); - } + fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + .await + .map_err(|_| "Event failed auth chain check".to_string())?; - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + db.rooms + .append_pdu_outlier(pdu.event_id(), &pdu) + .map_err(|e| e.to_string())?; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. - let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => return Err("Failed to recursively gather auth events".into()), - }; - - Ok(pdu) + Ok(pdu) + }) } +/// Find the event and auth it. +/// +/// 1. Look in the main timeline (pduid_pdu tree) +/// 2. Look at outlier pdu tree +/// 3. Ask origin server over federation +/// 4. TODO: Ask other servers over federation? +async fn fetch_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + events: &[EventId], + auth_cache: &mut EventMap>, +) -> Result>> { + let mut pdus = vec![]; + for id in events { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db.rooms.get_pdu_outlier(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; + + Arc::new(pdu) + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }, + }; + pdus.push(pdu); + } + Ok(pdus) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. 
If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, +) -> Result> { + match db.globals.signing_keys_for(origin)? { + keys if !keys.is_empty() => Ok(keys), + _ => { + let keys = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + } +} fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -954,122 +1073,29 @@ fn signature_and_hash_check( ) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have missing events it fails. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], -) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); +fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { + let mut fork_states = vec![]; + for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - let ev = match db.rooms.get_pdu(&ev_id)? 
{ - Some(pdu) => pdu, - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &ev_id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map") - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, - }; - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) -} - -/// TODO: this needs to add events to the DB in a way that does not -/// effect the state of the room -async fn fetch_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - events: &[EventId], -) -> Result> { - let mut pdus = vec![]; - for id in events { - match db.rooms.get_pdu(id)? { - Some(pdu) => pdus.push(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - // TODO: add to our DB somehow? - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); - - pdus.push(pdu); - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + fork_states.push(state); + } else { + return Err(Error::Conflict( + "we don't know of a pdu that is part of our known forks OOPS", + )); } } - Ok(pdus) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - db.rooms.get_pdu_leaves(room_id) + Ok(fork_states) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -1078,9 +1104,12 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
+ let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( - pdu, + &pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), @@ -1089,78 +1118,9 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { &db.admin, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // db.rooms.append_pdu( - // pdu, - // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - // count, - // pdu_id.clone().into(), - // &db.globals, - // &db.account_data, - // &db.admin, - // )?; - - Ok(()) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() -} - -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 4b2eb5ab82d6fc80aac2ab78a0c02d3add245743 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 11:05:57 -0500 Subject: [PATCH 0424/1727] Fix ALL clippy warnings --- src/client_server/account.rs | 78 ++++------------------ src/client_server/membership.rs | 40 ++--------- src/client_server/message.rs | 6 +- src/client_server/profile.rs | 12 +--- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 90 +++++-------------------- src/client_server/state.rs | 6 +- src/database/admin.rs | 6 +- src/database/rooms.rs | 113 ++++++++++++++------------------ src/database/sending.rs | 19 +++--- src/error.rs | 8 ++- src/server_server.rs | 4 +- 12 files changed, 106 insertions(+), 282 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 75544b7..6927a53 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -239,11 +239,7 @@ pub async fn 
register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Make conduit bot join @@ -264,11 +260,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -302,11 +294,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.1 Join Rules @@ -323,11 +311,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -346,11 +330,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -367,11 +347,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 6. Events implied by name and topic @@ -390,11 +366,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( @@ -410,11 +382,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Room alias @@ -436,11 +404,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -463,11 +427,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -486,11 +446,7 @@ pub async fn register_route( }, &user_id, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Send welcome message @@ -515,11 +471,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -691,11 +643,7 @@ pub async fn deactivate_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 40e4183..70bb480 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,11 +124,7 @@ pub async fn leave_room_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -164,11 +160,7 @@ pub async fn invite_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -220,11 +212,7 @@ pub async fn kick_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -280,11 +268,7 @@ pub async fn ban_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -332,11 +316,7 @@ pub async fn unban_user_route( }, &sender_user, &body.room_id, - 
&db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -685,9 +665,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; if state_events.contains(ev_id) { @@ -717,11 +695,7 @@ async fn join_room_by_id_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c56cc94..c64c390 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -68,11 +68,7 @@ pub async fn send_message_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..21759a8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -63,11 +63,7 @@ pub async fn set_displayname_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update @@ -160,11 +156,7 @@ pub async fn set_avatar_url_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..282c35a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -31,11 +31,7 @@ pub async fn redact_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..631d87b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,11 +65,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Let the room creator join @@ -90,11 +86,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -135,11 +127,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4. Events set by preset @@ -175,11 +163,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -196,11 +180,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -225,11 +205,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 5. 
Events listed in initial_state @@ -248,11 +224,7 @@ pub async fn create_room_route( pdu_builder, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -273,11 +245,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -295,11 +263,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -322,11 +286,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -413,11 +373,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Get the old room federations status @@ -457,11 +413,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Join the new room @@ -482,11 +434,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Recommended transferable state events list from the specs @@ -519,11 +467,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -566,11 +510,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index faa415d..ae5e251 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,11 +284,7 @@ pub async fn send_state_event_for_key_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; Ok(event_id) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..501722e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -60,11 +60,7 @@ impl Admin { }, &conduit_user, &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, ) .unwrap(); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 81abd62..d62d4b0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::error; use regex::Regex; use ring::digest; @@ -447,9 +447,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, - globals: &super::globals::Globals, - account_data: &super::account_data::AccountData, - admin: &super::admin::Admin, + db: &Database, ) -> Result<()> { // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -486,7 +484,7 @@ impl Rooms { // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; self.pduid_pdu.insert( &pdu_id, @@ -521,8 +519,8 @@ impl Rooms { ) })?, &pdu.sender, - account_data, - globals, + &db.account_data, + &db.globals, )?; } } @@ -540,10 +538,10 @@ impl Rooms { self.tokenids.insert(key, &[])?; } - if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", globals.server_name()) + &format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), )? @@ -570,10 +568,11 @@ impl Rooms { ); match parsed_config { Ok(yaml) => { - admin.send(AdminCommand::RegisterAppservice(yaml)); + db.admin + .send(AdminCommand::RegisterAppservice(yaml)); } Err(e) => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( format!( "Could not parse appservice config: {}", @@ -584,7 +583,7 @@ impl Rooms { } } } else { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( "Expected code block in command body.", ), @@ -592,10 +591,10 @@ impl Rooms { } } "list_appservices" => { - admin.send(AdminCommand::ListAppservices); + db.admin.send(AdminCommand::ListAppservices); } _ => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( "Command: {}, Args: {:?}", command, args @@ -696,17 +695,12 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
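The hunks in this patch replace the long parameter lists (&db.globals, &db.sending, &db.admin, &db.account_data, &db.appservice) with a single &Database argument, which is also why the #[allow(clippy::too_many_arguments)] just below can be dropped. A toy, self-contained sketch of that pattern (all names here are invented, and mutability is simplified compared to the sled-backed handles):

// Instead of threading several sub-handles through every call, functions
// borrow the one aggregate handle and reach into the components they need.
struct Globals { server_name: String }
struct Sending { queued: Vec<String> }

struct Database {
    globals: Globals,
    sending: Sending,
}

// Before: fn append_pdu(globals: &Globals, sending: &mut Sending, body: &str)
// After:
fn append_pdu(db: &mut Database, body: &str) {
    let event = format!("{} says: {}", db.globals.server_name, body);
    db.sending.queued.push(event);
}

fn main() {
    let mut db = Database {
        globals: Globals { server_name: "example.org".to_owned() },
        sending: Sending { queued: Vec::new() },
    };
    append_pdu(&mut db, "hello");
    assert_eq!(db.sending.queued.len(), 1);
}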
- #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - globals: &super::globals::Globals, - sending: &super::sending::Sending, - admin: &super::admin::Admin, - account_data: &super::account_data::AccountData, - appservice: &super::appservice::Appservice, + db: &Database, ) -> Result { let PduBuilder { event_type, @@ -789,7 +783,7 @@ impl Rooms { if !match event_type { EventType::RoomEncryption => { // Only allow encryption events if it's allowed in the config - globals.allow_encryption() + db.globals.allow_encryption() } EventType::RoomMember => { let prev_event = self @@ -895,13 +889,13 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(globals.server_name()) + to_canonical_value(db.globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), + db.globals.server_name().as_str(), + db.globals.keypair(), &mut pdu_json, &RoomVersionId::Version6, ) @@ -922,24 +916,16 @@ impl Rooms { // Increment the last index and use that // This is also the next_batch/since value - let count = globals.next_count()?; + let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu( - &pdu, - pdu_json, - count, - pdu_id.clone().into(), - globals, - account_data, - admin, - )?; + self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -948,31 +934,28 @@ impl Rooms { for server in self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) + .filter(|server| &**server != db.globals.server_name()) { - sending.send_pdu(&server, &pdu_id)?; + db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") .and_then(|users| users.as_sequence()) - .map_or_else( - || Vec::new(), - |users| { - users - .iter() - .map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) - .filter_map(|o| o) - .collect::>() - }, - ); + .map_or_else(Vec::new, |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let aliases = namespaces .get("aliases") .and_then(|users| users.get("regex")) @@ -989,29 +972,31 @@ impl Rooms { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, globals.server_name()).ok() + UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - if bridge_user_id.map_or(false, |bridge_user_id| { - self.is_joined(&bridge_user_id, 
room_id).unwrap_or(false) - }) || users.iter().any(|users| { + let user_is_joined = + |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { + }; + let matching_aliases = |aliases: Regex| { room_aliases .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || self - .room_members(&room_id) - .filter_map(|r| r.ok()) - .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + }; + + if bridge_user_id.map_or(false, user_is_joined) + || users.iter().any(matching_users) + || aliases.map_or(false, matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } diff --git a/src/database/sending.rs b/src/database/sending.rs index e6cdc76..101daf3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -149,6 +149,15 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); + let exponential_backoff = |(tries, instant): &(u32, Instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }; if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() @@ -173,15 +182,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, |(tries, instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - instant.elapsed() < min_elapsed_duration - }) { + if last_failed_try.get(server).map_or(false, exponential_backoff) { return false; } diff --git a/src/error.rs b/src/error.rs index fed545c..13efce6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,10 +121,12 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); + let match_mod_path = + |path: &str| path.starts_with("conduit::") || path.starts_with("state"); + if self.enabled(record.metadata()) - && (record.module_path().map_or(false, |path| { - path.starts_with("conduit::") || path.starts_with("state") - }) || record + && (record.module_path().map_or(false, match_mod_path) + || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/server_server.rs b/src/server_server.rs index 6907e34..ae59583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1113,9 +1113,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - 
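The sending.rs hunk above factors the retry gate into an exponential_backoff closure: a destination that failed `tries` times is skipped until 60 seconds times tries squared has elapsed, capped at one day. A small std-only sketch of the same check (function name and example values invented):

use std::time::{Duration, Instant};

// After `tries` failures a destination is skipped until 60s * tries^2 has
// elapsed since the last failure, capped at 24 hours.
fn should_skip(tries: u32, last_failure: Instant) -> bool {
    let mut min_elapsed = Duration::from_secs(60) * tries * tries;
    let cap = Duration::from_secs(60 * 60 * 24);
    if min_elapsed > cap {
        min_elapsed = cap;
    }
    last_failure.elapsed() < min_elapsed
}

fn main() {
    // A destination that failed twice, three minutes ago, is still backed off
    // (two failures require 60s * 4 = 4 minutes).
    let last_failure = Instant::now() - Duration::from_secs(3 * 60);
    assert!(should_skip(2, last_failure));
    // After a single failure only 60s are required, so the same age passes.
    assert!(!should_skip(1, last_failure));
}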
&db.admin, + &db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time From db0aee3318b39d24ac37915ce49018117c0c03f2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 15:46:47 -0500 Subject: [PATCH 0425/1727] Use the auth_events for step 6, WIP forward_extremity_ids fn --- src/server_server.rs | 159 ++++++++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 55 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ae59583..77f0fa8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); - for pdu in &body.pdus { + 'main_pdu_loop: for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -660,7 +660,6 @@ pub async fn send_transaction_message_route<'a>( }; let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB let previous = match fetch_events( &db, @@ -675,6 +674,7 @@ pub async fn send_transaction_message_route<'a>( _ => None, }; + // [auth_cache] At this point we have the auth chain of the incoming event. let mut event_map: state_res::EventMap> = auth_cache .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -688,7 +688,7 @@ pub async fn send_transaction_message_route<'a>( &pdu.auth_events .iter() .map(|id| { - event_map + auth_cache .get(id) .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) .ok_or_else(|| { @@ -790,7 +790,15 @@ pub async fn send_transaction_message_route<'a>( // End of step 5. // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids(&db, &pdu) { + let fork_states = match forward_extremity_ids( + &db, + &pdu, + server_name, + &pub_key_map, + &mut auth_cache, + ) + .await + { Ok(states) => states, Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); @@ -805,47 +813,44 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // TODO: remove this is for current debugging Jan, 15 2021 + let mut number_fetches = 0_u32; let mut auth_events = vec![]; // this keeps track if we error so we can break out of these inner loops // to continue on with the incoming PDU's - let mut failed = false; for map in &fork_states { let mut state_auth = vec![]; - for pdu in map.values() { - let event = match auth_cache.get(pdu.event_id()) { + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), // We should know about every event at this point but just incase... 
None => match fetch_events( &db, server_name, &pub_key_map, - &[pdu.event_id().clone()], + &[auth_id.clone()], &mut auth_cache, ) .await - .map(|mut vec| vec.remove(0)) - { - Ok(aev) => aev.clone(), + .map(|mut vec| { + number_fetches += 1; + vec.remove(0) + }) { + Ok(aev) => aev, Err(_) => { resolved_map.insert( event_id.clone(), Err("Event has been soft failed".into()), ); - failed = true; - break; + continue 'main_pdu_loop; } }, }; state_auth.push(event); } - if failed { - break; - } auth_events.push(state_auth); } - if failed { - continue; - } + info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map event_map.extend( @@ -886,7 +891,13 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), - Err(e) => panic!("{:?}", e), + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("State resolution failed, either an event could not be found or deserialization".into()), + ); + continue 'main_pdu_loop; + } } }; @@ -914,6 +925,7 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). @@ -955,6 +967,37 @@ fn validate_event<'a>( }) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + /// Find the event and auth it. /// /// 1. Look in the main timeline (pduid_pdu tree) @@ -1000,36 +1043,6 @@ async fn fetch_events( Ok(pdus) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. -/// -/// The `auth_cache` is filled instead of returning a `Vec`. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| vec.remove(0))?; - - stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); - } - Ok(()) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. 
async fn fetch_signing_keys( @@ -1049,6 +1062,7 @@ async fn fetch_signing_keys( } } } + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -1073,9 +1087,23 @@ fn signature_and_hash_check( ) } -fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { +async fn forward_extremity_ids( + db: &Database, + pdu: &PduEvent, + origin: &ServerName, + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + + for incoming_leaf in &pdu.prev_events { + if !current_leaves.contains(incoming_leaf) { + current_leaves.push(incoming_leaf.clone()); + } + } + let mut fork_states = vec![]; - for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1090,11 +1118,32 @@ fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result Date: Sat, 16 Jan 2021 16:37:20 -0500 Subject: [PATCH 0426/1727] Fixing the incoming events algorithm (review with time) --- src/database/rooms.rs | 2 +- src/server_server.rs | 237 ++++++++++++++++++++++++------------------ 2 files changed, 138 insertions(+), 101 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d62d4b0..325a2e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -150,7 +150,7 @@ impl Rooms { } } - /// Returns the last state hash key added to the db. + /// Returns the state hash for this pdu. pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } diff --git a/src/server_server.rs b/src/server_server.rs index 77f0fa8..0eb7d6f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,6 +5,7 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -590,6 +591,8 @@ pub async fn send_transaction_message_route<'a>( continue; } }; + + // 1. check the server is in the room (optional) if !db.rooms.exists(&room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); continue; @@ -634,14 +637,13 @@ pub async fn send_transaction_message_route<'a>( // the auth events that it references. let mut auth_cache = EventMap::new(); - // 1. check the server is in the room (optional) // 2. check content hash, redact if doesn't match // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 6. persist this event as an outlier // 7. if not timeline event: stop - let pdu = match validate_event( + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let (pdu, previous) = match validate_event( &db, value, event_id.clone(), @@ -659,59 +661,16 @@ pub async fn send_transaction_message_route<'a>( } }; - let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events( - &db, - server_name, - &pub_key_map, - &pdu.prev_events, - &mut auth_cache, - ) - .await - { - Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), - _ => None, + let single_prev = if previous.len() == 1 { + previous.first().cloned() + } else { + None }; - // [auth_cache] At this point we have the auth chain of the incoming event. - let mut event_map: state_res::EventMap> = auth_cache - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); + // 6. persist the event as an outlier. + db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; - // Check that the event passes auth based on the auth_events - let is_authed = state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - Error::Conflict( - "Auth event not found, event failed recursive auth checks.", - ) - }) - }) - .collect::>>()?, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))?; - - if !is_authed { - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has failed auth check with auth events".into()), - ); - continue; - } - // End of step 4. - - // Step 5. event passes auth based on state at the event + // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -757,9 +716,7 @@ pub async fn send_transaction_message_route<'a>( &res.auth_chain_ids, &mut auth_cache, ) - .await? - .into_iter() - .collect(), + .await?, ) } Err(_) => { @@ -771,10 +728,11 @@ pub async fn send_transaction_message_route<'a>( } }; + // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous.clone(), + single_prev.clone(), &state_at_event, None, // TODO: third party invite ) @@ -787,10 +745,34 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - // End of step 5. + // End of step 10. + + // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(pdu.room_id())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + single_prev.clone(), + ¤t_state, + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail, we add the event as an outlier. + resolved_map.insert( + pdu.event_id().clone(), + Err("Event has been soft failed".into()), + ); + }; // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids( + let fork_states = match forward_extremities( &db, &pdu, server_name, @@ -806,7 +788,9 @@ pub async fn send_transaction_message_route<'a>( } }; - // Step 6. event passes auth based on state of all forks and current room state + // 13. 
start state-res with all previous forward extremities minus the ones that are in + // the prev_events of this event plus the new one created by this event and use + // the result as the new room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() @@ -852,6 +836,7 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); + let mut event_map = EventMap::new(); // Add everything we will need to event_map event_map.extend( auth_events @@ -904,7 +889,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous, + single_prev, &state_at_forks, None, ) @@ -925,14 +910,19 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// An async function that can recursively calls itself. +type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; + /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. Passes signature checks, otherwise event is dropped. -/// 3. Passes hash checks, otherwise it is redacted before being processed further. -/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). -/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +/// 2. check content hash, redact if doesn't match +/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events +/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" +/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. if not timeline event: stop +/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fn validate_event<'a>( db: &'a Database, value: CanonicalJsonObject, @@ -940,9 +930,24 @@ fn validate_event<'a>( pub_key_map: &'a PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> Pin> + 'a + Send>> { +) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { Box::pin(async move { - let mut val = signature_and_hash_check(&pub_key_map, value)?; + let mut val = + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value + } + } + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -959,11 +964,42 @@ fn validate_event<'a>( .await .map_err(|_| "Event failed auth chain check".to_string())?; - db.rooms - .append_pdu_outlier(pdu.event_id(), &pdu) + let pdu = Arc::new(pdu.clone()); + + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + .await .map_err(|e| e.to_string())?; - Ok(pdu) + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + if previous.len() == 1 { + previous.first().cloned() + } else { + None + }, + &pdu.auth_events + .iter() + .map(|id| { + auth_cache + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + }) + }) + .collect::, _>>()?, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed".to_string())?; + + if !is_authed { + return Err("Event has failed auth check with auth events".to_string()); + } + + Ok((pdu, previous)) }) } @@ -990,7 +1026,10 @@ async fn fetch_check_auth_events( let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await - .map(|mut vec| vec.remove(0))?; + .map(|mut vec| { + vec.pop() + .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) + })??; stack.extend(ev.auth_events()); auth_cache.insert(ev.event_id().clone(), ev); @@ -1028,11 +1067,12 @@ async fn fetch_events( { Ok(res) => { let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - Arc::new(pdu) + pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, @@ -1063,31 +1103,11 @@ async fn fetch_signing_keys( } } -fn signature_and_hash_check( - pub_key_map: &ruma::signatures::PublicKeyMap, - value: CanonicalJsonObject, -) -> std::result::Result { - Ok( - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - error!("CONTENT HASH FAILED"); - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } - Err(_e) => { - return Err("Signature verification failed".to_string()); - } - }, - ) -} - -async fn forward_extremity_ids( +/// Gather all state snapshots needed to resolve the current state of the room. +/// +/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) +async fn forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, @@ -1102,6 +1122,8 @@ async fn forward_extremity_ids( } } + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; let mut fork_states = vec![]; for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { @@ -1109,6 +1131,10 @@ async fn forward_extremity_ids( .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); + + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } let state = db .rooms .state_full(&pdu.room_id, &state_hash)? 
@@ -1144,6 +1170,17 @@ async fn forward_extremity_ids( } } + // This guarantees that our current room state is included + if !includes_current_state && current_hash.is_some() { + fork_states.push( + db.rooms + .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(), + ) + } + Ok(fork_states) } From 7309b2fba99b5c16704829a2729aad20f035ddc8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:08:59 -0500 Subject: [PATCH 0427/1727] WIP gather and update forward extremities --- src/database/rooms.rs | 18 ++++++ src/pdu.rs | 21 ++++++- src/server_server.rs | 138 ++++++++++++++++++++++++++++-------------- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 325a2e2..665e328 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,6 +397,24 @@ impl Rooms { Ok(events) } + /// Force an update to the leaves of a room. + pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + for event_id in event_ids.iter() { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + /// Replace the leaves of a room with a new event. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/pdu.rs b/src/pdu.rs index 340ddee..e38410f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -284,6 +284,25 @@ impl state_res::Event for PduEvent { } } +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { + self.event_id == other.event_id + } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_id.partial_cmp(&other.event_id) + } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { + self.event_id.cmp(&other.event_id) + } +} + /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. 
diff --git a/src/server_server.rs b/src/server_server.rs index 0eb7d6f..16a1a8e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ - client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -25,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -600,31 +599,21 @@ pub async fn send_transaction_message_route<'a>( let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(sig) = value.get("signatures") { - match sig { - CanonicalJsonValue::Object(entity) => { - for key in entity.keys() { - // TODO: save this in a DB maybe... - // fetch the public signing key - let origin = <&ServerName>::try_from(key.as_str()).unwrap(); - let keys = fetch_signing_keys(&db, origin).await?; - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } - } - _ => { - resolved_map.insert( - event_id, - Err("`signatures` is not a JSON object".to_string()), - ); - continue; - } - } + if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { + let sender = + UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); + let origin = sender.server_name(); + + // TODO: this could fail or the server not respond... + let keys = fetch_signing_keys(&db, origin).await?; + + pub_key_map.insert( + origin.to_string(), + keys.into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); } else { resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); continue; @@ -642,8 +631,9 @@ pub async fn send_transaction_message_route<'a>( // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events // 7. if not timeline event: stop - // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (pdu, previous) = match validate_event( + // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // the events found in step 8 can be authed/resolved and appended to the DB + let (pdu, previous): (_, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -670,6 +660,9 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all + // the checks in this list starting at 1. These are not timeline events. + // // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db @@ -771,8 +764,12 @@ pub async fn send_transaction_message_route<'a>( ); }; - // Gather the forward extremities and resolve - let fork_states = match forward_extremities( + // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res + // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) + // + // calculate_forward_extremities takes care of adding the current state if not already in the state sets + // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. + let (mut fork_states, fork_ids) = match calculate_forward_extremities( &db, &pdu, server_name, @@ -788,6 +785,12 @@ pub async fn send_transaction_message_route<'a>( } }; + // add the incoming events to the mix of state snapshots + // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets + fork_states.insert(state_at_event.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -901,7 +904,9 @@ pub async fn send_transaction_message_route<'a>( Err("Event has been soft failed".into()), ); } else { - append_state(&db, &pdu)?; + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_state(&db, &pdu, &fork_ids)?; + // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } @@ -1106,25 +1111,52 @@ async fn fetch_signing_keys( /// Gather all state snapshots needed to resolve the current state of the room. /// /// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) -async fn forward_extremities( +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). +/// +/// The state snapshot of the incoming event __needs__ to be added to the resulting list. +async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, -) -> Result>>> { +) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut is_incoming_leaf = true; + // Make sure the incoming event is not already a forward extremity + // FIXME: I think this could happen if different servers send us the same event?? + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + + // If the incoming event is already referenced by an existing event + // then do nothing - it's not a candidate to be a new extremity if + // it has been referenced. + if already_referenced(db, pdu)? { + is_incoming_leaf = false; + // This event has been dealt with already?? + } + + // TODO: + // [dendrite] Checks if any other leaves have been referenced and removes them + // but as long as we update the pdu leaves here and for events on our server this + // should not be possible. 
+ + // Remove any forward extremities that are referenced by this incoming events prev_events for incoming_leaf in &pdu.prev_events { - if !current_leaves.contains(incoming_leaf) { - current_leaves.push(incoming_leaf.clone()); + if current_leaves.contains(incoming_leaf) { + if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { + current_leaves.remove(pos); + } } } let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; - let mut fork_states = vec![]; + let mut fork_states = BTreeSet::new(); for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db @@ -1142,8 +1174,10 @@ async fn forward_extremities( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - fork_states.push(state); + fork_states.insert(state); } else { + error!("Forward extremity not found... {}", id); + let res = db .sending .send_federation_request( @@ -1166,25 +1200,37 @@ async fn forward_extremities( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - fork_states.push(state); + fork_states.insert(state); } } + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.push( + fork_states.insert( db.rooms .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(), - ) + ); } - Ok(fork_states) + Ok((fork_states, dbg!(current_leaves))) } -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { +/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) +fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { + Ok(false) +} + +fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1195,13 +1241,17 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, + pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db, )?; + // If we update the room leaves after calling append_pdu it will stick since append_pdu + // calls replace_pdu_leaves with only the given event. 
+ db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; + // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 835cf80acd6be7d99b5bdc83c7f891dc167901d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 11 Feb 2021 13:16:14 +0100 Subject: [PATCH 0428/1727] fix: pushers --- src/database/pusher.rs | 62 ++++++++++++++++++++++++++--------------- src/database/rooms.rs | 6 +++- src/database/sending.rs | 55 +++++++++++------------------------- src/server_server.rs | 3 +- 4 files changed, 63 insertions(+), 63 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index c4f5801..336ef57 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -35,7 +35,10 @@ impl PushData { } pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { + println!("CCCCCCCCCCCCCCCCCCCCCc"); + dbg!(&pusher); let mut key = sender.as_bytes().to_vec(); + key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); // There are 2 kinds of pushers but the spec says: null deletes the pusher. @@ -48,7 +51,7 @@ impl PushData { } self.senderkey_pusher.insert( - key, + dbg!(key), &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), )?; @@ -56,11 +59,16 @@ impl PushData { } pub fn get_pusher(&self, sender: &UserId) -> Result> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + self.senderkey_pusher - .scan_prefix(sender.as_bytes()) + .scan_prefix(dbg!(prefix)) .values() - .map(|push: std::result::Result| { - let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + .map(|push| { + println!("DDDDDDDDDDDDDDDDDDDDDDDDDD"); + let push = + dbg!(push).map_err(|_| Error::bad_database("Invalid push bytes in db."))?; Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) @@ -85,14 +93,17 @@ where Error::BadServerResponse("Invalid destination") })?; - let mut reqwest_request = reqwest::Request::try_from(http_request) + let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); // TODO: we could keep this very short and let expo backoff do it's thing... 
- *reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + let reqwest_response = globals + .reqwest_client() + .execute(dbg!(reqwest_request)) + .await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -154,6 +165,12 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { + if let Some(msgtype) = pdu.content.get("msgtype").and_then(|b| b.as_str()) { + if msgtype == "m.notice" { + return Ok(()); + } + } + for rule in ruleset.into_iter() { // TODO: can actions contain contradictory Actions if rule @@ -165,7 +182,7 @@ pub async fn send_push_notice( continue; } - match rule.rule_id.as_str() { + match dbg!(rule.rule_id.as_str()) { ".m.rule.master" => {} ".m.rule.suppress_notices" => { if pdu.kind == EventType::RoomMessage @@ -437,7 +454,8 @@ async fn send_notice( db: &Database, name: &str, ) -> Result<()> { - let (http, _emails): (Vec<&Pusher>, _) = pushers + println!("BBBBBBBBBBBBBBBr"); + let (http, _emails): (Vec<&Pusher>, _) = dbg!(pushers) .iter() .partition(|pusher| pusher.kind == Some(PusherKind::Http)); @@ -445,7 +463,7 @@ async fn send_notice( // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. can pusher/devices have conflicting formats - for pusher in http { + for pusher in dbg!(http) { let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); let url = if let Some(url) = pusher.data.url.as_ref() { url @@ -484,12 +502,12 @@ async fn send_notice( if event_id_only { error!("SEND PUSH NOTICE `{}`", name); - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -512,12 +530,12 @@ async fn send_notice( notifi.room_name = room_name.as_deref(); error!("SEND PUSH NOTICE Full `{}`", name); - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ac7d27d..0f02e33 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1358,6 +1358,7 @@ impl Rooms { self.alias_roomid .insert(alias.alias(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(aliasid, &*alias.alias())?; } else { @@ -1370,7 +1371,10 @@ impl Rooms { "Alias does not exist.", ))?; - for key in self.aliasid_alias.scan_prefix(room_id).keys() { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for key in self.aliasid_alias.scan_prefix(prefix).keys() { self.aliasid_alias.remove(key?)?; } } diff --git a/src/database/sending.rs b/src/database/sending.rs index ce81e8c..cbe9ffa 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,7 +1,7 @@ use std::{ collections::HashMap, convert::TryFrom, - fmt::{Debug, Display, Formatter}, + fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, }; @@ -25,16 +25,6 @@ pub enum 
OutgoingKind { Normal(Box), } -impl Display for OutgoingKind { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - OutgoingKind::Appservice(name) => f.write_str(name.as_str()), - OutgoingKind::Normal(name) => f.write_str(name.as_str()), - OutgoingKind::Push(_) => f.write_str("Push notification TODO"), - } - } -} - #[derive(Clone)] pub struct Sending { /// The state for a given state hash. @@ -143,7 +133,7 @@ impl Sending { } } Err((outgoing_kind, e)) => { - info!("Couldn't send transaction to {}\n{}", outgoing_kind, e); + info!("Couldn't send transaction to {:?}\n{}", outgoing_kind, e); let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(serv) => { let mut p = b"+".to_vec(); @@ -278,6 +268,8 @@ impl Sending { key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; + println!("AAAA"); + Ok(()) } @@ -306,7 +298,7 @@ impl Sending { pdu_ids: Vec, db: &Database, ) -> std::result::Result { - match kind { + match dbg!(kind) { OutgoingKind::Appservice(server) => { let pdu_jsons = pdu_ids .iter() @@ -364,25 +356,12 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - for pdu in &pdus { + for pdu in dbg!(&pdus) { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - // Skip events that came from the admin room - if db - .rooms - .room_aliases(&pdu.room_id) - .any(|alias| match alias { - Ok(a) => a.as_str().starts_with("#admins:"), - _ => false, - }) - || pdu.sender.as_str().starts_with("@conduit:") - { - continue; - } - for user in db.rooms.room_members(&pdu.room_id) { let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; @@ -391,9 +370,7 @@ impl Sending { continue; } - let pushers = db - .pusher - .get_pusher(&user) + let pushers = dbg!(db.pusher.get_pusher(&user)) .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; let rules_for_user = db @@ -426,15 +403,17 @@ impl Sending { uint!(0) }; - crate::database::pusher::send_push_notice( - &user, - unread, - &pushers, - rules_for_user, - pdu, - db, + dbg!( + crate::database::pusher::send_push_notice( + &user, + unread, + &pushers, + rules_for_user, + pdu, + db, + ) + .await ) - .await .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; } } diff --git a/src/server_server.rs b/src/server_server.rs index 03952eb..a8946a9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -17,7 +17,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, From aa7b6b6e09ee2ea4af0fb5b6a5b7e0fd51ddd11b Mon Sep 17 00:00:00 2001 From: Valkum Date: Mon, 22 Feb 2021 19:06:10 +0100 Subject: [PATCH 0429/1727] Sync paths with CI pipeline due to dockerignore# As the docker ignore file includes the target dir, content in this dir is no accessible to the docker daemon. 
We circumvent this by providing the build artifact in a dir called cached_dir --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 24ee9ea..370db7c 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . -RUN test -e target/release/conduit || cargo build --release --offline +RUN test -e cached_target/release/conduit || cargo build --release FROM valkum/docker-rust-ci:latest WORKDIR /workdir From 66af1ff6958d2096d549b98d2c830a09652d2f33 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:36:44 -0500 Subject: [PATCH 0430/1727] Update ruma and rocket to latest git rev and tokio to 1.0 Ruma updated the event signing validation code and there was a dep resolving failure with serde rocket and tokio so I updated rocket latest and tokio 1.0 to fix. --- Cargo.lock | 453 ++++++++++++++++++++++++-------------- Cargo.toml | 6 +- src/client_server/sync.rs | 3 +- 3 files changed, 294 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a79dbe..0561d0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -148,15 +148,15 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "5a4bad0c5981acc24bc09e532f35160f952e35422603f0563cd7a73c2c2e65a0" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -164,6 +164,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cc" version = "1.0.66" @@ -191,7 +197,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -225,7 +231,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.0.2", "trust-dns-resolver", ] @@ -247,7 +253,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time 0.2.24", "version_check", ] @@ -402,9 +408,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "a3add2ec7727c9584a0ce75ee3c0f54f0ab692c7934450cc3a0287251e3a4f06" dependencies = [ "pear", "serde", @@ -472,9 +478,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = 
"da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -487,9 +493,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -497,15 +503,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -514,15 +520,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -532,24 +538,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -558,7 +564,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -593,7 +599,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -624,7 +630,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", 
"futures-sink", @@ -632,8 +638,28 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.24", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.0.2", + "tokio-util 0.6.1", "tracing", "tracing-futures", ] @@ -655,9 +681,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -675,11 +701,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -690,7 +716,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] @@ -712,19 +748,43 @@ version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.4", "socket2", - "tokio", + "tokio 0.2.24", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.4", + "socket2", + "tokio 1.0.2", "tower-service", "tracing", "want", @@ -736,10 +796,10 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.9", "native-tls", - "tokio", + "tokio 0.2.24", "tokio-tls", ] @@ -897,15 +957,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = 
"0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -918,9 +978,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] @@ -1015,21 +1075,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1044,6 +1106,16 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -1073,6 +1145,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1210,7 +1291,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi 0.3.9", ] @@ -1272,11 +1353,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.4", ] [[package]] @@ -1292,9 +1373,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1390,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1360,9 +1441,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1416,9 +1497,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1488,6 +1569,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1495,7 +1585,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] @@ -1521,9 +1611,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1533,9 +1623,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1553,13 +1643,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.9", "hyper-tls", "ipnet", "js-sys", @@ -1569,10 +1659,10 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.24", "tokio-tls", "url", "wasm-bindgen", @@ -1609,7 +1699,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1628,8 +1718,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "ubyte", "version_check", "yansi", @@ -1638,7 +1728,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1650,23 +1740,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.2", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "tokio-rustls", "uncased", "unicode-xid", @@ -1675,8 +1766,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "js_int", @@ -1693,8 +1784,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "http", "percent-encoding", @@ -1708,8 +1799,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1719,8 +1810,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "ruma-api", "ruma-common", @@ -1733,8 +1824,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "http", @@ -1752,8 +1843,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "maplit", @@ -1765,8 +1856,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-common", @@ -1779,8 +1870,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1790,8 +1881,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-api", @@ -1805,11 +1896,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "paste", - "rand 0.8.1", + "rand 0.8.2", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1819,8 +1910,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro2", "quote", @@ -1830,16 +1921,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" -dependencies = [ - "serde", -] +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" [[package]] name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "form_urlencoded", "itoa", @@ -1851,8 +1939,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1862,8 +1950,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "base64 0.13.0", "ring", @@ -1902,11 +1990,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log", "ring", "sct", @@ -1985,18 +2073,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -2088,9 +2176,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" @@ -2127,7 +2215,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" +source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" dependencies = [ "itertools", "maplit", @@ -2200,14 +2288,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.2", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] @@ -2234,29 +2322,28 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" dependencies = [ "lazy_static", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" 
+version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" dependencies = [ "const_fn", "libc", @@ -2311,28 +2398,41 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.11", + "slab", +] + +[[package]] +name = "tokio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.7", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.4", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2341,14 +2441,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.0.2", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "rustls", - "tokio", - "webpki", + "pin-project-lite 0.2.4", + "tokio 1.0.2", ] [[package]] @@ -2358,7 +2468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.24", ] [[package]] @@ -2367,12 +2477,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio", + "tokio 0.2.24", +] + +[[package]] +name = "tokio-util" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.0.2", + "tokio-stream", ] [[package]] @@ -2398,7 +2523,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -2449,7 +2574,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "url", ] @@ -2469,7 +2594,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "trust-dns-proto", ] @@ 
-2584,9 +2709,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" diff --git a/Cargo.toml b/Cargo.toml index fdcc4ec..dd37838 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.0.2", features = ["macros", "time"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 494c773..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -698,7 +698,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} From 88c60605b457d0163b0b8d427e51cd07b0dd1f4c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:41:38 -0500 Subject: [PATCH 0431/1727] Add ability to update room leaves with multiple eventIds Tokio seems a bit broken with Rocket... 
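The heart of this patch is that the room-leaf bookkeeping now takes a whole slice of event IDs
instead of a single event, so a PDU can reference several prev_events at once. As a rough
illustration of the idea (not Conduit's exact code), a minimal sketch against a bare sled::Tree
might look like the following, assuming the same `roomid_pduleaves` tree and 0xff key separator
used in the diff below, with plain strings standing in for ruma's RoomId/EventId types:

    // Hedged sketch only: replace a room's forward extremities with a set of
    // event IDs, storing one sled key per new leaf.
    fn replace_pdu_leaves(
        roomid_pduleaves: &sled::Tree,
        room_id: &str,
        event_ids: &[String],
    ) -> sled::Result<()> {
        // Keys are namespaced as "<room_id>\xff<event_id>".
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        // Drop every existing leaf for this room ...
        for key in roomid_pduleaves.scan_prefix(&prefix).keys() {
            roomid_pduleaves.remove(key?)?;
        }

        // ... then store one key per new leaf, so an event can point at several
        // prev_events (a fork in the DAG) rather than exactly one predecessor.
        for event_id in event_ids {
            let mut key = prefix.clone();
            key.extend_from_slice(event_id.as_bytes());
            roomid_pduleaves.insert(key, event_id.as_bytes())?;
        }

        Ok(())
    }

The change below threads these leaves through append_pdu via a new `leaves: &[EventId]`
parameter, so each caller decides whether the new event alone or a larger set of extremities
becomes the room's leaf set.
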
--- src/client_server/membership.rs | 2 ++ src/database/rooms.rs | 37 ++++++++++++++++----------------- src/server_server.rs | 7 ++----- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70bb480..1159185 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -665,6 +665,8 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + // TODO: can we simplify the DAG or should we copy it exactly?? + &pdu.prev_events, &db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 665e328..a3f3aab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,8 +397,11 @@ impl Rooms { Ok(events) } - /// Force an update to the leaves of a room. - pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// `prev_events`. + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -415,21 +418,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room with a new event. - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; - } - - prefix.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; - - Ok(()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -465,6 +453,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, + leaves: &[EventId], db: &Database, ) -> Result<()> { // Make unsigned fields correct. This is not properly documented in the spec, but state @@ -497,7 +486,7 @@ impl Rooms { // We no longer keep this pdu as an outlier self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + self.replace_pdu_leaves(&pdu.room_id, leaves)?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -943,7 +932,17 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; + // remove the + self.append_pdu( + &pdu, + pdu_json, + count, + pdu_id.clone().into(), + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + &[pdu.event_id.clone()], + db, + )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist diff --git a/src/server_server.rs b/src/server_server.rs index 16a1a8e..f782ad5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -1245,13 +1245,10 @@ fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> R utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + &new_room_leaves, &db, )?; - // If we update the room leaves after calling append_pdu it will stick since append_pdu - // calls replace_pdu_leaves with only the given event. - db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; - // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 602edfd8499726e21b51eaa4a4a8927381c876c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 0432/1727] feature: push rule settings --- src/client_server/room.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 631d87b..4adc335 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -220,12 +220,8 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_user, - &room_id, - &db, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; } // 6. 
Events implied by name and topic From 05a4c0b325f1b8f1c7d3d5dbb56ee22b6e8af858 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 24 Jan 2021 20:18:40 -0500 Subject: [PATCH 0433/1727] Finish forward extremity gathering, use resolved state as new snapshot --- src/server_server.rs | 147 +++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 67 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f782ad5..e733d24 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,6 +18,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -483,34 +484,6 @@ pub async fn get_public_rooms_route( .into()) } -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum PrevEvents { - Sequential(T), - Fork(Vec), -} - -impl IntoIterator for PrevEvents { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Sequential(item) => vec![item].into_iter(), - Self::Fork(list) => list.into_iter(), - } - } -} - -impl PrevEvents { - pub fn new(id: &[T]) -> Self { - match id { - [] => panic!("All events must have previous event"), - [single_id] => Self::Sequential(single_id.clone()), - rest => Self::Fork(rest.to_vec()), - } - } -} - #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -605,8 +578,16 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - // TODO: this could fail or the server not respond... - let keys = fetch_signing_keys(&db, origin).await?; + let keys = match fetch_signing_keys(&db, origin).await { + Ok(keys) => keys, + Err(_) => { + resolved_map.insert( + event_id, + Err("Could not find signing keys for this server".to_string()), + ); + continue; + } + }; pub_key_map.insert( origin.to_string(), @@ -769,11 +750,12 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, fork_ids) = match calculate_forward_extremities( + let (mut fork_states, extremities) = match calculate_forward_extremities( &db, &pdu, server_name, &pub_key_map, + current_state, &mut auth_cache, ) .await @@ -791,6 +773,7 @@ pub async fn send_transaction_message_route<'a>( let fork_states = fork_states.into_iter().collect::>(); + let mut update_state = false; // 13. 
start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -800,11 +783,12 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // We do need to force an update to this rooms state + update_state = true; + // TODO: remove this is for current debugging Jan, 15 2021 let mut number_fetches = 0_u32; let mut auth_events = vec![]; - // this keeps track if we error so we can break out of these inner loops - // to continue on with the incoming PDU's for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { @@ -821,14 +805,12 @@ pub async fn send_transaction_message_route<'a>( .await .map(|mut vec| { number_fetches += 1; - vec.remove(0) + vec.pop() }) { - Ok(aev) => aev, - Err(_) => { - resolved_map.insert( - event_id.clone(), - Err("Event has been soft failed".into()), - ); + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); continue 'main_pdu_loop; } }, @@ -839,20 +821,19 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); - let mut event_map = EventMap::new(); // Add everything we will need to event_map - event_map.extend( + auth_cache.extend( auth_events .iter() .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) .flatten(), ); - event_map.extend( + auth_cache.extend( incoming_auth_events .into_iter() .map(|pdu| (pdu.event_id().clone(), pdu)), ); - event_map.extend( + auth_cache.extend( state_at_event .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), @@ -873,7 +854,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut event_map, + &mut auth_cache, ) { Ok(res) => res .into_iter() @@ -905,14 +886,23 @@ pub async fn send_transaction_message_route<'a>( ); } else { // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_state(&db, &pdu, &fork_ids)?; + append_incoming_pdu( + &db, + &pdu, + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1029,6 +1019,7 @@ async fn fetch_check_auth_events( continue; } + // TODO: Batch these async calls so we can wait on multiple at once let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await .map(|mut vec| { @@ -1119,6 +1110,7 @@ async fn calculate_forward_extremities( pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, + current_state: BTreeMap<(EventType, Option), Arc>, auth_cache: &mut EventMap>, ) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; @@ -1126,17 +1118,13 @@ async fn calculate_forward_extremities( let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- if current_leaves.contains(pdu.event_id()) { - is_incoming_leaf = false; - // Not sure what to do here - } - + // // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if already_referenced(db, pdu)? { + if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { is_incoming_leaf = false; - // This event has been dealt with already?? + // Not sure what to do here } // TODO: @@ -1213,29 +1201,54 @@ async fn calculate_forward_extremities( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.insert( - db.rooms - .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(), - ); + fork_states.insert(current_state); } Ok((fork_states, dbg!(current_leaves))) } -/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) -fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { - Ok(false) -} - -fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { +/// Update the room state to be the resolved state and add the fully auth'ed event +/// to the DB. +/// +/// TODO: If we force the state we need to validate all events in that state +/// any events we fetched from another server need to be fully verified? +fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: Option>>, +) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let new = state + .into_iter() + .map(|((ev, k), pdu)| { + Ok(( + ( + ev, + k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + db.rooms + .get_pdu_id(pdu.event_id()) + .ok() + .flatten() + .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? + .to_vec(), + )) + }) + .collect::>()?; + + info!("Force update of state for {:?}", pdu); + + db.rooms.force_state(pdu.room_id(), new, &db.globals)?; + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 68f60933e6af03889440a5f9c58d10ce67359f21 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:33:41 -0500 Subject: [PATCH 0434/1727] Resolved state is set as the current room state on incoming events --- src/server_server.rs | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e733d24..14a1d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -870,36 +870,20 @@ pub async fn send_transaction_message_route<'a>( } }; - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_incoming_pdu( + &db, &pdu, - single_prev, - &state_at_forks, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. 
- resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - } else { - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( - &db, - &pdu, - &extremities, - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; - // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); - } + // Event has passed all auth/stateres checks + resolved_map.insert(pdu.event_id().clone(), Ok(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -1210,8 +1194,7 @@ async fn calculate_forward_extremities( /// Update the room state to be the resolved state and add the fully auth'ed event /// to the DB. /// -/// TODO: If we force the state we need to validate all events in that state -/// any events we fetched from another server need to be fully verified? +/// TODO: Since all these events passed state resolution can we trust them to add fn append_incoming_pdu( db: &Database, pdu: &PduEvent, From e0453e2348b5a77ea9ac0b5e40296a303027875c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:50:45 -0500 Subject: [PATCH 0435/1727] Cleanup dbg prints and error messages --- src/server_server.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 14a1d0c..20d76f1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -496,7 +496,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - dbg!(&*body); + // dbg!(&*body); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -1148,8 +1148,6 @@ async fn calculate_forward_extremities( fork_states.insert(state); } else { - error!("Forward extremity not found... {}", id); - let res = db .sending .send_federation_request( @@ -1188,7 +1186,7 @@ async fn calculate_forward_extremities( fork_states.insert(current_state); } - Ok((fork_states, dbg!(current_leaves))) + Ok((fork_states, current_leaves)) } /// Update the room state to be the resolved state and add the fully auth'ed event From 6fd3e1d1ddb2d9707f1713e962ee350a85e07795 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 11:20:33 -0500 Subject: [PATCH 0436/1727] Append state event that pass resolution to DB, update to tokio 1.1 --- Cargo.lock | 630 +++++++++++++------------------------- Cargo.toml | 12 +- src/client_server/sync.rs | 2 +- src/database.rs | 3 +- src/database/globals.rs | 14 +- src/server_server.rs | 113 +++++-- 6 files changed, 298 insertions(+), 476 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0561d0a..c7381be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" @@ -48,6 +33,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.42" @@ -76,7 +82,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,32 +91,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.3", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -142,9 +128,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" [[package]] name = "bytemuck" @@ -158,12 +144,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -176,12 +156,6 @@ version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ 
-211,7 +185,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -279,7 +253,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -288,7 +262,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const_fn", "crossbeam-utils", "lazy_static", @@ -303,10 +277,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -364,7 +344,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -391,7 +371,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -457,25 +437,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.12" @@ -564,7 +528,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -586,20 +550,20 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.1+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -612,12 +576,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" 
version = "0.3.0" @@ -626,11 +584,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -696,7 +654,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -705,18 +663,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -744,11 +702,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -792,15 +750,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.9", + "bytes", + "hyper", "native-tls", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", ] [[package]] @@ -853,16 +811,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -873,7 +822,7 @@ checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -885,9 +834,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] @@ -900,18 +849,15 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -925,30 +871,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" -dependencies = [ - "base64 0.12.3", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -957,9 +879,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "linked-hash-map" @@ -978,11 +900,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1033,16 +955,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1052,35 +964,6 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.7.7" @@ -1089,21 +972,19 @@ checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ "libc", "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "socket2", + "winapi", ] [[package]] @@ -1135,14 +1016,12 @@ dependencies = [ ] [[package]] -name = "net2" -version = "0.2.37" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1216,12 +1095,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" - [[package]] name = "once_cell" version = "1.5.2" @@ -1235,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1288,12 +1161,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.1.57", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1382,12 +1255,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1415,7 +1282,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1497,9 +1364,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1542,7 +1409,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -1633,17 +1500,17 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", @@ -1656,14 +1523,13 @@ dependencies = [ "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", 
"serde", "serde_urlencoded", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1693,7 +1559,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1752,7 +1618,7 @@ dependencies = [ "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "ref-cast", "smallvec", "state", @@ -1767,7 +1633,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "js_int", @@ -1785,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "http", "percent-encoding", @@ -1800,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1811,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "ruma-api", "ruma-common", @@ -1825,7 +1691,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "http", @@ -1844,7 +1710,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "maplit", @@ -1857,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-common", @@ -1871,7 +1737,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = 
"git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1882,7 +1748,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-api", @@ -1897,10 +1763,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "paste", - "rand 0.8.2", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1911,7 +1777,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro2", "quote", @@ -1922,12 +1788,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "form_urlencoded", "itoa", @@ -1940,7 +1806,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1951,9 +1817,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1967,18 +1833,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1994,7 +1854,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.13.0", + "base64", "log", "ring", "sct", @@ -2014,7 +1874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2073,18 +1933,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2186,9 +2046,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2215,15 +2075,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" +source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" dependencies = [ "itertools", + "log", "maplit", "ruma", "serde", "serde_json", "thiserror", - "tracing", ] [[package]] @@ -2277,9 +2137,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2292,12 +2152,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.2", + "rand 0.8.3", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2322,28 +2182,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.1.43" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2351,7 +2201,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2379,9 +2229,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2394,38 +2244,21 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.11", - "slab", -] - -[[package]] -name = "tokio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", - "mio 0.7.7", + "mio", "num_cpus", "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2439,6 +2272,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2457,46 +2300,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.0.2", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.24", + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" dependencies = [ - "bytes 0.5.6", + "async-stream", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "tokio-util" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" -dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - 
"pin-project-lite 0.2.4", - "tokio 1.0.2", + "pin-project-lite", + "tokio", "tokio-stream", ] @@ -2511,9 +2331,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2521,24 +2341,11 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", + "cfg-if", + "pin-project-lite", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" version = "0.1.17" @@ -2560,18 +2367,22 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", "tokio 0.2.24", @@ -2580,17 +2391,17 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -2622,15 +2433,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2709,17 +2511,17 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2727,9 +2529,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" 
+version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2742,11 +2544,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2754,9 +2556,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2764,9 +2566,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2777,15 +2579,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2803,9 +2605,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2813,12 +2615,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2829,12 +2625,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2853,7 +2643,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] 
[[package]] @@ -2862,17 +2652,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index dd37838..de6a966 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,24 +18,24 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.0.2", features = ["macros", "time"] } +tokio = { version = "1.1.0", features = ["macros", "time", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" @@ -50,7 +50,7 @@ rand = "0.7.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.0" # Used for conduit::Error type thiserror = "1.0.22" # Used to generate thumbnails for images @@ -60,7 +60,7 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices regex = "1.4.2" # jwt jsonwebtokens diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6cd518d..97b6ad2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ 
pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay => {} + _ = &mut delay, if delay.is_elapsed() => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index ea65d6f..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -106,8 +106,7 @@ impl Database { db.open_tree("global")?, db.open_tree("servertimeout_signingkey")?, config, - ) - .await?, + )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 7eb162b..2ed6a9f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,11 +27,7 @@ pub struct Globals { } impl Globals { - pub async fn load( - globals: sled::Tree, - server_keys: sled::Tree, - config: Config, - ) -> Result { + pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -83,11 +79,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, servertimeout_signingkey: server_keys, diff --git a/src/server_server.rs b/src/server_server.rs index 20d76f1..adf3c58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -25,7 +25,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryFrom, fmt::Debug, future::Future, @@ -839,7 +839,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - match state_res::StateResolution::resolve( + let res = match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, &fork_states @@ -856,10 +856,7 @@ pub async fn send_transaction_message_route<'a>( .collect(), &mut auth_cache, ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), + Ok(res) => res, Err(_) => { resolved_map.insert( pdu.event_id().clone(), @@ -867,7 +864,29 @@ pub async fn send_transaction_message_route<'a>( ); continue 'main_pdu_loop; } + }; + let mut resolved = BTreeMap::new(); + for (k, id) in res { + // We should know of the event but just incase + let pdu = match auth_cache.get(&id) { + Some(pdu) => pdu.clone(), + None => { + match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) + .await + .map(|mut vec| vec.pop()) + { + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); + continue 'main_pdu_loop; + } + } + } + }; + resolved.insert(k, pdu); } + resolved }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). 
@@ -1199,37 +1218,67 @@ fn append_incoming_pdu( new_room_leaves: &[EventId], state: Option>>, ) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid.to_vec(), + ); + } + None => { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // TODO: can we use are current state if we just add this event to the end of our + // pduid_pdu tree?? + let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + + db.rooms.append_pdu( + &*pdu, + utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &new_room_leaves, + &db, + )?; + // TODO: is this ok... + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pdu_id.to_vec(), + ); + } + } + } + + info!("Force update of state for {:?}", pdu); + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + } + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let new = state - .into_iter() - .map(|((ev, k), pdu)| { - Ok(( - ( - ev, - k.ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - db.rooms - .get_pdu_id(pdu.event_id()) - .ok() - .flatten() - .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? - .to_vec(), - )) - }) - .collect::>()?; - - info!("Force update of state for {:?}", pdu); - - db.rooms.force_state(pdu.room_id(), new, &db.globals)?; - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 6661de50ab5069838f60893afea9a421f6f034e3 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 21:45:33 -0500 Subject: [PATCH 0437/1727] Fix and integrate outlier tree, build forks after adding event to DB --- src/database.rs | 2 +- src/database/rooms.rs | 95 +++++++----- src/server_server.rs | 334 +++++++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 168 deletions(-) diff --git a/src/database.rs b/src/database.rs index b841ab9..a9cc362 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3f3aab..d459aee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,9 +27,10 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::Duration, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, sending::Sending}; /// The unique identifier of each state group. /// @@ -67,7 +68,7 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) pduid_outlierpdu: sled::Tree, } impl Rooms { @@ -85,13 +86,20 @@ impl Rooms { let mut pduid = room_id.as_bytes().to_vec(); pduid.push(0xff); pduid.extend_from_slice(&pduid_short?); - self.pduid_pdu.get(&pduid)?.map_or_else( - || Err(Error::bad_database("Failed to find PDU in state snapshot.")), - |b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }, - ) + match self.pduid_pdu.get(&pduid)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")), + None => self + .pduid_outlierpdu + .get(pduid)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + } }) .filter_map(|r| r.ok()) .map(|pdu| { @@ -137,12 +145,20 @@ impl Rooms { Ok::<_, Error>(Some(( pdu_id.clone().into(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + match self.pduid_pdu.get(&pdu_id)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + None => self + .pduid_outlierpdu + .get(pdu_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + }, ))) }) } else { @@ -307,9 +323,12 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? 
{ + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) @@ -328,13 +347,17 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } + /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { @@ -420,23 +443,27 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { + self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) + } else { + Ok(None) + } } /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + + // we need to be able to find it by event_id + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let res = self - .eventid_outlierpdu + .pduid_outlierpdu .insert( - event_id.as_bytes(), + pdu_id, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -484,7 +511,9 @@ impl Rooms { } // We no longer keep this pdu as an outlier - self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? { + self.pduid_outlierpdu.remove(id)?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index adf3c58..ad0a1a4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -614,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (_, Vec>) = match validate_event( + let (pdu, previous): (Arc, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -638,69 +638,75 @@ pub async fn send_transaction_message_route<'a>( None }; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. 
These are not timeline events. // // Step 10. check the auth of the event passes based on the calculated state of the event - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, + let (mut state_at_event, incoming_auth_events): ( + StateMap>, + Vec>, + ) = match db + .sending + .send_federation_request( + &db.globals, + server_name, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events( + &db, server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, ) - .await - { - Ok(res) => { - let state = fetch_events( + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( &db, server_name, &pub_key_map, - &res.pdu_ids, + &res.auth_chain_ids, &mut auth_cache, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -750,12 +756,25 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, extremities) = match calculate_forward_extremities( + let extremities = match calculate_forward_extremities(&db, &pdu).await { + Ok(fork_ids) => fork_ids, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; + + // Now that the event has passed all auth it is added into the timeline, we do have to + // find the leaves otherwise we would do this sooner + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + let mut fork_states = match build_forward_extremity_snapshots( &db, - &pdu, + pdu.room_id(), server_name, - &pub_key_map, current_state, + &extremities, + &pub_key_map, &mut auth_cache, ) .await @@ -767,6 +786,9 @@ pub async fn send_transaction_message_route<'a>( } }; + // Make this the state after (since we appended_incoming_pdu this should agree with our servers + // current state). 
+ state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // add the incoming events to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets fork_states.insert(state_at_event.clone()); @@ -840,7 +862,7 @@ pub async fn send_transaction_message_route<'a>( ); let res = match state_res::StateResolution::resolve( - &pdu.room_id, + pdu.room_id(), &RoomVersionId::Version6, &fork_states .into_iter() @@ -865,6 +887,7 @@ pub async fn send_transaction_message_route<'a>( continue 'main_pdu_loop; } }; + let mut resolved = BTreeMap::new(); for (k, id) in res { // We should know of the event but just incase @@ -890,10 +913,9 @@ pub async fn send_transaction_message_route<'a>( }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( + update_resolved_state( &db, - &pdu, - &extremities, + pdu.room_id(), if update_state { Some(state_at_forks) } else { @@ -905,7 +927,10 @@ pub async fn send_transaction_message_route<'a>( resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { + pdus: dbg!(resolved_map), + } + .into()) } /// An async function that can recursively calls itself. @@ -1036,13 +1061,14 @@ async fn fetch_check_auth_events( Ok(()) } -/// Find the event and auth it. +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. /// /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? -async fn fetch_events( +pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, key_map: &PublicKeyMap, @@ -1071,6 +1097,13 @@ async fn fetch_events( .await .map_err(|_| Error::Conflict("Authentication of event failed"))?; + // create the pduid for this event but stick it in the outliers DB + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1084,7 +1117,7 @@ async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -async fn fetch_signing_keys( +pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, ) -> Result> { @@ -1108,26 +1141,28 @@ async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. -async fn calculate_forward_extremities( +pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, - origin: &ServerName, - pub_key_map: &PublicKeyMap, - current_state: BTreeMap<(EventType, Option), Arc>, - auth_cache: &mut EventMap>, -) -> Result<(BTreeSet>>, Vec)> { +) -> Result> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- // + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = false; - // Not sure what to do here + // + // We first check if know of the event and then don't include it as a forward + // extremity if it is a timeline event + if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); } // TODO: @@ -1144,11 +1179,34 @@ async fn calculate_forward_extremities( } } - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + + Ok(current_leaves) +} + +/// This should always be called after the incoming event has been appended to the DB. +/// +/// This guarentees that the incoming event will be in the state sets (at least our servers +/// and the sending server). +pub(crate) async fn build_forward_extremity_snapshots( + db: &Database, + room_id: &RoomId, + origin: &ServerName, + current_state: StateMap>, + current_leaves: &[EventId], + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let current_hash = db.rooms.current_state_hash(room_id)?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); - for id in ¤t_leaves { + for id in current_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1158,14 +1216,21 @@ async fn calculate_forward_extremities( if current_hash.as_ref() == Some(&state_hash) { includes_current_state = true; } - let state = db + + let mut state_before = db .rooms - .state_full(&pdu.room_id, &state_hash)? + .state_full(room_id, &state_hash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(state); + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } else { let res = db .sending @@ -1173,7 +1238,7 @@ async fn calculate_forward_extremities( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: pdu.room_id(), + room_id, event_id: id, }, ) @@ -1181,41 +1246,38 @@ async fn calculate_forward_extremities( // TODO: This only adds events to the auth_cache, there is for sure a better way to // do this... - fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?; + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache) + let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) .await? .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); + .collect::>(); - fork_states.insert(state); + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? 
+ .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } } - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { fork_states.insert(current_state); } - Ok((fork_states, current_leaves)) + Ok(fork_states) } -/// Update the room state to be the resolved state and add the fully auth'ed event -/// to the DB. -/// -/// TODO: Since all these events passed state resolution can we trust them to add -fn append_incoming_pdu( +pub(crate) fn update_resolved_state( db: &Database, - pdu: &PduEvent, - new_room_leaves: &[EventId], + room_id: &RoomId, state: Option>>, ) -> Result<()> { // Update the state of the room if needed @@ -1236,44 +1298,50 @@ fn append_incoming_pdu( ); } None => { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // TODO: can we use are current state if we just add this event to the end of our - // pduid_pdu tree?? - let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &*pdu, - utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &new_room_leaves, - &db, - )?; - // TODO: is this ok... - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pdu_id.to_vec(), - ); + error!("We didn't append an event as an outlier\n{:?}", pdu); } } } - info!("Force update of state for {:?}", pdu); - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; + db.rooms.force_state(room_id, new_state, &db.globals)?; } + Ok(()) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +pub(crate) fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: &StateMap>, +) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + pduid.to_vec(), + ); + } + None => { + error!("We didn't append an event as an outlier\n{:?}", pdu); + } + } + } + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1281,7 +1349,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1292,9 +1360,7 @@ fn append_incoming_pdu( &db, )?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + db.rooms.set_room_state(pdu.room_id(), &state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From e09be2f7ee31a97b615a86e5bdae8ac75ec93ff6 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 30 Jan 2021 12:43:43 -0500 Subject: [PATCH 0438/1727] Add incoming event to the current room state then resolve All state snapshots that are used in the resolve call are state after snapshots, they have the event inserted. --- src/server_server.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ad0a1a4..f55b377 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -768,9 +768,11 @@ pub async fn send_transaction_message_route<'a>( // find the leaves otherwise we would do this sooner append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + // This will create the state after any state snapshot it builds + // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( &db, - pdu.room_id(), + pdu.clone(), server_name, current_state, &extremities, @@ -1195,14 +1197,14 @@ pub(crate) async fn calculate_forward_extremities( /// and the sending server). pub(crate) async fn build_forward_extremity_snapshots( db: &Database, - room_id: &RoomId, + pdu: Arc, origin: &ServerName, - current_state: StateMap>, + mut current_state: StateMap>, current_leaves: &[EventId], pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, ) -> Result>>> { - let current_hash = db.rooms.current_state_hash(room_id)?; + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); @@ -1219,7 +1221,7 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut state_before = db .rooms - .state_full(room_id, &state_hash)? + .state_full(pdu.room_id(), &state_hash)? 
.into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); @@ -1238,7 +1240,7 @@ pub(crate) async fn build_forward_extremity_snapshots( &db.globals, origin, get_room_state_ids::v1::Request { - room_id, + room_id: pdu.room_id(), event_id: id, }, ) @@ -1269,6 +1271,9 @@ pub(crate) async fn build_forward_extremity_snapshots( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { + error!("Did not include current state"); + current_state.insert((pdu.kind(), pdu.state_key()), pdu); + fork_states.insert(current_state); } From 64374b4679f6b63dc36d3da7ab5d58753c1980d8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 12:44:30 -0500 Subject: [PATCH 0439/1727] Use eventId when saving outliers --- src/client_server/sync.rs | 2 +- src/database.rs | 2 +- src/database/rooms.rs | 76 +++++++++++++++++++++++++-------------- src/server_server.rs | 69 +++++++++++++++++------------------ 4 files changed, 84 insertions(+), 65 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 97b6ad2..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay, if delay.is_elapsed() => {} + _ = &mut delay => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index a9cc362..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d459aee..ee8f0ab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,10 +27,9 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::Duration, }; -use super::{admin::AdminCommand, sending::Sending}; +use super::admin::AdminCommand; /// The unique identifier of each state group. /// @@ -67,13 +66,16 @@ pub struct Rooms { pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) - /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) pduid_outlierpdu: sled::Tree, + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + /// + /// TODO: Should this check for outliers, it does now. pub fn state_full( &self, room_id: &RoomId, @@ -90,7 +92,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -118,6 +120,8 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + /// + /// TODO: Should this check for outliers, it does now. 
pub fn state_get( &self, room_id: &RoomId, @@ -149,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -260,6 +264,8 @@ impl Rooms { } }; + // Because of outliers this could also be an eventID but that + // is handled by `state_full` let pdu_id_short = pdu_id .splitn(2, |&b| b == 0xff) .nth(1) @@ -325,9 +331,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -342,6 +351,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -349,9 +360,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -443,27 +457,34 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { - self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) - } else { - Ok(None) - } } /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { + log::info!( + "Number of outlier pdu's {:#?}", + self.eventid_outlierpdu + .iter() + .map(|pair| { + let (_k, v) = pair.unwrap(); + serde_json::from_slice::(&v).unwrap() + }) + .collect::>() + ); - // we need to be able to find it by event_id - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); let res = self - .pduid_outlierpdu + .eventid_outlierpdu .insert( - pdu_id, + &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -511,9 +532,10 @@ impl Rooms { } // We no longer keep this pdu as an outlier - if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? 
{ - self.pduid_outlierpdu.remove(id)?; - } + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.eventid_outlierpdu.remove(key)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index f55b377..5177f96 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -638,12 +637,8 @@ pub async fn send_transaction_message_route<'a>( None }; - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; + db.rooms.append_pdu_outlier(&pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -1079,37 +1074,28 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { + // `get_pdu` checks the outliers tree for us let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => Arc::new(pdu), - None => match db.rooms.get_pdu_outlier(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - // create the pduid for this event but stick it in the outliers DB - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; pdus.push(pdu); @@ -1193,7 +1179,7 @@ pub(crate) async fn calculate_forward_extremities( /// This should always be called after the incoming event has been appended to the DB. /// -/// This guarentees that the incoming event will be in the state sets (at least our servers +/// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). 
pub(crate) async fn build_forward_extremity_snapshots( db: &Database, @@ -1303,7 +1289,18 @@ pub(crate) fn update_resolved_state( ); } None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); + let mut pduid = pdu.room_id().as_bytes().to_vec(); + pduid.push(0xff); + pduid.extend_from_slice(pdu.event_id().as_bytes()); + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid, + ); } } } From 591769d5f3fde6314ba84130898dc6202b9b5c98 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 17:02:56 -0500 Subject: [PATCH 0440/1727] Fiter PDU's before main incoming PDU loop --- src/database/rooms.rs | 11 +---- src/server_server.rs | 103 ++++++++++++++++++++++++++---------------- 2 files changed, 66 insertions(+), 48 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ee8f0ab..6ee29a6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -466,16 +466,7 @@ impl Rooms { /// Returns true if the event_id was previously inserted. pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!( - "Number of outlier pdu's {:#?}", - self.eventid_outlierpdu - .iter() - .map(|pair| { - let (_k, v) = pair.unwrap(); - serde_json::from_slice::(&v).unwrap() - }) - .collect::>() - ); + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 5177f96..2cfbc6e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -533,6 +533,54 @@ pub async fn send_transaction_message_route<'a>( } } + let mut resolved_map = BTreeMap::new(); + + let pdus_to_resolve = body + .pdus + .iter() + .filter_map(|pdu| { + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + return None; + } + }; + + // 1. check the server is in the room (optional) + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".to_string())); + return None; + } + } + + // If we know of this pdu we don't need to continue processing it + // + // This check is essentially + if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { + return None; + } + + Some((event_id, value)) + }) + .collect::>(); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. @@ -540,35 +588,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - let mut resolved_map = BTreeMap::new(); - 'main_pdu_loop: for pdu in &body.pdus { - // 1. Is a valid event, otherwise it is dropped. 
- // Ruma/PduEvent/StateEvent satisfies this - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); - - // If we have no idea about this room skip the PDU - let room_id = match value - .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() - { - Some(id) => id, - None => { - resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); - continue; - } - }; - - // 1. check the server is in the room (optional) - if !db.rooms.exists(&room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); - continue; - } - + 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -921,13 +941,13 @@ pub async fn send_transaction_message_route<'a>( )?; // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { - pdus: dbg!(resolved_map), + if !resolved_map.is_empty() { + warn!("These PDU's failed {:?}", resolved_map); } - .into()) + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1139,6 +1159,7 @@ pub(crate) async fn calculate_forward_extremities( // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? if current_leaves.contains(pdu.event_id()) { + error!("The incoming event is already present in get_pdu_leaves BUG"); is_incoming_leaf = false; // Not sure what to do here } @@ -1147,11 +1168,12 @@ pub(crate) async fn calculate_forward_extremities( // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. // - // We first check if know of the event and then don't include it as a forward - // extremity if it is a timeline event - if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - } + // We check this in the filter just before the main incoming PDU for loop + // so no already known event can make it this far. + // + // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); + // } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1219,7 +1241,12 @@ pub(crate) async fn build_forward_extremity_snapshots( } fork_states.insert(state_before); + } else if id == pdu.event_id() { + // We add this snapshot after `build_forward_extremity_snapshots` is + // called which we requested from the sending server } else { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); + let res = db .sending .send_federation_request( From 74d530ae0eff76bbdd7a130cd17a645b5455676f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 20:00:01 -0500 Subject: [PATCH 0441/1727] Address review issues, fix forward extremity calc Keep track of all prev_events since if we know that an event is a prev_event it is referenced and does not qualify as a forward extremity. 
--- src/client_server/push.rs | 5 +- src/database.rs | 3 +- src/database/globals.rs | 8 +- src/database/rooms.rs | 79 +++++---- src/server_server.rs | 341 ++++++++++++++++++-------------------- 5 files changed, 220 insertions(+), 216 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..7c3e9d9 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -93,7 +93,10 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )) } } diff --git a/src/database.rs b/src/database.rs index b841ab9..3fb8442 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + prevevent_parent: db.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 2ed6a9f..00b4568 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,7 +27,11 @@ pub struct Globals { } impl Globals { - pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { + pub fn load( + globals: sled::Tree, + servertimeout_signingkey: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -84,7 +88,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, - servertimeout_signingkey: server_keys, + servertimeout_signingkey, }) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6ee29a6..abe8c65 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -68,7 +68,9 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. + pub(super) prevevent_parent: sled::Tree, } impl Rooms { @@ -92,7 +94,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -120,8 +122,6 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - /// - /// TODO: Should this check for outliers, it does now. pub fn state_get( &self, room_id: &RoomId, @@ -153,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -203,7 +203,7 @@ impl Rooms { &event_type, &state_key .as_deref() - .expect("found a non state event in auth events"), + .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); } @@ -248,7 +248,7 @@ impl Rooms { let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), pdu_id) in state { + for ((event_type, state_key), id_long) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -266,7 +266,7 @@ impl Rooms { // Because of outliers this could also be an eventID but that // is handled by `state_full` - let pdu_id_short = pdu_id + let pdu_id_short = id_long .splitn(2, |&b| b == 0xff) .nth(1) .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; @@ -332,7 +332,7 @@ impl Rooms { serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(event_id.as_bytes())? .ok_or_else(|| { Error::bad_database("Event is not in pdu tree or outliers.") @@ -360,12 +360,10 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self - .eventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { + Some(b) => b, + None => return Ok(None), + }, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -373,6 +371,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -436,7 +436,7 @@ impl Rooms { /// Replace the leaves of a room. /// - /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); @@ -455,31 +455,42 @@ impl Rooms { Ok(()) } + pub fn is_pdu_referenced(&self, pdu: &PduEvent) -> Result { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.prevevent_parent.contains_key(key).map_err(Into::into) + } + /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu + self.roomeventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) } - /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + /// Append the PDU as an outlier. + /// + /// Any event given to this will be processed (state-res) on another thread. 
+ pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + log::info!( + "Number of outlier pdu's {}", + self.roomeventid_outlierpdu.len() + ); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - let res = self - .eventid_outlierpdu - .insert( - &key, - &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), - ) - .map(|op| op.is_some())?; - Ok(res) + self.eventid_pduid + .insert(pdu.event_id().as_bytes(), key.as_slice())?; + + self.roomeventid_outlierpdu.insert( + &key, + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + )?; + Ok(()) } /// Creates a new persisted data unit and adds it to a room. @@ -526,7 +537,15 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.eventid_outlierpdu.remove(key)?; + self.roomeventid_outlierpdu.remove(key)?; + + // We must keep track of all events that have been referenced. + for leaf in leaves { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(leaf.as_bytes()); + self.prevevent_parent + .insert(key, pdu.event_id().as_bytes())?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; @@ -541,6 +560,8 @@ impl Rooms { .expect("CanonicalJsonObject is always a valid String"), )?; + // This also replaces the eventid of any outliers with the correct + // pduid, removing the place holder. self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; diff --git a/src/server_server.rs b/src/server_server.rs index 2cfbc6e..48d5956 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -571,8 +571,6 @@ pub async fn send_transaction_message_route<'a>( } // If we know of this pdu we don't need to continue processing it - // - // This check is essentially if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { return None; } @@ -664,64 +662,66 @@ pub async fn send_transaction_message_route<'a>( // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
check the auth of the event passes based on the calculated state of the event - let (mut state_at_event, incoming_auth_events): ( - StateMap>, - Vec>, - ) = match db - .sending - .send_federation_request( - &db.globals, - server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = fetch_events( - &db, + // + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, server_name, - &pub_key_map, - &res.pdu_ids, - &mut auth_cache, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( + .await + { + Ok(res) => { + let state = fetch_events( &db, server_name, &pub_key_map, - &res.auth_chain_ids, + &res.pdu_ids, &mut auth_cache, ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + error!("Server sent us an invalid state"); + continue; + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -764,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( pdu.event_id().clone(), Err("Event has been soft failed".into()), ); + continue; }; // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res @@ -779,10 +780,6 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that the event has passed all auth it is added into the timeline, we do have to - // find the leaves otherwise we would do this sooner - append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - // This will create the state after any state snapshot it builds // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( @@ -805,10 +802,11 @@ pub async fn send_transaction_message_route<'a>( // Make this the state after (since we appended_incoming_pdu this should agree with our servers // current state). 
- state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); - // add the incoming events to the mix of state snapshots + let mut state_after = state_at_event.clone(); + state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); + // Add the incoming event to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets - fork_states.insert(state_at_event.clone()); + fork_states.insert(state_after.clone()); let fork_states = fork_states.into_iter().collect::>(); @@ -826,39 +824,27 @@ pub async fn send_transaction_message_route<'a>( update_state = true; // TODO: remove this is for current debugging Jan, 15 2021 - let mut number_fetches = 0_u32; let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), - // We should know about every event at this point but just incase... - None => match fetch_events( - &db, - server_name, - &pub_key_map, - &[auth_id.clone()], - &mut auth_cache, - ) - .await - .map(|mut vec| { - number_fetches += 1; - vec.pop() - }) { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - }, + // The only events that haven't been added to the auth cache are + // events we have knowledge of previously + None => { + error!("Event was not present in auth_cache {}", auth_id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; + } }; state_auth.push(event); } auth_events.push(state_auth); } - info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map auth_cache.extend( @@ -873,7 +859,7 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| (pdu.event_id().clone(), pdu)), ); auth_cache.extend( - state_at_event + state_after .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); @@ -911,17 +897,12 @@ pub async fn send_transaction_message_route<'a>( let pdu = match auth_cache.get(&id) { Some(pdu) => pdu.clone(), None => { - match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) - .await - .map(|mut vec| vec.pop()) - { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - } + error!("Event was not present in auth_cache {}", id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; } }; resolved.insert(k, pdu); @@ -929,7 +910,12 @@ pub async fn send_transaction_message_route<'a>( resolved }; - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + // Set the new room state to the resolved state update_resolved_state( &db, pdu.room_id(), @@ -1046,8 +1032,6 @@ fn validate_event<'a>( /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// The check in `fetch_check_auth_events` is that a complete chain is found for the /// events `auth_events`. If the chain is found to have any missing events it fails. 
-/// -/// The `auth_cache` is filled instead of returning a `Vec`. async fn fetch_check_auth_events( db: &Database, origin: &ServerName, @@ -1073,7 +1057,6 @@ async fn fetch_check_auth_events( })??; stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); } Ok(()) } @@ -1085,6 +1068,9 @@ async fn fetch_check_auth_events( /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? +/// +/// If the event is unknown to the `auth_cache` it is added. This guarantees that any +/// event we need to know of will be present. pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, @@ -1118,6 +1104,7 @@ pub(crate) async fn fetch_events( Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; + auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); pdus.push(pdu); } Ok(pdus) @@ -1167,13 +1154,9 @@ pub(crate) async fn calculate_forward_extremities( // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - // - // We check this in the filter just before the main incoming PDU for loop - // so no already known event can make it this far. - // - // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - // } + if db.rooms.is_pdu_referenced(pdu)? { + is_incoming_leaf = false; + } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1217,74 +1200,79 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); for id in current_leaves { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); + match db.rooms.get_pdu_id(id)? { + // We can skip this because it is handled outside of this function + // The current server state and incoming event state are built to be + // the state after. + // This would be the incoming state from the server. + Some(_) if id == pdu.event_id() => {} + Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { + let state_hash = db + .rooms + .pdu_state_hash(&pduid)? + .expect("found pdu with no statehash"); - if current_hash.as_ref() == Some(&state_hash) { - includes_current_state = true; + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } + + let mut state_before = db + .rooms + .state_full(pdu.room_id(), &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect::>(); + + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&pduid)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } + _ => { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - let mut state_before = db - .rooms - .state_full(pdu.room_id(), &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect::>(); + let res = db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: id, + }, + ) + .await?; - // Now it's the state after - if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? 
{ - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, Arc::new(pdu)); + // TODO: This only adds events to the auth_cache, there is for sure a better way to + // do this... + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; + + let mut state_before = + fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) + .await? + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect::>(); + + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? + .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } - - fork_states.insert(state_before); - } else if id == pdu.event_id() { - // We add this snapshot after `build_forward_extremity_snapshots` is - // called which we requested from the sending server - } else { - error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - - let res = db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: id, - }, - ) - .await?; - - // TODO: This only adds events to the auth_cache, there is for sure a better way to - // do this... - fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - - let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) - .await? - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect::>(); - - if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) - .await? - .pop() - { - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, pdu); - } - - // Now it's the state after - fork_states.insert(state_before); } } // This guarantees that our current room state is included - if !includes_current_state && current_hash.is_some() { - error!("Did not include current state"); + if !includes_current_state { current_state.insert((pdu.kind(), pdu.state_key()), pdu); fork_states.insert(current_state); @@ -1316,18 +1304,7 @@ pub(crate) fn update_resolved_state( ); } None => { - let mut pduid = pdu.room_id().as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(pdu.event_id().as_bytes()); - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pduid, - ); + error!("We are missing a state event for the current room state."); } } } @@ -1349,9 +1326,9 @@ pub(crate) fn append_incoming_pdu( // Update the state of the room if needed // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); - for ((ev_type, state_k), pdu) in state { - match db.rooms.get_pdu_id(pdu.event_id())? { - Some(pduid) => { + for ((ev_type, state_k), state_pdu) in state { + match db.rooms.get_pdu_id(state_pdu.event_id())? 
{ + Some(state_pduid) => { new_state.insert( ( ev_type.clone(), @@ -1359,12 +1336,10 @@ pub(crate) fn append_incoming_pdu( .clone() .ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - pduid.to_vec(), + state_pduid.to_vec(), ); } - None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); - } + None => error!("We are missing a state event for the incoming event snapshot"), } } From 48601142f8afe96042eec0bdade94056f4054a99 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 22:48:43 -0500 Subject: [PATCH 0442/1727] Use auth_cache to avoid db, save state for every event when joining --- src/client_server/membership.rs | 14 +++------ src/database.rs | 1 + src/database/rooms.rs | 55 ++++++++++++++++++++++++++++++++- src/server_server.rs | 53 ++++++++++++++++++------------- 4 files changed, 90 insertions(+), 33 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 1159185..99c0b62 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -643,8 +643,6 @@ async fn join_room_by_id_helper( ) .expect("iterative auth check failed on resolved events"); - let mut state = HashMap::new(); - // filter the events that failed the auth check keeping the remaining events // sorted correctly for ev_id in sorted_event_ids @@ -660,24 +658,20 @@ async fn join_room_by_id_helper( let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + + let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - // TODO: can we simplify the DAG or should we copy it exactly?? &pdu.prev_events, &db, )?; - if state_events.contains(ev_id) { - if let Some(key) = &pdu.state_key { - state.insert((pdu.kind(), key.to_string()), pdu_id); - } - } + db.rooms.set_room_state(room_id, &hash)?; } - - db.rooms.force_state(room_id, state, &db.globals)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database.rs b/src/database.rs index 3fb8442..35b7bcd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -161,6 +161,7 @@ impl Database { roomid_statehash: db.open_tree("roomid_statehash")?, roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, + roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index abe8c65..43d5f7d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -69,6 +69,10 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. + /// This allows us to skip any state snapshots that would for sure not have the outlier. + pub(super) roomeventid_outlierpducount: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. 
pub(super) prevevent_parent: sled::Tree, } @@ -323,6 +327,15 @@ impl Rooms { .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) } + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + self.pduid_pdu + .scan_prefix(room_id.as_bytes()) + .last() + .map(|b| self.pdu_count(&b?.0)) + .transpose() + .map(|op| op.unwrap_or_default()) + } + /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -490,6 +503,8 @@ impl Rooms { &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; + self.roomeventid_outlierpducount + .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; Ok(()) } @@ -537,7 +552,45 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.roomeventid_outlierpdu.remove(key)?; + if self.roomeventid_outlierpdu.remove(&key)?.is_some() { + if let Some(state_key) = pdu.state_key.as_deref() { + let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_short.get(&statekey)? { + Some(short) => utils::u64_from_bytes(&short).map_err(|_| { + Error::bad_database("Invalid short bytes in statekey_short.") + })?, + None => { + error!( + "This event has been inserted into the state snapshot tree previously." + ); + let short = db.globals.next_count()?; + self.statekey_short + .insert(&statekey, &short.to_be_bytes())?; + short + } + }; + + let mut start = pdu.room_id().as_bytes().to_vec(); + start.extend_from_slice( + &self + .roomeventid_outlierpducount + .get(&key)? + .unwrap_or_default(), + ); + for hash in self.pduid_statehash.range(start..).values() { + let mut hash = hash?.to_vec(); + hash.extend_from_slice(&short.to_be_bytes()); + + let _ = self.stateid_pduid.compare_and_swap( + hash, + Some(pdu.event_id().as_bytes()), + Some(pdu_id.as_ref()), + )?; + } + } + } // We must keep track of all events that have been referenced. for leaf in leaves { diff --git a/src/server_server.rs b/src/server_server.rs index 48d5956..780109c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -971,6 +971,7 @@ fn validate_event<'a>( } } Err(_e) => { + error!("{}", _e); return Err("Signature verification failed".to_string()); } }; @@ -988,7 +989,7 @@ fn validate_event<'a>( fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await - .map_err(|_| "Event failed auth chain check".to_string())?; + .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); @@ -1064,6 +1065,7 @@ async fn fetch_check_auth_events( /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// +/// 0. Look in the auth_cache /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation @@ -1080,28 +1082,35 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { - // `get_pdu` checks the outliers tree for us - let pdu = match db.rooms.get_pdu(&id)? 
{ - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let pdu = match auth_cache.get(id) { + Some(pdu) => pdu.clone(), + // `get_pdu` checks the outliers tree for us + None => match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("{:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); From 8f283510aa93189f6845a2950da32c8fb49fc1f5 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 23:01:31 -0500 Subject: [PATCH 0443/1727] Fix unused import clippy warning --- Cargo.lock | 52 ++++++++++++++++++++++++++------- src/client_server/membership.rs | 6 ++-- src/client_server/session.rs | 3 +- src/server_server.rs | 3 +- 4 files changed, 46 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7381be..2565a35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,6 +97,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -172,7 +178,7 @@ dependencies = [ "num-integer", "num-traits", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -185,7 +191,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "directories", "http", "image", @@ -227,7 +233,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.24", + "time 0.2.25", "version_check", ] @@ -871,6 +877,20 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1509,7 +1529,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", @@ -1584,8 
+1604,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "ubyte", "version_check", "yansi", @@ -1622,8 +1642,8 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -1819,7 +1839,7 @@ name = "ruma-signatures" version = "0.6.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -1833,7 +1853,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1854,7 +1874,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64", + "base64 0.13.0", "log", "ring", "sct", @@ -2189,6 +2209,16 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "time" version = "0.2.25" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 99c0b62..211388e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,9 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::Event; +// use state_res::Event; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashSet}, convert::TryFrom, iter, sync::Arc, @@ -557,7 +557,7 @@ async fn join_room_by_id_helper( let room_state = send_join_response.room_state.state.iter().map(add_event_id); - let state_events = room_state + let _state_events = room_state .clone() .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 1b2583c..f8d64f0 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -117,8 +117,7 @@ pub async fn login_route( let device_exists = body.device_id.as_ref().map_or(false, |device_id| { db.users .all_device_ids(&user_id) - .find(|x| x.as_ref().map_or(false, |v| v == device_id)) - .is_some() + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { diff --git a/src/server_server.rs b/src/server_server.rs index 780109c..1e81d5e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -800,8 +800,7 @@ pub async fn send_transaction_message_route<'a>( } }; - // Make this the state after (since we appended_incoming_pdu this should agree with our servers - // current state). + // Make this the state after. 
let mut state_after = state_at_event.clone(); state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // Add the incoming event to the mix of state snapshots From 0cc6448dbe1d31d7e4f84f27ab9ca957b69ebe0f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 9 Feb 2021 17:58:40 -0500 Subject: [PATCH 0444/1727] Temp disable rust_2018_idioms for CI --- Cargo.lock | 212 +++++++++++++--------------------------------------- src/main.rs | 2 +- 2 files changed, 53 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2565a35..956e372 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,27 +33,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-stream" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" -dependencies = [ - "async-stream-impl", - "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-trait" version = "0.1.42" @@ -134,9 +113,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" +checksum = "099e596ef14349721d9016f6b80dd3419ea1bf289ab9b44df8e4dfd3a005d5d9" [[package]] name = "bytemuck" @@ -195,7 +174,6 @@ dependencies = [ "directories", "http", "image", - "js_int", "jsonwebtoken", "log", "rand 0.7.3", @@ -211,7 +189,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.0.2", + "tokio", "trust-dns-resolver", ] @@ -373,9 +351,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if", ] @@ -602,28 +580,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 0.2.24", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - -[[package]] -name = "h2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.0.2", - "tokio-util 0.6.1", + "tokio", + "tokio-util", "tracing", "tracing-futures", ] @@ -684,21 +642,11 @@ dependencies = [ "http", ] -[[package]] -name = "http-body" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" -dependencies = [ - "bytes 1.0.1", - "http", -] - [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name 
= "httpdate" @@ -708,47 +656,23 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", + "h2", "http", - "http-body 0.3.1", + "http-body", "httparse", "httpdate", "itoa", - "pin-project 1.0.4", + "pin-project 1.0.5", "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" -dependencies = [ - "bytes 1.0.1", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.0", - "http", - "http-body 0.4.0", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.4", - "socket2", - "tokio 1.0.2", + "tokio", "tower-service", "tracing", "want", @@ -769,9 +693,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "de910d521f7cc3135c4de8db1cb910e0b5ed1dc6f57c381cd07e8e661ce10094" dependencies = [ "matches", "unicode-bidi", @@ -780,9 +704,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" +checksum = "293f07a1875fa7e9c5897b51aa68b2d8ed8271b87e1a44cb64b9c3d98aabbc0d" dependencies = [ "bytemuck", "byteorder", @@ -899,9 +823,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "linked-hash-map" @@ -1007,16 +931,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - [[package]] name = "native-tls" version = "0.2.7" @@ -1044,15 +958,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1246,11 +1151,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.4", + "pin-project-internal 1.0.5", ] [[package]] @@ -1266,9 +1171,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.4" +version = 
"1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1535,8 +1440,8 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.9", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", @@ -1569,9 +1474,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1631,7 +1536,7 @@ dependencies = [ "cookie", "either", "http", - "hyper 0.14.2", + "hyper", "indexmap", "log", "mime", @@ -1973,9 +1878,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" dependencies = [ "itoa", "ryu", @@ -1996,9 +1901,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" +checksum = "bdd2af560da3c1fdc02cb80965289254fc35dff869810061e2d8290ee48848ae" dependencies = [ "dtoa", "linked-hash-map", @@ -2079,9 +1984,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2202,9 +2107,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ "once_cell", ] @@ -2274,9 +2179,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" +checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" dependencies = [ "autocfg", "bytes", @@ -2293,9 +2198,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2319,35 +2224,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.0.2", + "tokio", "webpki", ] -[[package]] -name = "tokio-stream" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "async-stream", "bytes", "futures-core", "futures-sink", "log", "pin-project-lite", "tokio", - "tokio-stream", ] [[package]] @@ -2367,9 +2259,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "f7d40a22fd029e33300d8d89a5cc8ffce18bb7c587662f54629e94c9de5487f3" dependencies = [ "cfg-if", "pin-project-lite", @@ -2415,7 +2307,7 @@ dependencies = [ "rand 0.8.3", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "url", ] @@ -2435,7 +2327,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "trust-dns-proto", ] @@ -2474,9 +2366,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] diff --git a/src/main.rs b/src/main.rs index e5c0399..9b64506 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] +// #![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; From a601c29c978c85c6dd346b8395f01581c684ccdc Mon Sep 17 00:00:00 2001 From: Niklas Zender Date: Fri, 26 Feb 2021 14:55:06 +0000 Subject: [PATCH 0445/1727] Chore: Add Issue Template --- .gitlab/issue_templates/Issue Template.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .gitlab/issue_templates/Issue Template.md diff --git a/.gitlab/issue_templates/Issue Template.md b/.gitlab/issue_templates/Issue Template.md new file mode 100644 index 0000000..e1a0667 --- /dev/null +++ b/.gitlab/issue_templates/Issue Template.md @@ -0,0 +1,15 @@ +# Headline + +### Description + + + + + + + + + + + +/label ~conduit From 0dd8a15c4900ec9ba6fe1764b5ca31c4575bb199 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 27 Feb 2021 16:09:22 -0500 Subject: [PATCH 0446/1727] Fix leaves not being replaced by correct eventId in membership Update ruma --- Cargo.lock | 62 ++++++++++++++++++++++++--------- Cargo.toml | 5 +-- src/client_server/membership.rs | 2 +- src/database/key_backups.rs | 32 ++++++++++------- 4 files changed, 69 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 956e372..d9dbbf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,7 +1558,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "js_int", @@ -1569,6 +1569,8 @@ dependencies = [ 
"ruma-events", "ruma-federation-api", "ruma-identifiers", + "ruma-identity-service-api", + "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", ] @@ -1576,7 +1578,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1591,7 +1593,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1602,7 +1604,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1616,7 +1618,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1635,7 +1637,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1648,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1662,7 +1664,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1673,7 +1675,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1688,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand 0.8.3", @@ -1702,7 +1704,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1713,12 +1715,40 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" + +[[package]] +name = "ruma-identity-service-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-push-gateway-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1731,7 +1761,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1742,7 +1772,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2000,7 +2030,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" +source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", diff --git a/Cargo.toml b/Cargo.toml index de6a966..4a901e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,15 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 211388e..e3b1827 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -666,7 +666,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &pdu.prev_events, + &[pdu.event_id.clone()], &db, )?; diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index a50e45e..4c65354 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -2,7 +2,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyData, Sessions}, + r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, RoomId, UserId, }; @@ -129,7 +129,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyData, + key_data: &KeyBackupData, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); @@ -153,7 +153,7 @@ impl KeyBackups { self.backupkeyid_backup.insert( &key, - &*serde_json::to_string(&key_data).expect("KeyData::to_string always works"), + &*serde_json::to_string(&key_data).expect("KeyBackupData::to_string always works"), )?; Ok(()) @@ -182,13 +182,17 @@ impl KeyBackups { .to_string()) } - pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + pub fn get_all( + &self, + user_id: &UserId, + version: &str, + ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); 
+ let mut rooms = BTreeMap::::new(); for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| { let (key, value) = r?; @@ -211,15 +215,16 @@ impl KeyBackups { ) .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; - let key_data = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))?; + let key_data = serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + })?; Ok::<_, Error>((room_id, session_id, key_data)) }) { let (room_id, session_id, key_data) = result?; rooms .entry(room_id) - .or_insert_with(|| Sessions { + .or_insert_with(|| RoomKeyBackup { sessions: BTreeMap::new(), }) .sessions @@ -234,7 +239,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> BTreeMap { + ) -> BTreeMap { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -257,7 +262,7 @@ impl KeyBackups { })?; let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyData in backupkeyid_backup is invalid.") + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; Ok::<_, Error>((session_id, key_data)) @@ -272,7 +277,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - ) -> Result> { + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -284,8 +289,9 @@ impl KeyBackups { self.backupkeyid_backup .get(&key)? .map(|value| { - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid.")) + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + }) }) .transpose() } From f3253f2033691ec47719335d8e0c04b684c48899 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 28 Feb 2021 18:53:17 -0500 Subject: [PATCH 0447/1727] Move comments about Rooms trees to doc comments --- src/database/rooms.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 43d5f7d..4ad499c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -63,8 +63,11 @@ pub struct Rooms { /// Remember the state hash at events in the past. pub(super) pduid_statehash: sled::Tree, /// The state for a given state hash. - pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count - pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + /// + /// StateKey = EventType + StateKey, Short = Count + pub(super) statekey_short: sled::Tree, + /// StateId = StateHash + Short, PduId = Count (without roomid) + pub(super) stateid_pduid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
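The composite keys documented in the hunk above follow the byte-concatenation convention used throughout this database layer: key components are joined with 0xff separator bytes, the same pattern visible in key_backups.rs. A minimal sketch of how a statekey_short entry could be built under that assumption; the helper name and the exact separator byte are illustrative, not taken from the patch:

// Illustrative sketch: key layout "EventType + StateKey", value = Short (a count).
// The 0xff separator is an assumption based on the other trees in this codebase.
fn statekey(event_type: &str, state_key: &str) -> Vec<u8> {
    let mut key = event_type.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(state_key.as_bytes());
    key
}

// stateid_pduid then maps "StateHash + Short" to a PduId (a count without the room id),
// so, as the doc comments above imply, resolving one state entry is two lookups:
// (EventType, StateKey) -> Short, then (StateHash, Short) -> PduId.
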
@@ -583,11 +586,11 @@ impl Rooms { let mut hash = hash?.to_vec(); hash.extend_from_slice(&short.to_be_bytes()); - let _ = self.stateid_pduid.compare_and_swap( + let _ = dbg!(self.stateid_pduid.compare_and_swap( hash, Some(pdu.event_id().as_bytes()), Some(pdu_id.as_ref()), - )?; + )?); } } } @@ -921,12 +924,12 @@ impl Rooms { content.clone(), prev_event, None, // TODO: third party invite - &auth_events + dbg!(&auth_events .iter() .map(|((ty, key), pdu)| { Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) - .collect::>>()?, + .collect::>>()?), ) .map_err(|e| { log::error!("{}", e); From c9f4ff5cf8e20dba0e6dfc24de6acb83458e3b2d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 08:23:28 -0500 Subject: [PATCH 0448/1727] Ask multiple servers for keys when not known or sending server failed --- src/database/rooms.rs | 2 +- src/server_server.rs | 61 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 53 insertions(+), 10 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4ad499c..992c97c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1605,7 +1605,7 @@ impl Rooms { }) } - /// Returns an iterator over all joined members of a room. + /// Returns an iterator of all servers participating in this room. pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 1e81d5e..58c4b33 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -8,8 +8,8 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, - VerifyKey, + get_remote_server_keys, get_server_keys, + get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, @@ -575,7 +575,7 @@ pub async fn send_transaction_message_route<'a>( return None; } - Some((event_id, value)) + Some((event_id, room_id, value)) }) .collect::>(); @@ -586,7 +586,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -595,7 +595,7 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - let keys = match fetch_signing_keys(&db, origin).await { + let keys = match fetch_signing_keys(&db, &room_id, origin).await { Ok(keys) => keys, Err(_) => { resolved_map.insert( @@ -1122,18 +1122,61 @@ pub(crate) async fn fetch_events( /// fetch them from the server and save to our DB. pub(crate) async fn fetch_signing_keys( db: &Database, + room_id: &RoomId, origin: &ServerName, ) -> Result> { match db.globals.signing_keys_for(origin)? 
{ keys if !keys.is_empty() => Ok(keys), _ => { - let keys = db + match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) .await - .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; - db.globals.add_signing_key(origin, &keys.server_key)?; - Ok(keys.server_key.verify_keys) + { + Ok(keys) => { + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + _ => { + for server in db.rooms.room_servers(room_id) { + let server = server?; + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + &server, + get_remote_server_keys::v2::Request::new( + &server, + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ), + ) + .await + { + let keys: Vec = keys.server_keys; + let key = keys.into_iter().fold(None, |mut key, next| { + if let Some(verified) = &key { + // rustc cannot elide this type for some reason + let v: &ServerSigningKeys = verified; + if v.verify_keys + .iter() + .zip(next.verify_keys.iter()) + .all(|(a, b)| a.1.key == b.1.key) + { + } + } else { + key = Some(next) + } + key + }); + } + } + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) + } + } } } } From 79c9de98cd6699df8647b70ef24d9dd0889a497a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 09:17:53 -0500 Subject: [PATCH 0449/1727] Add trusted_servers, filter servers to query keys by trusted_servers --- src/database.rs | 2 ++ src/database/globals.rs | 4 ++++ src/server_server.rs | 16 +++++++++++++--- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/database.rs b/src/database.rs index 35b7bcd..20cc7e1 100644 --- a/src/database.rs +++ b/src/database.rs @@ -39,6 +39,8 @@ pub struct Config { #[serde(default = "false_fn")] allow_federation: bool, jwt_secret: Option, + #[serde(default = "Vec::new")] + trusted_servers: Vec>, } fn false_fn() -> bool { diff --git a/src/database/globals.rs b/src/database/globals.rs index 00b4568..3c65e74 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -139,6 +139,10 @@ impl Globals { self.config.allow_federation } + pub fn trusted_servers(&self) -> &[Box] { + &self.config.trusted_servers + } + pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } diff --git a/src/server_server.rs b/src/server_server.rs index 58c4b33..dcd72f7 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1138,7 +1138,9 @@ pub(crate) async fn fetch_signing_keys( Ok(keys.server_key.verify_keys) } _ => { - for server in db.rooms.room_servers(room_id) { + for server in db.rooms.room_servers(room_id).filter( + |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)), + ) { let server = server?; if let Ok(keys) = db .sending @@ -1154,8 +1156,9 @@ pub(crate) async fn fetch_signing_keys( ) .await { + let mut trust = 0; let keys: Vec = keys.server_keys; - let key = keys.into_iter().fold(None, |mut key, next| { + let key = keys.iter().fold(None, |mut key, next| { if let Some(verified) = &key { // rustc cannot elide this type for some reason let v: &ServerSigningKeys = verified; @@ -1164,12 +1167,19 @@ pub(crate) async fn fetch_signing_keys( .zip(next.verify_keys.iter()) .all(|(a, b)| a.1.key == b.1.key) { + trust += 1; } } else { - key = Some(next) + key = Some(next.clone()) } key }); + + if trust == (keys.len() - 1) && key.is_some() { + let k = key.unwrap(); + db.globals.add_signing_key(origin, &k)?; + return Ok(k.verify_keys); + } } } 
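The fold in the hunk above implements a simple agreement check across the queried servers: the first response becomes the candidate, each further response bumps the trust counter only when its verify_keys match the candidate's, and the key is stored and returned only when trust == keys.len() - 1, i.e. every other response agreed. A condensed sketch of that check with the federation and sled plumbing stripped out; ServerKeys here is a simplified placeholder, not the ruma ServerSigningKeys type used in the patch:

use std::collections::BTreeMap;

// Placeholder for ruma's ServerSigningKeys: key id -> base64 key material.
#[derive(Clone, PartialEq)]
struct ServerKeys {
    verify_keys: BTreeMap<String, String>,
}

// Accept a key set only if every response matches the first one, mirroring the
// `trust == (keys.len() - 1)` condition in the patch above.
fn agreed_keys(responses: &[ServerKeys]) -> Option<ServerKeys> {
    let (first, rest) = responses.split_first()?;
    let trust = rest
        .iter()
        .filter(|next| next.verify_keys == first.verify_keys)
        .count();
    if trust == responses.len() - 1 {
        Some(first.clone())
    } else {
        None
    }
}

In the patch this check only runs over servers returned by db.rooms.room_servers(room_id), and the following commit restricts that set further to the configured trusted_servers list.
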
Err(Error::BadServerResponse( From dd68031b692065469b4fde959e5751df2005f10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 2 Mar 2021 14:32:30 +0100 Subject: [PATCH 0450/1727] improvement: implement /receipt --- src/client_server/read_marker.rs | 52 ++++++++++++++++++++++++++++---- src/main.rs | 2 +- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 0c4ec1a..f7d3712 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -2,7 +2,8 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, r0::capabilities::get_capabilities, r0::read_marker::set_read_marker, + error::ErrorKind, + r0::{read_marker::set_read_marker, receipt::create_receipt}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; @@ -83,13 +84,52 @@ pub async fn set_read_marker_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") )] -pub async fn set_receipt_route( +pub async fn create_receipt_route( db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + db.rooms.edus.private_read_set( + &body.room_id, + &sender_user, + db.rooms + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + &db.globals, + )?; + + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(SystemTime::now()), + }, + ); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert( + body.event_id.to_owned(), + ruma::events::receipt::Receipts { + read: Some(user_receipts), + }, + ); + + db.rooms.edus.readreceipt_update( + &sender_user, + &body.room_id, + AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )), + &db.globals, + )?; db.flush().await?; - Ok(set_read_marker::Response.into()) + Ok(create_receipt::Response.into()) } diff --git a/src/main.rs b/src/main.rs index 65434a5..d5f1f4e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -90,7 +90,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_backup_key_sessions_route, client_server::get_backup_keys_route, client_server::set_read_marker_route, - client_server::set_receipt_route, + client_server::create_receipt_route, client_server::create_typing_event_route, client_server::create_room_route, client_server::redact_event_route, From 278751eb23cd524fb489634905612e4939c1501c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 2 Mar 2021 14:36:48 +0100 Subject: [PATCH 0451/1727] improvement: use transaction ids for federation requests --- src/database/sending.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index fd32793..8c487e1 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -9,6 +9,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::info; +use ring::digest; use rocket::futures::stream::{FuturesUnordered, 
StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, @@ -229,6 +230,13 @@ impl Sending { Ok(()) } + fn calculate_hash(keys: &[IVec]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() + } + async fn handle_event( server: Box, is_appservice: bool, @@ -266,7 +274,10 @@ impl Sending { .unwrap(), // TODO: handle error appservice::event::push_events::v1::Request { events: &pdu_jsons, - txn_id: &utils::random_string(16), + txn_id: &base64::encode_config( + Self::calculate_hash(&pdu_ids), + base64::URL_SAFE_NO_PAD, + ), }, ) .await @@ -309,7 +320,10 @@ impl Sending { pdus: &pdu_jsons, edus: &[], origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), + transaction_id: &base64::encode_config( + Self::calculate_hash(&pdu_ids), + base64::URL_SAFE_NO_PAD, + ), }, ) .await From f7713fdf2e470ba437cf858faf8c306649d91fbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Feb 2021 13:24:07 +0100 Subject: [PATCH 0452/1727] fix: sending code got stuck sometimes --- src/database/sending.rs | 78 ++++++++++++++++++++++++++++++----------- src/ruma_wrapper.rs | 4 +-- 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 8c487e1..dfb7fa9 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::info; +use log::{info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -36,6 +36,7 @@ impl Sending { ) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); + let maximum_requests = self.maximum_requests.clone(); let rooms = rooms.clone(); let globals = globals.clone(); let appservice = appservice.clone(); @@ -44,23 +45,43 @@ impl Sending { let mut futures = FuturesUnordered::new(); // Retry requests we could not finish yet - let mut current_transactions = HashMap::new(); + let mut current_transactions = HashMap::<(Box, bool), Vec>::new(); - for (server, pdu, is_appservice) in servercurrentpdus + for (key, server, pdu, is_appservice) in servercurrentpdus .iter() .filter_map(|r| r.ok()) .filter_map(|(key, _)| Self::parse_servercurrentpdus(key).ok()) - .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key - .take(50) - // This should not contain more than 50 anyway { - current_transactions + if pdu.is_empty() { + // Remove old reservation key + servercurrentpdus.remove(key).unwrap(); + continue; + } + + let entry = current_transactions .entry((server, is_appservice)) - .or_insert_with(Vec::new) - .push(pdu); + .or_insert_with(Vec::new); + + if entry.len() > 30 { + warn!("Dropping some current pdus because too many were queued. 
This should not happen."); + servercurrentpdus.remove(key).unwrap(); + continue; + } + + entry.push(pdu); } for ((server, is_appservice), pdus) in current_transactions { + // Create new reservation + let mut prefix = if is_appservice { + "+".as_bytes().to_vec() + } else { + Vec::new() + }; + prefix.extend_from_slice(server.as_bytes()); + prefix.push(0xff); + servercurrentpdus.insert(prefix, &[]).unwrap(); + futures.push(Self::handle_event( server, is_appservice, @@ -68,6 +89,7 @@ impl Sending { &globals, &rooms, &appservice, + &maximum_requests, )); } @@ -106,7 +128,7 @@ impl Sending { .map(|k| { k.subslice(prefix.len(), k.len() - prefix.len()) }) - .take(50) + .take(30) .collect::>(); if !new_pdus.is_empty() { @@ -117,7 +139,7 @@ impl Sending { servernamepduids.remove(¤t_key).unwrap(); } - futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice)); + futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice, &maximum_requests)); } else { servercurrentpdus.remove(&prefix).unwrap(); // servercurrentpdus with the prefix should be empty now @@ -194,15 +216,17 @@ impl Sending { prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); - servercurrentpdus + if servercurrentpdus .compare_and_swap(prefix, Option::<&[u8]>::None, Some(&[])) // Try to reserve - == Ok(Ok(())) + == Ok(Ok(())) { true } else { + false + } }) { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice)); + futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice, &maximum_requests)); } } } @@ -244,6 +268,7 @@ impl Sending { globals: &super::globals::Globals, rooms: &super::rooms::Rooms, appservice: &super::appservice::Appservice, + maximum_requests: &Semaphore, ) -> std::result::Result<(Box, bool), (Box, bool, Error)> { if is_appservice { let pdu_jsons = pdu_ids @@ -266,7 +291,9 @@ impl Sending { }) .filter_map(|r| r.ok()) .collect::>(); - appservice_server::send_request( + + let permit = maximum_requests.acquire().await; + let response = appservice_server::send_request( &globals, appservice .get_registration(server.as_str()) @@ -282,7 +309,11 @@ impl Sending { ) .await .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + .map_err(|e| (server, is_appservice, e)); + + drop(permit); + + response } else { let pdu_jsons = pdu_ids .iter() @@ -312,7 +343,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - server_server::send_request( + let permit = maximum_requests.acquire().await; + let response = server_server::send_request( &globals, server.clone(), send_transaction_message::v1::Request { @@ -328,12 +360,17 @@ impl Sending { ) .await .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + .map_err(|e| (server, is_appservice, e)); + + drop(permit); + + response } } - fn parse_servercurrentpdus(key: IVec) -> Result<(Box, IVec, bool)> { - let mut parts = key.splitn(2, |&b| b == 0xff); + fn parse_servercurrentpdus(key: IVec) -> Result<(IVec, Box, IVec, bool)> { + let key2 = key.clone(); + let mut parts = key2.splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); let pdu = parts .next() @@ -351,6 +388,7 @@ impl Sending { }; Ok::<_, Error>(( + key, Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in 
server_currenttransaction") })?, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 45fcc7f..898561f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -82,9 +82,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| { - dbg!(token.as_deref()) == dbg!(Some(as_token)) - }) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { From 4155a47db1e365b0b2875c419fb1ba1e584587e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 28 Feb 2021 12:41:03 +0100 Subject: [PATCH 0453/1727] feat: opentelemetry/jaeger support --- Cargo.lock | 249 ++++++++++++++++++++++++++-- Cargo.toml | 6 + conduit-example.toml | 4 + src/client_server/account.rs | 5 + src/client_server/alias.rs | 3 + src/client_server/backup.rs | 14 ++ src/client_server/capabilities.rs | 1 + src/client_server/config.rs | 2 + src/client_server/context.rs | 1 + src/client_server/device.rs | 5 + src/client_server/directory.rs | 4 + src/client_server/filter.rs | 2 + src/client_server/keys.rs | 6 + src/client_server/media.rs | 4 + src/client_server/membership.rs | 11 ++ src/client_server/message.rs | 2 + src/client_server/mod.rs | 1 + src/client_server/presence.rs | 1 + src/client_server/profile.rs | 5 + src/client_server/push.rs | 10 ++ src/client_server/read_marker.rs | 2 + src/client_server/redact.rs | 1 + src/client_server/room.rs | 3 + src/client_server/search.rs | 1 + src/client_server/session.rs | 4 + src/client_server/state.rs | 5 + src/client_server/sync.rs | 5 +- src/client_server/tag.rs | 3 + src/client_server/thirdparty.rs | 1 + src/client_server/to_device.rs | 1 + src/client_server/typing.rs | 1 + src/client_server/unversioned.rs | 1 + src/client_server/user_directory.rs | 1 + src/client_server/voip.rs | 1 + src/database.rs | 2 + src/database/account_data.rs | 1 + src/database/rooms.rs | 16 ++ src/database/rooms/edus.rs | 5 + src/database/sending.rs | 6 + src/database/users.rs | 5 + src/error.rs | 68 +------- src/main.rs | 52 ++++-- src/pdu.rs | 34 ++-- src/ruma_wrapper.rs | 4 +- src/server_server.rs | 14 ++ 45 files changed, 457 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff405..c8d48dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,6 +30,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "arrayref" version = "0.3.6" @@ -212,7 +221,9 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "rand", + "opentelemetry", + "opentelemetry-jaeger", + "rand 0.7.3", "regex", "reqwest", "ring", @@ -226,6 +237,9 @@ dependencies = [ "state-res", "thiserror", "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", "trust-dns-resolver", ] @@ -595,6 +609,17 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", +] + [[package]] name = "gif" version = "0.11.1" @@ -795,6 +820,12 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "integer-encoding" +version = "1.1.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" + [[package]] name = "iovec" version = "0.1.4" @@ -945,6 +976,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -1189,6 +1229,44 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514d24875c140ed269eecc2d1b56d7b71b573716922a763c317fb1b1b4b58f15" +dependencies = [ + "async-trait", + "futures", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project 1.0.2", + "rand 0.8.3", + "thiserror", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5677b3a361784aff6e2b1b30dbdb5f85f4ec57ff2ced41d9a481ad70a9d0b57" +dependencies = [ + "async-trait", + "lazy_static", + "opentelemetry", + "thiserror", + "thrift", +] + +[[package]] +name = "ordered-float" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits", +] + [[package]] name = "parking_lot" version = "0.11.1" @@ -1406,11 +1484,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", ] [[package]] @@ -1420,7 +1510,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.2", ] [[package]] @@ -1429,7 +1529,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom 0.2.2", ] [[package]] @@ -1438,7 +1547,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + 
"rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.2", ] [[package]] @@ -1453,7 +1571,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1490,6 +1608,16 @@ dependencies = [ "thread_local", ] +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + [[package]] name = "regex-syntax" version = "0.6.21" @@ -1582,7 +1710,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1769,7 +1897,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "paste", - "rand", + "rand 0.7.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2010,6 +2138,15 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + [[package]] name = "signal-hook-registry" version = "1.2.2" @@ -2194,7 +2331,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2229,6 +2366,28 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "thrift" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +dependencies = [ + "byteorder", + "integer-encoding", + "log", + "ordered-float", + "threadpool", +] + [[package]] name = "time" version = "0.1.44" @@ -2380,9 +2539,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", @@ -2393,9 +2552,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" dependencies = [ "proc-macro2", "quote", @@ -2421,6 +2580,62 @@ dependencies = [ 
"tracing", ] +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccdf13c28f1654fe806838f28c5b9cb23ca4c0eae71450daa489f50e523ceb1" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + [[package]] name = "trust-dns-proto" version = "0.19.6" @@ -2434,7 +2649,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index f7fbdc5..9ab5250 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,6 +65,12 @@ trust-dns-resolver = "0.19.6" regex = "1.4.2" # jwt jsonwebtokens jsonwebtoken = "7.2.0" +# Performance measurements +tracing = "0.1.25" +opentelemetry = "0.12.0" +tracing-subscriber = "0.2.16" +tracing-opentelemetry = "0.11.0" +opentelemetry-jaeger = "0.11.0" [features] default = ["conduit_bin"] diff --git a/conduit-example.toml b/conduit-example.toml index b82da2c..b1bc618 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -28,8 +28,12 @@ max_request_size = 20_000_000 # in bytes # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work #allow_encryption = true + #allow_federation = false +# Enable jaeger to support monitoring and troubleshooting through jaeger +#allow_jaeger = false + #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time #workers = 4 # default: cpu core count * 2 diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 75544b7..044468b 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -40,6 +40,7 @@ const GUEST_NAME_LENGTH: usize = 10; feature = "conduit_bin", get("/_matrix/client/r0/register/available", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: State<'_, Database>, body: Ruma>, @@ -82,6 +83,7 @@ pub async fn get_register_available_route( feature = "conduit_bin", post("/_matrix/client/r0/register", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn register_route( db: State<'_, Database>, body: Ruma>, @@ -546,6 +548,7 @@ pub async fn register_route( feature = "conduit_bin", post("/_matrix/client/r0/account/password", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: State<'_, Database>, body: Ruma>, @@ -610,6 +613,7 @@ pub async fn change_password_route( feature = 
"conduit_bin", get("/_matrix/client/r0/account/whoami", data = "") )] +#[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { @@ -630,6 +634,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult, body: Ruma>, diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..b8c16d9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -19,6 +19,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: State<'_, Database>, body: Ruma>, @@ -39,6 +40,7 @@ pub async fn create_alias_route( feature = "conduit_bin", delete("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +56,7 @@ pub async fn delete_alias_route( feature = "conduit_bin", get("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 0f34ba7..f33d0de 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -17,6 +17,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", post("/_matrix/client/unstable/room_keys/version", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: State<'_, Database>, body: Ruma, @@ -35,6 +36,7 @@ pub async fn create_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: State<'_, Database>, body: Ruma>, @@ -52,6 +54,7 @@ pub async fn update_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, @@ -79,6 +82,7 @@ pub async fn get_latest_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: State<'_, Database>, body: Ruma>, @@ -105,6 +109,7 @@ pub async fn get_backup_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: State<'_, Database>, body: Ruma>, @@ -123,6 +128,7 @@ pub async fn delete_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -156,6 +162,7 @@ pub async fn add_backup_keys_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -187,6 +194,7 @@ pub async fn add_backup_key_sessions_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: State<'_, Database>, body: Ruma>, @@ -215,6 +223,7 @@ pub async fn 
add_backup_key_session_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -230,6 +239,7 @@ pub async fn get_backup_keys_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -247,6 +257,7 @@ pub async fn get_backup_key_sessions_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: State<'_, Database>, body: Ruma>, @@ -270,6 +281,7 @@ pub async fn get_backup_key_session_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -292,6 +304,7 @@ pub async fn delete_backup_keys_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -314,6 +327,7 @@ pub async fn delete_backup_key_sessions_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index fa12a08..b4fdf69 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -9,6 +9,7 @@ use rocket::get; /// /// Get information on this server's supported feature set and other relevent capabilities. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] +#[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( diff --git a/src/client_server/config.rs b/src/client_server/config.rs index f1d233a..aece96e 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,6 +16,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: State<'_, Database>, body: Ruma>, @@ -49,6 +50,7 @@ pub async fn set_global_account_data_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index f2a8cd4..cb9aaf9 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -10,6 +10,7 @@ use rocket::get; feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 86ac511..1950c5c 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -16,6 +16,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/devices", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: State<'_, Database>, body: Ruma, @@ -35,6 +36,7 @@ pub async fn get_devices_route( feature = "conduit_bin", get("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: State<'_, Database>, body: Ruma>, @@ -53,6 +55,7 @@ pub async fn get_device_route( feature = "conduit_bin", put("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: State<'_, Database>, body: Ruma>, @@ -78,6 +81,7 @@ pub async fn update_device_route( feature = "conduit_bin", delete("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: State<'_, Database>, body: Ruma>, @@ -126,6 +130,7 @@ pub async fn delete_device_route( feature = "conduit_bin", post("/_matrix/client/r0/delete_devices", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..1c72915 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -31,6 +31,7 @@ use rocket::{get, post, put}; feature = "conduit_bin", post("/_matrix/client/r0/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, @@ -50,6 +51,7 @@ pub async fn get_public_rooms_filtered_route( feature = "conduit_bin", get("/_matrix/client/r0/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma>, @@ -78,6 +80,7 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/client/r0/directory/list/room/<_>", data = "") )] +#[tracing::instrument(skip(db, 
body))] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma>, @@ -107,6 +110,7 @@ pub async fn set_room_visibility_route( feature = "conduit_bin", get("/_matrix/client/r0/directory/list/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 4513ab4..a08eb34 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -5,6 +5,7 @@ use ruma::api::client::r0::filter::{self, create_filter, get_filter}; use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] +#[tracing::instrument] pub async fn get_filter_route() -> ConduitResult { // TODO Ok(get_filter::Response::new(filter::IncomingFilterDefinition { @@ -18,6 +19,7 @@ pub async fn get_filter_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] +#[tracing::instrument] pub async fn create_filter_route() -> ConduitResult { // TODO Ok(create_filter::Response::new(utils::random_string(10)).into()) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 8426518..08bb4c6 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -22,6 +22,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/keys/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: State<'_, Database>, body: Ruma, @@ -70,6 +71,7 @@ pub async fn upload_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/query", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: State<'_, Database>, body: Ruma>, @@ -150,6 +152,7 @@ pub async fn get_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/claim", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: State<'_, Database>, body: Ruma, @@ -183,6 +186,7 @@ pub async fn claim_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/device_signing/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma>, @@ -240,6 +244,7 @@ pub async fn upload_signing_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/signatures/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: State<'_, Database>, body: Ruma, @@ -300,6 +305,7 @@ pub async fn upload_signatures_route( feature = "conduit_bin", get("/_matrix/client/r0/keys/changes", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..2db4fc6 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -12,6 +12,7 @@ use std::convert::TryInto; const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] +#[tracing::instrument(skip(db))] pub async fn get_media_config_route( db: State<'_, Database>, ) -> ConduitResult { @@ -25,6 +26,7 @@ pub async fn get_media_config_route( feature = "conduit_bin", post("/_matrix/media/r0/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +56,7 @@ pub async fn create_content_route( feature = 
"conduit_bin", get("/_matrix/media/r0/download/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: State<'_, Database>, body: Ruma>, @@ -103,6 +106,7 @@ pub async fn get_content_route( feature = "conduit_bin", get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..287cfbb 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -36,6 +36,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/join", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +55,7 @@ pub async fn join_room_by_id_route( feature = "conduit_bin", post("/_matrix/client/r0/join/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma>, @@ -88,6 +90,7 @@ pub async fn join_room_by_id_or_alias_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/leave", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: State<'_, Database>, body: Ruma>, @@ -140,6 +143,7 @@ pub async fn leave_room_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/invite", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: State<'_, Database>, body: Ruma>, @@ -183,6 +187,7 @@ pub async fn invite_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/kick", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: State<'_, Database>, body: Ruma>, @@ -236,6 +241,7 @@ pub async fn kick_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/ban", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: State<'_, Database>, body: Ruma>, @@ -296,6 +302,7 @@ pub async fn ban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/unban", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: State<'_, Database>, body: Ruma>, @@ -348,6 +355,7 @@ pub async fn unban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/forget", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: State<'_, Database>, body: Ruma>, @@ -365,6 +373,7 @@ pub async fn forget_room_route( feature = "conduit_bin", get("/_matrix/client/r0/joined_rooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: State<'_, Database>, body: Ruma, @@ -385,6 +394,7 @@ pub async fn joined_rooms_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/members", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: State<'_, Database>, body: Ruma>, @@ -414,6 +424,7 @@ pub async fn get_member_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..39a61cb 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -17,6 +17,7 @@ use rocket::{get, put}; feature = 
"conduit_bin", put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: State<'_, Database>, body: Ruma>, @@ -88,6 +89,7 @@ pub async fn send_message_event_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/messages", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 672957b..dd8e7a6 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -75,6 +75,7 @@ const SESSION_ID_LENGTH: usize = 256; #[cfg(feature = "conduit_bin")] #[options("/<_..>")] +#[tracing::instrument] pub async fn options_route() -> ConduitResult { Ok(send_event_to_device::Response.into()) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 15c746e..175853f 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -10,6 +10,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/presence/<_>/status", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..bd8425a 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -19,6 +19,7 @@ use std::convert::TryInto; feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/displayname", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: State<'_, Database>, body: Ruma>, @@ -102,6 +103,7 @@ pub async fn set_displayname_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/displayname", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: State<'_, Database>, body: Ruma>, @@ -116,6 +118,7 @@ pub async fn get_displayname_route( feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: State<'_, Database>, body: Ruma>, @@ -199,6 +202,7 @@ pub async fn set_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: State<'_, Database>, body: Ruma>, @@ -213,6 +217,7 @@ pub async fn get_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..03da73a 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -22,6 +22,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/pushrules", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, @@ -46,6 +47,7 @@ pub async fn get_pushrules_all_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: State<'_, Database>, body: Ruma>, @@ -101,6 +103,7 @@ pub async fn get_pushrule_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( 
db: State<'_, Database>, body: Ruma>, @@ -247,6 +250,7 @@ pub async fn set_pushrule_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: State<'_, Database>, body: Ruma>, @@ -310,6 +314,7 @@ pub async fn get_pushrule_actions_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: State<'_, Database>, body: Ruma>, @@ -413,6 +418,7 @@ pub async fn set_pushrule_actions_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: State<'_, Database>, body: Ruma>, @@ -473,6 +479,7 @@ pub async fn get_pushrule_enabled_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, body: Ruma>, @@ -576,6 +583,7 @@ pub async fn set_pushrule_enabled_route( feature = "conduit_bin", delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: State<'_, Database>, body: Ruma>, @@ -666,6 +674,7 @@ pub async fn delete_pushrule_route( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] +#[tracing::instrument] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { pushers: Vec::new(), @@ -674,6 +683,7 @@ pub async fn get_pushers_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] +#[tracing::instrument(skip(db))] pub async fn set_pushers_route(db: State<'_, Database>) -> ConduitResult { db.flush().await?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index f7d3712..555b7e7 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -16,6 +16,7 @@ use std::{collections::BTreeMap, time::SystemTime}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: State<'_, Database>, body: Ruma>, @@ -84,6 +85,7 @@ pub async fn set_read_marker_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..af277db 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -12,6 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..e2c931c 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,6 +22,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/createRoom", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: State<'_, Database>, body: Ruma>, @@ -350,6 +351,7 @@ pub async fn create_room_route( feature = "conduit_bin", 
get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: State<'_, Database>, body: Ruma>, @@ -377,6 +379,7 @@ pub async fn get_room_event_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 5fb87f0..a668a0d 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -11,6 +11,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", post("/_matrix/client/r0/search", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 1b2583c..8c8b643 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -24,6 +24,7 @@ use rocket::{get, post}; /// Get the homeserver's supported login types. One of these should be used as the `type` field /// when logging in. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] +#[tracing::instrument] pub async fn get_login_types_route() -> ConduitResult { Ok(get_login_types::Response::new(vec![get_login_types::LoginType::Password]).into()) } @@ -42,6 +43,7 @@ pub async fn get_login_types_route() -> ConduitResult feature = "conduit_bin", post("/_matrix/client/r0/login", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn login_route( db: State<'_, Database>, body: Ruma>, @@ -156,6 +158,7 @@ pub async fn login_route( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn logout_route( db: State<'_, Database>, body: Ruma, @@ -183,6 +186,7 @@ pub async fn logout_route( feature = "conduit_bin", post("/_matrix/client/r0/logout/all", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: State<'_, Database>, body: Ruma, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index faa415d..073d94f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -22,6 +22,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: State<'_, Database>, body: Ruma>, @@ -55,6 +56,7 @@ pub async fn send_state_event_for_key_route( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma>, @@ -96,6 +98,7 @@ pub async fn send_state_event_for_empty_key_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: State<'_, Database>, body: Ruma>, @@ -142,6 +145,7 @@ pub async fn get_state_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma>, @@ -193,6 +197,7 @@ pub async fn get_state_events_for_key_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn 
get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 494c773..b4d0520 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -30,6 +30,7 @@ use std::{ feature = "conduit_bin", get("/_matrix/client/r0/sync", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: State<'_, Database>, body: Ruma>, @@ -310,8 +311,7 @@ pub async fn sync_events_route( }; let state_events = if joined_since_last_sync { - db.rooms - .room_state_full(&room_id)? + current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) .collect() @@ -708,6 +708,7 @@ pub async fn sync_events_route( Ok(response.into()) } +#[tracing::instrument(skip(db))] fn share_encrypted_room( db: &Database, sender_user: &UserId, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 7bbf9e8..21264a1 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -13,6 +13,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: State<'_, Database>, body: Ruma>, @@ -49,6 +50,7 @@ pub async fn update_tag_route( feature = "conduit_bin", delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: State<'_, Database>, body: Ruma>, @@ -82,6 +84,7 @@ pub async fn delete_tag_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index c775e9b..3c07699 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -10,6 +10,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", get("/_matrix/client/r0/thirdparty/protocols") )] +#[tracing::instrument] pub async fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 5bc001e..460bd05 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -12,6 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index e90746e..4b7feb7 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -10,6 +10,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index e51ed56..d25dce6 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -15,6 +15,7 @@ use rocket::get; /// Note: Unstable features are used while developing new features. 
Clients should avoid using /// unstable features in their stable releases #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] +#[tracing::instrument] pub async fn get_supported_versions_route() -> ConduitResult { let mut resp = get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 5829364..b358274 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -9,6 +9,7 @@ use rocket::post; feature = "conduit_bin", post("/_matrix/client/r0/user_directory/search", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 9216f1a..7924a7f 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -6,6 +6,7 @@ use std::time::Duration; use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] +#[tracing::instrument] pub async fn turn_server_route() -> ConduitResult { Ok(get_turn_server_info::Response { username: "".to_owned(), diff --git a/src/database.rs b/src/database.rs index 8fcffd9..6dc9c70 100644 --- a/src/database.rs +++ b/src/database.rs @@ -38,6 +38,8 @@ pub struct Config { allow_encryption: bool, #[serde(default = "false_fn")] allow_federation: bool, + #[serde(default = "false_fn")] + pub allow_jaeger: bool, jwt_secret: Option, } diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 855ebfe..38e6c32 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -74,6 +74,7 @@ impl AccountData { } /// Returns all changes to the account data that happened after `since`. + #[tracing::instrument(skip(self))] pub fn changes_since( &self, room_id: Option<&RoomId>, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b35d006..7e80134 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -108,6 +108,7 @@ impl StateStore for Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + #[tracing::instrument(skip(self))] pub fn state_full( &self, room_id: &RoomId, @@ -145,6 +146,7 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] pub fn state_get( &self, room_id: &RoomId, @@ -186,11 +188,13 @@ impl Rooms { } /// Returns the last state hash key added to the db. + #[tracing::instrument(skip(self))] pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } /// Returns the last state hash key added to the db for the given room. + #[tracing::instrument(skip(self))] pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { Ok(self.roomid_statehash.get(room_id.as_bytes())?) } @@ -290,6 +294,7 @@ impl Rooms { } /// Returns the full room state. + #[tracing::instrument(skip(self))] pub fn room_state_full(&self, room_id: &RoomId) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) @@ -299,6 +304,7 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] pub fn room_state_get( &self, room_id: &RoomId, @@ -313,6 +319,7 @@ impl Rooms { } /// Returns the `count` of this pdu's id. 
+ #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { Ok( utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) @@ -1024,6 +1031,7 @@ impl Rooms { } /// Returns an iterator over all PDUs in a room. + #[tracing::instrument(skip(self))] pub fn all_pdus( &self, user_id: &UserId, @@ -1034,6 +1042,7 @@ impl Rooms { /// Returns a double-ended iterator over all events in a room that happened after the event with id `since` /// in chronological order. + #[tracing::instrument(skip(self))] pub fn pdus_since( &self, user_id: &UserId, @@ -1100,6 +1109,7 @@ impl Rooms { /// Returns an iterator over all events and their token in a room that happened after the event /// with id `from` in chronological order. + #[tracing::instrument(skip(self))] pub fn pdus_after( &self, user_id: &UserId, @@ -1449,6 +1459,7 @@ impl Rooms { )) } + #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, users: Vec, @@ -1510,6 +1521,7 @@ impl Rooms { } /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1558,6 +1570,7 @@ impl Rooms { } /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1582,6 +1595,7 @@ impl Rooms { } /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_joined .scan_prefix(user_id.as_bytes()) @@ -1603,6 +1617,7 @@ impl Rooms { } /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1627,6 +1642,7 @@ impl Rooms { } /// Returns an iterator over all rooms a user left. + #[tracing::instrument(skip(self))] pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..8433884 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -71,6 +71,7 @@ impl RoomEdus { } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[tracing::instrument(skip(self))] pub fn readreceipts_since( &self, room_id: &RoomId, @@ -116,6 +117,7 @@ impl RoomEdus { } /// Returns the private read marker. + #[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); @@ -257,6 +259,7 @@ impl RoomEdus { } /// Returns the count of the last typing update in this room. + #[tracing::instrument(skip(self, globals))] pub fn last_typing_update( &self, room_id: &RoomId, @@ -340,6 +343,7 @@ impl RoomEdus { } /// Resets the presence timeout, so the user will stay in their current presence state. 
+ #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( &user_id.to_string().as_bytes(), @@ -430,6 +434,7 @@ impl RoomEdus { } /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. + #[tracing::instrument(skip(self, globals, rooms))] pub fn presence_since( &self, room_id: &RoomId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 8c487e1..2e50710 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -211,6 +211,7 @@ impl Sending { }); } + #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -220,6 +221,7 @@ impl Sending { Ok(()) } + #[tracing::instrument(skip(self))] pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { let mut key = "+".as_bytes().to_vec(); key.extend_from_slice(appservice_id.as_bytes()); @@ -230,6 +232,7 @@ impl Sending { Ok(()) } + #[tracing::instrument] fn calculate_hash(keys: &[IVec]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); @@ -237,6 +240,7 @@ impl Sending { hash.as_ref().to_owned() } + #[tracing::instrument(skip(globals, rooms, appservice))] async fn handle_event( server: Box, is_appservice: bool, @@ -359,6 +363,7 @@ impl Sending { )) } + #[tracing::instrument(skip(self, globals))] pub async fn send_federation_request( &self, globals: &crate::database::globals::Globals, @@ -375,6 +380,7 @@ impl Sending { response } + #[tracing::instrument(skip(self, globals))] pub async fn send_appservice_request( &self, globals: &crate::database::globals::Globals, diff --git a/src/database/users.rs b/src/database/users.rs index 9da0776..985647a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -312,6 +312,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(&user_id.to_string().as_bytes())? 
@@ -365,6 +366,7 @@ impl Users { .transpose() } + #[tracing::instrument(skip(self))] pub fn count_one_time_keys( &self, user_id: &UserId, @@ -564,6 +566,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn keys_changed( &self, user_or_room_id: &str, @@ -739,6 +742,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn get_to_device_events( &self, user_id: &UserId, @@ -761,6 +765,7 @@ impl Users { Ok(events) } + #[tracing::instrument(skip(self))] pub fn remove_to_device_events( &self, user_id: &UserId, diff --git a/src/error.rs b/src/error.rs index c57843c..65c5b4f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,14 +1,7 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; - use log::error; -use ruma::{ - api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}, - events::room::message, -}; +use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; -use crate::{database::admin::AdminCommand, Database}; - #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, @@ -107,62 +100,3 @@ where .respond_to(r) } } - -pub struct ConduitLogger { - pub db: Database, - pub last_logs: RwLock>, -} - -impl log::Log for ConduitLogger { - fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { - true - } - - fn log(&self, record: &log::Record<'_>) { - let output = format!("{} - {}", record.level(), record.args()); - - if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record - .module_path() - .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying - && record.metadata().level() <= log::Level::Warn) - { - let first_line = output - .lines() - .next() - .expect("lines always returns one item"); - - eprintln!("{}", output); - - let mute_duration = match record.metadata().level() { - log::Level::Error => Duration::from_secs(60 * 5), // 5 minutes - log::Level::Warn => Duration::from_secs(60 * 60 * 24), // A day - _ => Duration::from_secs(60 * 60 * 24 * 7), // A week - }; - - if self - .last_logs - .read() - .unwrap() - .get(first_line) - .map_or(false, |i| i.elapsed() < mute_duration) - // Don't post this log again for some time - { - return; - } - - if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { - mut_last_logs.insert(first_line.to_owned(), Instant::now()); - } - - self.db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::notice_plain(output), - )); - } - } - - fn flush(&self) {} -} diff --git a/src/main.rs b/src/main.rs index d5f1f4e..498cfa6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,21 +11,23 @@ mod push_rules; mod ruma_wrapper; mod utils; +use database::Config; pub use database::Database; -pub use error::{ConduitLogger, Error, Result}; +pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use log::LevelFilter; use rocket::figment::{ providers::{Env, Format, Toml}, Figment, }; use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; +use tracing::span; +use tracing_subscriber::{prelude::*, Registry}; -fn setup_rocket() -> rocket::Rocket { +fn setup_rocket() -> (rocket::Rocket, Config) { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); @@ -39,7 +41,12 @@ fn setup_rocket() -> rocket::Rocket { ) .merge(Env::prefixed("CONDUIT_").global()); - rocket::custom(config) + let parsed_config = config + 
.extract::() + .expect("It looks like your config is invalid. Please take a look at the error"); + let parsed_config2 = parsed_config.clone(); + + let rocket = rocket::custom(config) .mount( "/", routes![ @@ -163,30 +170,41 @@ fn setup_rocket() -> rocket::Rocket { bad_json_catcher ]) .attach(AdHoc::on_attach("Config", |rocket| async { - let config = rocket - .figment() - .extract() - .expect("It looks like your config is invalid. Please take a look at the error"); - let data = Database::load_or_create(config) + let data = Database::load_or_create(parsed_config2) .await .expect("config is valid"); data.sending .start_handler(&data.globals, &data.rooms, &data.appservice); - log::set_boxed_logger(Box::new(ConduitLogger { - db: data.clone(), - last_logs: Default::default(), - })) - .unwrap(); - log::set_max_level(LevelFilter::Info); Ok(rocket.manage(data)) - })) + })); + + (rocket, parsed_config) } #[rocket::main] async fn main() { - setup_rocket().launch().await.unwrap(); + let (rocket, config) = setup_rocket(); + + if config.allow_jaeger { + let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() + .with_service_name("conduit") + .install() + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + Registry::default().with(telemetry).try_init().unwrap(); + + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); + let _enter = root.enter(); + + rocket.launch().await.unwrap(); + } else { + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); + let _enter = root.enter(); + + rocket.launch().await.unwrap(); + } } #[catch(404)] diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..544c073 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -40,6 +40,7 @@ pub struct PduEvent { } impl PduEvent { + #[tracing::instrument(skip(self))] pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { self.unsigned.clear(); @@ -86,6 +87,7 @@ impl PduEvent { Ok(()) } + #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -107,6 +109,7 @@ impl PduEvent { } /// This only works for events that are also AnyRoomEvents. 
+ #[tracing::instrument(skip(self))] pub fn to_any_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -128,6 +131,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -149,6 +153,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -164,20 +169,27 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, - "state_key": self.state_key, - }); + let json = format!( + r#"{{"content":{},"type":"{}","event_id":"{}","sender":"{}","origin_server_ts":{},"unsigned":{},"state_key":"{}"}}"#, + self.content, + self.kind, + self.event_id, + self.sender, + self.origin_server_ts, + serde_json::to_string(&self.unsigned).expect("Map::to_string always works"), + self.state_key + .as_ref() + .expect("state events have state keys") + ); - serde_json::from_value(json).expect("Raw::from_value always works") + Raw::from_json( + serde_json::value::RawValue::from_string(json).expect("our string is valid json"), + ) } + #[tracing::instrument(skip(self))] pub fn to_stripped_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -189,6 +201,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_member_event(&self) -> Raw> { let json = json!({ "content": self.content, @@ -206,6 +219,7 @@ impl PduEvent { } /// This does not return a full `Pdu` it is only to satisfy ruma's types. 
+ #[tracing::instrument] pub fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, ) -> Raw { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 45fcc7f..898561f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -82,9 +82,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| { - dbg!(token.as_deref()) == dbg!(Some(as_token)) - }) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { diff --git a/src/server_server.rs b/src/server_server.rs index 3fea4da..4ea9bfe 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -28,6 +28,7 @@ use std::{ time::{Duration, SystemTime}, }; +#[tracing::instrument(skip(globals))] pub async fn send_request( globals: &crate::database::globals::Globals, destination: Box, @@ -194,6 +195,7 @@ where } } +#[tracing::instrument] fn get_ip_with_port(destination_str: String) -> Option { if destination_str.parse::().is_ok() { Some(destination_str) @@ -204,6 +206,7 @@ fn get_ip_with_port(destination_str: String) -> Option { } } +#[tracing::instrument] fn add_port_to_hostname(destination_str: String) -> String { match destination_str.find(':') { None => destination_str.to_owned() + ":8448", @@ -214,6 +217,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification +#[tracing::instrument(skip(globals))] async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &Box, @@ -272,6 +276,7 @@ async fn find_actual_destination( (actual_destination, host) } +#[tracing::instrument(skip(globals))] async fn query_srv_record<'a>( globals: &crate::database::globals::Globals, hostname: &'a str, @@ -296,6 +301,7 @@ async fn query_srv_record<'a>( } } +#[tracing::instrument(skip(globals))] pub async fn request_well_known( globals: &crate::database::globals::Globals, destination: &str, @@ -319,6 +325,7 @@ pub async fn request_well_known( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] +#[tracing::instrument(skip(db))] pub fn get_server_version_route( db: State<'_, Database>, ) -> ConduitResult { @@ -336,6 +343,7 @@ pub fn get_server_version_route( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] +#[tracing::instrument(skip(db))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { if !db.globals.allow_federation() { // TODO: Use proper types @@ -378,6 +386,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] +#[tracing::instrument(skip(db))] pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json { get_server_keys_route(db) } @@ -386,6 +395,7 @@ pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json feature = "conduit_bin", post("/_matrix/federation/v1/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, @@ -433,6 +443,7 @@ pub async fn get_public_rooms_filtered_route( feature = "conduit_bin", get("/_matrix/federation/v1/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn 
get_public_rooms_route( db: State<'_, Database>, body: Ruma>, @@ -480,6 +491,7 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, @@ -585,6 +597,7 @@ pub async fn send_transaction_message_route<'a>( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn get_missing_events_route<'a>( db: State<'a, Database>, body: Ruma>, @@ -630,6 +643,7 @@ pub fn get_missing_events_route<'a>( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn get_profile_information_route<'a>( db: State<'a, Database>, body: Ruma>, From f2ec2be821b49ad565bdb92e44eabe9510cf5455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 3 Mar 2021 21:41:26 +0100 Subject: [PATCH 0454/1727] fix: don't always query aliases of appservices only do so if the alias matches the regex in the registration file --- src/client_server/alias.rs | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index b8c16d9..0a8ad08 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,5 +1,6 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; +use regex::Regex; use ruma::{ api::{ appservice, @@ -86,15 +87,23 @@ pub async fn get_alias_helper( Some(r) => room_id = Some(r), None => { for (_id, registration) in db.appservice.iter_all().filter_map(|r| r.ok()) { - if db - .sending - .send_appservice_request( - &db.globals, - registration, - appservice::query::query_room_alias::v1::Request { room_alias }, - ) - .await - .is_ok() + let aliases = registration + .get("namespaces") + .and_then(|ns| ns.get("aliases")) + .and_then(|users| users.get("regex")) + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()); + + if aliases.map_or(false, |aliases| aliases.is_match(room_alias.as_str())) + && db + .sending + .send_appservice_request( + &db.globals, + registration, + appservice::query::query_room_alias::v1::Request { room_alias }, + ) + .await + .is_ok() { room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") From f4039902d866c4fe9ee400ed65ac52fc6c611b72 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Wed, 3 Mar 2021 22:38:31 +0100 Subject: [PATCH 0455/1727] Bump Rocket to current HEAD --- Cargo.lock | 745 ++++++++++++++++++++------------------ Cargo.toml | 4 +- src/client_server/sync.rs | 3 +- src/database/admin.rs | 3 +- 4 files changed, 401 insertions(+), 354 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8d48dc..45ea00c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,18 +2,18 @@ # It is not intended for manual editing. 
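Before the lockfile churn below, the alias change from the previous commit is worth spelling out: `get_alias_helper` now reads the `namespaces.aliases.regex` entry from each appservice registration and only forwards the `query_room_alias` request when the requested alias actually matches, instead of querying every appservice for every unknown alias. A hypothetical helper expressing the same check, assuming (as Conduit does) that the registration is a `serde_yaml::Value`:

```rust
use regex::Regex;
use serde_yaml::Value;

// Returns true if the appservice's registered alias namespace covers `room_alias`.
fn appservice_covers_alias(registration: &Value, room_alias: &str) -> bool {
    registration
        .get("namespaces")
        .and_then(|ns| ns.get("aliases"))
        .and_then(|aliases| aliases.get("regex"))
        .and_then(|regex| regex.as_str())
        .and_then(|regex| Regex::new(regex).ok())
        .map_or(false, |regex| regex.is_match(room_alias))
}

fn main() {
    // Hypothetical registration fragment for illustration only.
    let registration: Value = serde_yaml::from_str(
        "namespaces:\n  aliases:\n    regex: '#_irc_.*:example.org'",
    )
    .unwrap();
    assert!(appservice_covers_alias(&registration, "#_irc_rust:example.org"));
    assert!(!appservice_covers_alias(&registration, "#general:example.org"));
}
```

Only when this check passes does the server spend a federation-style round-trip asking the appservice about the alias, which matches the intent stated in the commit message.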
[[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "adler32" @@ -53,9 +53,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -96,14 +96,14 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.4.3", + "miniz_oxide 0.4.4", "object", "rustc-demangle", ] @@ -151,21 +151,21 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "bed57e2090563b83ba8f83366628ce535a7584c9afa4c9fc0612a03925c6df58" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -174,10 +174,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] -name = "cc" -version = "1.0.66" +name = "bytes" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + +[[package]] +name = "cc" +version = "1.0.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" [[package]] name = "cfg-if" @@ -200,7 +206,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -236,28 +242,18 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.2.0", "tracing", "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", ] -[[package]] -name = 
"console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -271,7 +267,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time 0.2.25", "version_check", ] @@ -302,12 +298,11 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if 1.0.0", - "const_fn", "crossbeam-utils", "lazy_static", "memoffset", @@ -316,9 +311,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -393,9 +388,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -405,9 +400,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if 1.0.0", ] @@ -426,10 +421,11 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "c38799b106530aa30f774f7fca6d8f7e5f6234a79f427c4fad3c975eaf678931" dependencies = [ + "atomic", "pear", "serde", "toml", @@ -460,9 +456,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding", @@ -496,9 +492,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" dependencies = [ "futures-channel", "futures-core", @@ -511,9 +507,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" dependencies = [ "futures-core", "futures-sink", @@ -521,15 +517,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" dependencies = [ "futures-core", "futures-task", @@ -538,15 +534,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -556,24 +552,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" -dependencies = [ - "once_cell", -] +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" dependencies = [ "futures-channel", "futures-core", @@ -582,7 +575,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project-lite 0.2.5", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -600,11 +593,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 
0.9.0+wasi-snapshot-preview1", ] @@ -617,7 +610,7 @@ checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -648,7 +641,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", @@ -656,12 +649,31 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.25", + "tokio-util 0.3.1", "tracing", "tracing-futures", ] +[[package]] +name = "h2" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.2.0", + "tokio-util 0.6.3", + "tracing", +] + [[package]] name = "hashbrown" version = "0.9.1" @@ -670,18 +682,18 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -699,11 +711,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -714,15 +726,25 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -732,23 +754,47 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", 
"futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project", "socket2", - "tokio", + "tokio 0.2.25", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.1", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project", + "socket2", + "tokio 1.2.0", "tower-service", "tracing", "want", @@ -760,18 +806,18 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.10", "native-tls", - "tokio", + "tokio 0.2.25", "tokio-tls", ] [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -780,9 +826,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" +checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" dependencies = [ "bytemuck", "byteorder", @@ -797,9 +843,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -864,24 +910,21 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "dc9f84f9b115ce7843d60706df1422a916680bfdfcbdb0447c5614ff9d7e4d78" dependencies = [ "wasm-bindgen", ] @@ -927,15 +970,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "265d751d31d6780a3f956bb5b8022feba2d94eeee5a84ba64f4212eedca42213" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -948,11 +991,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -1033,9 +1076,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", @@ -1054,21 +1097,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1084,10 +1129,20 @@ dependencies = [ ] [[package]] -name = "native-tls" -version = "0.2.6" +name = "miow" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + +[[package]] +name = "native-tls" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1103,15 +1158,24 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1176,24 +1240,24 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "once_cell" -version = "1.5.2" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl" -version = "0.10.30" 
+version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1208,18 +1272,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.14.0+1.1.1j" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "055b569b5bd7e5462a1700f595c7c7d487691d73b5ce064176af7f9f0cbb80a9" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1240,7 +1304,7 @@ dependencies = [ "js-sys", "lazy_static", "percent-encoding", - "pin-project 1.0.2", + "pin-project", "rand 0.8.3", "thiserror", ] @@ -1280,29 +1344,29 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.5", "smallvec", "winapi 0.3.9", ] [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f612cbd0f9dd03f5dd28a191c48e4148c3b027e41207b32eee130373c6c941" +checksum = "3e61c26b3b7e7ef4bd0b17d2943b4620ca4682721f35a51c7fec1f5ae6325150" dependencies = [ "inlinable_string", "pear_codegen", @@ -1311,9 +1375,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602cf1780ee9bbca663ea75769e05643e16fe87d7c8ac9f4f385a2ed8940a75c" +checksum = "b35ff95312c89207a3770143c628d2788cf4f7dcc230b25d9623e863d5b30b84" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1323,9 +1387,9 @@ dependencies = [ [[package]] name = "pem" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ "base64 0.13.0", "once_cell", @@ -1340,38 +1404,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.27" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - 
"pin-project-internal 0.4.27", -] - -[[package]] -name = "pin-project" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" -dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.27" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1380,15 +1424,15 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "0cf491442e4b033ed1c722cb9f0df5fcfcf4de682466c46469c36bc47dc5548a" [[package]] name = "pin-utils" @@ -1404,9 +1448,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1437,9 +1481,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1471,9 +1515,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] @@ -1484,7 +1528,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1529,7 +1573,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1565,31 +1609,40 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", - "redox_syscall", + "getrandom 0.1.16", + "redox_syscall 0.1.57", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -1598,9 +1651,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1620,9 +1673,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1635,18 +1688,18 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.10", "hyper-tls", "ipnet", "js-sys", @@ -1656,15 +1709,14 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.5", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.25", "tokio-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1681,9 +1733,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1697,7 +1749,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "async-trait", "atomic", @@ -1710,14 +1762,14 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand 0.7.3", + "rand 0.8.3", "ref-cast", "rocket_codegen", "rocket_http", "serde", "state", - "time 0.2.23", - "tokio", + "time 0.2.25", + "tokio 1.2.0", "ubyte", "version_check", "yansi", @@ -1726,7 +1778,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "devise", "glob", @@ -1738,23 +1790,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.4", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.5", "ref-cast", "smallvec", "state", - "time 0.2.23", - "tokio", + "time 0.2.25", + "tokio 1.2.0", "tokio-rustls", "uncased", "unicode-xid", @@ -1990,11 +2043,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log", "ring", "sct", @@ -2017,12 +2070,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2041,9 +2088,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" +checksum = "2dfd318104249865096c8da1dfabf09ddbb6d0330ea176812a62ec75e40c4166" dependencies = [ "bitflags", "core-foundation", @@ -2054,9 +2101,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" +checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" dependencies = [ "core-foundation-sys", "libc", @@ -2079,18 +2126,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" 
+version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2099,9 +2146,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -2122,9 +2169,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" dependencies = [ "dtoa", "linked-hash-map", @@ -2149,9 +2196,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2191,19 +2238,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2215,9 +2261,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2314,9 +2360,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2325,32 +2371,32 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.3", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", @@ -2359,11 +2405,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -2390,20 +2436,19 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2439,9 +2484,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2454,32 +2499,45 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.12", + "slab", +] + +[[package]] +name = "tokio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.9", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.5", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2488,13 +2546,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", "rustls", - "tokio", + "tokio 1.2.0", "webpki", ] @@ -2505,7 +2562,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -2514,28 +2571,42 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", - "tokio", + "pin-project-lite 0.1.12", + "tokio 0.2.25", +] + +[[package]] +name = "tokio-util" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.5", + "tokio 1.2.0", ] [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2545,7 +2616,7 @@ checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.5", "tracing-attributes", "tracing-core", ] @@ -2572,11 +2643,11 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project", "tracing", ] @@ -2652,7 +2723,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.25", "url", ] @@ -2672,7 +2743,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.25", "trust-dns-proto", ] @@ -2693,9 +2764,9 @@ dependencies = [ [[package]] name = "uncased" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "369fa7fd7969c5373541d3c9a40dc1b76ce676fc87aba30d87c0ad3b97fad179" +checksum = "300932469d646d39929ffe84ad5c1837beecf602519ef5695e485b472de4082b" dependencies = [ "version_check", ] @@ -2720,9 +2791,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -2747,9 +2818,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] 
name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", "idna", @@ -2759,9 +2830,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2787,15 +2858,15 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" dependencies = [ "cfg-if 1.0.0", "serde", @@ -2805,9 +2876,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" dependencies = [ "bumpalo", "lazy_static", @@ -2820,9 +2891,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2832,9 +2903,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2842,9 +2913,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" dependencies = [ "proc-macro2", "quote", @@ -2855,39 +2926,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" - -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", 
- "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] +checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "ec600b26223b2948cedfde2a0aa6756dcf1fef616f43d7b3097aaf53a6c4d92b" dependencies = [ "js-sys", "wasm-bindgen", @@ -2905,9 +2952,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2979,9 +3026,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/Cargo.toml b/Cargo.toml index 9ab5250..27394f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86eddf7cc9a7fc40b044182f83f0d7d92a", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers @@ -28,7 +28,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-com #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.2.0" } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index b4d0520..fac6b15 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -698,7 +698,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..160f55a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -7,7 +7,6 @@ use ruma::{ events::{room::message, EventType}, UserId, }; -use tokio::select; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), @@ -71,7 +70,7 @@ impl Admin { }; loop { - select! { + tokio::select! 
{ Some(event) = receiver.next() => { match event { AdminCommand::RegisterAppservice(yaml) => { From 437cb5783a04f41fa31c2906ad4596093f703e3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 11:29:13 +0100 Subject: [PATCH 0456/1727] fix: apply the same appservice sending rules to events coming from federation currently this code is duplicated from database/rooms.rs, when we refactor server_server.rs we should deduplicate it --- src/server_server.rs | 67 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 4ea9bfe..26d7b1d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,6 +2,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Resu use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{info, warn}; +use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -18,6 +19,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, EventId, RoomId, ServerName, ServerSigningKeyId, UserId, }; use std::{ @@ -584,9 +586,70 @@ pub async fn send_transaction_message_route<'a>( db.rooms.set_room_state(&room_id, &next_room_state)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else( + || Vec::new(), + |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }, + ); + let aliases = namespaces + .get("aliases") + .and_then(|users| users.get("regex")) + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + let room_aliases = db.rooms.room_aliases(&room_id); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }); + + if bridge_user_id.map_or(false, |bridge_user_id| { + db.rooms + .is_joined(&bridge_user_id, room_id) + .unwrap_or(false) + }) || users.iter().any(|users| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == EventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(&state_key)) + }) || aliases.map_or(false, |aliases| { + room_aliases + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || db + .rooms + .room_members(&room_id) + .filter_map(|r| r.ok()) + .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } + } resolved_map.insert(event_id, Ok::<(), String>(())); } From 105f893cf3be42c85f8ada937764f90a7c27e548 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 12:29:08 +0100 Subject: [PATCH 0457/1727] chore: bump other dependencies to make it work --- Cargo.lock | 505 ++++++++------------------------- Cargo.toml | 28 +- src/client_server/directory.rs | 4 +- 
src/database/globals.rs | 8 +- src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 3 +- src/utils.rs | 1 + 8 files changed, 134 insertions(+), 421 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45ea00c..51ccff7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler32" version = "1.2.0" @@ -36,7 +21,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,7 +70,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -94,20 +79,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.4", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" @@ -167,12 +138,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -185,12 +150,6 @@ version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -207,7 +166,7 @@ dependencies = [ "num-integer", "num-traits", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -224,12 +183,11 @@ dependencies = [ "directories", "http", "image", - "js_int", "jsonwebtoken", "log", "opentelemetry", "opentelemetry-jaeger", - "rand 0.7.3", + "rand 0.8.3", "regex", "reqwest", "ring", @@ -242,7 +200,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.2.0", + "tokio", "tracing", "tracing-opentelemetry", "tracing-subscriber", @@ -293,7 +251,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -302,7 +260,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "lazy_static", "memoffset", @@ -316,10 +274,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -377,7 +341,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -404,7 +368,7 @@ version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -471,25 +435,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.13" @@ -575,7 +523,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.5", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -597,7 +545,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -608,7 +556,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.2+wasi-snapshot-preview1", ] @@ -623,45 +571,19 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -[[package]] -name = "h2" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 0.2.25", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - [[package]] name = "h2" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "futures-core", "futures-sink", @@ -669,8 +591,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.2.0", - "tokio-util 0.6.3", + "tokio", + "tokio-util", "tracing", ] @@ -706,7 +628,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -715,28 +637,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http", -] - [[package]] name = "http-body" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 1.0.1", + "bytes", "http", ] @@ -752,49 +664,25 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" -[[package]] -name = "hyper" -version = "0.13.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http", - "http-body 0.3.1", - "httparse", - "httpdate", - "itoa", - "pin-project", - "socket2", - "tokio 0.2.25", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.1", + "h2", "http", - "http-body 0.4.0", + "http-body", "httparse", "httpdate", "itoa", "pin-project", "socket2", - "tokio 1.2.0", + "tokio", "tower-service", "tracing", "want", @@ -802,15 +690,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.10", + "bytes", + "hyper", "native-tls", - "tokio 0.2.25", - "tokio-tls", + "tokio", + "tokio-native-tls", ] [[package]] @@ -863,7 +751,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -872,15 +760,6 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - [[package]] name = "ipconfig" version = "0.2.2" @@ -889,7 +768,7 @@ 
checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -952,16 +831,6 @@ dependencies = [ "simple_asn1", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -995,7 +864,7 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1055,16 +924,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1074,35 +933,6 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.7.9" @@ -1111,21 +941,9 @@ checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" dependencies = [ "libc", "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "winapi", ] [[package]] @@ -1135,7 +953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1156,24 +974,13 @@ dependencies = [ "tempfile", ] -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - [[package]] name = "ntapi" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1238,12 +1045,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - [[package]] name = "once_cell" version = 
"1.7.2" @@ -1257,7 +1058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1348,12 +1149,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.5", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1364,9 +1165,9 @@ checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e61c26b3b7e7ef4bd0b17d2943b4620ca4682721f35a51c7fec1f5ae6325150" +checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" dependencies = [ "inlinable_string", "pear_codegen", @@ -1375,9 +1176,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35ff95312c89207a3770143c628d2788cf4f7dcc230b25d9623e863d5b30b84" +checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1422,12 +1223,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - [[package]] name = "pin-project-lite" version = "0.2.5" @@ -1455,7 +1250,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1683,37 +1478,36 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "0460542b551950620a3648c6aa23318ac6b3cd779114bd873209e6e8b5eb1c34" dependencies = [ "base64 0.13.0", - "bytes 0.5.6", + "bytes", "encoding_rs", "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.10", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.5", + "pin-project-lite", "serde", "serde_urlencoded", - "tokio 0.2.25", - "tokio-tls", + "tokio", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1743,7 +1537,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1769,7 +1563,7 @@ dependencies = [ "serde", "state", "time 0.2.25", - "tokio 1.2.0", + "tokio", "ubyte", "version_check", "yansi", @@ -1795,19 +1589,19 @@ dependencies = [ "cookie", "either", "http", - "hyper 0.14.4", + "hyper", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.5", + "pin-project-lite", "ref-cast", "smallvec", "state", "time 0.2.25", - "tokio 1.2.0", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -2026,12 +1820,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] 
-name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2067,7 +1855,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2248,9 +2036,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2375,12 +2163,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "rand 0.8.3", "redox_syscall 0.2.5", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2441,7 +2229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2456,7 +2244,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2497,23 +2285,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.12", - "slab", -] - [[package]] name = "tokio" version = "1.2.0" @@ -2521,16 +2292,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", - "mio 0.7.9", + "mio", "num_cpus", "once_cell", - "pin-project-lite 0.2.5", + "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2544,6 +2315,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2551,46 +2332,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.2.0", + "tokio", "webpki", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.25", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes 0.5.6", - 
"futures-core", - "futures-sink", - "log", - "pin-project-lite 0.1.12", - "tokio 0.2.25", -] - [[package]] name = "tokio-util" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.2.5", - "tokio 1.2.0", + "pin-project-lite", + "tokio", ] [[package]] @@ -2614,9 +2371,8 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.5", + "cfg-if", + "pin-project-lite", "tracing-attributes", "tracing-core", ] @@ -2641,16 +2397,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.1.2" @@ -2709,41 +2455,45 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", - "tokio 0.2.25", + "tokio", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.25", + "tokio", "trust-dns-proto", ] @@ -2771,15 +2521,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2868,7 +2609,7 @@ version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2895,7 +2636,7 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2962,12 +2703,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2978,12 +2713,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -3002,7 +2731,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -3011,17 +2740,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 27394f0..0351543 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,41 +28,39 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-com #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.2.0" } +tokio = "1.2.0" # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries -log = "0.4.11" +log = "0.4.14" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" # Used for ruma wrapper -serde_json = { version = "1.0.60", features = ["raw_value"] } +serde_json = { version = "1.0.64", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.14" +serde_yaml = "0.8.17" # Used for pdu definition -serde = "1.0.117" +serde = "1.0.123" # Used for secure identifiers -rand = "0.7.3" +rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.1" # Used for conduit::Error type -thiserror = "1.0.22" +thiserror = "1.0.24" # Used to generate thumbnails for images -image = { version = "0.23.12", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.13.0" # Used when hashing the state -ring = "0.16.19" +ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices -regex = "1.4.2" +regex = "1.4.3" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 1c72915..0dadde9 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -21,7 +21,7 @@ use ruma::{ EventType, }, serde::Raw, - ServerName, + ServerName, UInt, }; #[cfg(feature = "conduit_bin")] @@ -128,7 +128,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: 
&Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..7e924db 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -73,11 +73,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, }) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 8433884..084e4a1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 985647a..e5bc16e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index 544c073..bcf5ffb 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; diff --git a/src/utils.rs b/src/utils.rs index c82e6fe..0783567 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -55,6 +55,7 @@ pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) .take(length) + .map(char::from) .collect() } From 231c6032f465024b336122b54390f10a38d4bd6b Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 4 Mar 2021 12:35:12 +0000 Subject: [PATCH 0458/1727] Make clippy happy (needless-return, etc.) 
--- src/appservice_server.rs | 2 +- src/client_server/push.rs | 2 +- src/client_server/session.rs | 3 +-- src/database/globals.rs | 3 ++- src/database/rooms.rs | 7 ++++--- src/database/sending.rs | 13 +++++++------ src/server_server.rs | 6 +++--- 7 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 03da73a..5403f96 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -95,7 +95,7 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) } } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 8c8b643..7b3acfc 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -119,8 +119,7 @@ pub async fn login_route( let device_exists = body.device_id.as_ref().map_or(false, |device_id| { db.users .all_device_ids(&user_id) - .find(|x| x.as_ref().map_or(false, |v| v == device_id)) - .is_some() + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..7a0c217 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,9 +9,10 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +type WellKnownMap = HashMap, (String, Option)>; #[derive(Clone)] pub struct Globals { - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: Arc>, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7e80134..6ee4f19 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -183,7 +183,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -449,6 +449,7 @@ impl Rooms { /// /// By this point the incoming event should be fully authenticated, no auth happens /// in `append_pdu`. 
+ #[allow(clippy::too_many_arguments)] pub fn append_pdu( &self, pdu: &PduEvent, @@ -970,7 +971,7 @@ impl Rooms { .get("users") .and_then(|users| users.as_sequence()) .map_or_else( - || Vec::new(), + Vec::new, |users| { users .iter() @@ -1002,7 +1003,7 @@ impl Rooms { .and_then(|string| { UserId::parse_with_server_name(string, globals.server_name()).ok() }); - + #[allow(clippy::blocks_in_if_conditions)] if bridge_user_id.map_or(false, |bridge_user_id| { self.is_joined(&bridge_user_id, room_id).unwrap_or(false) }) || users.iter().any(|users| { diff --git a/src/database/sending.rs b/src/database/sending.rs index 11034ea..48fe68a 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -102,7 +102,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -148,7 +148,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -180,7 +180,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -196,6 +196,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { + #[allow(clippy::blocks_in_if_conditions)] if last_failed_try.get(server).map_or(false, |(tries, instant)| { // Fail if a request has failed recently (exponential backoff) let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; @@ -209,7 +210,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -247,7 +248,7 @@ impl Sending { #[tracing::instrument(skip(self))] pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -385,7 +386,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/server_server.rs b/src/server_server.rs index 4ea9bfe..261172a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -220,7 +220,7 @@ fn add_port_to_hostname(destination_str: String) -> String { #[tracing::instrument(skip(globals))] async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &'_ ServerName, ) -> (String, Option) { let mut host = None; @@ -277,9 +277,9 @@ async fn find_actual_destination( } #[tracing::instrument(skip(globals))] -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &'_ str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() From 1ce00f90fea3f1f078e22387442087c5ca7329b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 13:36:06 +0100 Subject: [PATCH 0459/1727] fix: don't accept incoming pdus if we know about them already --- src/server_server.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff 
--git a/src/server_server.rs b/src/server_server.rs index 00f31ca..d589818 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -554,6 +554,12 @@ pub async fn send_transaction_message_route<'a>( // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // Skip the pdu if we already know about it + if db.rooms.get_pdu_id(&event_id)?.is_some() { + resolved_map.insert(event_id, Err("We already know about this event".into())); + continue; + } + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) From e239014fa3935c523565b21344fec0b926c7fccf Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:02:41 -0500 Subject: [PATCH 0460/1727] Query for the correct server --- src/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index dcd72f7..12b60b9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1148,7 +1148,7 @@ pub(crate) async fn fetch_signing_keys( &db.globals, &server, get_remote_server_keys::v2::Request::new( - &server, + origin, SystemTime::now() .checked_add(Duration::from_secs(3600)) .expect("SystemTime to large"), From 0d55964d241c00b36341f1843bb515f9241e8463 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:45:23 -0500 Subject: [PATCH 0461/1727] Run nightly cargo fmt --- rustfmt.toml | 2 +- src/database.rs | 8 +++++--- src/database/appservice.rs | 6 ++++-- src/database/globals.rs | 9 +++++---- src/database/sending.rs | 3 +-- src/error.rs | 6 +++++- src/main.rs | 14 +++++++++----- 7 files changed, 30 insertions(+), 18 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 7d2cf54..e86028b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1 @@ -merge_imports = true +imports_granularity="Crate" diff --git a/src/database.rs b/src/database.rs index 34b74be..bf3e0f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -17,9 +17,11 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::collections::HashMap; -use std::fs::remove_dir_all; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + fs::remove_dir_all, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 26ea5b9..764291d 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; #[derive(Clone)] pub struct Appservice { diff --git a/src/database/globals.rs b/src/database/globals.rs index 8d7f104..8c0463d 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,10 +4,11 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, + time::Duration, +}; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; diff --git a/src/database/sending.rs b/src/database/sending.rs index 9793971..00073af 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -16,8 +16,7 @@ use ruma::{ ServerName, }; use sled::IVec; -use 
tokio::select; -use tokio::sync::Semaphore; +use tokio::{select, sync::Semaphore}; #[derive(Clone)] pub struct Sending { diff --git a/src/error.rs b/src/error.rs index 13efce6..d8f10f4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; +use std::{ + collections::HashMap, + sync::RwLock, + time::{Duration, Instant}, +}; use log::error; use ruma::{ diff --git a/src/main.rs b/src/main.rs index b469f4d..a2c020f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -// #![warn(rust_2018_idioms)] +#![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; @@ -19,11 +19,15 @@ pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use rocket::figment::{ - providers::{Env, Format, Toml}, - Figment, +use rocket::{ + catch, catchers, + fairing::AdHoc, + figment::{ + providers::{Env, Format, Toml}, + Figment, + }, + routes, Request, }; -use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; use tracing::span; use tracing_subscriber::{prelude::*, Registry}; From 6309506a48daa70b120b4dcf0ea76ab2487208d2 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 4 Mar 2021 15:26:34 +0100 Subject: [PATCH 0462/1727] Allow suspicious_else_formatting This is probably unfixable because it is caused by macros --- src/lib.rs | 1 + src/main.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index aed129f..196626e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::suspicious_else_formatting)] pub mod appservice_server; pub mod client_server; mod database; diff --git a/src/main.rs b/src/main.rs index 498cfa6..eff5552 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,5 @@ #![warn(rust_2018_idioms)] +#![allow(clippy::suspicious_else_formatting)] pub mod appservice_server; pub mod client_server; From 4027cbd01ddf11541c84e4029fbb1587d951e784 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 4 Mar 2021 15:27:25 +0100 Subject: [PATCH 0463/1727] Fix various clippy errors --- src/database/sending.rs | 8 +++----- src/ruma_wrapper.rs | 5 +++-- src/server_server.rs | 10 +++++++--- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 48fe68a..1ae063f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -74,7 +74,7 @@ impl Sending { for ((server, is_appservice), pdus) in current_transactions { // Create new reservation let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -217,11 +217,9 @@ impl Sending { prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); - if servercurrentpdus + servercurrentpdus .compare_and_swap(prefix, Option::<&[u8]>::None, Some(&[])) // Try to reserve - == Ok(Ok(())) { true } else { - false - } + == Ok(Ok(())) }) { servercurrentpdus.insert(&key, &[]).unwrap(); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 898561f..188d1b6 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,17 +1,17 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; use std::{ - convert::{TryFrom, TryInto}, + convert::{TryInto}, ops::Deref, }; #[cfg(feature = "conduit_bin")] use { crate::utils, + ruma::api::{AuthScheme, OutgoingRequest}, log::{debug, warn}, rocket::{ data::{ @@ -25,6 +25,7 @@ use { Request, State, }, std::io::Cursor, + std::convert::TryFrom, }; 
/// This struct converts rocket requests into ruma structs by converting them into http requests diff --git a/src/server_server.rs b/src/server_server.rs index 00f31ca..a3fac18 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3,7 +3,7 @@ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{info, warn}; use regex::Regex; -use rocket::{get, post, put, response::content::Json, State}; +use rocket::{response::content::Json, State}; use ruma::{ api::{ federation::{ @@ -29,6 +29,10 @@ use std::{ net::{IpAddr, SocketAddr}, time::{Duration, SystemTime}, }; +#[cfg(feature = "conduit_bin")] +use { + rocket::{get, post, put} +}; #[tracing::instrument(skip(globals))] pub async fn send_request( @@ -591,7 +595,7 @@ pub async fn send_transaction_message_route<'a>( .get("users") .and_then(|users| users.as_sequence()) .map_or_else( - || Vec::new(), + Vec::new, |users| { users .iter() @@ -623,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .and_then(|string| { UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - + #[allow(clippy::blocks_in_if_conditions)] if bridge_user_id.map_or(false, |bridge_user_id| { db.rooms .is_joined(&bridge_user_id, room_id) From bb1064ce04fb66594f4de7b61b60d05d05ef5730 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 15:28:28 +0100 Subject: [PATCH 0464/1727] docs: small readme changes --- README.md | 49 +++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 8e4b1b7..bff15e6 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,43 @@ # Conduit ### A Matrix homeserver written in Rust -[![Liberapay](https://img.shields.io/liberapay/receives/timokoesters?logo=liberapay)](https://liberapay.com/timokoesters) -[![Matrix](https://img.shields.io/matrix/conduit:conduit.rs?server_fqdn=conduit.koesters.xyz)](https://matrix.to/#/#conduit:matrix.org) - #### What is the goal? A fast Matrix homeserver that's easy to set up and just works. You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. + #### Can I try it out? Yes! Just open a Matrix client ( or Element Android for example) and register on the `https://conduit.koesters.xyz` homeserver. + +#### What is it build on? + +- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and + responses that can be (de)serialized +- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with + good performance +- [Rocket](https://rocket.rs): A flexible web framework + + +#### What are the biggest things still missing? + +- Most federation features (invites, e2ee) +- Push notifications on mobile +- Notification settings +- Lots of testing + +Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). + + #### How can I deploy my own? ##### Deploy -Download or compile a conduit binary and call it from somewhere like a systemd script. [Read +Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read more](DEPLOY.md) ##### Deploy using Docker @@ -33,32 +51,15 @@ docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit matrixconduit Or build and run it with docker or docker-compose. [Read more](docker/README.md) -#### What is it build on? 
- -- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and - responses that can be (de)serialized -- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with - good performance -- [Rocket](https://rocket.rs): A flexible web framework - -#### What are the biggest things still missing? - -- Appservices (Bridges and Bots) -- Most federation features (invites, e2ee) -- Push notifications on mobile -- Notification settings -- Lots of testing - -Also check out the [milestones](https://git.koesters.xyz/timo/conduit/milestones). #### How can I contribute? 1. Look for an issue you would like to work on and make sure it's not assigned to other users 2. Ask someone to assign the issue to you (comment on the issue or chat in - #conduit:matrix.org) -3. Fork the repo and work on the issue. #conduit:matrix.org is happy to help :) -4. Submit a PR + #conduit:nordgedanken.dev) +3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :) +4. Submit a MR #### Donate From 156296ac051a83cf009cd399dc097164238a6a18 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 4 Mar 2021 17:22:23 +0000 Subject: [PATCH 0465/1727] Fix Complement CI paths The final container tried to COPY from the default cargo target dir, but we only checked if cached_target is present. We copy from cached_target to target when cached_target is present now in the builder --- tests/Complement.Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 370db7c..7deb6e8 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,9 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . -RUN test -e cached_target/release/conduit || cargo build --release +RUN mkdir -p target/release +RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release + FROM valkum/docker-rust-ci:latest WORKDIR /workdir @@ -37,4 +39,4 @@ CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && t sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ /workdir/caddy start --config caddy.json > /dev/null && \ /workdir/conduit - \ No newline at end of file + From 96431069a9bc72cc4cf79a030e9f68a371910201 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 21:26:30 +0100 Subject: [PATCH 0466/1727] docs: add icon and license to readme --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index bff15e6..32fa657 100644 --- a/README.md +++ b/README.md @@ -65,3 +65,9 @@ Or build and run it with docker or docker-compose. 
[Read more](docker/README.md) Liberapay: \ Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` + + +#### Logo + +Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \ +Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md From 7bc58461cd5378889fa45fe6fa65997a09f16c16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 12 Mar 2021 16:22:35 +0100 Subject: [PATCH 0467/1727] fix: avoid illegal instruction crash see https://github.com/spacejam/sled/issues/1232 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0351543..8addf50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-com # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", default-features = false } +sled = { version = "0.34.6", features = ["no_metrics"] } # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions From 6da40225bb7363b9c76c4574820843faf587b43b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 13 Mar 2021 16:30:12 +0100 Subject: [PATCH 0468/1727] improvement: make state res actually work --- Cargo.lock | 97 +++++-- Cargo.toml | 11 +- src/client_server/account.rs | 13 +- src/client_server/capabilities.rs | 35 ++- src/client_server/config.rs | 7 +- src/client_server/membership.rs | 189 ++++++-------- src/client_server/sync.rs | 21 +- src/database.rs | 3 +- src/database/globals.rs | 56 ++-- src/database/rooms.rs | 219 +++++++--------- src/database/sending.rs | 7 +- src/main.rs | 2 + src/server_server.rs | 418 +++++++++++++++--------------- 13 files changed, 537 insertions(+), 541 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7538e0..e157565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -187,6 +187,7 @@ dependencies = [ "log", "opentelemetry", "opentelemetry-jaeger", + "pretty_env_logger", "rand", "regex", "reqwest", @@ -383,6 +384,19 @@ dependencies = [ "syn", ] +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "figment" version = "0.10.3" @@ -664,6 +678,15 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + [[package]] name = "hyper" version = "0.14.4" @@ -688,6 +711,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "futures-util", + "hyper", + "log", + "rustls", + "tokio", + "tokio-rustls", + "webpki", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1259,6 +1297,16 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" 
+[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +dependencies = [ + "env_logger", + "log", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -1454,6 +1502,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -1463,14 +1512,17 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite", + "rustls", "serde", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.7.0", ] @@ -1570,7 +1622,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "js_int", @@ -1590,7 +1641,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1605,7 +1655,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1616,7 +1665,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1630,7 +1678,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1649,7 +1696,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1662,7 +1708,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1676,7 +1721,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1687,7 +1731,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1702,7 +1745,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand", @@ -1716,7 +1758,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source 
= "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1727,12 +1768,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1745,7 +1784,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1798,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1773,7 +1810,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1784,7 +1820,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2051,7 +2086,6 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", @@ -2136,6 +2170,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + [[package]] name = "thiserror" version = "1.0.24" @@ -2656,6 +2699,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +dependencies = [ + "webpki", +] + [[package]] name = "weezl" version = "0.1.4" @@ -2684,6 +2736,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 2293b62..9c08776 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 +18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type 
definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -# ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" @@ -50,7 +50,7 @@ rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.11.1" +reqwest = { version = "0.11.1", features = ["rustls-tls"] } # Used for conduit::Error type thiserror = "1.0.24" # Used to generate thumbnails for images @@ -71,6 +71,7 @@ opentelemetry = "0.12.0" tracing-subscriber = "0.2.16" tracing-opentelemetry = "0.11.0" opentelemetry-jaeger = "0.11.0" +pretty_env_logger = "0.4.0" [features] default = ["conduit_bin"] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 7d3067e..1c6f517 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -455,16 +455,9 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMessage, - content: serde_json::to_value(message::MessageEventContent::Text( - message::TextMessageEventContent { - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - formatted: Some(message::FormattedBody { - format: message::MessageFormat::Html, - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. 
If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - }), - relates_to: None, - new_content: None, - }, + content: serde_json::to_value(message::MessageEventContent::text_html( + "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index b4fdf69..a3c0db6 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,5 +1,10 @@ use crate::ConduitResult; -use ruma::{api::client::r0::capabilities::get_capabilities, RoomVersionId}; +use ruma::{ + api::client::r0::capabilities::{ + get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, + }, + RoomVersionId, +}; use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] @@ -12,24 +17,14 @@ use rocket::get; #[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); - available.insert( - RoomVersionId::Version5, - get_capabilities::RoomVersionStability::Stable, - ); - available.insert( - RoomVersionId::Version6, - get_capabilities::RoomVersionStability::Stable, - ); + available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); + available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); - Ok(get_capabilities::Response { - capabilities: get_capabilities::Capabilities { - change_password: get_capabilities::ChangePasswordCapability::default(), // enabled by default - room_versions: get_capabilities::RoomVersionsCapability { - default: RoomVersionId::Version6, - available, - }, - custom_capabilities: BTreeMap::new(), - }, - } - .into()) + let mut capabilities = Capabilities::new(); + capabilities.room_versions = RoomVersionsCapability { + default: RoomVersionId::Version6, + available, + }; + + Ok(get_capabilities::Response { capabilities }.into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index aece96e..a53b7cd 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -23,7 +23,7 @@ pub async fn set_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let content = serde_json::from_str::(body.data.get()) + let data = serde_json::from_str(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = 
body.event_type.to_string(); @@ -33,10 +33,7 @@ pub async fn set_global_account_data_route( sender_user, event_type.clone().into(), &BasicEvent { - content: CustomEventContent { - event_type, - json: content, - }, + content: CustomEventContent { event_type, data }, }, &db.globals, )?; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b7b2d4b..d63fa02 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::warn; +use log::{info, warn}; use ruma::{ api::{ client::{ @@ -21,11 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -// use state_res::Event; use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap}, convert::TryFrom, - iter, sync::Arc, }; @@ -439,6 +437,7 @@ pub async fn joined_members_route( Ok(joined_members::Response { joined }.into()) } +#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, @@ -566,23 +565,22 @@ async fn join_room_by_id_helper( Ok((event_id, value)) }; - let room_state = send_join_response.room_state.state.iter().map(add_event_id); + let count = db.globals.next_count()?; - let _state_events = room_state - .clone() - .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) - .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created - .collect::>>()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); - let auth_chain = send_join_response + let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; + + let mut state = HashMap::new(); + + for pdu in send_join_response .room_state - .auth_chain + .state .iter() - .map(add_event_id); - - let mut event_map = room_state - .chain(auth_chain) - .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created + .map(add_event_id) .map(|r| { let (event_id, value) = r?; PduEvent::from_id_val(&event_id, value.clone()) @@ -592,97 +590,78 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; - - let control_events = event_map - .values() - .filter(|pdu| state_res::is_power_event(pdu)) - .map(|pdu| pdu.event_id.clone()) - .collect::>(); - - // These events are not guaranteed to be sorted but they are resolved according to spec - // we auth them anyways to weed out faulty/malicious server. The following is basically the - // full state resolution algorithm. - let event_ids = event_map.keys().cloned().collect::>(); - - let sorted_control_events = state_res::StateResolution::reverse_topological_power_sort( - &room_id, - &control_events, - &mut event_map, - &event_ids, - ); - - // Auth check each event against the "partial" state created by the preceding events - let resolved_control_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_control_events, - &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // This removes the control events that failed auth, leaving the resolved - // to be mainline sorted. 
In the actual `state_res::StateResolution::resolve` - // function both are removed since these are all events we don't know of - // we must keep track of everything to add to our DB. - let events_to_sort = event_map - .keys() - .filter(|id| { - !sorted_control_events.contains(id) - || resolved_control_events.values().any(|rid| *id == rid) - }) - .cloned() - .collect::>(); - - let power_level = - resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); - // Sort the remaining non control events - let sorted_event_ids = state_res::StateResolution::mainline_sort( - room_id, - &events_to_sort, - power_level, - &mut event_map, - ); - - let resolved_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_event_ids, - &resolved_control_events, - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // filter the events that failed the auth check keeping the remaining events - // sorted correctly - for ev_id in sorted_event_ids - .iter() - .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - let pdu = event_map - .get(ev_id) - .expect("Found event_id in sorted events that is not in resolved state"); + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + if let Some(state_key) = &pdu.state_key { + if pdu.kind == EventType::RoomMember { + let target_user_id = UserId::try_from(state_key.clone()).map_err(|_| { + Error::BadServerResponse("Invalid user id in send_join response.") + })?; - // We do not rebuild the PDU in this case only insert to DB - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &[pdu.event_id.clone()], - &db, - )?; - - db.rooms.set_room_state(room_id, &hash)?; + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + db.rooms.update_membership( + &pdu.room_id, + &target_user_id, + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content.", + ) + })?, + &pdu.sender, + &db.account_data, + &db.globals, + )?; + } + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(id.as_bytes()); + state.insert((pdu.kind.clone(), state_key.clone()), long_id); + } } + + state.insert( + ( + pdu.kind.clone(), + pdu.state_key.clone().expect("join event has state key"), + ), + pdu_id.clone(), + ); + + db.rooms.force_state(room_id, state, &db.globals)?; + + for pdu in send_join_response + .room_state + .auth_chain + .iter() + .map(add_event_id) + .map(|r| { + let (event_id, value) = r?; + PduEvent::from_id_val(&event_id, value.clone()) + .map(|ev| (event_id, Arc::new(ev))) + .map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + }) + }) + { + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + } + + db.rooms.append_pdu( + &pdu, + utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), + db.globals.next_count()?, + pdu_id.into(), + &[pdu.event_id.clone()], + db, + 
)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fac6b15..f01eb39 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -102,9 +102,14 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. - let first_pdu_after_since = db.rooms.pdus_after(sender_user, &room_id, since).next(); + let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); + let pdus_after_since = db + .rooms + .pdus_after(sender_user, &room_id, since) + .next() + .is_some(); - let since_state_hash = first_pdu_after_since + let since_state_hash = first_pdu_before_since .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); @@ -114,7 +119,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if since_state_hash != None && Some(¤t_state_hash) != since_state_hash.as_ref() { + ) = if pdus_after_since && Some(¤t_state_hash) != since_state_hash.as_ref() { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -138,9 +143,9 @@ pub async fn sync_events_route( // Calculations: let new_encrypted_room = - encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); + encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none()); - let send_member_count = since_state.as_ref().map_or(false, |since_state| { + let send_member_count = since_state.as_ref().map_or(true, |since_state| { since_state.as_ref().map_or(true, |since_state| { current_members.len() != since_state @@ -179,7 +184,7 @@ pub async fn sync_events_route( let since_membership = since_state .as_ref() - .map_or(MembershipState::Join, |since_state| { + .map_or(MembershipState::Leave, |since_state| { since_state .as_ref() .and_then(|since_state| { @@ -221,7 +226,7 @@ pub async fn sync_events_route( } } - let joined_since_last_sync = since_sender_member.map_or(false, |member| { + let joined_since_last_sync = since_sender_member.map_or(true, |member| { member.map_or(true, |member| member.membership != MembershipState::Join) }); @@ -310,7 +315,7 @@ pub async fn sync_events_route( (None, None, Vec::new()) }; - let state_events = if joined_since_last_sync { + let state_events = if dbg!(joined_since_last_sync) { current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) diff --git a/src/database.rs b/src/database.rs index bf3e0f0..0f5e4b4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -165,9 +165,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, - roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 8c0463d..dd594c5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -24,7 +24,7 @@ pub struct 
Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey + pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey } impl Globals { @@ -157,37 +157,31 @@ impl Globals { /// /// This doesn't actually check that the keys provided are newer than the old set. pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { - // Remove outdated keys - let now = crate::utils::millis_since_unix_epoch(); - for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { - let (k, _) = item?; - let valid_until = k - .splitn(2, |&b| b == 0xff) - .nth(1) - .map(crate::utils::u64_from_bytes) - .ok_or_else(|| Error::bad_database("Invalid signing keys."))? - .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + let mut key1 = origin.as_bytes().to_vec(); + key1.push(0xff); - if now > valid_until { - self.servertimeout_signingkey.remove(k)?; - } - } + let mut key2 = key1.clone(); - let mut key = origin.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice( - &(keys - .valid_until_ts - .duration_since(std::time::UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .to_be_bytes(), - ); + let ts = keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64; + + key1.extend_from_slice(&ts.to_be_bytes()); + key2.extend_from_slice(&(ts + 1).to_be_bytes()); self.servertimeout_signingkey.insert( - key, + key1, serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), )?; + + self.servertimeout_signingkey.insert( + key2, + serde_json::to_vec(&keys.old_verify_keys) + .expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) } @@ -196,7 +190,10 @@ impl Globals { &self, origin: &ServerName, ) -> Result> { + let mut response = BTreeMap::new(); + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { let (k, bytes) = item?; let valid_until = k @@ -207,10 +204,11 @@ impl Globals { .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; // If these keys are still valid use em! if valid_until > now { - return serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?; + response.extend(btree); } } - Ok(BTreeMap::default()) + Ok(response) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d48494b..2a88628 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::error; +use log::{error, info, warn}; use regex::Regex; use ring::digest; use ruma::{ @@ -71,10 +71,7 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) roomeventid_outlierpdu: sled::Tree, - /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. - /// This allows us to skip any state snapshots that would for sure not have the outlier. 
- pub(super) roomeventid_outlierpducount: sled::Tree, + pub(super) eventid_outlierpdu: sled::Tree, /// RoomId + EventId -> Parent PDU EventId. pub(super) prevevent_parent: sled::Tree, @@ -89,19 +86,21 @@ impl Rooms { room_id: &RoomId, state_hash: &StateHashId, ) -> Result> { - self.stateid_pduid + let r = self + .stateid_pduid .scan_prefix(&state_hash) .values() - .map(|pduid_short| { - let mut pduid = room_id.as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(&pduid_short?); - match self.pduid_pdu.get(&pduid)? { + .map(|short_id| { + let short_id = short_id?; + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); + match self.pduid_pdu.get(&long_id)? { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .roomeventid_outlierpdu - .get(pduid)? + .eventid_outlierpdu + .get(short_id)? .map(|b| { serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -124,7 +123,9 @@ impl Rooms { pdu, )) }) - .collect() + .collect(); + + r } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -140,6 +141,8 @@ impl Rooms { key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); + info!("Looking for {} {:?}", event_type, state_key); + let short = self.statekey_short.get(&key)?; if let Some(short) = short { @@ -147,32 +150,40 @@ impl Rooms { stateid.push(0xff); stateid.extend_from_slice(&short); + info!("trying to find pduid/eventid. short: {:?}", stateid); self.stateid_pduid .get(&stateid)? - .map_or(Ok(None), |pdu_id_short| { - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&pdu_id_short); + .map_or(Ok(None), |short_id| { + info!("found in stateid_pduid"); + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); - Ok::<_, Error>(Some(( - pdu_id.clone().into(), - match self.pduid_pdu.get(&pdu_id)? { - Some(b) => serde_json::from_slice::(&b) + Ok::<_, Error>(Some(match self.pduid_pdu.get(&long_id)? { + Some(b) => ( + long_id.clone().into(), + serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - None => self - .roomeventid_outlierpdu - .get(pdu_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })??, - }, - ))) + ), + None => { + info!("looking in outliers"); + ( + short_id.clone().into(), + self.eventid_outlierpdu + .get(&short_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + ) + } + })) }) } else { + info!("short id not found"); Ok(None) } } @@ -215,6 +226,8 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); + } else { + warn!("Could not find {} {:?} in state", event_type, state_key); } } Ok(events) @@ -253,11 +266,11 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result<()> { let state_hash = - self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; + self.calculate_hash(&state.values().map(|long_id| &**long_id).collect::>())?; let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), id_long) in state { + for ((event_type, state_key), long_id) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -273,16 +286,13 @@ impl Rooms { } }; - // Because of outliers this could also be an eventID but that - // is handled by `state_full` - let pdu_id_short = id_long - .splitn(2, |&b| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; + // If it's a pdu id we remove the room id, if it's an event id we leave it the same + let short_id = long_id.splitn(2, |&b| b == 0xff).nth(1).unwrap_or(&long_id); let mut state_id = prefix.clone(); state_id.extend_from_slice(&short.to_be_bytes()); - self.stateid_pduid.insert(state_id, pdu_id_short)?; + info!("inserting {:?} into {:?}", short_id, state_id); + self.stateid_pduid.insert(state_id, short_id)?; } self.roomid_statehash @@ -348,20 +358,19 @@ impl Rooms { pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => self - .roomeventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu's id. @@ -371,24 +380,31 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } + pub fn get_long_id(&self, event_id: &EventId) -> Result> { + Ok(self + .get_pdu_id(event_id)? + .map_or_else(|| event_id.as_bytes().to_vec(), |pduid| pduid.to_vec())) + } + /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { - Some(b) => b, - None => return Ok(None), - }, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu. @@ -484,7 +500,7 @@ impl Rooms { /// Returns the pdu from the outlier tree. 
pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.roomeventid_outlierpdu + self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -494,25 +510,12 @@ impl Rooms { /// Append the PDU as an outlier. /// /// Any event given to this will be processed (state-res) on another thread. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { - log::info!( - "Number of outlier pdu's {}", - self.roomeventid_outlierpdu.len() - ); - - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - - self.eventid_pduid - .insert(pdu.event_id().as_bytes(), key.as_slice())?; - - self.roomeventid_outlierpdu.insert( - &key, + pub fn add_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + self.eventid_outlierpdu.insert( + &pdu.event_id.as_bytes(), &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; - self.roomeventid_outlierpducount - .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; + Ok(()) } @@ -557,50 +560,6 @@ impl Rooms { } } - // We no longer keep this pdu as an outlier - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - if self.roomeventid_outlierpdu.remove(&key)?.is_some() { - if let Some(state_key) = pdu.state_key.as_deref() { - let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_short.get(&statekey)? { - Some(short) => utils::u64_from_bytes(&short).map_err(|_| { - Error::bad_database("Invalid short bytes in statekey_short.") - })?, - None => { - error!( - "This event has been inserted into the state snapshot tree previously." - ); - let short = db.globals.next_count()?; - self.statekey_short - .insert(&statekey, &short.to_be_bytes())?; - short - } - }; - - let mut start = pdu.room_id().as_bytes().to_vec(); - start.extend_from_slice( - &self - .roomeventid_outlierpducount - .get(&key)? - .unwrap_or_default(), - ); - for hash in self.pduid_statehash.range(start..).values() { - let mut hash = hash?.to_vec(); - hash.extend_from_slice(&short.to_be_bytes()); - - let _ = dbg!(self.stateid_pduid.compare_and_swap( - hash, - Some(pdu.event_id().as_bytes()), - Some(pdu_id.as_ref()), - )?); - } - } - } - // We must keep track of all events that have been referenced. for leaf in leaves { let mut key = pdu.room_id().as_bytes().to_vec(); @@ -1275,7 +1234,7 @@ impl Rooms { } /// Update current membership data. 
- fn update_membership( + pub fn update_membership( &self, room_id: &RoomId, user_id: &UserId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 00073af..f96e489 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -346,6 +346,8 @@ impl Sending { .collect::>(); let permit = maximum_requests.acquire().await; + + info!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &globals, &*server, @@ -361,7 +363,10 @@ impl Sending { }, ) .await - .map(|_response| (server.clone(), is_appservice)) + .map(|response| { + info!("server response: {:?}", response); + (server.clone(), is_appservice) + }) .map_err(|e| (server, is_appservice, e)); drop(permit); diff --git a/src/main.rs b/src/main.rs index a2c020f..eb89fea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -204,6 +204,8 @@ async fn main() { rocket.launch().await.unwrap(); } else { + pretty_env_logger::init(); + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); diff --git a/src/server_server.rs b/src/server_server.rs index a665fe9..02610e8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -509,7 +509,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - // dbg!(&*body); + info!("Incoming PDUs: {:?}", &body.pdus); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -600,37 +600,11 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { + info!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { - let sender = - UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); - let origin = sender.server_name(); - - let keys = match fetch_signing_keys(&db, &room_id, origin).await { - Ok(keys) => keys, - Err(_) => { - resolved_map.insert( - event_id, - Err("Could not find signing keys for this server".to_string()), - ); - continue; - } - }; - - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } else { - resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); - continue; - } - // TODO: make this persist but not a DB Tree... // This is all the auth_events that have been recursively fetched so they don't have to be // deserialized over and over again. This could potentially also be some sort of trie (suffix tree) @@ -645,11 +619,11 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (Arc, Vec>) = match validate_event( + let (pdu, previous_create): (Arc, Option>) = match validate_event( &db, value, event_id.clone(), - &pub_key_map, + &mut pub_key_map, server_name, // All the auth events gathered will be here &mut auth_cache, @@ -662,15 +636,11 @@ pub async fn send_transaction_message_route<'a>( continue; } }; - - let single_prev = if previous.len() == 1 { - previous.first().cloned() - } else { - None - }; + info!("Validated event."); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu)?; + db.rooms.add_pdu_outlier(&pdu)?; + info!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -679,6 +649,7 @@ pub async fn send_transaction_message_route<'a>( // // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event + info!("Requesting state at event."); let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -693,14 +664,20 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events( + info!("Fetching state events at event."); + let state = match fetch_events( &db, server_name, - &pub_key_map, + &mut pub_key_map, &res.pdu_ids, &mut auth_cache, ) - .await?; + .await + { + Ok(state) => state, + Err(_) => continue, + }; + // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -716,17 +693,21 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, + let incoming_auth_events = match fetch_events( + &db, + server_name, + &mut pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, ) + .await + { + Ok(state) => state, + Err(_) => continue, + }; + + info!("Fetching auth events of state events at event."); + (state, incoming_auth_events) } Err(_) => { resolved_map.insert( @@ -741,7 +722,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create.clone(), &state_at_event, None, // TODO: third party invite ) @@ -754,6 +735,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + info!("Auth check succeeded."); // End of step 10. // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -764,10 +746,12 @@ pub async fn send_transaction_message_route<'a>( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); + info!("current state: {:#?}", current_state); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create, ¤t_state, None, ) @@ -780,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; }; + info!("Auth check with current state succeeded."); // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) @@ -787,7 +772,10 @@ pub async fn send_transaction_message_route<'a>( // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. let extremities = match calculate_forward_extremities(&db, &pdu).await { - Ok(fork_ids) => fork_ids, + Ok(fork_ids) => { + info!("Calculated new forward extremities: {:?}", fork_ids); + fork_ids + } Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); continue; @@ -836,7 +824,6 @@ pub async fn send_transaction_message_route<'a>( // We do need to force an update to this rooms state update_state = true; - // TODO: remove this is for current debugging Jan, 15 2021 let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; @@ -877,6 +864,8 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); + info!("auth events: {:?}", auth_cache); + let res = match state_res::StateResolution::resolve( pdu.room_id(), &RoomVersionId::Version6, @@ -927,6 +916,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + info!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -938,6 +928,7 @@ pub async fn send_transaction_message_route<'a>( None }, )?; + info!("Updated resolved state"); // Event has passed all auth/stateres checks } @@ -962,17 +953,52 @@ type AsyncRecursiveResult<'a, T> = Pin( db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &'a PublicKeyMap, + pub_key_map: &'a mut PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { +) -> AsyncRecursiveResult<'a, (Arc, Option>)> { Box::pin(async move { + for signature_server in match value + .get("signatures") + .ok_or_else(|| "No signatures in server response pdu.".to_string())? 
+ { + CanonicalJsonValue::Object(map) => map, + _ => return Err("Invalid signatures object in server response pdu.".to_string()), + } + .keys() + { + info!("Fetching signing keys for {}", signature_server); + let keys = match fetch_signing_keys( + &db, + &Box::::try_from(&**signature_server).map_err(|_| { + "Invalid servername in signatures of server response pdu.".to_string() + })?, + ) + .await + { + Ok(keys) => { + info!("Keys: {:?}", keys); + keys + } + Err(_) => { + return Err( + "Signature verification failed: Could not fetch signing key.".to_string(), + ); + } + }; + + pub_key_map.insert(signature_server.clone(), keys); + + info!("Fetched signing keys"); + } + let mut val = - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version5) { Ok(ver) => { if let ruma::signatures::Verified::Signatures = ver { match ruma::signatures::redact(&value, &RoomVersionId::Version6) { @@ -1000,26 +1026,34 @@ fn validate_event<'a>( ) .map_err(|_| "Event is not a valid PDU".to_string())?; + info!("Fetching auth events."); fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); + /* // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + info!("Fetching prev events."); + let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) .await .map_err(|e| e.to_string())?; + */ + + // if the previous event was the create event special rules apply + let previous_create = if pdu.auth_events.len() == 1 && pdu.prev_events == pdu.auth_events { + auth_cache.get(&pdu.auth_events[0]).cloned() + } else { + None + }; // Check that the event passes auth based on the auth_events + info!("Checking auth."); let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - if previous.len() == 1 { - previous.first().cloned() - } else { - None - }, + previous_create.clone(), &pdu.auth_events .iter() .map(|id| { @@ -1039,39 +1073,20 @@ fn validate_event<'a>( return Err("Event has failed auth check with auth events".to_string()); } - Ok((pdu, previous)) + info!("Validation successful."); + Ok((pdu, previous_create)) }) } -/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. 
+#[tracing::instrument(skip(db))] async fn fetch_check_auth_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, event_ids: &[EventId], auth_cache: &mut EventMap>, ) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - // TODO: Batch these async calls so we can wait on multiple at once - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| { - vec.pop() - .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) - })??; - - stack.extend(ev.auth_events()); - } + fetch_events(db, origin, key_map, event_ids, auth_cache).await?; Ok(()) } @@ -1086,44 +1101,58 @@ async fn fetch_check_auth_events( /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, events: &[EventId], auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; for id in events { + info!("Fetching event: {}", id); let pdu = match auth_cache.get(id) { - Some(pdu) => pdu.clone(), + Some(pdu) => { + info!("Event found in cache"); + pdu.clone() + } // `get_pdu` checks the outliers tree for us None => match db.rooms.get_pdu(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|e| { - error!("{:?}", e); - Error::Conflict("Authentication of event failed") - })?; + Some(pdu) => { + info!("Event found in outliers"); + Arc::new(pdu) + } + None => { + info!("Fetching event over federation"); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + info!("Got event over federation: {:?}", res); + let (event_id, value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("ERROR: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu + info!("Added fetched pdu as outlier."); + db.rooms.add_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + } }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); @@ -1134,14 +1163,23 @@ pub(crate) async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_signing_keys( db: &Database, - room_id: &RoomId, origin: &ServerName, -) -> Result> { +) -> Result> { + let mut result = BTreeMap::new(); + match db.globals.signing_keys_for(origin)? 
{ - keys if !keys.is_empty() => Ok(keys), + keys if !keys.is_empty() => { + info!("we knew the signing keys already: {:?}", keys); + Ok(keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect()) + } _ => { + info!("Asking {} for it's signing key", origin); match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) @@ -1149,13 +1187,24 @@ pub(crate) async fn fetch_signing_keys( { Ok(keys) => { db.globals.add_signing_key(origin, &keys.server_key)?; - Ok(keys.server_key.verify_keys) + + result.extend( + keys.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + keys.server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + return Ok(result); } _ => { - for server in db.rooms.room_servers(room_id).filter( - |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)), - ) { - let server = server?; + for server in db.globals.trusted_servers() { + info!("Asking {} for {}'s signing key", server, origin); if let Ok(keys) = db .sending .send_federation_request( @@ -1170,30 +1219,21 @@ pub(crate) async fn fetch_signing_keys( ) .await { - let mut trust = 0; - let keys: Vec = keys.server_keys; - let key = keys.iter().fold(None, |mut key, next| { - if let Some(verified) = &key { - // rustc cannot elide this type for some reason - let v: &ServerSigningKeys = verified; - if v.verify_keys - .iter() - .zip(next.verify_keys.iter()) - .all(|(a, b)| a.1.key == b.1.key) - { - trust += 1; - } - } else { - key = Some(next.clone()) - } - key - }); - - if trust == (keys.len() - 1) && key.is_some() { - let k = key.unwrap(); + info!("Got signing keys: {:?}", keys); + for k in keys.server_keys.into_iter() { db.globals.add_signing_key(origin, &k)?; - return Ok(k.verify_keys); + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); } + return Ok(result); } } Err(Error::BadServerResponse( @@ -1211,6 +1251,7 @@ pub(crate) async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. +#[tracing::instrument(skip(db))] pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, @@ -1261,6 +1302,7 @@ pub(crate) async fn calculate_forward_extremities( /// /// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). +#[tracing::instrument(skip(db))] pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, @@ -1275,12 +1317,14 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); for id in current_leaves { + if id == &pdu.event_id { + continue; + } match db.rooms.get_pdu_id(id)? { // We can skip this because it is handled outside of this function // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. 
- Some(_) if id == pdu.event_id() => {} Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { let state_hash = db .rooms @@ -1308,40 +1352,7 @@ pub(crate) async fn build_forward_extremity_snapshots( } _ => { error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - - let res = db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: id, - }, - ) - .await?; - - // TODO: This only adds events to the auth_cache, there is for sure a better way to - // do this... - fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - - let mut state_before = - fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) - .await? - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect::>(); - - if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) - .await? - .pop() - { - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, pdu); - } - - // Now it's the state after - fork_states.insert(state_before); + return Err(Error::BadDatabase("Missing state snapshot.")); } } } @@ -1353,9 +1364,11 @@ pub(crate) async fn build_forward_extremity_snapshots( fork_states.insert(current_state); } + info!("Fork states: {:?}", fork_states); Ok(fork_states) } +#[tracing::instrument(skip(db))] pub(crate) fn update_resolved_state( db: &Database, room_id: &RoomId, @@ -1366,22 +1379,14 @@ pub(crate) fn update_resolved_state( if let Some(state) = state { let mut new_state = HashMap::new(); for ((ev_type, state_k), pdu) in state { - match db.rooms.get_pdu_id(pdu.event_id())? { - Some(pduid) => { - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pduid.to_vec(), - ); - } - None => { - error!("We are missing a state event for the current room state."); - } - } + let long_id = db.rooms.get_long_id(&pdu.event_id)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + long_id, + ); } db.rooms.force_state(room_id, new_state, &db.globals)?; @@ -1392,6 +1397,7 @@ pub(crate) fn update_resolved_state( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. +#[tracing::instrument(skip(db))] pub(crate) fn append_incoming_pdu( db: &Database, pdu: &PduEvent, @@ -1402,20 +1408,16 @@ pub(crate) fn append_incoming_pdu( // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); for ((ev_type, state_k), state_pdu) in state { - match db.rooms.get_pdu_id(state_pdu.event_id())? 
{ - Some(state_pduid) => { - new_state.insert( - ( - ev_type.clone(), - state_k - .clone() - .ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - state_pduid.to_vec(), - ); - } - None => error!("We are missing a state event for the incoming event snapshot"), - } + let long_id = db.rooms.get_long_id(state_pdu.event_id())?; + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + long_id.to_vec(), + ); } db.rooms From f79053484bfea3d3bb0b156972d9c77df6a5251c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 13 Mar 2021 19:00:13 +0000 Subject: [PATCH 0469/1727] Add .gitlab-ci.yml --- .gitlab-ci.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .gitlab-ci.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..10ca273 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,13 @@ +image: "rust:latest" + +variables: + GIT_SUBMODULE_STRATEGY: recursive + +before_script: + - apt-get update -yqq + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + +test:cargo: + script: + - rustc --version && cargo --version # Print version info for debugging + - cargo test --workspace --verbose --locked From d0d5bc412b570e10e9233b3b917e838cfd04df29 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 13 Mar 2021 20:33:16 +0100 Subject: [PATCH 0470/1727] Add more gitignore --- .gitignore | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index efbc1d9..e2f4e88 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,64 @@ -/target -**/*.rs.bk +# CMake +cmake-build-*/ +# IntelliJ +.idea/ +out/ +*.iml +modules.xml +*.ipr + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# Linux backup files +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +# Rust +/target/ + +### vscode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows shortcuts +*.lnk + +# Conduit Rocket.toml conduit.toml + +# Etc. 
+**/*.rs.bk From 03670282037c56d870605fb6e00c637c488a8c28 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sun, 14 Mar 2021 11:55:28 +0100 Subject: [PATCH 0471/1727] add settings.json --- .vscode/settings.json | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..c3f6605 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "rust-analyzer.procMacro.enable": true +} \ No newline at end of file From 44425a903a27a0ca0e1f9ff7bc65ea1b13ded54a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 16 Mar 2021 18:00:26 +0100 Subject: [PATCH 0472/1727] fix: multiple federation/pusher fixes --- src/client_server/push.rs | 8 +-- src/client_server/sync.rs | 9 +--- src/database/pusher.rs | 22 +++----- src/database/rooms.rs | 22 ++++---- src/database/sending.rs | 24 ++++----- src/error.rs | 60 --------------------- src/server_server.rs | 107 +++++++++++++++++--------------------- 7 files changed, 85 insertions(+), 167 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 4dc9769..a7ddbb6 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -686,10 +686,10 @@ pub async fn get_pushers_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { - pushers: db.pusher.get_pusher(sender)?, + pushers: db.pusher.get_pusher(sender_user)?, } .into()) } @@ -703,10 +703,10 @@ pub async fn set_pushers_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); - db.pusher.set_pusher(sender, pusher)?; + db.pusher.set_pusher(sender_user, pusher)?; db.flush().await?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 0fc98ec..6551b2a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -315,7 +315,7 @@ pub async fn sync_events_route( (None, None, Vec::new()) }; - let state_events = if dbg!(joined_since_last_sync) { + let state_events = if joined_since_last_sync { current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) @@ -703,12 +703,7 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let delay = tokio::time::sleep(duration); - tokio::pin!(delay); - tokio::select! 
{ - _ = &mut delay, if delay.is_elapsed() => {} - _ = watcher => {} - } + let _ = tokio::time::timeout(duration, watcher).await; } Ok(response.into()) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 2bf6bf7..59ccbef 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -35,8 +35,6 @@ impl PushData { } pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { - println!("CCCCCCCCCCCCCCCCCCCCCc"); - dbg!(&pusher); let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -51,7 +49,7 @@ impl PushData { } self.senderkey_pusher.insert( - dbg!(key), + key, &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), )?; @@ -63,12 +61,10 @@ impl PushData { prefix.push(0xff); self.senderkey_pusher - .scan_prefix(dbg!(prefix)) + .scan_prefix(prefix) .values() .map(|push| { - println!("DDDDDDDDDDDDDDDDDDDDDDDDDD"); - let push = - dbg!(push).map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) @@ -100,10 +96,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let reqwest_response = globals - .reqwest_client() - .execute(dbg!(reqwest_request)) - .await; + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -182,7 +175,7 @@ pub async fn send_push_notice( continue; } - match dbg!(rule.rule_id.as_str()) { + match rule.rule_id.as_str() { ".m.rule.master" => {} ".m.rule.suppress_notices" => { if pdu.kind == EventType::RoomMessage @@ -454,8 +447,7 @@ async fn send_notice( db: &Database, name: &str, ) -> Result<()> { - println!("BBBBBBBBBBBBBBBr"); - let (http, _emails): (Vec<&Pusher>, _) = dbg!(pushers) + let (http, _emails): (Vec<&Pusher>, _) = pushers .iter() .partition(|pusher| pusher.kind == Some(PusherKind::Http)); @@ -463,7 +455,7 @@ async fn send_notice( // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. can pusher/devices have conflicting formats - for pusher in dbg!(http) { + for pusher in http { let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); let url = if let Some(url) = pusher.data.url.as_ref() { url diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 648f080..c908d51 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use regex::Regex; use ring::digest; use ruma::{ @@ -67,7 +67,7 @@ pub struct Rooms { /// StateKey = EventType + StateKey, Short = Count pub(super) statekey_short: sled::Tree, /// StateId = StateHash + Short, PduId = Count (without roomid) - pub(super) stateid_pduid: sled::Tree, + pub(super) stateid_eventid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
@@ -138,7 +138,7 @@ impl Rooms { key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - info!("Looking for {} {:?}", event_type, state_key); + debug!("Looking for {} {:?}", event_type, state_key); let short = self.statekey_short.get(&key)?; @@ -147,11 +147,11 @@ impl Rooms { stateid.push(0xff); stateid.extend_from_slice(&short); - info!("trying to find pduid/eventid. short: {:?}", stateid); + debug!("trying to find pduid/eventid. short: {:?}", stateid); self.stateid_pduid .get(&stateid)? .map_or(Ok(None), |short_id| { - info!("found in stateid_pduid"); + debug!("found in stateid_pduid"); let mut long_id = room_id.as_bytes().to_vec(); long_id.push(0xff); long_id.extend_from_slice(&short_id); @@ -163,7 +163,7 @@ impl Rooms { .map_err(|_| Error::bad_database("Invalid PDU in db."))?, ), None => { - info!("looking in outliers"); + debug!("looking in outliers"); ( short_id.clone().into(), self.eventid_outlierpdu @@ -180,7 +180,7 @@ impl Rooms { })) }) } else { - info!("short id not found"); + warn!("short id not found"); Ok(None) } } @@ -288,7 +288,7 @@ impl Rooms { let mut state_id = prefix.clone(); state_id.extend_from_slice(&short.to_be_bytes()); - info!("inserting {:?} into {:?}", short_id, state_id); + debug!("inserting {:?} into {:?}", short_id, state_id); self.stateid_pduid.insert(state_id, short_id)?; } @@ -574,7 +574,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(&pdu_json) + &*serde_json::to_string(dbg!(&pdu_json)) .expect("CanonicalJsonObject is always a valid String"), )?; @@ -889,12 +889,12 @@ impl Rooms { content.clone(), prev_event, None, // TODO: third party invite - dbg!(&auth_events + &auth_events .iter() .map(|((ty, key), pdu)| { Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) - .collect::>>()?), + .collect::>>()?, ) .map_err(|e| { log::error!("{}", e); diff --git a/src/database/sending.rs b/src/database/sending.rs index fc1d27d..b35f7c5 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{info, warn}; +use log::{debug, error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -308,8 +308,6 @@ impl Sending { key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; - println!("AAAA"); - Ok(()) } @@ -348,7 +346,7 @@ impl Sending { pdu_ids: Vec, db: &Database, ) -> std::result::Result { - match dbg!(&kind) { + match &kind { OutgoingKind::Appservice(server) => { let pdu_jsons = pdu_ids .iter() @@ -414,21 +412,23 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - for pdu in dbg!(&pdus) { + for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - for user in db.rooms.room_members(&pdu.room_id) { - let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - + for user in db.users.iter().filter_map(|r| r.ok()).filter(|user_id| { + db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false) + }) { // Don't notify the user of their own events if user == pdu.sender { continue; } - let pushers = dbg!(db.pusher.get_pusher(&user)) + let pushers = db + .pusher + .get_pusher(&user) .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; let rules_for_user = db @@ -467,7 +467,7 @@ impl Sending { unread, &pushers, rules_for_user, - pdu, + &pdu, db, ) .await 
@@ -510,7 +510,7 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - info!("sending pdus to {}: {:#?}", server, pdu_jsons); + error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &db.globals, &*server, @@ -527,7 +527,7 @@ impl Sending { ) .await .map(|response| { - info!("server response: {:?}", response); + error!("server response: {:?}", response); kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/error.rs b/src/error.rs index d8f10f4..8a64e63 100644 --- a/src/error.rs +++ b/src/error.rs @@ -111,63 +111,3 @@ where .respond_to(r) } } - -pub struct ConduitLogger { - pub db: Database, - pub last_logs: RwLock>, -} - -impl log::Log for ConduitLogger { - fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { - true - } - - fn log(&self, record: &log::Record<'_>) { - let output = format!("{} - {}", record.level(), record.args()); - - let match_mod_path = - |path: &str| path.starts_with("conduit::") || path.starts_with("state"); - - if self.enabled(record.metadata()) - && (record.module_path().map_or(false, match_mod_path) - || record - .module_path() - .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying - && record.metadata().level() <= log::Level::Warn) - { - let first_line = output - .lines() - .next() - .expect("lines always returns one item"); - - eprintln!("{}", output); - - let mute_duration = match record.metadata().level() { - log::Level::Error => Duration::from_secs(60 * 5), // 5 minutes - log::Level::Warn => Duration::from_secs(60 * 60 * 24), // A day - _ => Duration::from_secs(60 * 60 * 24 * 7), // A week - }; - - if self - .last_logs - .read() - .unwrap() - .get(first_line) - .map_or(false, |i| i.elapsed() < mute_duration) - // Don't post this log again for some time - { - return; - } - - if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { - mut_last_logs.insert(first_line.to_owned(), Instant::now()); - } - - self.db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::notice_plain(output), - )); - } - } - - fn flush(&self) {} -} diff --git a/src/server_server.rs b/src/server_server.rs index 02610e8..919d12f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ @@ -27,7 +27,7 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, future::Future, net::{IpAddr, SocketAddr}, @@ -601,7 +601,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - info!("Working on incoming pdu: {:?}", value); + debug!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -636,11 +636,11 @@ pub async fn send_transaction_message_route<'a>( continue; } }; - info!("Validated event."); + debug!("Validated event."); // 6. persist the event as an outlier. 
db.rooms.add_pdu_outlier(&pdu)?; - info!("Added pdu as outlier."); + debug!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -649,7 +649,7 @@ pub async fn send_transaction_message_route<'a>( // // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - info!("Requesting state at event."); + debug!("Requesting state at event."); let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -664,7 +664,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - info!("Fetching state events at event."); + debug!("Fetching state events at event."); let state = match fetch_events( &db, server_name, @@ -706,7 +706,7 @@ pub async fn send_transaction_message_route<'a>( Err(_) => continue, }; - info!("Fetching auth events of state events at event."); + debug!("Fetching auth events of state events at event."); (state, incoming_auth_events) } Err(_) => { @@ -735,7 +735,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - info!("Auth check succeeded."); + debug!("Auth check succeeded."); // End of step 10. // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -746,8 +746,6 @@ pub async fn send_transaction_message_route<'a>( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - info!("current state: {:#?}", current_state); - if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, @@ -764,7 +762,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; }; - info!("Auth check with current state succeeded."); + debug!("Auth check with current state succeeded."); // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) @@ -773,7 +771,7 @@ pub async fn send_transaction_message_route<'a>( // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. 
let extremities = match calculate_forward_extremities(&db, &pdu).await { Ok(fork_ids) => { - info!("Calculated new forward extremities: {:?}", fork_ids); + debug!("Calculated new forward extremities: {:?}", fork_ids); fork_ids } Err(_) => { @@ -828,20 +826,21 @@ pub async fn send_transaction_message_route<'a>( for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - let event = match auth_cache.get(auth_id) { - Some(aev) => aev.clone(), - // The only events that haven't been added to the auth cache are - // events we have knowledge of previously - None => { - error!("Event was not present in auth_cache {}", auth_id); - resolved_map.insert( - event_id.clone(), - Err("Event was not present in auth cache".into()), - ); - continue 'main_pdu_loop; + match fetch_events( + &db, + server_name, + &mut pub_key_map, + &[auth_id.clone()], + &mut auth_cache, + ) + .await + { + // This should always contain exactly one element when Ok + Ok(events) => state_auth.push(events[0].clone()), + Err(e) => { + debug!("Event was not present: {}", e); } - }; - state_auth.push(event); + } } auth_events.push(state_auth); } @@ -864,7 +863,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - info!("auth events: {:?}", auth_cache); + debug!("auth events: {:?}", auth_cache); let res = match state_res::StateResolution::resolve( pdu.room_id(), @@ -916,7 +915,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - info!("Appended incoming pdu."); + debug!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -928,7 +927,7 @@ pub async fn send_transaction_message_route<'a>( None }, )?; - info!("Updated resolved state"); + debug!("Updated resolved state"); // Event has passed all auth/stateres checks } @@ -972,7 +971,7 @@ fn validate_event<'a>( } .keys() { - info!("Fetching signing keys for {}", signature_server); + debug!("Fetching signing keys for {}", signature_server); let keys = match fetch_signing_keys( &db, &Box::::try_from(&**signature_server).map_err(|_| { @@ -981,10 +980,7 @@ fn validate_event<'a>( ) .await { - Ok(keys) => { - info!("Keys: {:?}", keys); - keys - } + Ok(keys) => keys, Err(_) => { return Err( "Signature verification failed: Could not fetch signing key.".to_string(), @@ -993,8 +989,6 @@ fn validate_event<'a>( }; pub_key_map.insert(signature_server.clone(), keys); - - info!("Fetched signing keys"); } let mut val = @@ -1026,7 +1020,7 @@ fn validate_event<'a>( ) .map_err(|_| "Event is not a valid PDU".to_string())?; - info!("Fetching auth events."); + debug!("Fetching auth events."); fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await .map_err(|e| e.to_string())?; @@ -1035,7 +1029,7 @@ fn validate_event<'a>( /* // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - info!("Fetching prev events."); + debug!("Fetching prev events."); let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) .await .map_err(|e| e.to_string())?; @@ -1049,7 +1043,7 @@ fn validate_event<'a>( }; // Check that the event passes auth based on the auth_events - info!("Checking auth."); + debug!("Checking auth."); let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, @@ -1073,7 +1067,7 @@ fn validate_event<'a>( return Err("Event has failed auth check with auth events".to_string()); } - info!("Validation successful."); + debug!("Validation successful."); Ok((pdu, previous_create)) }) } @@ -1111,20 +1105,19 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { - info!("Fetching event: {}", id); let pdu = match auth_cache.get(id) { Some(pdu) => { - info!("Event found in cache"); + debug!("Event found in cache"); pdu.clone() } // `get_pdu` checks the outliers tree for us None => match db.rooms.get_pdu(&id)? { Some(pdu) => { - info!("Event found in outliers"); + debug!("Event found in outliers"); Arc::new(pdu) } None => { - info!("Fetching event over federation"); + debug!("Fetching event over federation"); match db .sending .send_federation_request( @@ -1135,7 +1128,7 @@ pub(crate) async fn fetch_events( .await { Ok(res) => { - info!("Got event over federation: {:?}", res); + debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); let (pdu, _) = @@ -1146,7 +1139,7 @@ pub(crate) async fn fetch_events( Error::Conflict("Authentication of event failed") })?; - info!("Added fetched pdu as outlier."); + debug!("Added fetched pdu as outlier."); db.rooms.add_pdu_outlier(&pdu)?; pdu } @@ -1171,15 +1164,11 @@ pub(crate) async fn fetch_signing_keys( let mut result = BTreeMap::new(); match db.globals.signing_keys_for(origin)? 
{ - keys if !keys.is_empty() => { - info!("we knew the signing keys already: {:?}", keys); - Ok(keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect()) - } + keys if !keys.is_empty() => Ok(keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect()), _ => { - info!("Asking {} for it's signing key", origin); match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) @@ -1204,7 +1193,7 @@ pub(crate) async fn fetch_signing_keys( } _ => { for server in db.globals.trusted_servers() { - info!("Asking {} for {}'s signing key", server, origin); + debug!("Asking {} for {}'s signing key", server, origin); if let Ok(keys) = db .sending .send_federation_request( @@ -1219,7 +1208,7 @@ pub(crate) async fn fetch_signing_keys( ) .await { - info!("Got signing keys: {:?}", keys); + debug!("Got signing keys: {:?}", keys); for k in keys.server_keys.into_iter() { db.globals.add_signing_key(origin, &k)?; result.extend( @@ -1364,7 +1353,6 @@ pub(crate) async fn build_forward_extremity_snapshots( fork_states.insert(current_state); } - info!("Fork states: {:?}", fork_states); Ok(fork_states) } @@ -1548,7 +1536,10 @@ pub fn get_missing_events_route<'a>( ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(serde_json::from_value(pdu).expect("Raw<..> is always valid")); + events.push(PduEvent::convert_to_outgoing_federation_event( + serde_json::from_value(pdu) + .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + )); } i += 1; } From 100307c9366383d8c612a464dfcee542e97f9d44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 17 Mar 2021 22:30:25 +0100 Subject: [PATCH 0473/1727] improvement: optimize state storage --- src/client_server/membership.rs | 12 +- src/client_server/profile.rs | 2 - src/client_server/room.rs | 4 +- src/client_server/state.rs | 12 +- src/client_server/sync.rs | 45 +-- src/database.rs | 12 +- src/database/pusher.rs | 3 +- src/database/rooms.rs | 469 +++++++++++++++++++------------- src/server_server.rs | 36 ++- 9 files changed, 341 insertions(+), 254 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d63fa02..d571eaa 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -106,7 +106,6 @@ pub async fn leave_room_route( ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? - .1 .content, ) .expect("from_value::> can never fail") @@ -195,7 +194,6 @@ pub async fn kick_user_route( ErrorKind::BadState, "Cannot kick member that's not in the room.", ))? - .1 .content, ) .expect("Raw::from_value always works") @@ -251,7 +249,7 @@ pub async fn ban_user_route( is_direct: None, third_party_invite: None, }), - |(_, event)| { + |event| { let mut event = serde_json::from_value::>(event.content) .expect("Raw::from_value always works") @@ -302,7 +300,6 @@ pub async fn unban_user_route( ErrorKind::BadState, "Cannot unban a user who is not banned.", ))? 
- .1 .content, ) .expect("from_value::> can never fail") @@ -617,10 +614,7 @@ async fn join_room_by_id_helper( &db.globals, )?; } - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(id.as_bytes()); - state.insert((pdu.kind.clone(), state_key.clone()), long_id); + state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); } } @@ -629,7 +623,7 @@ async fn join_room_by_id_helper( pdu.kind.clone(), pdu.state_key.clone().expect("join event has state key"), ), - pdu_id.clone(), + pdu.event_id.clone(), ); db.rooms.force_state(room_id, state, &db.globals)?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 7e57c1e..9bcb289 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -49,7 +49,6 @@ pub async fn set_displayname_route( "Tried to send displayname update for user not in the room.", ) })? - .1 .content .clone(), ) @@ -144,7 +143,6 @@ pub async fn set_avatar_url_route( "Tried to send avatar url update for user not in the room.", ) })? - .1 .content .clone(), ) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 409028c..399677f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -380,7 +380,6 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .1 .content, ) .expect("Raw::from_value always works") @@ -452,7 +451,7 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { - Some((_, v)) => v.content.clone(), + Some(v) => v.content.clone(), None => continue, // Skipping missing events. }; @@ -482,7 +481,6 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .1 .content, ) .expect("database contains invalid PDU") diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 57bf7e5..54c5fa5 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -112,7 +112,7 @@ pub async fn get_state_events_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -159,7 +159,7 @@ pub async fn get_state_events_for_key_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -183,8 +183,7 @@ pub async fn get_state_events_for_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))? - .1; + ))?; Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) @@ -211,7 +210,7 @@ pub async fn get_state_events_for_empty_key_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -235,8 +234,7 @@ pub async fn get_state_events_for_empty_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))? 
- .1; + ))?; Ok(get_state_events_for_empty_key::Response { content: serde_json::value::to_raw_value(&event.content) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6551b2a..280632b 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -96,7 +96,7 @@ pub async fn sync_events_route( // Database queries: - let current_state_hash = db.rooms.current_state_hash(&room_id)?; + let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?; // These type is Option>. The outer Option is None when there is no event between // since and the current room state, meaning there should be no updates. @@ -109,9 +109,11 @@ pub async fn sync_events_route( .next() .is_some(); - let since_state_hash = first_pdu_before_since - .as_ref() - .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); + let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { + db.rooms + .pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id) + .ok()? + }); let ( heroes, @@ -119,7 +121,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if pdus_after_since && Some(¤t_state_hash) != since_state_hash.as_ref() { + ) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -129,11 +131,18 @@ pub async fn sync_events_route( let encrypted_room = current_state .get(&(EventType::RoomEncryption, "".to_owned())) .is_some(); - let since_state = since_state_hash.as_ref().map(|state_hash| { - state_hash - .as_ref() - .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) - }); + let since_state = since_shortstatehash + .as_ref() + .map(|since_shortstatehash| { + Ok::<_, Error>( + since_shortstatehash + .map(|since_shortstatehash| { + db.rooms.state_full(&room_id, since_shortstatehash) + }) + .transpose()?, + ) + }) + .transpose()?; let since_encryption = since_state.as_ref().map(|state| { state @@ -496,16 +505,16 @@ pub async fn sync_events_route( .and_then(|pdu| pdu.ok()) .and_then(|pdu| { db.rooms - .pdu_state_hash(&pdu.0) + .pdu_shortstatehash(&pdu.1.event_id) .ok()? 
.ok_or_else(|| Error::bad_database("Pdu in db doesn't have a state hash.")) .ok() }) - .and_then(|state_hash| { + .and_then(|shortstatehash| { db.rooms .state_get( &room_id, - &state_hash, + shortstatehash, &EventType::RoomMember, sender_user.as_str(), ) @@ -513,14 +522,14 @@ pub async fn sync_events_route( .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() }) - .and_then(|(pdu_id, pdu)| { + .and_then(|pdu| { serde_json::from_value::>( pdu.content.clone(), ) .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .map(|content| (pdu_id, pdu, content)) + .map(|content| (pdu, content)) .ok() }) { since_member @@ -529,7 +538,7 @@ pub async fn sync_events_route( continue; }; - let left_since_last_sync = since_member.2.membership == MembershipState::Join; + let left_since_last_sync = since_member.1.membership == MembershipState::Join; let left_room = if left_since_last_sync { device_list_left.extend( @@ -550,10 +559,10 @@ pub async fn sync_events_route( let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; let mut room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(pdu_id, _)| since_member.0 != pdu_id) + .take_while(|(pdu_id, pdu)| &since_member.0 != pdu) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); - room_events.push(since_member.1.to_sync_room_event()); + room_events.push(since_member.0.to_sync_room_event()); sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, diff --git a/src/database.rs b/src/database.rs index 17177e8..f65d5e0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -163,10 +163,14 @@ impl Database { roomuserid_invited: db.open_tree("roomuserid_invited")?, userroomid_left: db.open_tree("userroomid_left")?, - statekey_short: db.open_tree("statekey_short")?, - stateid_pduid: db.open_tree("stateid_pduid")?, - pduid_statehash: db.open_tree("pduid_statehash")?, - roomid_statehash: db.open_tree("roomid_statehash")?, + statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, + stateid_shorteventid: db.open_tree("stateid_shorteventid")?, + eventid_shorteventid: db.open_tree("eventid_shorteventid")?, + shorteventid_eventid: db.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: db.open_tree("eventid_shortstatehash")?, + roomid_shortstatehash: db.open_tree("roomid_shortstatehash")?, + statehash_shortstatehash: db.open_tree("statehash_shortstatehash")?, + eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, }, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 59ccbef..b6c6cf4 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -312,7 +312,6 @@ pub async fn send_push_notice( && db .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|(_, pl)| pl) .map(deserialize) .flatten() .map_or(false, power_level_cmp) @@ -514,7 +513,7 @@ async fn send_notice( let room_name = db .rooms .room_state_get(&event.room_id, &EventType::RoomName, "")? - .map(|(_, pdu)| match pdu.content.get("name") { + .map(|pdu| match pdu.content.get("name") { Some(serde_json::Value::String(s)) => Some(s.to_string()), _ => None, }) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c908d51..a342566 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -59,15 +59,19 @@ pub struct Rooms { pub(super) userroomid_left: sled::Tree, /// Remember the current state hash of a room. 
- pub(super) roomid_statehash: sled::Tree, + pub(super) roomid_shortstatehash: sled::Tree, /// Remember the state hash at events in the past. - pub(super) pduid_statehash: sled::Tree, - /// The state for a given state hash. - /// - /// StateKey = EventType + StateKey, Short = Count - pub(super) statekey_short: sled::Tree, - /// StateId = StateHash + Short, PduId = Count (without roomid) - pub(super) stateid_eventid: sled::Tree, + pub(super) shorteventid_shortstatehash: sled::Tree, + /// StateKey = EventType + StateKey, ShortStateKey = Count + pub(super) statekey_shortstatekey: sled::Tree, + pub(super) shorteventid_eventid: sled::Tree, + /// ShortEventId = Count + pub(super) eventid_shorteventid: sled::Tree, + /// ShortEventId = Count + pub(super) statehash_shortstatehash: sled::Tree, + /// ShortStateHash = Count + /// StateId = ShortStateHash + ShortStateKey + pub(super) stateid_shorteventid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. @@ -81,37 +85,65 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full( + pub fn state_full_ids( &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { - self.stateid_pduid - .scan_prefix(&state_hash) + ) -> Result> { + let shortstatehash = self + .statehash_shortstatehash + .get(state_hash)? + .ok_or_else(|| Error::bad_database("Asked for statehash that does not exist."))?; + + Ok(self + .stateid_shorteventid + .scan_prefix(&shortstatehash) .values() - .map(|short_id| { - let short_id = short_id?; - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(&short_id); - match self.pduid_pdu.get(&long_id)? { - Some(b) => serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")), - None => self - .eventid_outlierpdu - .get(short_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, - } + .filter_map(|r| r.ok()) + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) + .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) }) .filter_map(|r| r.ok()) + .collect()) + } + + #[tracing::instrument(skip(self))] + pub fn state_full( + &self, + room_id: &RoomId, + shortstatehash: u64, + ) -> Result> { + Ok(self + .stateid_shorteventid + .scan_prefix(shortstatehash.to_be_bytes()) + .values() + .filter_map(|r| r.ok()) + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) 
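// Sketch of the composite keys used by the short-id tables above (helper functions are
// illustrative, not from the patch). Every "short" id is a u64 counter stored big-endian,
// so scanning `stateid_shorteventid` by the shortstatehash prefix yields one full snapshot.
fn stateid_key(shortstatehash: u64, shortstatekey: &[u8]) -> Vec<u8> {
    let mut key = shortstatehash.to_be_bytes().to_vec(); // prefix: which snapshot
    key.extend_from_slice(shortstatekey); // suffix: which (event type, state key)
    key
}

fn statekey_key(event_type: &str, state_key: &str) -> Vec<u8> {
    let mut key = event_type.as_bytes().to_vec();
    key.push(0xff); // 0xff separator, as used throughout the database layout
    key.extend_from_slice(state_key.as_bytes());
    key
}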
+ .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) + }) + .filter_map(|r| r.ok()) + .map(|eventid| self.get_pdu(&eventid)) + .filter_map(|r| r.ok().flatten()) .map(|pdu| { - Ok(( + Ok::<_, Error>(( ( pdu.kind.clone(), pdu.state_key @@ -122,7 +154,8 @@ impl Rooms { pdu, )) }) - .collect() + .filter_map(|r| r.ok()) + .collect()) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -130,71 +163,73 @@ impl Rooms { pub fn state_get( &self, room_id: &RoomId, - state_hash: &StateHashId, + shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { let mut key = event_type.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - debug!("Looking for {} {:?}", event_type, state_key); + let shortstatekey = self.statekey_shortstatekey.get(&key)?; - let short = self.statekey_short.get(&key)?; + if let Some(shortstatekey) = shortstatekey { + let mut stateid = shortstatehash.to_be_bytes().to_vec(); + stateid.extend_from_slice(&shortstatekey); - if let Some(short) = short { - let mut stateid = state_hash.to_vec(); - stateid.push(0xff); - stateid.extend_from_slice(&short); - - debug!("trying to find pduid/eventid. short: {:?}", stateid); - self.stateid_pduid + self.stateid_shorteventid .get(&stateid)? - .map_or(Ok(None), |short_id| { - debug!("found in stateid_pduid"); - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(&short_id); - - Ok::<_, Error>(Some(match self.pduid_pdu.get(&long_id)? { - Some(b) => ( - long_id.clone().into(), - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - ), - None => { - debug!("looking in outliers"); - ( - short_id.clone().into(), - self.eventid_outlierpdu - .get(&short_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })??, + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "EventID in stateid_shorteventid is invalid unicode.", ) - } - })) + })?) + .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) }) + .map(|r| r.ok()) + .flatten() + .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) } else { - warn!("short id not found"); Ok(None) } } /// Returns the state hash for this pdu. #[tracing::instrument(skip(self))] - pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { - Ok(self.pduid_statehash.get(pdu_id)?) + pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + self.eventid_shorteventid + .get(event_id.as_bytes())? + .map_or(Ok(None), |shorteventid| { + Ok(self.shorteventid_shortstatehash.get(shorteventid)?.map_or( + Ok::<_, Error>(None), + |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Invalid shortstatehash bytes in shorteventid_shortstatehash", + ) + })?)) + }, + )?) + }) } /// Returns the last state hash key added to the db for the given room. #[tracing::instrument(skip(self))] - pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { - Ok(self.roomid_statehash.get(room_id.as_bytes())?) + pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? 
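// Sketch of the lookup chain behind `state_get`/`pdu_shortstatehash` above, with std
// HashMaps standing in for the sled trees (names mirror the trees, values illustrative):
use std::collections::HashMap;

struct ShortTables {
    statekey_shortstatekey: HashMap<Vec<u8>, u64>,
    stateid_shorteventid: HashMap<Vec<u8>, u64>,
    shorteventid_eventid: HashMap<u64, String>,
}

impl ShortTables {
    fn state_get(&self, shortstatehash: u64, event_type: &str, state_key: &str) -> Option<&String> {
        let mut statekey = event_type.as_bytes().to_vec();
        statekey.push(0xff);
        statekey.extend_from_slice(state_key.as_bytes());
        let shortstatekey = *self.statekey_shortstatekey.get(&statekey)?; // unknown key => no such event
        let mut stateid = shortstatehash.to_be_bytes().to_vec();
        stateid.extend_from_slice(&shortstatekey.to_be_bytes());
        let shorteventid = self.stateid_shorteventid.get(&stateid)?;
        // the real code finishes with `self.get_pdu(&event_id)`
        self.shorteventid_eventid.get(shorteventid)
    }
}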
+ .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) } /// This fetches auth events from the current state. @@ -215,7 +250,7 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get( + if let Some(pdu) = self.room_state_get( room_id, &event_type, &state_key @@ -233,9 +268,9 @@ impl Rooms { /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, pdu_id_bytes: &[&[u8]]) -> Result { + fn calculate_hash(&self, bytes_list: &[&[u8]]) -> Result { // We only hash the pdu's event ids, not the whole pdu - let bytes = pdu_id_bytes.join(&0xff); + let bytes = bytes_list.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); Ok(hash.as_ref().into()) } @@ -259,41 +294,65 @@ impl Rooms { pub fn force_state( &self, room_id: &RoomId, - state: HashMap<(EventType, String), Vec>, + state: HashMap<(EventType, String), EventId>, globals: &super::globals::Globals, ) -> Result<()> { - let state_hash = - self.calculate_hash(&state.values().map(|long_id| &**long_id).collect::>())?; - let mut prefix = state_hash.to_vec(); - prefix.push(0xff); + let state_hash = self.calculate_hash( + &state + .values() + .map(|event_id| event_id.as_bytes()) + .collect::>(), + )?; - for ((event_type, state_key), long_id) in state { + let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => { + warn!("state hash already existed?!"); + shortstatehash.to_vec() + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash.to_be_bytes().to_vec() + } + }; + + for ((event_type, state_key), eventid) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); - let short = match self.statekey_short.get(&statekey)? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => shortstatekey.to_vec(), None => { - let short = globals.next_count()?; - self.statekey_short - .insert(&statekey, &short.to_be_bytes())?; - short + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() } }; - // If it's a pdu id we remove the room id, if it's an event id we leave it the same - let short_id = long_id.splitn(2, |&b| b == 0xff).nth(1).unwrap_or(&long_id); + let shorteventid = match self.eventid_shorteventid.get(eventid.as_bytes())? 
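// Sketch of the state-hash derivation used by `calculate_hash`/`force_state` above: the
// snapshot's event ids are joined with a 0xff separator and digested with SHA-256 via the
// same ring API the patch itself uses.
use ring::digest;

fn calculate_state_hash(event_ids: &[&str]) -> Vec<u8> {
    let bytes_list: Vec<&[u8]> = event_ids.iter().map(|id| id.as_bytes()).collect();
    let joined = bytes_list.join(&0xff_u8); // "$event1\xff$event2\xff..."
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}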
{ + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(eventid.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), eventid.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; - let mut state_id = prefix.clone(); - state_id.extend_from_slice(&short.to_be_bytes()); - debug!("inserting {:?} into {:?}", short_id, state_id); - self.stateid_pduid.insert(state_id, short_id)?; + let mut state_id = shortstatehash.clone(); + state_id.extend_from_slice(&shortstatekey); + + self.stateid_shorteventid + .insert(&*state_id, &*shorteventid)?; } - self.roomid_statehash - .insert(room_id.as_bytes(), &*state_hash)?; + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &*shortstatehash)?; Ok(()) } @@ -304,8 +363,8 @@ impl Rooms { &self, room_id: &RoomId, ) -> Result> { - if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_full(&room_id, ¤t_state_hash) + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_full(&room_id, current_shortstatehash) } else { Ok(BTreeMap::new()) } @@ -318,9 +377,9 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, state_key: &str, - ) -> Result> { - if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_get(&room_id, ¤t_state_hash, event_type, state_key) + ) -> Result> { + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_get(&room_id, current_shortstatehash, event_type, state_key) } else { Ok(None) } @@ -377,12 +436,6 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } - pub fn get_long_id(&self, event_id: &EventId) -> Result> { - Ok(self - .get_pdu_id(event_id)? - .map_or_else(|| event_id.as_bytes().to_vec(), |pduid| pduid.to_vec())) - } - /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. @@ -538,15 +591,15 @@ impl Rooms { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(prev_state_hash) = self.pdu_state_hash(&pdu_id).unwrap() { + if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(&pdu.room_id, &prev_state_hash, &pdu.kind, &state_key) + .state_get(&pdu.room_id, shortstatehash, &pdu.kind, &state_key) .unwrap() { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.1.content) + utils::to_canonical_object(prev_state.content) .expect("event is valid, we just created it"), ), ); @@ -574,7 +627,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(dbg!(&pdu_json)) + &*serde_json::to_string(&pdu_json) .expect("CanonicalJsonObject is always a valid String"), )?; @@ -706,71 +759,112 @@ impl Rooms { /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. - /// The incoming event is the `pdu_id` passed to this method. + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. pub fn append_to_state( &self, - new_pdu_id: &[u8], new_pdu: &PduEvent, globals: &super::globals::Globals, - ) -> Result { - let old_state = - if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { - // Store state for event. 
The state does not include the event itself. - // Instead it's the state before the pdu, so the room's old state. - self.pduid_statehash.insert(new_pdu_id, &old_state_hash)?; - if new_pdu.state_key.is_none() { - return Ok(old_state_hash); - } + ) -> Result { + let old_state = if let Some(old_shortstatehash) = + self.roomid_shortstatehash.get(new_pdu.room_id.as_bytes())? + { + // Store state for event. The state does not include the event itself. + // Instead it's the state before the pdu, so the room's old state. - let mut prefix = old_state_hash.to_vec(); - prefix.push(0xff); - self.stateid_pduid - .scan_prefix(&prefix) - .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) - // Chop the old state_hash out leaving behind the short key (u64) - .map(|(k, v)| (k.subslice(prefix.len(), k.len() - prefix.len()), v)) - .collect::>() - } else { - HashMap::new() - }; - - if let Some(state_key) = &new_pdu.state_key { - let mut new_state = old_state; - let mut pdu_key = new_pdu.kind.as_ref().as_bytes().to_vec(); - pdu_key.push(0xff); - pdu_key.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_short.get(&pdu_key)? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), None => { - let short = globals.next_count()?; - self.statekey_short.insert(&pdu_key, &short.to_be_bytes())?; - short + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() } }; - let new_pdu_id_short = new_pdu_id - .splitn(2, |&b| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; - - new_state.insert((&short.to_be_bytes()).into(), new_pdu_id_short.into()); - - let new_state_hash = - self.calculate_hash(&new_state.values().map(|b| &**b).collect::>())?; - - let mut key = new_state_hash.to_vec(); - key.push(0xff); - - for (short, short_pdu_id) in new_state { - let mut state_id = key.clone(); - state_id.extend_from_slice(&short); - self.stateid_pduid.insert(&state_id, &short_pdu_id)?; + self.shorteventid_shortstatehash + .insert(shorteventid, &old_shortstatehash)?; + if new_pdu.state_key.is_none() { + return utils::u64_from_bytes(&old_shortstatehash).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash.") + }); } - Ok(new_state_hash) + self.stateid_shorteventid + .scan_prefix(&old_shortstatehash) + .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) + // Chop the old_shortstatehash out leaving behind the short state key + .map(|(k, v)| { + ( + k.subslice(old_shortstatehash.len(), k.len() - old_shortstatehash.len()), + v, + ) + }) + .collect::>() + } else { + HashMap::new() + }; + + if let Some(state_key) = &new_pdu.state_key { + let mut new_state: HashMap = old_state; + + let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); + new_state_key.push(0xff); + new_state_key.extend_from_slice(state_key.as_bytes()); + + let shortstatekey = match self.statekey_shortstatekey.get(&new_state_key)? 
{ + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&new_state_key, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() + } + }; + + let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + new_state.insert(shortstatekey.into(), shorteventid.into()); + + let new_state_hash = self.calculate_hash( + &new_state + .values() + .map(|event_id| &**event_id) + .collect::>(), + )?; + + let shortstatehash = match self.statehash_shortstatehash.get(&new_state_hash)? { + Some(shortstatehash) => { + warn!("state hash already existed?!"); + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))? + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&new_state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash + } + }; + + for (shortstatekey, shorteventid) in new_state { + let mut state_id = shortstatehash.to_be_bytes().to_vec(); + state_id.extend_from_slice(&shortstatekey); + self.stateid_shorteventid.insert(&state_id, &shorteventid)?; + } + + Ok(shortstatehash) } else { Err(Error::bad_database( "Tried to insert non-state event into room without a state.", @@ -778,9 +872,9 @@ impl Rooms { } } - pub fn set_room_state(&self, room_id: &RoomId, state_hash: &StateHashId) -> Result<()> { - self.roomid_statehash - .insert(room_id.as_bytes(), state_hash)?; + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -833,7 +927,7 @@ impl Rooms { }, }) }, - |(_, power_levels)| { + |power_levels| { Ok(serde_json::from_value::>( power_levels.content, ) @@ -844,18 +938,15 @@ impl Rooms { )?; let sender_membership = self .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? - .map_or( - Ok::<_, Error>(member::MembershipState::Leave), - |(_, pdu)| { - Ok( - serde_json::from_value::>(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, - ) - }, - )?; + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok( + serde_json::from_value::>(pdu.content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership, + ) + })?; let sender_power = power_levels.users.get(&sender).map_or_else( || { @@ -936,7 +1027,7 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some((_, prev_pdu)) = self.room_state_get(&room_id, &event_type, &state_key)? { + if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { unsigned.insert("prev_content".to_owned(), prev_pdu.content); unsigned.insert( "prev_sender".to_owned(), @@ -1014,7 +1105,7 @@ impl Rooms { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
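// The short ids above (shortstatekey, shorteventid, shortstatehash) are all allocated with
// the same fetch-or-create pattern: reuse the mapping if it exists, otherwise take the next
// value of the global counter and remember it. A sketch with a HashMap standing in for the
// sled tree and a plain integer standing in for `globals.next_count()`:
use std::collections::HashMap;

fn get_or_create_short(map: &mut HashMap<Vec<u8>, u64>, key: &[u8], counter: &mut u64) -> u64 {
    if let Some(short) = map.get(key) {
        return *short;
    }
    *counter += 1; // globals.next_count() in the real code
    map.insert(key.to_vec(), *counter);
    *counter
}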
- let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; + let statehashid = self.append_to_state(&pdu, &db.globals)?; // remove the self.append_pdu( @@ -1030,7 +1121,7 @@ impl Rooms { // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - self.set_room_state(&room_id, &statehashid)?; + self.set_room_state(&room_id, statehashid)?; for server in self .room_servers(room_id) @@ -1267,7 +1358,7 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self .room_state_get(&room_id, &EventType::RoomCreate, "")? - .and_then(|(_, create)| { + .and_then(|create| { serde_json::from_value::< Raw, >(create.content) diff --git a/src/server_server.rs b/src/server_server.rs index 919d12f..2f32b63 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1301,7 +1301,7 @@ pub(crate) async fn build_forward_extremity_snapshots( pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, ) -> Result>>> { - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); @@ -1309,39 +1309,37 @@ pub(crate) async fn build_forward_extremity_snapshots( if id == &pdu.event_id { continue; } - match db.rooms.get_pdu_id(id)? { + match db.rooms.get_pdu(id)? { // We can skip this because it is handled outside of this function // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. - Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { - let state_hash = db + Some(leave_pdu) => { + let pdu_shortstatehash = db .rooms - .pdu_state_hash(&pduid)? - .expect("found pdu with no statehash"); + .pdu_shortstatehash(&leave_pdu.event_id)? + .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; - if current_hash.as_ref() == Some(&state_hash) { + if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { includes_current_state = true; } let mut state_before = db .rooms - .state_full(pdu.room_id(), &state_hash)? + .state_full(pdu.room_id(), pdu_shortstatehash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); // Now it's the state after - if let Some(pdu) = db.rooms.get_pdu_from_id(&pduid)? 
{ - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, Arc::new(pdu)); - } + let key = (leave_pdu.kind.clone(), leave_pdu.state_key.clone()); + state_before.insert(key, Arc::new(leave_pdu)); fork_states.insert(state_before); } _ => { - error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - return Err(Error::BadDatabase("Missing state snapshot.")); + error!("Missing state snapshot for {:?}", id); + return Err(Error::bad_database("Missing state snapshot.")); } } } @@ -1367,13 +1365,12 @@ pub(crate) fn update_resolved_state( if let Some(state) = state { let mut new_state = HashMap::new(); for ((ev_type, state_k), pdu) in state { - let long_id = db.rooms.get_long_id(&pdu.event_id)?; new_state.insert( ( ev_type, state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - long_id, + pdu.event_id.clone(), ); } @@ -1396,7 +1393,6 @@ pub(crate) fn append_incoming_pdu( // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); for ((ev_type, state_k), state_pdu) in state { - let long_id = db.rooms.get_long_id(state_pdu.event_id())?; new_state.insert( ( ev_type.clone(), @@ -1404,7 +1400,7 @@ pub(crate) fn append_incoming_pdu( .clone() .ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - long_id.to_vec(), + state_pdu.event_id.clone(), ); } @@ -1418,7 +1414,7 @@ pub(crate) fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1429,7 +1425,7 @@ pub(crate) fn append_incoming_pdu( &db, )?; - db.rooms.set_room_state(pdu.room_id(), &state_hash)?; + db.rooms.set_room_state(pdu.room_id(), state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { From a77fcd106e5f3f0c7bb3ada59ebd86d548d56213 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 00:09:57 +0100 Subject: [PATCH 0474/1727] feat: implement /state_ids and fix federation stuff --- src/database.rs | 12 ++++----- src/database/rooms.rs | 16 +++--------- src/main.rs | 1 + src/server_server.rs | 57 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 68 insertions(+), 18 deletions(-) diff --git a/src/database.rs b/src/database.rs index f65d5e0..67f888f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -110,11 +110,6 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load( - db.open_tree("global")?, - db.open_tree("servertimeout_signingkey")?, - config, - )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -191,7 +186,7 @@ impl Database { sending: sending::Sending { servernamepduids: db.open_tree("servernamepduids")?, servercurrentpdus: db.open_tree("servercurrentpdus")?, - maximum_requests: Arc::new(Semaphore::new(10)), + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), }, admin: admin::Admin { sender: admin_sender, @@ -201,6 +196,11 @@ impl Database { id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, }, pusher: pusher::PushData::new(&db)?, + globals: globals::Globals::load( + 
db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + )?, _db: db, }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a342566..2450622 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -85,19 +85,10 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids( - &self, - room_id: &RoomId, - state_hash: &StateHashId, - ) -> Result> { - let shortstatehash = self - .statehash_shortstatehash - .get(state_hash)? - .ok_or_else(|| Error::bad_database("Asked for statehash that does not exist."))?; - + pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid - .scan_prefix(&shortstatehash) + .scan_prefix(&shortstatehash.to_be_bytes()) .values() .filter_map(|r| r.ok()) .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) @@ -895,7 +886,8 @@ impl Rooms { redacts, } = pdu_builder; // TODO: Make sure this isn't called twice in parallel - let prev_events = self.get_pdu_leaves(&room_id)?; + let mut prev_events = self.get_pdu_leaves(&room_id)?; + prev_events.truncate(20); let auth_events = self.get_auth_events( &room_id, diff --git a/src/main.rs b/src/main.rs index 893273f..1aa4d54 100644 --- a/src/main.rs +++ b/src/main.rs @@ -165,6 +165,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, server_server::get_missing_events_route, + server_server::get_room_state_ids_route, server_server::get_profile_information_route, ], ) diff --git a/src/server_server.rs b/src/server_server.rs index 2f32b63..da9928c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,6 +6,7 @@ use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::error::ErrorKind, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -1543,6 +1544,62 @@ pub fn get_missing_events_route<'a>( Ok(get_missing_events::v1::Response { events }.into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/state_ids/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_room_state_ids_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdu_ids = db.rooms.state_full_ids(shortstatehash)?; + + let mut auth_chain_ids = BTreeSet::::new(); + let mut todo = BTreeSet::new(); + todo.insert(body.event_id.clone()); + + loop { + if let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + todo.remove(&event_id); + } else { + break; + } + } + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.into_iter().collect(), + pdu_ids, + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") From a0fa0ee7a0da0cddf5471ededf19330bab56ee5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 12:03:15 +0100 Subject: [PATCH 0475/1727] fix: join appservice room with alias --- src/client_server/alias.rs | 20 ++++++++++++++++---- src/database/rooms.rs | 24 ++++++++++++++++-------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 03d4909..07b4977 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -90,11 +90,23 @@ pub async fn get_alias_helper( let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) - .and_then(|users| users.get("regex")) - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()); + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .map(|aliases| { + aliases + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); - if aliases.map_or(false, |aliases| aliases.is_match(room_alias.as_str())) + if aliases + .iter() + .any(|aliases| aliases.is_match(room_alias.as_str())) && db .sending .send_appservice_request( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2450622..e6c3b93 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1142,15 +1142,23 @@ impl Rooms { }); let aliases = namespaces .get("aliases") - .and_then(|users| users.get("regex")) - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()); + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .map(|aliases| { + aliases + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let rooms = namespaces .get("rooms") .and_then(|rooms| rooms.as_sequence()); - let room_aliases = self.room_aliases(&room_id); - let bridge_user_id = appservice .1 .get("sender_localpart") @@ -1170,15 +1178,15 @@ impl Rooms { .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) }; - let matching_aliases = |aliases: Regex| { - room_aliases + let matching_aliases = |aliases: &Regex| { + self.room_aliases(&room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; if bridge_user_id.map_or(false, user_is_joined) || users.iter().any(matching_users) - || aliases.map_or(false, matching_aliases) + || aliases.iter().any(matching_aliases) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 5cb15551f345843033e71d1ec1eaff5c0263a7aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 13:13:08 +0100 Subject: [PATCH 0476/1727] improvement: use db compression --- Cargo.lock | 55 ++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 2 +- src/database.rs | 1 + 3 files 
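// Sketch of the auth-chain walk performed by the /state_ids handler above: a worklist of
// event ids is expanded through each event's auth_events until no new ids turn up.
// `auth_events_of` is a hypothetical lookup standing in for `db.rooms.get_pdu(..)`.
use std::collections::BTreeSet;

fn auth_chain(start: String, auth_events_of: impl Fn(&str) -> Vec<String>) -> BTreeSet<String> {
    let mut auth_chain_ids = BTreeSet::new();
    let mut todo = BTreeSet::new();
    todo.insert(start);
    while let Some(event_id) = todo.iter().next().cloned() {
        todo.remove(&event_id);
        for auth_id in auth_events_of(&event_id) {
            if auth_chain_ids.insert(auth_id.clone()) {
                todo.insert(auth_id); // only newly-seen ids get expanded further
            }
        }
    }
    auth_chain_ids
}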
changed, 56 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c70fa7e..d5010da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -149,6 +149,9 @@ name = "cc" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -801,6 +804,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.10.0" @@ -816,6 +828,15 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +[[package]] +name = "jobserver" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +dependencies = [ + "libc", +] + [[package]] name = "jpeg-decoder" version = "0.1.22" @@ -2023,6 +2044,7 @@ dependencies = [ "libc", "log", "parking_lot", + "zstd", ] [[package]] @@ -2067,7 +2089,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" name = "state-res" version = "0.1.0" dependencies = [ - "itertools", + "itertools 0.10.0", "log", "maplit", "ruma", @@ -2754,3 +2776,34 @@ name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + +[[package]] +name = "zstd" +version = "0.5.4+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "2.0.6+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "1.4.18+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +dependencies = [ + "cc", + "glob", + "itertools 0.9.0", + "libc", +] diff --git a/Cargo.toml b/Cargo.toml index ae0dd1d..6750d70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-event # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", default-features = false } +sled = { version = "0.34.6", default-features = false, features = ["compression"] } # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions diff --git a/src/database.rs b/src/database.rs index 67f888f..138efbe 100644 --- a/src/database.rs +++ b/src/database.rs @@ -103,6 +103,7 @@ impl Database { let db = sled::Config::default() .path(&config.database_path) .cache_capacity(config.cache_capacity as u64) + .use_compression(true) .open()?; info!("Opened sled database at {}", config.database_path); From 
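// Sketch of the resulting sled configuration (path and cache size illustrative). The
// `use_compression` call needs the crate's "compression" (zstd) feature enabled in
// Cargo.toml, as done above; it compresses values on disk.
fn open_database(path: &str, cache_capacity: u64) -> sled::Result<sled::Db> {
    sled::Config::default()
        .path(path)
        .cache_capacity(cache_capacity)
        .use_compression(true)
        .open()
}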
7b3fe88345038938780c5a0e222f4ffa92e6e8ef Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Sat, 13 Mar 2021 22:31:41 -0300 Subject: [PATCH 0477/1727] Send proper Host header in federation requests --- src/database/globals.rs | 4 +--- src/server_server.rs | 19 +++++++------------ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index c7e53ca..bad9c89 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -13,9 +13,7 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; -pub type DestinationCache = Arc, (String, Option)>>>; -type WellKnownMap = HashMap, (String, Option)>; - +type WellKnownMap = HashMap, (String, String)>; #[derive(Clone)] pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host diff --git a/src/server_server.rs b/src/server_server.rs index da9928c..59befde 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -139,11 +139,9 @@ where } } - if let Some(host) = host { - http_request - .headers_mut() - .insert(HOST, HeaderValue::from_str(&host).unwrap()); - } + http_request + .headers_mut() + .insert(HOST, HeaderValue::from_str(&host).unwrap()); let mut reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); @@ -231,10 +229,9 @@ fn add_port_to_hostname(destination_str: String) -> String { async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &'_ ServerName, -) -> (String, Option) { - let mut host = None; - +) -> (String, String) { let destination_str = destination.as_str().to_owned(); + let mut host = destination_str.clone(); let actual_destination = "https://".to_owned() + &match get_ip_with_port(destination_str.clone()) { Some(host_port) => { @@ -249,6 +246,7 @@ async fn find_actual_destination( match request_well_known(globals, &destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { + host = delegated_hostname.clone(); match get_ip_with_port(delegated_hostname.clone()) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { @@ -270,10 +268,7 @@ async fn find_actual_destination( None => { match query_srv_record(globals, &destination_str).await { // 4: SRV record found - Some(hostname) => { - host = Some(destination_str.to_owned()); - hostname - } + Some(hostname) => hostname, // 5: No SRV record found None => add_port_to_hostname(destination_str.to_string()), } From f775c76d8a73df472e7b93bf7cb5988f89cbf5bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 18:33:43 +0100 Subject: [PATCH 0478/1727] chore: get rid of warnings --- rust-toolchain | 2 +- rustfmt.toml | 1 + src/client_server/read_marker.rs | 4 +--- src/client_server/sync.rs | 2 +- src/database/pusher.rs | 14 ++++++++------ src/database/rooms.rs | 2 +- src/database/sending.rs | 2 +- src/error.rs | 13 +------------ src/main.rs | 1 - src/ruma_wrapper.rs | 9 ++++----- src/server_server.rs | 7 +++++-- 11 files changed, 24 insertions(+), 33 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index 21998d3..5a5c721 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.47.0 +1.50.0 diff --git a/rustfmt.toml b/rustfmt.toml index e86028b..739b454 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1,2 @@ +unstable_features = true imports_granularity="Crate" diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 20464db..555b7e7 
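// Sketch of the Host-header handling introduced above: after resolving the actual
// destination, the federation client always sets an explicit Host header (the delegated
// hostname when a .well-known lookup redirected us, otherwise the server name itself).
use http::header::{HeaderValue, HOST};

fn set_host_header(request: &mut http::Request<Vec<u8>>, host: &str) {
    request.headers_mut().insert(
        HOST,
        HeaderValue::from_str(host).expect("server name is a valid header value"),
    );
}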
100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -3,9 +3,7 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - capabilities::get_capabilities, read_marker::set_read_marker, receipt::create_receipt, - }, + r0::{read_marker::set_read_marker, receipt::create_receipt}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 280632b..da2ddf4 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -559,7 +559,7 @@ pub async fn sync_events_route( let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; let mut room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(pdu_id, pdu)| &since_member.0 != pdu) + .take_while(|(_, pdu)| &since_member.0 != pdu) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); room_events.push(since_member.0.to_sync_room_event()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index b6c6cf4..cc421db 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,17 +9,19 @@ use ruma::{ }, OutgoingRequest, }, - events::room::{ - member::{MemberEventContent, MembershipState}, - message::{MessageEventContent, MessageType, TextMessageEventContent}, - power_levels::PowerLevelsEventContent, + events::{ + room::{ + member::{MemberEventContent, MembershipState}, + message::{MessageEventContent, MessageType, TextMessageEventContent}, + power_levels::PowerLevelsEventContent, + }, + EventType, }, - events::EventType, push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; -use std::{convert::TryFrom, fmt::Debug, time::Duration}; +use std::{convert::TryFrom, fmt::Debug}; #[derive(Debug, Clone)] pub struct PushData { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e6c3b93..d494d33 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{debug, error, info, warn}; +use log::{error, warn}; use regex::Regex; use ring::digest; use ruma::{ diff --git a/src/database/sending.rs b/src/database/sending.rs index b35f7c5..50bbc8b 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{debug, error, info, warn}; +use log::{error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ diff --git a/src/error.rs b/src/error.rs index 8a64e63..65c5b4f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,18 +1,7 @@ -use std::{ - collections::HashMap, - sync::RwLock, - time::{Duration, Instant}, -}; - use log::error; -use ruma::{ - api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}, - events::room::message, -}; +use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; -use crate::{database::admin::AdminCommand, Database}; - #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, diff --git a/src/main.rs b/src/main.rs index 1aa4d54..2ec3a42 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,7 +19,6 @@ pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use log::LevelFilter; use rocket::{ catch, catchers, fairing::AdHoc, diff 
--git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 640771f..8c72529 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,13 +1,10 @@ use crate::Error; use ruma::{ - api::{AuthScheme, IncomingRequest, OutgoingRequest}, + api::OutgoingRequest, identifiers::{DeviceId, UserId}, Outgoing, }; -use std::{ - convert::{TryFrom, TryInto}, - ops::Deref, -}; +use std::{convert::TryInto, ops::Deref}; #[cfg(feature = "conduit_bin")] use { @@ -24,6 +21,8 @@ use { tokio::io::AsyncReadExt, Request, State, }, + ruma::api::{AuthScheme, IncomingRequest}, + std::convert::TryFrom, std::io::Cursor, }; diff --git a/src/server_server.rs b/src/server_server.rs index 59befde..d43588a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3,7 +3,7 @@ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{debug, error, info, warn}; use regex::Regex; -use rocket::{get, post, put, response::content::Json, State}; +use rocket::{response::content::Json, State}; use ruma::{ api::{ client::error::ErrorKind, @@ -28,7 +28,7 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, future::Future, net::{IpAddr, SocketAddr}, @@ -38,6 +38,9 @@ use std::{ time::{Duration, SystemTime}, }; +#[cfg(feature = "conduit_bin")] +use rocket::{get, post, put}; + #[tracing::instrument(skip(globals))] pub async fn send_request( globals: &crate::database::globals::Globals, From d4c76f4654cf453ee084c24f5955937d93754092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 19:38:08 +0100 Subject: [PATCH 0479/1727] chore: update ruma --- Cargo.lock | 18 ++++++++++++++++++ Cargo.toml | 10 +++++----- src/client_server/session.rs | 7 +++++-- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5010da..adcc27b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,6 +1623,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1642,6 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1656,6 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1666,6 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1679,6 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1697,6 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" +source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1709,6 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1722,6 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1732,6 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1746,6 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1759,6 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1769,10 +1780,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1785,6 +1798,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1799,6 +1813,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1811,6 +1826,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1821,6 +1837,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2088,6 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" +source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 6750d70..33f1d1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 
+18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +#ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } +#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 7b3acfc..cb6442d 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -51,8 +51,11 @@ pub async fn login_route( // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password { password } => { - let username = if let login::IncomingUserInfo::MatrixId(matrix_id) = &body.user { + login::IncomingLoginInfo::Password { + identifier, + password, + } => { + let username = if let login::IncomingUserIdentifier::MatrixId(matrix_id) = identifier { matrix_id } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); From 363c629fafcaa202f296d0c1988cdb26950e40ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 22 Mar 2021 14:04:11 +0100 Subject: [PATCH 0480/1727] fix: signature key fetching, optimize push sending --- Cargo.lock | 18 --- Cargo.toml | 9 +- src/client_server/push.rs | 2 +- 
src/database/pusher.rs | 196 ++++++++++++++++------------- src/database/rooms.rs | 22 +++- src/database/sending.rs | 259 ++++++++++++++++++++------------------ src/server_server.rs | 226 ++++++++++++++++++--------------- 7 files changed, 396 insertions(+), 336 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adcc27b..d5010da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,7 +1623,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1643,7 +1642,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1656,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1666,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1679,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1702,7 +1697,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1715,7 +1709,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1722,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1732,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1755,7 +1746,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1769,7 +1759,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1780,12 +1769,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1798,7 +1785,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1813,7 +1799,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1826,7 +1811,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1837,7 +1821,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2105,7 +2088,6 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 33f1d1e..1476200 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 +18,15 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -#state-res = { git = 
"https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/push.rs b/src/client_server/push.rs index a7ddbb6..9de8c16 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -689,7 +689,7 @@ pub async fn get_pushers_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { - pushers: db.pusher.get_pusher(sender_user)?, + pushers: db.pusher.get_pushers(sender_user)?, } .into()) } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index cc421db..b0b9e1e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -20,6 +20,7 @@ use ruma::{ push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; +use sled::IVec; use std::{convert::TryFrom, fmt::Debug}; @@ -58,7 +59,17 @@ impl PushData { Ok(()) } - pub fn get_pusher(&self, sender: &UserId) -> Result> { + pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + self.senderkey_pusher + .get(senderkey)? + .map(|push| { + Ok(serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + }) + .transpose() + } + + pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -72,6 +83,16 @@ impl PushData { }) .collect() } + + pub fn get_pusher_senderkeys(&self, sender: &UserId) -> impl Iterator> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + + self.senderkey_pusher + .scan_prefix(prefix) + .keys() + .map(|r| Ok(r?)) + } } pub async fn send_request( @@ -155,7 +176,7 @@ where pub async fn send_push_notice( user: &UserId, unread: UInt, - pushers: &[Pusher], + pusher: &Pusher, ruleset: Ruleset, pdu: &PduEvent, db: &Database, @@ -194,7 +215,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -214,8 +235,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -246,7 +266,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -272,7 +292,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -289,7 +309,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, 
tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -326,7 +346,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -352,7 +372,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -369,7 +389,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -385,7 +405,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -401,7 +421,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -415,7 +435,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -429,7 +449,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -442,98 +462,96 @@ pub async fn send_push_notice( async fn send_notice( unread: UInt, - pushers: &[Pusher], + pusher: &Pusher, tweaks: Vec, event: &PduEvent, db: &Database, name: &str, ) -> Result<()> { - let (http, _emails): (Vec<&Pusher>, _) = pushers - .iter() - .partition(|pusher| pusher.kind == Some(PusherKind::Http)); + // TODO: email + if pusher.kind == Some(PusherKind::Http) { + return Ok(()); + } // TODO: // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. 
can pusher/devices have conflicting formats - for pusher in http { - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = pusher.data.url.as_ref() { - url - } else { - error!("Http Pusher must have URL specified."); - continue; - }; + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = pusher.data.url.as_ref() { + url + } else { + error!("Http Pusher must have URL specified."); + return Ok(()); + }; - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = Some(data_minus_url); + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = Some(data_minus_url); - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == EventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + error!("SEND PUSH NOTICE `{}`", name); + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + + if event.kind == EventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let d = &[device]; - let mut notifi = Notification::new(d); + let user_name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + let room_name = db + .rooms + .room_state_get(&event.room_id, &EventType::RoomName, "")? 
+ .map(|pdu| match pdu.content.get("name") { + Some(serde_json::Value::String(s)) => Some(s.to_string()), + _ => None, + }) + .flatten(); + notifi.room_name = room_name.as_deref(); - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == EventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - error!("SEND PUSH NOTICE `{}`", name); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); - - if event.kind == EventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - let room_name = db - .rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? - .map(|pdu| match pdu.content.get("name") { - Some(serde_json::Value::String(s)) => Some(s.to_string()), - _ => None, - }) - .flatten(); - notifi.room_name = room_name.as_deref(); - - error!("SEND PUSH NOTICE Full `{}`", name); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } + error!("SEND PUSH NOTICE Full `{}`", name); + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } // TODO: email - // for email in emails {} Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d494d33..2e2d486 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -84,7 +84,6 @@ pub struct Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
- #[tracing::instrument(skip(self))] pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid @@ -107,7 +106,6 @@ impl Rooms { .collect()) } - #[tracing::instrument(skip(self))] pub fn state_full( &self, room_id: &RoomId, @@ -628,7 +626,25 @@ impl Rooms { .insert(pdu.event_id.as_bytes(), &*pdu_id)?; // See if the event matches any known pushers - db.sending.send_push_pdu(&*pdu_id)?; + for user in db + .users + .iter() + .filter_map(|r| r.ok()) + .filter(|user_id| db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + { + // Don't notify the user of their own events + if user == pdu.sender { + continue; + } + + for senderkey in db + .pusher + .get_pusher_senderkeys(&user) + .filter_map(|r| r.ok()) + { + db.sending.send_push_pdu(&*pdu_id, senderkey)?; + } + } match pdu.kind { EventType::RoomRedaction => { diff --git a/src/database/sending.rs b/src/database/sending.rs index 50bbc8b..9b74ed7 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -14,9 +14,9 @@ use log::{error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, federation, OutgoingRequest}, + api::{appservice, client::r0::push::Pusher, federation, OutgoingRequest}, events::{push_rules, EventType}, - uint, ServerName, UInt, + uint, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -24,14 +24,14 @@ use tokio::{select, sync::Semaphore}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(Box), - Push(Vec), + Push(Vec, Vec), // user and pushkey Normal(Box), } #[derive(Clone)] pub struct Sending { /// The state for a given state hash. - pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)ServerName / UserId + PduId + pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId (pduid can be empty for reservation) pub(super) maximum_requests: Arc, } @@ -85,9 +85,11 @@ impl Sending { p.extend_from_slice(server.as_bytes()); p } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p } OutgoingKind::Normal(server) => { @@ -106,6 +108,7 @@ impl Sending { let mut subscriber = servernamepduids.watch_prefix(b""); loop { + println!("."); select! { Some(response) = futures.next() => { match response { @@ -116,9 +119,11 @@ impl Sending { p.extend_from_slice(server.as_bytes()); p } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(server) => { @@ -179,9 +184,11 @@ impl Sending { p.extend_from_slice(serv.as_bytes()); p }, - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(serv) => { @@ -208,7 +215,6 @@ impl Sending { Some(event) = &mut subscriber => { if let sled::Event::Insert { key, .. 
} = event { let servernamepduid = key.clone(); - let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); let exponential_backoff = |(tries, instant): &(u32, Instant)| { // Fail if a request has failed recently (exponential backoff) @@ -219,33 +225,8 @@ impl Sending { instant.elapsed() < min_elapsed_duration }; - if let Some((outgoing_kind, pdu_id)) = utils::string_from_bytes( - parts - .next() - .expect("splitn will always return 1 or more elements"), - ) - .map_err(|_| Error::bad_database("[Utf8] ServerName in servernamepduid bytes are invalid.")) - .and_then(|ident_str| { - // Appservices start with a plus - Ok(if ident_str.starts_with('+') { - OutgoingKind::Appservice( - Box::::try_from(&ident_str[1..]) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? - ) - } else if ident_str.starts_with('$') { - OutgoingKind::Push(ident_str[1..].as_bytes().to_vec()) - } else { - OutgoingKind::Normal( - Box::::try_from(ident_str) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? - ) - }) - }) - .and_then(|outgoing_kind| parts - .next() - .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) - .map(|pdu_id| (outgoing_kind, pdu_id)) - ) + + if let Some((outgoing_kind, pdu_id)) = Self::parse_servercurrentpdus(&servernamepduid) .ok() .filter(|(outgoing_kind, _)| { if last_failed_try.get(outgoing_kind).map_or(false, exponential_backoff) { @@ -258,9 +239,11 @@ impl Sending { p.extend_from_slice(serv.as_bytes()); p }, - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(serv) => { @@ -279,6 +262,8 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); + dbg!("there is a future"); + futures.push( Self::handle_event( outgoing_kind, @@ -295,15 +280,9 @@ impl Sending { } #[tracing::instrument(skip(self))] - pub fn send_push_pdu(&self, pdu_id: &[u8]) -> Result<()> { - // Make sure we don't cause utf8 errors when parsing to a String... - let pduid = String::from_utf8_lossy(pdu_id).as_bytes().to_vec(); - - // these are valid ServerName chars - // (byte.is_ascii_alphanumeric() || byte == b'-' || byte == b'.') + pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: IVec) -> Result<()> { let mut key = b"$".to_vec(); - // keep each pdu push unique - key.extend_from_slice(pduid.as_slice()); + key.extend_from_slice(&senderkey); key.push(0xff); key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; @@ -313,6 +292,7 @@ impl Sending { #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { + dbg!(&server); let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); @@ -369,6 +349,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); let permit = db.sending.maximum_requests.acquire().await; + + error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = appservice_server::send_request( &db.globals, db.appservice @@ -391,17 +373,17 @@ impl Sending { response } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let pdus = pdu_ids .iter() .map(|pdu_id| { Ok::<_, (Vec, Error)>( db.rooms .get_pdu_from_id(pdu_id) - .map_err(|e| (id.clone(), e))? + .map_err(|e| (pushkey.clone(), e))? 
.ok_or_else(|| { ( - id.clone(), + pushkey.clone(), Error::bad_database( "[Push] Event in servernamepduids not found in db.", ), @@ -418,66 +400,80 @@ impl Sending { continue; } - for user in db.users.iter().filter_map(|r| r.ok()).filter(|user_id| { - db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false) - }) { - // Don't notify the user of their own events - if user == pdu.sender { - continue; - } - - let pushers = db - .pusher - .get_pusher(&user) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - - let rules_for_user = db - .account_data - .get::(None, &user, EventType::PushRules) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); - - let unread: UInt = if let Some(last_read) = db - .rooms - .edus - .private_read_get(&pdu.room_id, &user) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - { - (db.rooms - .pdus_since(&user, &pdu.room_id, last_read) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into() - } else { - // Just return zero unread messages - uint!(0) - }; - - let permit = db.sending.maximum_requests.acquire().await; - let _response = pusher::send_push_notice( - &user, - unread, - &pushers, - rules_for_user, - &pdu, - db, + let userid = UserId::try_from(utils::string_from_bytes(user).map_err(|e| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user string in db."), ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); + })?) + .map_err(|e| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user id in db."), + ) + })?; - drop(permit); - } + let mut senderkey = user.clone(); + senderkey.push(0xff); + senderkey.extend_from_slice(pushkey); + + let pusher = match db + .pusher + .get_pusher(&senderkey) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + { + Some(pusher) => pusher, + None => continue, + }; + + let rules_for_user = db + .account_data + .get::(None, &userid, EventType::PushRules) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&userid)); + + let unread: UInt = if let Some(last_read) = db + .rooms + .edus + .private_read_get(&pdu.room_id, &userid) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + { + (db.rooms + .pdus_since(&userid, &pdu.room_id, last_read) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? 
+ .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + matches!( + pdu.kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) + .count() as u32) + .into() + } else { + // Just return zero unread messages + uint!(0) + }; + + let permit = db.sending.maximum_requests.acquire().await; + + error!("sending pdu to {}: {:#?}", userid, pdu); + let _response = pusher::send_push_notice( + &userid, + unread, + &pusher, + rules_for_user, + &pdu, + db, + ) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); + + drop(permit); } - Ok(OutgoingKind::Push(id.clone())) + Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) } OutgoingKind::Normal(server) => { let pdu_jsons = pdu_ids @@ -540,30 +536,49 @@ impl Sending { } fn parse_servercurrentpdus(key: &IVec) -> Result<(OutgoingKind, IVec)> { - let mut parts = key.splitn(2, |&b| b == 0xff); - let server = parts.next().expect("splitn always returns one element"); - let pdu = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(&server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - // Appservices start with a plus - Ok::<_, Error>(if server.starts_with('+') { + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + ( OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), IVec::from(pdu), ) - } else if server.starts_with('$') { + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xff); + + let user = parts.next().expect("splitn always returns one element"); + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; ( - OutgoingKind::Push(server.as_bytes().to_vec()), + OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), IVec::from(pdu), ) } else { + let mut parts = key.splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + ( OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") diff --git a/src/server_server.rs b/src/server_server.rs index d43588a..82c5f82 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -21,9 +21,10 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, + identifiers::{KeyId, KeyName}, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, SigningKeyAlgorithm, UserId, }; use state_res::{Event, EventMap, 
StateMap}; use std::{ @@ -600,7 +601,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - debug!("Working on incoming pdu: {:?}", value); + info!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -639,7 +640,7 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.add_pdu_outlier(&pdu)?; - debug!("Added pdu as outlier."); + info!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -914,7 +915,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - debug!("Appended incoming pdu."); + info!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -961,21 +962,31 @@ fn validate_event<'a>( auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, (Arc, Option>)> { Box::pin(async move { - for signature_server in match value + for (signature_server, signature) in match value .get("signatures") .ok_or_else(|| "No signatures in server response pdu.".to_string())? { CanonicalJsonValue::Object(map) => map, _ => return Err("Invalid signatures object in server response pdu.".to_string()), - } - .keys() - { + } { + let signature_object = match signature { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err( + "Invalid signatures content object in server response pdu.".to_string() + ) + } + }; + + let signature_ids = signature_object.keys().collect::>(); + debug!("Fetching signing keys for {}", signature_server); let keys = match fetch_signing_keys( &db, &Box::::try_from(&**signature_server).map_err(|_| { "Invalid servername in signatures of server response pdu.".to_string() })?, + signature_ids, ) .await { @@ -987,26 +998,29 @@ fn validate_event<'a>( } }; - pub_key_map.insert(signature_server.clone(), keys); + pub_key_map.insert(dbg!(signature_server.clone()), dbg!(keys)); } - let mut val = - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version5) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value + let mut val = match ruma::signatures::verify_event( + dbg!(&pub_key_map), + &value, + &RoomVersionId::Version5, + ) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), } + } else { + value } - Err(_e) => { - error!("{}", _e); - return Err("Signature verification failed".to_string()); - } - }; + } + Err(_e) => { + error!("{}", _e); + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -1116,7 +1130,7 @@ pub(crate) async fn fetch_events( Arc::new(pdu) } None => { - debug!("Fetching event over 
federation"); + debug!("Fetching event over federation: {:?}", id); match db .sending .send_federation_request( @@ -1159,78 +1173,93 @@ pub(crate) async fn fetch_events( pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, + signature_ids: Vec<&String>, ) -> Result> { - let mut result = BTreeMap::new(); + let contains_all_ids = |keys: &BTreeMap| { + signature_ids + .iter() + .all(|&id| dbg!(dbg!(&keys).contains_key(dbg!(id)))) + }; - match db.globals.signing_keys_for(origin)? { - keys if !keys.is_empty() => Ok(keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect()), - _ => { - match db - .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) - .await - { - Ok(keys) => { - db.globals.add_signing_key(origin, &keys.server_key)?; + let mut result = db + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); - result.extend( - keys.server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - keys.server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - return Ok(result); - } - _ => { - for server in db.globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - &server, - get_remote_server_keys::v2::Request::new( - origin, - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ), - ) - .await - { - debug!("Got signing keys: {:?}", keys); - for k in keys.server_keys.into_iter() { - db.globals.add_signing_key(origin, &k)?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - return Ok(result); - } - } - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) - } + if contains_all_ids(&result) { + return Ok(result); + } + + if let Ok(get_keys_response) = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + { + db.globals + .add_signing_key(origin, &get_keys_response.server_key)?; + + result.extend( + get_keys_response + .server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + get_keys_response + .server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in db.globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + &server, + get_remote_server_keys::v2::Request::new( + origin, + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ), + ) + .await + { + debug!("Got signing keys: {:?}", keys); + for k in keys.server_keys.into_iter() { + db.globals.add_signing_key(origin, &k)?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); } } } + + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) } /// Gather all state snapshots needed to resolve the current state of the room. 
@@ -1244,7 +1273,7 @@ pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, ) -> Result> { - let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut current_leaves = dbg!(db.rooms.get_pdu_leaves(pdu.room_id())?); let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity @@ -1290,7 +1319,6 @@ pub(crate) async fn calculate_forward_extremities( /// /// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). -#[tracing::instrument(skip(db))] pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, @@ -1316,7 +1344,7 @@ pub(crate) async fn build_forward_extremity_snapshots( Some(leave_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(&leave_pdu.event_id)? + .pdu_shortstatehash(dbg!(&leave_pdu.event_id))? .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { @@ -1367,7 +1395,9 @@ pub(crate) fn update_resolved_state( new_state.insert( ( ev_type, - state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + state_k.ok_or_else(|| { + Error::Conflict("update_resolved_state: State contained non state event") + })?, ), pdu.event_id.clone(), ); @@ -1395,9 +1425,9 @@ pub(crate) fn append_incoming_pdu( new_state.insert( ( ev_type.clone(), - state_k - .clone() - .ok_or_else(|| Error::Conflict("State contained non state event"))?, + state_k.clone().ok_or_else(|| { + Error::Conflict("append_incoming_pdu: State contained non state event") + })?, ), state_pdu.event_id.clone(), ); From 3ea7d162dba75de25a598be8b3762f4b5e8a61ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 12:59:27 +0100 Subject: [PATCH 0481/1727] fix: various improvements and fixes --- Cargo.lock | 18 ++++++++++++ Cargo.toml | 9 +++--- src/client_server/context.rs | 17 ++++++++---- src/database.rs | 12 ++++---- src/database/pusher.rs | 2 +- src/database/rooms.rs | 2 +- src/database/sending.rs | 9 +----- src/ruma_wrapper.rs | 1 + src/server_server.rs | 54 +++++++++++++++--------------------- 9 files changed, 67 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5010da..adcc27b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,6 +1623,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1642,6 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1656,6 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1666,6 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1679,6 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" 
+source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1697,6 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1709,6 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1722,6 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1732,6 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1746,6 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1759,6 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1769,10 +1780,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1785,6 +1798,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1799,6 +1813,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1811,6 +1826,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1821,6 +1837,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2088,6 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" +source = 
"git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 1476200..33f1d1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,15 +18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -#state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/context.rs b/src/client_server/context.rs index cb9aaf9..1fee2f2 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -24,20 +24,25 @@ pub async fn get_context_route( )); } + let base_pdu_id = db + .rooms + .get_pdu_id(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event id not found.", + ))?; + + let base_token = db.rooms.pdu_count(&base_pdu_id)?; + let base_event = db .rooms - .get_pdu(&body.event_id)? + .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event not found.", ))? .to_room_event(); - let base_token = db - .rooms - .get_pdu_count(&body.event_id)? 
- .expect("event still exists"); - let events_before = db .rooms .pdus_until(&sender_user, &body.room_id, base_token) diff --git a/src/database.rs b/src/database.rs index 138efbe..47cee0d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -120,7 +120,7 @@ impl Database { token_userdeviceid: db.open_tree("token_userdeviceid")?, onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, userid_lastonetimekeyupdate: db.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: db.open_tree("devicekeychangeid_userid")?, + keychangeid_userid: db.open_tree("keychangeid_userid")?, keyid_key: db.open_tree("keyid_key")?, userid_masterkeyid: db.open_tree("userid_masterkeyid")?, userid_selfsigningkeyid: db.open_tree("userid_selfsigningkeyid")?, @@ -135,7 +135,7 @@ impl Database { readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?, roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt roomuserid_lastprivatereadupdate: db - .open_tree("roomid_lastprivatereadupdate")?, + .open_tree("roomuserid_lastprivatereadupdate")?, typingid_userid: db.open_tree("typingid_userid")?, roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?, presenceid_presence: db.open_tree("presenceid_presence")?, @@ -146,7 +146,7 @@ impl Database { roomid_pduleaves: db.open_tree("roomid_pduleaves")?, alias_roomid: db.open_tree("alias_roomid")?, - aliasid_alias: db.open_tree("alias_roomid")?, + aliasid_alias: db.open_tree("aliasid_alias")?, publicroomids: db.open_tree("publicroomids")?, tokenids: db.open_tree("tokenids")?, @@ -163,11 +163,11 @@ impl Database { stateid_shorteventid: db.open_tree("stateid_shorteventid")?, eventid_shorteventid: db.open_tree("eventid_shorteventid")?, shorteventid_eventid: db.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: db.open_tree("eventid_shortstatehash")?, + shorteventid_shortstatehash: db.open_tree("shorteventid_shortstatehash")?, roomid_shortstatehash: db.open_tree("roomid_shortstatehash")?, statehash_shortstatehash: db.open_tree("statehash_shortstatehash")?, - eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { @@ -179,7 +179,7 @@ impl Database { key_backups: key_backups::KeyBackups { backupid_algorithm: db.open_tree("backupid_algorithm")?, backupid_etag: db.open_tree("backupid_etag")?, - backupkeyid_backup: db.open_tree("backupkeyid_backupmetadata")?, + backupkeyid_backup: db.open_tree("backupkeyid_backup")?, }, transaction_ids: transaction_ids::TransactionIds { userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index b0b9e1e..f4b35f2 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -469,7 +469,7 @@ async fn send_notice( name: &str, ) -> Result<()> { // TODO: email - if pusher.kind == Some(PusherKind::Http) { + if pusher.kind == Some(PusherKind::Email) { return Ok(()); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2e2d486..2144340 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1524,7 +1524,7 @@ impl Rooms { let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(aliasid, &*alias.alias())?; + self.aliasid_alias.insert(aliasid, &*alias.as_bytes())?; } else { // room_id=None means remove alias 
let room_id = self diff --git a/src/database/sending.rs b/src/database/sending.rs index 9b74ed7..a9204c5 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -108,7 +108,6 @@ impl Sending { let mut subscriber = servernamepduids.watch_prefix(b""); loop { - println!("."); select! { Some(response) = futures.next() => { match response { @@ -262,8 +261,6 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - dbg!("there is a future"); - futures.push( Self::handle_event( outgoing_kind, @@ -292,7 +289,6 @@ impl Sending { #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { - dbg!(&server); let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); @@ -350,7 +346,6 @@ impl Sending { .collect::>(); let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = appservice_server::send_request( &db.globals, db.appservice @@ -458,7 +453,6 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdu to {}: {:#?}", userid, pdu); let _response = pusher::send_push_notice( &userid, unread, @@ -506,7 +500,6 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &db.globals, &*server, @@ -523,7 +516,7 @@ impl Sending { ) .await .map(|response| { - error!("server response: {:?}", response); + info!("server response: {:?}", response); kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8c72529..9787e2d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -219,6 +219,7 @@ where "Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization", ); + response.raw_header("Access-Control-Max-Age", "86400"); response.ok() } Err(_) => Err(Status::InternalServerError), diff --git a/src/server_server.rs b/src/server_server.rs index 82c5f82..3c364db 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -863,8 +863,6 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - debug!("auth events: {:?}", auth_cache); - let res = match state_res::StateResolution::resolve( pdu.room_id(), &RoomVersionId::Version6, @@ -952,7 +950,7 @@ type AsyncRecursiveResult<'a, T> = Pin( db: &'a Database, value: CanonicalJsonObject, @@ -998,29 +996,26 @@ fn validate_event<'a>( } }; - pub_key_map.insert(dbg!(signature_server.clone()), dbg!(keys)); + pub_key_map.insert(signature_server.clone(), keys); } - let mut val = match ruma::signatures::verify_event( - dbg!(&pub_key_map), - &value, - &RoomVersionId::Version5, - ) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), + let mut val = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => { - error!("{}", _e); - return Err("Signature verification failed".to_string()); - } - }; + Err(e) => { + error!("{:?}: 
{}", value, e); + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -1085,7 +1080,7 @@ fn validate_event<'a>( }) } -#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, key_map, auth_cache))] async fn fetch_check_auth_events( db: &Database, origin: &ServerName, @@ -1108,7 +1103,7 @@ async fn fetch_check_auth_events( /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. -#[tracing::instrument(skip(db))] +//#[tracing::instrument(skip(db, key_map, auth_cache))] pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, @@ -1175,11 +1170,8 @@ pub(crate) async fn fetch_signing_keys( origin: &ServerName, signature_ids: Vec<&String>, ) -> Result> { - let contains_all_ids = |keys: &BTreeMap| { - signature_ids - .iter() - .all(|&id| dbg!(dbg!(&keys).contains_key(dbg!(id)))) - }; + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|&id| keys.contains_key(id)); let mut result = db .globals @@ -1273,7 +1265,7 @@ pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, ) -> Result> { - let mut current_leaves = dbg!(db.rooms.get_pdu_leaves(pdu.room_id())?); + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity @@ -1344,7 +1336,7 @@ pub(crate) async fn build_forward_extremity_snapshots( Some(leave_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(dbg!(&leave_pdu.event_id))? + .pdu_shortstatehash(&leave_pdu.event_id)? 
.ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { From 46d8f36a2ca6181b8cc8643dee3a19fc32877681 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 19:46:54 +0100 Subject: [PATCH 0482/1727] fix: media thumbnail calculation and appservice detection --- src/client_server/membership.rs | 8 +++++--- src/database/media.rs | 9 +++++---- src/database/rooms.rs | 5 ++++- src/pdu.rs | 10 +++++++--- src/server_server.rs | 10 ++++++++-- 5 files changed, 29 insertions(+), 13 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d571eaa..71be6ac 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{info, warn}; +use log::{error, info, warn}; use ruma::{ api::{ client::{ @@ -544,8 +544,10 @@ async fn join_room_by_id_helper( .await?; let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str(pdu.json().get()) - .expect("converting raw jsons to values always works"); + let mut value = serde_json::from_str(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = EventId::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) diff --git a/src/database/media.rs b/src/database/media.rs index 448d071..f958dc8 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -226,16 +226,17 @@ impl Media { } let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::Triangle) + image.resize_to_fill(width, height, FilterType::CatmullRom) } else { let (exact_width, exact_height) = { // Copied from image::dynimage::resize_dimensions let ratio = u64::from(original_width) * u64::from(height); let nratio = u64::from(width) * u64::from(original_height); - let use_width = nratio > ratio; + let use_width = nratio <= ratio; let intermediate = if use_width { - u64::from(original_height) * u64::from(width) / u64::from(width) + u64::from(original_height) * u64::from(width) + / u64::from(original_width) } else { u64::from(original_width) * u64::from(height) / u64::from(original_height) @@ -261,7 +262,7 @@ impl Media { } }; - image.thumbnail_exact(exact_width, exact_height) + image.thumbnail_exact(dbg!(exact_width), dbg!(exact_height)) }; let mut thumbnail_bytes = Vec::new(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2144340..3bf72d0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1193,6 +1193,9 @@ impl Rooms { .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) + || db.rooms.room_members(&room_id).any(|userid| { + userid.map_or(false, |userid| users.is_match(userid.as_str())) + }) }; let matching_aliases = |aliases: &Regex| { self.room_aliases(&room_id) @@ -1201,9 +1204,9 @@ impl Rooms { }; if bridge_user_id.map_or(false, user_is_joined) - || users.iter().any(matching_users) || aliases.iter().any(matching_aliases) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || users.iter().any(matching_users) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } diff --git a/src/pdu.rs b/src/pdu.rs index 6085581..009fde6 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,4 +1,5 @@ use crate::Error; +use log::error; use ruma::{ 
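// Worked example with illustrative numbers for the thumbnail-dimension fix in
// src/database/media.rs above: for an original image of 2000x1000 and a
// requested box of 800x600, ratio = 2000 * 600 = 1_200_000 and
// nratio = 800 * 1000 = 800_000, so nratio <= ratio and use_width is true;
// the intermediate dimension is 1000 * 800 / 2000 = 400, giving an 800x400
// thumbnail that keeps the 2:1 aspect ratio and fits inside the requested
// box. The old code divided by `width` instead of `original_width` (which
// always produced `original_height`) and had the use_width comparison
// inverted. How (exact_width, exact_height) are picked from use_width and
// intermediate sits in surrounding context that this hunk does not show.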
events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, @@ -322,8 +323,11 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, -) -> (EventId, CanonicalJsonObject) { - let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); +) -> crate::Result<(EventId, CanonicalJsonObject)> { + let value = serde_json::from_str(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = EventId::try_from(&*format!( "${}", @@ -332,7 +336,7 @@ pub(crate) fn gen_event_id_canonical_json( )) .expect("ruma's reference hashes are valid event ids"); - (event_id, value) + Ok((event_id, value)) } /// Build the start of a PDU in order to add it to the `Database`. diff --git a/src/server_server.rs b/src/server_server.rs index 3c364db..fa5706d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -556,7 +556,13 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return None; + } + }; // If we have no idea about this room skip the PDU let room_id = match value @@ -1138,7 +1144,7 @@ pub(crate) async fn fetch_events( Ok(res) => { debug!("Got event over federation: {:?}", res); let (event_id, value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu); + crate::pdu::gen_event_id_canonical_json(&res.pdu)?; let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) .await From 1d00a8c41f92c0df5dac40299bf53134bcfa31b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 22:01:14 +0100 Subject: [PATCH 0483/1727] improvement: better logging Use CONDUIT_LOG or the log setting in the config --- conduit-example.toml | 1 + src/client_server/thirdparty.rs | 2 +- src/database.rs | 6 ++++++ src/main.rs | 3 ++- src/server_server.rs | 13 ++++++++----- 5 files changed, 18 insertions(+), 7 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 3aca538..fea84bd 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -35,6 +35,7 @@ max_request_size = 20_000_000 # in bytes #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#log = "info,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 3c07699..fe5b784 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -12,7 +12,7 @@ use std::collections::BTreeMap; )] #[tracing::instrument] pub async fn get_protocols_route() -> ConduitResult { - warn!("TODO: get_protocols_route"); + // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), } diff --git a/src/database.rs b/src/database.rs index 47cee0d..b14a912 100644 --- a/src/database.rs +++ b/src/database.rs @@ -46,6 +46,8 @@ pub struct Config { jwt_secret: 
Option, #[serde(default = "Vec::new")] trusted_servers: Vec>, + #[serde(default = "default_log")] + pub log: String, } fn false_fn() -> bool { @@ -68,6 +70,10 @@ fn default_max_concurrent_requests() -> u16 { 4 } +fn default_log() -> String { + "info,rocket=off,_=off,sled=off".to_owned() +} + #[derive(Clone)] pub struct Database { pub globals: globals::Globals, diff --git a/src/main.rs b/src/main.rs index 2ec3a42..327aefa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -205,7 +205,8 @@ async fn main() { rocket.launch().await.unwrap(); } else { - pretty_env_logger::init(); + std::env::set_var("CONDUIT_LOG", config.log); + pretty_env_logger::init_custom_env("CONDUIT_LOG"); let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); diff --git a/src/server_server.rs b/src/server_server.rs index fa5706d..4912878 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1339,11 +1339,14 @@ pub(crate) async fn build_forward_extremity_snapshots( // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. - Some(leave_pdu) => { + Some(leaf_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(&leave_pdu.event_id)? - .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; + .pdu_shortstatehash(&leaf_pdu.event_id)? + .ok_or_else(|| { + warn!("Leaf pdu: {:?}", leaf_pdu); + Error::bad_database("Found pdu with no statehash in db.") + })?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { includes_current_state = true; @@ -1357,8 +1360,8 @@ pub(crate) async fn build_forward_extremity_snapshots( .collect::>(); // Now it's the state after - let key = (leave_pdu.kind.clone(), leave_pdu.state_key.clone()); - state_before.insert(key, Arc::new(leave_pdu)); + let key = (leaf_pdu.kind.clone(), leaf_pdu.state_key.clone()); + state_before.insert(key, Arc::new(leaf_pdu)); fork_states.insert(state_before); } From e305889b7250a97a7e83c96f98d4e65a570be35b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Mar 2021 08:48:28 +0100 Subject: [PATCH 0484/1727] feat: room_account_data endpoints --- src/client_server/config.rs | 54 ++++++++++++++++++++++++++++++++++++- src/main.rs | 2 ++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index a53b7cd..6abcba2 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -3,7 +3,7 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::config::{get_global_account_data, set_global_account_data}, + r0::config::{get_room_account_data, get_global_account_data, set_room_account_data, set_global_account_data}, }, events::{custom::CustomEventContent, BasicEvent}, serde::Raw, @@ -43,6 +43,37 @@ pub async fn set_global_account_data_route( Ok(set_global_account_data::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn set_room_account_data_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data = serde_json::from_str(body.data.get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + + db.account_data.update( + Some(&body.room_id), + 
sender_user, + event_type.clone().into(), + &BasicEvent { + content: CustomEventContent { event_type, data }, + }, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_room_account_data::Response.into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") @@ -63,3 +94,24 @@ pub async fn get_global_account_data_route( Ok(get_global_account_data::Response { account_data: data }.into()) } + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_room_account_data_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data = db + .account_data + .get::>(Some(&body.room_id), sender_user, body.event_type.clone().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + db.flush().await?; + + Ok(get_room_account_data::Response { account_data: data }.into()) +} diff --git a/src/main.rs b/src/main.rs index 327aefa..696ce5c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -77,7 +77,9 @@ fn setup_rocket() -> (rocket::Rocket, Config) { client_server::get_filter_route, client_server::create_filter_route, client_server::set_global_account_data_route, + client_server::set_room_account_data_route, client_server::get_global_account_data_route, + client_server::get_room_account_data_route, client_server::set_displayname_route, client_server::get_displayname_route, client_server::set_avatar_url_route, From e50f2864ded296d0143f10782f1b60cf41189514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 22:31:19 +0100 Subject: [PATCH 0485/1727] improvement: save state for send_join pdu --- src/client_server/membership.rs | 8 ++++++++ src/database/rooms.rs | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 71be6ac..36bbced 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -650,6 +650,10 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&pdu)?; } + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), @@ -658,6 +662,10 @@ async fn join_room_by_id_helper( &[pdu.event_id.clone()], db, )?; + + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&room_id, statehashid)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3bf72d0..91f468f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1115,7 +1115,6 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. 
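// Rough restatement of the ordering invariant from the comment above and from
// join_room_by_id_helper in this patch (same method names, arguments
// simplified):
//
//     let statehashid = rooms.append_to_state(&pdu, &globals)?;   // 1. write the state snapshot
//     rooms.append_pdu(&pdu, pdu_json, /* ... */)?;               // 2. write the timeline event
//     rooms.set_room_state(&room_id, statehashid)?;               // 3. advance the current state
//
// Writing the snapshot first and advancing the room's current state last
// means a reader never sees a pdu whose state snapshot is missing, nor a
// current state that points at events which have not been persisted yet.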
let statehashid = self.append_to_state(&pdu, &db.globals)?; - // remove the self.append_pdu( &pdu, pdu_json, From 16eed1d8c251748087dabcb42ab48b676c776224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Mar 2021 11:52:10 +0100 Subject: [PATCH 0486/1727] chore: get rid of warnings --- src/client_server/backup.rs | 10 ++--- src/client_server/config.rs | 21 +++++++-- src/client_server/sync.rs | 11 +---- src/client_server/thirdparty.rs | 1 - src/database/appservice.rs | 4 +- src/database/media.rs | 2 +- src/database/rooms.rs | 16 +++---- src/database/sending.rs | 33 +++++++------- src/server_server.rs | 80 ++++++++++++++------------------- 9 files changed, 82 insertions(+), 96 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index f33d0de..12f3bfd 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -267,12 +267,10 @@ pub async fn get_backup_key_session_route( let key_data = db .key_backups .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::NotFound, - "Backup key not found for this user's session.", - ) - })?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Backup key not found for this user's session.", + ))?; Ok(get_backup_key_session::Response { key_data }.into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 6abcba2..68cd2e0 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -3,7 +3,10 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::config::{get_room_account_data, get_global_account_data, set_room_account_data, set_global_account_data}, + r0::config::{ + get_global_account_data, get_room_account_data, set_global_account_data, + set_room_account_data, + }, }, events::{custom::CustomEventContent, BasicEvent}, serde::Raw, @@ -45,7 +48,10 @@ pub async fn set_global_account_data_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") + put( + "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", + data = "" + ) )] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( @@ -97,7 +103,10 @@ pub async fn get_global_account_data_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") + get( + "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", + data = "" + ) )] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( @@ -108,7 +117,11 @@ pub async fn get_room_account_data_route( let data = db .account_data - .get::>(Some(&body.room_id), sender_user, body.event_type.clone().into())? + .get::>( + Some(&body.room_id), + sender_user, + body.event_type.clone().into(), + )? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; db.flush().await?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index da2ddf4..d38699c 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -136,9 +136,7 @@ pub async fn sync_events_route( .map(|since_shortstatehash| { Ok::<_, Error>( since_shortstatehash - .map(|since_shortstatehash| { - db.rooms.state_full(&room_id, since_shortstatehash) - }) + .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash)) .transpose()?, ) }) @@ -512,12 +510,7 @@ pub async fn sync_events_route( }) .and_then(|shortstatehash| { db.rooms - .state_get( - &room_id, - shortstatehash, - &EventType::RoomMember, - sender_user.as_str(), - ) + .state_get(shortstatehash, &EventType::RoomMember, sender_user.as_str()) .ok()? .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index fe5b784..5d3c540 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,7 +1,6 @@ use crate::ConduitResult; use ruma::api::client::r0::thirdparty::get_protocols; -use log::warn; #[cfg(feature = "conduit_bin")] use rocket::get; use std::collections::BTreeMap; diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 764291d..222eb18 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -55,9 +55,7 @@ impl Appservice { }) } - pub fn iter_all<'a>( - &'a self, - ) -> impl Iterator> + 'a { + pub fn iter_all(&self) -> impl Iterator> + '_ { self.iter_ids().filter_map(|id| id.ok()).map(move |id| { Ok(( id.clone(), diff --git a/src/database/media.rs b/src/database/media.rs index f958dc8..37fcb74 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -262,7 +262,7 @@ impl Media { } }; - image.thumbnail_exact(dbg!(exact_width), dbg!(exact_height)) + image.thumbnail_exact(exact_width, exact_height) }; let mut thumbnail_bytes = Vec::new(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 91f468f..175d4ac 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -108,7 +108,6 @@ impl Rooms { pub fn state_full( &self, - room_id: &RoomId, shortstatehash: u64, ) -> Result> { Ok(self @@ -151,7 +150,6 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn state_get( &self, - room_id: &RoomId, shortstatehash: u64, event_type: &EventType, state_key: &str, @@ -257,11 +255,11 @@ impl Rooms { /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> Result { + fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { // We only hash the pdu's event ids, not the whole pdu let bytes = bytes_list.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); - Ok(hash.as_ref().into()) + hash.as_ref().into() } /// Checks if a room exists. @@ -291,7 +289,7 @@ impl Rooms { .values() .map(|event_id| event_id.as_bytes()) .collect::>(), - )?; + ); let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { Some(shortstatehash) => { @@ -353,7 +351,7 @@ impl Rooms { room_id: &RoomId, ) -> Result> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
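// Illustrative sketch (not part of this patch) of what calculate_hash above
// produces: a SHA-256 digest over the state's event ids joined with a 0xff
// separator, using the `ring` crate exactly as the real function does.
//
//     use ring::digest;
//
//     let ids: [&[u8]; 2] = [b"$event_a", b"$event_b"];
//     let bytes = ids.join(&0xff);
//     let hash = digest::digest(&digest::SHA256, &bytes);
//     let state_hash = hash.as_ref().to_vec();
//
// Iteration over the state map (a BTreeMap) is deterministic, so two
// identical state sets produce the same bytes and hash to the same
// StateHashId, which is what the statehash_shortstatehash lookups in this
// file rely on to detect state that already exists in the db.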
{ - self.state_full(&room_id, current_shortstatehash) + self.state_full(current_shortstatehash) } else { Ok(BTreeMap::new()) } @@ -368,7 +366,7 @@ impl Rooms { state_key: &str, ) -> Result> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(&room_id, current_shortstatehash, event_type, state_key) + self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) } @@ -582,7 +580,7 @@ impl Rooms { { if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(&pdu.room_id, shortstatehash, &pdu.kind, &state_key) + .state_get(shortstatehash, &pdu.kind, &state_key) .unwrap() { unsigned.insert( @@ -849,7 +847,7 @@ impl Rooms { .values() .map(|event_id| &**event_id) .collect::>(), - )?; + ); let shortstatehash = match self.statehash_shortstatehash.get(&new_state_hash)? { Some(shortstatehash) => { diff --git a/src/database/sending.rs b/src/database/sending.rs index a9204c5..1cc2f91 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -10,11 +10,11 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{error, info, warn}; +use log::{info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, client::r0::push::Pusher, federation, OutgoingRequest}, + api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, uint, ServerName, UInt, UserId, }; @@ -264,7 +264,7 @@ impl Sending { futures.push( Self::handle_event( outgoing_kind, - vec![pdu_id.into()], + vec![pdu_id], &db, ) ); @@ -395,18 +395,19 @@ impl Sending { continue; } - let userid = UserId::try_from(utils::string_from_bytes(user).map_err(|e| { - ( - OutgoingKind::Push(user.clone(), pushkey.clone()), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|e| { - ( - OutgoingKind::Push(user.clone(), pushkey.clone()), - Error::bad_database("Invalid push user id in db."), - ) - })?; + let userid = + UserId::try_from(utils::string_from_bytes(user).map_err(|_| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user string in db."), + ) + })?) 
+ .map_err(|_| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user id in db."), + ) + })?; let mut senderkey = user.clone(); senderkey.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 4912878..8babc89 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -21,10 +21,9 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, - identifiers::{KeyId, KeyName}, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, SigningKeyAlgorithm, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -788,23 +787,17 @@ pub async fn send_transaction_message_route<'a>( // This will create the state after any state snapshot it builds // So current_state will have the incoming event inserted to it - let mut fork_states = match build_forward_extremity_snapshots( - &db, - pdu.clone(), - server_name, - current_state, - &extremities, - &pub_key_map, - &mut auth_cache, - ) - .await - { - Ok(states) => states, - Err(_) => { - resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); - continue; - } - }; + let mut fork_states = + match build_forward_extremity_snapshots(&db, pdu.clone(), current_state, &extremities) + .await + { + Ok(states) => states, + Err(_) => { + resolved_map + .insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; // Make this the state after. let mut state_after = state_at_event.clone(); @@ -1320,11 +1313,8 @@ pub(crate) async fn calculate_forward_extremities( pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, - origin: &ServerName, mut current_state: StateMap>, current_leaves: &[EventId], - pub_key_map: &PublicKeyMap, - auth_cache: &mut EventMap>, ) -> Result>>> { let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; @@ -1354,7 +1344,7 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut state_before = db .rooms - .state_full(pdu.room_id(), pdu_shortstatehash)? + .state_full(pdu_shortstatehash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); @@ -1396,9 +1386,9 @@ pub(crate) fn update_resolved_state( new_state.insert( ( ev_type, - state_k.ok_or_else(|| { - Error::Conflict("update_resolved_state: State contained non state event") - })?, + state_k.ok_or(Error::Conflict( + "update_resolved_state: State contained non state event", + ))?, ), pdu.event_id.clone(), ); @@ -1426,9 +1416,9 @@ pub(crate) fn append_incoming_pdu( new_state.insert( ( ev_type.clone(), - state_k.clone().ok_or_else(|| { - Error::Conflict("append_incoming_pdu: State contained non state event") - })?, + state_k.clone().ok_or(Error::Conflict( + "append_incoming_pdu: State contained non state event", + ))?, ), state_pdu.event_id.clone(), ); @@ -1600,26 +1590,22 @@ pub fn get_room_state_ids_route<'a>( let mut todo = BTreeSet::new(); todo.insert(body.event_id.clone()); - loop { - if let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); + while let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? { + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); } else { - break; + warn!("Could not find pdu mentioned in auth events."); } + + todo.remove(&event_id); } Ok(get_room_state_ids::v1::Response { From c213769d9f8448b21ae414db62a68ea0044701cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 25 Mar 2021 23:55:40 +0100 Subject: [PATCH 0487/1727] improvement: refactor state res and fix a few bugs in the process --- Cargo.lock | 2 +- Cargo.toml | 4 +- src/client_server/membership.rs | 8 +- src/client_server/sync.rs | 6 +- src/database/pusher.rs | 31 +- src/database/rooms.rs | 302 +++++---- src/database/sending.rs | 7 +- src/server_server.rs | 1124 +++++++++++++------------------ 8 files changed, 651 insertions(+), 833 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adcc27b..9580942 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2105,7 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" +source = "git+https://github.com/timokoesters/state-res?branch=improvements#1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 33f1d1e..453bc8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,10 +23,10 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-a #ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -# state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", branch = "improvements", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 36bbced..6a64ea4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,11 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{ - collections::{BTreeMap, HashMap}, - convert::TryFrom, - 
sync::Arc, -}; +use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -573,7 +569,7 @@ async fn join_room_by_id_helper( let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; - let mut state = HashMap::new(); + let mut state = BTreeMap::new(); for pdu in send_join_response .room_state diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d38699c..bd7046d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,6 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; +use log::error; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -505,7 +506,10 @@ pub async fn sync_events_route( db.rooms .pdu_shortstatehash(&pdu.1.event_id) .ok()? - .ok_or_else(|| Error::bad_database("Pdu in db doesn't have a state hash.")) + .ok_or_else(|| { + error!("{:?}", pdu.1); + Error::bad_database("Pdu in db doesn't have a state hash.") + }) .ok() }) .and_then(|shortstatehash| { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f4b35f2..8e9b24e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -215,7 +215,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -235,7 +235,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -266,8 +266,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -292,8 +291,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -309,7 +307,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -346,8 +344,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -372,8 +369,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -389,7 +385,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -405,7 +401,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -421,7 +417,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; 
break; } } @@ -435,7 +431,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -449,7 +445,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -466,7 +462,6 @@ async fn send_notice( tweaks: Vec, event: &PduEvent, db: &Database, - name: &str, ) -> Result<()> { // TODO: email if pusher.kind == Some(PusherKind::Email) { @@ -514,7 +509,6 @@ async fn send_notice( } if event_id_only { - error!("SEND PUSH NOTICE `{}`", name); send_request( &db.globals, &url, @@ -542,7 +536,6 @@ async fn send_notice( .flatten(); notifi.room_name = room_name.as_deref(); - error!("SEND PUSH NOTICE Full `{}`", name); send_request( &db.globals, &url, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 175d4ac..e1e97b4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,27 +3,24 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{error, warn}; +use log::{debug, error, warn}; use regex::Regex; use ring::digest; use ruma::{ api::client::error::ErrorKind, events::{ ignored_user_list, - room::{ - member, message, - power_levels::{self, PowerLevelsEventContent}, - }, + room::{create::CreateEventContent, member, message}, EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Event, StateMap}; +use state_res::{Event, StateMap}; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, sync::Arc, @@ -227,26 +224,24 @@ impl Rooms { sender: &UserId, state_key: Option<&str>, content: serde_json::Value, - ) -> Result> { + ) -> Result>> { let auth_events = state_res::auth_types_for_event( kind, sender, state_key.map(|s| s.to_string()), - content, + content.clone(), ); let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some(pdu) = self.room_state_get( - room_id, - &event_type, - &state_key - .as_deref() - .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, - )? { - events.insert((event_type, state_key), pdu); + if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { + events.insert((event_type, state_key), Arc::new(pdu)); } else { - warn!("Could not find {} {:?} in state", event_type, state_key); + // This is okay because when creating a new room some events were not created yet + debug!( + "{:?}: Could not find {} {:?} in state", + content, event_type, state_key + ); } } Ok(events) @@ -281,7 +276,7 @@ impl Rooms { pub fn force_state( &self, room_id: &RoomId, - state: HashMap<(EventType, String), EventId>, + state: BTreeMap<(EventType, String), EventId>, globals: &super::globals::Globals, ) -> Result<()> { let state_hash = self.calculate_hash( @@ -293,8 +288,10 @@ impl Rooms { let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? 
{ Some(shortstatehash) => { - warn!("state hash already existed?!"); - shortstatehash.to_vec() + // State already existed in db + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &*shortstatehash)?; + return Ok(()); } None => { let shortstatehash = globals.next_count()?; @@ -483,14 +480,11 @@ impl Rooms { } /// Returns the leaf pdus of a room. - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - let mut events = Vec::new(); - - for event in self - .roomid_pduleaves + self.roomid_pduleaves .scan_prefix(prefix) .values() .map(|bytes| { @@ -501,11 +495,7 @@ impl Rooms { .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?, ) }) - { - events.push(event?); - } - - Ok(events) + .collect() } /// Replace the leaves of a room. @@ -761,6 +751,90 @@ impl Rooms { Ok(()) } + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + pub fn set_event_state( + &self, + event_id: &EventId, + state: &StateMap>, + globals: &super::globals::Globals, + ) -> Result<()> { + let shorteventid = match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + let state_hash = self.calculate_hash( + &state + .values() + .map(|pdu| pdu.event_id.as_bytes()) + .collect::>(), + ); + + let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => { + // State already existed in db + self.shorteventid_shortstatehash + .insert(shorteventid, &*shortstatehash)?; + return Ok(()); + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash.to_be_bytes().to_vec() + } + }; + + for ((event_type, state_key), pdu) in state { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() + } + }; + + let shorteventid = match self.eventid_shorteventid.get(pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + let mut state_id = shortstatehash.clone(); + state_id.extend_from_slice(&shortstatekey); + + self.stateid_shorteventid + .insert(&*state_id, &*shorteventid)?; + } + + self.shorteventid_shortstatehash + .insert(shorteventid, &*shortstatehash)?; + + Ok(()) + } + /// Generates a new StateHash and associates it with the incoming event. 
/// /// This adds all current state events (not including the incoming event) @@ -900,8 +974,37 @@ impl Rooms { redacts, } = pdu_builder; // TODO: Make sure this isn't called twice in parallel - let mut prev_events = self.get_pdu_leaves(&room_id)?; - prev_events.truncate(20); + let prev_events = self + .get_pdu_leaves(&room_id)? + .into_iter() + .take(20) + .collect::>(); + + let create_event = self.room_state_get(&room_id, &EventType::RoomCreate, "")?; + + let create_event_content = create_event + .as_ref() + .map(|create_event| { + Ok::<_, Error>( + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, + ) + }) + .transpose()?; + + let create_prev_event = if prev_events.len() == 1 + && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) + { + create_event.map(Arc::new) + } else { + None + }; + + let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); let auth_events = self.get_auth_events( &room_id, @@ -911,118 +1014,6 @@ impl Rooms { content.clone(), )?; - // Is the event authorized? - if let Some(state_key) = &state_key { - let power_levels = self - .room_state_get(&room_id, &EventType::RoomPowerLevels, "")? - .map_or_else( - || { - Ok::<_, Error>(power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 0.into(), - users: BTreeMap::new(), - users_default: 0.into(), - notifications: - ruma::events::room::power_levels::NotificationPowerLevels { - room: 50.into(), - }, - }) - }, - |power_levels| { - Ok(serde_json::from_value::>( - power_levels.content, - ) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?) - }, - )?; - let sender_membership = self - .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, - ) - })?; - - let sender_power = power_levels.users.get(&sender).map_or_else( - || { - if sender_membership != member::MembershipState::Join { - None - } else { - Some(&power_levels.users_default) - } - }, - // If it's okay, wrap with Some(_) - Some, - ); - - // Is the event allowed? - #[allow(clippy::blocks_in_if_conditions)] - if !match event_type { - EventType::RoomEncryption => { - // Only allow encryption events if it's allowed in the config - db.globals.allow_encryption() - } - EventType::RoomMember => { - let prev_event = self - .get_pdu(prev_events.get(0).ok_or(Error::BadRequest( - ErrorKind::Unknown, - "Membership can't be the first event", - ))?)? - .map(Arc::new); - event_auth::valid_membership_change( - Some(state_key.as_str()), - &sender, - content.clone(), - prev_event, - None, // TODO: third party invite - &auth_events - .iter() - .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) - }) - .collect::>>()?, - ) - .map_err(|e| { - log::error!("{}", e); - Error::Conflict("Found incoming PDU with invalid data.") - })? - } - EventType::RoomCreate => prev_events.is_empty(), - // Not allow any of the following events if the sender is not joined. 
- _ if sender_membership != member::MembershipState::Join => false, - _ => { - // TODO - sender_power.unwrap_or(&power_levels.users_default) - >= &power_levels.state_default - } - } { - error!("Unauthorized {}", event_type); - // Not authorized - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized", - )); - } - } else if !self.is_joined(&sender, &room_id)? { - // TODO: auth rules apply to all events, not only those with a state key - error!("Unauthorized {}", event_type); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized", - )); - } - // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() @@ -1057,8 +1048,8 @@ impl Rooms { .try_into() .map_err(|_| Error::bad_database("Depth is invalid"))?, auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) .collect(), redacts, unsigned, @@ -1068,6 +1059,23 @@ impl Rooms { signatures: BTreeMap::new(), }; + if !state_res::auth_check( + &room_version, + &Arc::new(pdu.clone()), + create_prev_event, + &auth_events, + None, // TODO: third_party_invite + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })? { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event is not authorized.", + )); + } + // Hash and sign let mut pdu_json = utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); diff --git a/src/database/sending.rs b/src/database/sending.rs index 1cc2f91..b792479 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -175,8 +175,7 @@ impl Sending { // servercurrentpdus with the prefix should be empty now } } - Err((outgoing_kind, e)) => { - info!("Couldn't send transaction to {:?}\n{}", outgoing_kind, e); + Err((outgoing_kind, _)) => { let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(serv) => { let mut p = b"+".to_vec(); @@ -217,7 +216,7 @@ impl Sending { let exponential_backoff = |(tries, instant): &(u32, Instant)| { // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60*60*24) { min_elapsed_duration = Duration::from_secs(60*60*24); } @@ -261,6 +260,8 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); + last_failed_try.remove(&outgoing_kind); + futures.push( Self::handle_event( outgoing_kind, diff --git a/src/server_server.rs b/src/server_server.rs index 8babc89..e461b5a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -22,12 +22,12 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, serde::to_canonical_value, - signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, + signatures::CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -180,13 +180,7 @@ where .collect::>(); if status != 200 { - info!( - "Server returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) - ); + info!("{} {}:\n{}", url, status, String::from_utf8_lossy(&body),); } let response = 
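// Worked numbers (illustrative) for the exponential backoff change in
// src/database/sending.rs above: with the 30-second base and the quadratic
// tries * tries factor, a failing destination is retried after at least 30s
// (1 failure), 2 minutes (2 failures), 4.5 minutes (3 failures), 50 minutes
// (10 failures), and the wait is capped at 24 hours, reached around the 54th
// consecutive failure since 30s * 54 * 54 exceeds 86_400s.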
T::IncomingResponse::try_from( @@ -194,13 +188,7 @@ where .body(body) .expect("reqwest body is valid http body"), ); - response.map_err(|_| { - info!( - "Server returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Server returned bad response.") - }) + response.map_err(|_| Error::BadServerResponse("Server returned bad response.")) } Err(e) => Err(e.into()), } @@ -508,8 +496,6 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - info!("Incoming PDUs: {:?}", &body.pdus); - for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -548,385 +534,19 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); - let pdus_to_resolve = body - .pdus - .iter() - .filter_map(|pdu| { - // 1. Is a valid event, otherwise it is dropped. - // Ruma/PduEvent/StateEvent satisfies this - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return None; - } - }; - - // If we have no idea about this room skip the PDU - let room_id = match value - .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() - { - Some(id) => id, - None => { - resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); - return None; - } - }; - - // 1. check the server is in the room (optional) - match db.rooms.exists(&room_id) { - Ok(true) => {} - _ => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".to_string())); - return None; - } - } - - // If we know of this pdu we don't need to continue processing it - if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { - return None; - } - - Some((event_id, room_id, value)) - }) - .collect::>(); - - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? - // SPEC: - // Servers MUST strictly enforce the JSON format specified in the appendices. - // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of - // events over federation. For example, the Federation API's /send endpoint would - // discard the event whereas the Client Server API's /send/{eventType} endpoint - // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - info!("Working on incoming pdu: {:?}", value); - let server_name = &body.body.origin; - let mut pub_key_map = BTreeMap::new(); - - // TODO: make this persist but not a DB Tree... - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. This could potentially also be some sort of trie (suffix tree) - // like structure so that once an auth event is known it would know (using indexes maybe) all of - // the auth events that it references. - let mut auth_cache = EventMap::new(); - - // 2. check content hash, redact if doesn't match - // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 7. 
if not timeline event: stop - // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous_create): (Arc, Option>) = match validate_event( - &db, - value, - event_id.clone(), - &mut pub_key_map, - server_name, - // All the auth events gathered will be here - &mut auth_cache, - ) - .await - { - Ok(pdu) => pdu, - Err(e) => { - resolved_map.insert(event_id, Err(e)); - continue; - } - }; - debug!("Validated event."); - - // 6. persist the event as an outlier. - db.rooms.add_pdu_outlier(&pdu)?; - info!("Added pdu as outlier."); - - // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all - // the checks in this list starting at 1. These are not timeline events. - // - // Step 10. check the auth of the event passes based on the calculated state of the event - // - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event."); - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, - server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - debug!("Fetching state events at event."); - let state = match fetch_events( - &db, - server_name, - &mut pub_key_map, - &res.pdu_ids, - &mut auth_cache, - ) - .await - { - Ok(state) => state, - Err(_) => continue, - }; - - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - error!("Server sent us an invalid state"); - continue; - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - let incoming_auth_events = match fetch_events( - &db, - server_name, - &mut pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await - { - Ok(state) => state, - Err(_) => continue, - }; - - debug!("Fetching auth events of state events at event."); - (state, incoming_auth_events) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; - - // 10. This is the actual auth check for state at the event - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous_create.clone(), - &state_at_event, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Event failed auth with state_at - resolved_map.insert( - event_id, - Err("Event has failed auth check with state at the event".into()), - ); - continue; - } - debug!("Auth check succeeded."); - // End of step 10. - - // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it - let current_state = db - .rooms - .room_state_full(pdu.room_id())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous_create, - ¤t_state, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. 
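// Background on the "soft fail" wording in the removed code and in step 13 of
// the new handle_incoming_pdu further down: per the Matrix server-server
// spec, a soft-failed event is still stored and may be referenced by other
// servers as a prev/auth event, but the homeserver does not relay it to
// clients and does not treat it as a forward extremity, so on its own it does
// not change the room's current state (it can come back into play if a later
// accepted event references it).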
- resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - continue; - }; - debug!("Auth check with current state succeeded."); - - // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res - // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) - // - // calculate_forward_extremities takes care of adding the current state if not already in the state sets - // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let extremities = match calculate_forward_extremities(&db, &pdu).await { - Ok(fork_ids) => { - debug!("Calculated new forward extremities: {:?}", fork_ids); - fork_ids - } + for pdu in body.pdus.iter() { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + Ok(t) => t, Err(_) => { - resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + // Event could not be converted to canonical json continue; } }; - // This will create the state after any state snapshot it builds - // So current_state will have the incoming event inserted to it - let mut fork_states = - match build_forward_extremity_snapshots(&db, pdu.clone(), current_state, &extremities) - .await - { - Ok(states) => states, - Err(_) => { - resolved_map - .insert(event_id, Err("Failed to gather forward extremities".into())); - continue; - } - }; - - // Make this the state after. - let mut state_after = state_at_event.clone(); - state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); - // Add the incoming event to the mix of state snapshots - // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets - fork_states.insert(state_after.clone()); - - let fork_states = fork_states.into_iter().collect::>(); - - let mut update_state = false; - // 13. 
start state-res with all previous forward extremities minus the ones that are in - // the prev_events of this event plus the new one created by this event and use - // the result as the new room state - let state_at_forks = if fork_states.is_empty() { - // State is empty - Default::default() - } else if fork_states.len() == 1 { - fork_states[0].clone() - } else { - // We do need to force an update to this rooms state - update_state = true; - - let mut auth_events = vec![]; - for map in &fork_states { - let mut state_auth = vec![]; - for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_events( - &db, - server_name, - &mut pub_key_map, - &[auth_id.clone()], - &mut auth_cache, - ) - .await - { - // This should always contain exactly one element when Ok - Ok(events) => state_auth.push(events[0].clone()), - Err(e) => { - debug!("Event was not present: {}", e); - } - } - } - auth_events.push(state_auth); - } - - // Add everything we will need to event_map - auth_cache.extend( - auth_events - .iter() - .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) - .flatten(), - ); - auth_cache.extend( - incoming_auth_events - .into_iter() - .map(|pdu| (pdu.event_id().clone(), pdu)), - ); - auth_cache.extend( - state_after - .into_iter() - .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), - ); - - let res = match state_res::StateResolution::resolve( - pdu.room_id(), - &RoomVersionId::Version6, - &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - auth_events - .into_iter() - .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) - .collect(), - &mut auth_cache, - ) { - Ok(res) => res, - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("State resolution failed, either an event could not be found or deserialization".into()), - ); - continue 'main_pdu_loop; - } - }; - - let mut resolved = BTreeMap::new(); - for (k, id) in res { - // We should know of the event but just incase - let pdu = match auth_cache.get(&id) { - Some(pdu) => pdu.clone(), - None => { - error!("Event was not present in auth_cache {}", id); - resolved_map.insert( - event_id.clone(), - Err("Event was not present in auth cache".into()), - ); - continue 'main_pdu_loop; - } - }; - resolved.insert(k, pdu); - } - resolved - }; - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - info!("Appended incoming pdu."); - - // Set the new room state to the resolved state - update_resolved_state( - &db, - pdu.room_id(), - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; - debug!("Updated resolved state"); - - // Event has passed all auth/stateres checks + if let Err(e) = handle_incoming_pdu(&body.origin, &event_id, value, true, &db).await { + resolved_map.insert(event_id, Err(e)); + } } if !resolved_map.is_empty() { @@ -939,26 +559,80 @@ pub async fn send_transaction_message_route<'a>( /// An async function that can recursively calls itself. type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; -/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events -/// Validate any event that is given to us by another server. -/// -/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. 
check content hash, redact if doesn't match -/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events -/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" -/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. if not timeline event: stop -/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events -#[tracing::instrument(skip(db, pub_key_map, auth_cache))] -fn validate_event<'a>( - db: &'a Database, - value: CanonicalJsonObject, - event_id: EventId, - pub_key_map: &'a mut PublicKeyMap, +/// When receiving an event one needs to: +/// 0. Skip the PDU if we already know about it +/// 1. Check the server is in the room +/// 2. Check signatures, otherwise drop +/// 3. Check content hash, redact if doesn't match +/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not +/// timeline events +/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are +/// also rejected "due to auth events" +/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. Persist this event as an outlier +/// 8. If not timeline event: stop +/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline +/// events +/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities +/// doing all the checks in this list starting at 1. These are not timeline events +/// 11. Check the auth of the event passes based on the state of the event +/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by +/// doing state res where one of the inputs was a previously trusted set of state, don't just +/// trust a set of state we got from a remote) +/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" +/// it +/// 14. Use state resolution to find new room state +// We use some AsyncRecursiveResult hacks here so we can call this async funtion recursively +fn handle_incoming_pdu<'a>( origin: &'a ServerName, - auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, (Arc, Option>)> { + event_id: &'a EventId, + value: BTreeMap, + is_timeline_event: bool, + db: &'a Database, +) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // 0. Skip the PDU if we already know about it + if let Ok(Some(pdu)) = db.rooms.get_pdu(&event_id) { + return Ok(Arc::new(pdu)); + } + + // 1. Check the server is in the room + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + // Event is invalid + return Err("Event needs a valid RoomId".to_string()); + } + }; + + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + return Err("Room is unknown to this server".to_string()); + } + } + + let mut pub_key_map = BTreeMap::new(); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) 
+ // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + let mut auth_cache = EventMap::new(); + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys for (signature_server, signature) in match value .get("signatures") .ok_or_else(|| "No signatures in server response pdu.".to_string())? @@ -998,132 +672,443 @@ fn validate_event<'a>( pub_key_map.insert(signature_server.clone(), keys); } + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } Err(e) => { + // Drop error!("{:?}: {}", value, e); return Err("Signature verification failed".to_string()); } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } + Ok(ruma::signatures::Verified::All) => value, }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above + // to our PduEvent type val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let incoming_pdu = serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + .map_err(|_| "Event is not a valid PDU.".to_string())?; + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" debug!("Fetching auth events."); - fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &mut auth_cache) .await .map_err(|e| e.to_string())?; - let pdu = Arc::new(pdu.clone()); + // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + debug!("Checking auth."); - /* - // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - debug!("Fetching prev events."); - let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) - .await - .map_err(|e| e.to_string())?; - */ + // Build map of auth events + let mut auth_events = BTreeMap::new(); + for id in incoming_pdu.auth_events.iter() { + let auth_event = auth_cache.get(id).ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + })?; - // if the previous event was the create event special rules apply - let previous_create = if pdu.auth_events.len() == 1 && pdu.prev_events == pdu.auth_events { - auth_cache.get(&pdu.auth_events[0]).cloned() + match auth_events.entry(( + auth_event.kind.clone(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + Entry::Vacant(v) => { + v.insert(auth_event.clone()); + } + Entry::Occupied(_) => { + return Err( + "Auth event's type and state_key combination exists multiple times." + .to_owned(), + ) + } + } + } + + let create_event = db + .rooms + .room_state_get(&incoming_pdu.room_id, &EventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.")? + .ok_or_else(|| "Failed to find create event in db.")?; + + // The original create event must be in the auth events + if auth_events + .get(&(EventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(&create_event) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + // If the previous event was the create event special rules apply + let previous_create = if incoming_pdu.auth_events.len() == 1 + && incoming_pdu.prev_events == incoming_pdu.auth_events + { + auth_cache + .get(&incoming_pdu.auth_events[0]) + .cloned() + .filter(|maybe_create| **maybe_create == create_event) } else { None }; - // Check that the event passes auth based on the auth_events - debug!("Checking auth."); - let is_authed = state_res::event_auth::auth_check( + let incoming_pdu = Arc::new(incoming_pdu.clone()); + + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &pdu, + &incoming_pdu, previous_create.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - "Auth event not found, event failed recursive auth checks.".to_string() - }) - }) - .collect::, _>>()?, + &auth_events, None, // TODO: third party invite ) - .map_err(|_e| "Auth check failed".to_string())?; - - if !is_authed { - return Err("Event has failed auth check with auth events".to_string()); + .map_err(|_e| "Auth check failed".to_string())? + { + return Err("Event has failed auth check with auth events.".to_string()); } debug!("Validation successful."); - Ok((pdu, previous_create)) - }) -} -#[tracing::instrument(skip(db, key_map, auth_cache))] -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &mut PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - fetch_events(db, origin, key_map, event_ids, auth_cache).await?; - Ok(()) + // 7. Persist the event as an outlier. + db.rooms + .add_pdu_outlier(&incoming_pdu) + .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + debug!("Added pdu as outlier."); + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(incoming_pdu); + } + + // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + + // 10. 
Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. + + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + + debug!("Requesting state at event."); + let (state_at_incoming_event, incoming_auth_events): (StateMap>, Vec>) = + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: &incoming_pdu.room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + debug!("Fetching state events at event."); + let state_vec = match fetch_and_handle_events( + &db, + origin, + &res.pdu_ids, + &mut auth_cache, + ) + .await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch state events.".to_owned()), + }; + + let mut state = BTreeMap::new(); + for pdu in state_vec.into_iter() { + match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { + Entry::Vacant(v) => { + v.insert(pdu); + } + Entry::Occupied(_) => { + return Err( + "State event's type and state_key combination exists multiple times.".to_owned(), + ) + } + } + } + + // The original create event must still be in the state + if state.get(&(EventType::RoomCreate, "".to_owned())).map(|a| a.as_ref()) != Some(&create_event) { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + debug!("Fetching auth chain events at event."); + let incoming_auth_events = match fetch_and_handle_events( + &db, + origin, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch auth chain.".to_owned()), + }; + + (state, incoming_auth_events) + } + Err(_) => { + return Err("Fetching state for event failed".into()); + } + }; + + // 11. Check the auth of the event passes based on the state of the event + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &incoming_pdu, + previous_create.clone(), + &state_at_incoming_event, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed.".to_owned())? + { + return Err("Event has failed auth check with state at the event.".into()); + } + debug!("Auth check succeeded."); + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(incoming_pdu.room_id()) + .map_err(|_| "Failed to load room state.".to_owned())? + .into_iter() + .map(|(k, v)| (k, Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, // TODO: Use correct room version + &incoming_pdu, + previous_create, + ¤t_state, + None, + ) + .map_err(|_e| "Auth check failed.".to_owned())? + { + // Soft fail, we leave the event as an outlier but don't add it to the timeline + return Err("Event has been soft failed".into()); + }; + debug!("Auth check with current state succeeded."); + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. 
We start with the previous extremities (aka leaves) + let mut extremities = db + .rooms + .get_pdu_leaves(&incoming_pdu.room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } + } + + let mut fork_states = BTreeSet::new(); + for id in &extremities { + match db.rooms.get_pdu(&id).map_err(|_| "Failed to ask db for pdu.".to_owned())? { + Some(leaf_pdu) => { + let pdu_shortstatehash = db + .rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?; + + let mut leaf_state = db + .rooms + .state_full(pdu_shortstatehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())? + .into_iter() + .map(|(k, v)| (k, Arc::new(v))) + .collect::>(); + + if let Some(state_key) = &leaf_pdu.state_key { + // Now it's the state after + let key = (leaf_pdu.kind.clone(), state_key.clone()); + leaf_state.insert(key, Arc::new(leaf_pdu)); + } + + fork_states.insert(leaf_state); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + fork_states.insert(current_state); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + state_after.insert( + (incoming_pdu.kind.clone(), state_key.clone()), + incoming_pdu.clone(), + ); + } + fork_states.insert(state_after.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + + let mut update_state = false; + // 14. 
Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.len() == 1 { + // There was only one state, so it has to be the room's current state (because that is + // always included) + info!("Skipping stateres because there is no new state."); + fork_states[0] + .iter() + .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) + .collect() + } else { + // We do need to force an update to this room's state + update_state = true; + + let mut auth_events = vec![]; + for map in &fork_states { + let mut state_auth = vec![]; + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + match fetch_and_handle_events(&db, origin, &[auth_id.clone()], &mut auth_cache) + .await + { + // This should always contain exactly one element when Ok + Ok(events) => state_auth.push(events[0].clone()), + Err(e) => { + debug!("Event was not present: {}", e); + } + } + } + auth_events.push(state_auth); + } + + // Add everything we will need to event_map + auth_cache.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); + auth_cache.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), pdu)), + ); + auth_cache.extend( + state_after + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + + match state_res::StateResolution::resolve( + &incoming_pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id.clone())) + .collect::>() + }) + .collect::>(), + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), + &mut auth_cache, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } + } + }; + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + append_incoming_pdu(&db, &incoming_pdu, extremities, &state_at_incoming_event) + .map_err(|_| "Failed to add pdu to db.".to_owned())?; + debug!("Appended incoming pdu."); + + // Set the new room state to the resolved state + if update_state { + db.rooms + .force_state(&room_id, new_room_state, &db.globals) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + debug!("Updated resolved state"); + + // Event has passed all auth/stateres checks + Ok(incoming_pdu) + }) } /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// -/// 0. Look in the auth_cache -/// 1. Look in the main timeline (pduid_pdu tree) -/// 2. Look at outlier pdu tree -/// 3. Ask origin server over federation -/// 4. TODO: Ask other servers over federation? +/// a. Look in the auth_cache +/// b. Look in the main timeline (pduid_pdu tree) +/// c. Look at outlier pdu tree +/// d. Ask origin server over federation +/// e. TODO: Ask other servers over federation? /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. 
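As a usage sketch of the lookup order described in the doc comment above (cache, then the pduid_pdu and outlier trees, then a federation request to the origin), this is roughly how handle_incoming_pdu resolves an incoming event's auth events in this patch; the surrounding async context and error type are assumed:

    // Resolve all auth events for the incoming PDU. Anything fetched along the
    // way is also inserted into `auth_cache`, so later lookups avoid refetching.
    let auth_pdus: Vec<Arc<PduEvent>> =
        fetch_and_handle_events(&db, origin, &incoming_pdu.auth_events, &mut auth_cache)
            .await
            .map_err(|e| e.to_string())?;
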
//#[tracing::instrument(skip(db, key_map, auth_cache))] -pub(crate) async fn fetch_events( +pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, - key_map: &mut PublicKeyMap, events: &[EventId], auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; for id in events { + // a. Look at auth cache let pdu = match auth_cache.get(id) { Some(pdu) => { debug!("Event found in cache"); pdu.clone() } - // `get_pdu` checks the outliers tree for us + // b. Look in the main timeline (pduid_pdu tree) + // c. Look at outlier pdu tree + // (get_pdu checks both) None => match db.rooms.get_pdu(&id)? { Some(pdu) => { debug!("Event found in outliers"); Arc::new(pdu) } None => { + // d. Ask origin server over federation debug!("Fetching event over federation: {:?}", id); match db .sending @@ -1138,16 +1123,13 @@ pub(crate) async fn fetch_events( debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|e| { - error!("ERROR: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + let pdu = handle_incoming_pdu(origin, &event_id, value, false, db) + .await + .map_err(|e| { + error!("Error: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; - debug!("Added fetched pdu as outlier."); - db.rooms.add_pdu_outlier(&pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1253,180 +1235,15 @@ pub(crate) async fn fetch_signing_keys( )) } -/// Gather all state snapshots needed to resolve the current state of the room. -/// -/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). -/// -/// The state snapshot of the incoming event __needs__ to be added to the resulting list. -#[tracing::instrument(skip(db))] -pub(crate) async fn calculate_forward_extremities( - db: &Database, - pdu: &PduEvent, -) -> Result> { - let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; - - let mut is_incoming_leaf = true; - // Make sure the incoming event is not already a forward extremity - // FIXME: I think this could happen if different servers send us the same event?? - if current_leaves.contains(pdu.event_id()) { - error!("The incoming event is already present in get_pdu_leaves BUG"); - is_incoming_leaf = false; - // Not sure what to do here - } - - // If the incoming event is already referenced by an existing event - // then do nothing - it's not a candidate to be a new extremity if - // it has been referenced. - if db.rooms.is_pdu_referenced(pdu)? { - is_incoming_leaf = false; - } - - // TODO: - // [dendrite] Checks if any other leaves have been referenced and removes them - // but as long as we update the pdu leaves here and for events on our server this - // should not be possible. 
- - // Remove any forward extremities that are referenced by this incoming events prev_events - for incoming_leaf in &pdu.prev_events { - if current_leaves.contains(incoming_leaf) { - if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { - current_leaves.remove(pos); - } - } - } - - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - - Ok(current_leaves) -} - -/// This should always be called after the incoming event has been appended to the DB. -/// -/// This guarantees that the incoming event will be in the state sets (at least our servers -/// and the sending server). -pub(crate) async fn build_forward_extremity_snapshots( - db: &Database, - pdu: Arc, - mut current_state: StateMap>, - current_leaves: &[EventId], -) -> Result>>> { - let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; - - let mut includes_current_state = false; - let mut fork_states = BTreeSet::new(); - for id in current_leaves { - if id == &pdu.event_id { - continue; - } - match db.rooms.get_pdu(id)? { - // We can skip this because it is handled outside of this function - // The current server state and incoming event state are built to be - // the state after. - // This would be the incoming state from the server. - Some(leaf_pdu) => { - let pdu_shortstatehash = db - .rooms - .pdu_shortstatehash(&leaf_pdu.event_id)? - .ok_or_else(|| { - warn!("Leaf pdu: {:?}", leaf_pdu); - Error::bad_database("Found pdu with no statehash in db.") - })?; - - if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { - includes_current_state = true; - } - - let mut state_before = db - .rooms - .state_full(pdu_shortstatehash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect::>(); - - // Now it's the state after - let key = (leaf_pdu.kind.clone(), leaf_pdu.state_key.clone()); - state_before.insert(key, Arc::new(leaf_pdu)); - - fork_states.insert(state_before); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err(Error::bad_database("Missing state snapshot.")); - } - } - } - - // This guarantees that our current room state is included - if !includes_current_state { - current_state.insert((pdu.kind(), pdu.state_key()), pdu); - - fork_states.insert(current_state); - } - - Ok(fork_states) -} - -#[tracing::instrument(skip(db))] -pub(crate) fn update_resolved_state( - db: &Database, - room_id: &RoomId, - state: Option>>, -) -> Result<()> { - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let mut new_state = HashMap::new(); - for ((ev_type, state_k), pdu) in state { - new_state.insert( - ( - ev_type, - state_k.ok_or(Error::Conflict( - "update_resolved_state: State contained non state event", - ))?, - ), - pdu.event_id.clone(), - ); - } - - db.rooms.force_state(room_id, new_state, &db.globals)?; - } - - Ok(()) -} - /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. 
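The doc comment above describes the last step of handle_incoming_pdu; for reference, this is roughly how the function hands the event over once every auth and state-resolution check has passed (sketch only, using the variable names introduced earlier in this patch):

    // Persist the event with the state *at* the event (not the state after it),
    // then overwrite the room state only if state resolution produced a new one.
    append_incoming_pdu(&db, &incoming_pdu, extremities, &state_at_incoming_event)
        .map_err(|_| "Failed to add pdu to db.".to_owned())?;

    if update_state {
        db.rooms
            .force_state(&room_id, new_room_state, &db.globals)
            .map_err(|_| "Failed to set new room state.".to_owned())?;
    }
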
#[tracing::instrument(skip(db))] pub(crate) fn append_incoming_pdu( db: &Database, pdu: &PduEvent, - new_room_leaves: &[EventId], + new_room_leaves: HashSet, state: &StateMap>, ) -> Result<()> { - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - let mut new_state = HashMap::new(); - for ((ev_type, state_k), state_pdu) in state { - new_state.insert( - ( - ev_type.clone(), - state_k.clone().ok_or(Error::Conflict( - "append_incoming_pdu: State contained non state event", - ))?, - ), - state_pdu.event_id.clone(), - ); - } - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; - let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1434,19 +1251,18 @@ pub(crate) fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let state_hash = db.rooms.append_to_state(&pdu, &db.globals)?; + db.rooms + .set_event_state(&pdu.event_id, state, &db.globals)?; db.rooms.append_pdu( pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &new_room_leaves, + &new_room_leaves.into_iter().collect::>(), &db, )?; - db.rooms.set_room_state(pdu.room_id(), state_hash)?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces From 5a13f6f7e063966ca14004e2b8fe287b34771d62 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Wed, 24 Mar 2021 18:25:21 +0100 Subject: [PATCH 0488/1727] Remove build container and use the always present build artifact directly Proper chmod caddy and conduit binaries --- tests/Complement.Dockerfile | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 370db7c..abae3fb 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -1,39 +1,34 @@ -FROM valkum/docker-rust-ci:latest as builder -WORKDIR /workdir - -ARG RUSTC_WRAPPER -ARG AWS_ACCESS_KEY_ID -ARG AWS_SECRET_ACCESS_KEY -ARG SCCACHE_BUCKET -ARG SCCACHE_ENDPOINT -ARG SCCACHE_S3_USE_SSL - -COPY . . -RUN test -e cached_target/release/conduit || cargo build --release - +# For use in our CI only. 
This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit FROM valkum/docker-rust-ci:latest WORKDIR /workdir RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_amd64.tar.gz" RUN tar xzf caddy_2.2.1_linux_amd64.tar.gz -COPY --from=builder /workdir/target/debug/conduit /workdir/conduit +COPY cached_target/release/conduit /workdir/conduit +RUN chmod +x /workdir/conduit +RUN chmod +x /workdir/caddy -COPY Rocket-example.toml Rocket.toml +COPY conduit-example.toml conduit.toml ENV SERVER_NAME=localhost ENV ROCKET_LOG=normal +ENV CONDUIT_CONFIG=/workdir/conduit.toml -RUN sed -i "s/port = 14004/port = 8008/g" Rocket.toml -RUN echo "federation_enabled = true" >> Rocket.toml +RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml +RUN echo "allow_federation = true" >> conduit.toml +RUN echo "allow_encryption = true" >> conduit.toml +RUN echo "allow_registration = true" >> conduit.toml +RUN echo "log = \"info,rocket=info,_=off,sled=off\"" >> conduit.toml +RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. -RUN echo '{"apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"localhost:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json +RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json EXPOSE 8008 8448 CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ - sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ + sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ /workdir/caddy start --config caddy.json > /dev/null && \ /workdir/conduit From cd4dc137310f8cb7b8df97df549b3da95a4d68af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Mar 2021 11:10:45 +0100 Subject: [PATCH 0489/1727] fix: use correct room versions --- Cargo.lock | 141 ++++++++++++----------- conduit-example.toml | 2 +- src/client_server/capabilities.rs | 1 - src/client_server/membership.rs | 21 ++-- 
src/client_server/room.rs | 5 +- src/database.rs | 2 +- src/database/rooms.rs | 26 ++++- src/database/sending.rs | 8 +- src/server_server.rs | 185 +++++++++++++++++++----------- 9 files changed, 236 insertions(+), 155 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9580942..2a80291 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,7 +229,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.25", + "time 0.2.26", "version_check", ] @@ -580,9 +580,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02efba560f227847cb41463a7395c514d127d4f74fff12ef0137fff1b84b96c4" +checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" dependencies = [ "color_quant", "weezl", @@ -596,9 +596,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" dependencies = [ "bytes", "fnv", @@ -661,12 +661,13 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] @@ -845,9 +846,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.48" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc9f84f9b115ce7843d60706df1422a916680bfdfcbdb0447c5614ff9d7e4d78" +checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" dependencies = [ "wasm-bindgen", ] @@ -883,9 +884,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b07a082330a35e43f63177cc01689da34fbffa0105e1246cf0311472cac73a" +checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" [[package]] name = "linked-hash-map" @@ -955,9 +956,9 @@ checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +checksum = "cc14fc54a812b4472b4113facc3e44d099fbc0ea2ce0551fa5c703f8edfbfd38" dependencies = [ "autocfg", ] @@ -979,9 +980,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" +checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" dependencies = [ "libc", "log", @@ -992,11 +993,10 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi", ] @@ -1097,15 +1097,15 @@ checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl" -version = "0.10.32" +version = "0.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" +checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" dependencies = [ "bitflags", "cfg-if", "foreign-types", - "lazy_static", "libc", + "once_cell", "openssl-sys", ] @@ -1117,18 +1117,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.14.0+1.1.1j" +version = "111.15.0+1.1.1k" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b569b5bd7e5462a1700f595c7c7d487691d73b5ce064176af7f9f0cbb80a9" +checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.60" +version = "0.9.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" +checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" dependencies = [ "autocfg", "cc", @@ -1203,9 +1203,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" +checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = "pear" @@ -1249,18 +1249,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" dependencies = [ "proc-macro2", "quote", @@ -1459,9 +1459,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.4" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54fd1046a3107eb58f42de31d656fee6853e5d276c455fd943742dce89fc3dd3" +checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" dependencies = [ "aho-corasick", "memchr", @@ -1574,7 +1574,7 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.25", + "time 0.2.26", "tokio", "ubyte", "version_check", @@ -1612,7 +1612,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.25", + "time 0.2.26", "tokio", "tokio-rustls", "uncased", @@ -1953,18 +1953,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" +checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" dependencies = [ "proc-macro2", "quote", @@ -2089,9 +2089,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ "version_check", ] @@ -2105,7 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=improvements#1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0" +source = "git+https://github.com/timokoesters/state-res?branch=improvements#625c37cb776b381a83ab7ee58b13e32506849648" dependencies = [ "itertools 0.10.0", "log", @@ -2167,9 +2167,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd9bc7ccc2688b3344c2f48b9b546648b25ce0b20fc717ee7fa7981a8ca9717" +checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" dependencies = [ "proc-macro2", "quote", @@ -2262,9 +2262,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" +checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" dependencies = [ "const_fn", "libc", @@ -2315,9 +2315,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d56477f6ed99e10225f38f9f75f872f29b8b8bd8c0b946f63345bb144e9eeda" +checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" dependencies = [ "autocfg", "bytes", @@ -2366,9 +2366,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec31e5cc6b46e653cf57762f36f71d5e6386391d88a72fd6db4508f8f676fb29" +checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" dependencies = [ "bytes", "futures-core", @@ -2483,9 +2483,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" +checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" dependencies = [ "async-trait", "cfg-if", @@ -2501,15 +2501,16 @@ dependencies = [ "rand", "smallvec", "thiserror", + "tinyvec", "tokio", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.20.0" +version = "0.20.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" +checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" dependencies = [ "cfg-if", "futures-util", @@ -2605,9 +2606,9 @@ checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "want" @@ -2633,9 +2634,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" +checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" dependencies = [ "cfg-if", "serde", @@ -2645,9 +2646,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" +checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" dependencies = [ "bumpalo", "lazy_static", @@ -2660,9 +2661,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" +checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" dependencies = [ "cfg-if", "js-sys", @@ -2672,9 +2673,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" +checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2682,9 +2683,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" +checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" dependencies = [ "proc-macro2", "quote", @@ -2695,15 +2696,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" +checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" [[package]] name = "web-sys" -version = "0.3.48" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec600b26223b2948cedfde2a0aa6756dcf1fef616f43d7b3097aaf53a6c4d92b" +checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/conduit-example.toml b/conduit-example.toml index fea84bd..87f959d 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -35,7 +35,7 @@ max_request_size = 20_000_000 # in bytes 
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time -#log = "info,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index a3c0db6..ddc213d 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -17,7 +17,6 @@ use rocket::get; #[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); let mut capabilities = Capabilities::new(); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6a64ea4..5d630b4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{error, info, warn}; +use log::{error, warn}; use ruma::{ api::{ client::{ @@ -455,7 +455,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + ver: &[RoomVersionId::Version6], }, ) .await; @@ -469,6 +469,11 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; + let room_version = match make_join_response.room_version { + Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + let mut join_event_stub = serde_json::from_str::(make_join_response.event.json().get()) .map_err(|_| { @@ -505,14 +510,14 @@ async fn join_room_by_id_helper( db.globals.server_name().as_str(), db.globals.keypair(), &mut join_event_stub, - &RoomVersionId::Version6, + &room_version, ) .expect("event is valid, we just created it"); // Generate event id let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&join_event_stub, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -546,7 +551,7 @@ async fn join_room_by_id_helper( })?; let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&value, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -586,8 +591,7 @@ async fn join_room_by_id_helper( }) }) { - let (id, pdu) = pdu?; - info!("adding {} to outliers: {:#?}", id, pdu); + let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { @@ -641,8 +645,7 @@ async fn join_room_by_id_helper( }) }) { - let (id, pdu) = pdu?; - info!("adding {} to outliers: {:#?}", id, pdu); + let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 399677f..f8d980b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ 
-343,10 +343,7 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!( - body.new_version, - RoomVersionId::Version5 | RoomVersionId::Version6 - ) { + if !matches!(body.new_version, RoomVersionId::Version6) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/database.rs b/src/database.rs index b14a912..d8734b5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -71,7 +71,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,rocket=off,_=off,sled=off".to_owned() + "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } #[derive(Clone)] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e1e97b4..676ac07 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -420,6 +420,27 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else::, _, _>( + || Ok(None), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) + }) + .transpose() + } + /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. @@ -1002,6 +1023,7 @@ impl Rooms { None }; + // If there was no create event yet, assume we are creating a version 6 room right now let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { create_event.room_version }); @@ -1093,14 +1115,14 @@ impl Rooms { db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, - &RoomVersionId::Version6, + &room_version, ) .expect("event is valid, we just created it"); // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&pdu_json, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/database/sending.rs b/src/database/sending.rs index b792479..82d2cdd 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{info, warn}; +use log::warn; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -518,7 +518,11 @@ impl Sending { ) .await .map(|response| { - info!("server response: {:?}", response); + for pdu in response.pdus { + if pdu.1.is_err() { + warn!("Failed to send to {}: {:?}", server, pdu); + } + } kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/server_server.rs b/src/server_server.rs index e461b5a..28540eb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,10 +20,10 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, - serde::to_canonical_value, + events::{room::create::CreateEventContent, EventType}, + serde::{to_canonical_value, Raw}, signatures::CanonicalJsonValue, - EventId, RoomId, 
RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -180,7 +180,12 @@ where .collect::>(); if status != 200 { - info!("{} {}:\n{}", url, status, String::from_utf8_lossy(&body),); + info!( + "{} {}: {}", + url, + status, + String::from_utf8_lossy(&body).lines().collect::>().join(" ") + ); } let response = T::IncomingResponse::try_from( @@ -534,6 +539,16 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); + let mut pub_key_map = BTreeMap::new(); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + let mut auth_cache = EventMap::new(); + for pdu in body.pdus.iter() { // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { @@ -544,13 +559,27 @@ pub async fn send_transaction_message_route<'a>( } }; - if let Err(e) = handle_incoming_pdu(&body.origin, &event_id, value, true, &db).await { + if let Err(e) = handle_incoming_pdu( + &body.origin, + &event_id, + value, + true, + &db, + &mut pub_key_map, + &mut auth_cache, + ) + .await + { resolved_map.insert(event_id, Err(e)); } } - if !resolved_map.is_empty() { - warn!("These PDU's failed {:?}", resolved_map); + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if e != "Room is unknown to this server." { + warn!("Incoming PDU failed {:?}", pdu); + } + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -589,12 +618,14 @@ fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, + pub_key_map: &'a mut BTreeMap>, + auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // 0. Skip the PDU if we already know about it - if let Ok(Some(pdu)) = db.rooms.get_pdu(&event_id) { + if let Ok(Some(pdu)) = db.rooms.get_non_outlier_pdu(&event_id) { return Ok(Arc::new(pdu)); } @@ -610,27 +641,17 @@ fn handle_incoming_pdu<'a>( Some(id) => id, None => { // Event is invalid - return Err("Event needs a valid RoomId".to_string()); + return Err("Event needs a valid RoomId.".to_string()); } }; match db.rooms.exists(&room_id) { Ok(true) => {} _ => { - return Err("Room is unknown to this server".to_string()); + return Err("Room is unknown to this server.".to_string()); } } - let mut pub_key_map = BTreeMap::new(); - - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) - // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. 
- let mut auth_cache = EventMap::new(); - // We go through all the signatures we see on the value and fetch the corresponding signing // keys for (signature_server, signature) in match value @@ -674,22 +695,35 @@ fn handle_incoming_pdu<'a>( // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let mut val = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { - Err(e) => { - // Drop - error!("{:?}: {}", value, e); - return Err("Signature verification failed".to_string()); + let create_event = db + .rooms + .room_state_get(&room_id, &EventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.")? + .ok_or_else(|| "Failed to find create event in db.")?; + + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + + let room_version = create_event_content.room_version; + + let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &room_version) { + Err(e) => { + // Drop + error!("{:?}: {}", value, e); + return Err("Signature verification failed".to_string()); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + match ruma::signatures::redact(&value, &room_version) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; + } + Ok(ruma::signatures::Verified::All) => value, + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type @@ -705,9 +739,15 @@ fn handle_incoming_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" debug!("Fetching auth events."); - fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &mut auth_cache) - .await - .map_err(|e| e.to_string())?; + fetch_and_handle_events( + db, + origin, + &incoming_pdu.auth_events, + pub_key_map, + auth_cache, + ) + .await + .map_err(|e| e.to_string())?; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events debug!("Checking auth."); @@ -738,12 +778,6 @@ fn handle_incoming_pdu<'a>( } } - let create_event = db - .rooms - .room_state_get(&incoming_pdu.room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.")? 
- .ok_or_else(|| "Failed to find create event in db.")?; - // The original create event must be in the auth events if auth_events .get(&(EventType::RoomCreate, "".to_owned())) @@ -768,7 +802,7 @@ fn handle_incoming_pdu<'a>( let incoming_pdu = Arc::new(incoming_pdu.clone()); if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + &room_version, &incoming_pdu, previous_create.clone(), &auth_events, @@ -810,7 +844,7 @@ fn handle_incoming_pdu<'a>( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: &incoming_pdu.room_id, + room_id: &room_id, event_id: &incoming_pdu.event_id, }, ) @@ -822,7 +856,8 @@ fn handle_incoming_pdu<'a>( &db, origin, &res.pdu_ids, - &mut auth_cache, + pub_key_map, + auth_cache, ) .await { @@ -854,7 +889,8 @@ fn handle_incoming_pdu<'a>( &db, origin, &res.auth_chain_ids, - &mut auth_cache, + pub_key_map, + auth_cache, ) .await { @@ -871,7 +907,7 @@ fn handle_incoming_pdu<'a>( // 11. Check the auth of the event passes based on the state of the event if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + &room_version, &incoming_pdu, previous_create.clone(), &state_at_incoming_event, @@ -886,14 +922,14 @@ fn handle_incoming_pdu<'a>( // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it let current_state = db .rooms - .room_state_full(incoming_pdu.room_id()) + .room_state_full(&room_id) .map_err(|_| "Failed to load room state.".to_owned())? .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(); if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, // TODO: Use correct room version + &room_version, &incoming_pdu, previous_create, ¤t_state, @@ -910,7 +946,7 @@ fn handle_incoming_pdu<'a>( // applied. We start with the previous extremities (aka leaves) let mut extremities = db .rooms - .get_pdu_leaves(&incoming_pdu.room_id) + .get_pdu_leaves(&room_id) .map_err(|_| "Failed to load room leaves".to_owned())?; // Remove any forward extremities that are referenced by this incoming event's prev_events @@ -922,7 +958,11 @@ fn handle_incoming_pdu<'a>( let mut fork_states = BTreeSet::new(); for id in &extremities { - match db.rooms.get_pdu(&id).map_err(|_| "Failed to ask db for pdu.".to_owned())? { + match db + .rooms + .get_pdu(&id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? 
+ { Some(leaf_pdu) => { let pdu_shortstatehash = db .rooms @@ -985,7 +1025,7 @@ fn handle_incoming_pdu<'a>( } else if fork_states.len() == 1 { // There was only one state, so it has to be the room's current state (because that is // always included) - info!("Skipping stateres because there is no new state."); + debug!("Skipping stateres because there is no new state."); fork_states[0] .iter() .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) @@ -998,8 +1038,14 @@ fn handle_incoming_pdu<'a>( for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_and_handle_events(&db, origin, &[auth_id.clone()], &mut auth_cache) - .await + match fetch_and_handle_events( + &db, + origin, + &[auth_id.clone()], + pub_key_map, + auth_cache, + ) + .await { // This should always contain exactly one element when Ok Ok(events) => state_auth.push(events[0].clone()), @@ -1030,8 +1076,8 @@ fn handle_incoming_pdu<'a>( ); match state_res::StateResolution::resolve( - &incoming_pdu.room_id, - &RoomVersionId::Version6, + &room_id, + &room_version, &fork_states .into_iter() .map(|map| { @@ -1044,7 +1090,7 @@ fn handle_incoming_pdu<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut auth_cache, + auth_cache, ) { Ok(new_state) => new_state, Err(_) => { @@ -1089,6 +1135,7 @@ pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, events: &[EventId], + pub_key_map: &mut BTreeMap>, auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; @@ -1123,12 +1170,20 @@ pub(crate) async fn fetch_and_handle_events( debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let pdu = handle_incoming_pdu(origin, &event_id, value, false, db) - .await - .map_err(|e| { - error!("Error: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + let pdu = handle_incoming_pdu( + origin, + &event_id, + value, + false, + db, + pub_key_map, + auth_cache, + ) + .await + .map_err(|e| { + error!("Error: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; pdu } From db7044a950bad897ed16725d7b18b9b47b767342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Mar 2021 13:41:05 +0100 Subject: [PATCH 0490/1727] sytest test --- src/database/rooms.rs | 6 +- src/server_server.rs | 9 +- tests/sytest/sytest-whitelist | 384 ++++++++++++++++++++++++++++++++++ 3 files changed, 394 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 676ac07..5d43626 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1081,7 +1081,7 @@ impl Rooms { signatures: BTreeMap::new(), }; - if !state_res::auth_check( + let auth_check = state_res::auth_check( &room_version, &Arc::new(pdu.clone()), create_prev_event, @@ -1091,7 +1091,9 @@ impl Rooms { .map_err(|e| { error!("{:?}", e); Error::bad_database("Auth check failed.") - })? 
{ + })?; + + if !auth_check { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Event is not authorized.", diff --git a/src/server_server.rs b/src/server_server.rs index 28540eb..bb0b9af 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -184,7 +184,10 @@ where "{} {}: {}", url, status, - String::from_utf8_lossy(&body).lines().collect::>().join(" ") + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") ); } @@ -698,8 +701,8 @@ fn handle_incoming_pdu<'a>( let create_event = db .rooms .room_state_get(&room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.")? - .ok_or_else(|| "Failed to find create event in db.")?; + .map_err(|_| "Failed to ask database for event.".to_owned())? + .ok_or_else(|| "Failed to find create event in db.".to_owned())?; let create_event_content = serde_json::from_value::>(create_event.content.clone()) diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index d3271dd..eda851a 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -1,71 +1,335 @@ +/event/ does not allow access to events before the user joined +/event/ on joined room works +/event/ on non world readable room does not work /joined_members return joined members /joined_rooms returns only joined rooms +/whois 3pid invite join valid signature but revoked keys are rejected 3pid invite join valid signature but unreachable ID server are rejected 3pid invite join with wrong but valid signature are rejected +A change to displayname should appear in incremental /sync +A full_state incremental update returns all state +A full_state incremental update returns only recent timeline +A message sent after an initial sync appears in the timeline of an incremental sync. 
+A next_batch token can be used in the v1 messages API +A pair of events which redact each other should be ignored +A pair of servers can establish a join in a v2 room +A prev_batch token can be used in the v1 messages API +AS can create a user +AS can create a user with an underscore +AS can create a user with inhibit_login +AS can set avatar for ghosted users +AS can set displayname for ghosted users +AS can't set displayname for random users AS cannot create users outside its own namespace +AS user (not ghost) can join room without registering +AS user (not ghost) can join room without registering, with user_id query param +After changing password, a different session no longer works by default +After changing password, can log in with new password +After changing password, can't log in with old password +After changing password, different sessions can optionally be kept +After changing password, existing session still works After deactivating account, can't log in with an email +After deactivating account, can't log in with password Alias creators can delete alias with no ops Alias creators can delete canonical alias with no ops Alternative server names do not cause a routing loop +An event which redacts an event in a different room should be ignored +An event which redacts itself should be ignored +Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list +Backfill checks the events requested belong to the room +Backfill works correctly with history visibility set to joined +Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +Banned servers cannot /event_auth +Banned servers cannot /invite +Banned servers cannot /make_join +Banned servers cannot /make_leave +Banned servers cannot /send_join +Banned servers cannot /send_leave +Banned servers cannot backfill +Banned servers cannot get missing events +Banned servers cannot get room state +Banned servers cannot get room state ids +Banned servers cannot send events +Banned user is kicked and may not rejoin until unbanned Both GET and PUT work +Can /sync newly created room Can add account data +Can add account data to room Can add tag +Can claim one time key using POST +Can claim remote one time key using POST Can create filter +Can deactivate account Can delete canonical alias +Can download file 'ascii' +Can download file 'name with spaces' +Can download file 'name;with;semicolons' +Can download filter +Can download specifying a different ASCII file name +Can download specifying a different Unicode file name +Can download with Unicode file name locally +Can download with Unicode file name over federation +Can download without a file name locally +Can download without a file name over federation +Can forget room you've been kicked from +Can get 'm.room.name' state for a departed room (SPEC-216) +Can get account data without syncing +Can get remote public room list +Can get room account data without syncing +Can get rooms/{roomId}/members +Can get rooms/{roomId}/members for a departed room (SPEC-216) +Can get rooms/{roomId}/state for a departed room (SPEC-216) Can invite users to invite-only rooms Can list tags for a room Can logout all devices Can logout current device +Can paginate public room list +Can pass a JSON filter as a query parameter +Can query device keys using POST +Can query remote device keys using POST +Can query specific device keys using POST Can re-join room if re-invited Can read configuration endpoint +Can receive redactions from 
regular users over federation in room version 1 +Can receive redactions from regular users over federation in room version 2 +Can receive redactions from regular users over federation in room version 3 +Can receive redactions from regular users over federation in room version 4 +Can receive redactions from regular users over federation in room version 5 +Can receive redactions from regular users over federation in room version 6 Can recv a device message using /sync +Can recv a device message using /sync +Can recv device messages over federation Can recv device messages until they are acknowledged +Can recv device messages until they are acknowledged +Can reject invites over federation for rooms with version 1 +Can reject invites over federation for rooms with version 2 +Can reject invites over federation for rooms with version 3 +Can reject invites over federation for rooms with version 4 +Can reject invites over federation for rooms with version 5 +Can reject invites over federation for rooms with version 6 Can remove tag +Can search public room list Can send a message directly to a device using PUT /sendToDevice +Can send a message directly to a device using PUT /sendToDevice Can send a to-device message to two users which both receive it using /sync +Can send image in room message Can send messages with a wildcard device id +Can send messages with a wildcard device id Can send messages with a wildcard device id to two devices +Can send messages with a wildcard device id to two devices Can sync +Can sync a joined room +Can sync a room with a message with a transaction id +Can sync a room with a single message +Can upload device keys Can upload with ASCII file name Can upload with Unicode file name Can upload without a file name +Can't deactivate account with wrong password +Can't forget room you're still in +Changes to state are included in an gapped incremental sync +Changes to state are included in an incremental sync Changing the actions of an unknown default rule fails with 404 Changing the actions of an unknown rule fails with 404 Checking local federation server +Creators can delete alias Current state appears in timeline in private history Current state appears in timeline in private history with many messages before +DELETE /device/{deviceId} +DELETE /device/{deviceId} requires UI auth user to match device owner +DELETE /device/{deviceId} with no body gives a 401 Deleted tags appear in an incremental v2 /sync Deleting a non-existent alias should return a 404 +Device list doesn't change if remote server is down +Device messages over federation wake up /sync Device messages wake up /sync +Device messages wake up /sync Device messages with the same txn_id are deduplicated +Device messages with the same txn_id are deduplicated +Enabling an unknown default rule fails with 404 +Event size limits +Event with an invalid signature in the send_join response should not cause room join to fail Events come down the correct room +Events whose auth_events are in the wrong room do not mess up the room state +Existing members see new members' join events +Federation key API allows unsigned requests for keys +Federation key API can act as a notary server via a GET request +Federation key API can act as a notary server via a POST request +Federation rejects inbound events where the prev_events cannot be found +Fetching eventstream a second time doesn't yield the message again +Forgetting room does not show up in v2 /sync +Full state sync includes joined rooms +GET /capabilities is present and well 
formed for registered user GET /device/{deviceId} GET /device/{deviceId} gives a 404 for unknown devices GET /devices +GET /directory/room/:room_alias yields room ID +GET /events initially GET /events with negative 'limit' GET /events with non-numeric 'limit' GET /events with non-numeric 'timeout' +GET /initialSync initially GET /joined_rooms lists newly-created room GET /login yields a set of flows GET /media/r0/download can fetch the value again GET /profile/:user_id/avatar_url publicly accessible GET /profile/:user_id/displayname publicly accessible +GET /publicRooms includes avatar URLs GET /publicRooms lists newly-created room +GET /publicRooms lists rooms +GET /r0/capabilities is not public GET /register yields a set of flows +GET /rooms/:room_id/joined_members fetches my membership +GET /rooms/:room_id/messages returns a message GET /rooms/:room_id/state fetches entire room state GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +GET /rooms/:room_id/state/m.room.name gets name +GET /rooms/:room_id/state/m.room.power_levels can fetch levels +GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +GET /rooms/:room_id/state/m.room.topic gets topic +Get left notifs for other users in sync and /keys/changes when user leaves +Getting messages going forward is limited for a departed room (SPEC-216) Getting push rules doesn't corrupt the cache SYN-390 +Getting state IDs checks the events requested belong to the room +Getting state checks the events requested belong to the room +Ghost user must register before joining room +Guest non-joined user cannot call /events on default room +Guest non-joined user cannot call /events on invited room +Guest non-joined user cannot call /events on joined room +Guest non-joined user cannot call /events on shared room +Guest non-joined users can get individual state for world_readable rooms +Guest non-joined users can get individual state for world_readable rooms after leaving +Guest non-joined users can get state for world_readable rooms +Guest non-joined users cannot room initalSync for non-world_readable rooms +Guest non-joined users cannot send messages to guest_access rooms if not joined +Guest user can set display names +Guest user cannot call /events globally +Guest user cannot upgrade other users +Guest users can accept invites to private rooms over federation +Guest users can join guest_access rooms +Guest users can send messages to guest_access rooms if joined +If a device list update goes missing, the server resyncs on the next one +If remote user leaves room we no longer receive device updates +If remote user leaves room, changes device and rejoins we see update in /keys/changes +If remote user leaves room, changes device and rejoins we see update in sync +Inbound /make_join rejects attempts to join rooms where all users have left +Inbound /v1/make_join rejects remote attempts to join local users to rooms +Inbound /v1/send_join rejects incorrectly-signed joins +Inbound /v1/send_join rejects joins from other servers +Inbound /v1/send_leave rejects leaves from other servers +Inbound federation accepts a second soft-failed event +Inbound federation accepts attempts to join v2 rooms from servers with support +Inbound federation can backfill events +Inbound federation can get public room list +Inbound federation can get state for a room +Inbound federation can get state_ids for a room +Inbound federation can query profile data 
+Inbound federation can query room alias directory +Inbound federation can receive events +Inbound federation can receive invites via v1 API +Inbound federation can receive invites via v2 API +Inbound federation can receive redacted events +Inbound federation can receive v1 /send_join +Inbound federation can receive v2 /send_join +Inbound federation can return events +Inbound federation can return missing events for invite visibility +Inbound federation can return missing events for world_readable visibility +Inbound federation correctly soft fails events +Inbound federation of state requires event_id as a mandatory paramater +Inbound federation of state_ids requires event_id as a mandatory paramater +Inbound federation rejects attempts to join v1 rooms from servers without v1 support +Inbound federation rejects attempts to join v2 rooms from servers lacking version support +Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +Inbound federation rejects invite rejections which include invalid JSON for room version 6 +Inbound federation rejects invites which include invalid JSON for room version 6 +Inbound federation rejects receipts from wrong remote +Inbound federation rejects remote attempts to join local users to rooms +Inbound federation rejects remote attempts to kick local users to rooms +Inbound federation rejects typing notifications from wrong remote +Inbound: send_join rejects invalid JSON for room version 6 +Invalid JSON floats +Invalid JSON integers +Invalid JSON special values +Invited user can reject invite +Invited user can reject invite over federation +Invited user can reject invite over federation for empty room +Invited user can reject invite over federation several times +Invited user can see room metadata +Inviting an AS-hosted user asks the AS server +Lazy loading parameters in the filter are strictly boolean +Left rooms appear in the leave section of full state sync +Local delete device changes appear in v2 /sync +Local device key changes appear in /keys/changes +Local device key changes appear in v2 /sync +Local device key changes get to remote servers +Local new device changes appear in v2 /sync +Local non-members don't see posted message events +Local room members can get room messages +Local room members see posted message events +Local update device changes appear in v2 /sync +Local users can peek by room alias +Local users can peek into world_readable rooms by room ID +Message history can be paginated +Message history can be paginated over federation +Name/topic keys are correct +New account data appears in incremental v2 /sync +New read receipts appear in incremental v2 /sync +New room members see their own join event +New users appear in /keys/changes +Newly banned rooms appear in the leave section of incremental sync +Newly joined room is included in an incremental sync +Newly joined room is included in an incremental sync after invite +Newly left rooms appear in the leave section of gapped sync +Newly left rooms appear in the leave section of incremental sync Newly updated tags appear in an incremental v2 /sync +Non-numeric ports in server names are rejected +Outbound federation can backfill events +Outbound federation can query profile data +Outbound federation can query room alias directory +Outbound federation can query v1 /send_join +Outbound federation can query v2 /send_join +Outbound federation can request missing events +Outbound federation can send events +Outbound federation can send invites via v1 API +Outbound 
federation can send invites via v2 API +Outbound federation can send room-join requests +Outbound federation correctly handles unsupported room versions +Outbound federation passes make_join failures through to the client +Outbound federation rejects backfill containing invalid JSON for events in room version 6 +Outbound federation rejects m.room.create events with an unknown room version +Outbound federation rejects send_join responses with no m.room.create event +Outbound federation sends receipts +Outbound federation will ignore a missing event with bad JSON for room version 6 +POST /createRoom creates a room with the given version +POST /createRoom ignores attempts to set the room version via creation_content POST /createRoom makes a private room POST /createRoom makes a private room with invites +POST /createRoom makes a public room +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +POST /createRoom rejects attempts to create rooms with numeric versions +POST /createRoom rejects attempts to create rooms with unknown versions +POST /createRoom with creation content +POST /join/:room_alias can join a room +POST /join/:room_alias can join a room with custom content POST /join/:room_id can join a room +POST /join/:room_id can join a room with custom content POST /login as non-existing user is rejected POST /login can log in as a user POST /login can log in as a user with just the local part of the id POST /login returns the same device_id as that in the request POST /login wrong password is rejected POST /media/r0/upload can create an upload +POST /redact disallows redaction of event in different room +POST /register allows registration of usernames with '-' +POST /register allows registration of usernames with '.' +POST /register allows registration of usernames with '/' +POST /register allows registration of usernames with '3' +POST /register allows registration of usernames with '=' +POST /register allows registration of usernames with '_' +POST /register allows registration of usernames with 'q' POST /register can create a user POST /register downcases capitals in usernames POST /register rejects registration of usernames with '!' 
@@ -88,41 +352,161 @@ POST /rooms/:room_id/ban can ban a user POST /rooms/:room_id/invite can send an invite POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room +POST /rooms/:room_id/read_markers can create read marker +POST /rooms/:room_id/receipt can create receipts +POST /rooms/:room_id/redact/:event_id as original message sender redacts message +POST /rooms/:room_id/redact/:event_id as power user redacts message +POST /rooms/:room_id/redact/:event_id as random user does not redact message +POST /rooms/:room_id/send/:event_type sends a message POST /rooms/:room_id/state/m.room.name sets name POST /rooms/:room_id/state/m.room.topic sets topic POST /rooms/:room_id/upgrade can upgrade a room version +POST rejects invalid utf-8 in JSON POSTed media can be thumbnailed PUT /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} updates device fields PUT /directory/room/:room_alias creates alias PUT /profile/:user_id/avatar_url sets my avatar PUT /profile/:user_id/displayname sets my name +PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +PUT /rooms/:room_id/send/:event_type/:txn_id sends a message PUT /rooms/:room_id/state/m.room.power_levels can set levels +PUT /rooms/:room_id/typing/:user_id sets typing notification PUT power_levels should not explode if the old power levels were empty +Peeked rooms only turn up in the sync for the device who peeked them +Previously left rooms don't appear in the leave section of sync Push rules come down in an initial /sync Read markers appear in incremental v2 /sync Read markers appear in initial v2 /sync Read markers can be updated +Read receipts appear in initial v2 /sync +Real non-joined user cannot call /events on default room +Real non-joined user cannot call /events on invited room +Real non-joined user cannot call /events on joined room +Real non-joined user cannot call /events on shared room +Real non-joined users can get individual state for world_readable rooms +Real non-joined users can get individual state for world_readable rooms after leaving +Real non-joined users can get state for world_readable rooms +Real non-joined users cannot room initalSync for non-world_readable rooms +Real non-joined users cannot send messages to guest_access rooms if not joined +Receipts must be m.read +Redaction of a redaction redacts the redaction reason Regular users can add and delete aliases in the default room configuration +Regular users can add and delete aliases when m.room.aliases is restricted +Regular users cannot create room aliases within the AS namespace +Regular users cannot register within the AS namespace +Remote media can be thumbnailed +Remote room alias queries can handle Unicode +Remote room members also see posted message events +Remote room members can get room messages +Remote user can backfill in a room with version 1 +Remote user can backfill in a room with version 2 +Remote user can backfill in a room with version 3 +Remote user can backfill in a room with version 4 +Remote user can backfill in a room with version 5 +Remote user can backfill in a room with version 6 +Remote users can join room by alias +Remote users may not join unfederated rooms +Request to logout with invalid an access token is rejected +Request to logout without an access token is rejected +Room aliases can contain Unicode Room creation reports m.room.create to myself Room creation reports m.room.member to myself +Room members can join a room with an overridden displayname +Room members can 
override their displayname on a room-specific basis +Room state at a rejected message event is the same as its predecessor +Room state at a rejected state event is the same as its predecessor Rooms a user is invited to appear in an incremental sync Rooms a user is invited to appear in an initial sync +Rooms can be created with an initial invite list (SYN-205) +Server correctly handles incoming m.device_list_update +Server correctly handles transactions that break edu limits +Server correctly resyncs when client query keys and there is no remote cache +Server correctly resyncs when server leaves and rejoins a room +Server rejects invalid JSON in a version 6 room Setting room topic reports m.room.topic to myself +Should not be able to take over the room by pretending there is no PL event Should reject keys claiming to belong to a different user +State from remote users is included in the state in the initial sync +State from remote users is included in the timeline in an incremental sync +State is included in the timeline in the initial sync +Sync can be polled for updates +Sync is woken up for leaves +Syncing a new room with a large timeline limit isn't limited Tags appear in an initial v2 /sync Trying to get push rules with unknown rule_id fails with 404 +Typing can be explicitly stopped Typing events appear in gapped sync Typing events appear in incremental sync Typing events appear in initial sync +Typing notification sent to local room members +Typing notifications also sent to remote room members +Typing notifications don't leak Uninvited users cannot join the room +Unprivileged users can set m.room.topic if it only needs level 0 User appears in user directory +User can create and send/receive messages in a room with version 1 +User can create and send/receive messages in a room with version 2 +User can create and send/receive messages in a room with version 3 +User can create and send/receive messages in a room with version 4 +User can create and send/receive messages in a room with version 5 +User can create and send/receive messages in a room with version 6 +User can invite local user to room with version 1 +User can invite local user to room with version 2 +User can invite local user to room with version 3 +User can invite local user to room with version 4 +User can invite local user to room with version 5 +User can invite local user to room with version 6 +User can invite remote user to room with version 1 +User can invite remote user to room with version 2 +User can invite remote user to room with version 3 +User can invite remote user to room with version 4 +User can invite remote user to room with version 5 +User can invite remote user to room with version 6 User directory correctly update on display name change User in dir while user still shares private rooms User in shared private room does appear in user directory User is offline if they set_presence=offline in their sync +User signups are forbidden from starting with '_' +Users can't delete other's aliases +Users cannot invite a user that is already in the room +Users cannot invite themselves to a room +Users cannot kick users from a room they are not in +Users cannot kick users who have already left a room +Users cannot set ban powerlevel higher than their own +Users cannot set kick powerlevel higher than their own +Users cannot set notifications powerlevel higher than their own +Users cannot set redact powerlevel higher than their own +Users receive device_list updates for their own devices Users with sufficient 
power-level can delete other's aliases Version responds 200 OK with valid structure +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +We can't peek into rooms with shared history_visibility +We don't send redundant membership state across incremental syncs by default We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) +We should see our own leave event, even if history_visibility is restricted (SYN-662) +Wildcard device messages over federation wake up /sync Wildcard device messages wake up /sync +Wildcard device messages wake up /sync +avatar_url updates affect room member events +displayname updates affect room member events +local user can join room with version 1 +local user can join room with version 2 +local user can join room with version 3 +local user can join room with version 4 +local user can join room with version 5 +local user can join room with version 6 +m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +m.room.history_visibility == "joined" allows/forbids appropriately for Real users +m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users +m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users query for user with no keys returns empty key dict +remote user can join room with version 1 +remote user can join room with version 2 +remote user can join room with version 3 +remote user can join room with version 4 +remote user can join room with version 5 +remote user can join room with version 6 +setting 'm.room.name' respects room powerlevel +setting 'm.room.power_levels' respects room powerlevel From 246e4735fdcb7e3f2ad4c9078a34a90a5062db92 Mon Sep 17 00:00:00 2001 From: Mariusz Kogen Date: Fri, 2 Apr 2021 22:11:14 +0000 Subject: [PATCH 0491/1727] No need to log out and back in fix --- CROSS_COMPILE.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md index 3c7bbcb..4728768 100644 --- a/CROSS_COMPILE.md +++ b/CROSS_COMPILE.md @@ -1,13 +1,10 @@ Install docker: - +``` $ sudo apt install docker $ sudo usermod -aG docker $USER - -Then log out and back in. - +$ exec sudo su -l $USER $ sudo systemctl start docker - $ cargo install cross $ cross build --release --features tls_vendored --target armv7-unknown-linux-musleabihf - +``` The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit From 9b2f8730bb7123a29c09e87cc60b681b1b8ec3c1 Mon Sep 17 00:00:00 2001 From: Marcel Date: Tue, 6 Apr 2021 12:26:47 +0000 Subject: [PATCH 0492/1727] fix: Do not run conduit as root instead use a separate user --- DEPLOY.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index b547b64..0faa277 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -18,6 +18,14 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` +## Adding a conduit user + +While conduit can run as any user it is usually better to use dedicated users for different services. +This also allows you to make sure that the file permissions are correctly set up. 
+ +In debian you can use this command to create a conduit user: + +`sudo adduser --system conduit --no-create-home` ## Setting up a systemd service @@ -33,8 +41,8 @@ After=network.target [Service] Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" -User=root -Group=root +User=conduit +Group=nogroup Restart=always ExecStart=/usr/local/bin/matrix-conduit @@ -91,6 +99,16 @@ allow_federation = true address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy ``` +## Setting the correct file permissions + +As we are using a conduit specific user we need to allow it to read the config. +To do that you can run this command on debian: + +`sudo chown -R conduit:nogroup /etc/matrix-conduit` + +If you use the default database path you also need to run this. (It might be that you first need to create that folder): + +`sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db` ## Setting up the Reverse Proxy From c267c0f09e8733e29efbc6bd0611a85826aae720 Mon Sep 17 00:00:00 2001 From: Marcel Date: Tue, 6 Apr 2021 13:17:39 +0000 Subject: [PATCH 0493/1727] fix: use full codeblocks and capitalize Conduit and Debian --- DEPLOY.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 0faa277..f801e29 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -18,14 +18,16 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -## Adding a conduit user +## Adding a Conduit user While conduit can run as any user it is usually better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In debian you can use this command to create a conduit user: +In Debian you can use this command to create a Conduit user: -`sudo adduser --system conduit --no-create-home` +``` +sudo adduser --system conduit --no-create-home +``` ## Setting up a systemd service @@ -101,14 +103,20 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re ## Setting the correct file permissions -As we are using a conduit specific user we need to allow it to read the config. -To do that you can run this command on debian: +As we are using a Conduit specific user we need to allow it to read the config. +To do that you can run this command on Debian: -`sudo chown -R conduit:nogroup /etc/matrix-conduit` +``` +sudo chown -R conduit:nogroup /etc/matrix-conduit +``` -If you use the default database path you also need to run this. (It might be that you first need to create that folder): +If you use the default database path you also need to run this: + +``` +sudo mkdir -p /var/lib/matrix-conduit/conduit_db +sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db +``` -`sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db` ## Setting up the Reverse Proxy From fe744c856f9df7e27cd13956c950f35966377d44 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:25:10 +0200 Subject: [PATCH 0494/1727] Upgrade ruma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … and refactor push rule code along the way. 
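The push-rule handlers changed below stop scanning the rule collections with `iter().find(|rule| rule.0.rule_id == ...)` followed by a remove-and-insert, and instead look rules up by id and replace them in place (`global.override_.get(body.rule_id.as_str())` / `.replace(...)`). The following is a minimal stand-alone sketch of that before/after pattern; it uses a plain `Vec` and `BTreeMap` with an illustrative `PushRule` struct rather than ruma's actual rule-set types, so the names here are assumptions, not the real API.

```rust
use std::collections::BTreeMap;

// Illustrative stand-in for a push rule; the real type comes from ruma's `push` module.
#[derive(Clone, Debug)]
struct PushRule {
    rule_id: String,
    enabled: bool,
}

// Before the refactor: linear scan over a list, then remove and re-insert to update.
fn update_rule_linear(rules: &mut Vec<PushRule>, rule_id: &str, enabled: bool) {
    if let Some(pos) = rules.iter().position(|r| r.rule_id == rule_id) {
        let mut rule = rules.remove(pos);
        rule.enabled = enabled;
        rules.push(rule);
    }
}

// After the refactor: keyed lookup and in-place update, analogous to the
// `get(rule_id)` / `replace(rule)` calls in the diff below.
fn update_rule_keyed(rules: &mut BTreeMap<String, PushRule>, rule_id: &str, enabled: bool) {
    if let Some(rule) = rules.get_mut(rule_id) {
        rule.enabled = enabled;
    }
}

fn main() {
    let mut list = vec![PushRule { rule_id: ".m.rule.master".to_owned(), enabled: false }];
    update_rule_linear(&mut list, ".m.rule.master", true);

    let mut keyed = BTreeMap::new();
    keyed.insert(
        ".m.rule.master".to_owned(),
        PushRule { rule_id: ".m.rule.master".to_owned(), enabled: false },
    );
    update_rule_keyed(&mut keyed, ".m.rule.master", true);

    println!("{:?} {:?}", list, keyed.get(".m.rule.master"));
}
```

The keyed form avoids cloning and re-sorting the whole rule set on every update, which is what makes the handler bodies in `src/client_server/push.rs` so much shorter in this patch.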
--- Cargo.lock | 159 +++++++++-------- Cargo.toml | 4 +- src/client_server/account.rs | 4 +- src/client_server/media.rs | 2 +- src/client_server/push.rs | 315 ++++++++++------------------------ src/client_server/state.rs | 25 ++- src/database/pusher.rs | 323 ++++++----------------------------- src/database/sending.rs | 4 +- src/database/users.rs | 16 +- src/lib.rs | 1 - src/main.rs | 1 - src/push_rules.rs | 256 --------------------------- src/ruma_wrapper.rs | 13 +- 13 files changed, 261 insertions(+), 862 deletions(-) delete mode 100644 src/push_rules.rs diff --git a/Cargo.lock b/Cargo.lock index 2a80291..c6c1769 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" +checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" [[package]] name = "constant_time_eq" @@ -356,9 +356,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "either" @@ -402,9 +402,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c38799b106530aa30f774f7fca6d8f7e5f6234a79f427c4fad3c975eaf678931" +checksum = "0ca029e813a72b7526d28273d25f3e4a2f365d1b7a1018a6f93ec9053a119763" dependencies = [ "atomic", "pear", @@ -693,9 +693,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" dependencies = [ "bytes", "futures-channel", @@ -708,7 +708,7 @@ dependencies = [ "httpdate", "itoa", "pin-project", - "socket2", + "socket2 0.4.0", "tokio", "tower-service", "tracing", @@ -764,6 +764,7 @@ checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", "hashbrown", + "serde", ] [[package]] @@ -793,7 +794,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ - "socket2", + "socket2 0.3.19", "widestring", "winapi", "winreg 0.6.2", @@ -846,9 +847,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -884,9 +885,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" +checksum = 
"56d855069fafbb9b344c0f962150cd2c1187975cb1c22c1522c240d8c4986714" [[package]] name = "linked-hash-map" @@ -896,9 +897,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" dependencies = [ "scopeguard", ] @@ -956,9 +957,9 @@ checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc14fc54a812b4472b4113facc3e44d099fbc0ea2ce0551fa5c703f8edfbfd38" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" dependencies = [ "autocfg", ] @@ -1315,10 +1316,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "0.1.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" dependencies = [ + "thiserror", "toml", ] @@ -1336,9 +1338,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -1623,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "assign", "js_int", @@ -1643,7 +1645,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "assign", "http", @@ -1701,21 +1703,24 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ + "indexmap", "js_int", "maplit", "ruma-identifiers", "ruma-serde", "serde", "serde_json", + "tracing", + "wildmatch", ] [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-api", @@ -1754,8 +1759,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.18.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "paste", "rand", @@ -1768,8 +1773,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.18.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro2", "quote", @@ -1779,13 +1784,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.2.2" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = 
"git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "ruma-api", "ruma-common", @@ -1798,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-api", @@ -1812,8 +1817,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "form_urlencoded", "itoa", @@ -1825,8 +1830,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1836,8 +1841,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "base64 0.13.0", "ring", @@ -1915,9 +1920,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d493c5f39e02dfb062cd8f33301f90f9b13b650e8c1b1d0fd75c19dd64bff69d" +checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" dependencies = [ "bitflags", "core-foundation", @@ -1928,9 +1933,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" +checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" dependencies = [ "core-foundation-sys", "libc", @@ -2081,6 +2086,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "spin" version = "0.5.2" @@ -2105,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=improvements#625c37cb776b381a83ab7ee58b13e32506849648" +source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" dependencies = [ "itertools 0.10.0", "log", @@ -2167,9 +2182,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.64" +version 
= "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" dependencies = [ "proc-macro2", "quote", @@ -2300,9 +2315,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" dependencies = [ "tinyvec_macros", ] @@ -2634,9 +2649,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if", "serde", @@ -2646,9 +2661,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -2661,9 +2676,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" dependencies = [ "cfg-if", "js-sys", @@ -2673,9 +2688,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2683,9 +2698,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -2696,15 +2711,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "web-sys" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" dependencies = [ "js-sys", "wasm-bindgen", @@ -2732,6 +2747,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +[[package]] +name = "wildmatch" +version = "2.0.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07ae7ce410f81ba679081aac1d4874f3b1c328535b630209aa5b4cdaaf895e20" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 453bc8e..7bd1557 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } #ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", branch = "improvements", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c890284d0bc3b9d6d4008ac475", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } #state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 10f5d75..4c5b60c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -21,7 +21,7 @@ use ruma::{ }, EventType, }, - RoomAliasId, RoomId, RoomVersionId, UserId, + push, RoomAliasId, RoomId, RoomVersionId, UserId, }; use register::RegistrationKind; @@ -181,7 +181,7 @@ pub async fn register_route( EventType::PushRules, &ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { - global: crate::push_rules::default_pushrules(&user_id), + global: push::Ruleset::server_default(&user_id), }, }, &db.globals, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 57fc2b0..f9350e0 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -46,7 +46,7 @@ pub async fn create_content_route( db.flush().await?; Ok(create_content::Response { - content_uri: mxc, + content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, } .into()) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 9de8c16..e37e660 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -10,10 +10,7 @@ use ruma::{ }, }, events::{push_rules, EventType}, - push::{ - ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, - RoomPushRule, SenderPushRule, SimplePushRuleInit, 
UnderridePushRule, - }, + push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; #[cfg(feature = "conduit_bin")] @@ -67,29 +64,24 @@ pub async fn get_pushrule_route( let rule = match body.kind { RuleKind::Override => global .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Underride => global .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Sender => global .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Room => global .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Content => global .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::_Custom(_) => None, }; @@ -105,14 +97,15 @@ pub async fn get_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn set_pushrule_route( db: State<'_, Database>, - body: Ruma>, + req: Ruma>, ) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = req.sender_user.as_ref().expect("user is authenticated"); + let body = req.body; if body.scope != "global" { return Err(Error::BadRequest( @@ -132,107 +125,62 @@ pub async fn set_pushrule_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.override_.remove(&rule); - } - - global.override_.insert(OverridePushRule( + global.override_.replace( ConditionalPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - conditions: body.conditions.clone(), + rule_id: body.rule_id, + conditions: body.conditions, } .into(), - )); + ); } RuleKind::Underride => { - if let Some(rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.underride.remove(&rule); - } - - global.underride.insert(UnderridePushRule( + global.underride.replace( ConditionalPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - conditions: body.conditions.clone(), + rule_id: body.rule_id, + conditions: body.conditions, } .into(), - )); + ); } RuleKind::Sender => { - if let Some(rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.sender.remove(&rule); - } - - global.sender.insert(SenderPushRule( + global.sender.replace( SimplePushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), + rule_id: body.rule_id, } .into(), - )); + ); } RuleKind::Room => { - if let Some(rule) = global - .room - .iter() - .find(|rule| 
rule.0.rule_id == body.rule_id) - .cloned() - { - global.room.remove(&rule); - } - - global.room.insert(RoomPushRule( + global.room.replace( SimplePushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), + rule_id: body.rule_id, } .into(), - )); + ); } RuleKind::Content => { - if let Some(rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.content.remove(&rule); - } - - global.content.insert(ContentPushRule( + global.content.replace( PatternedPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - pattern: body.pattern.clone().unwrap_or_default(), + rule_id: body.rule_id, + pattern: body.pattern.unwrap_or_default(), } .into(), - )); + ); } RuleKind::_Custom(_) => {} } @@ -280,29 +228,24 @@ pub async fn get_pushrule_actions_route( let actions = match body.kind { RuleKind::Override => global .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Underride => global .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Sender => global .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Room => global .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Content => global .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::_Custom(_) => None, }; @@ -343,63 +286,33 @@ pub async fn set_pushrule_actions_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(mut rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.override_.remove(&rule); - rule.0.actions = body.actions.clone(); - global.override_.insert(rule); + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.override_.replace(rule); } } RuleKind::Underride => { - if let Some(mut rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.underride.remove(&rule); - rule.0.actions = body.actions.clone(); - global.underride.insert(rule); + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.underride.replace(rule); } } RuleKind::Sender => { - if let Some(mut rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.sender.remove(&rule); - rule.0.actions = body.actions.clone(); - global.sender.insert(rule); + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.sender.replace(rule); } } RuleKind::Room => { - if let Some(mut rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.room.remove(&rule); - rule.0.actions = 
body.actions.clone(); - global.room.insert(rule); + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.room.replace(rule); } } RuleKind::Content => { - if let Some(mut rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.content.remove(&rule); - rule.0.actions = body.actions.clone(); - global.content.insert(rule); + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.content.replace(rule); } } RuleKind::_Custom(_) => {} @@ -449,28 +362,28 @@ pub async fn get_pushrule_enabled_route( RuleKind::Override => global .override_ .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Underride => global .underride .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Sender => global .sender .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Room => global .room .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Content => global .content .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::_Custom(_) => false, }; @@ -508,62 +421,37 @@ pub async fn set_pushrule_enabled_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(mut rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { global.override_.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.override_.insert(rule); } } RuleKind::Underride => { - if let Some(mut rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { global.underride.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.underride.insert(rule); } } RuleKind::Sender => { - if let Some(mut rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { global.sender.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.sender.insert(rule); } } RuleKind::Room => { - if let Some(mut rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { global.room.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.room.insert(rule); } } RuleKind::Content => { - if let Some(mut rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { 
global.content.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.content.insert(rule); } } @@ -612,52 +500,27 @@ pub async fn delete_pushrule_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { global.override_.remove(&rule); } } RuleKind::Underride => { - if let Some(rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() { global.underride.remove(&rule); } } RuleKind::Sender => { - if let Some(rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() { global.sender.remove(&rule); } } RuleKind::Room => { - if let Some(rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() { global.room.remove(&rule); } } RuleKind::Content => { - if let Some(rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() { global.content.remove(&rule); } } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 54c5fa5..88cce03 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -3,10 +3,7 @@ use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::state::{ - get_state_events, get_state_events_for_empty_key, get_state_events_for_key, - send_state_event_for_empty_key, send_state_event_for_key, - }, + r0::state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ room::history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, @@ -25,8 +22,8 @@ use rocket::{get, put}; #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let content = serde_json::from_str::( @@ -49,7 +46,7 @@ pub async fn send_state_event_for_key_route( db.flush().await?; - Ok(send_state_event_for_key::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }.into()) } #[cfg_attr( @@ -59,8 +56,8 @@ pub async fn send_state_event_for_key_route( #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { // This just calls send_state_event_for_key_route let Ruma { body, @@ -81,7 +78,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user .as_ref() - .expect("no user for send state empty key rout"), + .expect("no user for send state empty key route"), &body.content, json, &body.room_id, @@ -91,7 +88,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush().await?; - Ok(send_state_event_for_empty_key::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }.into()) } #[cfg_attr( @@ -199,8 +196,8 @@ pub async fn get_state_events_for_key_route( #[tracing::instrument(skip(db, body))] pub async fn 
get_state_events_for_empty_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -236,7 +233,7 @@ pub async fn get_state_events_for_empty_key_route( "State event not found.", ))?; - Ok(get_state_events_for_empty_key::Response { + Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 8e9b24e..6a88d5e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,15 +9,8 @@ use ruma::{ }, OutgoingRequest, }, - events::{ - room::{ - member::{MemberEventContent, MembershipState}, - message::{MessageEventContent, MessageType, TextMessageEventContent}, - power_levels::PowerLevelsEventContent, - }, - EventType, - }, - push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, + events::{room::power_levels::PowerLevelsEventContent, EventType}, + push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; use sled::IVec; @@ -181,276 +174,56 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { - if let Some(msgtype) = pdu.content.get("msgtype").and_then(|b| b.as_str()) { - if msgtype == "m.notice" { - return Ok(()); + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_value(ev.content) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let ctx = PushConditionRoomCtx { + room_id: pdu.room_id.clone(), + member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + user_display_name: user.localpart().into(), // TODO: Use actual display name + users_power_levels: power_levels.users, + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications, + }; + + let mut notify = None; + let mut tweaks = Vec::new(); + + for action in ruleset.get_actions(&pdu.to_sync_state_event(), &ctx) { + let n = match action { + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, + Action::SetTweak(tweak) => { + tweaks.push(tweak.clone()); + continue; + } + }; + + if notify.is_some() { + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, + )); } + + notify = Some(n); } - for rule in ruleset.into_iter() { - // TODO: can actions contain contradictory Actions - if rule - .actions - .iter() - .any(|act| matches!(act, ruma::push::Action::DontNotify)) - || !rule.enabled - { - continue; - } + let notify = notify.ok_or_else(|| { + Error::bad_database( + r#"Malformed pushrule contains none of these actions: ["dont_notify", "notify", "coalesce"]"#, + ) + })?; - match rule.rule_id.as_str() { - ".m.rule.master" => {} - ".m.rule.suppress_notices" => { - if pdu.kind == EventType::RoomMessage - && pdu - .content - .get("msgtype") - .map_or(false, |ty| ty == "m.notice") - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.invite_for_me" => { - if let 
EventType::RoomMember = &pdu.kind { - if pdu.state_key.as_deref() == Some(user.as_str()) - && serde_json::from_value::(pdu.content.clone()) - .map_err(|_| Error::bad_database("PDU contained bad message content"))? - .membership - == MembershipState::Invite - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - ".m.rule.member_event" => { - if let EventType::RoomMember = &pdu.kind { - // TODO use this? - let _member = serde_json::from_value::(pdu.content.clone()) - .map_err(|_| Error::bad_database("PDU contained bad message content"))?; - if let Some(conditions) = rule.conditions { - if conditions.iter().any(|cond| match cond { - PushCondition::EventMatch { key, pattern } => { - let mut json = - serde_json::to_value(pdu).expect("PDU is valid JSON"); - for key in key.split('.') { - json = json[key].clone(); - } - // TODO: this is baddddd - json.to_string().contains(pattern) - } - _ => false, - }) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.contains_display_name" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. }) = - &msg_content.msgtype - { - if body.contains(user.localpart()) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.tombstone" => { - if pdu.kind == EventType::RoomTombstone && pdu.state_key.as_deref() == Some("") { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.roomnotif" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. }) = - &msg_content.msgtype - { - let power_level_cmp = |pl: PowerLevelsEventContent| { - &pl.notifications.room - <= pl.users.get(&pdu.sender).unwrap_or(&ruma::int!(0)) - }; - let deserialize = |pl: PduEvent| { - serde_json::from_value::(pl.content).ok() - }; - if body.contains("@room") - && db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(deserialize) - .flatten() - .map_or(false, power_level_cmp) - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.contains_user_name" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. 
}) = - &msg_content.msgtype - { - if body.contains(user.localpart()) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.call" => { - if pdu.kind == EventType::CallInvite { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.encrypted_room_one_to_one" => { - if db.rooms.room_members(&pdu.room_id).count() == 2 - && pdu.kind == EventType::RoomEncrypted - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.room_one_to_one" => { - if db.rooms.room_members(&pdu.room_id).count() == 2 - && pdu.kind == EventType::RoomMessage - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.message" => { - if pdu.kind == EventType::RoomMessage { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.encrypted" => { - if pdu.kind == EventType::RoomEncrypted { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - _ => {} - } + if notify { + send_notice(unread, pusher, tweaks, pdu, db).await?; } Ok(()) diff --git a/src/database/sending.rs b/src/database/sending.rs index 82d2cdd..b0f9c4d 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -16,7 +16,7 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - uint, ServerName, UInt, UserId, + push, uint, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -428,7 +428,7 @@ impl Sending { .get::(None, &userid, EventType::PushRules) .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&userid)); + .unwrap_or_else(|| push::Ruleset::server_default(&userid)); let unread: UInt = if let Some(last_read) = db .rooms diff --git a/src/database/users.rs b/src/database/users.rs index e5bc16e..ddbfd38 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,6 +9,7 @@ use ruma::{ }, encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, + identifiers::MxcUri, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; @@ -150,21 +151,22 @@ impl Users { } /// Get a the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result> { + pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.to_string())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Avatar URL in db is invalid.") - })?)) + .map(|bytes| { + let s = utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; + MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) + .transpose() } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl - .insert(user_id.to_string(), &*avatar_url)?; + .insert(user_id.to_string(), avatar_url.to_string().as_str())?; } else { self.userid_avatarurl.remove(user_id.to_string())?; } diff --git a/src/lib.rs b/src/lib.rs index aed129f..0af46e0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,6 @@ pub mod client_server; mod database; mod error; mod pdu; -mod push_rules; mod ruma_wrapper; pub mod server_server; mod utils; diff --git a/src/main.rs b/src/main.rs index 696ce5c..f523abb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,6 @@ pub mod server_server; mod database; mod error; mod pdu; -mod push_rules; mod ruma_wrapper; mod utils; diff --git a/src/push_rules.rs b/src/push_rules.rs deleted file mode 100644 index 76a1a61..0000000 --- a/src/push_rules.rs +++ /dev/null @@ -1,256 +0,0 @@ -use ruma::{ - push::{ - Action, ConditionalPushRule, ConditionalPushRuleInit, ContentPushRule, OverridePushRule, - PatternedPushRule, PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, - UnderridePushRule, - }, - UserId, -}; - -pub fn default_pushrules(user_id: &UserId) -> Ruleset { - let mut rules = Ruleset::default(); - - rules.add(ContentPushRule(contains_user_name_rule(&user_id))); - - for rule in vec![ - master_rule(), - suppress_notices_rule(), - invite_for_me_rule(), - member_event_rule(), - contains_display_name_rule(), - tombstone_rule(), - roomnotif_rule(), - ] { - rules.add(OverridePushRule(rule)); - } - - for rule in vec![ - call_rule(), - encrypted_room_one_to_one_rule(), - room_one_to_one_rule(), - message_rule(), - encrypted_rule(), - ] { - rules.add(UnderridePushRule(rule)); - } - - rules -} - -pub fn master_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: false, - rule_id: ".m.rule.master".to_owned(), - conditions: vec![], - } - .into() -} - -pub fn suppress_notices_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: true, - rule_id: ".m.rule.suppress_notices".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "content.msgtype".to_owned(), - pattern: "m.notice".to_owned(), - }], - } - .into() -} - -pub fn invite_for_me_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.invite_for_me".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "content.membership".to_owned(), - pattern: "m.invite".to_owned(), - }], - } - .into() -} - -pub fn member_event_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: true, - rule_id: ".m.rule.member_event".to_owned(), - 
conditions: vec![PushCondition::EventMatch { - key: "content.membership".to_owned(), - pattern: "type".to_owned(), - }], - } - .into() -} - -pub fn contains_display_name_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(true)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.contains_display_name".to_owned(), - conditions: vec![PushCondition::ContainsDisplayName], - } - .into() -} - -pub fn tombstone_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], - default: true, - enabled: true, - rule_id: ".m.rule.tombstone".to_owned(), - conditions: vec![ - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.tombstone".to_owned(), - }, - PushCondition::EventMatch { - key: "state_key".to_owned(), - pattern: "".to_owned(), - }, - ], - } - .into() -} - -pub fn roomnotif_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], - default: true, - enabled: true, - rule_id: ".m.rule.roomnotif".to_owned(), - conditions: vec![ - PushCondition::EventMatch { - key: "content.body".to_owned(), - pattern: "@room".to_owned(), - }, - PushCondition::SenderNotificationPermission { - key: "room".to_owned(), - }, - ], - } - .into() -} - -pub fn contains_user_name_rule(user_id: &UserId) -> PatternedPushRule { - PatternedPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(true)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.contains_user_name".to_owned(), - pattern: user_id.localpart().to_owned(), - } - .into() -} - -pub fn call_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("ring".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.call".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.call.invite".to_owned(), - }], - } - .into() -} - -pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.encrypted_room_one_to_one".to_owned(), - conditions: vec![ - PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(2_u32.into()..), - }, - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.encrypted".to_owned(), - }, - ], - } - .into() -} - -pub fn room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.room_one_to_one".to_owned(), - conditions: vec![ - PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(2_u32.into()..), - }, - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - ], - } - .into() -} - -pub fn message_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], - default: true, - enabled: true, - 
rule_id: ".m.rule.message".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }], - } - .into() -} - -pub fn encrypted_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], - default: true, - enabled: true, - rule_id: ".m.rule.encrypted".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.encrypted".to_owned(), - }], - } - .into() -} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 9787e2d..8e1d34f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,5 @@ use crate::Error; use ruma::{ - api::OutgoingRequest, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -28,7 +27,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -37,7 +36,7 @@ pub struct Ruma { } #[cfg(feature = "conduit_bin")] -impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma +impl<'a, T: Outgoing> FromTransformedData<'a> for Ruma where T::Incoming: IncomingRequest, { @@ -56,6 +55,8 @@ where request: &'a Request<'_>, outcome: Transformed<'a, Self>, ) -> FromDataFuture<'a, Self, Self::Error> { + let metadata = T::Incoming::METADATA; + Box::pin(async move { let data = rocket::try_outcome!(outcome.owned()); let db = request @@ -80,7 +81,7 @@ where .and_then(|as_token| as_token.as_str()) .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { - match T::METADATA.authentication { + match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { let user_id = request.get_query_value::("user_id").map_or_else( || { @@ -112,7 +113,7 @@ where AuthScheme::None => (None, None, true), } } else { - match T::METADATA.authentication { + match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { if let Some(token) = token { match db.users.find_from_token(&token).unwrap() { @@ -166,7 +167,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { From dbe8c2ce19ad5c7e8493b717841f0a2e558be27b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:44:21 +0200 Subject: [PATCH 0495/1727] Remove lots of redundant string allocations --- src/database.rs | 4 +-- src/database/account_data.rs | 10 +++--- src/database/key_backups.rs | 30 ++++++++--------- src/database/rooms.rs | 16 +++++----- src/database/rooms/edus.rs | 62 +++++++++++++++++------------------- src/database/uiaa.rs | 4 +-- src/database/users.rs | 56 +++++++++++++++----------------- 7 files changed, 87 insertions(+), 95 deletions(-) diff --git a/src/database.rs b/src/database.rs index d8734b5..bacf3b9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -217,7 +217,7 @@ impl Database { } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let userid_bytes = user_id.to_string().as_bytes().to_vec(); + let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); userid_prefix.push(0xff); @@ -241,7 +241,7 @@ impl Database { // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let roomid_bytes = room_id.to_string().as_bytes().to_vec(); + let roomid_bytes = room_id.as_bytes().to_vec(); let mut roomid_prefix 
= roomid_bytes.clone(); roomid_prefix.push(0xff); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 38e6c32..f3832ea 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -30,7 +30,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); // Remove old entry @@ -42,7 +42,7 @@ impl AccountData { let mut key = prefix; key.extend_from_slice(&globals.next_count()?.to_be_bytes()); key.push(0xff); - key.extend_from_slice(event_type.to_string().as_bytes()); + key.extend_from_slice(event_type.as_ref().as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling if json.get("type").is_none() || json.get("content").is_none() { @@ -89,7 +89,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); // Skip the data that's exactly at since, because we sent that last time @@ -135,7 +135,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); let kind = kind.clone(); @@ -148,7 +148,7 @@ impl AccountData { k.rsplit(|&b| b == 0xff) .next() .map(|current_event_type| { - current_event_type == kind.to_string().as_bytes() + current_event_type == kind.as_ref().as_bytes() }) .unwrap_or(false) }) diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 4c65354..0f9af2e 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -24,7 +24,7 @@ impl KeyBackups { ) -> Result { let version = globals.next_count()?.to_string(); - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -39,7 +39,7 @@ impl KeyBackups { } pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -67,7 +67,7 @@ impl KeyBackups { backup_metadata: &BackupAlgorithm, globals: &super::globals::Globals, ) -> Result { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -89,7 +89,7 @@ impl KeyBackups { } pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.backupid_algorithm .scan_prefix(&prefix) @@ -113,7 +113,7 @@ impl KeyBackups { } pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -132,7 +132,7 @@ impl KeyBackups { key_data: &KeyBackupData, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -147,7 +147,7 @@ impl KeyBackups { .insert(&key, &globals.next_count()?.to_be_bytes())?; key.push(0xff); - 
key.extend_from_slice(room_id.to_string().as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); key.extend_from_slice(session_id.as_bytes()); @@ -160,7 +160,7 @@ impl KeyBackups { } pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -168,7 +168,7 @@ impl KeyBackups { } pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -187,7 +187,7 @@ impl KeyBackups { user_id: &UserId, version: &str, ) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); @@ -240,7 +240,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, ) -> BTreeMap { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); @@ -278,7 +278,7 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, ) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); key.push(0xff); @@ -297,7 +297,7 @@ impl KeyBackups { } pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); @@ -320,7 +320,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); @@ -346,7 +346,7 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5d43626..ef1e558 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -151,7 +151,7 @@ impl Rooms { event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = event_type.to_string().as_bytes().to_vec(); + let mut key = event_type.as_ref().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); @@ -416,7 +416,7 @@ impl Rooms { /// Returns the pdu's id. pub fn get_pdu_id(&self, event_id: &EventId) -> Result> { self.eventid_pduid - .get(event_id.to_string().as_bytes())? + .get(event_id.as_bytes())? 
.map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } @@ -690,7 +690,7 @@ impl Rooms { .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) { - let mut key = pdu.room_id.to_string().as_bytes().to_vec(); + let mut key = pdu.room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(word.as_bytes()); key.push(0xff); @@ -1264,7 +1264,7 @@ impl Rooms { room_id: &RoomId, since: u64, ) -> Result>> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); // Skip the first pdu if it's exactly at since, because we sent that last time @@ -1298,7 +1298,7 @@ impl Rooms { until: u64, ) -> impl Iterator> { // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut current = prefix.clone(); @@ -1332,7 +1332,7 @@ impl Rooms { from: u64, ) -> impl Iterator> { // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut current = prefix.clone(); @@ -1883,9 +1883,9 @@ impl Rooms { } pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.roomuseroncejoinedids.get(userroom_id)?.is_some()) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 084e4a1..9e43fe1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -34,7 +34,7 @@ impl RoomEdus { event: EduEvent, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); // Remove old entry @@ -49,7 +49,7 @@ impl RoomEdus { key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element") - == user_id.to_string().as_bytes() + == user_id.as_bytes() }) { // This is the old room_latest @@ -59,7 +59,7 @@ impl RoomEdus { let mut room_latest_id = prefix; room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); room_latest_id.push(0xff); - room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + room_latest_id.extend_from_slice(&user_id.as_bytes()); self.readreceiptid_readreceipt.insert( room_latest_id, @@ -76,7 +76,7 @@ impl RoomEdus { room_id: &RoomId, since: u64, ) -> Result>>> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut first_possible_edu = prefix.clone(); @@ -102,9 +102,9 @@ impl RoomEdus { count: u64, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); self.roomuserid_privateread .insert(&key, &count.to_be_bytes())?; @@ -118,9 +118,9 @@ impl RoomEdus { /// Returns the private read marker. 
#[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); self.roomuserid_privateread.get(key)?.map_or(Ok(None), |v| { Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { @@ -131,9 +131,9 @@ impl RoomEdus { /// Returns the count of the last typing update in this room. pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); Ok(self .roomuserid_lastprivatereadupdate @@ -155,7 +155,7 @@ impl RoomEdus { timeout: u64, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let count = globals.next_count()?.to_be_bytes(); @@ -166,10 +166,10 @@ impl RoomEdus { room_typing_id.extend_from_slice(&count); self.typingid_userid - .insert(&room_typing_id, &*user_id.to_string().as_bytes())?; + .insert(&room_typing_id, &*user_id.as_bytes())?; self.roomid_lasttypingupdate - .insert(&room_id.to_string().as_bytes(), &count)?; + .insert(&room_id.as_bytes(), &count)?; Ok(()) } @@ -181,7 +181,7 @@ impl RoomEdus { room_id: &RoomId, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let user_id = user_id.to_string(); @@ -200,10 +200,8 @@ impl RoomEdus { } if found_outdated { - self.roomid_lasttypingupdate.insert( - &room_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.roomid_lasttypingupdate + .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -215,7 +213,7 @@ impl RoomEdus { room_id: &RoomId, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let current_timestamp = utils::millis_since_unix_epoch(); @@ -248,10 +246,8 @@ impl RoomEdus { } if found_outdated { - self.roomid_lasttypingupdate.insert( - &room_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.roomid_lasttypingupdate + .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -268,7 +264,7 @@ impl RoomEdus { Ok(self .roomid_lasttypingupdate - .get(&room_id.to_string().as_bytes())? + .get(&room_id.as_bytes())? 
.map_or(Ok::<_, Error>(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -281,7 +277,7 @@ impl RoomEdus { &self, room_id: &RoomId, ) -> Result> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut user_ids = Vec::new(); @@ -322,11 +318,11 @@ impl RoomEdus { let count = globals.next_count()?.to_be_bytes(); - let mut presence_id = room_id.to_string().as_bytes().to_vec(); + let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); presence_id.extend_from_slice(&count); presence_id.push(0xff); - presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); + presence_id.extend_from_slice(&presence.sender.as_bytes()); self.presenceid_presence.insert( presence_id, @@ -334,7 +330,7 @@ impl RoomEdus { )?; self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -345,7 +341,7 @@ impl RoomEdus { #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -355,7 +351,7 @@ impl RoomEdus { /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. pub fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_lastpresenceupdate - .get(&user_id.to_string().as_bytes())? + .get(&user_id.as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") @@ -398,7 +394,7 @@ impl RoomEdus { .try_into() .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.to_string().as_bytes().to_vec(); + let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); presence_id.extend_from_slice(&count); presence_id.push(0xff); @@ -424,7 +420,7 @@ impl RoomEdus { } self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; } @@ -443,7 +439,7 @@ impl RoomEdus { ) -> Result> { self.presence_maintain(rooms, globals)?; - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut first_possible_edu = prefix.clone(); diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 381a701..4c33b86 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -148,7 +148,7 @@ impl Uiaa { device_id: &DeviceId, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -170,7 +170,7 @@ impl Uiaa { device_id: &DeviceId, session: &str, ) -> Result { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); diff --git a/src/database/users.rs b/src/database/users.rs index ddbfd38..c794e52 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -185,7 +185,7 @@ impl Users { // This method 
should never be called for nonexistent users. assert!(self.exists(user_id)?); - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -208,7 +208,7 @@ impl Users { /// Removes a device from a user. pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -234,7 +234,7 @@ impl Users { /// Returns an iterator over all device ids of this user. pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator>> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata self.userdeviceid_metadata @@ -254,7 +254,7 @@ impl Users { /// Replaces the access token of one device. pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -282,7 +282,7 @@ impl Users { one_time_key_value: &OneTimeKey, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -305,10 +305,8 @@ impl Users { .expect("OneTimeKey::to_string always works"), )?; - self.userid_lastonetimekeyupdate.insert( - &user_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.userid_lastonetimekeyupdate + .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; Ok(()) } @@ -316,7 +314,7 @@ impl Users { #[tracing::instrument(skip(self))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate - .get(&user_id.to_string().as_bytes())? + .get(&user_id.as_bytes())? 
.map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -332,18 +330,16 @@ impl Users { key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, ) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); + prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); - self.userid_lastonetimekeyupdate.insert( - &user_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.userid_lastonetimekeyupdate + .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys .scan_prefix(&prefix) @@ -373,7 +369,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -409,7 +405,7 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -434,7 +430,7 @@ impl Users { ) -> Result<()> { // TODO: Check signatures - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // Master key @@ -532,9 +528,9 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_id.to_string().as_bytes().to_vec(); + let mut key = target_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(key_id.to_string().as_bytes()); + key.extend_from_slice(key_id.as_bytes()); let mut cross_signing_key = serde_json::from_slice::(&self.keyid_key.get(&key)?.ok_or( @@ -617,14 +613,14 @@ impl Users { continue; } - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&count); self.keychangeid_userid.insert(key, &*user_id.to_string())?; } - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&count); self.keychangeid_userid.insert(key, &*user_id.to_string())?; @@ -637,7 +633,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -724,7 +720,7 @@ impl Users { content: serde_json::Value, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_user_id.to_string().as_bytes().to_vec(); + let mut key = target_user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); @@ -751,7 +747,7 @@ impl Users { ) -> Result>> { let mut events = Vec::new(); - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); @@ -773,7 +769,7 @@ impl Users { device_id: &DeviceId, until: u64, ) -> Result<()> 
{ - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); @@ -808,7 +804,7 @@ impl Users { device_id: &DeviceId, device: &Device, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -831,7 +827,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -845,7 +841,7 @@ impl Users { } pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); self.userdeviceid_metadata From dc031fff95594fbf572a8a7564f761518e9889d2 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:46:10 +0200 Subject: [PATCH 0496/1727] Remove redundant calls to .iter() and .into_iter() --- src/database/rooms.rs | 2 +- src/server_server.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ef1e558..b2043d1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -531,7 +531,7 @@ impl Rooms { self.roomid_pduleaves.remove(key?)?; } - for event_id in event_ids.iter() { + for event_id in event_ids { let mut key = prefix.to_owned(); key.extend_from_slice(event_id.as_bytes()); self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; diff --git a/src/server_server.rs b/src/server_server.rs index bb0b9af..421b4f8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -552,7 +552,7 @@ pub async fn send_transaction_message_route<'a>( // events that it references. 
let mut auth_cache = EventMap::new(); - for pdu in body.pdus.iter() { + for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { Ok(t) => t, @@ -757,7 +757,7 @@ fn handle_incoming_pdu<'a>( // Build map of auth events let mut auth_events = BTreeMap::new(); - for id in incoming_pdu.auth_events.iter() { + for id in &incoming_pdu.auth_events { let auth_event = auth_cache.get(id).ok_or_else(|| { "Auth event not found, event failed recursive auth checks.".to_string() })?; @@ -869,7 +869,7 @@ fn handle_incoming_pdu<'a>( }; let mut state = BTreeMap::new(); - for pdu in state_vec.into_iter() { + for pdu in state_vec { match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { Entry::Vacant(v) => { v.insert(pdu); @@ -1268,7 +1268,7 @@ pub(crate) async fn fetch_signing_keys( .await { debug!("Got signing keys: {:?}", keys); - for k in keys.server_keys.into_iter() { + for k in keys.server_keys { db.globals.add_signing_key(origin, &k)?; result.extend( k.verify_keys From f3f95a73d0d667afa213f01553e5294a1b30e502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Apr 2021 15:56:57 +0200 Subject: [PATCH 0497/1727] improvement: /event route --- src/main.rs | 1 + src/server_server.rs | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/main.rs b/src/main.rs index f523abb..4ccc025 100644 --- a/src/main.rs +++ b/src/main.rs @@ -164,6 +164,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_public_rooms_route, server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, + server_server::get_event_route, server_server::get_missing_events_route, server_server::get_room_state_ids_route, server_server::get_profile_information_route, diff --git a/src/server_server.rs b/src/server_server.rs index 421b4f8..84cfe61 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1388,6 +1388,34 @@ pub(crate) fn append_incoming_pdu( Ok(()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/event/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_event_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + Ok(get_event::v1::Response { + origin: db.globals.server_name().to_owned(), + origin_server_ts: SystemTime::now(), + pdu: PduEvent::convert_to_outgoing_federation_event( + serde_json::from_value( + db.rooms + .get_pdu_json(&body.event_id)? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, + ) + .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + ), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 51aa6448bcb79892b15acceba5299caa3c3f17c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Apr 2021 17:58:33 +0200 Subject: [PATCH 0498/1727] fix: use sled main to avoid deadlock --- Cargo.lock | 123 ++++++++++++++++++++++++++++------------------------- Cargo.toml | 4 +- 2 files changed, 69 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6c1769..2b960f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,6 +109,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitmaps" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" +dependencies = [ + "typenum", +] + [[package]] name = "blake2b_simd" version = "0.5.11" @@ -258,19 +267,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-epoch" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - [[package]] name = "crossbeam-utils" version = "0.8.3" @@ -547,15 +543,6 @@ dependencies = [ "slab", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - [[package]] name = "getrandom" version = "0.1.16" @@ -739,6 +726,20 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "im" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111c1983f3c5bb72732df25cddacee9b546d08325fb584b5ebd38148be7b0246" +dependencies = [ + "bitmaps", + "rand_core 0.5.1", + "rand_xoshiro", + "sized-chunks", + "typenum", + "version_check", +] + [[package]] name = "image" version = "0.23.14" @@ -806,15 +807,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.10.0" @@ -955,15 +947,6 @@ version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" -[[package]] -name = "memoffset" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.16" @@ -1381,7 +1364,7 @@ checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.2", "rand_hc", ] @@ -1392,9 +1375,15 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.2", ] +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" + [[package]] name = "rand_core" version = "0.6.2" @@ -1410,7 +1399,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core", + "rand_core 0.6.2", +] + +[[package]] +name = "rand_xoshiro" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -2046,6 +2044,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "sized-chunks" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e65d6a9f13cd78f361ea5a2cf53a45d67cdda421ba0316b9be101560f3d207" +dependencies = [ + "bitmaps", + "typenum", +] + [[package]] name = "slab" version = "0.4.2" @@ -2055,14 +2063,11 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" version = "0.34.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" +source = "git+https://github.com/spacejam/sled.git?rev=e4640e0773595229f398438886f19bca6f7326a2#e4640e0773595229f398438886f19bca6f7326a2" dependencies = [ "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", "fs2", - "fxhash", + "im", "libc", "log", "parking_lot", @@ -2122,7 +2127,7 @@ name = "state-res" version = "0.1.0" source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" dependencies = [ - "itertools 0.10.0", + "itertools", "log", "maplit", "ruma", @@ -2547,6 +2552,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "typenum" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" + [[package]] name = "ubyte" version = "0.10.1" @@ -2819,18 +2830,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.6.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "3.0.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" dependencies = [ "libc", "zstd-sys", @@ -2838,12 +2849,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.4.20+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 7bd1557..caf0bdf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,9 @@ state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c8 # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", default-features = false, features = ["compression"] } +#sled = { version = "0.34.6", default-features = false, features = ["compression"] } +sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } + # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions From 84f4ce73e511c646a97a12126ebcc96d9b90991b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 9 Apr 2021 21:38:16 +0200 Subject: [PATCH 0499/1727] fix: membership deserializing --- Cargo.lock | 14 +++++------ Cargo.toml | 7 ++---- src/client_server/membership.rs | 16 ++++++++----- src/client_server/room.rs | 28 ++++++++++++---------- src/database.rs | 2 +- src/database/pusher.rs | 2 +- src/database/rooms.rs | 41 ++++++++++++++++++--------------- src/database/sending.rs | 18 +++++++++------ src/utils.rs | 9 ++++---- 9 files changed, 74 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b960f3..b9b9af7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -877,9 +877,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56d855069fafbb9b344c0f962150cd2c1187975cb1c22c1522c240d8c4986714" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" [[package]] name = "linked-hash-map" @@ -2125,7 +2125,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" +source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" dependencies = [ "itertools", "log", @@ -2578,9 +2578,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" dependencies = [ "matches", ] @@ -2760,9 +2760,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "wildmatch" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ae7ce410f81ba679081aac1d4874f3b1c328535b630209aa5b4cdaaf895e20" +checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" [[package]] name = "winapi" diff --git a/Cargo.toml b/Cargo.toml index caf0bdf..35037ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,11 +23,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570 #ruma = { path = 
"../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c890284d0bc3b9d6d4008ac475", features = ["unstable-pre-spec"] } -# TODO: remove the gen-eventid feature -#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2fc0b0728bf027a5899839ad94bb3091b", features = ["unstable-pre-spec"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 5d630b4..3f4f23f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -604,12 +604,16 @@ async fn join_room_by_id_helper( db.rooms.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content.", - ) + serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadServerResponse("Invalid member event content") + })? + .clone(), + ) + .map_err(|_| { + Error::BadServerResponse("Invalid membership state content.") })?, &pdu.sender, &db.account_data, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f8d980b..bba7f95 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -91,10 +91,24 @@ pub async fn create_room_route( )?; // 3. Power levels + + // Figure out preset. We need it for preset specific events + let preset = body + .preset + .clone() + .unwrap_or_else(|| match &body.visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + room::Visibility::_Custom(_) => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom + }); + let mut users = BTreeMap::new(); users.insert(sender_user.clone(), 100.into()); - for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); + + if preset == create_room::RoomPreset::TrustedPrivateChat { + for invite_ in &body.invite { + users.insert(invite_.clone(), 100.into()); + } } let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { @@ -133,16 +147,6 @@ pub async fn create_room_route( // 4. Events set by preset - // Figure out preset. 
We need it for preset specific events - let preset = body - .preset - .clone() - .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - room::Visibility::_Custom(s) => create_room::RoomPreset::_Custom(s.into()), - }); - // 4.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { diff --git a/src/database.rs b/src/database.rs index bacf3b9..cb0df15 100644 --- a/src/database.rs +++ b/src/database.rs @@ -108,7 +108,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity as u64) + .cache_capacity(config.cache_capacity as usize) .use_compression(true) .open()?; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 6a88d5e..27e5926 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -196,7 +196,7 @@ pub async fn send_push_notice( let mut notify = None; let mut tweaks = Vec::new(); - for action in ruleset.get_actions(&pdu.to_sync_state_event(), &ctx) { + for action in ruleset.get_actions(&pdu.to_sync_room_event(), &ctx) { let n = match action { Action::DontNotify => false, // TODO: Implement proper support for coalesce diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b2043d1..81697e3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -465,7 +465,7 @@ impl Rooms { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -671,11 +671,21 @@ impl Rooms { self.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { + serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ) + })? 
+ .clone(), + ) + .map_err(|_| { Error::BadRequest( ErrorKind::InvalidParam, - "Invalid member event content.", + "Invalid membership state content.", ) })?, &pdu.sender, @@ -895,19 +905,14 @@ impl Rooms { .scan_prefix(&old_shortstatehash) .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) // Chop the old_shortstatehash out leaving behind the short state key - .map(|(k, v)| { - ( - k.subslice(old_shortstatehash.len(), k.len() - old_shortstatehash.len()), - v, - ) - }) - .collect::>() + .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) + .collect::, IVec>>() } else { HashMap::new() }; if let Some(state_key) = &new_pdu.state_key { - let mut new_state: HashMap = old_state; + let mut new_state: HashMap, IVec> = old_state; let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); new_state_key.push(0xff); @@ -935,7 +940,7 @@ impl Rooms { } }; - new_state.insert(shortstatekey.into(), shorteventid.into()); + new_state.insert(shortstatekey, shorteventid.into()); let new_state_hash = self.calculate_hash( &new_state @@ -1377,13 +1382,11 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - member_content: member::MemberEventContent, + membership: member::MembershipState, sender: &UserId, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { - let membership = member_content.membership; - let mut roomserver_id = room_id.as_bytes().to_vec(); roomserver_id.push(0xff); roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); @@ -1633,7 +1636,7 @@ impl Rooms { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result<(impl Iterator + 'a, Vec)> { + ) -> Result<(impl Iterator> + 'a, Vec)> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1661,7 +1664,7 @@ impl Rooms { .0 + 1; // +1 because the pdu id starts AFTER the separator - let pdu_id = key.subslice(pduid_index, key.len() - pduid_index); + let pdu_id = key[pduid_index..].to_vec(); Ok::<_, Error>(pdu_id) }) @@ -1700,7 +1703,7 @@ impl Rooms { .0 + 1; // +1 because the room id starts AFTER the separator - let room_id = key.subslice(roomid_index, key.len() - roomid_index); + let room_id = key[roomid_index..].to_vec(); Ok::<_, Error>(room_id) }) diff --git a/src/database/sending.rs b/src/database/sending.rs index b0f9c4d..779df06 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -47,7 +47,7 @@ impl Sending { let mut futures = FuturesUnordered::new(); // Retry requests we could not finish yet - let mut current_transactions = HashMap::>::new(); + let mut current_transactions = HashMap::>>::new(); for (key, outgoing_kind, pdu) in servercurrentpdus .iter() @@ -55,7 +55,7 @@ impl Sending { .filter_map(|(key, _)| { Self::parse_servercurrentpdus(&key) .ok() - .map(|(k, p)| (key, k, p)) + .map(|(k, p)| (key, k, p.to_vec())) }) { if pdu.is_empty() { @@ -150,7 +150,7 @@ impl Sending { .keys() .filter_map(|r| r.ok()) .map(|k| { - k.subslice(prefix.len(), k.len() - prefix.len()) + k[prefix.len()..].to_vec() }) .take(30) .collect::>(); @@ -211,7 +211,11 @@ impl Sending { }; }, Some(event) = &mut subscriber => { - if let sled::Event::Insert { key, .. 
} = event { + for (_tree, key, value_opt) in &event { + if value_opt.is_none() { + continue; + } + let servernamepduid = key.clone(); let exponential_backoff = |(tries, instant): &(u32, Instant)| { @@ -265,7 +269,7 @@ impl Sending { futures.push( Self::handle_event( outgoing_kind, - vec![pdu_id], + vec![pdu_id.to_vec()], &db, ) ); @@ -310,7 +314,7 @@ impl Sending { } #[tracing::instrument] - fn calculate_hash(keys: &[IVec]) -> Vec { + fn calculate_hash(keys: &[Vec]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); @@ -320,7 +324,7 @@ impl Sending { #[tracing::instrument(skip(db))] async fn handle_event( kind: OutgoingKind, - pdu_ids: Vec, + pdu_ids: Vec>, db: &Database, ) -> std::result::Result { match &kind { diff --git a/src/utils.rs b/src/utils.rs index 0783567..45d9de8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -2,7 +2,6 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; -use sled::IVec; use std::{ cmp, convert::TryInto, @@ -70,10 +69,10 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -pub fn common_elements( - mut iterators: impl Iterator>, - check_order: impl Fn(&IVec, &IVec) -> Ordering, -) -> Option> { +pub fn common_elements<'a>( + mut iterators: impl Iterator>>, + check_order: impl Fn(&[u8], &[u8]) -> Ordering, +) -> Option>> { let first_iterator = iterators.next()?; let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); From 044e65afccb87da5012117b3192a5bd4dbaf5150 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:12:05 +0200 Subject: [PATCH 0500/1727] fix: move back to sled stable --- Cargo.lock | 123 +++++++++++++++++++--------------------- Cargo.toml | 4 +- src/database.rs | 5 +- src/database/sending.rs | 10 ++-- 4 files changed, 68 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9b9af7..cf881c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,15 +109,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitmaps" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] - [[package]] name = "blake2b_simd" version = "0.5.11" @@ -267,6 +258,19 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + [[package]] name = "crossbeam-utils" version = "0.8.3" @@ -543,6 +547,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "getrandom" version = "0.1.16" @@ -726,20 +739,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "im" -version = "15.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"111c1983f3c5bb72732df25cddacee9b546d08325fb584b5ebd38148be7b0246" -dependencies = [ - "bitmaps", - "rand_core 0.5.1", - "rand_xoshiro", - "sized-chunks", - "typenum", - "version_check", -] - [[package]] name = "image" version = "0.23.14" @@ -807,6 +806,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.10.0" @@ -947,6 +955,15 @@ version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +[[package]] +name = "memoffset" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -1364,7 +1381,7 @@ checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.2", + "rand_core", "rand_hc", ] @@ -1375,15 +1392,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - [[package]] name = "rand_core" version = "0.6.2" @@ -1399,16 +1410,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.2", -] - -[[package]] -name = "rand_xoshiro" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" -dependencies = [ - "rand_core 0.5.1", + "rand_core", ] [[package]] @@ -2044,16 +2046,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "sized-chunks" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e65d6a9f13cd78f361ea5a2cf53a45d67cdda421ba0316b9be101560f3d207" -dependencies = [ - "bitmaps", - "typenum", -] - [[package]] name = "slab" version = "0.4.2" @@ -2063,11 +2055,14 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" version = "0.34.6" -source = "git+https://github.com/spacejam/sled.git?rev=e4640e0773595229f398438886f19bca6f7326a2#e4640e0773595229f398438886f19bca6f7326a2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" dependencies = [ "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", "fs2", - "im", + "fxhash", "libc", "log", "parking_lot", @@ -2127,7 +2122,7 @@ name = "state-res" version = "0.1.0" source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" dependencies = [ - "itertools", + "itertools 0.10.0", "log", "maplit", "ruma", @@ -2552,12 +2547,6 @@ version = 
"0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "typenum" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" - [[package]] name = "ubyte" version = "0.10.1" @@ -2830,18 +2819,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zstd" -version = "0.6.1+zstd.1.4.9" +version = "0.5.4+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "3.0.1+zstd.1.4.9" +version = "2.0.6+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" dependencies = [ "libc", "zstd-sys", @@ -2849,10 +2838,12 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.20+zstd.1.4.9" +version = "1.4.18+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" dependencies = [ "cc", + "glob", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 35037ee..3109dd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,8 +29,8 @@ state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2 # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -#sled = { version = "0.34.6", default-features = false, features = ["compression"] } -sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +sled = { version = "0.34.6", default-features = false, features = ["compression"] } +#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for emitting log entries log = "0.4.14" diff --git a/src/database.rs b/src/database.rs index cb0df15..a266c21 100644 --- a/src/database.rs +++ b/src/database.rs @@ -108,7 +108,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity as usize) + .cache_capacity(config.cache_capacity as u64) .use_compression(true) .open()?; @@ -301,7 +301,8 @@ impl Database { } pub async fn flush(&self) -> Result<()> { - self._db.flush_async().await?; + // noop while we don't use sled 1.0 + //self._db.flush_async().await?; Ok(()) } } diff --git a/src/database/sending.rs b/src/database/sending.rs index 779df06..d6dcead 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -211,10 +211,12 @@ impl Sending { }; }, Some(event) = &mut subscriber => { - for (_tree, key, value_opt) in &event { - if value_opt.is_none() { - continue; - } + if let sled::Event::Insert { key, .. 
} = event { + // New sled version: + //for (_tree, key, value_opt) in &event { + // if value_opt.is_none() { + // continue; + // } let servernamepduid = key.clone(); From 588de12d799090cf2c964d763f06cd8ee80ef8cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:12:29 +0200 Subject: [PATCH 0501/1727] fix: lost forward extremity --- src/server_server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server_server.rs b/src/server_server.rs index 84cfe61..4a93a3d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1010,6 +1010,7 @@ fn handle_incoming_pdu<'a>( fork_states.insert(current_state); // We also add state after incoming event to the fork states + extremities.insert(incoming_pdu.event_id.clone()); let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { state_after.insert( From b0ea692706bed8de8bc5f3e82b9186700ba1ee0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:50:30 +0200 Subject: [PATCH 0502/1727] fix: malformed pushrule error when event does not trigger any actions --- src/database/pusher.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 27e5926..9a9452c 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -216,16 +216,11 @@ pub async fn send_push_notice( notify = Some(n); } - let notify = notify.ok_or_else(|| { - Error::bad_database( - r#"Malformed pushrule contains none of these actions: ["dont_notify", "notify", "coalesce"]"#, - ) - })?; - - if notify { + if notify == Some(true) { send_notice(unread, pusher, tweaks, pdu, db).await?; } - + // Else the event triggered no actions + Ok(()) } From dd6985059899c22ca10a76684f5fe2696d36033f Mon Sep 17 00:00:00 2001 From: Marcel Date: Sun, 11 Apr 2021 10:50:38 +0000 Subject: [PATCH 0503/1727] docs: Fix missing _matrix in apache config --- DEPLOY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index f801e29..53ca2ea 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -133,8 +133,8 @@ Listen 8448 ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode -ProxyPass /_matrix/ http://localhost:6167/ -ProxyPassReverse /_matrix/ http://localhost:6167/ +ProxyPass /_matrix/ http://localhost:6167/_matrix/ +ProxyPassReverse /_matrix/ http://localhost:6167/_matrix/ Include /etc/letsencrypt/options-ssl-apache.conf SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS From ac99e05714767bfcca27dbbbc3725b2e7548f61c Mon Sep 17 00:00:00 2001 From: Marcel Date: Sun, 11 Apr 2021 10:51:43 +0000 Subject: [PATCH 0504/1727] docs: capitalize a "conduit" that was missed --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 53ca2ea..4601ab5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -20,7 +20,7 @@ $ sudo chmod +x /usr/local/bin/matrix-conduit ## Adding a Conduit user -While conduit can run as any user it is usually better to use dedicated users for different services. +While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. 
In Debian you can use this command to create a Conduit user: From 8773e5013d08a37851c4379db6bc66641602ece2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:01:27 +0200 Subject: [PATCH 0505/1727] feat: incoming invites over federation --- Cargo.lock | 36 +++--- Cargo.toml | 8 +- src/client_server/account.rs | 10 +- src/client_server/membership.rs | 3 + src/client_server/sync.rs | 45 ++----- src/database.rs | 10 +- src/database/pusher.rs | 4 +- src/database/rooms.rs | 203 +++++++++++++++++++++++--------- src/main.rs | 1 + src/server_server.rs | 133 +++++++++++++++++---- 10 files changed, 307 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf881c2..42042b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "assign", "js_int", @@ -1645,7 +1645,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "http", "percent-encoding", @@ -1660,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1671,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "ruma-api", "ruma-common", @@ -1685,7 +1685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "assign", "http", @@ -1704,7 +1704,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "indexmap", "js_int", @@ -1720,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = 
"git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-common", @@ -1734,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1745,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1760,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "paste", "rand", @@ -1774,7 +1774,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro2", "quote", @@ -1785,12 +1785,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "ruma-api", "ruma-common", @@ -1803,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-api", @@ -1818,7 +1818,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "form_urlencoded", "itoa", @@ -1831,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = 
"git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1842,7 +1842,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "base64 0.13.0", "ring", @@ -2120,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" +source = "git+https://github.com/timokoesters/state-res?rev=2e90b36babeb0d6b99ce8d4b513302a25dcdffc1#2e90b36babeb0d6b99ce8d4b513302a25dcdffc1" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 3109dd8..a28c08d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "b11de1e1f9d3c15267d09617131cf217f8277fa4", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2fc0b0728bf027a5899839ad94bb3091b", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "2e90b36babeb0d6b99ce8d4b513302a25dcdffc1", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 
4c5b60c..2241d45 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -617,11 +617,11 @@ pub async fn deactivate_route( } // Leave all joined rooms and reject all invitations - for room_id in db - .rooms - .rooms_joined(&sender_user) - .chain(db.rooms.rooms_invited(&sender_user)) - { + for room_id in db.rooms.rooms_joined(&sender_user).chain( + db.rooms + .rooms_invited(&sender_user) + .map(|t| t.map(|(r, _)| r)), + ) { let room_id = room_id?; let event = member::MemberEventContent { membership: member::MembershipState::Leave, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3f4f23f..3876246 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -599,6 +599,8 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid user id in send_join response.") })?; + let invite_state = Vec::new(); // TODO add a few important events + // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth db.rooms.update_membership( @@ -616,6 +618,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid membership state content.") })?, &pdu.sender, + Some(invite_state), &db.account_data, &db.globals, )?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index bd7046d..f1ad9a5 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -588,44 +588,23 @@ pub async fn sync_events_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_invited(&sender_user) { - let room_id = room_id?; - let mut invited_since_last_sync = false; - for pdu in db.rooms.pdus_since(&sender_user, &room_id, since)? { - let (_, pdu) = pdu?; - if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_user.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + for result in db.rooms.rooms_invited(&sender_user) { + let (room_id, invite_state_events) = result?; + let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; - if content.membership == MembershipState::Invite { - invited_since_last_sync = true; - break; - } - } - } - - if !invited_since_last_sync { + // Invited before last sync + if Some(since) >= invite_count { continue; } - let invited_room = sync_events::InvitedRoom { - invite_state: sync_events::InviteState { - events: db - .rooms - .room_state_full(&room_id)? 
- .into_iter() - .map(|(_, pdu)| pdu.to_stripped_state_event()) - .collect(), + invited_rooms.insert( + room_id.clone(), + sync_events::InvitedRoom { + invite_state: sync_events::InviteState { + events: invite_state_events, + }, }, - }; - - if !invited_room.is_empty() { - invited_rooms.insert(room_id.clone(), invited_room); - } + ); } for user_id in left_encrypted_users { diff --git a/src/database.rs b/src/database.rs index a266c21..211c3f4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -161,8 +161,8 @@ impl Database { userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, - userroomid_invited: db.open_tree("userroomid_invited")?, - roomuserid_invited: db.open_tree("roomuserid_invited")?, + userroomid_invitestate: db.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, userroomid_left: db.open_tree("userroomid_left")?, statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, @@ -236,7 +236,11 @@ impl Database { ); futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push(self.rooms.userroomid_invited.watch_prefix(&userid_prefix)); + futures.push( + self.rooms + .userroomid_invitestate + .watch_prefix(&userid_prefix), + ); futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); // Events for rooms we are in diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 9a9452c..f4c02d0 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -216,11 +216,11 @@ pub async fn send_push_notice( notify = Some(n); } - if notify == Some(true) { + if notify == Some(true) { send_notice(unread, pusher, tweaks, pdu, db).await?; } // Else the event triggered no actions - + Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 81697e3..ba98790 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -11,10 +11,10 @@ use ruma::{ events::{ ignored_user_list, room::{create::CreateEventContent, member, message}, - EventType, + AnyStrippedStateEvent, EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; use state_res::{Event, StateMap}; @@ -51,8 +51,8 @@ pub struct Rooms { pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, - pub(super) userroomid_invited: sled::Tree, - pub(super) roomuserid_invited: sled::Tree, + pub(super) userroomid_invitestate: sled::Tree, + pub(super) roomuserid_invitecount: sled::Tree, pub(super) userroomid_left: sled::Tree, /// Remember the current state hash of a room. @@ -145,12 +145,12 @@ impl Rooms { /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] - pub fn state_get( + pub fn state_get_id( &self, shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { let mut key = event_type.as_ref().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); @@ -161,7 +161,8 @@ impl Rooms { let mut stateid = shortstatehash.to_be_bytes().to_vec(); stateid.extend_from_slice(&shortstatekey); - self.stateid_shorteventid + Ok(self + .stateid_shorteventid .get(&stateid)? 
.map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() @@ -178,13 +179,24 @@ impl Rooms { ) }) .map(|r| r.ok()) - .flatten() - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + .flatten()) } else { Ok(None) } } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn state_get( + &self, + shortstatehash: u64, + event_type: &EventType, + state_key: &str, + ) -> Result> { + self.state_get_id(shortstatehash, event_type, state_key)? + .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + } + /// Returns the state hash for this pdu. #[tracing::instrument(skip(self))] pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { @@ -354,6 +366,21 @@ impl Rooms { } } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &EventType, + state_key: &str, + ) -> Result> { + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_get_id(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] pub fn room_state_get( @@ -395,7 +422,7 @@ impl Rooms { } /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else::, _, _>( @@ -666,29 +693,64 @@ impl Rooms { // if the state_key fails let target_user_id = UserId::try_from(state_key.clone()) .expect("This state_key was previously validated"); + + let membership = serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ) + })? + .clone(), + ) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid membership state content.", + ) + })?; + + let invite_state = match membership { + member::MembershipState::Invite => { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &pdu.room_id, + &EventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomName, "")? + { + state.push(e.to_stripped_state_event()); + } + Some(state) + } + _ => None, + }; + // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth self.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::( - pdu.content - .get("membership") - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content", - ) - })? 
- .clone(), - ) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid membership state content.", - ) - })?, + membership, &pdu.sender, + invite_state, &db.account_data, &db.globals, )?; @@ -1044,10 +1106,10 @@ impl Rooms { // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(self.get_pdu_json(event_id).ok()??.get("depth")?.as_u64()?)) + .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) .max() - .unwrap_or(0_u64) - + 1; + .unwrap_or(uint!(0)) + + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { @@ -1071,9 +1133,7 @@ impl Rooms { content, state_key, prev_events, - depth: depth - .try_into() - .map_err(|_| Error::bad_database("Depth is invalid"))?, + depth, auth_events: auth_events .iter() .map(|(_, pdu)| pdu.event_id.clone()) @@ -1384,6 +1444,7 @@ impl Rooms { user_id: &UserId, membership: member::MembershipState, sender: &UserId, + invite_state: Option>>, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { @@ -1487,8 +1548,8 @@ impl Rooms { self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; self.userroomid_left.remove(&userroom_id)?; } member::MembershipState::Invite => { @@ -1508,8 +1569,13 @@ impl Rooms { } self.roomserverids.insert(&roomserver_id, &[])?; - self.userroomid_invited.insert(&userroom_id, &[])?; - self.roomuserid_invited.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.insert( + &userroom_id, + serde_json::to_vec(&invite_state.unwrap_or_default()) + .expect("state to bytes always works"), + )?; + self.roomuserid_invitecount + .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_left.remove(&userroom_id)?; @@ -1526,8 +1592,8 @@ impl Rooms { self.userroomid_left.insert(&userroom_id, &[])?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; } _ => {} } @@ -1797,7 +1863,7 @@ impl Rooms { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.roomuserid_invited + self.roomuserid_invitecount .scan_prefix(prefix) .keys() .map(|key| { @@ -1816,6 +1882,22 @@ impl Rooms { }) } + /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] + pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { @@ -1840,27 +1922,32 @@ impl Rooms { /// Returns an iterator over all rooms a user was invited to. 
#[tracing::instrument(skip(self))] - pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { + pub fn rooms_invited( + &self, + user_id: &UserId, + ) -> impl Iterator>)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_invited - .scan_prefix(prefix) - .keys() - .map(|key| { - Ok(RoomId::try_from( - utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, + self.userroomid_invitestate.scan_prefix(prefix).map(|r| { + let (key, state) = r?; + let room_id = RoomId::try_from( + utils::string_from_bytes( + &key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?) - }) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok((room_id, state)) + }) } /// Returns an iterator over all rooms a user left. @@ -1906,7 +1993,7 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_invited.get(userroom_id)?.is_some()) + Ok(self.userroomid_invitestate.get(userroom_id)?.is_some()) } pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { diff --git a/src/main.rs b/src/main.rs index 4ccc025..6fd04ce 100644 --- a/src/main.rs +++ b/src/main.rs @@ -167,6 +167,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_event_route, server_server::get_missing_events_route, server_server::get_room_state_ids_route, + server_server::create_invite_route, server_server::get_profile_information_route, ], ) diff --git a/src/server_server.rs b/src/server_server.rs index 4a93a3d..1fad54e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -10,20 +10,24 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_remote_server_keys, get_server_keys, - get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, + get_remote_server_keys, get_server_keys, get_server_version, ServerSigningKeys, + VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, + membership::create_invite, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::{room::create::CreateEventContent, EventType}, + events::{ + room::{create::CreateEventContent, member::MembershipState}, + EventType, + }, serde::{to_canonical_value, Raw}, signatures::CanonicalJsonValue, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -332,13 +336,13 @@ pub async fn request_well_known( #[tracing::instrument(skip(db))] pub fn get_server_version_route( db: State<'_, Database>, -) -> ConduitResult { +) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - Ok(get_server_version::Response { - server: Some(get_server_version::Server { + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { name: 
Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), }), @@ -1406,12 +1410,9 @@ pub fn get_event_route<'a>( origin: db.globals.server_name().to_owned(), origin_server_ts: SystemTime::now(), pdu: PduEvent::convert_to_outgoing_federation_event( - serde_json::from_value( - db.rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, - ) - .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + db.rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, ), } .into()) @@ -1438,9 +1439,10 @@ pub fn get_missing_events_route<'a>( if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { if body.earliest_events.contains( &serde_json::from_value( - pdu.get("event_id") - .cloned() - .ok_or_else(|| Error::bad_database("Event in db has no event_id field."))?, + serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no event_id field.") + })?) + .expect("canonical json is valid json value"), ) .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?, ) { @@ -1449,16 +1451,14 @@ pub fn get_missing_events_route<'a>( } queued_events.extend_from_slice( &serde_json::from_value::>( - pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Invalid prev_events field of pdu in db.") - })?, + serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no prev_events field.") + })?) + .expect("canonical json is valid json value"), ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(PduEvent::convert_to_outgoing_federation_event( - serde_json::from_value(pdu) - .map_err(|_| Error::bad_database("Invalid pdu in database."))?, - )); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); } i += 1; } @@ -1518,6 +1518,93 @@ pub fn get_room_state_ids_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v2/invite/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn create_invite_route<'a>( + db: State<'a, Database>, + body: Ruma, +) -> ConduitResult { + if body.room_version < RoomVersionId::Version6 { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + let mut signed_event = utils::to_canonical_object(&body.event) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + let sender = serde_json::from_value( + serde_json::to_value( + signed_event + .get("sender") + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender field.") + })? + .clone(), + ) + .expect("CanonicalJsonValue to serde_json::Value always works"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + let invited_user = serde_json::from_value( + serde_json::to_value( + signed_event + .get("state_key") + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Event had no state_key field.") + })? 
+ .clone(), + ) + .expect("CanonicalJsonValue to serde_json::Value always works"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + let mut invite_state = body.invite_room_state.clone(); + + let mut event = serde_json::from_str::>( + &body.event.json().to_string(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$dummy".into()); + invite_state.push( + serde_json::from_value::(event.into()) + .map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })? + .to_stripped_state_event(), + ); + + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db.account_data, + &db.globals, + )?; + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") From a8231eef25ca427ecec61cfe1e0c9c2412b26c83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:29:37 +0200 Subject: [PATCH 0506/1727] fix: alias parsing --- src/database/rooms.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ba98790..09944cf 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1668,8 +1668,10 @@ impl Rooms { .scan_prefix(prefix) .values() .map(|bytes| { - Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::bad_database("Alias in aliasid_alias is invalid."))?) + Ok(utils::string_from_bytes(&bytes?) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))?) 
}) } From a961732f5f52c7bbba11335f84b3f0ceae9e9404 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:35:17 +0200 Subject: [PATCH 0507/1727] fix: overflow --- src/database/rooms/edus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 9e43fe1..c48f4c2 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -382,7 +382,7 @@ impl RoomEdus { .ok()?, )) }) - .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) + .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) // 5 Minutes { // Send new presence events to set the user offline From 1dc85895a7a243b46ae8c60615c6b268f4c8f9df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 12 Apr 2021 10:12:52 +0200 Subject: [PATCH 0508/1727] fix: show warning for invalid user ids --- src/client_server/membership.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3876246..149df93 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -595,7 +595,8 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { - let target_user_id = UserId::try_from(state_key.clone()).map_err(|_| { + let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { + warn!("Invalid user id in send_join response: {}: {}", state_key, e); Error::BadServerResponse("Invalid user id in send_join response.") })?; From 662a0cf1df7c33ec1f3c00b6f0a722cdbc03a3d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 12 Apr 2021 12:40:16 +0200 Subject: [PATCH 0509/1727] improvement: better and more efficient message count calculation --- src/client_server/membership.rs | 5 +- src/client_server/read_marker.rs | 4 ++ src/client_server/sync.rs | 38 ++++++------ src/database.rs | 3 + src/database/pusher.rs | 54 +++++++++++------ src/database/rooms.rs | 101 +++++++++++++++++++++++++++++-- src/database/sending.rs | 32 +++------- 7 files changed, 169 insertions(+), 68 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 149df93..e816005 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -596,7 +596,10 @@ async fn join_room_by_id_helper( if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { - warn!("Invalid user id in send_join response: {}: {}", state_key, e); + warn!( + "Invalid user id in send_join response: {}: {}", + state_key, e + ); Error::BadServerResponse("Invalid user id in send_join response.") })?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 555b7e7..166e59a 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -47,6 +47,8 @@ pub async fn set_read_marker_route( ))?, &db.globals, )?; + db.rooms + .reset_notification_counts(&sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -103,6 +105,8 @@ pub async fn create_receipt_route( ))?, &db.globals, )?; + db.rooms + .reset_notification_counts(&sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 
f1ad9a5..fe14208 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -12,7 +12,7 @@ use ruma::{ use rocket::{get, tokio}; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, time::Duration, }; @@ -370,23 +370,23 @@ pub async fn sync_events_route( ); let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_user)? { - Some( - (db.rooms - .pdus_since(&sender_user, &room_id, last_read)? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into(), - ) - } else { - None - } + Some( + db.rooms + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + db.rooms + .highlight_count(&sender_user, &room_id)? + .try_into() + .expect("highlight count can't go that high"), + ) } else { None }; @@ -440,7 +440,7 @@ pub async fn sync_events_route( invited_member_count: invited_member_count.map(|n| (n as u32).into()), }, unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, + highlight_count, notification_count, }, timeline: sync_events::Timeline { diff --git a/src/database.rs b/src/database.rs index 211c3f4..9d629dd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -165,6 +165,9 @@ impl Database { roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, userroomid_left: db.open_tree("userroomid_left")?, + userroomid_notificationcount: db.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: db.open_tree("userroomid_highlightcount")?, + statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, stateid_shorteventid: db.open_tree("stateid_shorteventid")?, eventid_shorteventid: db.open_tree("eventid_shorteventid")?, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f4c02d0..e2bd3f1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -174,29 +174,10 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { - let power_levels: PowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_value(ev.content) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let ctx = PushConditionRoomCtx { - room_id: pdu.room_id.clone(), - member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), - user_display_name: user.localpart().into(), // TODO: Use actual display name - users_power_levels: power_levels.users, - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications, - }; - let mut notify = None; let mut tweaks = Vec::new(); - for action in ruleset.get_actions(&pdu.to_sync_room_event(), &ctx) { + for action in get_actions(user, &ruleset, pdu, db)? { let n = match action { Action::DontNotify => false, // TODO: Implement proper support for coalesce @@ -224,6 +205,39 @@ pub async fn send_push_notice( Ok(()) } +pub fn get_actions<'a>( + user: &UserId, + ruleset: &'a Ruleset, + pdu: &PduEvent, + db: &Database, +) -> Result> { + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? 
+ .map(|ev| { + serde_json::from_value(ev.content) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let ctx = PushConditionRoomCtx { + room_id: pdu.room_id.clone(), + member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + user_display_name: db + .users + .displayname(&user)? + .unwrap_or(user.localpart().to_owned()), + users_power_levels: power_levels.users, + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications, + }; + + Ok(ruleset + .get_actions(&pdu.to_sync_room_event(), &ctx) + .map(Clone::clone)) +} + async fn send_notice( unread: UInt, pusher: &Pusher, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 09944cf..3f37de6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -9,10 +9,11 @@ use ring::digest; use ruma::{ api::client::error::ErrorKind, events::{ - ignored_user_list, + ignored_user_list, push_rules, room::{create::CreateEventContent, member, message}, AnyStrippedStateEvent, EventType, }, + push::{self, Action, Tweak}, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -26,7 +27,7 @@ use std::{ sync::Arc, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, pusher}; /// The unique identifier of each state group. /// @@ -51,10 +52,13 @@ pub struct Rooms { pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, - pub(super) userroomid_invitestate: sled::Tree, - pub(super) roomuserid_invitecount: sled::Tree, + pub(super) userroomid_invitestate: sled::Tree, // InviteState = Vec> + pub(super) roomuserid_invitecount: sled::Tree, // InviteCount = Count pub(super) userroomid_left: sled::Tree, + pub(super) userroomid_notificationcount: sled::Tree, // NotifyCount = u64 + pub(super) userroomid_highlightcount: sled::Tree, // HightlightCount = u64 + /// Remember the current state hash of a room. pub(super) roomid_shortstatehash: sled::Tree, /// Remember the state hash at events in the past. @@ -649,6 +653,7 @@ impl Rooms { // fails self.edus .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; + self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; self.pduid_pdu.insert( &pdu_id, @@ -673,6 +678,45 @@ impl Rooms { continue; } + let rules_for_user = db + .account_data + .get::(None, &user, EventType::PushRules)? + .map(|ev| ev.content.global) + .unwrap_or_else(|| push::Ruleset::server_default(&user)); + + let mut highlight = false; + let mut notify = false; + + for action in pusher::get_actions(&user, &rules_for_user, pdu, db)? { + match action { + Action::DontNotify => notify = false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => notify = true, + Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + } + _ => {} + }; + } + + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(pdu.room_id.as_bytes()); + + if notify { + &self + .userroomid_notificationcount + .update_and_fetch(&userroom_id, utils::increment)? + .expect("utils::increment will always put in a value"); + } + + if highlight { + &self + .userroomid_highlightcount + .update_and_fetch(&userroom_id, utils::increment)? 
+ .expect("utils::increment will always put in a value"); + } + for senderkey in db .pusher .get_pusher_senderkeys(&user) @@ -738,6 +782,14 @@ impl Rooms { { state.push(e.to_stripped_state_event()); } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomMember, pdu.sender.as_str())? + { + state.push(e.to_stripped_state_event()); + } + + state.push(pdu.to_stripped_state_event()); + Some(state) } _ => None, @@ -844,6 +896,47 @@ impl Rooms { Ok(()) } + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + self.userroomid_highlightcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + + Ok(()) + } + + pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid notification count in db.")) + }) + .unwrap_or(Ok(0)) + } + + pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_highlightcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid highlight count in db.")) + }) + .unwrap_or(Ok(0)) + } + /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) diff --git a/src/database/sending.rs b/src/database/sending.rs index d6dcead..ffd3ed6 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -16,7 +16,7 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - push, uint, ServerName, UInt, UserId, + push, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -432,32 +432,16 @@ impl Sending { let rules_for_user = db .account_data .get::(None, &userid, EventType::PushRules) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + .unwrap_or_default() .map(|ev| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); - let unread: UInt = if let Some(last_read) = db + let unread: UInt = db .rooms - .edus - .private_read_get(&pdu.room_id, &userid) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? - { - (db.rooms - .pdus_since(&userid, &pdu.room_id, last_read) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into() - } else { - // Just return zero unread messages - uint!(0) - }; + .notification_count(&userid, &pdu.room_id) + .map_err(|e| (kind.clone(), e))? 
+ .try_into() + .expect("notifiation count can't go that high"); let permit = db.sending.maximum_requests.acquire().await; From b4f79b77ba37c875c9c9c78cf8a03b1eeda83d64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 15:00:45 +0200 Subject: [PATCH 0510/1727] feat: reject invites over federation --- Cargo.lock | 106 +++++------ Cargo.toml | 6 +- src/appservice_server.rs | 17 +- src/client_server/membership.rs | 35 +--- src/client_server/sync.rs | 95 ++-------- src/database.rs | 5 +- src/database/pusher.rs | 17 +- src/database/rooms.rs | 299 +++++++++++++++++++++++++++++--- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 38 ++-- 10 files changed, 391 insertions(+), 241 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42042b6..d3da6fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -457,9 +457,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" dependencies = [ "futures-channel", "futures-core", @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" dependencies = [ "futures-core", "futures-sink", @@ -482,15 +482,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" [[package]] name = "futures-executor" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" dependencies = [ "futures-core", "futures-task", @@ -499,15 +499,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" [[package]] name = "futures-macro" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -517,21 +517,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" +checksum = 
"ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" dependencies = [ "futures-channel", "futures-core", @@ -650,9 +650,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ "bytes", "fnv", @@ -672,9 +672,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.3.5" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" +checksum = "bc35c995b9d93ec174cf9a27d425c7892722101e14993cd227fdb51d70cf9589" [[package]] name = "httpdate" @@ -1497,9 +1497,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" dependencies = [ "base64 0.13.0", "bytes", @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "assign", "js_int", @@ -1645,8 +1645,9 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ + "bytes", "http", "percent-encoding", "ruma-api-macros", @@ -1660,7 +1661,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1671,7 +1672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "ruma-api", "ruma-common", @@ -1685,9 +1686,10 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" 
dependencies = [ "assign", + "bytes", "http", "js_int", "maplit", @@ -1704,7 +1706,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "indexmap", "js_int", @@ -1720,7 +1722,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-common", @@ -1734,7 +1736,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1745,7 +1747,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1762,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "paste", "rand", @@ -1774,7 +1776,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro2", "quote", @@ -1785,12 +1787,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "ruma-api", "ruma-common", @@ -1803,7 +1805,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = 
"git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-api", @@ -1818,7 +1820,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "form_urlencoded", "itoa", @@ -1831,7 +1833,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1842,7 +1844,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "base64 0.13.0", "ring", @@ -1910,9 +1912,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ "ring", "untrusted", @@ -2120,7 +2122,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=2e90b36babeb0d6b99ce8d4b513302a25dcdffc1#2e90b36babeb0d6b99ce8d4b513302a25dcdffc1" +source = "git+https://github.com/timokoesters/state-res?rev=94534b8ff3e71b544ae36206abc182321e9d41f1#94534b8ff3e71b544ae36206abc182321e9d41f1" dependencies = [ "itertools 0.10.0", "log", @@ -2182,9 +2184,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" +checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb" dependencies = [ "proc-macro2", "quote", @@ -2330,9 +2332,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" dependencies = [ "autocfg", "bytes", @@ -2381,9 +2383,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" dependencies = [ "bytes", "futures-core", @@ -2558,9 +2560,9 @@ dependencies 
= [ [[package]] name = "uncased" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300932469d646d39929ffe84ad5c1837beecf602519ef5695e485b472de4082b" +checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" dependencies = [ "version_check", ] diff --git a/Cargo.toml b/Cargo.toml index a28c08d..84e40d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "b11de1e1f9d3c15267d09617131cf217f8277fa4", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "6394609feb4af5c43b840fab85b824b13cebb156", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "2e90b36babeb0d6b99ce8d4b513302a25dcdffc1", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "94534b8ff3e71b544ae36206abc182321e9d41f1", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 04f14c0..1b72c76 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; use log::warn; -use ruma::api::OutgoingRequest; +use ruma::api::{IncomingResponse, OutgoingRequest}; use std::{ convert::{TryFrom, TryInto}, fmt::Debug, @@ -66,15 +66,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error: {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { warn!( @@ -86,7 +81,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), diff --git a/src/client_server/membership.rs 
b/src/client_server/membership.rs index e816005..d491ca0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -91,37 +91,7 @@ pub async fn leave_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_value::>( - db.rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &sender_user.to_string(), - )? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content, - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = member::MembershipState::Leave; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - &sender_user, - &body.room_id, - &db, - )?; + db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush().await?; @@ -480,6 +450,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid make_join event json received from server.") })?; + // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), to_canonical_value(db.globals.server_name()) @@ -699,5 +670,7 @@ async fn join_room_by_id_helper( )?; } + db.flush().await?; + Ok(join_room_by_id::Response::new(room_id.clone()).into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fe14208..66a1e13 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,5 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::error; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -494,83 +493,17 @@ pub async fn sync_events_route( } let mut left_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_left(&sender_user) { - let room_id = room_id?; + for result in db.rooms.rooms_left(&sender_user) { + let (room_id, left_state_events) = result?; + let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; - let since_member = if let Some(since_member) = db - .rooms - .pdus_after(sender_user, &room_id, since) - .next() - .and_then(|pdu| pdu.ok()) - .and_then(|pdu| { - db.rooms - .pdu_shortstatehash(&pdu.1.event_id) - .ok()? - .ok_or_else(|| { - error!("{:?}", pdu.1); - Error::bad_database("Pdu in db doesn't have a state hash.") - }) - .ok() - }) - .and_then(|shortstatehash| { - db.rooms - .state_get(shortstatehash, &EventType::RoomMember, sender_user.as_str()) - .ok()? - .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) - .ok() - }) - .and_then(|pdu| { - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .map(|content| (pdu, content)) - .ok() - }) { - since_member - } else { - // We couldn't find the since_member event. 
This is very weird - we better abort + // Left before last sync + if Some(since) >= left_count { continue; - }; + } - let left_since_last_sync = since_member.1.membership == MembershipState::Join; - - let left_room = if left_since_last_sync { - device_list_left.extend( - db.rooms - .room_members(&room_id) - .filter_map(|user_id| Some(user_id.ok()?)) - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send if the sender doesn't share any encrypted room with the target - // anymore - !share_encrypted_room(&db, sender_user, user_id, &room_id) - }), - ); - - let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; - let mut room_events = pdus - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(_, pdu)| &since_member.0 != pdu) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect::>(); - room_events.push(since_member.0.to_sync_room_event()); - - sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - timeline: sync_events::Timeline { - limited: false, - prev_batch: Some(next_batch.clone()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, - } - } else { + left_rooms.insert( + room_id.clone(), sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, timeline: sync_events::Timeline { @@ -578,13 +511,11 @@ pub async fn sync_events_route( prev_batch: Some(next_batch.clone()), events: Vec::new(), }, - state: sync_events::State { events: Vec::new() }, - } - }; - - if !left_room.is_empty() { - left_rooms.insert(room_id.clone(), left_room); - } + state: sync_events::State { + events: left_state_events, + }, + }, + ); } let mut invited_rooms = BTreeMap::new(); diff --git a/src/database.rs b/src/database.rs index 9d629dd..6bb1b17 100644 --- a/src/database.rs +++ b/src/database.rs @@ -163,7 +163,8 @@ impl Database { roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, userroomid_invitestate: db.open_tree("userroomid_invitestate")?, roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, - userroomid_left: db.open_tree("userroomid_left")?, + userroomid_leftstate: db.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: db.open_tree("roomuserid_leftcount")?, userroomid_notificationcount: db.open_tree("userroomid_notificationcount")?, userroomid_highlightcount: db.open_tree("userroomid_highlightcount")?, @@ -244,7 +245,7 @@ impl Database { .userroomid_invitestate .watch_prefix(&userid_prefix), ); - futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); + futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index e2bd3f1..be30576 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -7,7 +7,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - OutgoingRequest, + IncomingResponse, OutgoingRequest, }, events::{room::power_levels::PowerLevelsEventContent, EventType}, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, @@ -129,15 +129,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = 
reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { info!( @@ -149,7 +144,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3f37de6..caf7a09 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1,17 +1,18 @@ mod edus; pub use edus::RoomEdus; +use member::MembershipState; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::{debug, error, warn}; use regex::Regex; use ring::digest; use ruma::{ - api::client::error::ErrorKind, + api::{client::error::ErrorKind, federation}, events::{ ignored_user_list, push_rules, room::{create::CreateEventContent, member, message}, - AnyStrippedStateEvent, EventType, + AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, push::{self, Action, Tweak}, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, @@ -54,7 +55,8 @@ pub struct Rooms { pub(super) roomuseroncejoinedids: sled::Tree, pub(super) userroomid_invitestate: sled::Tree, // InviteState = Vec> pub(super) roomuserid_invitecount: sled::Tree, // InviteCount = Count - pub(super) userroomid_left: sled::Tree, + pub(super) userroomid_leftstate: sled::Tree, + pub(super) roomuserid_leftcount: sled::Tree, pub(super) userroomid_notificationcount: sled::Tree, // NotifyCount = u64 pub(super) userroomid_highlightcount: sled::Tree, // HightlightCount = u64 @@ -671,7 +673,7 @@ impl Rooms { .users .iter() .filter_map(|r| r.ok()) - .filter(|user_id| db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) { // Don't notify the user of their own events if user == pdu.sender { @@ -782,9 +784,11 @@ impl Rooms { { state.push(e.to_stripped_state_event()); } - if let Some(e) = - self.room_state_get(&pdu.room_id, &EventType::RoomMember, pdu.sender.as_str())? - { + if let Some(e) = self.room_state_get( + &pdu.room_id, + &EventType::RoomMember, + pdu.sender.as_str(), + )? 
{ state.push(e.to_stripped_state_event()); } @@ -1380,7 +1384,7 @@ impl Rooms { .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - || db.rooms.room_members(&room_id).any(|userid| { + || self.room_members(&room_id).any(|userid| { userid.map_or(false, |userid| users.is_match(userid.as_str())) }) }; @@ -1537,7 +1541,7 @@ impl Rooms { user_id: &UserId, membership: member::MembershipState, sender: &UserId, - invite_state: Option>>, + last_state: Option>>, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { @@ -1643,7 +1647,8 @@ impl Rooms { self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_left.remove(&userroom_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Invite => { // We want to know if the sender is ignored by the receiver @@ -1664,14 +1669,15 @@ impl Rooms { self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_invitestate.insert( &userroom_id, - serde_json::to_vec(&invite_state.unwrap_or_default()) + serde_json::to_vec(&last_state.unwrap_or_default()) .expect("state to bytes always works"), )?; self.roomuserid_invitecount .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_left.remove(&userroom_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Leave | member::MembershipState::Ban => { if self @@ -1682,7 +1688,12 @@ impl Rooms { { self.roomserverids.remove(&roomserver_id)?; } - self.userroomid_left.insert(&userroom_id, &[])?; + self.userroomid_leftstate.insert( + &userroom_id, + serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount + .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -1694,13 +1705,191 @@ impl Rooms { Ok(()) } + pub async fn leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + // Ask a remote server if we don't have this room + if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = self + .invite_state(user_id, room_id)? + .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + self.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + &db.account_data, + &db.globals, + )?; + } else { + let mut event = serde_json::from_value::>( + self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? 
+ .content, + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = member::MembershipState::Leave; + + self.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + db, + )?; + } + + Ok(()) + } + + async fn remote_leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = db + .rooms + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers = invite_state + .iter() + .filter_map(|event| { + serde_json::from_str::(&event.json().to_string()).ok() + }) + .filter_map(|event| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::try_from(sender).ok()) + .map(|user| user.server_name().to_owned()); + + for remote_server in servers { + let make_leave_response = db + .sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::get_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version = match make_leave_response.room_version { + Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = + serde_json::from_str::(make_leave_response.event.json().get()) + .map_err(|_| { + Error::BadServerResponse("Invalid make_leave event json received from server.") + })?; + + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .map_err(|_| Error::bad_database("Invalid server name found"))?, + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + to_canonical_value(utils::millis_since_unix_epoch()) + .expect("Timestamp is valid js_int value"), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut leave_event_stub, + &room_version, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + db.sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) + } + /// Makes a user forget a room. pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - self.userroomid_left.remove(userroom_id)?; + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(userroom_id)?; + self.roomuserid_leftcount.remove(roomuser_id)?; Ok(()) } @@ -1977,7 +2166,6 @@ impl Rooms { }) } - /// Returns an iterator over all invited members of a room. #[tracing::instrument(skip(self))] pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.as_bytes().to_vec(); @@ -1993,6 +2181,21 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] + pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid leftcount in db.") + })?)) + }) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { @@ -2045,25 +2248,75 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] + pub fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + + self.userroomid_invitestate + .get(key)? 
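// Sketch of the event-id flow in the remote leave code above: remove any "event_id",
// hash the remaining canonical content, and re-insert "$<hash>" as the id. The real code
// uses ruma::signatures::reference_hash (a SHA-256 reference hash over the redacted,
// canonical event); std's DefaultHasher below is only a stand-in so the sketch runs
// without ruma, it is not the real hash.
use std::collections::hash_map::DefaultHasher;
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};

fn assign_reference_id(event: &mut BTreeMap<String, String>) -> String {
    event.remove("event_id"); // the id is not part of the hashed content in room v3+
    let mut hasher = DefaultHasher::new();
    for (key, value) in event.iter() {
        key.hash(&mut hasher);
        value.hash(&mut hasher);
    }
    let event_id = format!("${:x}", hasher.finish());
    event.insert("event_id".to_owned(), event_id.clone());
    event_id
}

fn main() {
    let mut event = BTreeMap::new();
    event.insert("type".to_owned(), "m.room.member".to_owned());
    event.insert("membership".to_owned(), "leave".to_owned());
    let id = assign_reference_id(&mut event);
    assert!(id.starts_with('$'));
    assert_eq!(event.get("event_id"), Some(&id));
}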
+ .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + pub fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + + self.userroomid_leftstate + .get(key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + /// Returns an iterator over all rooms a user left. #[tracing::instrument(skip(self))] - pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { + pub fn rooms_left( + &self, + user_id: &UserId, + ) -> impl Iterator>)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_left.scan_prefix(prefix).keys().map(|key| { - Ok(RoomId::try_from( + self.userroomid_leftstate.scan_prefix(prefix).map(|r| { + let (key, state) = r?; + let room_id = RoomId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_left is invalid unicode.") + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("Room ID in userroomid_left is invalid."))?) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok((room_id, state)) }) } @@ -2096,6 +2349,6 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_left.get(userroom_id)?.is_some()) + Ok(self.userroomid_leftstate.get(userroom_id)?.is_some()) } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8e1d34f..c60c04e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,9 +1,10 @@ use crate::Error; use ruma::{ + api::OutgoingResponse, identifiers::{DeviceId, UserId}, Outgoing, }; -use std::{convert::TryInto, ops::Deref}; +use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { @@ -145,7 +146,7 @@ where let mut body = Vec::new(); handle.read_to_end(&mut body).await.unwrap(); - let http_request = http_request.body(body.clone()).unwrap(); + let http_request = http_request.body(&*body).unwrap(); debug!("{:?}", http_request); match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { @@ -178,9 +179,9 @@ impl Deref for Ruma { /// This struct converts ruma responses into rocket http responses. 
pub type ConduitResult = std::result::Result, Error>; -pub struct RumaResponse>>>(pub T); +pub struct RumaResponse(pub T); -impl>>> From for RumaResponse { +impl From for RumaResponse { fn from(t: T) -> Self { Self(t) } @@ -189,12 +190,11 @@ impl>>> From for RumaResponse { #[cfg(feature = "conduit_bin")] impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where - T: Send + TryInto>>, - T::Error: Send, + T: Send + OutgoingResponse, 'o: 'r, { fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - let http_response: Result, _> = self.0.try_into(); + let http_response: Result, _> = self.0.try_into_http_response(); match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); diff --git a/src/server_server.rs b/src/server_server.rs index 1fad54e..304bc19 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,7 +18,7 @@ use ruma::{ query::get_profile_information, transactions::send_transaction_message, }, - OutgoingRequest, + IncomingResponse, OutgoingRequest, OutgoingResponse, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -173,15 +173,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { info!( @@ -195,7 +190,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), @@ -350,6 +345,7 @@ pub fn get_server_version_route( .into()) } +// Response type for this endpoint is Json because we need to calculate a signature for the response #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { @@ -369,7 +365,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { }, ); let mut response = serde_json::from_slice( - http::Response::try_from(get_server_keys::v2::Response { + get_server_keys::v2::Response { server_key: ServerSigningKeys { server_name: db.globals.server_name().to_owned(), verify_keys, @@ -377,7 +373,8 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { signatures: BTreeMap::new(), valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), }, - }) + } + .try_into_http_response() .unwrap() .body(), ) @@ -745,7 +742,7 @@ fn handle_incoming_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - debug!("Fetching auth events."); + debug!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_events( db, origin, @@ -757,7 +754,10 @@ fn handle_incoming_pdu<'a>( .map_err(|e| e.to_string())?; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!("Checking auth."); + debug!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); // Build map of auth events let mut auth_events = BTreeMap::new(); @@ -1151,7 +1151,7 @@ pub(crate) async fn fetch_and_handle_events( // a. 
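// The ruma_wrapper change above swaps the TryInto<http::Response<Vec<u8>>> bound for
// ruma's OutgoingResponse trait. A toy version of that shape; the trait and response
// type below are stand-ins, not ruma's real definitions.
struct HttpResponse {
    status: u16,
    body: Vec<u8>,
}

trait OutgoingResponse {
    fn try_into_http_response(self) -> Result<HttpResponse, String>;
}

struct RumaResponse<T>(T);

impl<T: OutgoingResponse> RumaResponse<T> {
    // the real impl builds a rocket::Response from the converted http response
    fn respond(self) -> Result<HttpResponse, String> {
        self.0.try_into_http_response()
    }
}

struct Pong;

impl OutgoingResponse for Pong {
    fn try_into_http_response(self) -> Result<HttpResponse, String> {
        Ok(HttpResponse {
            status: 200,
            body: b"pong".to_vec(),
        })
    }
}

fn main() {
    let response = RumaResponse(Pong).respond().unwrap();
    assert_eq!(response.status, 200);
    assert_eq!(response.body, b"pong".to_vec());
}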
Look at auth cache let pdu = match auth_cache.get(id) { Some(pdu) => { - debug!("Event found in cache"); + debug!("Found {} in cache", id); pdu.clone() } // b. Look in the main timeline (pduid_pdu tree) @@ -1159,12 +1159,12 @@ pub(crate) async fn fetch_and_handle_events( // (get_pdu checks both) None => match db.rooms.get_pdu(&id)? { Some(pdu) => { - debug!("Event found in outliers"); + debug!("Found {} in outliers", id); Arc::new(pdu) } None => { // d. Ask origin server over federation - debug!("Fetching event over federation: {:?}", id); + debug!("Fetching {} over federation.", id); match db .sending .send_federation_request( @@ -1175,7 +1175,7 @@ pub(crate) async fn fetch_and_handle_events( .await { Ok(res) => { - debug!("Got event over federation: {:?}", res); + debug!("Got {} over federation: {:?}", id, res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; let pdu = handle_incoming_pdu( From 5049d0e01b173b7fe1f6cdd6b22fa22a4e223d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 18:17:51 +0200 Subject: [PATCH 0511/1727] improvement: check signatures on join --- src/client_server/membership.rs | 100 +++++++++++++-------------- src/server_server.rs | 115 ++++++++++++++------------------ 2 files changed, 94 insertions(+), 121 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d491ca0..f648978 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use super::State; use crate::{ client_server, pdu::{PduBuilder, PduEvent}, - utils, ConduitResult, Database, Error, Result, Ruma, + server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::{error, warn}; use ruma::{ @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -515,27 +515,6 @@ async fn join_room_by_id_helper( ) .await?; - let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str(pdu.json().get()).map_err(|e| { - error!("{:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, &room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("a valid EventId can be converted to CanonicalJsonValue"), - ); - - Ok((event_id, value)) - }; - let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); @@ -546,23 +525,15 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); + let mut pub_key_map = BTreeMap::new(); + + for pdu in send_join_response.room_state.state.iter() { + let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + })?; - for pdu in send_join_response - .room_state - .state - .iter() - .map(add_event_id) - .map(|r| { - let (event_id, 
value) = r?; - PduEvent::from_id_val(&event_id, value.clone()) - .map(|ev| (event_id, Arc::new(ev))) - .map_err(|e| { - warn!("{:?}: {}", value, e); - Error::BadServerResponse("Invalid PDU in send_join response.") - }) - }) - { - let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { @@ -612,22 +583,12 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; - for pdu in send_join_response - .room_state - .auth_chain - .iter() - .map(add_event_id) - .map(|r| { - let (event_id, value) = r?; - PduEvent::from_id_val(&event_id, value.clone()) - .map(|ev| (event_id, Arc::new(ev))) - .map_err(|e| { - warn!("{:?}: {}", value, e); - Error::BadServerResponse("Invalid PDU in send_join response.") - }) - }) - { - let (_id, pdu) = pdu?; + for pdu in send_join_response.room_state.auth_chain.iter() { + let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + })?; db.rooms.add_pdu_outlier(&pdu)?; } @@ -674,3 +635,32 @@ async fn join_room_by_id_helper( Ok(join_room_by_id::Response::new(room_id.clone()).into()) } + +async fn validate_and_add_event_id( + pdu: &Raw, + room_version: &RoomVersionId, + pub_key_map: &mut BTreeMap>, + db: &Database, +) -> Result<(EventId, CanonicalJsonObject)> { + let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; + + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("a valid EventId can be converted to CanonicalJsonValue"), + ); + + Ok((event_id, value)) +} diff --git a/src/server_server.rs b/src/server_server.rs index 304bc19..39b626f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -658,44 +658,7 @@ fn handle_incoming_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys - for (signature_server, signature) in match value - .get("signatures") - .ok_or_else(|| "No signatures in server response pdu.".to_string())? 
- { - CanonicalJsonValue::Object(map) => map, - _ => return Err("Invalid signatures object in server response pdu.".to_string()), - } { - let signature_object = match signature { - CanonicalJsonValue::Object(map) => map, - _ => { - return Err( - "Invalid signatures content object in server response pdu.".to_string() - ) - } - }; - - let signature_ids = signature_object.keys().collect::>(); - - debug!("Fetching signing keys for {}", signature_server); - let keys = match fetch_signing_keys( - &db, - &Box::::try_from(&**signature_server).map_err(|_| { - "Invalid servername in signatures of server response pdu.".to_string() - })?, - signature_ids, - ) - .await - { - Ok(keys) => keys, - Err(_) => { - return Err( - "Signature verification failed: Could not fetch signing key.".to_string(), - ); - } - }; - - pub_key_map.insert(signature_server.clone(), keys); - } + fetch_required_signing_keys(&value, pub_key_map, db).await.map_err(|e| e.to_string())?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match @@ -1639,38 +1602,58 @@ pub fn get_profile_information_route<'a>( .into()) } -/* -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v2/invite/<_>/<_>", data = "") -)] -pub fn get_user_devices_route<'a>( - db: State<'a, Database>, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut displayname = None; - let mut avatar_url = None; - - match body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, - None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; +pub async fn fetch_required_signing_keys( + event: &BTreeMap, + pub_key_map: &mut BTreeMap>, + db: &Database, +) -> Result<()> { + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in match event + .get("signatures") + .ok_or_else(|| Error::BadServerResponse("No signatures in server response pdu."))? 
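// fetch_required_signing_keys walks the event's "signatures" object, which maps
// server name -> { key id -> signature }, and only needs the key ids so the matching
// public keys can be fetched. A small stand-in using serde_json::Value instead of
// ruma's CanonicalJsonValue:
use std::collections::BTreeMap;

fn key_ids_per_server(signatures: &serde_json::Value) -> BTreeMap<String, Vec<String>> {
    let mut out = BTreeMap::new();
    if let Some(by_server) = signatures.as_object() {
        for (server, keys) in by_server {
            if let Some(keys) = keys.as_object() {
                out.insert(server.clone(), keys.keys().cloned().collect());
            }
        }
    }
    out
}

fn main() {
    let signatures = serde_json::json!({
        "example.org": { "ed25519:abc123": "signature-bytes" }
    });
    let ids = key_ids_per_server(&signatures);
    assert_eq!(ids["example.org"], vec!["ed25519:abc123".to_owned()]);
}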
+ { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + )) } + } { + let signature_object = match signature { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + )) + } + }; + + let signature_ids = signature_object.keys().collect::>(); + + debug!("Fetching signing keys for {}", signature_server); + let keys = match fetch_signing_keys( + db, + &Box::::try_from(&**signature_server).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await + { + Ok(keys) => keys, + Err(_) => { + return Err(Error::BadServerResponse( + "Signature verification failed: Could not fetch signing key.", + )); + } + }; + + pub_key_map.insert(signature_server.clone(), keys); } - Ok(get_profile_information::v1::Response { - displayname, - avatar_url, - } - .into()) + Ok(()) } -*/ #[cfg(test)] mod tests { From 8b40e0a85ffec3ad9a712fd5175944158ac46f5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 21:34:31 +0200 Subject: [PATCH 0512/1727] improvement: fetch signing keys in parallel when joining a room --- Cargo.lock | 48 +++++++++---------- Cargo.toml | 4 +- src/client_server/membership.rs | 31 +++++++++++-- src/server_server.rs | 82 ++++++++++++++++++++++----------- 4 files changed, 105 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3da6fb..d153c28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "assign", "js_int", @@ -1645,9 +1645,8 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ - "bytes", "http", "percent-encoding", "ruma-api-macros", @@ -1661,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1672,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "ruma-api", "ruma-common", @@ -1686,10 +1685,9 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = 
"git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "assign", - "bytes", "http", "js_int", "maplit", @@ -1705,8 +1703,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "indexmap", "js_int", @@ -1722,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-common", @@ -1736,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1747,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-api", @@ -1761,8 +1759,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "paste", "rand", @@ -1775,8 +1773,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro2", "quote", @@ -1786,13 +1784,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "ruma-api", "ruma-common", @@ -1805,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = 
"git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-api", @@ -1820,7 +1818,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "form_urlencoded", "itoa", @@ -1833,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1843,8 +1841,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "base64 0.13.0", "ring", @@ -2122,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=94534b8ff3e71b544ae36206abc182321e9d41f1#94534b8ff3e71b544ae36206abc182321e9d41f1" +source = "git+https://github.com/timokoesters/state-res?rev=84e70c062708213d01281438598e16f13dffeda4#84e70c062708213d01281438598e16f13dffeda4" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 84e40d2..9aa9cee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "6394609feb4af5c43b840fab85b824b13cebb156", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7f3cc7fc6693a63", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "94534b8ff3e71b544ae36206abc182321e9d41f1", features = ["unstable-pre-spec"] } +state-res = { git = 
"https://github.com/timokoesters/state-res", rev = "84e70c062708213d01281438598e16f13dffeda4", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f648978..c348409 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -5,6 +5,7 @@ use crate::{ server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::{error, warn}; +use rocket::futures; use ruma::{ api::{ client::{ @@ -21,6 +22,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; +use std::sync::RwLock; use std::{collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] @@ -525,10 +527,18 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); - let mut pub_key_map = BTreeMap::new(); + let mut pub_key_map = RwLock::new(BTreeMap::new()); - for pdu in send_join_response.room_state.state.iter() { - let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + for result in futures::future::join_all( + send_join_response + .room_state + .state + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), + ) + .await + { + let (event_id, value) = result?; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -584,7 +594,8 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; for pdu in send_join_response.room_state.auth_chain.iter() { - let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let (event_id, value) = + validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -639,7 +650,7 @@ async fn join_room_by_id_helper( async fn validate_and_add_event_id( pdu: &Raw, room_version: &RoomVersionId, - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<(EventId, CanonicalJsonObject)> { let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { @@ -648,6 +659,16 @@ async fn validate_and_add_event_id( })?; server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; + if let Err(e) = ruma::signatures::verify_event( + &*pub_key_map + .read() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?, + &value, + room_version, + ) { + warn!("Event failed verification: {}", e); + return Err(Error::BadServerResponse("Event failed verification.")); + } let event_id = EventId::try_from(&*format!( "${}", diff --git a/src/server_server.rs b/src/server_server.rs index 39b626f..791ec1c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -38,7 +38,7 @@ use std::{ net::{IpAddr, SocketAddr}, pin::Pin, result::Result as StdResult, - sync::Arc, + sync::{Arc, RwLock}, time::{Duration, SystemTime}, }; @@ -543,7 +543,7 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); - let mut pub_key_map = BTreeMap::new(); + let pub_key_map = RwLock::new(BTreeMap::new()); // 
This is all the auth_events that have been recursively fetched so they don't have to be // deserialized over and over again. @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( value, true, &db, - &mut pub_key_map, + &pub_key_map, &mut auth_cache, ) .await @@ -622,7 +622,7 @@ fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, - pub_key_map: &'a mut BTreeMap>, + pub_key_map: &'a RwLock>>, auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { @@ -658,7 +658,9 @@ fn handle_incoming_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys - fetch_required_signing_keys(&value, pub_key_map, db).await.map_err(|e| e.to_string())?; + fetch_required_signing_keys(&value, &pub_key_map, db) + .await + .map_err(|e| e.to_string())?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match @@ -676,7 +678,11 @@ fn handle_incoming_pdu<'a>( let room_version = create_event_content.room_version; - let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &room_version) { + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + &room_version, + ) { Err(e) => { // Drop error!("{:?}: {}", value, e); @@ -1106,7 +1112,7 @@ pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, events: &[EventId], - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; @@ -1256,6 +1262,7 @@ pub(crate) async fn fetch_signing_keys( } } + warn!("Failed to find public key for server: {}", origin); Err(Error::BadServerResponse( "Failed to find public key for server", )) @@ -1486,7 +1493,7 @@ pub fn get_room_state_ids_route<'a>( put("/_matrix/federation/v2/invite/<_>/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn create_invite_route<'a>( +pub async fn create_invite_route<'a>( db: State<'a, Database>, body: Ruma, ) -> ConduitResult { @@ -1510,6 +1517,20 @@ pub fn create_invite_route<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let sender = serde_json::from_value( serde_json::to_value( signed_event @@ -1543,24 +1564,26 @@ pub fn create_invite_route<'a>( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; event.insert("event_id".to_owned(), "$dummy".into()); - invite_state.push( - serde_json::from_value::(event.into()) - .map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })? 
- .to_stripped_state_event(), - ); - db.rooms.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - &db.account_data, - &db.globals, - )?; + let pdu = serde_json::from_value::(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If the room already exists, the remote server will notify us about the join via /send + if !db.rooms.exists(&pdu.room_id)? { + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db.account_data, + &db.globals, + )?; + } Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), @@ -1604,7 +1627,7 @@ pub fn get_profile_information_route<'a>( pub async fn fetch_required_signing_keys( event: &BTreeMap, - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { // We go through all the signatures we see on the value and fetch the corresponding signing @@ -1642,14 +1665,17 @@ pub async fn fetch_required_signing_keys( .await { Ok(keys) => keys, - Err(_) => { + Err(e) => { return Err(Error::BadServerResponse( "Signature verification failed: Could not fetch signing key.", )); } }; - pub_key_map.insert(signature_server.clone(), keys); + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); } Ok(()) From 595129463856cc4b03cccebba678ced4af3865c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 09:39:06 +0200 Subject: [PATCH 0513/1727] feat: join cursed rooms this removes several restrictions and tries to continue verifying a pdu event if some auth events fail (it drops/ignores bad pdus) --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/client_server/membership.rs | 25 ++++++++++++++++++++----- src/pdu.rs | 25 ++++++++++--------------- src/server_server.rs | 27 ++++++++++++++++----------- 5 files changed, 48 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d153c28..f6af8b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2120,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=84e70c062708213d01281438598e16f13dffeda4#84e70c062708213d01281438598e16f13dffeda4" +source = "git+https://github.com/timokoesters/state-res?rev=9bb46ae681bfc361cff740e78dc42bb711db9779#9bb46ae681bfc361cff740e78dc42bb711db9779" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 9aa9cee..fba7e5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7 #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "84e70c062708213d01281438598e16f13dffeda4", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "9bb46ae681bfc361cff740e78dc42bb711db9779", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and 
federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c348409..4be0d5f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -527,7 +527,7 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); - let mut pub_key_map = RwLock::new(BTreeMap::new()); + let pub_key_map = RwLock::new(BTreeMap::new()); for result in futures::future::join_all( send_join_response @@ -538,7 +538,11 @@ async fn join_room_by_id_helper( ) .await { - let (event_id, value) = result?; + let (event_id, value) = match result { + Ok(t) => t, + Err(_) => continue, + }; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -593,9 +597,20 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; - for pdu in send_join_response.room_state.auth_chain.iter() { - let (event_id, value) = - validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + for result in futures::future::join_all( + send_join_response + .room_state + .auth_chain + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), + ) + .await + { + let (event_id, value) = match result { + Ok(t) => t, + Err(_) => continue, + }; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") diff --git a/src/pdu.rs b/src/pdu.rs index 009fde6..a7d9432 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -166,22 +166,17 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let json = format!( - r#"{{"content":{},"type":"{}","event_id":"{}","sender":"{}","origin_server_ts":{},"unsigned":{},"state_key":"{}"}}"#, - self.content, - self.kind, - self.event_id, - self.sender, - self.origin_server_ts, - serde_json::to_string(&self.unsigned).expect("Map::to_string always works"), - self.state_key - .as_ref() - .expect("state events have state keys") - ); + let json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "state_key": self.state_key, + }); - Raw::from_json( - serde_json::value::RawValue::from_string(json).expect("our string is valid json"), - ) + serde_json::from_value(json).expect("Raw::from_value always works") } #[tracing::instrument(skip(self))] diff --git a/src/server_server.rs b/src/server_server.rs index 791ec1c..538540a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -685,7 +685,7 @@ fn handle_incoming_pdu<'a>( ) { Err(e) => { // Drop - error!("{:?}: {}", value, e); + warn!("{:?}: {}", value, e); return Err("Signature verification failed".to_string()); } Ok(ruma::signatures::Verified::Signatures) => { @@ -1147,7 +1147,7 @@ pub(crate) async fn fetch_and_handle_events( debug!("Got {} over federation: {:?}", id, res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let pdu = handle_incoming_pdu( + let pdu = match handle_incoming_pdu( origin, &event_id, value, @@ -1157,14 +1157,20 @@ pub(crate) async fn fetch_and_handle_events( auth_cache, ) .await - .map_err(|e| { - error!("Error: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + 
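// The pdu.rs hunk above replaces a hand-assembled JSON string with the serde_json::json!
// macro. One reason such format! strings are fragile (an illustration, not taken from the
// commit message): they do not escape embedded quotes, while serde_json serialization does.
fn main() {
    let body = r#"say "hi""#;
    let hand_built = format!(r#"{{"body":"{}"}}"#, body);
    // the unescaped quotes make this invalid JSON
    assert!(serde_json::from_str::<serde_json::Value>(&hand_built).is_err());
    let safe = serde_json::json!({ "body": body });
    assert_eq!(safe["body"], body);
}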
{ + Ok(pdu) => pdu, + Err(e) => { + warn!("Authentication of event {} failed: {:?}", id, e); + continue; + } + }; pdu } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + Err(_) => { + warn!("Failed to fetch event: {}", id); + continue; + } } } }, @@ -1665,10 +1671,9 @@ pub async fn fetch_required_signing_keys( .await { Ok(keys) => keys, - Err(e) => { - return Err(Error::BadServerResponse( - "Signature verification failed: Could not fetch signing key.", - )); + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; } }; From c1953efa6bd3b8e354bc0bcff372072694f5b043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 10:43:31 +0200 Subject: [PATCH 0514/1727] chore: code cleanup --- src/client_server/membership.rs | 7 +--- src/database/pusher.rs | 2 +- src/database/rooms.rs | 71 ++++++++++++++++++--------------- src/server_server.rs | 33 +++++++-------- src/utils.rs | 2 +- 5 files changed, 59 insertions(+), 56 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index dad4328..0da0747 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -568,9 +568,7 @@ async fn join_room_by_id_helper( serde_json::from_value::( pdu.content .get("membership") - .ok_or_else(|| { - Error::BadServerResponse("Invalid member event content") - })? + .ok_or(Error::BadServerResponse("Invalid member event content"))? .clone(), ) .map_err(|_| { @@ -578,8 +576,7 @@ async fn join_room_by_id_helper( })?, &pdu.sender, Some(invite_state), - &db.account_data, - &db.globals, + db, )?; } state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index be30576..c204386 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -222,7 +222,7 @@ pub fn get_actions<'a>( user_display_name: db .users .displayname(&user)? - .unwrap_or(user.localpart().to_owned()), + .unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users, default_power_level: power_levels.users_default, notification_power_levels: power_levels.notifications, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index caf7a09..5053360 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -706,14 +706,14 @@ impl Rooms { userroom_id.extend_from_slice(pdu.room_id.as_bytes()); if notify { - &self + self .userroomid_notificationcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); } if highlight { - &self + self .userroomid_highlightcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); @@ -743,12 +743,10 @@ impl Rooms { let membership = serde_json::from_value::( pdu.content .get("membership") - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content", - ) - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ))? 
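// Sketch of the contract behind utils::increment, used with update_and_fetch on the
// notification/highlight count trees above: sled hands the closure the current value (if
// any) and stores whatever it returns. The version below mirrors that shape without sled;
// a missing or malformed value simply restarts the counter.
use std::convert::TryInto;

fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old.map(|bytes| bytes.try_into()) {
        Some(Ok(bytes)) => u64::from_be_bytes(bytes) + 1,
        _ => 1, // missing or malformed value: restart the counter at 1
    };
    Some(number.to_be_bytes().to_vec())
}

fn main() {
    let first = increment(None).unwrap();
    let second = increment(Some(first.as_slice())).unwrap();
    assert_eq!(u64::from_be_bytes(second.as_slice().try_into().unwrap()), 2);
}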
.clone(), ) .map_err(|_| { @@ -807,8 +805,7 @@ impl Rooms { membership, &pdu.sender, invite_state, - &db.account_data, - &db.globals, + db, )?; } } @@ -1205,7 +1202,7 @@ impl Rooms { .iter() .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) .max() - .unwrap_or(uint!(0)) + .unwrap_or_else(|| uint!(0)) + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); @@ -1542,8 +1539,7 @@ impl Rooms { membership: member::MembershipState, sender: &UserId, last_state: Option>>, - account_data: &super::account_data::AccountData, - globals: &super::globals::Globals, + db: &Database, ) -> Result<()> { let mut roomserver_id = room_id.as_bytes().to_vec(); roomserver_id.push(0xff); @@ -1603,23 +1599,32 @@ impl Rooms { // .ok(); // Copy old tags to new room - if let Some(tag_event) = account_data.get::( - Some(&predecessor.room_id), - user_id, - EventType::Tag, - )? { - account_data - .update(Some(room_id), user_id, EventType::Tag, &tag_event, globals) + if let Some(tag_event) = + db.account_data.get::( + Some(&predecessor.room_id), + user_id, + EventType::Tag, + )? + { + db.account_data + .update( + Some(room_id), + user_id, + EventType::Tag, + &tag_event, + &db.globals, + ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = account_data - .get::( - None, - user_id, - EventType::Direct, - )? { + if let Some(mut direct_event) = + db.account_data.get::( + None, + user_id, + EventType::Direct, + )? + { let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -1630,12 +1635,12 @@ impl Rooms { } if room_ids_updated { - account_data.update( + db.account_data.update( None, user_id, EventType::Direct, &direct_event, - globals, + &db.globals, )?; } }; @@ -1652,7 +1657,8 @@ impl Rooms { } member::MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = account_data + let is_ignored = db + .account_data .get::( None, // Ignored users are in global account data &user_id, // Receiver @@ -1673,7 +1679,7 @@ impl Rooms { .expect("state to bytes always works"), )?; self.roomuserid_invitecount - .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; + .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_leftstate.remove(&userroom_id)?; @@ -1693,7 +1699,7 @@ impl Rooms { serde_json::to_vec(&Vec::>::new()).unwrap(), )?; // TODO self.roomuserid_leftcount - .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; + .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -1729,8 +1735,7 @@ impl Rooms { MembershipState::Leave, user_id, last_state, - &db.account_data, - &db.globals, + db, )?; } else { let mut event = serde_json::from_value::>( diff --git a/src/server_server.rs b/src/server_server.rs index 538540a..5b49472 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1413,15 +1413,16 @@ pub fn get_missing_events_route<'a>( let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - if body.earliest_events.contains( - &serde_json::from_value( + let event_id = + serde_json::from_value( serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| { Error::bad_database("Event in db has no event_id field.") })?) 
.expect("canonical json is valid json value"), ) - .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?, - ) { + .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?; + + if body.earliest_events.contains(&event_id) { i += 1; continue; } @@ -1541,9 +1542,10 @@ pub async fn create_invite_route<'a>( serde_json::to_value( signed_event .get("sender") - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender field.") - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? .clone(), ) .expect("CanonicalJsonValue to serde_json::Value always works"), @@ -1553,9 +1555,10 @@ pub async fn create_invite_route<'a>( serde_json::to_value( signed_event .get("state_key") - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Event had no state_key field.") - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? .clone(), ) .expect("CanonicalJsonValue to serde_json::Value always works"), @@ -1586,8 +1589,7 @@ pub async fn create_invite_route<'a>( MembershipState::Invite, &sender, Some(invite_state), - &db.account_data, - &db.globals, + &db, )?; } @@ -1638,10 +1640,9 @@ pub async fn fetch_required_signing_keys( ) -> Result<()> { // We go through all the signatures we see on the value and fetch the corresponding signing // keys - for (signature_server, signature) in match event - .get("signatures") - .ok_or_else(|| Error::BadServerResponse("No signatures in server response pdu."))? - { + for (signature_server, signature) in match event.get("signatures").ok_or( + Error::BadServerResponse("No signatures in server response pdu."), + )? { CanonicalJsonValue::Object(map) => map, _ => { return Err(Error::BadServerResponse( diff --git a/src/utils.rs b/src/utils.rs index 45d9de8..106baff 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -69,7 +69,7 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -pub fn common_elements<'a>( +pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, ) -> Option>> { From 001d8dc2573988b13da5e481a422fa8a1df109c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 12:55:14 +0200 Subject: [PATCH 0515/1727] fix: don't do expensive operation on local /send --- src/database/pusher.rs | 2 +- src/database/rooms.rs | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index c204386..40b829f 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -218,7 +218,7 @@ pub fn get_actions<'a>( let ctx = PushConditionRoomCtx { room_id: pdu.room_id.clone(), - member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users .displayname(&user)? 
diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5053360..955ad83 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -674,6 +674,7 @@ impl Rooms { .iter() .filter_map(|r| r.ok()) .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false)) { // Don't notify the user of their own events if user == pdu.sender { @@ -706,15 +707,13 @@ impl Rooms { userroom_id.extend_from_slice(pdu.room_id.as_bytes()); if notify { - self - .userroomid_notificationcount + self.userroomid_notificationcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); } if highlight { - self - .userroomid_highlightcount + self.userroomid_highlightcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); } From bc98425dfe13192e8e9be65806fe1f6017e38963 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 14:41:39 +0200 Subject: [PATCH 0516/1727] improvement: use invite state as hints to what servers to ask for joining --- src/client_server/membership.rs | 51 +++++++++++++++++++++++++++++---- src/database/rooms.rs | 3 +- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0da0747..ab646a1 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -22,7 +22,11 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::RwLock}; +use std::{ + collections::{BTreeMap, HashSet}, + convert::TryFrom, + sync::RwLock, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -36,11 +40,29 @@ pub async fn join_room_by_id_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mut servers = db + .rooms + .invite_state(&sender_user, &body.room_id)? + .unwrap_or_default() + .iter() + .filter_map(|event| { + serde_json::from_str::(&event.json().to_string()).ok() + }) + .filter_map(|event| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::try_from(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect::>(); + + servers.insert(body.room_id.server_name().to_owned()); + join_room_by_id_helper( &db, body.sender_user.as_ref(), &body.room_id, - &[body.room_id.server_name().to_owned()], + &servers, body.third_party_signed.as_ref(), ) .await @@ -55,12 +77,31 @@ pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { - Ok(room_id) => (vec![room_id.server_name().to_owned()], room_id), + Ok(room_id) => { + let mut servers = db + .rooms + .invite_state(&sender_user, &room_id)? 
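// Sketch of the "invite state as hints" idea above: each stripped state event stored with
// the invite carries a sender, and the sender's server is a good candidate to ask when
// joining. serde_json::Value stands in for Raw<AnyStrippedStateEvent>, and the server
// name is taken as everything after the first ':' of the user id.
use std::collections::HashSet;

fn candidate_servers(invite_state: &[serde_json::Value], room_server: &str) -> HashSet<String> {
    let mut servers: HashSet<String> = invite_state
        .iter()
        .filter_map(|event| event.get("sender"))
        .filter_map(|sender| sender.as_str())
        .filter_map(|sender| sender.splitn(2, ':').nth(1).map(|s| s.to_owned()))
        .collect();
    // always include the server implied by the room id itself
    servers.insert(room_server.to_owned());
    servers
}

fn main() {
    let invite_state = vec![serde_json::json!({
        "type": "m.room.member",
        "sender": "@bob:other.example",
    })];
    let servers = candidate_servers(&invite_state, "room.example");
    assert!(servers.contains("other.example"));
    assert!(servers.contains("room.example"));
}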
+ .unwrap_or_default() + .iter() + .filter_map(|event| { + serde_json::from_str::(&event.json().to_string()).ok() + }) + .filter_map(|event| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::try_from(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect::>(); + + servers.insert(room_id.server_name().to_owned()); + (servers, room_id) + } Err(room_alias) => { let response = client_server::get_alias_helper(&db, &room_alias).await?; - (response.0.servers, response.0.room_id) + (response.0.servers.into_iter().collect(), response.0.room_id) } }; @@ -406,7 +447,7 @@ async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, - servers: &[Box], + servers: &HashSet>, _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> ConduitResult { let sender_user = sender_user.expect("user is authenticated"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 955ad83..35c1df7 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1795,7 +1795,8 @@ impl Rooms { .filter_map(|event| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| UserId::try_from(sender).ok()) - .map(|user| user.server_name().to_owned()); + .map(|user| user.server_name().to_owned()) + .collect::>(); for remote_server in servers { let make_leave_response = db From 04b08d45050c5e54c2dd26f77468bd619c744aaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 17:46:17 +0200 Subject: [PATCH 0517/1727] chore: bump stateres --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6af8b5..c5c79b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2120,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=9bb46ae681bfc361cff740e78dc42bb711db9779#9bb46ae681bfc361cff740e78dc42bb711db9779" +source = "git+https://github.com/ruma/state-res?rev=4516d73e8c7495330619bfb5b42c3bbf704293d8#4516d73e8c7495330619bfb5b42c3bbf704293d8" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 59d9ff1..57b6f7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7 #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "9bb46ae681bfc361cff740e78dc42bb711db9779", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "4516d73e8c7495330619bfb5b42c3bbf704293d8", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio From 0b56589dce90d2ee2f551a421b9a4efdd606ab6a Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Thu, 15 Apr 2021 22:07:27 -0300 Subject: [PATCH 0518/1727] feat: add handling of tls cert for delegated hosts --- CROSS_COMPILE.md | 2 +- Cargo.lock | 151 +++++++--------------------------------- Cargo.toml | 7 +- src/database/globals.rs | 40 ++++++++++- src/server_server.rs 
| 10 +++ 5 files changed, 82 insertions(+), 128 deletions(-) diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md index 4728768..366fd23 100644 --- a/CROSS_COMPILE.md +++ b/CROSS_COMPILE.md @@ -5,6 +5,6 @@ $ sudo usermod -aG docker $USER $ exec sudo su -l $USER $ sudo systemctl start docker $ cargo install cross -$ cross build --release --features tls_vendored --target armv7-unknown-linux-musleabihf +$ cross build --release --target armv7-unknown-linux-musleabihf ``` The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit diff --git a/Cargo.lock b/Cargo.lock index c5c79b8..84001dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,6 +198,8 @@ dependencies = [ "rocket", "ruma", "rust-argon2", + "rustls", + "rustls-native-certs", "serde", "serde_json", "serde_yaml", @@ -209,6 +211,7 @@ dependencies = [ "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", + "webpki", ] [[package]] @@ -420,21 +423,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.0.1" @@ -716,16 +704,18 @@ dependencies = [ ] [[package]] -name = "hyper-tls" -version = "0.5.0" +name = "hyper-rustls" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ - "bytes", + "futures-util", "hyper", - "native-tls", + "log", + "rustls", "tokio", - "tokio-native-tls", + "tokio-rustls", + "webpki", ] [[package]] @@ -1001,24 +991,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "native-tls" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "ntapi" version = "0.3.6" @@ -1096,49 +1068,12 @@ version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" -[[package]] -name = "openssl" -version = "0.10.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-sys", -] - [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "openssl-src" -version = "111.15.0+1.1.1k" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a" -dependencies = [ - 
"cc", -] - -[[package]] -name = "openssl-sys" -version = "0.9.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" -dependencies = [ - "autocfg", - "cc", - "libc", - "openssl-src", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.12.0" @@ -1280,12 +1215,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkg-config" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - [[package]] name = "png" version = "0.16.8" @@ -1486,15 +1415,6 @@ version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.3" @@ -1509,19 +1429,20 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-tls", + "hyper-rustls", "ipnet", "js-sys", "lazy_static", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "rustls", + "rustls-native-certs", "serde", "serde_urlencoded", "tokio", - "tokio-native-tls", + "tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1886,6 +1807,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls", + "schannel", + "security-framework", +] + [[package]] name = "ryu" version = "1.0.5" @@ -2191,20 +2124,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tempfile" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" -dependencies = [ - "cfg-if", - "libc", - "rand", - "redox_syscall 0.2.5", - "remove_dir_all", - "winapi", -] - [[package]] name = "termcolor" version = "1.1.2" @@ -2358,16 +2277,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2613,12 +2522,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "vcpkg" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - [[package]] name = "version_check" version = "0.9.3" diff --git a/Cargo.toml b/Cargo.toml index 57b6f7f..fa495df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,7 +49,11 @@ rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.1" } +reqwest = { version = "0.11.3", default-features = false, features = ["rustls-tls-native-roots"] } +# Custom TLS verifier +rustls = { version = "0.19", features = ["dangerous_configuration"] } 
+rustls-native-certs = "0.5.0" +webpki = "0.21.0" # Used for conduit::Error type thiserror = "1.0.24" # Used to generate thumbnails for images @@ -75,7 +79,6 @@ pretty_env_logger = "0.4.0" [features] default = ["conduit_bin"] conduit_bin = [] # TODO: add rocket to this when it is optional -tls_vendored = ["reqwest/native-tls-vendored"] [[bin]] name = "conduit" diff --git a/src/database/globals.rs b/src/database/globals.rs index bad9c89..e48c849 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -10,13 +10,16 @@ use std::{ time::Duration, }; use trust_dns_resolver::TokioAsyncResolver; +use rustls::{ServerCertVerifier, WebPKIVerifier}; pub const COUNTER: &str = "c"; type WellKnownMap = HashMap, (String, String)>; +type TlsNameMap = HashMap; #[derive(Clone)] pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host + pub tls_name_override: Arc>, pub(super) globals: sled::Tree, config: Config, keypair: Arc, @@ -26,6 +29,33 @@ pub struct Globals { pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey } +struct MatrixServerVerifier { + inner: WebPKIVerifier, + tls_name_override: Arc>, +} + +impl ServerCertVerifier for MatrixServerVerifier { + fn verify_server_cert( + &self, + roots: &rustls::RootCertStore, + presented_certs: &[rustls::Certificate], + dns_name: webpki::DNSNameRef<'_>, + ocsp_response: &[u8], + ) -> std::result::Result { + let cache = self.tls_name_override.read().unwrap(); + log::debug!("Searching for override for {:?}", dns_name); + log::debug!("Cache: {:?}", cache); + let override_name = match cache.get(dns_name.into()) { + Some(host) => { + log::debug!("Override found! {:?}", host); + host.as_ref() + }, + None => dns_name + }; + self.inner.verify_server_cert(roots, presented_certs, override_name, ocsp_response) + } +} + impl Globals { pub fn load( globals: sled::Tree, @@ -66,10 +96,17 @@ impl Globals { } }; + let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); + let verifier = Arc::new(MatrixServerVerifier { inner: WebPKIVerifier::new(), tls_name_override: tls_name_override.clone() }); + let mut tlsconfig = rustls::ClientConfig::new(); + tlsconfig.dangerous().set_certificate_verifier(verifier); + tlsconfig.root_store = rustls_native_certs::load_native_certs().expect("Error loading system certificates"); + let reqwest_client = reqwest::Client::builder() .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)) .pool_max_idle_per_host(1) + .use_preconfigured_tls(tlsconfig) .build() .unwrap(); @@ -86,7 +123,8 @@ impl Globals { dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, - actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), + actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), + tls_name_override, servertimeout_signingkey, jwt_decoding_key, }) diff --git a/src/server_server.rs b/src/server_server.rs index 5b49472..1bee9d5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -74,6 +74,16 @@ where .write() .unwrap() .insert(Box::::from(destination), result.clone()); + let actual_destination = result.0.strip_prefix("https://").unwrap().splitn(2, ':').next().unwrap(); + let host = result.1.splitn(2, ':').next().unwrap_or(&result.1); + if actual_destination != host { + globals.tls_name_override.write().unwrap().insert( + actual_destination.to_owned(), + webpki::DNSNameRef::try_from_ascii_str(&host) + 
.unwrap() + .to_owned(), + ); + } result }; From b4c001de2f921b78997407034bf54b83e10a2c3e Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Fri, 16 Apr 2021 00:27:26 -0300 Subject: [PATCH 0519/1727] chore: cleanup string-based code in find_actual_destination --- src/server_server.rs | 101 ++++++++++++++++++++++++++++++------------- 1 file changed, 70 insertions(+), 31 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 1bee9d5..e85532e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -45,6 +45,35 @@ use std::{ #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; +#[derive(Clone, Debug, PartialEq)] +enum FederationDestination { + Literal(SocketAddr), + Named(String, String), +} + +impl FederationDestination { + fn into_url(self) -> String { + match self { + Self::Literal(addr) => format!("https://{}", addr), + Self::Named(host, port) => format!("https://{}{}", host, port), + } + } + + fn into_uri(self) -> String { + match self { + Self::Literal(addr) => addr.to_string(), + Self::Named(host, ref port) => host + port, + } + } + + fn host(&self) -> String { + match &self { + Self::Literal(addr) => addr.ip().to_string(), + Self::Named(host, _) => host.clone() + } + } +} + #[tracing::instrument(skip(globals))] pub async fn send_request( globals: &crate::database::globals::Globals, @@ -69,17 +98,17 @@ where result } else { let result = find_actual_destination(globals, &destination).await; + let (actual_destination, host) = result.clone(); + let result = (result.0.into_url(), result.1.into_uri()); globals .actual_destination_cache .write() .unwrap() .insert(Box::::from(destination), result.clone()); - let actual_destination = result.0.strip_prefix("https://").unwrap().splitn(2, ':').next().unwrap(); - let host = result.1.splitn(2, ':').next().unwrap_or(&result.1); if actual_destination != host { globals.tls_name_override.write().unwrap().insert( - actual_destination.to_owned(), - webpki::DNSNameRef::try_from_ascii_str(&host) + actual_destination.host(), + webpki::DNSNameRef::try_from_ascii_str(&host.host()) .unwrap() .to_owned(), ); @@ -212,22 +241,23 @@ where } #[tracing::instrument] -fn get_ip_with_port(destination_str: String) -> Option { - if destination_str.parse::().is_ok() { - Some(destination_str) +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FederationDestination::Literal(destination)) } else if let Ok(ip_addr) = destination_str.parse::() { - Some(SocketAddr::new(ip_addr, 8448).to_string()) + Some(FederationDestination::Literal(SocketAddr::new(ip_addr, 8448))) } else { None } } #[tracing::instrument] -fn add_port_to_hostname(destination_str: String) -> String { - match destination_str.find(':') { - None => destination_str.to_owned() + ":8448", - Some(_) => destination_str.to_string(), - } +fn add_port_to_hostname(destination_str: &str) -> FederationDestination { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FederationDestination::Named(host.to_string(), port.to_string()) } /// Returns: actual_destination, host header @@ -237,36 +267,37 @@ fn add_port_to_hostname(destination_str: String) -> String { async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &'_ ServerName, -) -> (String, String) { +) -> (FederationDestination, FederationDestination) { let destination_str = destination.as_str().to_owned(); - let mut host = 
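The `tls_name_override` map exists because Matrix delegation can make the host we connect to differ from the name the presented certificate has to be valid for. A minimal sketch of that situation, not part of the patch itself — the hostnames are hypothetical and `TlsNameMap` is assumed to map the connection host to the `webpki::DNSName` the certificate should be checked against, as the code above suggests:

```rust
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

// Assumed shape of the alias added in globals.rs.
type TlsNameMap = HashMap<String, webpki::DNSName>;

fn main() {
    // Hypothetical setup: the server_name is example.org, but an SRV record for
    // _matrix._tcp.example.org points the connection at synapse.example.org:8448.
    // The certificate the remote end presents must still be valid for example.org.
    let actual_destination_host = "synapse.example.org"; // where the TLS connection goes
    let host_header = "example.org"; // the name the certificate has to match

    let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));

    if actual_destination_host != host_header {
        tls_name_override.write().unwrap().insert(
            actual_destination_host.to_owned(),
            webpki::DNSNameRef::try_from_ascii_str(host_header)
                .expect("example.org is a valid DNS name")
                .to_owned(),
        );
    }

    // MatrixServerVerifier::verify_server_cert later looks the connection name up
    // in this map and verifies the certificate against example.org instead.
    assert!(tls_name_override
        .read()
        .unwrap()
        .contains_key("synapse.example.org"));
}
```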
destination_str.clone(); - let actual_destination = "https://".to_owned() - + &match get_ip_with_port(destination_str.clone()) { + let mut hostname = destination_str.clone(); + let actual_destination = match get_ip_with_port(&destination_str) { Some(host_port) => { // 1: IP literal with provided or default port host_port } None => { - if destination_str.find(':').is_some() { + if let Some(pos) = destination_str.find(':') { // 2: Hostname with included port - destination_str + let (host, port) = destination_str.split_at(pos); + FederationDestination::Named(host.to_string(), port.to_string()) } else { match request_well_known(globals, &destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { - host = delegated_hostname.clone(); - match get_ip_with_port(delegated_hostname.clone()) { + hostname = delegated_hostname.clone(); + match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { - if destination_str.find(':').is_some() { + if let Some(pos) = destination_str.find(':') { // 3.2: Hostname with port in .well-known file - destination_str + let (host, port) = destination_str.split_at(pos); + FederationDestination::Named(host.to_string(), port.to_string()) } else { match query_srv_record(globals, &delegated_hostname).await { // 3.3: SRV lookup successful Some(hostname) => hostname, // 3.4: No SRV records, just use the hostname from .well-known - None => add_port_to_hostname(delegated_hostname), + None => add_port_to_hostname(&delegated_hostname), } } } @@ -278,7 +309,7 @@ async fn find_actual_destination( // 4: SRV record found Some(hostname) => hostname, // 5: No SRV record found - None => add_port_to_hostname(destination_str.to_string()), + None => add_port_to_hostname(&destination_str), } } } @@ -286,24 +317,32 @@ async fn find_actual_destination( } }; - (actual_destination, host) + let hostname = get_ip_with_port(&hostname).unwrap_or_else(|| { + match hostname.find(':') { + Some(pos) => { + let (host, port) = hostname.split_at(pos); + FederationDestination::Named(host.to_string(), port.to_string()) + } + None => FederationDestination::Named(hostname, "".to_string()) + } + }); + (actual_destination, hostname) } #[tracing::instrument(skip(globals))] async fn query_srv_record( globals: &crate::database::globals::Globals, hostname: &'_ str, -) -> Option { +) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await .map(|srv| { srv.iter().next().map(|result| { - format!( - "{}:{}", - result.target().to_string().trim_end_matches('.'), - result.port().to_string() + FederationDestination::Named( + result.target().to_string().trim_end_matches('.').to_string(), + format!(":{}", result.port()) ) }) }) From e73de2317eda6eb565a3c43e24e5164eb739b750 Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Fri, 16 Apr 2021 00:36:35 -0300 Subject: [PATCH 0520/1727] fix: verify tls cert for non-conformant servers --- src/database/globals.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index e48c849..fd51e35 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -52,7 +52,11 @@ impl ServerCertVerifier for MatrixServerVerifier { }, None => dns_name }; - self.inner.verify_server_cert(roots, presented_certs, override_name, ocsp_response) + + self.inner.verify_server_cert(roots, presented_certs, override_name, ocsp_response).or_else(|_| { + 
log::warn!("Server is non-compliant, retrying with original name!"); + self.inner.verify_server_cert(roots, presented_certs, dns_name, ocsp_response) + }) } } From 95db30918ea4d8511fe8defa39f9fcb941ae314d Mon Sep 17 00:00:00 2001 From: Tobias Fella Date: Thu, 15 Apr 2021 23:08:13 +0200 Subject: [PATCH 0521/1727] docs: Fix fix formatting of code blocks in markdown --- CROSS_COMPILE.md | 1 + DEPLOY.md | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md index 4728768..5bdfff4 100644 --- a/CROSS_COMPILE.md +++ b/CROSS_COMPILE.md @@ -1,4 +1,5 @@ Install docker: + ``` $ sudo apt install docker $ sudo usermod -aG docker $USER diff --git a/DEPLOY.md b/DEPLOY.md index 4601ab5..6959759 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -53,6 +53,7 @@ WantedBy=multi-user.target ``` Finally, run + ```bash $ sudo systemctl daemon-reload ``` @@ -61,6 +62,7 @@ $ sudo systemctl daemon-reload ## Creating the Conduit configuration file Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** + ```toml [global] # The server_name is the name of this server. It is used as a suffix for user @@ -125,6 +127,7 @@ This depends on whether you use Apache, Nginx or another web server. ### Apache Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: + ``` Listen 8448 @@ -143,6 +146,7 @@ SSLCertificateKeyFile /etc/letsencrypt/live/your.server.name/privkey.pem # EDIT ``` **You need to make some edits again.** When you are done, run + ```bash $ sudo systemctl reload apache2 ``` @@ -152,6 +156,7 @@ $ sudo systemctl reload apache2 If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` + ``` server { listen 443; @@ -164,6 +169,7 @@ server { } ``` **You need to make some edits again.** When you are done, run + ```bash $ sudo systemctl reload nginx ``` @@ -172,6 +178,7 @@ $ sudo systemctl reload nginx ## SSL Certificate The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: + ```bash $ sudo certbot -d your.server.name ``` @@ -180,11 +187,13 @@ $ sudo certbot -d your.server.name ## You're done! 
Now you can start Conduit with: + ```bash $ sudo systemctl start conduit ``` Set it to start automatically when your system boots with: + ```bash $ sudo systemctl enable conduit ``` From 7faa021ff5656570aae815739dd3bc44ce18b5b8 Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Fri, 16 Apr 2021 12:18:22 -0300 Subject: [PATCH 0522/1727] chore: code formatting and cleanup --- src/database/globals.rs | 41 +++++++------ src/server_server.rs | 129 +++++++++++++++++++++------------------- 2 files changed, 89 insertions(+), 81 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index fd51e35..acecf02 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,16 +1,16 @@ use crate::{database::Config, utils, Error, Result}; -use log::error; +use log::{error, info}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; +use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ collections::{BTreeMap, HashMap}, sync::{Arc, RwLock}, time::Duration, }; use trust_dns_resolver::TokioAsyncResolver; -use rustls::{ServerCertVerifier, WebPKIVerifier}; pub const COUNTER: &str = "c"; @@ -42,21 +42,20 @@ impl ServerCertVerifier for MatrixServerVerifier { dns_name: webpki::DNSNameRef<'_>, ocsp_response: &[u8], ) -> std::result::Result { - let cache = self.tls_name_override.read().unwrap(); - log::debug!("Searching for override for {:?}", dns_name); - log::debug!("Cache: {:?}", cache); - let override_name = match cache.get(dns_name.into()) { - Some(host) => { - log::debug!("Override found! {:?}", host); - host.as_ref() - }, - None => dns_name - }; - - self.inner.verify_server_cert(roots, presented_certs, override_name, ocsp_response).or_else(|_| { - log::warn!("Server is non-compliant, retrying with original name!"); - self.inner.verify_server_cert(roots, presented_certs, dns_name, ocsp_response) - }) + if let Some(override_name) = self.tls_name_override.read().unwrap().get(dns_name.into()) { + let result = self.inner.verify_server_cert( + roots, + presented_certs, + override_name.as_ref(), + ocsp_response, + ); + if result.is_ok() { + return result; + } + info!("Server {:?} is non-compliant, retrying TLS verification with original name", dns_name); + } + self.inner + .verify_server_cert(roots, presented_certs, dns_name, ocsp_response) } } @@ -101,10 +100,14 @@ impl Globals { }; let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); - let verifier = Arc::new(MatrixServerVerifier { inner: WebPKIVerifier::new(), tls_name_override: tls_name_override.clone() }); + let verifier = Arc::new(MatrixServerVerifier { + inner: WebPKIVerifier::new(), + tls_name_override: tls_name_override.clone(), + }); let mut tlsconfig = rustls::ClientConfig::new(); tlsconfig.dangerous().set_certificate_verifier(verifier); - tlsconfig.root_store = rustls_native_certs::load_native_certs().expect("Error loading system certificates"); + tlsconfig.root_store = + rustls_native_certs::load_native_certs().expect("Error loading system certificates"); let reqwest_client = reqwest::Client::builder() .connect_timeout(Duration::from_secs(30)) diff --git a/src/server_server.rs b/src/server_server.rs index e85532e..ac38f4d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -46,13 +46,13 @@ use std::{ use rocket::{get, post, put}; #[derive(Clone, Debug, PartialEq)] -enum FederationDestination { +enum FedDest { Literal(SocketAddr), Named(String, String), } -impl FederationDestination { - fn into_url(self) -> String { +impl FedDest { 
+ fn into_https_url(self) -> String { match self { Self::Literal(addr) => format!("https://{}", addr), Self::Named(host, port) => format!("https://{}{}", host, port), @@ -69,7 +69,7 @@ impl FederationDestination { fn host(&self) -> String { match &self { Self::Literal(addr) => addr.ip().to_string(), - Self::Named(host, _) => host.clone() + Self::Named(host, _) => host.clone(), } } } @@ -99,13 +99,13 @@ where } else { let result = find_actual_destination(globals, &destination).await; let (actual_destination, host) = result.clone(); - let result = (result.0.into_url(), result.1.into_uri()); + let result = (result.0.into_https_url(), result.1.into_uri()); globals .actual_destination_cache .write() .unwrap() .insert(Box::::from(destination), result.clone()); - if actual_destination != host { + if actual_destination.host() != host.host() { globals.tls_name_override.write().unwrap().insert( actual_destination.host(), webpki::DNSNameRef::try_from_ascii_str(&host.host()) @@ -241,23 +241,23 @@ where } #[tracing::instrument] -fn get_ip_with_port(destination_str: &str) -> Option { +fn get_ip_with_port(destination_str: &str) -> Option { if let Ok(destination) = destination_str.parse::() { - Some(FederationDestination::Literal(destination)) + Some(FedDest::Literal(destination)) } else if let Ok(ip_addr) = destination_str.parse::() { - Some(FederationDestination::Literal(SocketAddr::new(ip_addr, 8448))) + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) } else { None } } #[tracing::instrument] -fn add_port_to_hostname(destination_str: &str) -> FederationDestination { +fn add_port_to_hostname(destination_str: &str) -> FedDest { let (host, port) = match destination_str.find(':') { None => (destination_str, ":8448"), Some(pos) => destination_str.split_at(pos), }; - FederationDestination::Named(host.to_string(), port.to_string()) + FedDest::Named(host.to_string(), port.to_string()) } /// Returns: actual_destination, host header @@ -267,65 +267,66 @@ fn add_port_to_hostname(destination_str: &str) -> FederationDestination { async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &'_ ServerName, -) -> (FederationDestination, FederationDestination) { +) -> (FedDest, FedDest) { let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { - Some(host_port) => { - // 1: IP literal with provided or default port - host_port - } - None => { - if let Some(pos) = destination_str.find(':') { - // 2: Hostname with included port - let (host, port) = destination_str.split_at(pos); - FederationDestination::Named(host.to_string(), port.to_string()) - } else { - match request_well_known(globals, &destination.as_str()).await { - // 3: A .well-known file is available - Some(delegated_hostname) => { - hostname = delegated_hostname.clone(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = destination_str.find(':') { - // 3.2: Hostname with port in .well-known file - let (host, port) = destination_str.split_at(pos); - FederationDestination::Named(host.to_string(), port.to_string()) - } else { - match query_srv_record(globals, &delegated_hostname).await { - // 3.3: SRV lookup successful - Some(hostname) => hostname, - // 3.4: No SRV records, just use the hostname from .well-known - None => add_port_to_hostname(&delegated_hostname), - } + Some(host_port) => { + // 1: IP literal 
with provided or default port + host_port + } + None => { + if let Some(pos) = destination_str.find(':') { + // 2: Hostname with included port + let (host, port) = destination_str.split_at(pos); + FedDest::Named(host.to_string(), port.to_string()) + } else { + match request_well_known(globals, &destination.as_str()).await { + // 3: A .well-known file is available + Some(delegated_hostname) => { + hostname = delegated_hostname.clone(); + match get_ip_with_port(&delegated_hostname) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if let Some(pos) = destination_str.find(':') { + // 3.2: Hostname with port in .well-known file + let (host, port) = destination_str.split_at(pos); + FedDest::Named(host.to_string(), port.to_string()) + } else { + match query_srv_record(globals, &delegated_hostname).await { + // 3.3: SRV lookup successful + Some(hostname) => hostname, + // 3.4: No SRV records, just use the hostname from .well-known + None => add_port_to_hostname(&delegated_hostname), } } } } - // 4: No .well-known or an error occured - None => { - match query_srv_record(globals, &destination_str).await { - // 4: SRV record found - Some(hostname) => hostname, - // 5: No SRV record found - None => add_port_to_hostname(&destination_str), - } + } + // 4: No .well-known or an error occured + None => { + match query_srv_record(globals, &destination_str).await { + // 4: SRV record found + Some(hostname) => hostname, + // 5: No SRV record found + None => add_port_to_hostname(&destination_str), } } } } - }; - - let hostname = get_ip_with_port(&hostname).unwrap_or_else(|| { - match hostname.find(':') { - Some(pos) => { - let (host, port) = hostname.split_at(pos); - FederationDestination::Named(host.to_string(), port.to_string()) - } - None => FederationDestination::Named(hostname, "".to_string()) } - }); + }; + + let hostname = if let Ok(addr) = hostname.parse::() { + FedDest::Literal(addr) + } else if let Ok(addr) = hostname.parse::() { + FedDest::Named(addr.to_string(), "".to_string()) + } else if let Some(pos) = hostname.find(':') { + let (host, port) = hostname.split_at(pos); + FedDest::Named(host.to_string(), port.to_string()) + } else { + FedDest::Named(hostname, "".to_string()) + }; (actual_destination, hostname) } @@ -333,16 +334,20 @@ async fn find_actual_destination( async fn query_srv_record( globals: &crate::database::globals::Globals, hostname: &'_ str, -) -> Option { +) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await .map(|srv| { srv.iter().next().map(|result| { - FederationDestination::Named( - result.target().to_string().trim_end_matches('.').to_string(), - format!(":{}", result.port()) + FedDest::Named( + result + .target() + .to_string() + .trim_end_matches('.') + .to_string(), + format!(":{}", result.port()), ) }) }) From eedac4fd9610feee34963d0bb227b354f97cd210 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 16 Apr 2021 18:18:29 +0200 Subject: [PATCH 0523/1727] feat: make_join, send_join and /directory --- src/client_server/membership.rs | 16 +- src/database/rooms.rs | 150 ++++++--- src/main.rs | 3 + src/server_server.rs | 578 +++++++++++++++++++++++++++----- 4 files changed, 601 insertions(+), 146 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ab646a1..dcd7c37 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -564,7 +564,7 @@ async fn join_room_by_id_helper( 
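The behaviour of the `FedDest` helpers is easiest to see with concrete values. A hypothetical test module (not part of the patch; module and test names are illustrative, and the expected values follow from the functions above):

```rust
#[cfg(test)]
mod fed_dest_sketch {
    use super::*;

    #[test]
    fn ip_literals_get_the_default_port() {
        assert_eq!(
            get_ip_with_port("1.1.1.1"),
            Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
        );
        assert_eq!(
            get_ip_with_port("dead:beef::"),
            Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap()))
        );
        assert_eq!(get_ip_with_port("example.com"), None);
    }

    #[test]
    fn hostnames_get_the_default_port() {
        assert_eq!(
            add_port_to_hostname("example.com"),
            FedDest::Named("example.com".to_owned(), ":8448".to_owned())
        );
        assert_eq!(
            add_port_to_hostname("example.com:1337"),
            FedDest::Named("example.com".to_owned(), ":1337".to_owned())
        );
    }

    #[test]
    fn urls_and_host_headers() {
        let dest = FedDest::Named("example.com".to_owned(), ":8448".to_owned());
        assert_eq!(dest.clone().into_https_url(), "https://example.com:8448");
        assert_eq!(dest.into_uri(), "example.com:8448");
    }
}
```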
pdu_id.extend_from_slice(&count.to_be_bytes()); let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) - .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; + .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = BTreeMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); @@ -588,7 +588,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - db.rooms.add_pdu_outlier(&pdu)?; + db.rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { @@ -632,7 +632,11 @@ async fn join_room_by_id_helper( pdu.event_id.clone(), ); - db.rooms.force_state(room_id, state, &db.globals)?; + if state.get(&(EventType::RoomCreate, "".to_owned())).is_none() { + return Err(Error::BadServerResponse("State contained no create event.")); + } + + db.rooms.force_state(room_id, state, &db)?; for result in futures::future::join_all( send_join_response @@ -648,11 +652,7 @@ async fn join_room_by_id_helper( Err(_) => continue, }; - let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - warn!("{:?}: {}", value, e); - Error::BadServerResponse("Invalid PDU in send_join response.") - })?; - db.rooms.add_pdu_outlier(&pdu)?; + db.rooms.add_pdu_outlier(&event_id, &value)?; } // We append to state before appending the pdu, so we don't have a moment in time with the diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 35c1df7..b714582 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -241,7 +241,7 @@ impl Rooms { kind: &EventType, sender: &UserId, state_key: Option<&str>, - content: serde_json::Value, + content: &serde_json::Value, ) -> Result>> { let auth_events = state_res::auth_types_for_event( kind, @@ -295,7 +295,7 @@ impl Rooms { &self, room_id: &RoomId, state: BTreeMap<(EventType, String), EventId>, - globals: &super::globals::Globals, + db: &Database, ) -> Result<()> { let state_hash = self.calculate_hash( &state @@ -304,57 +304,109 @@ impl Rooms { .collect::>(), ); - let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { - Some(shortstatehash) => { - // State already existed in db - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &*shortstatehash)?; - return Ok(()); - } - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(&state_hash, &shortstatehash.to_be_bytes())?; - shortstatehash.to_be_bytes().to_vec() + let (shortstatehash, already_existed) = + match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = db.globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }; + + let new_state = if !already_existed { + let mut new_state = HashSet::new(); + + for ((event_type, state_key), eventid) in state { + new_state.insert(eventid.clone()); + + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? 
{ + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = db.globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() + } + }; + + let shorteventid = match self.eventid_shorteventid.get(eventid.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = db.globals.next_count()?; + self.eventid_shorteventid + .insert(eventid.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), eventid.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + let mut state_id = shortstatehash.to_be_bytes().to_vec(); + state_id.extend_from_slice(&shortstatekey); + + self.stateid_shorteventid + .insert(&state_id, &*shorteventid)?; } + + new_state + } else { + self.state_full_ids(shortstatehash)?.into_iter().collect() }; - for ((event_type, state_key), eventid) in state { - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + let old_state = self + .current_shortstatehash(&room_id)? + .map(|s| self.state_full_ids(s)) + .transpose()? + .map(|vec| vec.into_iter().collect::>()) + .unwrap_or_default(); - let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - shortstatekey.to_be_bytes().to_vec() + for event_id in new_state.difference(&old_state) { + if let Some(pdu) = self.get_pdu_json(event_id)? { + if pdu.get("event_type") + == Some(&CanonicalJsonValue::String("m.room.member".to_owned())) + { + if let Ok(pdu) = serde_json::from_value::( + serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"), + ) { + if let Some(membership) = + pdu.content.get("membership").and_then(|membership| { + serde_json::from_value::( + membership.clone(), + ) + .ok() + }) + { + if let Some(state_key) = pdu + .state_key + .and_then(|state_key| UserId::try_from(state_key).ok()) + { + self.update_membership( + room_id, + &state_key, + membership, + &pdu.sender, + None, + db, + )?; + } + } + } } - }; - - let shorteventid = match self.eventid_shorteventid.get(eventid.as_bytes())? { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(eventid.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), eventid.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; - - let mut state_id = shortstatehash.clone(); - state_id.extend_from_slice(&shortstatekey); - - self.stateid_shorteventid - .insert(&*state_id, &*shorteventid)?; + } } self.roomid_shortstatehash - .insert(room_id.as_bytes(), &*shortstatehash)?; + .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -591,10 +643,10 @@ impl Rooms { /// Append the PDU as an outlier. /// /// Any event given to this will be processed (state-res) on another thread. 
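The `shortstatekey`/`shorteventid` lookups above follow a simple interning pattern: the first time a state key or event id is seen it is assigned a sequential u64 from `globals.next_count()`, and from then on the fixed-width big-endian counter is stored instead of the full string. A standalone sketch of that pattern — the struct and method names are illustrative, and the real code keeps the mappings in sled trees (`statekey_shortstatekey`, `eventid_shorteventid`, `shorteventid_eventid`) rather than in memory:

```rust
use std::collections::HashMap;

/// Simplified in-memory stand-in for the short-id trees.
#[derive(Default)]
struct ShortIds {
    forward: HashMap<Vec<u8>, u64>,
    reverse: HashMap<u64, Vec<u8>>,
    counter: u64, // stands in for globals.next_count()
}

impl ShortIds {
    fn get_or_create(&mut self, key: &[u8]) -> u64 {
        if let Some(&short) = self.forward.get(key) {
            return short;
        }
        self.counter += 1;
        self.forward.insert(key.to_vec(), self.counter);
        self.reverse.insert(self.counter, key.to_vec());
        self.counter
    }
}

fn main() {
    let mut ids = ShortIds::default();

    // A state key is "<event type>\xff<state key>", just like in force_state.
    let mut key = b"m.room.member".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");

    let a = ids.get_or_create(&key);
    let b = ids.get_or_create(&key);
    assert_eq!(a, b); // the same key always maps to the same short id
    assert_eq!(ids.reverse[&a], key); // and the short id maps back to the key
}
```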
- pub fn add_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( - &pdu.event_id.as_bytes(), - &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + &event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("CanonicalJsonObject is valid string"), )?; Ok(()) @@ -1193,7 +1245,7 @@ impl Rooms { &event_type, &sender, state_key.as_deref(), - content.clone(), + &content, )?; // Our depth is the maximum depth of prev_events + 1 diff --git a/src/main.rs b/src/main.rs index 31cfaca..3157023 100644 --- a/src/main.rs +++ b/src/main.rs @@ -168,7 +168,10 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_event_route, server_server::get_missing_events_route, server_server::get_room_state_ids_route, + server_server::create_join_event_template_route, + server_server::create_join_event_route, server_server::create_invite_route, + server_server::get_room_information_route, server_server::get_profile_information_route, ], ) diff --git a/src/server_server.rs b/src/server_server.rs index 5b49472..ed4be0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -14,25 +14,32 @@ use ruma::{ VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, - membership::create_invite, - query::get_profile_information, + membership::{ + create_invite, + create_join_event::{self, RoomState}, + create_join_event_template, + }, + query::{get_profile_information, get_room_information}, transactions::send_transaction_message, }, IncomingResponse, OutgoingRequest, OutgoingResponse, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ - room::{create::CreateEventContent, member::MembershipState}, + room::{ + create::CreateEventContent, + member::{MemberEventContent, MembershipState}, + }, EventType, }, serde::{to_canonical_value, Raw}, - signatures::CanonicalJsonValue, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + signatures::{CanonicalJsonObject, CanonicalJsonValue}, + uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, future::Future, net::{IpAddr, SocketAddr}, @@ -589,8 +596,8 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } -/// An async function that can recursively calls itself. -type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; +/// An async function that can recursively call itself. +type AsyncRecursiveResult<'a, T, E> = Pin> + 'a + Send>>; /// When receiving an event one needs to: /// 0. Skip the PDU if we already know about it @@ -624,13 +631,13 @@ fn handle_incoming_pdu<'a>( db: &'a Database, pub_key_map: &'a RwLock>>, auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, Arc> { +) -> AsyncRecursiveResult<'a, Option>, String> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - // 0. Skip the PDU if we already know about it - if let Ok(Some(pdu)) = db.rooms.get_non_outlier_pdu(&event_id) { - return Ok(Arc::new(pdu)); + // 0. Skip the PDU if we already have it as a timeline event + if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { + return Ok(Some(pdu_id.to_vec())); } // 1. 
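`handle_incoming_pdu` and `fetch_and_handle_events` call each other (and themselves) recursively, which a plain `async fn` cannot do: its generated future type would have to contain itself. That is why both return the boxed `AsyncRecursiveResult` and wrap their bodies in `Box::pin(async move { ... })`. A minimal self-contained sketch of the pattern — `BoxedResult` and `countdown` are illustrative names, not from the patch:

```rust
use std::{future::Future, pin::Pin};

// Same shape as AsyncRecursiveResult: a pinned, boxed future behind a trait object.
type BoxedResult<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>;

// Recursion is fine here because each call goes through a heap-allocated future,
// so the compiler never has to embed the future type inside itself.
fn countdown<'a>(n: u64) -> BoxedResult<'a, u64, String> {
    Box::pin(async move {
        if n == 0 {
            return Ok(0);
        }
        countdown(n - 1).await
    })
}
```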
Check the server is in the room @@ -690,6 +697,7 @@ fn handle_incoming_pdu<'a>( } Ok(ruma::signatures::Verified::Signatures) => { // Redact + warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, &room_version) { Ok(obj) => obj, Err(_) => return Err("Redaction failed".to_string()), @@ -705,7 +713,7 @@ fn handle_incoming_pdu<'a>( to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); let incoming_pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), ) .map_err(|_| "Event is not a valid PDU.".to_string())?; @@ -793,13 +801,13 @@ fn handle_incoming_pdu<'a>( // 7. Persist the event as an outlier. db.rooms - .add_pdu_outlier(&incoming_pdu) + .add_pdu_outlier(&incoming_pdu.event_id, &val) .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; debug!("Added pdu as outlier."); // 8. if not timeline event: stop if !is_timeline_event { - return Ok(incoming_pdu); + return Ok(None); } // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events @@ -811,7 +819,54 @@ fn handle_incoming_pdu<'a>( // the state from a known point and resolve if > 1 prev_event debug!("Requesting state at event."); - let (state_at_incoming_event, incoming_auth_events): (StateMap>, Vec>) = + let mut state_at_incoming_event = None; + let mut incoming_auth_events = Vec::new(); + + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &incoming_pdu.prev_events[0]; + let state_vec = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())? + .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) + .flatten(); + if let Some(mut state_vec) = state_vec { + if db + .rooms + .get_pdu(prev_event) + .ok() + .flatten() + .ok_or_else(|| "Could not find prev event, but we know the state.".to_owned())? + .state_key + .is_some() + { + state_vec.push(prev_event.clone()); + } + state_at_incoming_event = Some( + fetch_and_handle_events(db, origin, &state_vec, pub_key_map, auth_cache) + .await + .map_err(|_| "Failed to fetch state events locally".to_owned())? + .into_iter() + .map(|pdu| { + ( + ( + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("events from state_full_ids are state events"), + ), + pdu, + ) + }) + .collect(), + ); + } + &state_at_incoming_event; + + // TODO: set incoming_auth_events? + } + + if state_at_incoming_event.is_none() { // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match db @@ -856,12 +911,16 @@ fn handle_incoming_pdu<'a>( } // The original create event must still be in the state - if state.get(&(EventType::RoomCreate, "".to_owned())).map(|a| a.as_ref()) != Some(&create_event) { + if state + .get(&(EventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(&create_event) + { return Err("Incoming event refers to wrong create event.".to_owned()); } debug!("Fetching auth chain events at event."); - let incoming_auth_events = match fetch_and_handle_events( + incoming_auth_events = match fetch_and_handle_events( &db, origin, &res.auth_chain_ids, @@ -874,12 +933,16 @@ fn handle_incoming_pdu<'a>( Err(_) => return Err("Failed to fetch auth chain.".to_owned()), }; - (state, incoming_auth_events) + state_at_incoming_event = Some(state); } Err(_) => { return Err("Fetching state for event failed".into()); } }; + } + + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); // 11. Check the auth of the event passes based on the state of the event if !state_res::event_auth::auth_check( @@ -1079,20 +1142,26 @@ fn handle_incoming_pdu<'a>( // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - append_incoming_pdu(&db, &incoming_pdu, extremities, &state_at_incoming_event) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; + let pdu_id = append_incoming_pdu( + &db, + &incoming_pdu, + val, + extremities, + &state_at_incoming_event, + ) + .map_err(|_| "Failed to add pdu to db.".to_owned())?; debug!("Appended incoming pdu."); // Set the new room state to the resolved state if update_state { db.rooms - .force_state(&room_id, new_room_state, &db.globals) + .force_state(&room_id, new_room_state, &db) .map_err(|_| "Failed to set new room state.".to_owned())?; } debug!("Updated resolved state"); // Event has passed all auth/stateres checks - Ok(incoming_pdu) + Ok(Some(pdu_id)) }) } @@ -1108,77 +1177,93 @@ fn handle_incoming_pdu<'a>( /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. //#[tracing::instrument(skip(db, key_map, auth_cache))] -pub(crate) async fn fetch_and_handle_events( - db: &Database, - origin: &ServerName, - events: &[EventId], - pub_key_map: &RwLock>>, - auth_cache: &mut EventMap>, -) -> Result>> { - let mut pdus = vec![]; - for id in events { - // a. Look at auth cache - let pdu = match auth_cache.get(id) { - Some(pdu) => { - debug!("Found {} in cache", id); - pdu.clone() - } - // b. Look in the main timeline (pduid_pdu tree) - // c. Look at outlier pdu tree - // (get_pdu checks both) - None => match db.rooms.get_pdu(&id)? { - Some(pdu) => { - debug!("Found {} in outliers", id); - Arc::new(pdu) - } - None => { - // d. 
Ask origin server over federation - debug!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - debug!("Got {} over federation: {:?}", id, res); - let (event_id, value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let pdu = match handle_incoming_pdu( - origin, - &event_id, - value, - false, +pub(crate) fn fetch_and_handle_events<'a>( + db: &'a Database, + origin: &'a ServerName, + events: &'a [EventId], + pub_key_map: &'a RwLock>>, + auth_cache: &'a mut EventMap>, +) -> AsyncRecursiveResult<'a, Vec>, Error> { + Box::pin(async move { + let mut pdus = vec![]; + for id in events { + // a. Look at auth cache + let pdu = + match auth_cache.get(id) { + Some(pdu) => { + debug!("Found {} in cache", id); + // We already have the auth chain for events in cache + pdu.clone() + } + // b. Look in the main timeline (pduid_pdu tree) + // c. Look at outlier pdu tree + // (get_pdu checks both) + None => match db.rooms.get_pdu(&id)? { + Some(pdu) => { + debug!("Found {} in db", id); + // We need to fetch the auth chain + let _ = fetch_and_handle_events( db, + origin, + &pdu.auth_events, pub_key_map, auth_cache, ) - .await + .await?; + Arc::new(pdu) + } + None => { + // d. Ask origin server over federation + debug!("Fetching {} over federation.", id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await { - Ok(pdu) => pdu, - Err(e) => { - warn!("Authentication of event {} failed: {:?}", id, e); + Ok(res) => { + debug!("Got {} over federation: {:?}", id, res); + let (event_id, value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu)?; + // This will also fetch the auth chain + match handle_incoming_pdu( + origin, + &event_id, + value.clone(), + false, + db, + pub_key_map, + auth_cache, + ) + .await + { + Ok(_) => Arc::new(serde_json::from_value( + serde_json::to_value(value) + .expect("canonicaljsonobject is valid value"), + ) + .expect("This is possible because handle_incoming_pdu worked")), + Err(e) => { + warn!("Authentication of event {} failed: {:?}", id, e); + continue; + } + } + } + Err(_) => { + warn!("Failed to fetch event: {}", id); continue; } - }; - - pdu + } } - Err(_) => { - warn!("Failed to fetch event: {}", id); - continue; - } - } - } - }, - }; - auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); - pdus.push(pdu); - } - Ok(pdus) + }, + }; + auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); + pdus.push(pdu); + } + Ok(pdus) + }) } /// Search the DB for the signing keys of the given server, if we don't have them @@ -1280,9 +1365,10 @@ pub(crate) async fn fetch_signing_keys( pub(crate) fn append_incoming_pdu( db: &Database, pdu: &PduEvent, + pdu_json: CanonicalJsonObject, new_room_leaves: HashSet, state: &StateMap>, -) -> Result<()> { +) -> Result> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1295,7 +1381,7 @@ pub(crate) fn append_incoming_pdu( db.rooms.append_pdu( pdu, - utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + pdu_json, count, pdu_id.clone().into(), &new_room_leaves.into_iter().collect::>(), @@ -1366,7 +1452,7 @@ pub(crate) fn append_incoming_pdu( } } - Ok(()) + Ok(pdu_id) } #[cfg_attr( @@ -1495,6 +1581,291 @@ pub fn get_room_state_ids_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/make_join/<_>/<_>", 
data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn create_join_event_template_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !db.rooms.exists(&body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Server is not in room.", + )); + } + + if !body.ver.contains(&RoomVersionId::Version6) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: RoomVersionId::Version6, + }, + "Room version not supported.", + )); + } + + let prev_events = db + .rooms + .get_pdu_leaves(&body.room_id)? + .into_iter() + .take(20) + .collect::>(); + + let create_event = db + .rooms + .room_state_get(&body.room_id, &EventType::RoomCreate, "")?; + + let create_event_content = create_event + .as_ref() + .map(|create_event| { + Ok::<_, Error>( + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, + ) + }) + .transpose()?; + + let create_prev_event = if prev_events.len() == 1 + && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) + { + create_event.map(Arc::new) + } else { + None + }; + + // If there was no create event yet, assume we are creating a version 6 room right now + let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + + let content = serde_json::to_value(MemberEventContent { + avatar_url: None, + displayname: None, + is_direct: None, + membership: MembershipState::Join, + third_party_invite: None, + }) + .expect("member event is valid value"); + + let state_key = body.user_id.to_string(); + let kind = EventType::RoomMember; + + let auth_events = db.rooms.get_auth_events( + &body.room_id, + &kind, + &body.user_id, + Some(&state_key), + &content, + )?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = BTreeMap::new(); + + if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ + unsigned.insert("prev_content".to_owned(), prev_pdu.content); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + ); + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: body.room_id.clone(), + sender: body.user_id.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind, + content, + state_key: Some(state_key), + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts: None, + unsigned, + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }; + + let auth_check = state_res::auth_check( + &room_version, + &Arc::new(pdu.clone()), + create_prev_event, + &auth_events, + None, // TODO: third_party_invite + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + Ok(create_join_event_template::v1::Response { + room_version: Some(RoomVersionId::Version6), + event: serde_json::from_value::>( + serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"), + ) + .expect("Raw::from_value always works"), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn create_join_event_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + // We need to return the state prior to joining, let's keep a reference to that here + let shortstatehash = + db.rooms + .current_shortstatehash(&body.room_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&body.pdu) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + let origin = serde_json::from_value::>( + serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ))?) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let pdu_id = handle_incoming_pdu( + &origin, + &event_id, + value, + true, + &db, + &pub_key_map, + &mut auth_cache, + ) + .await + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + + let state_ids = db.rooms.state_full_ids(shortstatehash)?; + + let mut auth_chain_ids = BTreeSet::::new(); + let mut todo = state_ids.iter().cloned().collect::>(); + + while let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? { + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + todo.remove(&event_id); + } + + for server in db + .rooms + .room_servers(&body.room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != db.globals.server_name()) + { + db.sending.send_pdu(&server, &pdu_id)?; + } + + Ok(create_join_event::v2::Response { + room_state: RoomState { + auth_chain: auth_chain_ids + .iter() + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(|json| PduEvent::convert_to_outgoing_federation_event(json)) + .collect(), + state: state_ids + .iter() + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(|json| PduEvent::convert_to_outgoing_federation_event(json)) + .collect(), + }, + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v2/invite/<_>/<_>", data = "") @@ -1504,6 +1875,10 @@ pub async fn create_invite_route<'a>( db: State<'a, Database>, body: Ruma, ) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + if body.room_version < RoomVersionId::Version6 { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -1599,6 +1974,31 @@ pub async fn create_invite_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/query/directory", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_room_information_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let room_id = db + .rooms + .id_from_alias(&body.room_alias)? 
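The `send_join` response has to ship the full auth chain of the pre-join state, and the loop above computes it as a transitive closure over `auth_events`. The same idea as a standalone function — a sketch only, where `auth_events_of` stands in for the `db.rooms.get_pdu(..)` lookups and the function name is illustrative:

```rust
use std::collections::{BTreeSet, HashSet};

/// Collect every event reachable from `state_ids` by repeatedly following
/// auth_events, i.e. the auth chain of the given state.
fn auth_chain_ids(
    state_ids: &[String],
    auth_events_of: impl Fn(&str) -> Vec<String>,
) -> BTreeSet<String> {
    let mut chain = BTreeSet::new();
    let mut todo: HashSet<String> = state_ids.iter().cloned().collect();

    while let Some(event_id) = todo.iter().next().cloned() {
        todo.remove(&event_id);
        for auth_id in auth_events_of(&event_id) {
            // Only newly discovered events need to be expanded further.
            if chain.insert(auth_id.clone()) {
                todo.insert(auth_id);
            }
        }
    }
    chain
}
```

This terminates because auth chains bottom out at the `m.room.create` event, which has no `auth_events` of its own.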
+ .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; + + Ok(get_room_information::v1::Response { + room_id, + servers: vec![db.globals.server_name().to_owned()], + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") From ab58609d3d85149ffcd342911dd326f397f39f24 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 16 Apr 2021 22:07:58 +0200 Subject: [PATCH 0524/1727] No longer use/support a local environment file --- Cargo.toml | 1 - debian/env.local | 33 --------------------------------- 2 files changed, 34 deletions(-) delete mode 100644 debian/env.local diff --git a/Cargo.toml b/Cargo.toml index 57b6f7f..ebfd56f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,7 +98,6 @@ instead of a server that has high scalability.""" section = "net" priority = "optional" assets = [ - ["debian/env.local", "etc/matrix-conduit/local", "644"], ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], diff --git a/debian/env.local b/debian/env.local deleted file mode 100644 index cd552de..0000000 --- a/debian/env.local +++ /dev/null @@ -1,33 +0,0 @@ -# Conduit homeserver local configuration -# -# Conduit is an application based on the Rocket web framework. -# Configuration of Conduit happens via Debconf (see the resulting config in -# `/etc/matrix-conduit/debian`) and optionally by uncommenting and tweaking the -# variables in this file below. - -# The maximum size of a Matrix HTTP requests in bytes. -# -# This mostly affects the size of files that can be downloaded/uploaded. -# It defaults to 20971520 (20MB). -#ROCKET_MAX_REQUEST_SIZE=20971520 - -# Whether user registration is allowed. -# -# User registration is not disabled by default. -#ROCKET_REGISTRATION_DISABLED=false - -# Whether encryption is enabled. -# -# (End-to-end) encryption is not disabled by default. -#ROCKET_ENCRYPTION_DISABLED=false - -# Whether federation with other Matrix servers is enabled. -# -# Federation is not enabled by default; it is still experimental. -#ROCKET_FEDERATION_ENABLED=false - -# The log level of the homeserver. -# -# The log level is "critical" by default. -# Allowed values are: "off", "normal", "debug", "critical" -#ROCKET_LOG="critical" From d4e0ba245baaebe8b9fc537250138c17b08c8a70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 19 Apr 2021 11:53:46 +0200 Subject: [PATCH 0525/1727] fix: bug when fetching events over federation --- src/server_server.rs | 105 ++++++++++++++++++++++--------------------- 1 file changed, 55 insertions(+), 50 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ed4be0c..5779b90 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1188,48 +1188,47 @@ pub(crate) fn fetch_and_handle_events<'a>( let mut pdus = vec![]; for id in events { // a. Look at auth cache - let pdu = - match auth_cache.get(id) { + let pdu = match auth_cache.get(id) { + Some(pdu) => { + debug!("Found {} in cache", id); + // We already have the auth chain for events in cache + pdu.clone() + } + // b. Look in the main timeline (pduid_pdu tree) + // c. Look at outlier pdu tree + // (get_pdu checks both) + None => match db.rooms.get_pdu(&id)? 
{ Some(pdu) => { - debug!("Found {} in cache", id); - // We already have the auth chain for events in cache - pdu.clone() + debug!("Found {} in db", id); + // We need to fetch the auth chain + let _ = fetch_and_handle_events( + db, + origin, + &pdu.auth_events, + pub_key_map, + auth_cache, + ) + .await?; + Arc::new(pdu) } - // b. Look in the main timeline (pduid_pdu tree) - // c. Look at outlier pdu tree - // (get_pdu checks both) - None => match db.rooms.get_pdu(&id)? { - Some(pdu) => { - debug!("Found {} in db", id); - // We need to fetch the auth chain - let _ = fetch_and_handle_events( - db, + None => { + // d. Ask origin server over federation + debug!("Fetching {} over federation.", id); + match db + .sending + .send_federation_request( + &db.globals, origin, - &pdu.auth_events, - pub_key_map, - auth_cache, + get_event::v1::Request { event_id: &id }, ) - .await?; - Arc::new(pdu) - } - None => { - // d. Ask origin server over federation - debug!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - debug!("Got {} over federation: {:?}", id, res); - let (event_id, value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - // This will also fetch the auth chain - match handle_incoming_pdu( + .await + { + Ok(res) => { + debug!("Got {} over federation: {:?}", id, res); + let (event_id, mut value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu)?; + // This will also fetch the auth chain + match handle_incoming_pdu( origin, &event_id, value.clone(), @@ -1240,25 +1239,31 @@ pub(crate) fn fetch_and_handle_events<'a>( ) .await { - Ok(_) => Arc::new(serde_json::from_value( - serde_json::to_value(value) - .expect("canonicaljsonobject is valid value"), - ) - .expect("This is possible because handle_incoming_pdu worked")), + Ok(_) => { + value.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + + Arc::new(serde_json::from_value( + serde_json::to_value(value).expect("canonicaljsonobject is valid value"), + ).expect("This is possible because handle_incoming_pdu worked")) + } Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); continue; } } - } - Err(_) => { - warn!("Failed to fetch event: {}", id); - continue; - } + } + Err(_) => { + warn!("Failed to fetch event: {}", id); + continue; } } - }, - }; + } + }, + }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); pdus.push(pdu); } From 0b9182455c65ef1dc339182bf1e604fe42441658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 19 Apr 2021 11:57:17 +0200 Subject: [PATCH 0526/1727] fix: use working email --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 6959759..7e6dd78 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -2,7 +2,7 @@ ## Getting help -If you run into any problems while setting up Conduit, write an email to `support@conduit.rs`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). 
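On the event-fetching fix a little further up: `fetch_and_handle_events` tries three tiers for every event id (the in-memory auth cache, the local database, and finally a `/get_event` request to the origin server), and the patched code now re-inserts the `event_id` field into the fetched object before caching it. A minimal sketch of that lookup cascade, with plain closures standing in for the cache, the database and the network call (names and types here are illustrative, not Conduit's API):

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Pdu {
    event_id: String,
    body: String,
}

/// Resolve an event by id: cache first, then local storage, then a
/// (possibly failing) remote fetch. Whatever is found is written back
/// into the cache so later lookups are cheap.
fn resolve_event(
    id: &str,
    cache: &mut HashMap<String, Pdu>,
    local: impl Fn(&str) -> Option<Pdu>,
    remote: impl Fn(&str) -> Result<Pdu, String>,
) -> Option<Pdu> {
    if let Some(pdu) = cache.get(id) {
        return Some(pdu.clone());
    }

    let found = match local(id) {
        Some(pdu) => Some(pdu),
        None => match remote(id) {
            Ok(pdu) => Some(pdu),
            Err(err) => {
                eprintln!("failed to fetch {}: {}", id, err);
                None
            }
        },
    };

    if let Some(pdu) = &found {
        cache.insert(id.to_owned(), pdu.clone());
    }
    found
}

fn main() {
    let mut cache = HashMap::new();
    let local = |id: &str| (id == "$local").then(|| Pdu { event_id: id.to_owned(), body: "from db".into() });
    let remote = |id: &str| Ok(Pdu { event_id: id.to_owned(), body: "from origin".into() });

    assert_eq!(resolve_event("$local", &mut cache, local, remote).unwrap().body, "from db");
    assert_eq!(resolve_event("$other", &mut cache, local, remote).unwrap().body, "from origin");
    // The second event is now cached, so a repeat lookup never hits the network.
    assert!(cache.contains_key("$other"));
}
```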
## Installing Conduit From 4c4e1f90274c02f039d7f4c4e15e6d7e1463dfd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Apr 2021 12:53:20 +0200 Subject: [PATCH 0527/1727] docs: update readme --- README.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 32fa657..292ee83 100644 --- a/README.md +++ b/README.md @@ -23,11 +23,23 @@ example) and register on the `https://conduit.koesters.xyz` homeserver. - [Rocket](https://rocket.rs): A flexible web framework -#### What are the biggest things still missing? +#### What is the current status? -- Most federation features (invites, e2ee) -- Push notifications on mobile -- Notification settings +Conduit can already be used chat with other users on Conduit, chat with users +from other Matrix servers and even to chat with users on other platforms using +appservices. When chatting with users on the same Conduit server, everything +should work assuming you use a compatible client. + +**You should not join Matrix rooms without asking the admins first.** We do not +know whether Conduit is safe for general use yet, so you should assume there is +some chance that it breaks rooms permanently for all participating users. We +are not aware of such a bug today, but we would like to do more testing. + +There are still a few important features missing: + +- Database stability (currently you might have to do manual upgrades or even wipe the db for new versions) +- End-to-end encrypted chats over federation +- Typing, presence, read receipts etc. over federation - Lots of testing Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). From ed253e236cff1cdab7bd18dc03a4046da4ca3c4f Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Wed, 21 Apr 2021 00:35:44 -0300 Subject: [PATCH 0528/1727] chore: document FedDest, fix tests --- src/server_server.rs | 62 ++++++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ac38f4d..553f944 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -45,6 +45,20 @@ use std::{ #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; +/// Wraps either an literal IP address plus port, or a hostname plus complement +/// (colon-plus-port if it was specified). +/// +/// Note: A `FedDest::Named` might contain an IP address in string form if there +/// was no port specified to construct a SocketAddr with. 
+/// +/// # Examples: +/// ```rust,ignore +/// FedDest::Literal("198.51.100.3:8448".parse()?); +/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); +/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); +/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); +/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// ``` #[derive(Clone, Debug, PartialEq)] enum FedDest { Literal(SocketAddr), @@ -52,21 +66,21 @@ enum FedDest { } impl FedDest { - fn into_https_url(self) -> String { + fn into_https_string(self) -> String { match self { Self::Literal(addr) => format!("https://{}", addr), Self::Named(host, port) => format!("https://{}{}", host, port), } } - fn into_uri(self) -> String { + fn into_uri_string(self) -> String { match self { Self::Literal(addr) => addr.to_string(), Self::Named(host, ref port) => host + port, } } - fn host(&self) -> String { + fn hostname(&self) -> String { match &self { Self::Literal(addr) => addr.ip().to_string(), Self::Named(host, _) => host.clone(), @@ -99,21 +113,23 @@ where } else { let result = find_actual_destination(globals, &destination).await; let (actual_destination, host) = result.clone(); - let result = (result.0.into_https_url(), result.1.into_uri()); + let result_string = (result.0.into_https_string(), result.1.into_uri_string()); globals .actual_destination_cache .write() .unwrap() - .insert(Box::::from(destination), result.clone()); - if actual_destination.host() != host.host() { + .insert(Box::::from(destination), result_string.clone()); + let dest_hostname = actual_destination.hostname(); + let host_hostname = host.hostname(); + if dest_hostname != host_hostname { globals.tls_name_override.write().unwrap().insert( - actual_destination.host(), - webpki::DNSNameRef::try_from_ascii_str(&host.host()) + dest_hostname, + webpki::DNSNameRef::try_from_ascii_str(&host_hostname) .unwrap() .to_owned(), ); } - result + result_string }; let mut http_request = request @@ -317,6 +333,8 @@ async fn find_actual_destination( } }; + // Can't use get_ip_with_port here because we don't want to add a port + // to an IP address if it wasn't specified let hostname = if let Ok(addr) = hostname.parse::() { FedDest::Literal(addr) } else if let Ok(addr) = hostname.parse::() { @@ -1743,45 +1761,45 @@ pub async fn fetch_required_signing_keys( #[cfg(test)] mod tests { - use super::{add_port_to_hostname, get_ip_with_port}; + use super::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] fn ips_get_default_ports() { assert_eq!( - get_ip_with_port(String::from("1.1.1.1")), - Some(String::from("1.1.1.1:8448")) + get_ip_with_port("1.1.1.1"), + Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) ); assert_eq!( - get_ip_with_port(String::from("dead:beef::")), - Some(String::from("[dead:beef::]:8448")) + get_ip_with_port("dead:beef::"), + Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) ); } #[test] fn ips_keep_custom_ports() { assert_eq!( - get_ip_with_port(String::from("1.1.1.1:1234")), - Some(String::from("1.1.1.1:1234")) + get_ip_with_port("1.1.1.1:1234"), + Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) ); assert_eq!( - get_ip_with_port(String::from("[dead::beef]:8933")), - Some(String::from("[dead::beef]:8933")) + get_ip_with_port("[dead::beef]:8933"), + Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) ); } #[test] fn hostnames_get_default_ports() { assert_eq!( - add_port_to_hostname(String::from("example.com")), - "example.com:8448" + add_port_to_hostname("example.com"), + 
FedDest::Named(String::from("example.com"), String::from(":8448")) ) } #[test] fn hostnames_keep_custom_ports() { assert_eq!( - add_port_to_hostname(String::from("example.com:1337")), - "example.com:1337" + add_port_to_hostname("example.com:1337"), + FedDest::Named(String::from("example.com"), String::from(":1337")) ) } } From e815486030cbae0706663051a1ed4f2da1e9caf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Apr 2021 09:56:50 +0200 Subject: [PATCH 0529/1727] fix: don't allow inviting other users (not implemented yet) --- src/client_server/membership.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index dcd7c37..7eca676 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -152,6 +152,13 @@ pub async fn invite_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + if body.room_id.server_name() != db.globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Inviting users from other homeservers is not implemented yet.", + )); + } + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, From 71ed1b295a3204c1e6abb865f322b1874e0ebb1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Apr 2021 10:51:34 +0200 Subject: [PATCH 0530/1727] feat: /devices route --- src/database.rs | 1 + src/database/users.rs | 23 +++++++++++++++++++++++ src/main.rs | 1 + src/server_server.rs | 41 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 66 insertions(+) diff --git a/src/database.rs b/src/database.rs index 6bb1b17..06a708d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -123,6 +123,7 @@ impl Database { userid_avatarurl: db.open_tree("userid_avatarurl")?, userdeviceid_token: db.open_tree("userdeviceid_token")?, userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: db.open_tree("userid_devicelistversion")?, token_userdeviceid: db.open_tree("token_userdeviceid")?, onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, userid_lastonetimekeyupdate: db.open_tree("userid_lastonetimekeyupdate")?, diff --git a/src/database/users.rs b/src/database/users.rs index c794e52..9cdfb5f 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -22,6 +22,7 @@ pub struct Users { pub(super) userid_avatarurl: sled::Tree, pub(super) userdeviceid_token: sled::Tree, pub(super) userdeviceid_metadata: sled::Tree, // This is also used to check if a device exists + pub(super) userid_devicelistversion: sled::Tree, // DevicelistVersion = u64 pub(super) token_userdeviceid: sled::Tree, pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + DeviceKeyId @@ -189,6 +190,10 @@ impl Users { userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); + self.userid_devicelistversion + .update_and_fetch(&user_id.as_bytes(), utils::increment)? + .expect("utils::increment will always put in a value"); + self.userdeviceid_metadata.insert( userdeviceid, serde_json::to_string(&Device { @@ -227,6 +232,10 @@ impl Users { // TODO: Remove onetimekeys + self.userid_devicelistversion + .update_and_fetch(&user_id.as_bytes(), utils::increment)? 
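The reworked tests above pin down what the two resolution helpers are expected to return: bare IP literals get the default federation port 8448, explicit ports are kept, and hostnames become `FedDest::Named` with a colon-prefixed port string. A self-contained sketch that satisfies exactly those cases (a simplified stand-in, not the code in `src/server_server.rs`):

```rust
use std::net::{IpAddr, SocketAddr};

#[derive(Clone, Debug, PartialEq)]
enum FedDest {
    Literal(SocketAddr),
    Named(String, String),
}

/// An IP literal with a port is taken as-is; a bare IP literal gets the
/// default federation port 8448; anything else is not an IP destination.
fn get_ip_with_port(destination: &str) -> Option<FedDest> {
    if let Ok(addr) = destination.parse::<SocketAddr>() {
        Some(FedDest::Literal(addr))
    } else if let Ok(ip) = destination.parse::<IpAddr>() {
        Some(FedDest::Literal(SocketAddr::new(ip, 8448)))
    } else {
        None
    }
}

/// Hostnames keep an explicit `:port` suffix and otherwise fall back to `:8448`.
fn add_port_to_hostname(destination: &str) -> FedDest {
    match destination.rfind(':') {
        Some(pos) => FedDest::Named(destination[..pos].to_owned(), destination[pos..].to_owned()),
        None => FedDest::Named(destination.to_owned(), ":8448".to_owned()),
    }
}

fn main() {
    assert_eq!(
        get_ip_with_port("1.1.1.1"),
        Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
    );
    assert_eq!(
        add_port_to_hostname("example.com:1337"),
        FedDest::Named("example.com".to_owned(), ":1337".to_owned())
    );
}
```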
+ .expect("utils::increment will always put in a value"); + self.userdeviceid_metadata.remove(&userdeviceid)?; Ok(()) @@ -811,6 +820,10 @@ impl Users { // Only existing devices should be able to call this. assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); + self.userid_devicelistversion + .update_and_fetch(&user_id.as_bytes(), utils::increment)? + .expect("utils::increment will always put in a value"); + self.userdeviceid_metadata.insert( userdeviceid, serde_json::to_string(device) @@ -840,6 +853,16 @@ impl Users { }) } + pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + self.userid_devicelistversion + .get(user_id.as_bytes())? + .map_or(Ok(None), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) + .map(Some) + }) + } + pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/main.rs b/src/main.rs index 3157023..ba8448d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -171,6 +171,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::create_join_event_template_route, server_server::create_join_event_route, server_server::create_invite_route, + server_server::get_devices_route, server_server::get_room_information_route, server_server::get_profile_information_route, ], diff --git a/src/server_server.rs b/src/server_server.rs index 5779b90..e969b31 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -8,6 +8,7 @@ use ruma::{ api::{ client::error::ErrorKind, federation::{ + device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_remote_server_keys, get_server_keys, get_server_version, ServerSigningKeys, @@ -1979,6 +1980,46 @@ pub async fn create_invite_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/user/devices/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_devices_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + Ok(get_devices::v1::Response { + user_id: body.user_id.clone(), + stream_id: db + .users + .get_devicelist_version(&body.user_id)? + .unwrap_or(0) + .try_into() + .expect("version will not grow that large"), + devices: db + .users + .all_devices_metadata(&body.user_id) + .filter_map(|r| r.ok()) + .filter_map(|metadata| { + Some(UserDevice { + keys: db + .users + .get_device_keys(&body.user_id, &metadata.device_id) + .ok()??, + device_id: metadata.device_id, + device_display_name: metadata.display_name, + }) + }) + .collect(), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/directory", data = "") From 2f440e644dedb958ed290671165a3441e2109b55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Apr 2021 10:59:07 +0200 Subject: [PATCH 0531/1727] fix: clippy --- src/server_server.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e969b31..6024b8a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -862,8 +862,6 @@ fn handle_incoming_pdu<'a>( .collect(), ); } - &state_at_incoming_event; - // TODO: set incoming_auth_events? 
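The new `userid_devicelistversion` tree above is bumped with sled's `update_and_fetch` and a `utils::increment` helper every time a device is created, removed or updated, and the federation `/user/devices` route exposes the counter as `stream_id`. Roughly how such an increment callback tends to look (a guess at the helper's shape, not necessarily Conduit's exact implementation):

```rust
use std::convert::TryInto;

/// Callback for sled's `update_and_fetch`: treat the stored bytes as a
/// big-endian u64, add one, and write the bytes back. A missing or
/// malformed value restarts the counter at 1.
fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old.map(|bytes| bytes.try_into()) {
        Some(Ok(bytes)) => u64::from_be_bytes(bytes) + 1,
        _ => 1,
    };
    Some(number.to_be_bytes().to_vec())
}

fn main() {
    let first = increment(None).unwrap();
    let second = increment(Some(&first[..])).unwrap();
    assert_eq!(u64::from_be_bytes(second.try_into().unwrap()), 2);
}
```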
} @@ -1860,12 +1858,12 @@ pub async fn create_join_event_route<'a>( auth_chain: auth_chain_ids .iter() .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(|json| PduEvent::convert_to_outgoing_federation_event(json)) + .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(|json| PduEvent::convert_to_outgoing_federation_event(json)) + .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }, } @@ -2036,7 +2034,7 @@ pub fn get_room_information_route<'a>( let room_id = db .rooms .id_from_alias(&body.room_alias)? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; Ok(get_room_information::v1::Response { room_id, From 5bf43a4d7e5861cd9696da79aa26714bd18c8c3c Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 16 Apr 2021 22:10:07 +0200 Subject: [PATCH 0532/1727] Generate conduit.toml instead of debian environment file Only generate this file once. I am not sure what to do with future upgrades yet. --- Cargo.toml | 2 +- conduit-example.toml | 2 +- debian/README.Debian | 12 ++--- debian/matrix-conduit.service | 5 +-- debian/postinst | 84 ++++++++++++++++++++--------------- 5 files changed, 57 insertions(+), 48 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ebfd56f..d510a1b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,7 +103,7 @@ assets = [ ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] conf-files = [ - "/etc/matrix-conduit/local" + "/etc/matrix-conduit/conduit.toml" ] maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } diff --git a/conduit-example.toml b/conduit-example.toml index 87f959d..246465e 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -11,7 +11,7 @@ # YOU NEED TO EDIT THIS #server_name = "your.server.name" -# This is the only directly where Conduit will save its data +# This is the only directory where Conduit will save its data database_path = "/var/lib/conduit/conduit.db" # The port Conduit will be running on. You need to set up a reverse proxy in diff --git a/debian/README.Debian b/debian/README.Debian index 69fb975..22416cf 100644 --- a/debian/README.Debian +++ b/debian/README.Debian @@ -4,19 +4,19 @@ Conduit for Debian Configuration ------------- -When installed, Debconf handles the configuration of the homeserver (host)name, -the address and port it listens on. These configuration variables end up in -/etc/matrix-conduit/debian. +When installed, Debconf generates the configuration of the homeserver +(host)name, the address and port it listens on. This configuration ends up in +/etc/matrix-conduit/conduit.toml. You can tweak more detailed settings by uncommenting and setting the variables -in /etc/matrix-conduit/local. This involves settings such as the maximum file -size for download/upload, enabling federation, etc. +in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum +file size for download/upload, enabling federation, etc. Running ------- The package uses the matrix-conduit.service systemd unit file to start and -stop Conduit. It loads the configuration files mentioned above to set up the +stop Conduit. It loads the configuration file mentioned above to set up the environment before running the server. 
This package assumes by default that Conduit is placed behind a reverse proxy diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 5ab7917..7c12d1a 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -34,10 +34,7 @@ SystemCallFilter=@system-service SystemCallErrorNumber=EPERM StateDirectory=matrix-conduit -Environment="ROCKET_ENV=production" -Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" -EnvironmentFile=/etc/matrix-conduit/debian -EnvironmentFile=/etc/matrix-conduit/local +Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" ExecStart=/usr/sbin/matrix-conduit Restart=on-failure diff --git a/debian/postinst b/debian/postinst index bd7fb85..4a55930 100644 --- a/debian/postinst +++ b/debian/postinst @@ -4,7 +4,7 @@ set -e . /usr/share/debconf/confmodule CONDUIT_CONFIG_PATH=/etc/matrix-conduit -CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/debian" +CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit case "$1" in @@ -25,48 +25,60 @@ case "$1" in chown _matrix-conduit "$CONDUIT_DATABASE_PATH" fi - # Write the debconf values in the config. - db_get matrix-conduit/hostname - ROCKET_SERVER_NAME="$RET" - db_get matrix-conduit/address - ROCKET_ADDRESS="$RET" - db_get matrix-conduit/port - ROCKET_PORT="$RET" - cat >"$CONDUIT_CONFIG_FILE" << EOF -# Conduit homeserver Debian configuration -# -# Conduit is an application based on the Rocket web framework. -# Configuration of Conduit happens via Debconf (of which the resulting config -# is in this file) and optionally by uncommenting and tweaking the variables in -# /etc/matrix-conduit/local. + if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then + # Write the debconf values in the config. + db_get matrix-conduit/hostname + CONDUIT_SERVER_NAME="$RET" + db_get matrix-conduit/address + CONDUIT_ADDRESS="$RET" + db_get matrix-conduit/port + CONDUIT_PORT="$RET" + mkdir -p "$CONDUIT_CONFIG_PATH" + cat > "$CONDUIT_CONFIG_FILE" << EOF +[global] +# The server_name is the name of this server. It is used as a suffix for user +# and room ids. Examples: matrix.org, conduit.rs +# The Conduit server needs to be reachable at https://your.server.name/ on port +# 443 (client-server) and 8448 (federation) OR you can create /.well-known +# files to redirect requests. See +# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client +# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information. +server_name = "${CONDUIT_SERVER_NAME}" -# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! -# -# Please make changes by running: -# -# \$ dpkg-reconfigure matrix-conduit -# -# or by providing overriding changes in /etc/matrix-conduit/local. +# This is the only directory where Conduit will save its data. +database_path = "${CONDUIT_DATABASE_PATH}" -# The server (host)name of the Matrix homeserver. -# -# This is the hostname the homeserver will be reachable at via a client. -ROCKET_SERVER_NAME="$ROCKET_SERVER_NAME" - -# The address the Matrix homeserver listens on. -# +# The address Conduit will be listening on. # By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to # only listen on the localhost when using a reverse proxy. -ROCKET_ADDRESS="$ROCKET_ADDRESS" +address = "${CONDUIT_ADDRESS}" -# The port of the Matrix homeserver. -# -# This port is could be any available port if accessed by a reverse proxy. -# By default the server listens on port 8000. 
-ROCKET_PORT="$ROCKET_PORT" +# The port Conduit will be running on. You need to set up a reverse proxy in +# your web server (e.g. apache or nginx), so all requests to /_matrix on port +# 443 and 8448 will be forwarded to the Conduit instance running on this port. +port = ${CONDUIT_PORT} -# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! +# Max size for uploads +max_request_size = 20_000_000 # in bytes + +# Disable registration. No new users will be able to register on this server. +#allow_registration = false + +# Disable encryption, so no new encrypted rooms can be created. +# Note: Existing rooms will continue to work. +#allow_encryption = false +#allow_federation = false + +# Enable jaeger to support monitoring and troubleshooting through jaeger. +#allow_jaeger = false + +#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 +#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#workers = 4 # default: cpu core count * 2 EOF + fi ;; esac From f3b1096417605790c04ce0eb051efb339c0008d1 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 16 Apr 2021 22:10:52 +0200 Subject: [PATCH 0533/1727] Change the default library path (follows DEPLOY.md) --- debian/postinst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/postinst b/debian/postinst index 4a55930..c3d727c 100644 --- a/debian/postinst +++ b/debian/postinst @@ -5,7 +5,7 @@ set -e CONDUIT_CONFIG_PATH=/etc/matrix-conduit CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db case "$1" in configure) @@ -13,7 +13,7 @@ case "$1" in if ! getent passwd _matrix-conduit > /dev/null ; then echo 'Adding system user for the Conduit Matrix homeserver' 1>&2 adduser --system --group --quiet \ - --home $CONDUIT_DATABASE_PATH \ + --home "$CONDUIT_DATABASE_PATH" \ --disabled-login \ --force-badname \ _matrix-conduit From 19aaffeb23c6f6838e1afebd975a49ff0fbf1ab8 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 16 Apr 2021 22:11:07 +0200 Subject: [PATCH 0534/1727] Change the default port (follows DEPLOY.md) --- debian/README.Debian | 8 ++++---- debian/templates | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/debian/README.Debian b/debian/README.Debian index 22416cf..5f63b5c 100644 --- a/debian/README.Debian +++ b/debian/README.Debian @@ -19,10 +19,10 @@ The package uses the matrix-conduit.service systemd unit file to start and stop Conduit. It loads the configuration file mentioned above to set up the environment before running the server. -This package assumes by default that Conduit is placed behind a reverse proxy -such as Apache or nginx. This default deployment entails just listening on -127.0.0.1 and the free port 14004 and is reachable via a client using the URL -http://localhost:14004. +This package assumes by default that Conduit will be placed behind a reverse +proxy such as Apache or nginx. This default deployment entails just listening +on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL +http://localhost:6167. At a later stage this packaging may support also setting up TLS and running stand-alone. 
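The generated `conduit.toml` above keeps everything under a `[global]` table, and the systemd unit hands its path to the server through the `CONDUIT_CONFIG` environment variable. As a rough illustration of what consuming such a file involves, here is a sketch using the `serde` and `toml` crates with a hypothetical struct covering a subset of the fields; Conduit's actual config loading may be wired up differently:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ConfigFile {
    global: Global,
}

#[derive(Debug, Deserialize)]
struct Global {
    server_name: String,
    database_path: String,
    #[serde(default = "default_address")]
    address: String,
    #[serde(default = "default_port")]
    port: u16,
}

fn default_address() -> String {
    "0.0.0.0".to_owned()
}

fn default_port() -> u16 {
    6167
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // In the packaged setup the path comes from the unit file:
    // Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
    let path = std::env::var("CONDUIT_CONFIG")
        .unwrap_or_else(|_| "/etc/matrix-conduit/conduit.toml".to_owned());

    // Fall back to an inline example so this sketch runs anywhere.
    let raw = std::fs::read_to_string(&path).unwrap_or_else(|_| {
        r#"
            [global]
            server_name = "your.server.name"
            database_path = "/var/lib/matrix-conduit/conduit_db"
            port = 6167
        "#
        .to_owned()
    });

    let config: ConfigFile = toml::from_str(&raw)?;
    println!(
        "{} will listen on {}:{}",
        config.global.server_name, config.global.address, config.global.port
    );
    Ok(())
}
```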
In this case, however, you need to set up some certificates and diff --git a/debian/templates b/debian/templates index a408f84..c4281ad 100644 --- a/debian/templates +++ b/debian/templates @@ -16,6 +16,6 @@ Description: The listen address of the Matrix homeserver Template: matrix-conduit/port Type: string -Default: 14004 +Default: 6167 Description: The port of the Matrix homeserver This port is most often just accessed by a reverse proxy. From 4fb2f17b8365185319595d178d72df6f06bcacfd Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 16 Apr 2021 22:11:58 +0200 Subject: [PATCH 0535/1727] Purge debconf changes from the DB on purge --- debian/postrm | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/debian/postrm b/debian/postrm index 04ca325..2894909 100644 --- a/debian/postrm +++ b/debian/postrm @@ -1,11 +1,16 @@ #!/bin/sh set -e +. /usr/share/debconf/confmodule + CONDUIT_CONFIG_PATH=/etc/matrix-conduit CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit case $1 in purge) + # Remove debconf changes from the db + db_purge + # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior # "configuration files must be preserved when the package is removed, and # only deleted when the package is purged." From f63a624743ef73f417b569ecadded8d4046c338e Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 23 Apr 2021 19:11:42 +0200 Subject: [PATCH 0536/1727] Add text about using cargo-deb to the README --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index 32fa657..b128e81 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,26 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read more](DEPLOY.md) +##### Deloy using a Debian package + +You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info): + +```shell +$ cargo install cargo-deb +``` + +Then, you can create and install a Debian package at a whim: + +```shell +$ cargo deb +$ dpkg -i target/debian/matrix-conduit_0.1.0_amd64.deb +``` + +This will build, package, install, configure and start Conduit. [Read more](debian/README.Debian). + +Note that `cargo deb` supports [cross-compilation](https://github.com/mmstick/cargo-deb/#cross-compilation) too! +Official Debian packages will follow once Conduit starts to have stable releases. 
+ ##### Deploy using Docker Pull and run the docker image with From 7e4320a0792eba530d50001f86d8e65c2546b014 Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Fri, 23 Apr 2021 15:27:35 -0300 Subject: [PATCH 0537/1727] chore: document proper proxy options in DEPLOY.md --- DEPLOY.md | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 7e6dd78..3d4541a 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -136,8 +136,8 @@ Listen 8448 ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode -ProxyPass /_matrix/ http://localhost:6167/_matrix/ -ProxyPassReverse /_matrix/ http://localhost:6167/_matrix/ +ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon +ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ Include /etc/letsencrypt/options-ssl-apache.conf SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS @@ -159,13 +159,23 @@ http section of `/etc/nginx/nginx.conf` ``` server { - listen 443; - listen 8448; + listen 443 ssl http2; + listen [::]:443 ssl http2; + listen 8448 ssl http2; + listen [::]:8448 ssl http2; server_name your.server.name; # EDIT THIS + merge_slashes off; location /_matrix/ { - proxy_pass http://localhost:6167/_matrix/; + proxy_pass http://127.0.0.1:6167$request_uri; + proxy_set_header Host $http_host; + proxy_buffering off; } + + ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS + ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS + ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS + include /etc/letsencrypt/options-ssl-nginx.conf; } ``` **You need to make some edits again.** When you are done, run From 1f84013b2a10fb365ba1333ec01fa2e1abe24020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Apr 2021 14:06:39 +0200 Subject: [PATCH 0538/1727] feat: verify signatures for incoming requests --- src/ruma_wrapper.rs | 153 ++++++++++++++++++++++++++++++++++++++++--- src/server_server.rs | 5 +- 2 files changed, 149 insertions(+), 9 deletions(-) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index c60c04e..5685ac6 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -4,11 +4,12 @@ use ruma::{ identifiers::{DeviceId, UserId}, Outgoing, }; +use std::collections::BTreeMap; use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { - crate::utils, + crate::{server_server, utils}, log::{debug, warn}, rocket::{ data::{ @@ -21,7 +22,11 @@ use { tokio::io::AsyncReadExt, Request, State, }, - ruma::api::{AuthScheme, IncomingRequest}, + ruma::{ + api::{AuthScheme, IncomingRequest}, + signatures::CanonicalJsonValue, + ServerName, + }, std::convert::TryFrom, std::io::Cursor, }; @@ -72,6 +77,11 @@ where .map(|s| s[7..].to_owned()) // Split off "Bearer " .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())); + let limit = db.globals.max_request_size(); + let mut handle = data.open(ByteUnit::Byte(limit.into())); + let mut body = Vec::new(); + handle.read_to_end(&mut body).await.unwrap(); + let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = db.appservice .iter_all() @@ -129,7 +139,139 @@ where return Failure((Status::raw(582), ())); } } - AuthScheme::ServerSignatures => (None, None, false), + AuthScheme::ServerSignatures => { + // Get origin from header + let x_matrix = match request + .headers() + .get_one("Authorization") + .map(|s| { + s[9..] 
+ .split_terminator(',').map(|field| {let mut splits = field.splitn(2, '='); (splits.next(), splits.next().map(|s| s.trim_matches('"')))}).collect::>() + }) // Split off "X-Matrix " and parse the rest + { + Some(t) => t, + None => { + warn!("No Authorization header"); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let origin_str = match x_matrix.get(&Some("origin")) { + Some(Some(o)) => *o, + _ => { + warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let origin = match Box::::try_from(origin_str) { + Ok(s) => s, + _ => { + warn!( + "Invalid server name in X-Matrix header origin field: {:?}", + x_matrix + ); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let key = match x_matrix.get(&Some("key")) { + Some(Some(k)) => *k, + _ => { + warn!("Invalid X-Matrix header key field: {:?}", x_matrix); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let sig = match x_matrix.get(&Some("sig")) { + Some(Some(s)) => *s, + _ => { + warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let json_body = serde_json::from_slice::(&body); + + let mut request_map = BTreeMap::::new(); + + if let Ok(json_body) = json_body { + request_map.insert("content".to_owned(), json_body); + }; + + request_map.insert( + "method".to_owned(), + CanonicalJsonValue::String(request.method().to_string()), + ); + request_map.insert( + "uri".to_owned(), + CanonicalJsonValue::String(request.uri().to_string()), + ); + request_map.insert( + "origin".to_owned(), + CanonicalJsonValue::String(origin.as_str().to_owned()), + ); + request_map.insert( + "destination".to_owned(), + CanonicalJsonValue::String( + db.globals.server_name().as_str().to_owned(), + ), + ); + + let mut origin_signatures = BTreeMap::new(); + origin_signatures + .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); + + let mut signatures = BTreeMap::new(); + signatures.insert( + origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + ); + + request_map.insert( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ); + + let keys = match server_server::fetch_signing_keys( + &db, + &origin, + vec![&key.to_owned()], + ) + .await + { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); + + // Forbidden + return Failure((Status::raw(580), ())); + } + }; + + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert(origin.as_str().to_owned(), keys); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, false), + Err(e) => { + warn!("Failed to verify json request: {}: {:?} {:?}", e, pub_key_map, request_map); + + // Forbidden + return Failure((Status::raw(580), ())); + } + } + } AuthScheme::None => (None, None, false), } }; @@ -141,11 +283,6 @@ where http_request = http_request.header(header.name.as_str(), &*header.value); } - let limit = db.globals.max_request_size(); - let mut handle = data.open(ByteUnit::Byte(limit.into())); - let mut body = Vec::new(); - handle.read_to_end(&mut body).await.unwrap(); - let http_request = http_request.body(&*body).unwrap(); debug!("{:?}", http_request); match ::try_from_http_request(http_request) { diff --git a/src/server_server.rs b/src/server_server.rs index ad198fc..90b5099 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2106,7 +2106,10 @@ pub fn 
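The federation auth path above boils down to: take the `Authorization: X-Matrix origin=...,key=...,sig=...` header apart, rebuild a canonical request object (method, uri, origin, destination, content, signatures), fetch the origin's signing keys and run `ruma::signatures::verify_json` over it. The header parsing itself is plain string work; a small stand-alone version of just that step (simplified relative to the code above, which slices off the `"X-Matrix "` prefix and trims quotes the same way):

```rust
use std::collections::HashMap;

/// Split an `X-Matrix` Authorization header value into its key/value fields,
/// e.g. `X-Matrix origin=example.org,key="ed25519:1",sig="ABC..."`.
fn parse_x_matrix(header: &str) -> Option<HashMap<&str, &str>> {
    let rest = header.strip_prefix("X-Matrix ")?;

    let mut fields = HashMap::new();
    for field in rest.split_terminator(',') {
        let mut parts = field.splitn(2, '=');
        let name = parts.next()?.trim();
        let value = parts.next()?.trim_matches('"');
        fields.insert(name, value);
    }
    Some(fields)
}

fn main() {
    let header = r#"X-Matrix origin=other.server.name,key="ed25519:key1",sig="abc123""#;
    let fields = parse_x_matrix(header).expect("header is well-formed");

    assert_eq!(fields["origin"], "other.server.name");
    assert_eq!(fields["key"], "ed25519:key1");
    assert_eq!(fields["sig"], "abc123");
}
```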
get_room_information_route<'a>( let room_id = db .rooms .id_from_alias(&body.room_alias)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room alias not found.", + ))?; Ok(get_room_information::v1::Response { room_id, From 6e84d317b2f186130943c0cd6ccf21b416e84fad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 22 Apr 2021 11:26:20 +0200 Subject: [PATCH 0539/1727] improvement: update ruma --- Cargo.lock | 60 +++++++++++++++---------------- Cargo.toml | 4 +-- src/client_server/message.rs | 4 +-- src/client_server/session.rs | 7 +++- src/client_server/state.rs | 70 ++++++++++++++---------------------- src/database/pusher.rs | 22 ++++++------ src/ruma_wrapper.rs | 7 ++-- src/server_server.rs | 57 +++++++++++++---------------- 8 files changed, 106 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84001dc..c13e7d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1545,8 +1545,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.0.3" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "assign", "js_int", @@ -1565,8 +1565,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.17.0-alpha.4" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "http", "percent-encoding", @@ -1580,8 +1580,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.17.0-alpha.4" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1591,8 +1591,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.2.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "ruma-api", "ruma-common", @@ -1605,8 +1605,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.10.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "assign", "http", @@ -1624,8 +1624,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "indexmap", "js_int", 
@@ -1640,8 +1640,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.22.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "js_int", "ruma-common", @@ -1654,8 +1654,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.22.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1665,8 +1665,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.1.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "js_int", "ruma-api", @@ -1681,7 +1681,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "paste", "rand", @@ -1695,7 +1695,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "proc-macro2", "quote", @@ -1705,13 +1705,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" [[package]] name = "ruma-identity-service-api" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "ruma-api", "ruma-common", @@ -1723,8 +1723,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "js_int", "ruma-api", @@ -1739,7 +1739,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" 
+source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "form_urlencoded", "itoa", @@ -1752,7 +1752,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1763,7 +1763,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" +source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" dependencies = [ "base64 0.13.0", "ring", @@ -2053,7 +2053,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=4516d73e8c7495330619bfb5b42c3bbf704293d8#4516d73e8c7495330619bfb5b42c3bbf704293d8" +source = "git+https://github.com/ruma/state-res?rev=aa53d07f51ffb7258f5c1e499bddffd4c630f7df#aa53d07f51ffb7258f5c1e499bddffd4c630f7df" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index fa495df..4cf2ed4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7f3cc7fc6693a63", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "4f16b9357c15d649075393a723f23cf560251754", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "4516d73e8c7495330619bfb5b42c3bbf704293d8", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "aa53d07f51ffb7258f5c1e499bddffd4c630f7df", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 04f27de..ecd2665 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -5,7 +5,7 @@ use ruma::{ error::ErrorKind, r0::message::{get_message_events, send_message_event}, }, 
- events::EventContent, + events::EventType, EventId, }; use std::{ @@ -55,7 +55,7 @@ pub async fn send_message_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: body.content.event_type().into(), + event_type: EventType::from(&body.event_type), content: serde_json::from_str( body.json_body .as_ref() diff --git a/src/client_server/session.rs b/src/client_server/session.rs index cb6442d..3718003 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -26,7 +26,12 @@ use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] #[tracing::instrument] pub async fn get_login_types_route() -> ConduitResult { - Ok(get_login_types::Response::new(vec![get_login_types::LoginType::Password]).into()) + Ok( + get_login_types::Response::new(vec![get_login_types::LoginType::Password( + Default::default(), + )]) + .into(), + ) } /// # `POST /_matrix/client/r0/login` diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 88cce03..68e0c7f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -6,9 +6,13 @@ use ruma::{ r0::state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ - room::history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, - AnyStateEventContent, EventContent, EventType, + room::{ + canonical_alias::CanonicalAliasEventContent, + history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, + }, + AnyStateEventContent, EventType, }, + serde::Raw, EventId, RoomId, UserId, }; @@ -26,21 +30,13 @@ pub async fn send_state_event_for_key_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let content = serde_json::from_str::( - body.json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? - .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; - let event_id = send_state_event_for_key_helper( &db, sender_user, - &body.content, - content, &body.room_id, - Some(body.state_key.to_owned()), + EventType::from(&body.event_type), + &body.body.body, // Yes, I hate it too + body.state_key.to_owned(), ) .await?; @@ -58,31 +54,15 @@ pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma>, ) -> ConduitResult { - // This just calls send_state_event_for_key_route - let Ruma { - body, - sender_user, - json_body, - .. - } = body; - - let json = serde_json::from_str::( - json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
- .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( &db, - sender_user - .as_ref() - .expect("no user for send state empty key route"), - &body.content, - json, + sender_user, &body.room_id, - Some("".into()), + EventType::from(&body.event_type), + &body.body.body, + body.state_key.to_owned(), ) .await?; @@ -183,7 +163,7 @@ pub async fn get_state_events_for_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(&event.content) + content: serde_json::from_value(event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -234,7 +214,7 @@ pub async fn get_state_events_for_empty_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::value::to_raw_value(&event.content) + content: serde_json::from_value(event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -243,17 +223,19 @@ pub async fn get_state_events_for_empty_key_route( pub async fn send_state_event_for_key_helper( db: &Database, sender: &UserId, - content: &AnyStateEventContent, - json: serde_json::Value, room_id: &RoomId, - state_key: Option, + event_type: EventType, + json: &Raw, + state_key: String, ) -> Result { let sender_user = sender; - if let AnyStateEventContent::RoomCanonicalAlias(canonical_alias) = content { + if let Ok(canonical_alias) = + serde_json::from_str::(json.json().get()) + { let mut aliases = canonical_alias.alt_aliases.clone(); - if let Some(alias) = canonical_alias.alias.clone() { + if let Some(alias) = canonical_alias.alias { aliases.push(alias); } @@ -276,10 +258,10 @@ pub async fn send_state_event_for_key_helper( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: content.event_type().into(), - content: json, + event_type, + content: serde_json::from_str(json.json().get()).expect("content is valid json"), unsigned: None, - state_key, + state_key: Some(state_key), redacts: None, }, &sender_user, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 40b829f..28da236 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -2,7 +2,7 @@ use crate::{Database, Error, PduEvent, Result}; use log::{error, info, warn}; use ruma::{ api::{ - client::r0::push::{Pusher, PusherKind}, + client::r0::push::{get_pushers, set_pusher, PusherKind}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, @@ -30,7 +30,7 @@ impl PushData { }) } - pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { + pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -52,7 +52,7 @@ impl PushData { Ok(()) } - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? 
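One detail of the reworked state helper above: the content now arrives as raw JSON plus an event type, so the canonical-alias special case is done by optimistically deserializing the raw content and only acting when that succeeds. The same "try to parse, ignore on failure" pattern in isolation, with a hypothetical struct standing in for ruma's `CanonicalAliasEventContent`:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CanonicalAliasContent {
    #[serde(default)]
    alias: Option<String>,
    #[serde(default)]
    alt_aliases: Vec<String>,
}

/// Collect every alias mentioned by an m.room.canonical_alias content body;
/// content that doesn't carry alias fields simply yields an empty list.
fn aliases_to_check(raw_content: &str) -> Vec<String> {
    match serde_json::from_str::<CanonicalAliasContent>(raw_content) {
        Ok(content) => {
            let mut aliases = content.alt_aliases;
            aliases.extend(content.alias);
            aliases
        }
        Err(_) => Vec::new(),
    }
}

fn main() {
    let canonical = r##"{"alias":"#room:example.org","alt_aliases":["#alt:example.org"]}"##;
    assert_eq!(
        aliases_to_check(canonical),
        vec!["#alt:example.org".to_owned(), "#room:example.org".to_owned()]
    );

    // Other event content carries no alias fields, so nothing gets checked.
    assert!(aliases_to_check(r#"{"membership":"join"}"#).is_empty());
}
```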
.map(|push| { @@ -62,7 +62,7 @@ impl PushData { .transpose() } - pub fn get_pushers(&self, sender: &UserId) -> Result> { + pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -164,7 +164,7 @@ where pub async fn send_push_notice( user: &UserId, unread: UInt, - pusher: &Pusher, + pusher: &get_pushers::Pusher, ruleset: Ruleset, pdu: &PduEvent, db: &Database, @@ -205,7 +205,7 @@ pub fn get_actions<'a>( ruleset: &'a Ruleset, pdu: &PduEvent, db: &Database, -) -> Result> { +) -> Result<&'a [Action]> { let power_levels: PowerLevelsEventContent = db .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? @@ -228,20 +228,18 @@ pub fn get_actions<'a>( notification_power_levels: power_levels.notifications, }; - Ok(ruleset - .get_actions(&pdu.to_sync_room_event(), &ctx) - .map(Clone::clone)) + Ok(ruleset.get_actions(&pdu.to_sync_room_event(), &ctx)) } async fn send_notice( unread: UInt, - pusher: &Pusher, + pusher: &get_pushers::Pusher, tweaks: Vec, event: &PduEvent, db: &Database, ) -> Result<()> { // TODO: email - if pusher.kind == Some(PusherKind::Email) { + if pusher.kind == PusherKind::Email { return Ok(()); } @@ -250,7 +248,7 @@ async fn send_notice( // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. can pusher/devices have conflicting formats let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = pusher.data.url.as_ref() { + let url = if let Some(url) = &pusher.data.url { url } else { error!("Http Pusher must have URL specified."); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 5685ac6..5c518b3 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -4,7 +4,6 @@ use ruma::{ identifiers::{DeviceId, UserId}, Outgoing, }; -use std::collections::BTreeMap; use std::ops::Deref; #[cfg(feature = "conduit_bin")] @@ -27,6 +26,7 @@ use { signatures::CanonicalJsonValue, ServerName, }, + std::collections::BTreeMap, std::convert::TryFrom, std::io::Cursor, }; @@ -265,7 +265,10 @@ where match ruma::signatures::verify_json(&pub_key_map, &request_map) { Ok(()) => (None, None, false), Err(e) => { - warn!("Failed to verify json request: {}: {:?} {:?}", e, pub_key_map, request_map); + warn!( + "Failed to verify json request: {}: {:?} {:?}", + e, pub_key_map, request_map + ); // Forbidden return Failure((Status::raw(580), ())); diff --git a/src/server_server.rs b/src/server_server.rs index 90b5099..8ff962a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -21,7 +21,7 @@ use ruma::{ create_join_event_template, }, query::{get_profile_information, get_room_information}, - transactions::send_transaction_message, + transactions::{edu::Edu, send_transaction_message}, }, IncomingResponse, OutgoingRequest, OutgoingResponse, }, @@ -585,39 +585,32 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - for edu in &body.edus { - match serde_json::from_str::(edu.json().get()) { - Ok(edu) => match edu.edu_type.as_str() { - "m.typing" => { - if let Some(typing) = edu.content.get("typing") { - if typing.as_bool().unwrap_or_default() { - db.rooms.edus.typing_add( - &UserId::try_from(edu.content["user_id"].as_str().unwrap()) - .unwrap(), - &RoomId::try_from(edu.content["room_id"].as_str().unwrap()) - .unwrap(), - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms.edus.typing_remove( - 
&UserId::try_from(edu.content["user_id"].as_str().unwrap()) - .unwrap(), - &RoomId::try_from(edu.content["room_id"].as_str().unwrap()) - .unwrap(), - &db.globals, - )?; - } - } + for edu in body + .edus + .iter() + .map(|edu| serde_json::from_str::(edu.json().get())) + .filter_map(|r| r.ok()) + { + match edu { + Edu::Presence(_) => {} + Edu::Receipt(_) => {} + Edu::Typing(typing) => { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms + .edus + .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; } - "m.presence" => {} - "m.receipt" => {} - "m.device_list_update" => {} - _ => {} - }, - Err(_err) => { - continue; } + Edu::DeviceListUpdate(_) => {} + Edu::DirectToDevice(_) => {} + Edu::_Custom(_) => {} } } From bb234ca0020f874da07ffcb62ca96066d85836ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 22 Apr 2021 11:27:01 +0200 Subject: [PATCH 0540/1727] fix: only show one typing event per user --- src/database/rooms/edus.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index c48f4c2..f69e897 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -9,7 +9,7 @@ use ruma::{ RoomId, UInt, UserId, }; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, }; @@ -280,7 +280,7 @@ impl RoomEdus { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - let mut user_ids = Vec::new(); + let mut user_ids = HashSet::new(); for user_id in self .typingid_userid @@ -295,11 +295,13 @@ impl RoomEdus { ) }) { - user_ids.push(user_id?); + user_ids.insert(user_id?); } Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { user_ids }, + content: ruma::events::typing::TypingEventContent { + user_ids: user_ids.into_iter().collect(), + }, }) } From 23f81bfaf7a2cea78523ec4caabdca670f117a02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 23 Apr 2021 18:54:17 +0200 Subject: [PATCH 0541/1727] chore: update dependencies --- Cargo.lock | 2752 ------------------------------------------- Cargo.toml | 2 +- src/main.rs | 49 +- src/ruma_wrapper.rs | 400 +++---- 4 files changed, 215 insertions(+), 2988 deletions(-) delete mode 100644 Cargo.lock diff --git a/Cargo.lock b/Cargo.lock deleted file mode 100644 index c13e7d6..0000000 --- a/Cargo.lock +++ /dev/null @@ -1,2752 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - -[[package]] -name = "aho-corasick" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "assign" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" - -[[package]] -name = "async-trait" -version = "0.1.48" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "atomic" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" -dependencies = [ - "autocfg", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "base-x" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "binascii" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - -[[package]] -name = "bumpalo" -version = "3.6.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" - -[[package]] -name = "bytemuck" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed57e2090563b83ba8f83366628ce535a7584c9afa4c9fc0612a03925c6df58" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" - -[[package]] -name = "cc" -version = "1.0.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" -dependencies = [ - "jobserver", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time 0.1.43", - "winapi", -] - -[[package]] -name = "color_quant" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" - -[[package]] -name = "conduit" -version = "0.1.0" -dependencies = [ - "base64 0.13.0", - "directories", - "http", - "image", - "jsonwebtoken", - "log", - "opentelemetry", - "opentelemetry-jaeger", - "pretty_env_logger", - "rand", - "regex", - "reqwest", - "ring", - "rocket", - "ruma", - "rust-argon2", - "rustls", - "rustls-native-certs", - "serde", - "serde_json", - "serde_yaml", - "sled", - "state-res", - "thiserror", - "tokio", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", - "trust-dns-resolver", - "webpki", -] - -[[package]] -name = "const_fn" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "cookie" -version = "0.15.0-dev" -source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" -dependencies = [ - "percent-encoding", - "time 0.2.26", - "version_check", -] - -[[package]] -name = "core-foundation" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" - -[[package]] -name = "crc32fast" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" -dependencies = [ - "cfg-if", -] - -[[package]] 
-name = "crossbeam-epoch" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" -dependencies = [ - "autocfg", - "cfg-if", - "lazy_static", -] - -[[package]] -name = "data-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" - -[[package]] -name = "deflate" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" -dependencies = [ - "adler32", - "byteorder", -] - -[[package]] -name = "devise" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" -dependencies = [ - "devise_codegen", - "devise_core", -] - -[[package]] -name = "devise_codegen" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" -dependencies = [ - "devise_core", - "quote", -] - -[[package]] -name = "devise_core" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=3648468#3648468a9ede9ca896cd35bc1eb818c7a9fb3047" -dependencies = [ - "bitflags", - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn", -] - -[[package]] -name = "directories" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "encoding_rs" -version = "0.8.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "enum-as-inner" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime", - 
"log", - "regex", - "termcolor", -] - -[[package]] -name = "figment" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca029e813a72b7526d28273d25f3e4a2f365d1b7a1018a6f93ec9053a119763" -dependencies = [ - "atomic", - "pear", - "serde", - "toml", - "uncased", - "version_check", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "form_urlencoded" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" -dependencies = [ - "matches", - "percent-encoding", -] - -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "futures" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" - -[[package]] -name = "futures-executor" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" - -[[package]] -name = "futures-macro" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" - -[[package]] -name = "futures-task" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" - -[[package]] -name = "futures-util" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.10.2+wasi-snapshot-preview1", -] - -[[package]] -name = "gif" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" -dependencies = [ - "color_quant", - "weezl", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "h2" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" - -[[package]] -name = "heck" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" -dependencies = [ - "libc", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - -[[package]] -name = "http" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc35c995b9d93ec174cf9a27d425c7892722101e14993cd227fdb51d70cf9589" - -[[package]] -name = "httpdate" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - -[[package]] -name = "hyper" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project", - "socket2 0.4.0", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" -dependencies = [ - "futures-util", - "hyper", - "log", - "rustls", - "tokio", - "tokio-rustls", - "webpki", -] - -[[package]] -name = "idna" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "image" -version = "0.23.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" -dependencies = [ - "bytemuck", - "byteorder", - "color_quant", - "gif", - "jpeg-decoder", - "num-iter", - "num-rational", - "num-traits", - "png", -] - -[[package]] -name = "indexmap" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" -dependencies = [ - "autocfg", - "hashbrown", - "serde", -] - -[[package]] -name = "inlinable_string" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" - -[[package]] -name = "instant" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "integer-encoding" -version = "1.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" - -[[package]] -name = "ipconfig" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" -dependencies = [ - "socket2 0.3.19", - "widestring", - "winapi", - "winreg 0.6.2", -] - -[[package]] -name = "ipnet" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" - -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" - -[[package]] -name = 
"jobserver" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" -dependencies = [ - "libc", -] - -[[package]] -name = "jpeg-decoder" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" - -[[package]] -name = "js-sys" -version = "0.3.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "js_int" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" -dependencies = [ - "serde", -] - -[[package]] -name = "jsonwebtoken" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" -dependencies = [ - "base64 0.12.3", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" - -[[package]] -name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - -[[package]] -name = "lock_api" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" - -[[package]] -name = "memoffset" -version 
= "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" -dependencies = [ - "autocfg", -] - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "miniz_oxide" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" -dependencies = [ - "adler32", -] - -[[package]] -name = "mio" -version = "0.7.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" -dependencies = [ - "libc", - "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", -] - -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi", -] - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" - -[[package]] -name = "opentelemetry" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514d24875c140ed269eecc2d1b56d7b71b573716922a763c317fb1b1b4b58f15" -dependencies = [ - "async-trait", - "futures", - "js-sys", - 
"lazy_static", - "percent-encoding", - "pin-project", - "rand", - "thiserror", -] - -[[package]] -name = "opentelemetry-jaeger" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5677b3a361784aff6e2b1b30dbdb5f85f4ec57ff2ced41d9a481ad70a9d0b57" -dependencies = [ - "async-trait", - "lazy_static", - "opentelemetry", - "thiserror", - "thrift", -] - -[[package]] -name = "ordered-float" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" -dependencies = [ - "num-traits", -] - -[[package]] -name = "parking_lot" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.5", - "smallvec", - "winapi", -] - -[[package]] -name = "paste" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" - -[[package]] -name = "pear" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn", -] - -[[package]] -name = "pem" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" -dependencies = [ - "base64 0.13.0", - "once_cell", - "regex", -] - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pin-project" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "png" -version = "0.16.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" 
-dependencies = [ - "bitflags", - "crc32fast", - "deflate", - "miniz_oxide", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "pretty_env_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -dependencies = [ - "env_logger", - "log", -] - -[[package]] -name = "proc-macro-crate" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - -[[package]] -name = "proc-macro2" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "proc-macro2-diagnostics" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "version_check", - "yansi", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quote" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", - "rand_hc", -] - -[[package]] -name = "rand_chacha" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" -dependencies = [ - "getrandom 0.2.2", -] - -[[package]] -name = "rand_hc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" -dependencies = [ - "rand_core", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" -dependencies = [ - "bitflags", -] - -[[package]] -name = "redox_users" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" -dependencies = [ - "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", -] - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regex" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies = [ - "byteorder", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" - -[[package]] -name = "reqwest" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" -dependencies = [ - "base64 0.13.0", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "hyper-rustls", - "ipnet", - "js-sys", - "lazy_static", - "log", - "mime", - "percent-encoding", - "pin-project-lite", - "rustls", - "rustls-native-certs", - "serde", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.7.0", -] - -[[package]] -name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "rocket" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" -dependencies = [ - "async-trait", - "atomic", - "atty", - "binascii", - "either", - "figment", - "futures", - "log", - "memchr", - "num_cpus", - "parking_lot", - "rand", - "ref-cast", - "rocket_codegen", - "rocket_http", - "serde", - "state", - "time 0.2.26", - "tokio", - "ubyte", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_codegen" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" -dependencies = [ - 
"devise", - "glob", - "indexmap", - "quote", - "rocket_http", -] - -[[package]] -name = "rocket_http" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" -dependencies = [ - "cookie", - "either", - "http", - "hyper", - "indexmap", - "log", - "mime", - "parking_lot", - "pear", - "percent-encoding", - "pin-project-lite", - "ref-cast", - "smallvec", - "state", - "time 0.2.26", - "tokio", - "tokio-rustls", - "uncased", - "unicode-xid", - "version_check", -] - -[[package]] -name = "ruma" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "assign", - "js_int", - "ruma-api", - "ruma-appservice-api", - "ruma-client-api", - "ruma-common", - "ruma-events", - "ruma-federation-api", - "ruma-identifiers", - "ruma-identity-service-api", - "ruma-push-gateway-api", - "ruma-serde", - "ruma-signatures", -] - -[[package]] -name = "ruma-api" -version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "http", - "percent-encoding", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ruma-api-macros" -version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ruma-appservice-api" -version = "0.2.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-client-api" -version = "0.10.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "assign", - "http", - "js_int", - "maplit", - "percent-encoding", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-common" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "indexmap", - "js_int", - "maplit", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "tracing", - "wildmatch", -] - -[[package]] -name = "ruma-events" -version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "js_int", - "ruma-common", - "ruma-events-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-events-macros" -version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ruma-federation-api" -version = "0.1.0-alpha.2" -source = 
"git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "js_int", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-identifiers" -version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "paste", - "rand", - "ruma-identifiers-macros", - "ruma-identifiers-validation", - "ruma-serde", - "ruma-serde-macros", - "serde", -] - -[[package]] -name = "ruma-identifiers-macros" -version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "proc-macro2", - "quote", - "ruma-identifiers-validation", - "syn", -] - -[[package]] -name = "ruma-identifiers-validation" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" - -[[package]] -name = "ruma-identity-service-api" -version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "ruma-api", - "ruma-common", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-push-gateway-api" -version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "js_int", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-serde" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "form_urlencoded", - "itoa", - "js_int", - "ruma-serde-macros", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-serde-macros" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ruma-signatures" -version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=4f16b9357c15d649075393a723f23cf560251754#4f16b9357c15d649075393a723f23cf560251754" -dependencies = [ - "base64 0.13.0", - "ring", - "ruma-identifiers", - "ruma-serde", - "serde_json", - "untrusted", -] - -[[package]] -name = "rust-argon2" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" -dependencies = [ - "base64 0.13.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" -dependencies = [ - "base64 0.13.0", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls-native-certs" -version = "0.5.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" -dependencies = [ - "openssl-probe", - "rustls", - "schannel", - "security-framework", -] - -[[package]] -name = "ryu" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "security-framework" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "serde" -version = "1.0.125" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.125" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_yaml" -version = "0.8.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" -dependencies = [ - "dtoa", - "linked-hash-map", - "serde", - "yaml-rust", -] - -[[package]] -name = "sha1" -version = "0.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - -[[package]] -name = "sharded-slab" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "signal-hook-registry" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" -dependencies = [ - "libc", -] - -[[package]] -name = "simple_asn1" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" -dependencies = [ - "chrono", - "num-bigint", - "num-traits", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "sled" -version = "0.34.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" -dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", - "zstd", -] - -[[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if", - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "state" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" - -[[package]] -name = "state-res" -version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=aa53d07f51ffb7258f5c1e499bddffd4c630f7df#aa53d07f51ffb7258f5c1e499bddffd4c630f7df" -dependencies = [ - "itertools 0.10.0", - "log", - "maplit", - "ruma", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "syn" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "thiserror" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" -dependencies = [ - "once_cell", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "thrift" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" -dependencies = [ - "byteorder", - "integer-encoding", - "log", - "ordered-float", - "threadpool", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "time" -version = "0.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - 
"proc-macro2", - "quote", - "standback", - "syn", -] - -[[package]] -name = "tinyvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tokio" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" -dependencies = [ - "autocfg", - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "once_cell", - "pin-project-lite", - "signal-hook-registry", - "tokio-macros", - "winapi", -] - -[[package]] -name = "tokio-macros" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-util" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tower-service" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" -dependencies = [ - "cfg-if", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tracing-log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-opentelemetry" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccdf13c28f1654fe806838f28c5b9cb23ca4c0eae71450daa489f50e523ceb1" -dependencies = [ - "opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = 
"tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trust-dns-proto" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna", - "ipnet", - "lazy_static", - "log", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "trust-dns-proto", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "ubyte" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" -dependencies = [ - "serde", -] - -[[package]] -name = "uncased" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" - -[[package]] -name = "unicode-xid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "url" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" -dependencies = [ - "form_urlencoded", - "idna", - 
"matches", - "percent-encoding", -] - -[[package]] -name = "version_check" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasm-bindgen" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" -dependencies = [ - "cfg-if", - "serde", - "serde_json", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" - -[[package]] -name = "web-sys" -version = "0.3.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "weezl" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" - -[[package]] -name = "widestring" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" - -[[package]] -name = "wildmatch" 
-version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -dependencies = [ - "winapi", -] - -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi", -] - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "yansi" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" - -[[package]] -name = "zstd" -version = "0.5.4+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" -dependencies = [ - "libc", - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" -dependencies = [ - "cc", - "glob", - "itertools 0.9.0", - "libc", -] diff --git a/Cargo.toml b/Cargo.toml index 4cf2ed4..9265f99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86eddf7cc9a7fc40b044182f83f0d7d92a", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "e1307ddf48dac14e6a37e526098732327bcb86f0", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers 
diff --git a/src/main.rs b/src/main.rs index ba8448d..1417737 100644 --- a/src/main.rs +++ b/src/main.rs @@ -21,7 +21,6 @@ pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use rocket::{ catch, catchers, - fairing::AdHoc, figment::{ providers::{Env, Format, Toml}, Figment, @@ -31,9 +30,9 @@ use rocket::{ use tracing::span; use tracing_subscriber::{prelude::*, Registry}; -fn setup_rocket() -> (rocket::Rocket, Config) { +async fn setup_rocket() -> (rocket::Rocket, Config) { // Force log level off, so we can use our own logger - std::env::set_var("CONDUIT_LOG_LEVEL", "off"); + //std::env::set_var("CONDUIT_LOG_LEVEL", "off"); let config = Figment::from(rocket::Config::release_default()) @@ -48,9 +47,15 @@ fn setup_rocket() -> (rocket::Rocket, Config) { let parsed_config = config .extract::() .expect("It looks like your config is invalid. Please take a look at the error"); - let parsed_config2 = parsed_config.clone(); + + let data = Database::load_or_create(parsed_config.clone()) + .await + .expect("config is valid"); + + data.sending.start_handler(&data); let rocket = rocket::custom(config) + .manage(data) .mount( "/", routes![ @@ -176,29 +181,23 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_profile_information_route, ], ) - .register(catchers![ - not_found_catcher, - forbidden_catcher, - unknown_token_catcher, - missing_token_catcher, - bad_json_catcher - ]) - .attach(AdHoc::on_attach("Config", |rocket| async { - let data = Database::load_or_create(parsed_config2) - .await - .expect("config is valid"); - - data.sending.start_handler(&data); - - Ok(rocket.manage(data)) - })); + .register( + "/", + catchers![ + not_found_catcher, + forbidden_catcher, + unknown_token_catcher, + missing_token_catcher, + bad_json_catcher + ], + ); (rocket, parsed_config) } #[rocket::main] async fn main() { - let (rocket, config) = setup_rocket(); + let (rocket, config) = setup_rocket().await; if config.allow_jaeger { let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() @@ -213,11 +212,11 @@ async fn main() { rocket.launch().await.unwrap(); } else { - std::env::set_var("CONDUIT_LOG", config.log); - pretty_env_logger::init_custom_env("CONDUIT_LOG"); + //std::env::set_var("CONDUIT_LOG", config.log); + //pretty_env_logger::init_custom_env("CONDUIT_LOG"); - let root = span!(tracing::Level::INFO, "app_start", work_units = 2); - let _enter = root.enter(); + //let root = span!(tracing::Level::INFO, "app_start", work_units = 2); + //let _enter = root.enter(); rocket.launch().await.unwrap(); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 5c518b3..eb3802e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -11,10 +11,7 @@ use { crate::{server_server, utils}, log::{debug, warn}, rocket::{ - data::{ - ByteUnit, Data, FromDataFuture, FromTransformedData, Transform, TransformFuture, - Transformed, - }, + data::{self, ByteUnit, Data, FromData}, http::Status, outcome::Outcome::*, response::{self, Responder}, @@ -42,106 +39,92 @@ pub struct Ruma { } #[cfg(feature = "conduit_bin")] -impl<'a, T: Outgoing> FromTransformedData<'a> for Ruma +#[rocket::async_trait] +impl<'a, T: Outgoing> FromData<'a> for Ruma where T::Incoming: IncomingRequest, { type Error = (); - type Owned = Data; - type Borrowed = Self::Owned; - fn transform<'r>( - _req: &'r Request<'_>, - data: Data, - ) -> TransformFuture<'r, Self::Owned, Self::Error> { - Box::pin(async move { Transform::Owned(Success(data)) }) - } - - fn from_data( - request: &'a Request<'_>, - outcome: Transformed<'a, Self>, - 
) -> FromDataFuture<'a, Self, Self::Error> { + async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome { let metadata = T::Incoming::METADATA; + let db = request + .guard::>() + .await + .expect("database was loaded"); - Box::pin(async move { - let data = rocket::try_outcome!(outcome.owned()); - let db = request - .guard::>() - .await - .expect("database was loaded"); + // Get token from header or query value + let token = request + .headers() + .get_one("Authorization") + .map(|s| s[7..].to_owned()) // Split off "Bearer " + .or_else(|| request.query_value("access_token").and_then(|r| r.ok())); - // Get token from header or query value - let token = request - .headers() - .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " - .or_else(|| request.get_query_value("access_token").and_then(|r| r.ok())); + let limit = db.globals.max_request_size(); + let mut handle = data.open(ByteUnit::Byte(limit.into())); + let mut body = Vec::new(); + handle.read_to_end(&mut body).await.unwrap(); - let limit = db.globals.max_request_size(); - let mut handle = data.open(ByteUnit::Byte(limit.into())); - let mut body = Vec::new(); - handle.read_to_end(&mut body).await.unwrap(); - - let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = - db.appservice - .iter_all() - .filter_map(|r| r.ok()) - .find(|(_id, registration)| { - registration - .get("as_token") - .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) - }) { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = request.get_query_value::("user_id").map_or_else( - || { - UserId::parse_with_server_name( - registration - .get("sender_localpart") - .unwrap() - .as_str() - .unwrap(), - db.globals.server_name(), - ) - .unwrap() - }, - |string| { - UserId::try_from(string.expect("parsing to string always works")) + let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = db + .appservice + .iter_all() + .filter_map(|r| r.ok()) + .find(|(_id, registration)| { + registration + .get("as_token") + .and_then(|as_token| as_token.as_str()) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) + }) { + match metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let user_id = request.query_value::("user_id").map_or_else( + || { + UserId::parse_with_server_name( + registration + .get("sender_localpart") .unwrap() - }, - ); + .as_str() + .unwrap(), + db.globals.server_name(), + ) + .unwrap() + }, + |string| { + UserId::try_from(string.expect("parsing to string always works")) + .unwrap() + }, + ); - if !db.users.exists(&user_id).unwrap() { - // Forbidden - return Failure((Status::raw(580), ())); - } - - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, true) + if !db.users.exists(&user_id).unwrap() { + // Forbidden + return Failure((Status::raw(580), ())); } - AuthScheme::ServerSignatures => (None, None, true), - AuthScheme::None => (None, None, true), + + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, true) } - } else { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - if let Some(token) = token { - match db.users.find_from_token(&token).unwrap() { - // Unknown Token - None => return Failure((Status::raw(581), ())), - Some((user_id, device_id)) => { - (Some(user_id), Some(device_id.into()), 
false) - } + AuthScheme::ServerSignatures => (None, None, true), + AuthScheme::None => (None, None, true), + } + } else { + match metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + if let Some(token) = token { + match db.users.find_from_token(&token).unwrap() { + // Unknown Token + None => return Failure((Status::raw(581), ())), + Some((user_id, device_id)) => { + (Some(user_id), Some(device_id.into()), false) } - } else { - // Missing Token - return Failure((Status::raw(582), ())); } + } else { + // Missing Token + return Failure((Status::raw(582), ())); } - AuthScheme::ServerSignatures => { - // Get origin from header - let x_matrix = match request + } + AuthScheme::ServerSignatures => { + // Get origin from header + let x_matrix = match request .headers() .get_one("Authorization") .map(|s| { @@ -158,153 +141,150 @@ where } }; - let origin_str = match x_matrix.get(&Some("origin")) { - Some(Some(o)) => *o, - _ => { - warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); + let origin_str = match x_matrix.get(&Some("origin")) { + Some(Some(o)) => *o, + _ => { + warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; - let origin = match Box::::try_from(origin_str) { - Ok(s) => s, - _ => { - warn!( - "Invalid server name in X-Matrix header origin field: {:?}", - x_matrix - ); + let origin = match Box::::try_from(origin_str) { + Ok(s) => s, + _ => { + warn!( + "Invalid server name in X-Matrix header origin field: {:?}", + x_matrix + ); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; - let key = match x_matrix.get(&Some("key")) { - Some(Some(k)) => *k, - _ => { - warn!("Invalid X-Matrix header key field: {:?}", x_matrix); + let key = match x_matrix.get(&Some("key")) { + Some(Some(k)) => *k, + _ => { + warn!("Invalid X-Matrix header key field: {:?}", x_matrix); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; - let sig = match x_matrix.get(&Some("sig")) { - Some(Some(s)) => *s, - _ => { - warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); + let sig = match x_matrix.get(&Some("sig")) { + Some(Some(s)) => *s, + _ => { + warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; - let json_body = serde_json::from_slice::(&body); + let json_body = serde_json::from_slice::(&body); - let mut request_map = BTreeMap::::new(); + let mut request_map = BTreeMap::::new(); - if let Ok(json_body) = json_body { - request_map.insert("content".to_owned(), json_body); - }; + if let Ok(json_body) = json_body { + request_map.insert("content".to_owned(), json_body); + }; - request_map.insert( - "method".to_owned(), - CanonicalJsonValue::String(request.method().to_string()), - ); - request_map.insert( - "uri".to_owned(), - CanonicalJsonValue::String(request.uri().to_string()), - ); - request_map.insert( - "origin".to_owned(), - CanonicalJsonValue::String(origin.as_str().to_owned()), - ); - request_map.insert( - "destination".to_owned(), - CanonicalJsonValue::String( - db.globals.server_name().as_str().to_owned(), - ), - ); + request_map.insert( + "method".to_owned(), + CanonicalJsonValue::String(request.method().to_string()), 
+ ); + request_map.insert( + "uri".to_owned(), + CanonicalJsonValue::String(request.uri().to_string()), + ); - let mut origin_signatures = BTreeMap::new(); - origin_signatures - .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); + println!("{}: {:?}", origin, request.uri().to_string()); - let mut signatures = BTreeMap::new(); - signatures.insert( - origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - ); + request_map.insert( + "origin".to_owned(), + CanonicalJsonValue::String(origin.as_str().to_owned()), + ); + request_map.insert( + "destination".to_owned(), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + ); - request_map.insert( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ); + let mut origin_signatures = BTreeMap::new(); + origin_signatures + .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); - let keys = match server_server::fetch_signing_keys( - &db, - &origin, - vec![&key.to_owned()], - ) - .await - { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); + let mut signatures = BTreeMap::new(); + signatures.insert( + origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + ); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + request_map.insert( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ); - let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert(origin.as_str().to_owned(), keys); + let keys = match server_server::fetch_signing_keys( + &db, + &origin, + vec![&key.to_owned()], + ) + .await + { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, false), - Err(e) => { - warn!( - "Failed to verify json request: {}: {:?} {:?}", - e, pub_key_map, request_map - ); + // Forbidden + return Failure((Status::raw(580), ())); + } + }; - // Forbidden - return Failure((Status::raw(580), ())); - } + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert(origin.as_str().to_owned(), keys); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, false), + Err(e) => { + warn!("Failed to verify json request from {}: {}", origin, e,); + + // Forbidden + return Failure((Status::raw(580), ())); } } - AuthScheme::None => (None, None, false), } - }; - - let mut http_request = http::Request::builder() - .uri(request.uri().to_string()) - .method(&*request.method().to_string()); - for header in request.headers().iter() { - http_request = http_request.header(header.name.as_str(), &*header.value); + AuthScheme::None => (None, None, false), } + }; - let http_request = http_request.body(&*body).unwrap(); - debug!("{:?}", http_request); - match ::try_from_http_request(http_request) { - Ok(t) => Success(Ruma { - body: t, - sender_user, - sender_device, - // TODO: Can we avoid parsing it again? 
(We only need this for append_pdu) - json_body: utils::string_from_bytes(&body) - .ok() - .and_then(|s| serde_json::value::RawValue::from_string(s).ok()), - from_appservice, - }), - Err(e) => { - warn!("{:?}", e); - Failure((Status::raw(583), ())) - } + let mut http_request = http::Request::builder() + .uri(request.uri().to_string()) + .method(&*request.method().to_string()); + for header in request.headers().iter() { + http_request = http_request.header(header.name.as_str(), &*header.value); + } + + let http_request = http_request.body(&*body).unwrap(); + debug!("{:?}", http_request); + match ::try_from_http_request(http_request) { + Ok(t) => Success(Ruma { + body: t, + sender_user, + sender_device, + // TODO: Can we avoid parsing it again? (We only need this for append_pdu) + json_body: utils::string_from_bytes(&body) + .ok() + .and_then(|s| serde_json::value::RawValue::from_string(s).ok()), + from_appservice, + }), + Err(e) => { + warn!("{:?}", e); + Failure((Status::raw(583), ())) } - }) + } } } From 7067d7acae7d8647ac0d9733d346819393f6f627 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 23 Apr 2021 18:16:24 +0200 Subject: [PATCH 0542/1727] Refactor Responder implementation for RumaResponse --- src/ruma_wrapper.rs | 52 ++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index eb3802e..1464452 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -314,36 +314,34 @@ where 'o: 'r, { fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - let http_response: Result, _> = self.0.try_into_http_response(); - match http_response { - Ok(http_response) => { - let mut response = rocket::response::Response::build(); + let http_response = self + .0 + .try_into_http_response() + .map_err(|_| Status::InternalServerError)?; - let status = http_response.status(); - response.raw_status(status.into(), ""); + let mut response = rocket::response::Response::build(); - for header in http_response.headers() { - response - .raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); - } + let status = http_response.status(); + response.raw_status(status.into(), ""); - let http_body = http_response.into_body(); - - response.sized_body(http_body.len(), Cursor::new(http_body)); - - response.raw_header("Access-Control-Allow-Origin", "*"); - response.raw_header( - "Access-Control-Allow-Methods", - "GET, POST, PUT, DELETE, OPTIONS", - ); - response.raw_header( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization", - ); - response.raw_header("Access-Control-Max-Age", "86400"); - response.ok() - } - Err(_) => Err(Status::InternalServerError), + for header in http_response.headers() { + response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); } + + let http_body = http_response.into_body(); + + response.sized_body(http_body.len(), Cursor::new(http_body)); + + response.raw_header("Access-Control-Allow-Origin", "*"); + response.raw_header( + "Access-Control-Allow-Methods", + "GET, POST, PUT, DELETE, OPTIONS", + ); + response.raw_header( + "Access-Control-Allow-Headers", + "Origin, X-Requested-With, Content-Type, Accept, Authorization", + ); + response.raw_header("Access-Control-Max-Age", "86400"); + response.ok() } } From e72fd44bb5b61303b42763be61c7c9a218b6e718 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 23 Apr 2021 18:38:20 +0200 Subject: [PATCH 0543/1727] Refactor send_request for appservices --- 
src/appservice_server.rs | 79 +++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 1b72c76..9220c2d 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -49,51 +49,46 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + let mut reqwest_response = globals.reqwest_client().execute(reqwest_request).await?; // Because reqwest::Response -> http::Response is complicated: - match reqwest_response { - Ok(mut reqwest_response) => { - let status = reqwest_response.status(); - let mut http_response = http::Response::builder().status(status); - let headers = http_response.headers_mut().unwrap(); + let status = reqwest_response.status(); + let mut http_response = http::Response::builder().status(status); + let headers = http_response.headers_mut().unwrap(); - for (k, v) in reqwest_response.headers_mut().drain() { - if let Some(key) = k { - headers.insert(key, v); - } - } - - let status = reqwest_response.status(); - - let body = reqwest_response.bytes().await.unwrap_or_else(|e| { - warn!("server error: {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "Appservice returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - warn!( - "Appservice returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Server returned bad response.") - }) + for (k, v) in reqwest_response.headers_mut().drain() { + if let Some(key) = k { + headers.insert(key, v); } - Err(e) => Err(e.into()), } + + let status = reqwest_response.status(); + + let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + warn!( + "Appservice returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from_http_response( + http_response + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + warn!( + "Appservice returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Server returned bad response.") + }) } From d8b484beed672d5a2daba1f5ccca432a89a395d3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 23 Apr 2021 18:45:06 +0200 Subject: [PATCH 0544/1727] Upgrade ruma --- Cargo.lock | 2897 ++++++++++++++++++++++++++++++++++++++ Cargo.toml | 6 +- src/appservice_server.rs | 8 +- src/database/pusher.rs | 8 +- src/ruma_wrapper.rs | 2 +- src/server_server.rs | 6 +- 6 files changed, 2915 insertions(+), 12 deletions(-) create mode 100644 Cargo.lock diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..0b70043 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,2897 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "assign" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" + +[[package]] +name = "async-trait" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +dependencies = [ + "autocfg", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "base-x" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "binascii" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "blake2b_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "bumpalo" +version = "3.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" + +[[package]] +name = "bytemuck" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed57e2090563b83ba8f83366628ce535a7584c9afa4c9fc0612a03925c6df58" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + +[[package]] +name = "cc" +version = "1.0.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time 0.1.43", + "winapi", +] + +[[package]] +name = "color_quant" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" + +[[package]] +name = "conduit" +version = "0.1.0" +dependencies = [ + "base64 0.13.0", + "bytes", + "directories", + "http", + "image", + "jsonwebtoken", + "log", + "opentelemetry", + "opentelemetry-jaeger", + "pretty_env_logger", + "rand", + "regex", + "reqwest", + "ring", + "rocket", + "ruma", + "rust-argon2", + "rustls", + "rustls-native-certs", + "serde", + "serde_json", + "serde_yaml", + "sled", + "state-res", + "thiserror", + "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", + "trust-dns-resolver", + "webpki", +] + +[[package]] +name = "const_fn" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + +[[package]] +name = "cookie" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf8865bac3d9a3bde5bde9088ca431b11f5d37c7a578b8086af77248b76627" +dependencies = [ + "percent-encoding", + "time 0.2.26", + "version_check", +] + +[[package]] +name = "core-foundation" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name 
= "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + +[[package]] +name = "crc32fast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", + "lazy_static", +] + +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + +[[package]] +name = "deflate" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" +dependencies = [ + "adler32", + "byteorder", +] + +[[package]] +name = "derive_more" +version = "0.99.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "devise" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +dependencies = [ + "devise_codegen", + "devise_core", +] + +[[package]] +name = "devise_codegen" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +dependencies = [ + "devise_core", + "quote", +] + +[[package]] +name = "devise_core" +version = "0.3.0" +source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +dependencies = [ + "bitflags", + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", +] + +[[package]] +name = "directories" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + +[[package]] +name = "dtoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = 
"encoding_rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "figment" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca029e813a72b7526d28273d25f3e4a2f365d1b7a1018a6f93ec9053a119763" +dependencies = [ + "atomic", + "pear", + "serde", + "toml", + "uncased", + "version_check", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "futures" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" + +[[package]] +name = "futures-executor" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" + +[[package]] +name = "futures-macro" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" + +[[package]] 
+name = "futures-task" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" + +[[package]] +name = "futures-util" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "generator" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "061d3be1afec479d56fa3bd182bf966c7999ec175fcfdb87ac14d417241366c6" +dependencies = [ + "cc", + "libc", + "log", + "rustversion", + "winapi", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.2+wasi-snapshot-preview1", +] + +[[package]] +name = "gif" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" +dependencies = [ + "color_quant", + "weezl", +] + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "h2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" + +[[package]] +name = "heck" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + +[[package]] +name = "http" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" + +[[package]] +name = "httpdate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" + +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + +[[package]] +name = "hyper" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project", + "socket2 0.4.0", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "futures-util", + "hyper", + "log", + "rustls", + "tokio", + "tokio-rustls", + "webpki", +] + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "image" +version = "0.23.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" +dependencies = [ + "bytemuck", + "byteorder", + "color_quant", + "gif", + "jpeg-decoder", + "num-iter", + "num-rational", + "num-traits", + "png", +] + +[[package]] +name = "indexmap" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +dependencies = [ + "autocfg", + "hashbrown", + "serde", +] + +[[package]] +name = "inlinable_string" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-encoding" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" + +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + 
"widestring", + "winapi", + "winreg 0.6.2", +] + +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "jobserver" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +dependencies = [ + "libc", +] + +[[package]] +name = "jpeg-decoder" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" + +[[package]] +name = "js-sys" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "js_int" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" +dependencies = [ + "serde", +] + +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" + +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + +[[package]] +name = "lock_api" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "loom" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +dependencies = [ + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "memoffset" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "miniz_oxide" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +dependencies = [ + "adler32", +] + +[[package]] +name = "mio" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +dependencies = [ + "libc", + "log", + "miow", + "ntapi", + "winapi", +] + +[[package]] +name = "miow" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +dependencies = [ + "winapi", +] + +[[package]] +name = "multer" +version = "1.2.2" +source = "git+https://github.com/rousan/multer-rs.git?rev=7e4f0c5f#7e4f0c5fe14e4c531f503922bfe04f68b32ddf17" +dependencies = [ + "bytes", + "derive_more", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "mime", + "tokio", + "tokio-util", + "twoway", + "version_check", +] + +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.42" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "opentelemetry" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514d24875c140ed269eecc2d1b56d7b71b573716922a763c317fb1b1b4b58f15" +dependencies = [ + "async-trait", + "futures", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand", + "thiserror", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5677b3a361784aff6e2b1b30dbdb5f85f4ec57ff2ced41d9a481ad70a9d0b57" +dependencies = [ + "async-trait", + "lazy_static", + "opentelemetry", + "thiserror", + "thrift", +] + +[[package]] +name = "ordered-float" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.6", + "smallvec", + "winapi", +] + +[[package]] +name = "paste" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" + +[[package]] +name = "pear" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" +dependencies = [ + "proc-macro2", + 
"proc-macro2-diagnostics", + "quote", + "syn", +] + +[[package]] +name = "pem" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +dependencies = [ + "base64 0.13.0", + "once_cell", + "regex", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pin-project" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "png" +version = "0.16.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" +dependencies = [ + "bitflags", + "crc32fast", + "deflate", + "miniz_oxide", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +dependencies = [ + "env_logger", + "log", +] + +[[package]] +name = "proc-macro-crate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +dependencies = [ + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" + +[[package]] +name = "proc-macro2" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "version_check", + "yansi", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom 0.2.2", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "redox_syscall" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +dependencies = [ + "getrandom 0.1.16", + "redox_syscall 0.1.57", + "rust-argon2", +] + +[[package]] +name = "ref-cast" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "reqwest" +version = "0.11.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" +dependencies = [ + "base64 0.13.0", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "hyper-rustls", + "ipnet", + "js-sys", + "lazy_static", + "log", + "mime", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-native-certs", + "serde", + "serde_urlencoded", + "tokio", + "tokio-rustls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.7.0", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rocket" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +dependencies = [ + "async-trait", + "atomic", + "atty", + "binascii", + "bytes", + "either", + "figment", + "futures", + "indexmap", + "log", + "memchr", + "multer", + "num_cpus", + "parking_lot", + "pin-project-lite", + "rand", + "ref-cast", + "rocket_codegen", + "rocket_http", + "serde", + "state", + "tempfile", + "time 0.2.26", + "tokio", + "tokio-util", + "ubyte", + "version_check", + "yansi", +] + +[[package]] +name = "rocket_codegen" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +dependencies = [ + "devise", + "glob", + "indexmap", + "quote", + "rocket_http", + "unicode-xid", +] + +[[package]] +name = "rocket_http" +version = "0.5.0-dev" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +dependencies = [ + "cookie", + "either", + "http", + "hyper", + "indexmap", + "log", + "memchr", + "mime", + "parking_lot", + "pear", + "percent-encoding", + "pin-project-lite", + "ref-cast", + "serde", + "smallvec", + "stable-pattern", + "state", + "time 0.2.26", + "tokio", + "tokio-rustls", + "uncased", +] + +[[package]] +name = "ruma" +version = "0.0.3" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "assign", + "js_int", + "ruma-api", + "ruma-appservice-api", + "ruma-client-api", + "ruma-common", + "ruma-events", + "ruma-federation-api", + "ruma-identifiers", + "ruma-identity-service-api", + "ruma-push-gateway-api", + "ruma-serde", + "ruma-signatures", +] + +[[package]] +name = "ruma-api" +version = "0.17.0-alpha.4" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "bytes", + "http", + "percent-encoding", + "ruma-api-macros", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ruma-api-macros" +version = "0.17.0-alpha.4" +source = 
"git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-appservice-api" +version = "0.2.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-client-api" +version = "0.10.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "assign", + "bytes", + "http", + "js_int", + "maplit", + "percent-encoding", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-common" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "indexmap", + "js_int", + "maplit", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", + "tracing", + "wildmatch", +] + +[[package]] +name = "ruma-events" +version = "0.22.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "js_int", + "ruma-common", + "ruma-events-macros", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-events-macros" +version = "0.22.0-alpha.3" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-federation-api" +version = "0.1.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-identifiers" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "paste", + "rand", + "ruma-identifiers-macros", + "ruma-identifiers-validation", + "ruma-serde", + "ruma-serde-macros", + "serde", +] + +[[package]] +name = "ruma-identifiers-macros" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "proc-macro2", + "quote", + "ruma-identifiers-validation", + "syn", +] + +[[package]] +name = "ruma-identifiers-validation" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" + +[[package]] +name = "ruma-identity-service-api" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-push-gateway-api" +version = "0.1.0-alpha.1" +source = 
"git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-serde" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "bytes", + "form_urlencoded", + "itoa", + "js_int", + "ruma-serde-macros", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-serde-macros" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ruma-signatures" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +dependencies = [ + "base64 0.13.0", + "ring", + "ruma-identifiers", + "ruma-serde", + "serde_json", + "untrusted", +] + +[[package]] +name = "rust-argon2" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +dependencies = [ + "base64 0.13.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +dependencies = [ + "base64 0.13.0", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls", + "schannel", + "security-framework", +] + +[[package]] +name = "rustversion" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", 
+] + +[[package]] +name = "security-framework" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.125" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.125" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" +dependencies = [ + "dtoa", + "linked-hash-map", + "serde", + "yaml-rust", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +dependencies = [ + "chrono", + "num-bigint", + "num-traits", +] + +[[package]] +name = "slab" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" + +[[package]] +name = "sled" +version = "0.34.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" +dependencies = [ + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log", + "parking_lot", + "zstd", +] + +[[package]] +name = "smallvec" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "socket2" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "stable-pattern" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4564168c00635f88eaed410d5efa8131afa8d8699a612c80c455a0ba05c21045" +dependencies = [ + "memchr", +] + +[[package]] +name = "standback" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +dependencies = [ + "version_check", +] + +[[package]] +name = "state" +version = "0.4.2" +source = "git+https://github.com/SergioBenitez/state.git?rev=8f94dc#8f94dce673b7d4b0e7b96c808a84f5e2a4be4a60" +dependencies = [ + "loom", +] + +[[package]] +name = "state-res" +version = "0.1.0" +source = "git+https://github.com/ruma/state-res?rev=1dd252d1c97a38def74bc097c197a33179ed8fbb#1dd252d1c97a38def74bc097c197a33179ed8fbb" +dependencies = [ + "itertools 0.10.0", + "log", + "maplit", + "ruma", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + +[[package]] +name = "syn" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9505f307c872bab8eb46f77ae357c8eba1fdacead58ee5a850116b1d7f82883" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "rand", + "redox_syscall 0.2.6", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +dependencies = [ + "once_cell", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "thrift" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +dependencies = [ + "byteorder", + "integer-encoding", + "log", + "ordered-float", + "threadpool", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "time" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" +dependencies = [ + "const_fn", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi", +] + +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + +[[package]] +name = "tinyvec" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" +dependencies = [ + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "once_cell", + "pin-project-lite", + "signal-hook-registry", + "tokio-macros", + "winapi", +] + +[[package]] +name = "tokio-macros" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-util" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + +[[package]] +name = "tracing" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +dependencies = [ + "cfg-if 1.0.0", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccdf13c28f1654fe806838f28c5b9cb23ca4c0eae71450daa489f50e523ceb1" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + 
"tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trust-dns-proto" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "952a078337565ba39007de99b151770f41039253a31846f0a3d5cd5a4ac8eedf" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "lazy_static", + "log", + "rand", + "smallvec", + "thiserror", + "tinyvec", + "tokio", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9c97f7d103e0f94dbe384a57908833505ae5870126492f166821b7cf685589" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "trust-dns-proto", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + +[[package]] +name = "twoway" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b40075910de3a912adbd80b5d8bad6ad10a23eeb1f5bf9d4006839e899ba5bc" +dependencies = [ + "memchr", + "unchecked-index", +] + +[[package]] +name = "ubyte" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" +dependencies = [ + "serde", +] + +[[package]] +name = "uncased" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" +dependencies = [ + "serde", + "version_check", +] + +[[package]] +name = "unchecked-index" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" + +[[package]] +name = "unicode-bidi" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +dependencies = [ + "form_urlencoded", + "idna", + "matches", + "percent-encoding", +] + +[[package]] +name = "version_check" +version = "0.9.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "wasm-bindgen" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +dependencies = [ + "cfg-if 1.0.0", + "serde", + "serde_json", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" + +[[package]] +name = "web-sys" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "weezl" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e" + +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + +[[package]] +name = "wildmatch" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi", +] + +[[package]] +name = "winreg" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +dependencies = [ + "winapi", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "yansi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + +[[package]] +name = "zstd" +version = "0.5.4+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "2.0.6+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "1.4.18+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +dependencies = [ + "cc", + "glob", + "itertools 0.9.0", + "libc", +] diff --git a/Cargo.toml b/Cargo.toml index 9265f99..19378b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "e1307ddf4 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "4f16b9357c15d649075393a723f23cf560251754", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", 
"unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "aa53d07f51ffb7258f5c1e499bddffd4c630f7df", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "1dd252d1c97a38def74bc097c197a33179ed8fbb", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -32,6 +32,8 @@ tokio = "1.2.0" sled = { version = "0.34.6", features = ["compression", "no_metrics"] } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +# Used for the http request / response body type for Ruma endpoints used with reqwest +bytes = "1.0.1" # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 9220c2d..8f64edf 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,7 +1,8 @@ use crate::{utils, Error, Result}; +use bytes::BytesMut; use http::header::{HeaderValue, CONTENT_TYPE}; use log::warn; -use ruma::api::{IncomingResponse, OutgoingRequest}; +use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{ convert::{TryFrom, TryInto}, fmt::Debug, @@ -20,8 +21,9 @@ where let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); let mut http_request = request - .try_into_http_request(&destination, Some("")) - .unwrap(); + .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) + .unwrap() + .map(|body| body.freeze()); let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 28da236..7c7abb3 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -1,4 +1,5 @@ use crate::{Database, Error, PduEvent, Result}; +use bytes::BytesMut; use log::{error, info, warn}; use ruma::{ api::{ @@ -7,7 +8,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, OutgoingRequest, + IncomingResponse, OutgoingRequest, SendAccessToken, }, events::{room::power_levels::PowerLevelsEventContent, EventType}, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, @@ -99,11 +100,12 @@ where let destination = destination.replace("/_matrix/push/v1/notify", ""); let http_request = request - .try_into_http_request(&destination, Some("")) + .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) .map_err(|e| { warn!("Failed to find destination {}: {}", destination, e); Error::BadServerResponse("Invalid destination") - })?; + })? 
+ .map(|body| body.freeze()); let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1464452..4ac7d71 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -316,7 +316,7 @@ where fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { let http_response = self .0 - .try_into_http_response() + .try_into_http_response::>() .map_err(|_| Status::InternalServerError)?; let mut response = rocket::response::Response::build(); diff --git a/src/server_server.rs b/src/server_server.rs index 8ff962a..0a882fe 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -23,7 +23,7 @@ use ruma::{ query::{get_profile_information, get_room_information}, transactions::{edu::Edu, send_transaction_message}, }, - IncomingResponse, OutgoingRequest, OutgoingResponse, + IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -141,7 +141,7 @@ where }; let mut http_request = request - .try_into_http_request(&actual_destination, Some("")) + .try_into_http_request::>(&actual_destination, SendAccessToken::IfRequired("")) .map_err(|e| { warn!("Failed to find destination {}: {}", actual_destination, e); Error::BadServerResponse("Invalid destination") @@ -454,7 +454,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), }, } - .try_into_http_response() + .try_into_http_response::>() .unwrap() .body(), ) From 026af6b1a60b01f877e80dba5bc36850c6c506cf Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 23 Apr 2021 19:04:59 +0200 Subject: [PATCH 0545/1727] Improve formatting of ruma_wrapper.rs --- src/ruma_wrapper.rs | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4ac7d71..7777e12 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -34,7 +34,8 @@ pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, - pub json_body: Option>, // This is None when body is not a valid string + // This is None when body is not a valid string + pub json_body: Option>, pub from_appservice: bool, } @@ -124,22 +125,24 @@ where } AuthScheme::ServerSignatures => { // Get origin from header - let x_matrix = match request - .headers() - .get_one("Authorization") - .map(|s| { - s[9..] - .split_terminator(',').map(|field| {let mut splits = field.splitn(2, '='); (splits.next(), splits.next().map(|s| s.trim_matches('"')))}).collect::>() - }) // Split off "X-Matrix " and parse the rest - { - Some(t) => t, - None => { - warn!("No Authorization header"); + let x_matrix = match request.headers().get_one("Authorization").map(|s| { + // Split off "X-Matrix " and parse the rest + s[9..] 
+ .split_terminator(',') + .map(|field| { + let mut splits = field.splitn(2, '='); + (splits.next(), splits.next().map(|s| s.trim_matches('"'))) + }) + .collect::>() + }) { + Some(t) => t, + None => { + warn!("No Authorization header"); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; let origin_str = match x_matrix.get(&Some("origin")) { Some(Some(o)) => *o, From 226045ea4bb845fa201b66159a02f6f6d82634be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 24 Apr 2021 12:27:46 +0200 Subject: [PATCH 0546/1727] improvement: warning on misconfigured reverse proxy --- src/database/globals.rs | 5 ++++- src/ruma_wrapper.rs | 9 +++++---- src/server_server.rs | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index acecf02..04f8d29 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -52,7 +52,10 @@ impl ServerCertVerifier for MatrixServerVerifier { if result.is_ok() { return result; } - info!("Server {:?} is non-compliant, retrying TLS verification with original name", dns_name); + info!( + "Server {:?} is non-compliant, retrying TLS verification with original name", + dns_name + ); } self.inner .verify_server_cert(roots, presented_certs, dns_name, ocsp_response) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 7777e12..49a9fb0 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -203,9 +203,6 @@ where "uri".to_owned(), CanonicalJsonValue::String(request.uri().to_string()), ); - - println!("{}: {:?}", origin, request.uri().to_string()); - request_map.insert( "origin".to_owned(), CanonicalJsonValue::String(origin.as_str().to_owned()), @@ -252,7 +249,11 @@ where match ruma::signatures::verify_json(&pub_key_map, &request_map) { Ok(()) => (None, None, false), Err(e) => { - warn!("Failed to verify json request from {}: {}", origin, e,); + warn!("Failed to verify json request from {}: {}", origin, e); + + if request.uri().to_string().contains('@') { + warn!("Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)"); + } // Forbidden return Failure((Status::raw(580), ())); diff --git a/src/server_server.rs b/src/server_server.rs index 0a882fe..187ec4f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2201,7 +2201,7 @@ pub async fn fetch_required_signing_keys( #[cfg(test)] mod tests { - use super::{FedDest, add_port_to_hostname, get_ip_with_port}; + use super::{add_port_to_hostname, get_ip_with_port, FedDest}; #[test] fn ips_get_default_ports() { From 2fc1ec2ad5ad97f3cda6f5e9f6bcc15a05b3a977 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 24 Apr 2021 13:34:35 +0200 Subject: [PATCH 0547/1727] fix: logging --- Cargo.lock | 6 ++--- Cargo.toml | 2 +- src/main.rs | 64 ++++++++++++++++++++++++----------------------------- 3 files changed, 33 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b70043..e50907d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,7 +1558,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" dependencies = [ "async-trait", "atomic", @@ -1593,7 +1593,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" dependencies = [ "devise", "glob", @@ -1606,7 +1606,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=e1307ddf48dac14e6a37e526098732327bcb86f0#e1307ddf48dac14e6a37e526098732327bcb86f0" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" dependencies = [ "cookie", "either", diff --git a/Cargo.toml b/Cargo.toml index 19378b8..a800350 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "e1307ddf48dac14e6a37e526098732327bcb86f0", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers diff --git a/src/main.rs b/src/main.rs index 1417737..af1ddac 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,30 +30,7 @@ use rocket::{ use tracing::span; use tracing_subscriber::{prelude::*, Registry}; -async fn setup_rocket() -> (rocket::Rocket, Config) { - // Force log level off, so we can use our own logger - //std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - - let config = - Figment::from(rocket::Config::release_default()) - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be 
set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); - - let parsed_config = config - .extract::() - .expect("It looks like your config is invalid. Please take a look at the error"); - - let data = Database::load_or_create(parsed_config.clone()) - .await - .expect("config is valid"); - - data.sending.start_handler(&data); - +fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket { let rocket = rocket::custom(config) .manage(data) .mount( @@ -192,12 +169,33 @@ async fn setup_rocket() -> (rocket::Rocket, Config) { ], ); - (rocket, parsed_config) + rocket } #[rocket::main] async fn main() { - let (rocket, config) = setup_rocket().await; + // Force log level off, so we can use our own logger + std::env::set_var("CONDUIT_LOG_LEVEL", "off"); + + let raw_config = + Figment::from(rocket::Config::release_default()) + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); + + let config = raw_config + .extract::() + .expect("It looks like your config is invalid. Please take a look at the error"); + + let db = Database::load_or_create(config.clone()) + .await + .expect("config is valid"); + + db.sending.start_handler(&db); if config.allow_jaeger { let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() @@ -209,17 +207,13 @@ async fn main() { let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); - - rocket.launch().await.unwrap(); } else { - //std::env::set_var("CONDUIT_LOG", config.log); - //pretty_env_logger::init_custom_env("CONDUIT_LOG"); - - //let root = span!(tracing::Level::INFO, "app_start", work_units = 2); - //let _enter = root.enter(); - - rocket.launch().await.unwrap(); + std::env::set_var("CONDUIT_LOG", config.log); + pretty_env_logger::init_custom_env("CONDUIT_LOG"); } + + let rocket = setup_rocket(raw_config, db); + rocket.launch().await.unwrap(); } #[catch(404)] From 3dfc245633f2424dde9eb5fab18973d68693ee95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 24 Apr 2021 18:01:05 +0200 Subject: [PATCH 0548/1727] fix: send transaction retry code --- src/database/sending.rs | 136 +++++++++++++++++++++++----------------- 1 file changed, 79 insertions(+), 57 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index ffd3ed6..5495b36 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::warn; +use log::{error, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -32,10 +32,16 @@ pub enum OutgoingKind { pub struct Sending { /// The state for a given state hash. 
pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId - pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId (pduid can be empty for reservation) + pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId pub(super) maximum_requests: Arc, } +enum TransactionStatus { + Running, + Failed(u32, Instant), // number of times failed, time of last failure + Retrying(u32), // number of times failed +} + impl Sending { pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); @@ -47,8 +53,10 @@ impl Sending { let mut futures = FuturesUnordered::new(); // Retry requests we could not finish yet - let mut current_transactions = HashMap::>>::new(); + let mut subscriber = servernamepduids.watch_prefix(b""); + let mut current_transaction_status = HashMap::, TransactionStatus>::new(); + let mut initial_transactions = HashMap::>>::new(); for (key, outgoing_kind, pdu) in servercurrentpdus .iter() .filter_map(|r| r.ok()) @@ -58,18 +66,15 @@ impl Sending { .map(|(k, p)| (key, k, p.to_vec())) }) { - if pdu.is_empty() { - // Remove old reservation key - servercurrentpdus.remove(key).unwrap(); - continue; - } - - let entry = current_transactions - .entry(outgoing_kind) + let entry = initial_transactions + .entry(outgoing_kind.clone()) .or_insert_with(Vec::new); if entry.len() > 30 { - warn!("Dropping some current pdus because too many were queued. This should not happen."); + warn!( + "Dropping some current pdu: {:?} {:?} {:?}", + key, outgoing_kind, pdu + ); servercurrentpdus.remove(key).unwrap(); continue; } @@ -77,8 +82,7 @@ impl Sending { entry.push(pdu); } - for (outgoing_kind, pdus) in current_transactions { - // Create new reservation + for (outgoing_kind, pdus) in initial_transactions { let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(server) => { let mut p = b"+".to_vec(); @@ -99,14 +103,10 @@ impl Sending { } }; prefix.push(0xff); - servercurrentpdus.insert(prefix, &[]).unwrap(); - + current_transaction_status.insert(prefix, TransactionStatus::Running); futures.push(Self::handle_event(outgoing_kind.clone(), pdus, &db)); } - let mut last_failed_try: HashMap = HashMap::new(); - - let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! 
{ Some(response) = futures.next() => { @@ -138,10 +138,7 @@ impl Sending { .keys() .filter_map(|r| r.ok()) { - // Don't remove reservation yet - if prefix.len() != key.len() { - servercurrentpdus.remove(key).unwrap(); - } + servercurrentpdus.remove(key).unwrap(); } // Find events that have been added since starting the last request @@ -171,8 +168,7 @@ impl Sending { ) ); } else { - servercurrentpdus.remove(&prefix).unwrap(); - // servercurrentpdus with the prefix should be empty now + current_transaction_status.remove(&prefix); } } Err((outgoing_kind, _)) => { @@ -198,15 +194,14 @@ impl Sending { prefix.push(0xff); - last_failed_try.insert(outgoing_kind.clone(), match last_failed_try.get(&outgoing_kind) { - Some(last_failed) => { - (last_failed.0+1, Instant::now()) + current_transaction_status.entry(prefix).and_modify(|e| *e = match e { + TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), + TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), + TransactionStatus::Failed(_, _) => { + error!("Request that was not even running failed?!"); + return }, - None => { - (1, Instant::now()) - } }); - servercurrentpdus.remove(&prefix).unwrap(); } }; }, @@ -220,24 +215,12 @@ impl Sending { let servernamepduid = key.clone(); - let exponential_backoff = |(tries, instant): &(u32, Instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } + let mut retry = false; - instant.elapsed() < min_elapsed_duration - }; - - if let Some((outgoing_kind, pdu_id)) = Self::parse_servercurrentpdus(&servernamepduid) + if let Some((outgoing_kind, prefix, pdu_id)) = Self::parse_servercurrentpdus(&servernamepduid) .ok() - .filter(|(outgoing_kind, _)| { - if last_failed_try.get(outgoing_kind).map_or(false, exponential_backoff) { - return false; - } - - let mut prefix = match outgoing_kind { + .map(|(outgoing_kind, pdu_id)| { + let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(serv) => { let mut p = b"+".to_vec(); p.extend_from_slice(serv.as_bytes()); @@ -258,20 +241,59 @@ impl Sending { }; prefix.push(0xff); - servercurrentpdus - .compare_and_swap(prefix, Option::<&[u8]>::None, Some(&[])) // Try to reserve - == Ok(Ok(())) + (outgoing_kind, prefix, pdu_id) + }) + .filter(|(_, prefix, _)| { + let entry = current_transaction_status.entry(prefix.clone()); + let mut allow = true; + + entry.and_modify(|e| match e { + TransactionStatus::Running | TransactionStatus::Retrying(_) => { + allow = false; // already running + }, + TransactionStatus::Failed(tries, time) => { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + if time.elapsed() < min_elapsed_duration { + allow = false; + } else { + retry = true; + *e = TransactionStatus::Retrying(*tries); + } + } + }).or_insert(TransactionStatus::Running); + + allow }) { - servercurrentpdus.insert(&key, &[]).unwrap(); - servernamepduids.remove(&key).unwrap(); - - last_failed_try.remove(&outgoing_kind); + let mut pdus = Vec::new(); + if retry { + // We retry the previous transaction + for pdu in servercurrentpdus + .scan_prefix(&prefix) + .filter_map(|r| r.ok()) + .filter_map(|(key, _)| { + 
Self::parse_servercurrentpdus(&key) + .ok() + .map(|(_, p)| p.to_vec()) + }) + { + pdus.push(pdu); + } + } else { + servercurrentpdus.insert(&key, &[]).unwrap(); + servernamepduids.remove(&key).unwrap(); + pdus.push(pdu_id.to_vec()); + } futures.push( Self::handle_event( outgoing_kind, - vec![pdu_id.to_vec()], + pdus, &db, ) ); @@ -342,7 +364,7 @@ impl Sending { ( server.clone(), Error::bad_database( - "[Appservice] Event in servernamepduids not found in ", + "[Appservice] Event in servernamepduids not found in db.", ), ) })? From 58463bba93b7f3804f8c654889b0c1b78528241e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 25 Apr 2021 14:10:07 +0200 Subject: [PATCH 0549/1727] feat: send invites over federation --- src/client_server/membership.rs | 283 ++++++++++++++++++++++++++++---- src/client_server/profile.rs | 154 +++++++++-------- src/client_server/room.rs | 23 +-- src/database/rooms.rs | 68 ++++---- src/server_server.rs | 2 +- 5 files changed, 373 insertions(+), 157 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 7eca676..a9a5109 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -5,6 +5,7 @@ use crate::{ server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::{error, warn}; +use member::{MemberEventContent, MembershipState}; use rocket::futures; use ruma::{ api::{ @@ -16,16 +17,21 @@ use ruma::{ unban_user, IncomingThirdPartySigned, }, }, - federation, + federation::{self, membership::create_invite}, + }, + events::{ + pdu::Pdu, + room::{create::CreateEventContent, member}, + EventType, }, - events::{pdu::Pdu, room::member, EventType}, serde::{to_canonical_value, CanonicalJsonObject, Raw}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; +use state_res::EventMap; use std::{ collections::{BTreeMap, HashSet}, - convert::TryFrom, - sync::RwLock, + convert::{TryFrom, TryInto}, + sync::{Arc, RwLock}, }; #[cfg(feature = "conduit_bin")] @@ -152,35 +158,8 @@ pub async fn invite_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - if body.room_id.server_name() != db.globals.server_name() { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Inviting users from other homeservers is not implemented yet.", - )); - } - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &sender_user, - &body.room_id, - &db, - )?; - + invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush().await?; - Ok(invite_user::Response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -744,3 +723,241 @@ async fn validate_and_add_event_id( Ok((event_id, value)) } + +pub async fn invite_helper( + sender_user: &UserId, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + is_direct: bool, +) -> Result<()> { + if user_id.server_name() != db.globals.server_name() { + let prev_events = db + .rooms + .get_pdu_leaves(room_id)? 
+ .into_iter() + .take(20) + .collect::>(); + + let create_event = db + .rooms + .room_state_get(room_id, &EventType::RoomCreate, "")?; + + let create_event_content = create_event + .as_ref() + .map(|create_event| { + Ok::<_, Error>( + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, + ) + }) + .transpose()?; + + let create_prev_event = if prev_events.len() == 1 + && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) + { + create_event.map(Arc::new) + } else { + None + }; + + // If there was no create event yet, assume we are creating a version 6 room right now + let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + + let content = serde_json::to_value(MemberEventContent { + avatar_url: None, + displayname: None, + is_direct: Some(is_direct), + membership: MembershipState::Invite, + third_party_invite: None, + }) + .expect("member event is valid value"); + + let state_key = user_id.to_string(); + let kind = EventType::RoomMember; + + let auth_events = + db.rooms + .get_auth_events(room_id, &kind, &sender_user, Some(&state_key), &content)?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = BTreeMap::new(); + + if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? { + unsigned.insert("prev_content".to_owned(), prev_pdu.content); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + ); + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender_user.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind, + content, + state_key: Some(state_key), + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts: None, + unsigned, + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }; + + let auth_check = state_res::auth_check( + &room_version, + &Arc::new(pdu.clone()), + create_prev_event, + &auth_events, + None, // TODO: third_party_invite + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut pdu_json, + &room_version, + ) + .expect("event is valid, we just created it"); + + let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; + let response = db + .sending + .send_federation_request( + &db.globals, + user_id.server_name(), + create_invite::v2::Request { + room_id: room_id.clone(), + event_id: 
ruma::event_id!("$receivingservershouldsetthis"), + room_version: RoomVersionId::Version6, + event: PduEvent::convert_to_outgoing_federation_event(pdu_json), + invite_room_state, + }, + ) + .await?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + let origin = serde_json::from_value::>( + serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ))?) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let pdu_id = server_server::handle_incoming_pdu( + &origin, + &event_id, + value, + true, + &db, + &pub_key_map, + &mut auth_cache, + ) + .await + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + + for server in db + .rooms + .room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != db.globals.server_name()) + { + db.sending.send_pdu(&server, &pdu_id)?; + } + + return Ok(()); + } + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(member::MemberEventContent { + membership: member::MembershipState::Invite, + displayname: db.users.displayname(&user_id)?, + avatar_url: db.users.avatar_url(&user_id)?, + is_direct: None, + third_party_invite: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &sender_user, + room_id, + &db, + )?; + + Ok(()) +} diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 9bcb289..f2c141b 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -30,41 +30,48 @@ pub async fn set_displayname_route( .set_displayname(&sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - for room_id in db.rooms.rooms_joined(&sender_user) { - let room_id = room_id?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - displayname: body.displayname.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_user.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( - "Tried to send displayname update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
- }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - &sender_user, - &room_id, - &db, - )?; + for (pdu_builder, room_id) in db + .rooms + .rooms_joined(&sender_user) + .filter_map(|r| r.ok()) + .map(|room_id| { + Ok::<_, Error>(( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + displayname: body.displayname.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_user.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send displayname update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + room_id, + )) + }) + .filter_map(|r| r.ok()) + { + let _ = db + .rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); // Presence update db.rooms.edus.update_presence( @@ -124,41 +131,48 @@ pub async fn set_avatar_url_route( .set_avatar_url(&sender_user, body.avatar_url.clone())?; // Send a new membership event and presence update into all joined rooms - for room_id in db.rooms.rooms_joined(&sender_user) { - let room_id = room_id?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { - avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( - db.rooms - .room_state_get( - &room_id, - &EventType::RoomMember, - &sender_user.to_string(), - )? - .ok_or_else(|| { - Error::bad_database( - "Tried to send avatar url update for user not in the room.", - ) - })? - .content - .clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - &sender_user, - &room_id, - &db, - )?; + for (pdu_builder, room_id) in db + .rooms + .rooms_joined(&sender_user) + .filter_map(|r| r.ok()) + .map(|room_id| { + Ok::<_, Error>(( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + avatar_url: body.avatar_url.clone(), + ..serde_json::from_value::>( + db.rooms + .room_state_get( + &room_id, + &EventType::RoomMember, + &sender_user.to_string(), + )? + .ok_or_else(|| { + Error::bad_database( + "Tried to send displayname update for user not in the room.", + ) + })? + .content + .clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
+ }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + room_id, + )) + }) + .filter_map(|r| r.ok()) + { + let _ = db + .rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/room.rs b/src/client_server/room.rs index bba7f95..f8d6ab2 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,4 +1,5 @@ use super::State; +use crate::client_server::invite_helper; use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; use log::info; use ruma::{ @@ -269,26 +270,8 @@ pub async fn create_room_route( } // 7. Events implied by invite (and TODO: invite_3pid) - for user in &body.invite { - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user)?, - avatar_url: db.users.avatar_url(&user)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user.to_string()), - redacts: None, - }, - &sender_user, - &room_id, - &db, - )?; + for user_id in &body.invite { + let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; } // Homeserver specific stuff diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b714582..24ab65f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -809,39 +809,7 @@ impl Rooms { let invite_state = match membership { member::MembershipState::Invite => { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&pdu.room_id, &EventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &pdu.room_id, - &EventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&pdu.room_id, &EventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&pdu.room_id, &EventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &pdu.room_id, - &EventType::RoomMember, - pdu.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(pdu.to_stripped_state_event()); + let state = self.calculate_invite_state(pdu)?; Some(state) } @@ -1184,6 +1152,40 @@ impl Rooms { } } + pub fn calculate_invite_state( + &self, + invite_event: &PduEvent, + ) -> Result>> { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = + self.room_state_get(&invite_event.room_id, &EventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &EventType::RoomCanonicalAlias, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomAvatar, "")? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomName, "")? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &EventType::RoomMember, + invite_event.sender.as_str(), + )? 
{ + state.push(e.to_stripped_state_event()); + } + + state.push(invite_event.to_stripped_state_event()); + Ok(state) + } + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; diff --git a/src/server_server.rs b/src/server_server.rs index 187ec4f..a6d5864 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -689,7 +689,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin( +pub fn handle_incoming_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, value: BTreeMap, From 5be5c9e9f0d6cc6fe76706a594ceed0e2a52ab54 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 26 Apr 2021 18:01:36 +0200 Subject: [PATCH 0550/1727] Bump ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 4 ++-- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e50907d..b0212c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1634,7 +1634,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "assign", "js_int", @@ -1654,7 +1654,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "bytes", "http", @@ -1670,7 +1670,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1681,7 +1681,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "ruma-api", "ruma-common", @@ -1695,7 +1695,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "assign", "bytes", @@ -1715,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "indexmap", "js_int", @@ -1731,7 +1731,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.3" -source = 
"git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "js_int", "ruma-common", @@ -1745,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1756,7 +1756,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "js_int", "ruma-api", @@ -1771,7 +1771,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "paste", "rand", @@ -1785,7 +1785,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "proc-macro2", "quote", @@ -1796,12 +1796,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" [[package]] name = "ruma-identity-service-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "ruma-api", "ruma-common", @@ -1814,7 +1814,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "js_int", "ruma-api", @@ -1829,7 +1829,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "bytes", "form_urlencoded", @@ -1843,7 +1843,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" 
version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1854,7 +1854,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88#12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88" +source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" dependencies = [ "base64 0.13.0", "ring", @@ -2167,7 +2167,7 @@ dependencies = [ [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=1dd252d1c97a38def74bc097c197a33179ed8fbb#1dd252d1c97a38def74bc097c197a33179ed8fbb" +source = "git+https://github.com/ruma/state-res?rev=ce665d213fffeaa47e146d01c6b87f9eb9feaa52#ce665d213fffeaa47e146d01c6b87f9eb9feaa52" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index a800350..8554fa2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "12ec0fb1680ebc4fec4fbefbbd0890ae4eaf3a88", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "d27584ae3bdc035529e7389f1c392d4c96f9f8eb", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "1dd252d1c97a38def74bc097c197a33179ed8fbb", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "ce665d213fffeaa47e146d01c6b87f9eb9feaa52", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio From c2b72773d564942d17ddc1eb1e38c8e0d547ac4b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 26 Apr 2021 18:01:41 +0200 Subject: [PATCH 0551/1727] Fix clippy warning --- src/main.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index af1ddac..1c058b0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -31,7 +31,7 @@ use tracing::span; use tracing_subscriber::{prelude::*, Registry}; fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket { - let rocket = rocket::custom(config) + rocket::custom(config) .manage(data) 
.mount( "/", @@ -167,9 +167,7 @@ fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket Date: Mon, 26 Apr 2021 18:20:20 +0200 Subject: [PATCH 0552/1727] Refactor usage of CanonicalJsonValue --- src/client_server/membership.rs | 17 +++--- src/database/rooms.rs | 23 ++++---- src/pdu.rs | 4 +- src/server_server.rs | 94 ++++++++++++++------------------- 4 files changed, 63 insertions(+), 75 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index a9a5109..05501bd 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -24,7 +24,7 @@ use ruma::{ room::{create::CreateEventContent, member}, EventType, }, - serde::{to_canonical_value, CanonicalJsonObject, Raw}, + serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use state_res::EventMap; @@ -481,13 +481,15 @@ async fn join_room_by_id_helper( // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .map_err(|_| Error::bad_database("Invalid server name found"))?, + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), ); join_event_stub.insert( "origin_server_ts".to_owned(), - to_canonical_value(utils::millis_since_unix_epoch()) - .expect("Timestamp is valid js_int value"), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), ); join_event_stub.insert( "content".to_owned(), @@ -524,7 +526,7 @@ async fn join_room_by_id_helper( // Add event_id back join_event_stub.insert( "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.as_str().to_owned()), ); // It has enough fields to be called a proper event now @@ -717,8 +719,7 @@ async fn validate_and_add_event_id( value.insert( "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("a valid EventId can be converted to CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.as_str().to_owned()), ); Ok((event_id, value)) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 24ab65f..7cee944 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -15,7 +15,7 @@ use ruma::{ AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, push::{self, Action, Tweak}, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; @@ -372,9 +372,7 @@ impl Rooms { for event_id in new_state.difference(&old_state) { if let Some(pdu) = self.get_pdu_json(event_id)? 
{ - if pdu.get("event_type") - == Some(&CanonicalJsonValue::String("m.room.member".to_owned())) - { + if pdu.get("event_type").and_then(|val| val.as_str()) == Some("m.room.member") { if let Ok(pdu) = serde_json::from_value::( serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"), ) { @@ -1321,8 +1319,7 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), ); ruma::signatures::hash_and_sign_event( @@ -1343,7 +1340,7 @@ impl Rooms { pdu_json.insert( "event_id".to_owned(), - to_canonical_value(&pdu.event_id).expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), ); // Increment the last index and use that @@ -1885,13 +1882,15 @@ impl Rooms { // TODO: Is origin needed? leave_event_stub.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .map_err(|_| Error::bad_database("Invalid server name found"))?, + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), ); leave_event_stub.insert( "origin_server_ts".to_owned(), - to_canonical_value(utils::millis_since_unix_epoch()) - .expect("Timestamp is valid js_int value"), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), ); // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms leave_event_stub.remove("event_id"); @@ -1916,7 +1915,7 @@ impl Rooms { // Add event_id back leave_event_stub.insert( "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.as_str().to_owned()), ); // It has enough fields to be called a proper event now diff --git a/src/pdu.rs b/src/pdu.rs index a7d9432..d66247f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -5,7 +5,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -238,7 +238,7 @@ impl PduEvent { ) -> Result { json.insert( "event_id".to_string(), - to_canonical_value(event_id).expect("event_id is a valid Value"), + CanonicalJsonValue::String(event_id.as_str().to_owned()), ); serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) diff --git a/src/server_server.rs b/src/server_server.rs index a6d5864..1e58067 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -33,7 +33,7 @@ use ruma::{ }, EventType, }, - serde::{to_canonical_value, Raw}, + serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -709,11 +709,7 @@ pub fn handle_incoming_pdu<'a>( // 1. 
Check the server is in the room let room_id = match value .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() + .and_then(|id| RoomId::try_from(id.as_str()?).ok()) { Some(id) => id, None => { @@ -776,7 +772,7 @@ pub fn handle_incoming_pdu<'a>( // to our PduEvent type val.insert( "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.as_str().to_owned()), ); let incoming_pdu = serde_json::from_value::( serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), @@ -1306,8 +1302,7 @@ pub(crate) fn fetch_and_handle_events<'a>( Ok(_) => { value.insert( "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.into()), ); Arc::new(serde_json::from_value( @@ -1805,8 +1800,7 @@ pub fn create_join_event_template_route<'a>( // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), ); Ok(create_join_event_template::v1::Response { @@ -1979,33 +1973,30 @@ pub async fn create_invite_route<'a>( // Add event_id back signed_event.insert( "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + CanonicalJsonValue::String(event_id.into()), ); let sender = serde_json::from_value( - serde_json::to_value( - signed_event - .get("sender") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no sender field.", - ))? - .clone(), - ) - .expect("CanonicalJsonValue to serde_json::Value always works"), + signed_event + .get("sender") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? + .clone() + .into(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + let invited_user = serde_json::from_value( - serde_json::to_value( - signed_event - .get("state_key") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no state_key field.", - ))? - .clone(), - ) - .expect("CanonicalJsonValue to serde_json::Value always works"), + signed_event + .get("state_key") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? + .clone() + .into(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; @@ -2150,39 +2141,36 @@ pub async fn fetch_required_signing_keys( pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + // We go through all the signatures we see on the value and fetch the corresponding signing // keys - for (signature_server, signature) in match event.get("signatures").ok_or( - Error::BadServerResponse("No signatures in server response pdu."), - )? 
{ - CanonicalJsonValue::Object(map) => map, - _ => { - return Err(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - )) - } - } { - let signature_object = match signature { - CanonicalJsonValue::Object(map) => map, - _ => { - return Err(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - )) - } - }; + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; let signature_ids = signature_object.keys().collect::>(); debug!("Fetching signing keys for {}", signature_server); - let keys = match fetch_signing_keys( + let fetch_res = fetch_signing_keys( db, &Box::::try_from(&**signature_server).map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, signature_ids, ) - .await - { + .await; + + let keys = match fetch_res { Ok(keys) => keys, Err(_) => { warn!("Signature verification failed: Could not fetch signing key.",); From 3c3062a316d37e9f097f8afcc50d028845eb66eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 28 Apr 2021 17:52:58 +0200 Subject: [PATCH 0553/1727] improvement: optimize room directory --- src/client_server/directory.rs | 57 +++++++++++++++++----------------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index ae70ec5..018050d 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -209,17 +209,15 @@ pub async fn get_public_rooms_filtered_helper( .map(|room_id| { let room_id = room_id?; - // TODO: Do not load full state? - let state = db.rooms.room_state_full(&room_id)?; - let chunk = PublicRoomsChunk { aliases: Vec::new(), - canonical_alias: state - .get(&(EventType::RoomCanonicalAlias, "".to_owned())) + canonical_alias: db + .rooms + .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? .map_or(Ok::<_, Error>(None), |s| { Ok(serde_json::from_value::< Raw, - >(s.content.clone()) + >(s.content) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { @@ -227,11 +225,12 @@ pub async fn get_public_rooms_filtered_helper( })? .alias) })?, - name: state.get(&(EventType::RoomName, "".to_owned())).map_or( - Ok::<_, Error>(None), - |s| { + name: db + .rooms + .room_state_get(&room_id, &EventType::RoomName, "")? + .map_or(Ok::<_, Error>(None), |s| { Ok(serde_json::from_value::>( - s.content.clone(), + s.content, ) .expect("from_value::> can never fail") .deserialize() @@ -240,16 +239,15 @@ pub async fn get_public_rooms_filtered_helper( })? .name() .map(|n| n.to_owned())) - }, - )?, + })?, num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), - room_id, - topic: state.get(&(EventType::RoomTopic, "".to_owned())).map_or( - Ok::<_, Error>(None), - |s| { + topic: db + .rooms + .room_state_get(&room_id, &EventType::RoomTopic, "")? + .map_or(Ok::<_, Error>(None), |s| { Ok(Some( serde_json::from_value::>( - s.content.clone(), + s.content, ) .expect("from_value::> can never fail") .deserialize() @@ -258,14 +256,14 @@ pub async fn get_public_rooms_filtered_helper( })? .topic, )) - }, - )?, - world_readable: state - .get(&(EventType::RoomHistoryVisibility, "".to_owned())) + })?, + world_readable: db + .rooms + .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? 
.map_or(Ok::<_, Error>(false), |s| { Ok(serde_json::from_value::< Raw, - >(s.content.clone()) + >(s.content) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { @@ -276,12 +274,13 @@ pub async fn get_public_rooms_filtered_helper( .history_visibility == history_visibility::HistoryVisibility::WorldReadable) })?, - guest_can_join: state - .get(&(EventType::RoomGuestAccess, "".to_owned())) + guest_can_join: db + .rooms + .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? .map_or(Ok::<_, Error>(false), |s| { Ok( serde_json::from_value::>( - s.content.clone(), + s.content, ) .expect("from_value::> can never fail") .deserialize() @@ -292,12 +291,13 @@ pub async fn get_public_rooms_filtered_helper( == guest_access::GuestAccess::CanJoin, ) })?, - avatar_url: state - .get(&(EventType::RoomAvatar, "".to_owned())) + avatar_url: db + .rooms + .room_state_get(&room_id, &EventType::RoomAvatar, "")? .map(|s| { Ok::<_, Error>( serde_json::from_value::>( - s.content.clone(), + s.content, ) .expect("from_value::> can never fail") .deserialize() @@ -310,6 +310,7 @@ pub async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), + room_id, }; Ok(chunk) }) From b76af682a6010f663eddbbd979b6dc846f8e3a2b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 20:16:49 +0200 Subject: [PATCH 0554/1727] Fix clippy warnings --- src/client_server/profile.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index f2c141b..79d0077 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -52,8 +52,7 @@ pub async fn set_displayname_route( "Tried to send displayname update for user not in the room.", ) })? - .content - .clone(), + .content, ) .expect("from_value::> can never fail") .deserialize() @@ -153,8 +152,7 @@ pub async fn set_avatar_url_route( "Tried to send displayname update for user not in the room.", ) })? - .content - .clone(), + .content, ) .expect("from_value::> can never fail") .deserialize() From 61c522dcc0e350f59bd69e140e588e74b3d6a96b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 20:16:52 +0200 Subject: [PATCH 0555/1727] Fix formatting --- src/client_server/profile.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 79d0077..882b02e 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -49,8 +49,9 @@ pub async fn set_displayname_route( )? .ok_or_else(|| { Error::bad_database( - "Tried to send displayname update for user not in the room.", - ) + "Tried to send displayname update for user not in the \ + room.", + ) })? .content, ) @@ -149,8 +150,9 @@ pub async fn set_avatar_url_route( )? .ok_or_else(|| { Error::bad_database( - "Tried to send displayname update for user not in the room.", - ) + "Tried to send displayname update for user not in the \ + room.", + ) })? .content, ) From 08ca573df0fa4bf2650643a26f3d46151466bd7f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 16:50:04 +0200 Subject: [PATCH 0556/1727] Remove explicit setting of content-type header `try_into_http_request` takes care of this already. 
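To make the intent concrete, this is roughly the call pattern that remains after the change (an illustrative sketch only, mirroring the calls already used elsewhere in this series; `destination` and `access_token` are placeholders for whatever the caller has at hand, and the body type follows the `BytesMut` import in this file): the request produced by `try_into_http_request` already carries the correct Content-Type, so it can be handed straight to reqwest.

    // Sketch, not part of the diff below: the conversion fills in Content-Type
    // itself, so the manual headers_mut().insert(CONTENT_TYPE, ...) block that
    // this patch removes was redundant.
    let http_request = request
        .try_into_http_request::<BytesMut>(&destination, SendAccessToken::IfRequired(access_token))
        .expect("appservice requests are valid http requests");

    let reqwest_request = reqwest::Request::try_from(http_request)
        .expect("all http requests are valid reqwest requests");
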
--- src/appservice_server.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 8f64edf..27d0c0d 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,5 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; -use http::header::{HeaderValue, CONTENT_TYPE}; use log::warn; use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{ @@ -40,11 +39,6 @@ where ); *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); - http_request.headers_mut().insert( - CONTENT_TYPE, - HeaderValue::from_str("application/json").unwrap(), - ); - let mut reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); From c28eba1e5b1164f5e7021de6172e82e16bd5ffd0 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 20:56:45 +0200 Subject: [PATCH 0557/1727] Bump ruma --- Cargo.lock | 37 +++++++++++++++++++------------------ Cargo.toml | 4 ++-- src/client_server/config.rs | 14 +++++++------- 3 files changed, 28 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0212c8..60301bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1634,7 +1634,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "assign", "js_int", @@ -1654,7 +1654,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "bytes", "http", @@ -1670,7 +1670,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1681,7 +1681,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "ruma-api", "ruma-common", @@ -1695,7 +1695,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "assign", "bytes", @@ -1715,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = 
"git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "indexmap", "js_int", @@ -1731,7 +1731,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "js_int", "ruma-common", @@ -1745,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1756,7 +1756,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "js_int", "ruma-api", @@ -1771,7 +1771,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "paste", "rand", @@ -1785,7 +1785,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "proc-macro2", "quote", @@ -1796,13 +1796,14 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" [[package]] name = "ruma-identity-service-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ + "js_int", "ruma-api", "ruma-common", "ruma-identifiers", @@ -1814,7 +1815,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "js_int", "ruma-api", @@ -1829,7 +1830,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = 
"git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "bytes", "form_urlencoded", @@ -1843,7 +1844,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1854,7 +1855,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=d27584ae3bdc035529e7389f1c392d4c96f9f8eb#d27584ae3bdc035529e7389f1c392d4c96f9f8eb" +source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" dependencies = [ "base64 0.13.0", "ring", @@ -2167,7 +2168,7 @@ dependencies = [ [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=ce665d213fffeaa47e146d01c6b87f9eb9feaa52#ce665d213fffeaa47e146d01c6b87f9eb9feaa52" +source = "git+https://github.com/ruma/state-res?rev=c20893e536bea4d17a9fe6af28428fb17169b56f#c20893e536bea4d17a9fe6af28428fb17169b56f" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 8554fa2..f12420e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "d27584ae3bdc035529e7389f1c392d4c96f9f8eb", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "8c286e78d41770fe431e7304cc2fe23e383793df", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "ce665d213fffeaa47e146d01c6b87f9eb9feaa52", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "c20893e536bea4d17a9fe6af28428fb17169b56f", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 68cd2e0..b6f272d 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -8,7 +8,7 @@ use ruma::{ 
set_room_account_data, }, }, - events::{custom::CustomEventContent, BasicEvent}, + events::{custom::CustomEventContent, AnyBasicEventContent, BasicEvent}, serde::Raw, }; @@ -91,14 +91,14 @@ pub async fn get_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = db + let account_data = db .account_data - .get::>(None, sender_user, body.event_type.clone().into())? + .get::>(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; db.flush().await?; - Ok(get_global_account_data::Response { account_data: data }.into()) + Ok(get_global_account_data::Response { account_data }.into()) } #[cfg_attr( @@ -115,9 +115,9 @@ pub async fn get_room_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = db + let account_data = db .account_data - .get::>( + .get::>( Some(&body.room_id), sender_user, body.event_type.clone().into(), @@ -126,5 +126,5 @@ pub async fn get_room_account_data_route( db.flush().await?; - Ok(get_room_account_data::Response { account_data: data }.into()) + Ok(get_room_account_data::Response { account_data }.into()) } From 8bfaf09f3750ddd64762d278827e2e933cec2f52 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 20:58:05 +0200 Subject: [PATCH 0558/1727] Clean up reqwest::Response to http::Response conversion --- src/appservice_server.rs | 30 +++++++++++++++--------------- src/database/pusher.rs | 35 +++++++++++++++++------------------ src/server_server.rs | 34 +++++++++++++++++----------------- 3 files changed, 49 insertions(+), 50 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 27d0c0d..4291857 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -5,6 +5,7 @@ use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{ convert::{TryFrom, TryInto}, fmt::Debug, + mem, time::Duration, }; @@ -45,22 +46,21 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut reqwest_response = globals.reqwest_client().execute(reqwest_request).await?; + let mut response = globals.reqwest_client().execute(reqwest_request).await?; - // Because reqwest::Response -> http::Response is complicated: - let status = reqwest_response.status(); - let mut http_response = http::Response::builder().status(status); - let headers = http_response.headers_mut().unwrap(); + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); - for (k, v) in reqwest_response.headers_mut().drain() { - if let Some(key) = k { - headers.insert(key, v); - } - } - - let status = reqwest_response.status(); - - let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error: {}", e); Vec::new().into() }); // TODO: handle timeout @@ -76,7 +76,7 @@ where } let response = T::IncomingResponse::try_from_http_response( - http_response + http_response_builder .body(body) .expect("reqwest body is valid http body"), ); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 7c7abb3..75c2efb 100644 --- a/src/database/pusher.rs 
+++ b/src/database/pusher.rs @@ -16,7 +16,7 @@ use ruma::{ }; use sled::IVec; -use std::{convert::TryFrom, fmt::Debug}; +use std::{convert::TryFrom, fmt::Debug, mem}; #[derive(Debug, Clone)] pub struct PushData { @@ -114,24 +114,23 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + let response = globals.reqwest_client().execute(reqwest_request).await; - // Because reqwest::Response -> http::Response is complicated: - match reqwest_response { - Ok(mut reqwest_response) => { - let status = reqwest_response.status(); - let mut http_response = http::Response::builder().status(status); - let headers = http_response.headers_mut().unwrap(); + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); - for (k, v) in reqwest_response.headers_mut().drain() { - if let Some(key) = k { - headers.insert(key, v); - } - } - - let status = reqwest_response.status(); - - let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error {}", e); Vec::new().into() }); // TODO: handle timeout @@ -147,7 +146,7 @@ where } let response = T::IncomingResponse::try_from_http_response( - http_response + http_response_builder .body(body) .expect("reqwest body is valid http body"), ); diff --git a/src/server_server.rs b/src/server_server.rs index 1e58067..908a54e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -43,6 +43,7 @@ use std::{ convert::{TryFrom, TryInto}, fmt::Debug, future::Future, + mem, net::{IpAddr, SocketAddr}, pin::Pin, result::Result as StdResult, @@ -219,24 +220,23 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + let response = globals.reqwest_client().execute(reqwest_request).await; - // Because reqwest::Response -> http::Response is complicated: - match reqwest_response { - Ok(mut reqwest_response) => { - let status = reqwest_response.status(); - let mut http_response = http::Response::builder().status(status); - let headers = http_response.headers_mut().unwrap(); + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); - for (k, v) in reqwest_response.headers_mut().drain() { - if let Some(key) = k { - headers.insert(key, v); - } - } - - let status = reqwest_response.status(); - - let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error {}", e); Vec::new().into() }); // TODO: handle timeout @@ -254,7 +254,7 @@ where } let response = T::IncomingResponse::try_from_http_response( - http_response + http_response_builder .body(body) .expect("reqwest body is valid http body"), ); From e1c4e5c73ebde9451cc947254cbe58c92ee0a42a Mon 
Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Apr 2021 21:06:15 +0200 Subject: [PATCH 0559/1727] Return only event content in account_data endpoints, not the entire event --- src/client_server/config.rs | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index b6f272d..07a9bc9 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -11,6 +11,8 @@ use ruma::{ events::{custom::CustomEventContent, AnyBasicEventContent, BasicEvent}, serde::Raw, }; +use serde::Deserialize; +use serde_json::value::RawValue as RawJsonValue; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -91,13 +93,16 @@ pub async fn get_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let account_data = db + let event = db .account_data - .get::>(None, sender_user, body.event_type.clone().into())? + .get::>(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush().await?; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + Ok(get_global_account_data::Response { account_data }.into()) } @@ -115,16 +120,24 @@ pub async fn get_room_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let account_data = db + let event = db .account_data - .get::>( + .get::>( Some(&body.room_id), sender_user, body.event_type.clone().into(), )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush().await?; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + Ok(get_room_account_data::Response { account_data }.into()) } + +#[derive(Deserialize)] +struct ExtractEventContent { + content: Raw, +} From cf94b8e712b0c8d31ec8a55022b78c1e112560e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 4 May 2021 19:03:18 +0200 Subject: [PATCH 0560/1727] improvement: uiaa works like in synapse --- src/client_server/account.rs | 39 ++++++-- src/client_server/device.rs | 14 ++- src/client_server/directory.rs | 164 ++++++++++++++++----------------- src/client_server/keys.rs | 7 +- src/client_server/message.rs | 9 +- src/client_server/mod.rs | 6 +- src/database.rs | 3 +- src/database/uiaa.rs | 125 +++++++++++++++++++------ src/ruma_wrapper.rs | 56 ++++++++--- src/server_server.rs | 80 +++++++++------- 10 files changed, 326 insertions(+), 177 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 2241d45..6554277 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,7 @@ -use std::{collections::BTreeMap, convert::TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; @@ -143,16 +146,28 @@ pub async fn register_route( if !body.from_appservice { if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - db.uiaa - .try_auth(&user_id, "".into(), auth, &uiaainfo, &db.users, &db.globals)?; + let (worked, uiaainfo) = db.uiaa.try_auth( + &UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + &db.users, + &db.globals, + )?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&user_id, "".into(), &uiaainfo)?; + db.uiaa.create( + &UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } } @@ -526,7 +541,12 @@ pub async fn change_password_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; + db.uiaa.create( + &sender_user, + &sender_device, + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } @@ -612,7 +632,12 @@ pub async fn deactivate_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; + db.uiaa.create( + &sender_user, + &sender_device, + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 1950c5c..961ba97 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -115,7 +115,12 @@ pub async fn delete_device_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; + db.uiaa.create( + &sender_user, + &sender_device, + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } @@ -164,7 +169,12 @@ pub async fn delete_devices_route( // Success! 
} else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; + db.uiaa.create( + &sender_user, + &sender_device, + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 018050d..9864a5e 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -203,19 +203,20 @@ pub async fn get_public_rooms_filtered_helper( } } - let mut all_rooms = - db.rooms - .public_rooms() - .map(|room_id| { - let room_id = room_id?; + let mut all_rooms = db + .rooms + .public_rooms() + .map(|room_id| { + let room_id = room_id?; - let chunk = PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: db - .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::< + let chunk = PublicRoomsChunk { + aliases: Vec::new(), + canonical_alias: db + .rooms + .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok( + serde_json::from_value::< Raw, >(s.content) .expect("from_value::> can never fail") @@ -223,62 +224,61 @@ pub async fn get_public_rooms_filtered_helper( .map_err(|_| { Error::bad_database("Invalid canonical alias event in database.") })? - .alias) - })?, - name: db - .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::>( - s.content, - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - })? - .name() - .map(|n| n.to_owned())) - })?, - num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), - topic: db - .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(Some( - serde_json::from_value::>( - s.content, - ) + .alias, + ) + })?, + name: db + .rooms + .room_state_get(&room_id, &EventType::RoomName, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok( + serde_json::from_value::>(s.content) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? + .name() + .map(|n| n.to_owned()), + ) + })?, + num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), + topic: db + .rooms + .room_state_get(&room_id, &EventType::RoomTopic, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok(Some( + serde_json::from_value::>(s.content) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room topic event in database.") })? .topic, - )) - })?, - world_readable: db - .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? - .history_visibility - == history_visibility::HistoryVisibility::WorldReadable) - })?, - guest_can_join: db - .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok( + )) + })?, + world_readable: db + .rooms + .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? 
+ .map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< + Raw, + >(s.content) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? + .history_visibility + == history_visibility::HistoryVisibility::WorldReadable) + })?, + guest_can_join: db + .rooms + .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? + .map_or(Ok::<_, Error>(false), |s| { + Ok( serde_json::from_value::>( s.content, ) @@ -290,33 +290,31 @@ pub async fn get_public_rooms_filtered_helper( .guest_access == guest_access::GuestAccess::CanJoin, ) - })?, - avatar_url: db - .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? - .map(|s| { - Ok::<_, Error>( - serde_json::from_value::>( - s.content, - ) + })?, + avatar_url: db + .rooms + .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .map(|s| { + Ok::<_, Error>( + serde_json::from_value::>(s.content) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room avatar event in database.") })? .url, - ) - }) - .transpose()? - // url is now an Option so we must flatten - .flatten(), - room_id, - }; - Ok(chunk) - }) - .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms - // We need to collect all, so we can sort by member count - .collect::>(); + ) + }) + .transpose()? + // url is now an Option so we must flatten + .flatten(), + room_id, + }; + Ok(chunk) + }) + .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + // We need to collect all, so we can sort by member count + .collect::>(); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 08bb4c6..aafa157 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -220,7 +220,12 @@ pub async fn upload_signing_keys_route( // Success! } else { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create(&sender_user, &sender_device, &uiaainfo)?; + db.uiaa.create( + &sender_user, + &sender_device, + &uiaainfo, + &body.json_body.expect("body is json"), + )?; return Err(Error::Uiaa(uiaainfo)); } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index ecd2665..96de93d 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -56,13 +56,8 @@ pub async fn send_message_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::from(&body.event_type), - content: serde_json::from_str( - body.json_body - .as_ref() - .ok_or(Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))? 
- .get(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + content: serde_json::from_str(body.body.body.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, unsigned: Some(unsigned), state_key: None, redacts: None, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index dd8e7a6..825dbbb 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -69,9 +69,9 @@ use { ruma::api::client::r0::to_device::send_event_to_device, }; -const DEVICE_ID_LENGTH: usize = 10; -const TOKEN_LENGTH: usize = 256; -const SESSION_ID_LENGTH: usize = 256; +pub const DEVICE_ID_LENGTH: usize = 10; +pub const TOKEN_LENGTH: usize = 256; +pub const SESSION_ID_LENGTH: usize = 256; #[cfg(feature = "conduit_bin")] #[options("/<_..>")] diff --git a/src/database.rs b/src/database.rs index 06a708d..6504f9c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -135,7 +135,8 @@ impl Database { todeviceid_events: db.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { - userdeviceid_uiaainfo: db.open_tree("userdeviceid_uiaainfo")?, + userdevicesessionid_uiaainfo: db.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: db.open_tree("userdevicesessionid_uiaarequest")?, }, rooms: rooms::Rooms { edus: rooms::RoomEdus { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 4c33b86..3b77840 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,15 +1,17 @@ -use crate::{Error, Result}; +use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, r0::uiaa::{IncomingAuthData, UiaaInfo}, }, + signatures::CanonicalJsonValue, DeviceId, UserId, }; #[derive(Clone)] pub struct Uiaa { - pub(super) userdeviceid_uiaainfo: sled::Tree, // User-interactive authentication + pub(super) userdevicesessionid_uiaainfo: sled::Tree, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: sled::Tree, // UiaaRequest = canonical json value } impl Uiaa { @@ -19,8 +21,20 @@ impl Uiaa { user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, + json_body: &CanonicalJsonValue, ) -> Result<()> { - self.update_uiaa_session(user_id, device_id, Some(uiaainfo)) + self.set_uiaa_request( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) + json_body, + )?; + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), + Some(uiaainfo), + ) } pub fn try_auth( @@ -45,6 +59,10 @@ impl Uiaa { }) .unwrap_or_else(|| Ok(uiaainfo.clone()))?; + if uiaainfo.session.is_none() { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + } + // Find out what the user completed match &**kind { "m.login.password" => { @@ -130,35 +148,96 @@ impl Uiaa { } if !completed { - self.update_uiaa_session(user_id, device_id, Some(&uiaainfo))?; + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + Some(&uiaainfo), + )?; return Ok((false, uiaainfo)); } // UIAA was successful! 
Remove this session and return true - self.update_uiaa_session(user_id, device_id, None)?; + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + None, + )?; Ok((true, uiaainfo)) } else { panic!("FallbackAcknowledgement is not supported yet"); } } + fn set_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + request: &CanonicalJsonValue, + ) -> Result<()> { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + self.userdevicesessionid_uiaarequest.insert( + &userdevicesessionid, + &*serde_json::to_string(request).expect("json value to string always works"), + )?; + + Ok(()) + } + + pub fn get_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Result> { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + self.userdevicesessionid_uiaarequest + .get(&userdevicesessionid)? + .map_or(Ok(None), |bytes| { + Ok::<_, Error>(Some( + serde_json::from_str::( + &utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid uiaa request bytes in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid uiaa request in db."))?, + )) + }) + } + fn update_uiaa_session( &self, user_id: &UserId, device_id: &DeviceId, + session: &str, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); if let Some(uiaainfo) = uiaainfo { - self.userdeviceid_uiaainfo.insert( - &userdeviceid, + self.userdevicesessionid_uiaainfo.insert( + &userdevicesessionid, &*serde_json::to_string(&uiaainfo).expect("UiaaInfo::to_string always works"), )?; } else { - self.userdeviceid_uiaainfo.remove(&userdeviceid)?; + self.userdevicesessionid_uiaainfo + .remove(&userdevicesessionid)?; } Ok(()) @@ -170,14 +249,16 @@ impl Uiaa { device_id: &DeviceId, session: &str, ) -> Result { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); let uiaainfo = serde_json::from_slice::( &self - .userdeviceid_uiaainfo - .get(&userdeviceid)? + .userdevicesessionid_uiaainfo + .get(&userdevicesessionid)? 
.ok_or(Error::BadRequest( ErrorKind::Forbidden, "UIAA session does not exist.", @@ -185,18 +266,6 @@ impl Uiaa { ) .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; - if uiaainfo - .session - .as_ref() - .filter(|&s| s == session) - .is_none() - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session token invalid.", - )); - } - Ok(uiaainfo) } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 49a9fb0..e4eda87 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -8,7 +8,7 @@ use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { - crate::{server_server, utils}, + crate::server_server, log::{debug, warn}, rocket::{ data::{self, ByteUnit, Data, FromData}, @@ -35,7 +35,7 @@ pub struct Ruma { pub sender_user: Option, pub sender_device: Option>, // This is None when body is not a valid string - pub json_body: Option>, + pub json_body: Option, pub from_appservice: bool, } @@ -66,6 +66,8 @@ where let mut body = Vec::new(); handle.read_to_end(&mut body).await.unwrap(); + let mut json_body = serde_json::from_slice::(&body).ok(); + let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = db .appservice .iter_all() @@ -115,7 +117,7 @@ where // Unknown Token None => return Failure((Status::raw(581), ())), Some((user_id, device_id)) => { - (Some(user_id), Some(device_id.into()), false) + (Some(user_id), Some(Box::::from(device_id)), false) } } } else { @@ -187,12 +189,10 @@ where } }; - let json_body = serde_json::from_slice::(&body); - let mut request_map = BTreeMap::::new(); - if let Ok(json_body) = json_body { - request_map.insert("content".to_owned(), json_body); + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); }; request_map.insert( @@ -271,6 +271,43 @@ where http_request = http_request.header(header.name.as_str(), &*header.value); } + match &mut json_body { + Some(CanonicalJsonValue::Object(json_body)) => { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid") + }); + + if let Some(initial_request) = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + db.uiaa + .get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + .ok() + .flatten() + }) + { + match initial_request { + CanonicalJsonValue::Object(initial_request) => { + for (key, value) in initial_request.into_iter() { + json_body.entry(key).or_insert(value); + } + } + _ => {} + } + } + body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); + } + _ => {} + } + let http_request = http_request.body(&*body).unwrap(); debug!("{:?}", http_request); match ::try_from_http_request(http_request) { @@ -278,11 +315,8 @@ where body: t, sender_user, sender_device, - // TODO: Can we avoid parsing it again? (We only need this for append_pdu) - json_body: utils::string_from_bytes(&body) - .ok() - .and_then(|s| serde_json::value::RawValue::from_string(s).ok()), from_appservice, + json_body, }), Err(e) => { warn!("{:?}", e); diff --git a/src/server_server.rs b/src/server_server.rs index 908a54e..3899239 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1018,29 +1018,6 @@ pub fn handle_incoming_pdu<'a>( } debug!("Auth check succeeded."); - // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - let current_state = db - .rooms - .room_state_full(&room_id) - .map_err(|_| "Failed to load room state.".to_owned())? - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(); - - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create, - ¤t_state, - None, - ) - .map_err(|_e| "Auth check failed.".to_owned())? - { - // Soft fail, we leave the event as an outlier but don't add it to the timeline - return Err("Event has been soft failed".into()); - }; - debug!("Auth check with current state succeeded."); - // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) let mut extremities = db @@ -1103,6 +1080,14 @@ pub fn handle_incoming_pdu<'a>( // don't just trust a set of state we got from a remote). // We do this by adding the current state to the list of fork states + let current_state = db + .rooms + .room_state_full(&room_id) + .map_err(|_| "Failed to load room state.".to_owned())? + .into_iter() + .map(|(k, v)| (k, Arc::new(v))) + .collect(); + fork_states.insert(current_state); // We also add state after incoming event to the fork states @@ -1199,18 +1184,40 @@ pub fn handle_incoming_pdu<'a>( } }; - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - let pdu_id = append_incoming_pdu( - &db, + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let soft_fail = !state_res::event_auth::auth_check( + &room_version, &incoming_pdu, - val, - extremities, - &state_at_incoming_event, + previous_create, + &new_room_state + .iter() + .filter_map(|(k, v)| { + Some((k.clone(), Arc::new(db.rooms.get_pdu(&v).ok().flatten()?))) + }) + .collect(), + None, ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; - debug!("Appended incoming pdu."); + .map_err(|_e| "Auth check failed.".to_owned())?; + + let mut pdu_id = None; + if !soft_fail { + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. 
+ pdu_id = Some( + append_incoming_pdu( + &db, + &incoming_pdu, + val, + extremities, + &state_at_incoming_event, + ) + .map_err(|_| "Failed to add pdu to db.".to_owned())?, + ); + debug!("Appended incoming pdu."); + } else { + warn!("Event was soft failed: {:?}", incoming_pdu); + } // Set the new room state to the resolved state if update_state { @@ -1220,8 +1227,13 @@ pub fn handle_incoming_pdu<'a>( } debug!("Updated resolved state"); + if soft_fail { + // Soft fail, we leave the event as an outlier but don't add it to the timeline + return Err("Event has been soft failed".into()); + } + // Event has passed all auth/stateres checks - Ok(Some(pdu_id)) + Ok(pdu_id) }) } From 3408d74f93e7edfddcc4c189585c5a4f917edd9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 May 2021 12:25:37 +0200 Subject: [PATCH 0561/1727] fix: add trusted_servers to config and deploy guide --- DEPLOY.md | 2 ++ conduit-example.toml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/DEPLOY.md b/DEPLOY.md index 3d4541a..8dedad2 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -96,6 +96,8 @@ allow_registration = false allow_encryption = true allow_federation = true +trusted_servers = ["matrix.org"] + #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time #workers = 4 # default: cpu core count * 2 diff --git a/conduit-example.toml b/conduit-example.toml index 87f959d..6d39e30 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -33,6 +33,8 @@ max_request_size = 20_000_000 # in bytes # Enable jaeger to support monitoring and troubleshooting through jaeger #allow_jaeger = false +trusted_servers = ["matrix.org"] + #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" From 268ad3441ca25cb625ff5afb91233286df7a9038 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 16:14:49 +0000 Subject: [PATCH 0562/1727] add dbg_macro check --- src/lib.rs | 2 ++ src/main.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index f7d9062..50ca6ea 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,6 @@ #![allow(clippy::suspicious_else_formatting)] +#![deny(clippy::dbg_macro)] + pub mod appservice_server; pub mod client_server; mod database; diff --git a/src/main.rs b/src/main.rs index 1c058b0..1d6d23a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![allow(clippy::suspicious_else_formatting)] +#![deny(clippy::dbg_macro)] pub mod appservice_server; pub mod client_server; From e48cd9b018931de8e1e5bec4d7f6fe5b7d2b5cb4 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 16:41:22 +0000 Subject: [PATCH 0563/1727] add clippy to CI --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 10ca273..4963912 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,3 +11,4 @@ test:cargo: script: - rustc --version && cargo --version # Print version info for debugging - cargo test --workspace --verbose --locked + - cargo clippy From 9d2cc4d8b1bb28bd497c461bcddd2f33eda9b934 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 16:48:44 +0000 Subject: [PATCH 0564/1727] add cargo fmt check --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 
4963912..85fc704 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,4 +11,5 @@ test:cargo: script: - rustc --version && cargo --version # Print version info for debugging - cargo test --workspace --verbose --locked + - cargo fmt --all -- --check - cargo clippy From 442d0732a43fd2699aaf7c646ba95d6d72d9a10c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 17:01:48 +0000 Subject: [PATCH 0565/1727] add rustup --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 85fc704..0e610e7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,6 +6,7 @@ variables: before_script: - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - rustup component add clippy test:cargo: script: From 9bf99e99a8305af12c1078b6fb94787240746df1 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 17:26:59 +0000 Subject: [PATCH 0566/1727] fix clippy rustup --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0e610e7..e25985b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,11 +6,10 @@ variables: before_script: - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - - rustup component add clippy test:cargo: script: - rustc --version && cargo --version # Print version info for debugging - cargo test --workspace --verbose --locked - cargo fmt --all -- --check - - cargo clippy + - rustup component add clippy && cargo clippy From 80f184447662fed5bfdcbaf43616d6b161043a2f Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 5 May 2021 17:44:32 +0000 Subject: [PATCH 0567/1727] fix rustup pls --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e25985b..d09a881 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,10 +6,11 @@ variables: before_script: - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - rustup component add clippy rustfmt test:cargo: script: - rustc --version && cargo --version # Print version info for debugging - cargo test --workspace --verbose --locked - cargo fmt --all -- --check - - rustup component add clippy && cargo clippy + - cargo clippy From a0457000ff055d43165aa75fc9c04e3d99c7dfbb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 8 May 2021 01:54:24 +0200 Subject: [PATCH 0568/1727] Upgrade Ruma --- Cargo.lock | 183 ++++++++++++++------------------ Cargo.toml | 9 +- src/client_server/account.rs | 5 +- src/client_server/membership.rs | 12 ++- src/client_server/room.rs | 3 +- src/client_server/tag.rs | 4 +- src/database/rooms.rs | 22 ++-- src/pdu.rs | 2 +- src/server_server.rs | 20 ++-- 9 files changed, 117 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60301bb..e750620 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,9 +8,9 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] @@ -211,7 +211,6 @@ dependencies = [ "serde_json", "serde_yaml", "sled", - "state-res", "thiserror", "tokio", "tracing", @@ -277,9 +276,9 @@ 
dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -290,9 +289,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -359,18 +358,18 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" dependencies = [ "libc", "redox_users", @@ -583,17 +582,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.2" @@ -602,7 +590,7 @@ checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -623,9 +611,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ "bytes", "fnv", @@ -914,9 +902,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" +checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "linked-hash-map" @@ -926,9 +914,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" dependencies = [ "scopeguard", ] @@ -993,9 +981,9 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memoffset" @@ -1203,7 +1191,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.2.6", + "redox_syscall", "smallvec", "winapi", ] @@ -1401,7 +1389,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom 0.2.2", + "getrandom", ] [[package]] @@ -1415,28 +1403,21 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.57" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041" +checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", + "getrandom", + "redox_syscall", ] [[package]] @@ -1461,9 +1442,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.6" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", @@ -1482,9 +1463,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.23" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" @@ -1634,7 +1615,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "assign", "js_int", @@ -1649,12 +1630,13 @@ dependencies = [ "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", + "ruma-state-res", ] [[package]] name = "ruma-api" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "bytes", "http", @@ -1670,7 +1652,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ 
"proc-macro-crate", "proc-macro2", @@ -1681,7 +1663,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "ruma-api", "ruma-common", @@ -1695,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "assign", "bytes", @@ -1715,11 +1697,10 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "indexmap", "js_int", - "maplit", "ruma-identifiers", "ruma-serde", "serde", @@ -1731,7 +1712,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "js_int", "ruma-common", @@ -1745,7 +1726,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1756,7 +1737,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "js_int", "ruma-api", @@ -1771,7 +1752,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "paste", "rand", @@ -1785,9 +1766,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ - "proc-macro2", "quote", "ruma-identifiers-validation", "syn", @@ -1796,12 +1776,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = 
"git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" [[package]] name = "ruma-identity-service-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "js_int", "ruma-api", @@ -1809,13 +1789,12 @@ dependencies = [ "ruma-identifiers", "ruma-serde", "serde", - "serde_json", ] [[package]] name = "ruma-push-gateway-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "js_int", "ruma-api", @@ -1830,7 +1809,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "bytes", "form_urlencoded", @@ -1844,7 +1823,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1855,7 +1834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=8c286e78d41770fe431e7304cc2fe23e383793df#8c286e78d41770fe431e7304cc2fe23e383793df" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" dependencies = [ "base64 0.13.0", "ring", @@ -1865,6 +1844,24 @@ dependencies = [ "untrusted", ] +[[package]] +name = "ruma-state-res" +version = "0.1.0" +source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +dependencies = [ + "itertools 0.10.0", + "js_int", + "maplit", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "ruma-signatures", + "serde", + "serde_json", + "thiserror", + "tracing", +] + [[package]] name = "rust-argon2" version = "0.8.3" @@ -2165,20 +2162,6 @@ dependencies = [ "loom", ] -[[package]] -name = "state-res" -version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=c20893e536bea4d17a9fe6af28428fb17169b56f#c20893e536bea4d17a9fe6af28428fb17169b56f" -dependencies = [ - "itertools 0.10.0", - "log", - "maplit", - "ruma", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "stdweb" version = "0.4.20" @@ -2230,9 +2213,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.70" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9505f307c872bab8eb46f77ae357c8eba1fdacead58ee5a850116b1d7f82883" +checksum = 
"a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" dependencies = [ "proc-macro2", "quote", @@ -2248,7 +2231,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "rand", - "redox_syscall 0.2.6", + "redox_syscall", "remove_dir_all", "winapi", ] @@ -2448,9 +2431,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ "cfg-if 1.0.0", "pin-project-lite", @@ -2471,9 +2454,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" dependencies = [ "lazy_static", ] @@ -2514,9 +2497,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" +checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" dependencies = [ "ansi_term", "chrono", @@ -2646,9 +2629,9 @@ checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -2658,9 +2641,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ "form_urlencoded", "idna", @@ -2684,12 +2667,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.10.2+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index f12420e..13bcd9b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,16 +15,9 @@ edition = "2018" # Used to handle requests # TODO: This can become optional as soon as proper configs are supported rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests -#rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "8c286e78d41770fe431e7304cc2fe23e383793df", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = 
"220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } - -# Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "c20893e536bea4d17a9fe6af28428fb17169b56f", features = ["unstable-pre-spec"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "71686ce8a4d1770a80de216080718fe9de7bd925", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 6554277..24b04d5 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,7 +1,4 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, -}; +use std::{collections::BTreeMap, convert::TryInto}; use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 05501bd..e31e582 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -25,9 +25,9 @@ use ruma::{ EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + state_res::{self, EventMap, RoomVersion}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::EventMap; use std::{ collections::{BTreeMap, HashSet}, convert::{TryFrom, TryInto}, @@ -765,9 +765,11 @@ pub async fn invite_helper( }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + let room_version_id = create_event_content + .map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let content = serde_json::to_value(MemberEventContent { avatar_url: None, @@ -863,7 +865,7 @@ pub async fn invite_helper( db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, - &room_version, + &room_version_id, ) .expect("event is valid, we just created it"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f8d6ab2..658dfb8 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,6 +1,5 @@ use super::State; -use crate::client_server::invite_helper; -use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use crate::{client_server::invite_helper, pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; use log::info; use ruma::{ api::client::{ diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 21264a1..63e70ff 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -31,7 +31,7 @@ pub async fn update_tag_route( tags_event .content .tags - .insert(body.tag.to_string(), body.tag_info.clone()); + .insert(body.tag.clone().into(), body.tag_info.clone()); 
db.account_data.update( Some(&body.room_id), @@ -65,7 +65,7 @@ pub async fn delete_tag_route( tags: BTreeMap::new(), }, }); - tags_event.content.tags.remove(&body.tag); + tags_event.content.tags.remove(&body.tag.clone().into()); db.account_data.update( Some(&body.room_id), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7cee944..c359997 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -16,10 +16,10 @@ use ruma::{ }, push::{self, Action, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, + state_res::{self, Event, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -1236,9 +1236,11 @@ impl Rooms { }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + let room_version_id = create_event_content + .map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = self.get_auth_events( &room_id, @@ -1326,14 +1328,14 @@ impl Rooms { db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, - &room_version, + &room_version_id, ) .expect("event is valid, we just created it"); // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version) + ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -1868,8 +1870,8 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; - let room_version = match make_leave_response.room_version { - Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + let room_version_id = match make_leave_response.room_version { + Some(id @ RoomVersionId::Version6) => id, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -1900,14 +1902,14 @@ impl Rooms { db.globals.server_name().as_str(), db.globals.keypair(), &mut leave_event_stub, - &room_version, + &room_version_id, ) .expect("event is valid, we just created it"); // Generate event id let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version) + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/pdu.rs b/src/pdu.rs index d66247f..a593f0b 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -6,7 +6,7 @@ use ruma::{ AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, + state_res, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; diff --git a/src/server_server.rs b/src/server_server.rs index 3899239..fa460bf 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -35,9 +35,9 @@ use ruma::{ }, serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, + state_res::{self, Event, EventMap, RoomVersion, 
StateMap}, uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, EventMap, StateMap}; use std::{ collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, convert::{TryFrom, TryInto}, @@ -745,12 +745,13 @@ pub fn handle_incoming_pdu<'a>( .deserialize() .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; - let room_version = create_event_content.room_version; + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, &value, - &room_version, + room_version_id, ) { Err(e) => { // Drop @@ -760,7 +761,7 @@ pub fn handle_incoming_pdu<'a>( Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, &room_version) { + match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, Err(_) => return Err("Redaction failed".to_string()), } @@ -1162,7 +1163,7 @@ pub fn handle_incoming_pdu<'a>( match state_res::StateResolution::resolve( &room_id, - &room_version, + room_version_id, &fork_states .into_iter() .map(|map| { @@ -1718,9 +1719,12 @@ pub fn create_join_event_template_route<'a>( }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + let room_version = RoomVersion::new( + &create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }), + ) + .expect("room version is supported"); let content = serde_json::to_value(MemberEventContent { avatar_url: None, From af6fea3d4e75b28c17d9c8a5e00d1d7bf55737a0 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 8 May 2021 02:13:01 +0200 Subject: [PATCH 0569/1727] Refactor some canonical JSON code --- src/pdu.rs | 5 +++- src/ruma_wrapper.rs | 56 +++++++++++++++++++-------------------------- 2 files changed, 28 insertions(+), 33 deletions(-) diff --git a/src/pdu.rs b/src/pdu.rs index a593f0b..84756bc 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -213,7 +213,10 @@ impl PduEvent { pub fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, ) -> Raw { - if let Some(CanonicalJsonValue::Object(unsigned)) = pdu_json.get_mut("unsigned") { + if let Some(unsigned) = pdu_json + .get_mut("unsigned") + .and_then(|val| val.as_object_mut()) + { unsigned.remove("transaction_id"); } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index e4eda87..9143999 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -271,41 +271,33 @@ where http_request = http_request.header(header.name.as_str(), &*header.value); } - match &mut json_body { - Some(CanonicalJsonValue::Object(json_body)) => { - let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid") - }); + if let Some(json_body) = json_body.as_mut().and_then(|val| val.as_object_mut()) { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid") + }); - if let Some(initial_request) = json_body - .get("auth") - .and_then(|auth| auth.as_object()) - .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) - .and_then(|session| { - 
db.uiaa - .get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - .ok() - .flatten() - }) - { - match initial_request { - CanonicalJsonValue::Object(initial_request) => { - for (key, value) in initial_request.into_iter() { - json_body.entry(key).or_insert(value); - } - } - _ => {} - } + if let Some(CanonicalJsonValue::Object(initial_request)) = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + db.uiaa + .get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + .ok() + .flatten() + }) + { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); } - body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); } - _ => {} + body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); } let http_request = http_request.body(&*body).unwrap(); From f62258ba35d7906eec93bfb473e1967361c47195 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 May 2021 20:04:28 +0200 Subject: [PATCH 0570/1727] improvement: bug fixes and refactors - power level content override adds to the default event instead of replacing it - sending code refactored to make edus possible - remove presence events when restarting conduit - remove room_id field from read receipts over /sync - handle incoming read receipts - fix array bounds bug in server_server.rs --- Cargo.toml | 4 + src/client_server/membership.rs | 8 +- src/client_server/room.rs | 23 +- src/database.rs | 5 +- src/database/rooms/edus.rs | 11 +- src/database/sending.rs | 468 ++++++++++++++++---------------- src/error.rs | 4 +- src/main.rs | 9 +- src/server_server.rs | 101 ++++--- 9 files changed, 356 insertions(+), 277 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 13bcd9b..78496e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,3 +106,7 @@ conf-files = [ ] maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } + +# For flamegraphs: +#[profile.release] +#debug = true diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e31e582..63c103d 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -568,7 +568,13 @@ async fn join_room_by_id_helper( { let (event_id, value) = match result { Ok(t) => t, - Err(_) => continue, + Err(e) => { + warn!( + "PDU could not be verified: {:?} {:?} {:?}", + e, event_id, pdu + ); + continue; + } }; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 658dfb8..0bc67d4 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -111,11 +111,7 @@ pub async fn create_room_route( } } - let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { - serde_json::from_str(power_levels.json().get()).map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })? 
- } else { + let mut power_levels_content = serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { ban: 50.into(), events: BTreeMap::new(), @@ -130,8 +126,21 @@ pub async fn create_room_route( room: 50.into(), }, }) - .expect("event is valid, we just created it") - }; + .expect("event is valid, we just created it"); + + if let Some(power_level_content_override) = &body.power_level_content_override { + let json = serde_json::from_str::>( + power_level_content_override.json().get(), + ) + .map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") + })?; + + for (key, value) in json { + power_levels_content[key] = value; + } + } + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, diff --git a/src/database.rs b/src/database.rs index 6504f9c..62b3a40 100644 --- a/src/database.rs +++ b/src/database.rs @@ -198,7 +198,7 @@ impl Database { }, sending: sending::Sending { servernamepduids: db.open_tree("servernamepduids")?, - servercurrentpdus: db.open_tree("servercurrentpdus")?, + servercurrentevents: db.open_tree("servercurrentevents")?, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), }, admin: admin::Admin { @@ -217,6 +217,9 @@ impl Database { _db: db, }; + // This data is probably outdated + db.rooms.edus.presenceid_presence.clear()?; + db.admin.start_handler(db.clone(), admin_receiver); Ok(db) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index f69e897..56000e0 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -6,6 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, + signatures::CanonicalJsonObject, RoomId, UInt, UserId, }; use std::{ @@ -88,9 +89,13 @@ impl RoomEdus { .filter_map(|r| r.ok()) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, v)| { - Ok(serde_json::from_slice(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid.") - })?) + let mut json = serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") + })?; + json.remove("room_id"); + Ok(Raw::from_json( + serde_json::value::to_raw_value(&json).expect("json is valid raw value"), + )) })) } diff --git a/src/database/sending.rs b/src/database/sending.rs index 5495b36..e530396 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -28,11 +28,44 @@ pub enum OutgoingKind { Normal(Box), } +impl OutgoingKind { + pub fn get_prefix(&self) -> Vec { + let mut prefix = match self { + OutgoingKind::Appservice(server) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(server.as_bytes()); + p + } + OutgoingKind::Push(user, pushkey) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); + p + } + OutgoingKind::Normal(server) => { + let mut p = Vec::new(); + p.extend_from_slice(server.as_bytes()); + p + } + }; + prefix.push(0xff); + + prefix + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum SendingEventType { + Pdu(Vec), + Edu(Vec), +} + #[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId - pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId + pub(super) servercurrentevents: sled::Tree, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent pub(super) maximum_requests: Arc, } @@ -45,7 +78,7 @@ enum TransactionStatus { impl Sending { pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); - let servercurrentpdus = self.servercurrentpdus.clone(); + let servercurrentevents = self.servercurrentevents.clone(); let db = db.clone(); @@ -56,14 +89,14 @@ impl Sending { let mut subscriber = servernamepduids.watch_prefix(b""); let mut current_transaction_status = HashMap::, TransactionStatus>::new(); - let mut initial_transactions = HashMap::>>::new(); - for (key, outgoing_kind, pdu) in servercurrentpdus + let mut initial_transactions = HashMap::>::new(); + for (key, outgoing_kind, event) in servercurrentevents .iter() .filter_map(|r| r.ok()) .filter_map(|(key, _)| { - Self::parse_servercurrentpdus(&key) + Self::parse_servercurrentevent(&key) .ok() - .map(|(k, p)| (key, k, p.to_vec())) + .map(|(k, e)| (key, k, e)) }) { let entry = initial_transactions @@ -72,39 +105,20 @@ impl Sending { if entry.len() > 30 { warn!( - "Dropping some current pdu: {:?} {:?} {:?}", - key, outgoing_kind, pdu + "Dropping some current events: {:?} {:?} {:?}", + key, outgoing_kind, event ); - servercurrentpdus.remove(key).unwrap(); + servercurrentevents.remove(key).unwrap(); continue; } - entry.push(pdu); + entry.push(event); } - for (outgoing_kind, pdus) in initial_transactions { - let mut prefix = match &outgoing_kind { - OutgoingKind::Appservice(server) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(server.as_bytes()); - p - } - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(&user); - p.push(0xff); - p.extend_from_slice(&pushkey); - p - } - OutgoingKind::Normal(server) => { - let mut p = Vec::new(); - p.extend_from_slice(server.as_bytes()); - p - } - }; - prefix.push(0xff); - current_transaction_status.insert(prefix, TransactionStatus::Running); - futures.push(Self::handle_event(outgoing_kind.clone(), pdus, &db)); + for (outgoing_kind, events) in initial_transactions { + current_transaction_status + .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); + futures.push(Self::handle_events(outgoing_kind.clone(), events, &db)); } loop { @@ -112,58 +126,46 @@ impl Sending { Some(response) = futures.next() => { match response { Ok(outgoing_kind) => { - let mut prefix = match &outgoing_kind { - OutgoingKind::Appservice(server) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(server.as_bytes()); - p - } - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(&user); - p.push(0xff); - p.extend_from_slice(&pushkey); - p - }, - OutgoingKind::Normal(server) => { - let mut p = vec![]; - p.extend_from_slice(server.as_bytes()); - p - }, - }; - prefix.push(0xff); - - for key in servercurrentpdus + let prefix = outgoing_kind.get_prefix(); + for key in servercurrentevents .scan_prefix(&prefix) .keys() .filter_map(|r| r.ok()) { - servercurrentpdus.remove(key).unwrap(); + servercurrentevents.remove(key).unwrap(); } // Find events that have been added since starting the last request - let new_pdus = servernamepduids + let new_events = servernamepduids .scan_prefix(&prefix) .keys() .filter_map(|r| r.ok()) 
.map(|k| { - k[prefix.len()..].to_vec() + SendingEventType::Pdu(k[prefix.len()..].to_vec()) }) .take(30) .collect::>(); - if !new_pdus.is_empty() { - for pdu_id in &new_pdus { + // TODO: find edus + + if !new_events.is_empty() { + // Insert pdus we found + for event in &new_events { let mut current_key = prefix.clone(); - current_key.extend_from_slice(pdu_id); - servercurrentpdus.insert(¤t_key, &[]).unwrap(); - servernamepduids.remove(¤t_key).unwrap(); + match event { + SendingEventType::Pdu(b) | + SendingEventType::Edu(b) => { + current_key.extend_from_slice(&b); + servercurrentevents.insert(¤t_key, &[]).unwrap(); + servernamepduids.remove(¤t_key).unwrap(); + } + } } futures.push( - Self::handle_event( + Self::handle_events( outgoing_kind.clone(), - new_pdus, + new_events, &db, ) ); @@ -172,29 +174,7 @@ impl Sending { } } Err((outgoing_kind, _)) => { - let mut prefix = match &outgoing_kind { - OutgoingKind::Appservice(serv) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(serv.as_bytes()); - p - }, - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(&user); - p.push(0xff); - p.extend_from_slice(&pushkey); - p - }, - OutgoingKind::Normal(serv) => { - let mut p = vec![]; - p.extend_from_slice(serv.as_bytes()); - p - }, - }; - - prefix.push(0xff); - - current_transaction_status.entry(prefix).and_modify(|e| *e = match e { + current_transaction_status.entry(outgoing_kind.get_prefix()).and_modify(|e| *e = match e { TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), TransactionStatus::Failed(_, _) => { @@ -206,97 +186,17 @@ impl Sending { }; }, Some(event) = &mut subscriber => { - if let sled::Event::Insert { key, .. 
} = event { // New sled version: //for (_tree, key, value_opt) in &event { // if value_opt.is_none() { // continue; // } - let servernamepduid = key.clone(); - - let mut retry = false; - - if let Some((outgoing_kind, prefix, pdu_id)) = Self::parse_servercurrentpdus(&servernamepduid) - .ok() - .map(|(outgoing_kind, pdu_id)| { - let mut prefix = match &outgoing_kind { - OutgoingKind::Appservice(serv) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(serv.as_bytes()); - p - }, - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(&user); - p.push(0xff); - p.extend_from_slice(&pushkey); - p - }, - OutgoingKind::Normal(serv) => { - let mut p = vec![]; - p.extend_from_slice(serv.as_bytes()); - p - }, - }; - prefix.push(0xff); - - (outgoing_kind, prefix, pdu_id) - }) - .filter(|(_, prefix, _)| { - let entry = current_transaction_status.entry(prefix.clone()); - let mut allow = true; - - entry.and_modify(|e| match e { - TransactionStatus::Running | TransactionStatus::Retrying(_) => { - allow = false; // already running - }, - TransactionStatus::Failed(tries, time) => { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - if time.elapsed() < min_elapsed_duration { - allow = false; - } else { - retry = true; - *e = TransactionStatus::Retrying(*tries); - } - } - }).or_insert(TransactionStatus::Running); - - allow - }) - { - let mut pdus = Vec::new(); - - if retry { - // We retry the previous transaction - for pdu in servercurrentpdus - .scan_prefix(&prefix) - .filter_map(|r| r.ok()) - .filter_map(|(key, _)| { - Self::parse_servercurrentpdus(&key) - .ok() - .map(|(_, p)| p.to_vec()) - }) - { - pdus.push(pdu); - } - } else { - servercurrentpdus.insert(&key, &[]).unwrap(); - servernamepduids.remove(&key).unwrap(); - pdus.push(pdu_id.to_vec()); + if let sled::Event::Insert { key, .. 
} = event { + if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { + if let Some(events) = Self::select_events(&outgoing_kind, vec![(event, key)], &mut current_transaction_status, &servercurrentevents, &servernamepduids) { + futures.push(Self::handle_events(outgoing_kind, events, &db)); } - futures.push( - Self::handle_event( - outgoing_kind, - pdus, - &db, - ) - ); } } } @@ -305,6 +205,73 @@ impl Sending { }); } + fn select_events( + outgoing_kind: &OutgoingKind, + new_events: Vec<(SendingEventType, IVec)>, // Events we want to send: event and full key + current_transaction_status: &mut HashMap, TransactionStatus>, + servercurrentevents: &sled::Tree, + servernamepduids: &sled::Tree, + ) -> Option> { + let mut retry = false; + let mut allow = true; + + let prefix = outgoing_kind.get_prefix(); + let entry = current_transaction_status.entry(prefix.clone()); + + entry + .and_modify(|e| match e { + TransactionStatus::Running | TransactionStatus::Retrying(_) => { + allow = false; // already running + } + TransactionStatus::Failed(tries, time) => { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + allow = false; + } else { + retry = true; + *e = TransactionStatus::Retrying(*tries); + } + } + }) + .or_insert(TransactionStatus::Running); + + if !allow { + return None; + } + + let mut events = Vec::new(); + + if retry { + // We retry the previous transaction + for key in servercurrentevents + .scan_prefix(&prefix) + .keys() + .filter_map(|r| r.ok()) + { + if let Ok((_, e)) = Self::parse_servercurrentevent(&key) { + events.push(e); + } + } + } else { + for (e, full_key) in new_events { + servercurrentevents.insert(&full_key, &[]).unwrap(); + + // If it was a PDU we have to unqueue it + // TODO: don't try to unqueue EDUs + servernamepduids.remove(&full_key).unwrap(); + + events.push(e); + } + } + + Some(events) + } + #[tracing::instrument(skip(self))] pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: IVec) -> Result<()> { let mut key = b"$".to_vec(); @@ -338,7 +305,7 @@ impl Sending { } #[tracing::instrument] - fn calculate_hash(keys: &[Vec]) -> Vec { + fn calculate_hash(keys: &[&[u8]]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); @@ -346,33 +313,37 @@ impl Sending { } #[tracing::instrument(skip(db))] - async fn handle_event( + async fn handle_events( kind: OutgoingKind, - pdu_ids: Vec>, + events: Vec, db: &Database, ) -> std::result::Result { match &kind { OutgoingKind::Appservice(server) => { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - db.rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdu_jsons.push(db.rooms + .get_pdu_from_id(&pdu_id) + .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( - server.clone(), + kind.clone(), Error::bad_database( "[Appservice] Event in servernamepduids not found in db.", ), ) })? - .to_any_event(), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + .to_any_event()) + } + SendingEventType::Edu(_) => { + // Appservices don't need EDUs (?) 
+ } + } + } + let permit = db.sending.maximum_requests.acquire().await; let response = appservice_server::send_request( @@ -384,7 +355,14 @@ impl Sending { appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: &base64::encode_config( - Self::calculate_hash(&pdu_ids), + Self::calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ), base64::URL_SAFE_NO_PAD, ), }, @@ -398,25 +376,30 @@ impl Sending { response } OutgoingKind::Push(user, pushkey) => { - let pdus = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Vec, Error)>( - db.rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (pushkey.clone(), e))? - .ok_or_else(|| { - ( - pushkey.clone(), - Error::bad_database( - "[Push] Event in servernamepduids not found in db.", - ), - ) - })?, - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + let mut pdus = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdus.push( + db.rooms + .get_pdu_from_id(&pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Push] Event in servernamepduids not found in db.", + ), + ) + })?, + ); + } + SendingEventType::Edu(_) => { + // Push gateways don't need EDUs (?) + } + } + } for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) @@ -427,13 +410,13 @@ impl Sending { let userid = UserId::try_from(utils::string_from_bytes(user).map_err(|_| { ( - OutgoingKind::Push(user.clone(), pushkey.clone()), + kind.clone(), Error::bad_database("Invalid push user string in db."), ) })?) .map_err(|_| { ( - OutgoingKind::Push(user.clone(), pushkey.clone()), + kind.clone(), Error::bad_database("Invalid push user id in db."), ) })?; @@ -484,15 +467,17 @@ impl Sending { Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) } OutgoingKind::Normal(server) => { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (OutgoingKind, Error)>( + let mut edu_jsons = Vec::new(); + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { // TODO: check room version and remove event_id if needed - serde_json::from_str( + pdu_jsons.push(serde_json::from_str( PduEvent::convert_to_outgoing_federation_event( db.rooms - .get_pdu_json_from_id(pdu_id) + .get_pdu_json_from_id(&pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
.ok_or_else(|| { ( @@ -506,11 +491,15 @@ impl Sending { .json() .get(), ) - .expect("Raw<..> is always valid"), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + .expect("Raw<..> is always valid")); + } + SendingEventType::Edu(edu) => { + edu_jsons.push( + serde_json::from_slice(edu).expect("Raw<..> is always valid"), + ); + } + } + } let permit = db.sending.maximum_requests.acquire().await; @@ -520,10 +509,17 @@ impl Sending { send_transaction_message::v1::Request { origin: db.globals.server_name(), pdus: &pdu_jsons, - edus: &[], + edus: &edu_jsons, origin_server_ts: SystemTime::now(), transaction_id: &base64::encode_config( - Self::calculate_hash(&pdu_ids), + Self::calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ), base64::URL_SAFE_NO_PAD, ), }, @@ -546,13 +542,13 @@ impl Sending { } } - fn parse_servercurrentpdus(key: &IVec) -> Result<(OutgoingKind, IVec)> { + fn parse_servercurrentevent(key: &IVec) -> Result<(OutgoingKind, SendingEventType)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { let mut parts = key[1..].splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); - let pdu = parts + let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; let server = utils::string_from_bytes(&server).map_err(|_| { @@ -563,7 +559,11 @@ impl Sending { OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), - IVec::from(pdu), + if event.starts_with(b"*") { + SendingEventType::Edu(event[1..].to_vec()) + } else { + SendingEventType::Pdu(event.to_vec()) + }, ) } else if key.starts_with(b"$") { let mut parts = key[1..].splitn(3, |&b| b == 0xff); @@ -572,18 +572,22 @@ impl Sending { let pushkey = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let pdu = parts + let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; ( OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), - IVec::from(pdu), + if event.starts_with(b"*") { + SendingEventType::Edu(event[1..].to_vec()) + } else { + SendingEventType::Pdu(event.to_vec()) + }, ) } else { let mut parts = key.splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); - let pdu = parts + let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; let server = utils::string_from_bytes(&server).map_err(|_| { @@ -594,7 +598,11 @@ impl Sending { OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), - IVec::from(pdu), + if event.starts_with(b"*") { + SendingEventType::Edu(event[1..].to_vec()) + } else { + SendingEventType::Pdu(event.to_vec()) + }, ) }) } diff --git a/src/error.rs b/src/error.rs index 65c5b4f..6c37bed 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use log::error; +use log::{error, warn}; use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; @@ -92,6 +92,8 @@ where _ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR), }; + warn!("{}: {}", status_code, message); + RumaResponse::from(RumaError { kind, message, diff --git a/src/main.rs b/src/main.rs index 1c058b0..87928cd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -205,13 +205,16 @@ async fn main() { let root = 
span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); + + let rocket = setup_rocket(raw_config, db); + rocket.launch().await.unwrap(); } else { std::env::set_var("CONDUIT_LOG", config.log); pretty_env_logger::init_custom_env("CONDUIT_LOG"); - } - let rocket = setup_rocket(raw_config, db); - rocket.launch().await.unwrap(); + let rocket = setup_rocket(raw_config, db); + rocket.launch().await.unwrap(); + } } #[catch(404)] diff --git a/src/server_server.rs b/src/server_server.rs index fa460bf..77eb448 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -27,11 +27,12 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ + receipt::{ReceiptEvent, ReceiptEventContent}, room::{ create::CreateEventContent, member::{MemberEventContent, MembershipState}, }, - EventType, + AnyEphemeralRoomEvent, AnyEvent as EduEvent, EventType, }, serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, @@ -585,35 +586,6 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - for edu in body - .edus - .iter() - .map(|edu| serde_json::from_str::(edu.json().get())) - .filter_map(|r| r.ok()) - { - match edu { - Edu::Presence(_) => {} - Edu::Receipt(_) => {} - Edu::Typing(typing) => { - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; - } - } - Edu::DeviceListUpdate(_) => {} - Edu::DirectToDevice(_) => {} - Edu::_Custom(_) => {} - } - } - let mut resolved_map = BTreeMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); @@ -659,6 +631,73 @@ pub async fn send_transaction_message_route<'a>( } } + for edu in body + .edus + .iter() + .map(|edu| serde_json::from_str::(edu.json().get())) + .filter_map(|r| r.ok()) + { + match edu { + Edu::Presence(_) => {} + Edu::Receipt(receipt) => { + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + db.rooms.get_pdu_count(&id).ok().flatten().map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert( + event_id.to_owned(), + ruma::events::receipt::Receipts { + read: Some(user_receipts), + }, + ); + + let event = + EduEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + })); + db.rooms.edus.readreceipt_update( + &user_id, + &room_id, + event, + &db.globals, + )?; + } else { + warn!("No known event ids in read receipt: {:?}", user_updates); + } + } + } + } + Edu::Typing(typing) => { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms + .edus + .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; + } + } + Edu::DeviceListUpdate(_) => {} + Edu::DirectToDevice(_) => {} + Edu::_Custom(_) => {} + } + } + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } @@ -1134,7 +1173,7 @@ pub fn handle_incoming_pdu<'a>( .await { // This should always contain exactly one element when Ok - Ok(events) => 
state_auth.push(events[0].clone()), + Ok(events) => state_auth.extend_from_slice(&events), Err(e) => { debug!("Event was not present: {}", e); } From 3ea0d2b567b59dd4f8dc7ede9ab8bbece22afdf4 Mon Sep 17 00:00:00 2001 From: Jonas Fowl Date: Thu, 13 May 2021 07:57:11 +0000 Subject: [PATCH 0571/1727] Try to improve CI build times by caching --- .gitlab-ci.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d09a881..b3dcd5e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,9 +1,18 @@ image: "rust:latest" +cache: + paths: + - target + - cargohome + + variables: GIT_SUBMODULE_STRATEGY: recursive + CARGO_HOME: "cargohome" + FF_USE_FASTZIP: 1 before_script: + - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - rustup component add clippy rustfmt From 24793891e052cfe66bde1b1fd65d75584c7c0949 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 14 May 2021 11:03:18 +0200 Subject: [PATCH 0572/1727] feat: implement GET /presence --- Cargo.toml | 4 +-- src/client_server/presence.rs | 51 ++++++++++++++++++++++++++++++++--- src/database/rooms/edus.rs | 41 ++++++++++++++++++++++++++++ src/main.rs | 1 + 4 files changed, 92 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 78496e4..950924a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,5 +108,5 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } # For flamegraphs: -#[profile.release] -#debug = true +[profile.release] +debug = true diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 175853f..9f4f7a3 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,10 +1,10 @@ use super::State; use crate::{utils, ConduitResult, Database, Ruma}; -use ruma::api::client::r0::presence::set_presence; -use std::convert::TryInto; +use ruma::api::client::r0::presence::{get_presence, set_presence}; +use std::{convert::TryInto, time::Duration}; #[cfg(feature = "conduit_bin")] -use rocket::put; +use rocket::{get, put}; #[cfg_attr( feature = "conduit_bin", @@ -46,3 +46,48 @@ pub async fn set_presence_route( Ok(set_presence::Response.into()) } + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/presence/<_>/status", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_presence_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mut presence_event = None; + + for room_id in db + .rooms + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()]) + { + let room_id = room_id?; + + if let Some(presence) = db + .rooms + .edus + .get_last_presence_event(&sender_user, &room_id)? + { + presence_event = Some(presence); + } + } + + if let Some(presence) = presence_event { + Ok(get_presence::Response { + // TODO: Should ruma just use the presenceeventcontent type here? 
+ status_msg: presence.content.status_msg, + currently_active: presence.content.currently_active, + last_active_ago: presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + presence: presence.content.presence, + } + .into()) + } else { + todo!(); + } +} diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 56000e0..3bf2e06 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -367,6 +367,47 @@ impl RoomEdus { .transpose() } + pub fn get_last_presence_event( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result> { + let last_update = match self.last_presence_update(user_id)? { + Some(last) => last, + None => return Ok(None), + }; + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&last_update.to_be_bytes()); + presence_id.push(0xff); + presence_id.extend_from_slice(&user_id.as_bytes()); + + self.presenceid_presence + .get(presence_id)? + .map(|value| { + let mut presence = serde_json::from_slice::(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + Ok(presence) + }) + .transpose() + } + /// Sets all users to offline who have been quiet for too long. pub fn presence_maintain( &self, diff --git a/src/main.rs b/src/main.rs index 5005a37..57eb0d0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -69,6 +69,7 @@ fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket Date: Mon, 17 May 2021 10:25:27 +0200 Subject: [PATCH 0573/1727] feat: send read receipts over federation currently they will only be sent if a PDU has to be sent as well --- src/client_server/sync.rs | 1 + src/database.rs | 29 +++++++++- src/database/globals.rs | 12 +++++ src/database/rooms.rs | 28 ++++++++++ src/database/rooms/edus.rs | 29 ++++++++-- src/database/sending.rs | 108 +++++++++++++++++++++++++++++++++++-- 6 files changed, 197 insertions(+), 10 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 66a1e13..fe6f692 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -406,6 +406,7 @@ pub async fn sync_events_route( .edus .readreceipts_since(&room_id, since)? .filter_map(|r| r.ok()) // Filter out buggy events + .map(|(_, _, v)| v) .collect::>(); if db.rooms.edus.last_typing_update(&room_id, &db.globals)? 
> since { diff --git a/src/database.rs b/src/database.rs index 62b3a40..6b68b9e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -14,7 +14,7 @@ pub mod users; use crate::{Error, Result}; use directories::ProjectDirs; use futures::StreamExt; -use log::info; +use log::{error, info}; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; @@ -160,6 +160,7 @@ impl Database { tokenids: db.open_tree("tokenids")?, roomserverids: db.open_tree("roomserverids")?, + serverroomids: db.open_tree("serverroomids")?, userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, @@ -197,6 +198,7 @@ impl Database { userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, }, sending: sending::Sending { + servername_educount: db.open_tree("servername_educount")?, servernamepduids: db.open_tree("servernamepduids")?, servercurrentevents: db.open_tree("servercurrentevents")?, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), @@ -217,6 +219,31 @@ impl Database { _db: db, }; + // MIGRATIONS + if db.globals.database_version()? < 1 { + for roomserverid in db.rooms.roomserverids.iter().keys() { + let roomserverid = roomserverid?; + let mut parts = roomserverid.split(|&b| b == 0xff); + let room_id = parts.next().expect("split always returns one element"); + let servername = match parts.next() { + Some(s) => s, + None => { + error!("Migration: Invalid roomserverid in db."); + continue; + } + }; + let mut serverroomid = servername.to_vec(); + serverroomid.push(0xff); + serverroomid.extend_from_slice(room_id); + + db.rooms.serverroomids.insert(serverroomid, &[])?; + } + + db.globals.bump_database_version(1)?; + + info!("Migration: 0 -> 1 finished"); + } + // This data is probably outdated db.rooms.edus.presenceid_presence.clear()?; diff --git a/src/database/globals.rs b/src/database/globals.rs index 04f8d29..c1eafe0 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -258,4 +258,16 @@ impl Globals { } Ok(response) } + + pub fn database_version(&self) -> Result { + self.globals.get("version")?.map_or(Ok(0), |version| { + utils::u64_from_bytes(&version) + .map_err(|_| Error::bad_database("Database version id is invalid.")) + }) + } + + pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.globals.insert("version", &new_version.to_be_bytes())?; + Ok(()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c359997..48e6e11 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -50,6 +50,8 @@ pub struct Rooms { /// Participating servers in a room. 
pub(super) roomserverids: sled::Tree, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: sled::Tree, // ServerRoomId = ServerName + RoomId + pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, @@ -1597,6 +1599,10 @@ impl Rooms { roomserver_id.push(0xff); roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); + let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -1700,6 +1706,7 @@ impl Rooms { } self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -1725,6 +1732,7 @@ impl Rooms { } self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; self.userroomid_invitestate.insert( &userroom_id, serde_json::to_vec(&last_state.unwrap_or_default()) @@ -1745,6 +1753,7 @@ impl Rooms { .all(|u| u.server_name() != user_id.server_name()) { self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; } self.userroomid_leftstate.insert( &userroom_id, @@ -2152,6 +2161,25 @@ impl Rooms { }) } + /// Returns an iterator of all rooms a server participates in (as far as we know). + pub fn server_rooms(&self, server: &ServerName) -> impl Iterator> { + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xff); + + self.serverroomids.scan_prefix(prefix).keys().map(|key| { + Ok(RoomId::try_from( + utils::string_from_bytes( + &key? + .rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid."))?) + }) + } + /// Returns an iterator over all joined members of a room. #[tracing::instrument(skip(self))] pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 3bf2e06..89f2905 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -76,9 +76,12 @@ impl RoomEdus { &self, room_id: &RoomId, since: u64, - ) -> Result>>> { + ) -> Result< + impl Iterator)>>, + > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); + let prefix2 = prefix.clone(); let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since @@ -87,14 +90,30 @@ impl RoomEdus { .readreceiptid_readreceipt .range(&*first_possible_edu..) 
.filter_map(|r| r.ok()) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(_, v)| { + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = + utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::try_from( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + let mut json = serde_json::from_slice::(&v).map_err(|_| { Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") })?; json.remove("room_id"); - Ok(Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), + + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json).expect("json is valid raw value"), + ), )) })) } diff --git a/src/database/sending.rs b/src/database/sending.rs index e530396..199bd05 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,5 +1,5 @@ use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, @@ -14,8 +14,15 @@ use log::{error, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, federation, OutgoingRequest}, - events::{push_rules, EventType}, + api::{ + appservice, + federation::{ + self, + transactions::edu::{Edu, ReceiptContent, ReceiptData, ReceiptMap}, + }, + OutgoingRequest, + }, + events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, push, ServerName, UInt, UserId, }; use sled::IVec; @@ -64,6 +71,7 @@ pub enum SendingEventType { #[derive(Clone)] pub struct Sending { /// The state for a given state hash. + pub(super) servername_educount: sled::Tree, // EduCount: Count of last EDU sync pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId pub(super) servercurrentevents: sled::Tree, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent pub(super) maximum_requests: Arc, @@ -194,7 +202,7 @@ impl Sending { if let sled::Event::Insert { key, .. 
} = event { if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { - if let Some(events) = Self::select_events(&outgoing_kind, vec![(event, key)], &mut current_transaction_status, &servercurrentevents, &servernamepduids) { + if let Some(events) = Self::select_events(&outgoing_kind, vec![(event, key)], &mut current_transaction_status, &servercurrentevents, &servernamepduids, &db) { futures.push(Self::handle_events(outgoing_kind, events, &db)); } } @@ -211,6 +219,7 @@ impl Sending { current_transaction_status: &mut HashMap, TransactionStatus>, servercurrentevents: &sled::Tree, servernamepduids: &sled::Tree, + db: &Database, ) -> Option> { let mut retry = false; let mut allow = true; @@ -267,11 +276,102 @@ impl Sending { events.push(e); } + + match outgoing_kind { + OutgoingKind::Normal(server_name) => { + if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { + events.extend_from_slice(&select_edus); + db.sending + .servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + .unwrap(); + } + } + _ => {} + } } Some(events) } + pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec, u64)> { + // u64: count of last edu + let since = db + .sending + .servername_educount + .get(server.as_bytes())? + .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + })?; + let mut events = Vec::new(); + let mut max_edu_count = since; + 'outer: for room_id in db.rooms.server_rooms(server) { + let room_id = room_id?; + for r in db.rooms.edus.readreceipts_since(&room_id, since)? { + let (user_id, count, read_receipt) = r?; + + if count > max_edu_count { + max_edu_count = count; + } + + if user_id.server_name() != db.globals.server_name() { + continue; + } + + let event = + serde_json::from_str::(&read_receipt.json().get()) + .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; + let federation_event = match event { + AnySyncEphemeralRoomEvent::Receipt(r) => { + let mut read = BTreeMap::new(); + + let (event_id, receipt) = r + .content + .0 + .into_iter() + .next() + .expect("we only use one event per read receipt"); + let receipt = receipt + .read + .expect("our read receipts always set this") + .remove(&user_id) + .expect("our read receipts always have the user here"); + + read.insert( + user_id, + ReceiptData { + data: receipt.clone(), + event_ids: vec![event_id.clone()], + }, + ); + + let receipt_map = ReceiptMap { read }; + + let mut receipts = BTreeMap::new(); + receipts.insert(room_id.clone(), receipt_map); + + Edu::Receipt(ReceiptContent { receipts }) + } + _ => { + Error::bad_database("Invalid event type in read_receipts"); + continue; + } + }; + + events.push(SendingEventType::Edu( + serde_json::to_vec(&federation_event).expect("json can be serialized"), + )); + + if events.len() >= 20 { + break 'outer; + } + } + } + + Ok((events, max_edu_count)) + } + #[tracing::instrument(skip(self))] pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: IVec) -> Result<()> { let mut key = b"$".to_vec(); From ae41bc50677165d722ef4f7c7b3a9ee89e39d0b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 May 2021 10:58:44 +0200 Subject: [PATCH 0574/1727] fix: run state res with old current state again I'm a bit torn on the "auth check based on the current state of the room". It can mean multiple things: 1. The state of the room before the homeserver looked at the event at all. 
But that means if a message event from a user arrives, but we didn't see their join event before, we soft fail the message (even though we would find the join event when going through the auth events of the event and doing state res) 2. The state of the room after doing state-res with the event and our previous room state. We need to do this state resolution to find the new room state anyway, so we could just use the new room state for the auth check. The problem is that if the incoming event is a membership leave event, the new room state does not allow another leave event. This is obviously the wrong option. 3. The state of the room after doing state-res with the state **before** the event and our previous room state. This will mean a lot more calculations because we have to run state-res again We used 2. before and now use 1. again --- src/server_server.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 77eb448..1a1716d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1126,9 +1126,9 @@ pub fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to load room state.".to_owned())? .into_iter() .map(|(k, v)| (k, Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(current_state); + fork_states.insert(current_state.clone()); // We also add state after incoming event to the fork states extremities.insert(incoming_pdu.event_id.clone()); @@ -1229,12 +1229,7 @@ pub fn handle_incoming_pdu<'a>( &room_version, &incoming_pdu, previous_create, - &new_room_state - .iter() - .filter_map(|(k, v)| { - Some((k.clone(), Arc::new(db.rooms.get_pdu(&v).ok().flatten()?))) - }) - .collect(), + ¤t_state, None, ) .map_err(|_e| "Auth check failed.".to_owned())?; From 09157b2096704119ccb9416ef0ae9d97872fd53f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 20 May 2021 23:46:52 +0200 Subject: [PATCH 0575/1727] improvement: federation get_keys and optimize signingkey storage - get encryption keys over federation - optimize signing key storage - rate limit parsing of bad events - rate limit signature fetching - dependency bumps --- Cargo.lock | 203 +++++++++++++++++-------------- Cargo.toml | 7 +- src/client_server/config.rs | 33 +++-- src/client_server/keys.rs | 155 ++++++++++++----------- src/client_server/membership.rs | 47 +++++-- src/client_server/read_marker.rs | 52 ++++---- src/client_server/sync.rs | 6 +- src/database.rs | 2 +- src/database/account_data.rs | 6 +- src/database/globals.rs | 92 +++++++------- src/database/pusher.rs | 3 +- src/database/rooms/edus.rs | 4 +- src/database/sending.rs | 31 +++-- src/database/users.rs | 29 ++--- src/main.rs | 1 + src/pdu.rs | 17 +-- src/ruma_wrapper.rs | 51 ++++---- src/server_server.rs | 198 +++++++++++++++++++++++++----- 18 files changed, 566 insertions(+), 371 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e750620..e43638d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -316,9 +316,9 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.13" +version = "0.99.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" +checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" dependencies = [ "convert_case", "proc-macro2", @@ -470,9 +470,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" +checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" dependencies = [ "futures-channel", "futures-core", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" +checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" dependencies = [ "futures-core", "futures-sink", @@ -495,15 +495,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" +checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" [[package]] name = "futures-executor" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" +checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" dependencies = [ "futures-core", "futures-task", @@ -512,16 +512,17 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" +checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" [[package]] name = "futures-macro" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" +checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -530,22 +531,23 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" +checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" [[package]] name = "futures-task" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" +checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" [[package]] name = "futures-util" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" +checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ + "autocfg", "futures-channel", "futures-core", "futures-io", @@ -676,9 +678,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ "bytes", "http", @@ -687,9 +689,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" +checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" @@ -784,6 +786,15 @@ dependencies = [ "serde", ] +[[package]] +name = "indoc" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a75aeaaef0ce18b58056d306c27b07436fbb34b8816c53094b76dd81803136" +dependencies = [ + "unindent", +] + [[package]] name = "inlinable_string" version = "0.1.14" @@ -864,9 +875,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.50" +version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" dependencies = [ "wasm-bindgen", ] @@ -1129,9 +1140,9 @@ checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "opentelemetry" @@ -1614,8 +1625,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.1.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "assign", "js_int", @@ -1635,8 +1646,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.17.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "bytes", "http", @@ -1651,8 +1662,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.4" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.17.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1662,8 +1673,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "ruma-api", "ruma-common", @@ -1676,8 +1687,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.10.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "assign", "bytes", @@ -1696,8 +1707,8 @@ dependencies = [ [[package]] name = "ruma-common" -version 
= "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.5.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "indexmap", "js_int", @@ -1711,9 +1722,10 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.22.2" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ + "indoc", "js_int", "ruma-common", "ruma-events-macros", @@ -1725,8 +1737,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.3" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.22.2" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1736,8 +1748,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.1.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.1.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "js_int", "ruma-api", @@ -1751,8 +1763,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.19.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "paste", "rand", @@ -1765,8 +1777,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.19.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.19.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "quote", "ruma-identifiers-validation", @@ -1776,12 +1788,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" [[package]] name = "ruma-identity-service-api" -version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.1.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "js_int", "ruma-api", @@ -1793,8 +1805,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.1.0" +source = 
"git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "js_int", "ruma-api", @@ -1808,8 +1820,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "bytes", "form_urlencoded", @@ -1822,8 +1834,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1833,8 +1845,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +version = "0.7.1" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "base64 0.13.0", "ring", @@ -1847,11 +1859,12 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=71686ce8a4d1770a80de216080718fe9de7bd925#71686ce8a4d1770a80de216080718fe9de7bd925" +source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" dependencies = [ "itertools 0.10.0", "js_int", "maplit", + "ruma-common", "ruma-events", "ruma-identifiers", "ruma-serde", @@ -1910,9 +1923,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" +checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" [[package]] name = "ryu" @@ -1992,18 +2005,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.125" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" +checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.125" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" +checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" dependencies = [ "proc-macro2", "quote", @@ -2361,9 +2374,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" +checksum = "bd3076b5c8cc18138b8f8814895c11eb4de37114a5d127bafdc5e55798ceef37" dependencies = [ "autocfg", "bytes", @@ -2380,9 +2393,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.1.0" 
+version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" dependencies = [ "proc-macro2", "quote", @@ -2402,9 +2415,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" +checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ "bytes", "futures-core", @@ -2519,9 +2532,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952a078337565ba39007de99b151770f41039253a31846f0a3d5cd5a4ac8eedf" +checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -2544,9 +2557,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9c97f7d103e0f94dbe384a57908833505ae5870126492f166821b7cf685589" +checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -2633,6 +2646,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "unindent" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" + [[package]] name = "untrusted" version = "0.7.1" @@ -2675,9 +2694,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" dependencies = [ "cfg-if 1.0.0", "serde", @@ -2687,9 +2706,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" dependencies = [ "bumpalo", "lazy_static", @@ -2702,9 +2721,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" +checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2714,9 +2733,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2724,9 +2743,9 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" dependencies = [ "proc-macro2", "quote", @@ -2737,15 +2756,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" +checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" [[package]] name = "web-sys" -version = "0.3.50" +version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" +checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 950924a..2343071 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,8 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "71686ce8a4d1770a80de216080718fe9de7bd925", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "a238a0dda5b06fad146f8f01d690cbe011d13245", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" @@ -108,5 +109,5 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } # For flamegraphs: -[profile.release] -debug = true +#[profile.release] +#debug = true diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 07a9bc9..e7daa9e 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -8,11 +8,11 @@ use ruma::{ set_room_account_data, }, }, - events::{custom::CustomEventContent, AnyBasicEventContent, BasicEvent}, + events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, serde::Raw, }; use serde::Deserialize; -use serde_json::value::RawValue as RawJsonValue; +use serde_json::{json, value::RawValue as RawJsonValue}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -28,7 +28,7 @@ pub async fn set_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = serde_json::from_str(body.data.get()) + let data = serde_json::from_str::(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -37,9 +37,10 @@ pub async fn set_global_account_data_route( None, sender_user, event_type.clone().into(), - &BasicEvent { - content: CustomEventContent { event_type, data }, - }, + &json!({ + 
"type": event_type, + "content": data, + }), &db.globals, )?; @@ -71,9 +72,10 @@ pub async fn set_room_account_data_route( Some(&body.room_id), sender_user, event_type.clone().into(), - &BasicEvent { - content: CustomEventContent { event_type, data }, - }, + &json!({ + "type": event_type, + "content": data, + }), &db.globals, )?; @@ -99,7 +101,7 @@ pub async fn get_global_account_data_route( .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; db.flush().await?; - let account_data = serde_json::from_str::(event.get()) + let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; @@ -130,7 +132,7 @@ pub async fn get_room_account_data_route( .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; db.flush().await?; - let account_data = serde_json::from_str::(event.get()) + let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; @@ -138,6 +140,11 @@ pub async fn get_room_account_data_route( } #[derive(Deserialize)] -struct ExtractEventContent { - content: Raw, +struct ExtractRoomEventContent { + content: Raw, +} + +#[derive(Deserialize)] +struct ExtractGlobalEventContent { + content: Raw, } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index aafa157..7a88fb6 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,5 +1,5 @@ use super::{State, SESSION_ID_LENGTH}; -use crate::{utils, ConduitResult, Database, Error, Ruma}; +use crate::{utils, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -12,6 +12,7 @@ use ruma::{ }, }, encryption::UnsignedDeviceInfo, + DeviceId, UserId, }; use std::collections::{BTreeMap, HashSet}; @@ -78,74 +79,14 @@ pub async fn get_keys_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut master_keys = BTreeMap::new(); - let mut self_signing_keys = BTreeMap::new(); - let mut user_signing_keys = BTreeMap::new(); - let mut device_keys = BTreeMap::new(); + let response = get_keys_helper( + Some(sender_user), + &body.device_keys, + |u| u == sender_user, + &db, + )?; - for (user_id, device_ids) in &body.device_keys { - if device_ids.is_empty() { - let mut container = BTreeMap::new(); - for device_id in db.users.all_device_ids(user_id) { - let device_id = device_id?; - if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { - let metadata = db - .users - .get_device_metadata(user_id, &device_id)? - .ok_or_else(|| { - Error::bad_database("all_device_keys contained nonexistent device.") - })?; - - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - - container.insert(device_id, keys); - } - } - device_keys.insert(user_id.clone(), container); - } else { - for device_id in device_ids { - let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { - let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( - Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to get keys for nonexistent device.", - ), - )?; - - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - - container.insert(device_id.clone(), keys); - } - device_keys.insert(user_id.clone(), container); - } - } - - if let Some(master_key) = db.users.get_master_key(user_id, sender_user)? 
{ - master_keys.insert(user_id.clone(), master_key); - } - if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_user)? { - self_signing_keys.insert(user_id.clone(), self_signing_key); - } - if user_id == sender_user { - if let Some(user_signing_key) = db.users.get_user_signing_key(sender_user)? { - user_signing_keys.insert(user_id.clone(), user_signing_key); - } - } - } - - Ok(get_keys::Response { - master_keys, - self_signing_keys, - user_signing_keys, - device_keys, - failures: BTreeMap::new(), - } - .into()) + Ok(response.into()) } #[cfg_attr( @@ -356,3 +297,81 @@ pub async fn get_key_changes_route( } .into()) } + +pub fn get_keys_helper bool>( + sender_user: Option<&UserId>, + device_keys_input: &BTreeMap>>, + allowed_signatures: F, + db: &Database, +) -> Result { + let mut master_keys = BTreeMap::new(); + let mut self_signing_keys = BTreeMap::new(); + let mut user_signing_keys = BTreeMap::new(); + let mut device_keys = BTreeMap::new(); + + for (user_id, device_ids) in device_keys_input { + if device_ids.is_empty() { + let mut container = BTreeMap::new(); + for device_id in db.users.all_device_ids(user_id) { + let device_id = device_id?; + if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { + let metadata = db + .users + .get_device_metadata(user_id, &device_id)? + .ok_or_else(|| { + Error::bad_database("all_device_keys contained nonexistent device.") + })?; + + keys.unsigned = UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }; + + container.insert(device_id, keys); + } + } + device_keys.insert(user_id.clone(), container); + } else { + for device_id in device_ids { + let mut container = BTreeMap::new(); + if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { + let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( + Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to get keys for nonexistent device.", + ), + )?; + + keys.unsigned = UnsignedDeviceInfo { + device_display_name: metadata.display_name, + }; + + container.insert(device_id.clone(), keys); + } + device_keys.insert(user_id.clone(), container); + } + } + + if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { + master_keys.insert(user_id.clone(), master_key); + } + if let Some(self_signing_key) = db + .users + .get_self_signing_key(user_id, &allowed_signatures)? + { + self_signing_keys.insert(user_id.clone(), self_signing_key); + } + if Some(user_id) == sender_user { + if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? 
{ + user_signing_keys.insert(user_id.clone(), user_signing_key); + } + } + } + + Ok(get_keys::Response { + master_keys, + self_signing_keys, + user_signing_keys, + device_keys, + failures: BTreeMap::new(), + }) +} diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 63c103d..de8b4cb 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{error, warn}; +use log::{debug, error, warn}; use member::{MemberEventContent, MembershipState}; use rocket::futures; use ruma::{ @@ -29,9 +29,10 @@ use ruma::{ uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ - collections::{BTreeMap, HashSet}, + collections::{btree_map::Entry, BTreeMap, HashSet}, convert::{TryFrom, TryInto}, sync::{Arc, RwLock}, + time::{Duration, Instant}, }; #[cfg(feature = "conduit_bin")] @@ -703,6 +704,38 @@ async fn validate_and_add_event_id( error!("{:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; if let Err(e) = ruma::signatures::verify_event( @@ -712,17 +745,11 @@ async fn validate_and_add_event_id( &value, room_version, ) { - warn!("Event failed verification: {}", e); + warn!("Event {} failed verification: {}", event_id, e); + back_off(event_id); return Err(Error::BadServerResponse("Event failed verification.")); } - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, &room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - value.insert( "event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()), diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 166e59a..1b7ea0b 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -5,12 +5,14 @@ use ruma::{ error::ErrorKind, r0::{read_marker::set_read_marker, receipt::create_receipt}, }, - events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, + events::{AnyEphemeralRoomEvent, EventType}, + receipt::ReceiptType, + MilliSecondsSinceUnixEpoch, }; #[cfg(feature = "conduit_bin")] use rocket::post; -use std::{collections::BTreeMap, time::SystemTime}; +use std::collections::BTreeMap; #[cfg_attr( feature = "conduit_bin", @@ -27,7 +29,6 @@ pub async fn set_read_marker_route( content: 
ruma::events::fully_read::FullyReadEventContent { event_id: body.fully_read.clone(), }, - room_id: body.room_id.clone(), }; db.account_data.update( Some(&body.room_id), @@ -54,26 +55,23 @@ pub async fn set_read_marker_route( user_receipts.insert( sender_user.clone(), ruma::events::receipt::Receipt { - ts: Some(SystemTime::now()), + ts: Some(MilliSecondsSinceUnixEpoch::now()), }, ); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + let mut receipt_content = BTreeMap::new(); - receipt_content.insert( - event.to_owned(), - ruma::events::receipt::Receipts { - read: Some(user_receipts), - }, - ); + receipt_content.insert(event.to_owned(), receipts); db.rooms.edus.readreceipt_update( &sender_user, &body.room_id, - AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )), + AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }), &db.globals, )?; } @@ -112,26 +110,22 @@ pub async fn create_receipt_route( user_receipts.insert( sender_user.clone(), ruma::events::receipt::Receipt { - ts: Some(SystemTime::now()), + ts: Some(MilliSecondsSinceUnixEpoch::now()), }, ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + let mut receipt_content = BTreeMap::new(); - receipt_content.insert( - body.event_id.to_owned(), - ruma::events::receipt::Receipts { - read: Some(user_receipts), - }, - ); + receipt_content.insert(body.event_id.to_owned(), receipts); db.rooms.edus.readreceipt_update( &sender_user, &body.room_id, - AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )), + AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }), &db.globals, )?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fe6f692..0a27b8d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -422,7 +422,7 @@ pub async fn sync_events_route( } let joined_room = sync_events::JoinedRoom { - account_data: sync_events::AccountData { + account_data: sync_events::RoomAccountData { events: db .account_data .changes_since(Some(&room_id), &sender_user, since)? @@ -506,7 +506,7 @@ pub async fn sync_events_route( left_rooms.insert( room_id.clone(), sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, + account_data: sync_events::RoomAccountData { events: Vec::new() }, timeline: sync_events::Timeline { limited: false, prev_batch: Some(next_batch.clone()), @@ -577,7 +577,7 @@ pub async fn sync_events_route( .map(|(_, v)| Raw::from(v)) .collect(), }, - account_data: sync_events::AccountData { + account_data: sync_events::GlobalAccountData { events: db .account_data .changes_since(None, &sender_user, since)? 
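For context, the read-receipt rework above nests receipt content as event id -> receipt type -> user id -> receipt. A rough sketch of building one such event, assuming the ruma revision pinned in Cargo.toml above (the helper name is hypothetical and not part of the patched sources):

    use std::collections::BTreeMap;

    use ruma::{
        events::receipt::{Receipt, ReceiptEvent, ReceiptEventContent},
        receipt::ReceiptType,
        EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
    };

    // Sketch: build the nested map that ReceiptEventContent wraps after this change.
    fn sketch_read_receipt(room_id: RoomId, event_id: EventId, user_id: UserId) -> ReceiptEvent {
        // Innermost level: which user read, and when.
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            user_id,
            Receipt {
                ts: Some(MilliSecondsSinceUnixEpoch::now()),
            },
        );

        // Middle level: the receipt type (only m.read is used here).
        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);

        // Outermost level: the event the receipt refers to.
        let mut content = BTreeMap::new();
        content.insert(event_id, receipts);

        ReceiptEvent {
            content: ReceiptEventContent(content),
            room_id,
        }
    }

Keeping ReceiptType as an explicit map level is what lets select_edus in sending.rs pull the read receipt back out with remove(&ReceiptType::Read) when converting it into a federation EDU.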
diff --git a/src/database.rs b/src/database.rs index 6b68b9e..d7126e3 100644 --- a/src/database.rs +++ b/src/database.rs @@ -213,7 +213,7 @@ impl Database { pusher: pusher::PushData::new(&db)?, globals: globals::Globals::load( db.open_tree("global")?, - db.open_tree("servertimeout_signingkey")?, + db.open_tree("server_signingkeys")?, config, )?, _db: db, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index f3832ea..bb970c3 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, - events::{AnyEvent as EduEvent, EventType}, + events::{AnyEphemeralRoomEvent, EventType}, serde::Raw, RoomId, UserId, }; @@ -80,7 +80,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -110,7 +110,7 @@ impl AccountData { .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, ) .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { + serde_json::from_slice::>(&v).map_err(|_| { Error::bad_database("Database contains invalid account data.") })?, )) diff --git a/src/database/globals.rs b/src/database/globals.rs index c1eafe0..0dd73b2 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -2,20 +2,22 @@ use crate::{database::Config, utils, Error, Result}; use log::{error, info}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, - ServerName, ServerSigningKeyId, + EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, }; use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ collections::{BTreeMap, HashMap}, sync::{Arc, RwLock}, - time::Duration, + time::{Duration, Instant}, }; +use tokio::sync::Semaphore; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; type WellKnownMap = HashMap, (String, String)>; type TlsNameMap = HashMap; +type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries #[derive(Clone)] pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host @@ -26,7 +28,10 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey + pub(super) server_signingkeys: sled::Tree, + pub bad_event_ratelimiter: Arc>>, + pub bad_signature_ratelimiter: Arc, RateLimitState>>>, + pub servername_ratelimiter: Arc, Arc>>>, } struct MatrixServerVerifier { @@ -65,7 +70,7 @@ impl ServerCertVerifier for MatrixServerVerifier { impl Globals { pub fn load( globals: sled::Tree, - servertimeout_signingkey: sled::Tree, + server_signingkeys: sled::Tree, config: Config, ) -> Result { let bytes = &*globals @@ -135,8 +140,11 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, - servertimeout_signingkey, + server_signingkeys, jwt_decoding_key, + bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), + bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), + servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), }) } @@ -203,31 +211,21 @@ impl Globals { /// Remove the outdated keys and insert the new ones. /// /// This doesn't actually check that the keys provided are newer than the old set. 
- pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { - let mut key1 = origin.as_bytes().to_vec(); - key1.push(0xff); - - let mut key2 = key1.clone(); - - let ts = keys - .valid_until_ts - .duration_since(std::time::UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64; - - key1.extend_from_slice(&ts.to_be_bytes()); - key2.extend_from_slice(&(ts + 1).to_be_bytes()); - - self.servertimeout_signingkey.insert( - key1, - serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), - )?; - - self.servertimeout_signingkey.insert( - key2, - serde_json::to_vec(&keys.old_verify_keys) - .expect("ServerSigningKeys are a valid string"), - )?; + pub fn add_signing_key(&self, origin: &ServerName, new_keys: &ServerSigningKeys) -> Result<()> { + self.server_signingkeys + .update_and_fetch(origin.as_bytes(), |signingkeys| { + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + keys.verify_keys + .extend(new_keys.verify_keys.clone().into_iter()); + keys.old_verify_keys + .extend(new_keys.old_verify_keys.clone().into_iter()); + Some(serde_json::to_vec(&keys).expect("serversigningkeys can be serialized")) + })?; Ok(()) } @@ -237,26 +235,22 @@ impl Globals { &self, origin: &ServerName, ) -> Result> { - let mut response = BTreeMap::new(); + let signingkeys = self + .server_signingkeys + .get(origin.as_bytes())? + .and_then(|bytes| serde_json::from_slice::(&bytes).ok()) + .map(|keys| { + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + tree + }) + .unwrap_or_else(BTreeMap::new); - let now = crate::utils::millis_since_unix_epoch(); - - for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { - let (k, bytes) = item?; - let valid_until = k - .splitn(2, |&b| b == 0xff) - .nth(1) - .map(crate::utils::u64_from_bytes) - .ok_or_else(|| Error::bad_database("Invalid signing keys."))? - .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; - // If these keys are still valid use em! 
- if valid_until > now { - let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?; - response.extend(btree); - } - } - Ok(response) + Ok(signingkeys) } pub fn database_version(&self) -> Result { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 75c2efb..51f55a1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -294,7 +294,8 @@ async fn send_notice( } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); if event.kind == EventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 89f2905..f4c7075 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -2,7 +2,7 @@ use crate::{utils, Error, Result}; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, - AnyEvent as EduEvent, SyncEphemeralRoomEvent, + AnyEphemeralRoomEvent, SyncEphemeralRoomEvent, }, presence::PresenceState, serde::Raw, @@ -32,7 +32,7 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - event: EduEvent, + event: AnyEphemeralRoomEvent, globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/sending.rs b/src/database/sending.rs index 199bd05..ed5b5ef 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -3,7 +3,7 @@ use std::{ convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, - time::{Duration, Instant, SystemTime}, + time::{Duration, Instant}, }; use crate::{ @@ -23,7 +23,9 @@ use ruma::{ OutgoingRequest, }, events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, - push, ServerName, UInt, UserId, + push, + receipt::ReceiptType, + MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -277,17 +279,14 @@ impl Sending { events.push(e); } - match outgoing_kind { - OutgoingKind::Normal(server_name) => { - if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { - events.extend_from_slice(&select_edus); - db.sending - .servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes()) - .unwrap(); - } + if let OutgoingKind::Normal(server_name) = outgoing_kind { + if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { + events.extend_from_slice(&select_edus); + db.sending + .servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + .unwrap(); } - _ => {} } } @@ -326,14 +325,14 @@ impl Sending { AnySyncEphemeralRoomEvent::Receipt(r) => { let mut read = BTreeMap::new(); - let (event_id, receipt) = r + let (event_id, mut receipt) = r .content .0 .into_iter() .next() .expect("we only use one event per read receipt"); let receipt = receipt - .read + .remove(&ReceiptType::Read) .expect("our read receipts always set this") .remove(&user_id) .expect("our read receipts always have the user here"); @@ -436,7 +435,7 @@ impl Sending { ), ) })? - .to_any_event()) + .to_room_event()) } SendingEventType::Edu(_) => { // Appservices don't need EDUs (?) 
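The bad_event_ratelimiter and bad_signature_ratelimiter introduced by this patch share one backoff rule: after a failure, an id is skipped until 30 seconds times the square of the failed tries has elapsed, capped at one day. A condensed sketch of that check (hypothetical helper name, standard library only, mirroring the inline checks rather than reproducing them):

    use std::time::{Duration, Instant};

    // Sketch of the exponential backoff rule used by the new rate limiters:
    // skip an id while less than 30s * tries^2 (capped at 24h) has passed
    // since its last recorded failure.
    fn still_backing_off(last_failure: Instant, tries: u32) -> bool {
        let mut min_elapsed = Duration::from_secs(30) * tries * tries;
        if min_elapsed > Duration::from_secs(60 * 60 * 24) {
            min_elapsed = Duration::from_secs(60 * 60 * 24);
        }
        last_failure.elapsed() < min_elapsed
    }

Only failures advance the stored (Instant, tries) pair, via the back_off closures added in membership.rs and server_server.rs; once the interval has passed, the next attempt proceeds normally.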
@@ -610,7 +609,7 @@ impl Sending { origin: db.globals.server_name(), pdus: &pdu_jsons, edus: &edu_jsons, - origin_server_ts: SystemTime::now(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), transaction_id: &base64::encode_config( Self::calculate_hash( &events diff --git a/src/database/users.rs b/src/database/users.rs index 9cdfb5f..a5b8775 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,19 +1,13 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{ - device::Device, - keys::{CrossSigningKey, OneTimeKey}, - }, - }, - encryption::DeviceKeys, + api::client::{error::ErrorKind, r0::device::Device}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; +use std::{collections::BTreeMap, convert::TryFrom, mem}; #[derive(Clone)] pub struct Users { @@ -200,7 +194,7 @@ impl Users { device_id: device_id.into(), display_name: initial_device_display_name, last_seen_ip: None, // TODO - last_seen_ts: Some(SystemTime::now()), + last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), }) .expect("Device::to_string never fails.") .as_bytes(), @@ -653,12 +647,11 @@ impl Users { }) } - pub fn get_master_key( + pub fn get_master_key bool>( &self, user_id: &UserId, - sender_id: &UserId, + allowed_signatures: F, ) -> Result> { - // TODO: hide some signatures self.userid_masterkeyid .get(user_id.to_string())? .map_or(Ok(None), |key| { @@ -673,7 +666,7 @@ impl Users { cross_signing_key.signatures = cross_signing_key .signatures .into_iter() - .filter(|(user, _)| user == user_id || user == sender_id) + .filter(|(user, _)| allowed_signatures(user)) .collect(); Ok(Some(cross_signing_key)) @@ -681,10 +674,10 @@ impl Users { }) } - pub fn get_self_signing_key( + pub fn get_self_signing_key bool>( &self, user_id: &UserId, - sender_id: &UserId, + allowed_signatures: F, ) -> Result> { self.userid_selfsigningkeyid .get(user_id.to_string())? 
@@ -700,7 +693,7 @@ impl Users { cross_signing_key.signatures = cross_signing_key .signatures .into_iter() - .filter(|(user, _)| user == user_id || user == sender_id) + .filter(|(user, _)| user == user_id || allowed_signatures(user)) .collect(); Ok(Some(cross_signing_key)) diff --git a/src/main.rs b/src/main.rs index 57eb0d0..7257d69 100644 --- a/src/main.rs +++ b/src/main.rs @@ -158,6 +158,7 @@ fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket Raw { + pub fn to_any_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -267,10 +269,9 @@ impl state_res::Event for PduEvent { fn content(&self) -> serde_json::Value { self.content.clone() } - fn origin_server_ts(&self) -> std::time::SystemTime { - UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) } - fn state_key(&self) -> Option { self.state_key.clone() } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 9143999..d0f7303 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -34,6 +34,7 @@ pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, + pub sender_servername: Option>, // This is None when body is not a valid string pub json_body: Option, pub from_appservice: bool, @@ -68,7 +69,10 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = db + let (sender_user, sender_device, sender_servername, from_appservice) = if let Some(( + _id, + registration, + )) = db .appservice .iter_all() .filter_map(|r| r.ok()) @@ -104,10 +108,10 @@ where } // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, true) + (Some(user_id), None, None, true) } - AuthScheme::ServerSignatures => (None, None, true), - AuthScheme::None => (None, None, true), + AuthScheme::ServerSignatures => (None, None, None, true), + AuthScheme::None => (None, None, None, true), } } else { match metadata.authentication { @@ -116,9 +120,12 @@ where match db.users.find_from_token(&token).unwrap() { // Unknown Token None => return Failure((Status::raw(581), ())), - Some((user_id, device_id)) => { - (Some(user_id), Some(Box::::from(device_id)), false) - } + Some((user_id, device_id)) => ( + Some(user_id), + Some(Box::::from(device_id)), + None, + false, + ), } } else { // Missing Token @@ -227,27 +234,24 @@ where CanonicalJsonValue::Object(signatures), ); - let keys = match server_server::fetch_signing_keys( - &db, - &origin, - vec![&key.to_owned()], - ) - .await - { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); + let keys = + match server_server::fetch_signing_keys(&db, &origin, vec![key.to_owned()]) + .await + { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); - // Forbidden - return Failure((Status::raw(580), ())); - } - }; + // Forbidden + return Failure((Status::raw(580), ())); + } + }; let mut pub_key_map = BTreeMap::new(); pub_key_map.insert(origin.as_str().to_owned(), keys); match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, false), + Ok(()) => (None, None, Some(origin), false), Err(e) => { warn!("Failed to verify json request from {}: {}", origin, e); @@ -260,7 +264,7 @@ where } } } - AuthScheme::None => (None, None, false), + AuthScheme::None => (None, None, None, false), } }; @@ -307,6 +311,7 @@ where body: t, 
sender_user, sender_device, + sender_servername, from_appservice, json_body, }), diff --git a/src/server_server.rs b/src/server_server.rs index 1a1716d..d51c9eb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,10 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{ + client_server::{self, get_keys_helper}, + utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, +}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{debug, error, info, warn}; +use log::{debug, error, info, trace, warn}; use regex::Regex; use rocket::{response::content::Json, State}; use ruma::{ @@ -15,6 +18,7 @@ use ruma::{ VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, + keys::get_keys, membership::{ create_invite, create_join_event::{self, RoomState}, @@ -32,12 +36,14 @@ use ruma::{ create::CreateEventContent, member::{MemberEventContent, MembershipState}, }, - AnyEphemeralRoomEvent, AnyEvent as EduEvent, EventType, + AnyEphemeralRoomEvent, EventType, }, + receipt::ReceiptType, serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, Event, EventMap, RoomVersion, StateMap}, - uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + ServerSigningKeyId, UserId, }; use std::{ collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, @@ -49,8 +55,9 @@ use std::{ pin::Pin, result::Result as StdResult, sync::{Arc, RwLock}, - time::{Duration, SystemTime}, + time::{Duration, Instant, SystemTime}, }; +use tokio::sync::Semaphore; #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; @@ -452,7 +459,10 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), - valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(60 * 2), + ) + .expect("time is valid"), }, } .try_into_http_response::>() @@ -608,6 +618,7 @@ pub async fn send_transaction_message_route<'a>( } }; + let start_time = Instant::now(); if let Err(e) = handle_incoming_pdu( &body.origin, &event_id, @@ -619,7 +630,17 @@ pub async fn send_transaction_message_route<'a>( ) .await { - resolved_map.insert(event_id, Err(e)); + resolved_map.insert(event_id.clone(), Err(e)); + } + + let elapsed = start_time.elapsed(); + if elapsed > Duration::from_secs(1) { + warn!( + "Handling event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); } } @@ -653,19 +674,16 @@ pub async fn send_transaction_message_route<'a>( let mut user_receipts = BTreeMap::new(); user_receipts.insert(user_id.clone(), user_updates.data); - let mut receipt_content = BTreeMap::new(); - receipt_content.insert( - event_id.to_owned(), - ruma::events::receipt::Receipts { - read: Some(user_receipts), - }, - ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); - let event = - EduEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - })); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = AnyEphemeralRoomEvent::Receipt(ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: 
room_id.clone(), + }); db.rooms.edus.readreceipt_update( &user_id, &room_id, @@ -698,6 +716,8 @@ pub async fn send_transaction_message_route<'a>( } } + info!("/send/{} done", body.transaction_id); + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } @@ -794,7 +814,7 @@ pub fn handle_incoming_pdu<'a>( ) { Err(e) => { // Drop - warn!("{:?}: {}", value, e); + warn!("Dropping bad event {}: {}", event_id, e); return Err("Signature verification failed".to_string()); } Ok(ruma::signatures::Verified::Signatures) => { @@ -821,6 +841,7 @@ pub fn handle_incoming_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // EDIT: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_events( db, @@ -1292,12 +1313,30 @@ pub(crate) fn fetch_and_handle_events<'a>( auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Vec>, Error> { Box::pin(async move { + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + let mut pdus = vec![]; for id in events { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", id); + continue; + } + } // a. Look at auth cache let pdu = match auth_cache.get(id) { Some(pdu) => { - debug!("Found {} in cache", id); // We already have the auth chain for events in cache pdu.clone() } @@ -1306,7 +1345,7 @@ pub(crate) fn fetch_and_handle_events<'a>( // (get_pdu checks both) None => match db.rooms.get_pdu(&id)? 
{ Some(pdu) => { - debug!("Found {} in db", id); + trace!("Found {} in db", id); // We need to fetch the auth chain let _ = fetch_and_handle_events( db, @@ -1331,7 +1370,7 @@ pub(crate) fn fetch_and_handle_events<'a>( .await { Ok(res) => { - debug!("Got {} over federation: {:?}", id, res); + debug!("Got {} over federation", id); let (event_id, mut value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; // This will also fetch the auth chain @@ -1358,12 +1397,14 @@ pub(crate) fn fetch_and_handle_events<'a>( } Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); + back_off(id.clone()); continue; } } } Err(_) => { warn!("Failed to fetch event: {}", id); + back_off(id.clone()); continue; } } @@ -1383,10 +1424,67 @@ pub(crate) fn fetch_and_handle_events<'a>( pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, - signature_ids: Vec<&String>, + signature_ids: Vec, ) -> Result> { let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|&id| keys.contains_key(id)); + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = db + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = db.globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match db + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = db + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + debug!("Loading signing keys for {}", origin); let mut result = db .globals @@ -1399,6 +1497,8 @@ pub(crate) async fn fetch_signing_keys( return Ok(result); } + debug!("Fetching signing keys for {} over federation", origin); + if let Ok(get_keys_response) = db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) @@ -1436,14 +1536,17 @@ pub(crate) async fn fetch_signing_keys( &server, get_remote_server_keys::v2::Request::new( origin, - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), ), ) .await { - debug!("Got signing keys: {:?}", keys); + trace!("Got signing keys: {:?}", keys); for k in keys.server_keys { db.globals.add_signing_key(origin, &k)?; result.extend( @@ -1464,6 +1567,10 @@ pub(crate) async fn fetch_signing_keys( } } + drop(permit); + + back_off(signature_ids); + warn!("Failed to find public key for server: {}", origin); Err(Error::BadServerResponse( "Failed to find public key for server", @@ -1581,7 +1688,7 @@ pub fn get_event_route<'a>( 
Ok(get_event::v1::Response { origin: db.globals.server_name().to_owned(), - origin_server_ts: SystemTime::now(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event( db.rooms .get_pdu_json(&body.event_id)? @@ -2186,6 +2293,34 @@ pub fn get_profile_information_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/federation/v1/user/keys/query", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_keys_route<'a>( + db: State<'a, Database>, + body: Ruma, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = get_keys_helper( + None, + &body.device_keys, + |u| Some(u.server_name()) == body.sender_servername.as_deref(), + &db, + )?; + + Ok(get_keys::v1::Response { + device_keys: result.device_keys, + master_keys: result.master_keys, + self_signing_keys: result.self_signing_keys, + } + .into()) +} + pub async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, @@ -2208,9 +2343,8 @@ pub async fn fetch_required_signing_keys( "Invalid signatures content object in server response pdu.", ))?; - let signature_ids = signature_object.keys().collect::>(); + let signature_ids = signature_object.keys().cloned().collect::>(); - debug!("Fetching signing keys for {}", signature_server); let fetch_res = fetch_signing_keys( db, &Box::::try_from(&**signature_server).map_err(|_| { From 989d843c40f112205e34c454940e3031e30923ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 21 May 2021 18:12:02 +0200 Subject: [PATCH 0576/1727] fix: unauthorized pdus will be responded to with FORBIDDEN --- src/client_server/membership.rs | 12 +++--------- src/database/rooms.rs | 2 +- src/server_server.rs | 4 ++-- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index de8b4cb..9674b7a 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -569,13 +569,7 @@ async fn join_room_by_id_helper( { let (event_id, value) = match result { Ok(t) => t, - Err(e) => { - warn!( - "PDU could not be verified: {:?} {:?} {:?}", - e, event_id, pdu - ); - continue; - } + Err(_) => continue, }; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { @@ -701,7 +695,7 @@ async fn validate_and_add_event_id( db: &Database, ) -> Result<(EventId, CanonicalJsonObject)> { let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { - error!("{:?}: {:?}", pdu, e); + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; let event_id = EventId::try_from(&*format!( @@ -745,7 +739,7 @@ async fn validate_and_add_event_id( &value, room_version, ) { - warn!("Event {} failed verification: {}", event_id, e); + warn!("Event {} failed verification {:?} {}", event_id, pdu, e); back_off(event_id); return Err(Error::BadServerResponse("Event failed verification.")); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 48e6e11..5ba170a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1309,7 +1309,7 @@ impl Rooms { if !auth_check { return Err(Error::BadRequest( - ErrorKind::InvalidParam, + ErrorKind::Forbidden, "Event is not authorized.", )); } diff --git a/src/server_server.rs b/src/server_server.rs index d51c9eb..699cbbe 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1484,7 +1484,7 @@ pub(crate) 
async fn fetch_signing_keys( } } - debug!("Loading signing keys for {}", origin); + trace!("Loading signing keys for {}", origin); let mut result = db .globals @@ -1943,7 +1943,7 @@ pub fn create_join_event_template_route<'a>( if !auth_check { return Err(Error::BadRequest( - ErrorKind::InvalidParam, + ErrorKind::Forbidden, "Event is not authorized.", )); } From 3e2f742f30506fcf0bcea82c8aa697995f265cf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 21 May 2021 22:22:05 +0200 Subject: [PATCH 0577/1727] fix: room version warnings and other bugs when joining rooms --- src/client_server/membership.rs | 43 +++++---------------------------- src/client_server/sync.rs | 8 ++---- src/database/rooms.rs | 5 +++- 3 files changed, 12 insertions(+), 44 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 9674b7a..75d7258 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -546,12 +546,6 @@ async fn join_room_by_id_helper( ) .await?; - let count = db.globals.next_count()?; - - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -579,36 +573,6 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - if pdu.kind == EventType::RoomMember { - let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { - warn!( - "Invalid user id in send_join response: {}: {}", - state_key, e - ); - Error::BadServerResponse("Invalid user id in send_join response.") - })?; - - let invite_state = Vec::new(); // TODO add a few important events - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - db.rooms.update_membership( - &pdu.room_id, - &target_user_id, - serde_json::from_value::( - pdu.content - .get("membership") - .ok_or(Error::BadServerResponse("Invalid member event content"))? - .clone(), - ) - .map_err(|_| { - Error::BadServerResponse("Invalid membership state content.") - })?, - &pdu.sender, - Some(invite_state), - db, - )?; - } state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); } } @@ -648,10 +612,15 @@ async fn join_room_by_id_helper( // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + let count = db.globals.next_count()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - db.globals.next_count()?, + count, pdu_id.into(), &[pdu.event_id.clone()], db, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 0a27b8d..2b6b39e 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -103,11 +103,6 @@ pub async fn sync_events_route( // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. 
let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); - let pdus_after_since = db - .rooms - .pdus_after(sender_user, &room_id, since) - .next() - .is_some(); let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { db.rooms @@ -121,7 +116,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { + ) = if Some(current_shortstatehash) != since_shortstatehash { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -224,6 +219,7 @@ pub async fn sync_events_route( device_list_updates.insert(user_id); } } + // TODO: Remove, this should never happen here, right? (MembershipState::Join, MembershipState::Leave) => { // Write down users that have left encrypted rooms we are in left_encrypted_users.insert(user_id); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5ba170a..ede8589 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -374,7 +374,7 @@ impl Rooms { for event_id in new_state.difference(&old_state) { if let Some(pdu) = self.get_pdu_json(event_id)? { - if pdu.get("event_type").and_then(|val| val.as_str()) == Some("m.room.member") { + if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") { if let Ok(pdu) = serde_json::from_value::( serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"), ) { @@ -1158,6 +1158,9 @@ impl Rooms { ) -> Result>> { let mut state = Vec::new(); // Add recommended events + if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomCreate, "")? { + state.push(e.to_stripped_state_event()); + } if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomJoinRules, "")? { From 1b42770ab561e7e36cbb819f96d96f0075fc9dfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 10:34:19 +0200 Subject: [PATCH 0578/1727] improvement: warning for small max_request_size values --- src/database.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index d7126e3..76eae24 100644 --- a/src/database.rs +++ b/src/database.rs @@ -112,7 +112,9 @@ impl Database { .use_compression(true) .open()?; - info!("Opened sled database at {}", config.database_path); + if config.max_request_size < 1024 { + eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); + } let (admin_sender, admin_receiver) = mpsc::unbounded(); From 9b77eb7bb7d899785aa91345ae704cf2f9e1efff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 11:31:15 +0200 Subject: [PATCH 0579/1727] fix: too many syncs --- src/client_server/sync.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2b6b39e..b0a22dd 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -103,6 +103,11 @@ pub async fn sync_events_route( // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. 
let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); + let pdus_after_since = db + .rooms + .pdus_after(sender_user, &room_id, since) + .next() + .is_some(); let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { db.rooms @@ -116,7 +121,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if Some(current_shortstatehash) != since_shortstatehash { + ) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() From 47160e9e06ede6ba19d8704132d225d6ecc1645d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 13:39:31 +0200 Subject: [PATCH 0580/1727] docs: APPSERVICES setup guide --- APPSERVICES.md | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ DEPLOY.md | 2 ++ README.md | 4 ++++ 3 files changed, 54 insertions(+) create mode 100644 APPSERVICES.md diff --git a/APPSERVICES.md b/APPSERVICES.md new file mode 100644 index 0000000..a84f1d2 --- /dev/null +++ b/APPSERVICES.md @@ -0,0 +1,48 @@ +# Setting up Appservices + +## Getting help + +If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). + +## Tested appservices + +Here are some appservices we tested and that work with Conduit: +- matrix-appservice-discord +- mautrix-hangouts +- mautrix-telegram + +## Set up the appservice + +Follow whatever instructions are given by the appservice. This usually includes +downloading, changing its config (setting domain, homeserver url, port etc.) +and later starting it. + +At some point the appservice guide should ask you to add a registration yaml +file to the homeserver. In Synapse you would do this by adding the path to the +homeserver.yaml, but in Conduit you can do this from within Matrix: + +First, go into the #admins room of your homeserver. The first person that +registered on the homeserver automatically joins it. Then send a message into +the room like this: + + @conduit:your.server.name: register_appservice + ``` + paste + the + contents + of + the + yaml + registration + here + ``` + +You can confirm it worked by sending a message like this: +`@conduit:your.server.name: list_appservices` + +The @conduit bot should answer with `Appservices (1): your-bridge` + +Then you are done. Conduit will send messages to the appservices and the +appservice can send requests to the homeserver. You don't need to restart +Conduit, but if it doesn't work, restarting while the appservice is running +could help. diff --git a/DEPLOY.md b/DEPLOY.md index 8dedad2..02073ff 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -209,3 +209,5 @@ Set it to start automatically when your system boots with: ```bash $ sudo systemctl enable conduit ``` + +If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/README.md b/README.md index ace30eb..591313f 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,10 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read more](DEPLOY.md) +If you want to connect an Appservice to Conduit, take a look at the [Appservice Guide](APPSERVICES.md). 
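The Appservice Guide added above asks you to paste "the contents of the yaml registration" into the `register_appservice` message, but does not show what such a file looks like. The following is purely an illustrative sketch: the registration file is generated by the appservice/bridge itself, the field names come from the standard Matrix application service registration format, and every value below is a placeholder. Use the file your bridge produces, not this one.

```yaml
# Illustrative placeholder only; your bridge generates the real registration file.
id: my-bridge                      # hypothetical appservice id
url: http://localhost:9005         # hypothetical address the appservice listens on
as_token: "<token generated by the bridge>"
hs_token: "<token generated by the bridge>"
sender_localpart: my-bridge-bot    # hypothetical bot localpart
rate_limited: false
namespaces:
  users:
    - exclusive: true
      regex: "@bridge_.*:your.server.name"
  aliases: []
  rooms: []
```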
+ +If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). + ##### Deloy using a Debian package You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info): From 52a96b3d8404ccfc04ab0e0ba60ed8f9963ba686 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Fri, 12 Mar 2021 18:26:23 +0100 Subject: [PATCH 0581/1727] Update Dockerfile and docker-compose - Dockerfile now tracks the gitlab repository and the master branch. - docker-compose now points to conduit.toml instead of Rocket.toml and its env vars were also renamed from ROCKET_ to CONDUIT_. Furthermore vectorim/riot-web was changed to vectorim/element-web --- Dockerfile | 6 +++--- docker-compose.yml | 30 ++++++++++++++++-------------- docker/README.md | 6 +++--- docker/docker-compose.traefik.yml | 27 +++++++++++++++------------ 4 files changed, 37 insertions(+), 32 deletions(-) diff --git a/Dockerfile b/Dockerfile index a97f4cf..594a4b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,7 +30,7 @@ COPY . . RUN if [[ $LOCAL == "true" ]]; then \ cargo install --path . ; \ else \ - cargo install --git "https://github.com/timokoesters/conduit.git" --rev ${GIT_REF}; \ + cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF}; \ fi ########################## RUNTIME IMAGE ########################## @@ -40,7 +40,7 @@ FROM alpine:3.12 ARG CREATED ARG VERSION -ARG GIT_REF=HEAD +ARG GIT_REF=origin/master # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command @@ -52,7 +52,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.description="A Matrix homeserver written in Rust" \ org.opencontainers.image.url="https://conduit.rs/" \ org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \ + org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ org.opencontainers.image.licenses="Apache-2.0" \ org.opencontainers.image.documentation="" \ org.opencontainers.image.ref.name="" \ diff --git a/docker-compose.yml b/docker-compose.yml index 7d19762..38c8a11 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,33 +15,35 @@ services: # CREATED: # VERSION: # LOCAL: 'false' - # GIT_REF: HEAD + # GIT_REF: origin/master restart: unless-stopped ports: - 8448:8000 volumes: - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use Rocket.toml to configure Conduit - ### Note: Set env vars will override Rocket.toml values - # - ./Rocket.toml:/srv/conduit/Rocket.toml + ### Uncomment if you want to use conduit.toml to configure Conduit + ### Note: Set env vars will override conduit.toml values + # - ./conduit.toml:/srv/conduit/conduit.toml environment: - ROCKET_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name ### Uncomment and change values as desired - # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical - # ROCKET_PORT: 8000 - # ROCKET_REGISTRATION_DISABLED: 'true' - # ROCKET_ENCRYPTION_DISABLED: 'true' - # ROCKET_FEDERATION_ENABLED: 'true' - # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit - # ROCKET_WORKERS: 10 - # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + # CONDUIT_ADDRESS: 127.0.0.1 + # CONDUIT_PORT: 8000 + # CONDUIT_LOG: normal # 
Available levels are: off, debug, normal, critical + # CONDUIT_ALLOW_JAEGER: 'false' + # CONDUIT_ALLOW_REGISTRATION : 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'false' + # CONDUIT_ALLOW_FEDERATION: 'false' + # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit + # CONDUIT_WORKERS: 10 + # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: - # image: vectorim/riot-web:latest + # image: vectorim/element-web:latest # restart: unless-stopped # ports: # - 8009:80 diff --git a/docker/README.md b/docker/README.md index f90b9a4..6ae7453 100644 --- a/docker/README.md +++ b/docker/README.md @@ -17,13 +17,13 @@ The Dockerfile includes a few build arguments that should be supplied when build ARG LOCAL=false ARG CREATED ARG VERSION -ARG GIT_REF=HEAD +ARG GIT_REF=origin/master ``` - **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` - **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)` - **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`. -- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `HEAD`. +- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`. To build the image you can use the following command @@ -40,7 +40,7 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ``` bash -docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000" matrixconduit/matrix-conduit:latest +docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e CONDUIT_SERVER_NAME="localhost:8000" matrixconduit/matrix-conduit:latest ``` For detached mode, you also need to use the `-d` flag. You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. 
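As a consolidated illustration of the variables renamed above (not part of the patch itself): a minimal docker-compose service using the `CONDUIT_*` environment variables could look roughly like the sketch below. The image name, port mapping, volume path and variable names are taken from the diffs in this and the following patches (`CONDUIT_CONFIG`, for example, is introduced a couple of patches further down); the concrete values are placeholders to adapt to your own deployment.

```yaml
# Minimal sketch only; mirrors names shown in the surrounding diffs, adapt values to your setup.
services:
  homeserver:
    image: matrixconduit/matrix-conduit:latest
    restart: unless-stopped
    ports:
      - 8448:8000
    volumes:
      - db:/srv/conduit/.local/share/conduit
    environment:
      CONDUIT_SERVER_NAME: your.server.name        # replace with your own name
      CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
      CONDUIT_MAX_REQUEST_SIZE: 20_000_000         # in bytes, ~20 MB
      CONDUIT_CONFIG: ''                           # empty string = configure purely via env vars

volumes:
  db:
```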
diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 111eaa5..25497c6 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -15,32 +15,35 @@ services: # CREATED: # VERSION: # LOCAL: 'false' - # GIT_REF: HEAD + # GIT_REF: origin/master restart: unless-stopped volumes: - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use Rocket.toml to configure Conduit - ### Note: Set env vars will override Rocket.toml values - # - ./Rocket.toml:/srv/conduit/Rocket.toml + ### Uncomment if you want to use conduit.toml to configure Conduit + ### Note: Set env vars will override conduit.toml values + # - ./conduit.toml:/srv/conduit/conduit.toml networks: - proxy environment: ROCKET_SERVER_NAME: localhost:8000 # replace with your own name ### Uncomment and change values as desired - # ROCKET_LOG: normal # Available levels are: off, debug, normal, critical - # ROCKET_PORT: 8000 - # ROCKET_REGISTRATION_DISABLED: 'true' - # ROCKET_ENCRYPTION_DISABLED: 'true' - # ROCKET_DATABASE_PATH: /srv/conduit/.local/share/conduit - # ROCKET_WORKERS: 10 - # ROCKET_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + # CONDUIT_ADDRESS: 127.0.0.1 + # CONDUIT_PORT: 8000 + # CONDUIT_LOG: normal # Available levels are: off, debug, normal, critical + # CONDUIT_ALLOW_JAEGER: 'false' + # CONDUIT_ALLOW_REGISTRATION : 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'false' + # CONDUIT_ALLOW_FEDERATION: 'false' + # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit + # CONDUIT_WORKERS: 10 + # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: - # image: vectorim/riot-web:latest + # image: vectorim/element-web:latest # restart: unless-stopped # volumes: # - ./element_config.json:/app/config.json From beb428d8669d509b3199531660a3544ccff69d9a Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Fri, 12 Mar 2021 21:07:43 +0100 Subject: [PATCH 0582/1727] Update docker command in README Mention the need for a config. --- README.md | 4 +++- docker/README.md | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 591313f..3a8384f 100644 --- a/README.md +++ b/README.md @@ -82,9 +82,11 @@ Pull and run the docker image with ``` bash docker pull matrixconduit/matrix-conduit:latest -docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` +> Note: You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml). + Or build and run it with docker or docker-compose. 
[Read more](docker/README.md) diff --git a/docker/README.md b/docker/README.md index 6ae7453..6bf36f1 100644 --- a/docker/README.md +++ b/docker/README.md @@ -40,10 +40,11 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ``` bash -docker run -d -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e CONDUIT_SERVER_NAME="localhost:8000" matrixconduit/matrix-conduit:latest +docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` -For detached mode, you also need to use the `-d` flag. You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. +For detached mode, you also need to use the `-d` flag. You also need to supply a `conduit.toml` config file, you can find an example [here](../conduit-example.toml). +You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. From 5a7ccbdfab228daca0ebecf46b9c4372d36faf6a Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 16 Mar 2021 09:14:11 +0100 Subject: [PATCH 0583/1727] Add CONDUIT_CONFIG to all relevant docker files And mention that an empty string can be used to configure Conduit purely with env vars. --- Dockerfile | 2 ++ README.md | 1 + docker-compose.yml | 4 +++- docker/README.md | 3 ++- docker/docker-compose.traefik.yml | 4 +++- 5 files changed, 11 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 594a4b8..51f146d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,6 +42,8 @@ ARG CREATED ARG VERSION ARG GIT_REF=origin/master +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" + # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ diff --git a/README.md b/README.md index 3a8384f..ac603e6 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,7 @@ docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/sr ``` > Note: You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml). +> Or you can pass in `-e CONDUIT_CONFIG=""` and configure Conduit purely with env vars. Or build and run it with docker or docker-compose. 
[Read more](docker/README.md) diff --git a/docker-compose.yml b/docker-compose.yml index 38c8a11..cb98a11 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -29,7 +29,9 @@ services: ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 - # CONDUIT_LOG: normal # Available levels are: off, debug, normal, critical + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string '' + # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging + # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_REGISTRATION : 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' diff --git a/docker/README.md b/docker/README.md index 6bf36f1..499d1ad 100644 --- a/docker/README.md +++ b/docker/README.md @@ -44,7 +44,8 @@ docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/sr ``` For detached mode, you also need to use the `-d` flag. You also need to supply a `conduit.toml` config file, you can find an example [here](../conduit-example.toml). -You can pass in more env vars as are shown here, for an overview of possible values, you can take a look at the `docker-compose.yml` file. +You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need +too pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 25497c6..ef14ca9 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -29,7 +29,9 @@ services: ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 - # CONDUIT_LOG: normal # Available levels are: off, debug, normal, critical + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string '' + # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging + # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_REGISTRATION : 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' From c6625d83e691ab05f0568ed50951c676379830c8 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sat, 22 May 2021 16:53:08 +0200 Subject: [PATCH 0584/1727] Add CONDUIT_TRUSTED_SERVERS config param --- docker-compose.yml | 20 +++++++++++--------- docker/docker-compose.traefik.yml | 2 ++ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index cb98a11..2dcf906 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,17 +5,17 @@ services: homeserver: ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: matrixconduit/matrix-conduit:latest + #image: matrixconduit/matrix-conduit:latest ### If you want to build a fresh image from the sources, then comment the image line and uncomment the ### build lines. 
If you want meaningful labels in your built Conduit image, you should run docker-compose like this: ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - # build: - # context: . - # args: - # CREATED: - # VERSION: - # LOCAL: 'false' - # GIT_REF: origin/master + build: + context: . + args: + CREATED: '2021-03-16T08:18:27Z' + VERSION: '0.1.0' + LOCAL: 'false' + GIT_REF: origin/master restart: unless-stopped ports: - 8448:8000 @@ -26,10 +26,12 @@ services: # - ./conduit.toml:/srv/conduit/conduit.toml environment: CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_TRUSTED_SERVERS: + - 'matrix.org' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string '' + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index ef14ca9..c5728a0 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -26,6 +26,8 @@ services: - proxy environment: ROCKET_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_TRUSTED_SERVERS: + - 'matrix.org' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 From 90cd11d8506828c5fe10c524973d9429ca0d12c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 18:49:30 +0200 Subject: [PATCH 0585/1727] fix: Forbidden instead of InvalidParam when joining --- src/client_server/membership.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 75d7258..206ea9d 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -839,7 +839,7 @@ pub async fn invite_helper( if !auth_check { return Err(Error::BadRequest( - ErrorKind::InvalidParam, + ErrorKind::Forbidden, "Event is not authorized.", )); } From 634fe04c33d45f091856e64aaf51508b0e9730b0 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sat, 22 May 2021 20:49:07 +0200 Subject: [PATCH 0586/1727] Fix wrong env var name in traefik compose --- docker/docker-compose.traefik.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index c5728a0..629e79b 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -25,7 +25,7 @@ services: networks: - proxy environment: - ROCKET_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name CONDUIT_TRUSTED_SERVERS: - 'matrix.org' ### Uncomment and change values as desired From c1b2b468b8ccb486b9c9ae7426db1064e8c89a49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 21:33:31 +0200 Subject: [PATCH 0587/1727] fix: bad except in ruma wrapper --- src/ruma_wrapper.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index d0f7303..f2b9b9f 100644 --- 
a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -59,7 +59,7 @@ where let token = request .headers() .get_one("Authorization") - .map(|s| s[7..].to_owned()) // Split off "Bearer " + .and_then(|s| s.get(7..)) // Split off "Bearer " .or_else(|| request.query_value("access_token").and_then(|r| r.ok())); let limit = db.globals.max_request_size(); @@ -134,16 +134,20 @@ where } AuthScheme::ServerSignatures => { // Get origin from header - let x_matrix = match request.headers().get_one("Authorization").map(|s| { + let x_matrix = match request + .headers() + .get_one("Authorization") + .and_then(|s| // Split off "X-Matrix " and parse the rest - s[9..] - .split_terminator(',') - .map(|field| { - let mut splits = field.splitn(2, '='); - (splits.next(), splits.next().map(|s| s.trim_matches('"'))) - }) - .collect::>() - }) { + s.get(9..)) + .map(|s| { + s.split_terminator(',') + .map(|field| { + let mut splits = field.splitn(2, '='); + (splits.next(), splits.next().map(|s| s.trim_matches('"'))) + }) + .collect::>() + }) { Some(t) => t, None => { warn!("No Authorization header"); From fd69ac621c2ff5b19dfaadbff70d9484a220e925 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 22 May 2021 21:41:08 +0200 Subject: [PATCH 0588/1727] fix: run ci with docker --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b3dcd5e..e80d27e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,5 +1,8 @@ image: "rust:latest" +default: + tags: [docker] + cache: paths: - target From 1939e628141c4dcdf0c177845e84daf0cd5de18b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 23 May 2021 11:11:20 +0200 Subject: [PATCH 0589/1727] fmt --- src/ruma_wrapper.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index f2b9b9f..147df3c 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -137,9 +137,7 @@ where let x_matrix = match request .headers() .get_one("Authorization") - .and_then(|s| - // Split off "X-Matrix " and parse the rest - s.get(9..)) + .and_then(|s| s.get(9..)) // Split off "X-Matrix " and parse the rest .map(|s| { s.split_terminator(',') .map(|field| { From e5c7119516db04fcad6440892fcf0b1c1bbf5715 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 23 May 2021 16:45:32 +0200 Subject: [PATCH 0590/1727] feat: forward federation errors to the client --- src/error.rs | 21 +++++++++++++++++---- src/server_server.rs | 25 +++++++++++++++++-------- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/error.rs b/src/error.rs index 6c37bed..e2664e2 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,11 @@ use log::{error, warn}; -use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; +use ruma::{ + api::client::{ + error::{Error as RumaError, ErrorKind}, + r0::uiaa::UiaaInfo, + }, + ServerName, +}; use thiserror::Error; #[cfg(feature = "conduit_bin")] @@ -10,7 +16,7 @@ use { response::{self, Responder}, Request, }, - ruma::api::client::{error::Error as RumaError, r0::uiaa::UiaaResponse}, + ruma::api::client::r0::uiaa::UiaaResponse, }; pub type Result = std::result::Result; @@ -33,6 +39,8 @@ pub enum Error { source: reqwest::Error, }, #[error("{0}")] + FederationError(Box, RumaError), + #[error("{0}")] BadServerResponse(&'static str), #[error("{0}")] BadConfig(&'static str), @@ -66,8 +74,13 @@ where 'o: 'r, { fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { - if let Self::Uiaa(uiaainfo) = 
&self { - return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo.clone())).respond_to(r); + if let Self::Uiaa(uiaainfo) = self { + return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo)).respond_to(r); + } + + if let Self::FederationError(origin, mut error) = self { + error.message = format!("Answer from {}: {}", origin, error.message); + return RumaResponse::from(error).respond_to(r); } let message = format!("{}", self); diff --git a/src/server_server.rs b/src/server_server.rs index 699cbbe..82e51fc 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -9,7 +9,7 @@ use regex::Regex; use rocket::{response::content::Json, State}; use ruma::{ api::{ - client::error::ErrorKind, + client::error::{Error as RumaError, ErrorKind}, federation::{ device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, @@ -27,7 +27,7 @@ use ruma::{ query::{get_profile_information, get_room_information}, transactions::{edu::Edu, send_transaction_message}, }, - IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, + EndpointError, IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -261,12 +261,21 @@ where ); } - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| Error::BadServerResponse("Server returned bad response.")) + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status == 200 { + let response = T::IncomingResponse::try_from_http_response(http_response); + response.map_err(|_| Error::BadServerResponse("Server returned bad 200 response.")) + } else { + Err(Error::FederationError( + destination.to_owned(), + RumaError::try_from_http_response(http_response).map_err(|_| { + Error::BadServerResponse("Server returned bad error response.") + })?, + )) + } } Err(e) => Err(e.into()), } From 57ecd81534aed4cd26d94bcb4c176c8d0fc7e295 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 24 May 2021 13:21:15 +0200 Subject: [PATCH 0591/1727] fix: logs for ruma crate --- src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 7257d69..fc61a0b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -212,8 +212,8 @@ async fn main() { let rocket = setup_rocket(raw_config, db); rocket.launch().await.unwrap(); } else { - std::env::set_var("CONDUIT_LOG", config.log); - pretty_env_logger::init_custom_env("CONDUIT_LOG"); + std::env::set_var("RUST_LOG", config.log); + tracing_subscriber::fmt::init(); let rocket = setup_rocket(raw_config, db); rocket.launch().await.unwrap(); From aacf6289db3cc97869493943189a5d1afa861f3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 24 May 2021 17:59:06 +0200 Subject: [PATCH 0592/1727] improvement: increase default max concurrent requests --- DEPLOY.md | 2 +- conduit-example.toml | 2 +- debian/postinst | 2 +- src/database.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 02073ff..c48b778 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -99,7 +99,7 @@ allow_federation = true trusted_servers = ["matrix.org"] #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 -#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#max_concurrent_requests = 100 # How many requests Conduit 
sends to other servers at the same time #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/conduit-example.toml b/conduit-example.toml index 3ac3a48..66c105b 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -36,7 +36,7 @@ max_request_size = 20_000_000 # in bytes trusted_servers = ["matrix.org"] #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 -#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 diff --git a/debian/postinst b/debian/postinst index c3d727c..6a4cdb8 100644 --- a/debian/postinst +++ b/debian/postinst @@ -74,7 +74,7 @@ max_request_size = 20_000_000 # in bytes #allow_jaeger = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 -#max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 EOF diff --git a/src/database.rs b/src/database.rs index 76eae24..561b561 100644 --- a/src/database.rs +++ b/src/database.rs @@ -67,7 +67,7 @@ fn default_max_request_size() -> u32 { } fn default_max_concurrent_requests() -> u16 { - 4 + 100 } fn default_log() -> String { From 8387ceacab2e8ede4fe83090b239f40f3eada7ff Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 25 May 2021 21:34:31 +0200 Subject: [PATCH 0593/1727] Fix docker-compose trusted_servers env var --- docker-compose.yml | 3 +-- docker/docker-compose.traefik.yml | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 2dcf906..cfc2462 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -26,8 +26,7 @@ services: # - ./conduit.toml:/srv/conduit/conduit.toml environment: CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name - CONDUIT_TRUSTED_SERVERS: - - 'matrix.org' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 629e79b..943cf3c 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -26,8 +26,7 @@ services: - proxy environment: CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name - CONDUIT_TRUSTED_SERVERS: - - 'matrix.org' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 # CONDUIT_PORT: 8000 From ddcf1a715b68b6d924b2afcb1a50655c5e67899a Mon Sep 17 00:00:00 2001 From: phesch Date: Wed, 26 May 2021 17:16:40 +0200 Subject: [PATCH 0594/1727] Fix redacted_because field being sent as a string --- src/pdu.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/pdu.rs b/src/pdu.rs index 34f72d5..a72f04d 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -74,9 +74,7 @@ impl PduEvent { self.unsigned.insert( "redacted_because".to_owned(), - serde_json::to_string(reason) - .expect("PduEvent::to_string always works") - .into(), + serde_json::to_value(reason).expect("to_value(PduEvent) always works"), ); self.content = new_content.into(); From 
daa1fc90a717826afe13bdeb2a1aadf461b5f6d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 27 May 2021 10:47:06 +0200 Subject: [PATCH 0595/1727] fix: state resolution bugs --- Cargo.lock | 89 ++++++++++++++++++++++---------------------- Cargo.toml | 2 +- src/server_server.rs | 6 ++- 3 files changed, 51 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e43638d..accd12a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,9 +146,9 @@ checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.67" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" dependencies = [ "jobserver", ] @@ -222,9 +222,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" +checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" [[package]] name = "constant_time_eq" @@ -586,9 +586,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if 1.0.0", "libc", @@ -695,9 +695,9 @@ checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" @@ -710,9 +710,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.7" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" +checksum = "d3f71a7eea53a3f8257a7b4795373ff886397178cd634430ea94e12d7fe4fe34" dependencies = [ "bytes", "futures-channel", @@ -884,9 +884,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" +checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" dependencies = [ "serde", ] @@ -913,9 +913,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" +checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" [[package]] name = "linked-hash-map" @@ -1215,9 +1215,9 @@ checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = "pear" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" +checksum = 
"15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" dependencies = [ "inlinable_string", "pear_codegen", @@ -1226,9 +1226,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" +checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1337,9 +1337,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] @@ -1626,7 +1626,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "assign", "js_int", @@ -1647,7 +1647,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "bytes", "http", @@ -1663,7 +1663,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1674,7 +1674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "ruma-api", "ruma-common", @@ -1688,7 +1688,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "assign", "bytes", @@ -1708,7 +1708,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "indexmap", "js_int", @@ -1723,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = 
"git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "indoc", "js_int", @@ -1738,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "js_int", "ruma-api", @@ -1764,7 +1764,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "paste", "rand", @@ -1778,7 +1778,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "quote", "ruma-identifiers-validation", @@ -1788,12 +1788,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" [[package]] name = "ruma-identity-service-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "js_int", "ruma-api", @@ -1806,7 +1806,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "js_int", "ruma-api", @@ -1821,7 +1821,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "bytes", "form_urlencoded", @@ -1835,7 +1835,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = 
"git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1846,20 +1846,21 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", "serde_json", + "tracing", "untrusted", ] [[package]] name = "ruma-state-res" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=a238a0dda5b06fad146f8f01d690cbe011d13245#a238a0dda5b06fad146f8f01d690cbe011d13245" +source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" dependencies = [ "itertools 0.10.0", "js_int", @@ -2260,18 +2261,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" +checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" dependencies = [ "proc-macro2", "quote", @@ -2583,9 +2584,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "twoway" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b40075910de3a912adbd80b5d8bad6ad10a23eeb1f5bf9d4006839e899ba5bc" +checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" dependencies = [ "memchr", "unchecked-index", diff --git a/Cargo.toml b/Cargo.toml index 7bc4dba..c310277 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "a238a0dda5b06fad146f8f01d690cbe011d13245", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "42bbb81bd2e3919080d3d8689aefb755b7ec8223", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/server_server.rs b/src/server_server.rs index 82e51fc..51b3fb6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1209,6 +1209,10 @@ pub fn handle_incoming_pdu<'a>( } } } + 
auth_cache.extend( + map.iter() + .map(|pdu| (pdu.1.event_id.clone(), pdu.1.clone())), + ); auth_events.push(state_auth); } @@ -1216,7 +1220,7 @@ pub fn handle_incoming_pdu<'a>( auth_cache.extend( auth_events .iter() - .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id.clone(), pdu.clone()))) .flatten(), ); auth_cache.extend( From 7db59c550ff106642d2756cdf8422bfd5e6936ea Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Thu, 27 May 2021 13:59:40 -0300 Subject: [PATCH 0596/1727] fix: also return successful PDUs in /send/:txnId --- src/server_server.rs | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 51b3fb6..7903d8a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -628,19 +628,20 @@ pub async fn send_transaction_message_route<'a>( }; let start_time = Instant::now(); - if let Err(e) = handle_incoming_pdu( - &body.origin, - &event_id, - value, - true, - &db, - &pub_key_map, - &mut auth_cache, - ) - .await - { - resolved_map.insert(event_id.clone(), Err(e)); - } + resolved_map.insert( + event_id.clone(), + handle_incoming_pdu( + &body.origin, + &event_id, + value, + true, + &db, + &pub_key_map, + &mut auth_cache, + ) + .await + .map(|_| ()), + ); let elapsed = start_time.elapsed(); if elapsed > Duration::from_secs(1) { From 6af942814f4cf9c7442b39279baaf27765e2820d Mon Sep 17 00:00:00 2001 From: Vadim Zeitlin Date: Thu, 27 May 2021 23:14:32 +0200 Subject: [PATCH 0597/1727] Fix some typos in the README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ac603e6..e982424 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Yes! Just open a Matrix client ( or Element Android for example) and register on the `https://conduit.koesters.xyz` homeserver. -#### What is it build on? +#### What is it built on? - [Ruma](https://www.ruma.io): Useful structures for endpoint requests and responses that can be (de)serialized @@ -56,7 +56,7 @@ If you want to connect an Appservice to Conduit, take a look at the [Appservice If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). -##### Deloy using a Debian package +##### Deploy using a Debian package You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info): From 0f16a79888f38db46167ee214fe1d57bfcd4f666 Mon Sep 17 00:00:00 2001 From: Vadim Zeitlin Date: Thu, 27 May 2021 23:13:50 +0200 Subject: [PATCH 0598/1727] Specify the minimum required Rust version in the manifest Also mention it in the deployment guide. --- Cargo.toml | 1 + DEPLOY.md | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c310277..1488a3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.1.0" edition = "2018" +rust = "1.50" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/DEPLOY.md b/DEPLOY.md index c48b778..f26feaa 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -6,7 +6,7 @@ If you run into any problems while setting up Conduit, write an email to `timo@k ## Installing Conduit -You have to download the binary that fits your machine. 
Run `uname -m` to see +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: - x84_64: `https://conduit.rs/master/x86_64/conduit-bin` - armv7: `https://conduit.rs/master/armv7/conduit-bin` @@ -18,6 +18,13 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` +Alternatively, you may compile the binary yourself using +```bash +$ cargo build --release +``` +Note that this currently requires Rust 1.50. + + ## Adding a Conduit user While Conduit can run as any user it is usually better to use dedicated users for different services. From f199b51f9795e7a58192221cd2902990efb94bab Mon Sep 17 00:00:00 2001 From: Vadim Zeitlin Date: Fri, 28 May 2021 09:42:59 +0200 Subject: [PATCH 0599/1727] Move the link to cross-compiling guide to DEPLOY.md --- DEPLOY.md | 1 + README.md | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index f26feaa..fe8c331 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -24,6 +24,7 @@ $ cargo build --release ``` Note that this currently requires Rust 1.50. +If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). ## Adding a Conduit user diff --git a/README.md b/README.md index e982424..e856dd5 100644 --- a/README.md +++ b/README.md @@ -54,8 +54,6 @@ more](DEPLOY.md) If you want to connect an Appservice to Conduit, take a look at the [Appservice Guide](APPSERVICES.md). -If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). - ##### Deploy using a Debian package You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info): From 5b5cc0574e66cbe4b08a5b8cb5f850c6cd22f3e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 28 May 2021 13:44:40 +0200 Subject: [PATCH 0600/1727] feat: implement /claim, handle to-device events --- Cargo.lock | 302 +++++++++++++++++++++++++++++++++----- Cargo.toml | 2 +- src/client_server/keys.rs | 50 ++++--- src/client_server/sync.rs | 8 +- src/database/globals.rs | 2 +- src/database/rooms.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 101 ++++++++++++- 8 files changed, 409 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index accd12a..a07d5c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,6 +120,15 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", +] + [[package]] name = "bumpalo" version = "3.6.1" @@ -198,7 +207,7 @@ dependencies = [ "opentelemetry", "opentelemetry-jaeger", "pretty_env_logger", - "rand", + "rand 0.8.3", "regex", "reqwest", "ring", @@ -220,6 +229,11 @@ dependencies = [ "webpki", ] +[[package]] +name = "const-oid" +version = "0.5.2" +source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" + [[package]] name = "const_fn" version = "0.4.8" @@ -265,6 +279,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +[[package]] +name = "cpufeatures" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" version = "1.2.1" @@ -298,6 +321,19 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "curve25519-dalek" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" +dependencies = [ + "byteorder", + "digest", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + [[package]] name = "data-encoding" version = "2.3.2" @@ -314,6 +350,14 @@ dependencies = [ "byteorder", ] +[[package]] +name = "der" +version = "0.3.4" +source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +dependencies = [ + "const-oid", +] + [[package]] name = "derive_more" version = "0.99.14" @@ -356,6 +400,15 @@ dependencies = [ "syn", ] +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + [[package]] name = "directories" version = "3.0.2" @@ -388,6 +441,29 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" +[[package]] +name = "ed25519" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand 0.7.3", + "serde", + "sha2", + "zeroize", +] + [[package]] name = "either" version = "1.6.1" @@ -584,6 +660,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "generic-array" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + [[package]] name = "getrandom" version = "0.2.3" @@ -592,7 +689,7 @@ checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -1138,6 +1235,12 @@ version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "openssl-probe" version = "0.1.4" @@ -1156,7 +1259,7 @@ dependencies = [ "lazy_static", "percent-encoding", "pin-project", - "rand", + "rand 0.8.3", "thiserror", ] @@ -1285,6 +1388,16 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.6.0" +source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +dependencies = [ + "der", + "spki", + "zeroize", +] + [[package]] name = "png" version = "0.16.8" @@ -1372,6 +1485,19 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + [[package]] name = "rand" version = "0.8.3" @@ -1379,9 +1505,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", ] [[package]] @@ -1391,7 +1527,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.2", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", ] [[package]] @@ -1400,7 +1545,16 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom", + "getrandom 0.2.3", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -1409,7 +1563,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core", + "rand_core 0.6.2", ] [[package]] @@ -1427,7 +1581,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom", + "getrandom 0.2.3", "redox_syscall", ] @@ -1567,7 +1721,7 @@ dependencies = [ "num_cpus", "parking_lot", "pin-project-lite", - "rand", + "rand 0.8.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1626,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "assign", "js_int", @@ -1647,7 +1801,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0" -source = 
"git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "bytes", "http", @@ -1663,7 +1817,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1674,7 +1828,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "ruma-api", "ruma-common", @@ -1688,7 +1842,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "assign", "bytes", @@ -1708,7 +1862,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "indexmap", "js_int", @@ -1723,7 +1877,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "indoc", "js_int", @@ -1738,7 +1892,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1749,7 +1903,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "js_int", "ruma-api", @@ -1764,10 +1918,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "paste", - "rand", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", 
"ruma-serde", @@ -1778,7 +1932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "quote", "ruma-identifiers-validation", @@ -1788,12 +1942,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" [[package]] name = "ruma-identity-service-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "js_int", "ruma-api", @@ -1806,7 +1960,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "js_int", "ruma-api", @@ -1821,7 +1975,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "bytes", "form_urlencoded", @@ -1835,7 +1989,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1846,13 +2000,17 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "base64 0.13.0", - "ring", + "ed25519-dalek", + "pkcs8", + "rand 0.7.3", "ruma-identifiers", "ruma-serde", "serde_json", + "sha2", + "thiserror", "tracing", "untrusted", ] @@ -1860,7 +2018,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=42bbb81bd2e3919080d3d8689aefb755b7ec8223#42bbb81bd2e3919080d3d8689aefb755b7ec8223" +source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" dependencies = [ "itertools 0.10.0", "js_int", @@ -2065,6 +2223,19 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +[[package]] +name = "sha2" +version = 
"0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" +dependencies = [ + "block-buffer", + "cfg-if 1.0.0", + "cpufeatures", + "digest", + "opaque-debug", +] + [[package]] name = "sharded-slab" version = "0.1.1" @@ -2083,6 +2254,12 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" + [[package]] name = "simple_asn1" version = "0.4.1" @@ -2150,6 +2327,14 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.3.0" +source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +dependencies = [ + "der", +] + [[package]] name = "stable-pattern" version = "0.1.0" @@ -2225,6 +2410,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +[[package]] +name = "subtle" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" + [[package]] name = "syn" version = "1.0.72" @@ -2236,6 +2427,18 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + [[package]] name = "tempfile" version = "3.2.0" @@ -2244,7 +2447,7 @@ checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ "cfg-if 1.0.0", "libc", - "rand", + "rand 0.8.3", "redox_syscall", "remove_dir_all", "winapi", @@ -2548,7 +2751,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand", + "rand 0.8.3", "smallvec", "thiserror", "tinyvec", @@ -2592,6 +2795,12 @@ dependencies = [ "unchecked-index", ] +[[package]] +name = "typenum" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" + [[package]] name = "ubyte" version = "0.10.1" @@ -2687,6 +2896,12 @@ dependencies = [ "try-lock", ] +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "wasi" version = "0.10.2+wasi-snapshot-preview1" @@ -2863,6 +3078,27 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +[[package]] +name = "zeroize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +dependencies = [ + "proc-macro2", + "quote", + "syn", + 
"synstructure", +] + [[package]] name = "zstd" version = "0.5.4+zstd.1.4.7" diff --git a/Cargo.toml b/Cargo.toml index 1488a3a..6ae9077 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ rust = "1.50" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "42bbb81bd2e3919080d3d8689aefb755b7ec8223", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 7a88fb6..d856bf3 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -12,7 +12,7 @@ use ruma::{ }, }, encryption::UnsignedDeviceInfo, - DeviceId, UserId, + DeviceId, DeviceKeyAlgorithm, UserId, }; use std::collections::{BTreeMap, HashSet}; @@ -98,29 +98,11 @@ pub async fn claim_keys_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let mut one_time_keys = BTreeMap::new(); - for (user_id, map) in &body.one_time_keys { - let mut container = BTreeMap::new(); - for (device_id, key_algorithm) in map { - if let Some(one_time_keys) = - db.users - .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? - { - let mut c = BTreeMap::new(); - c.insert(one_time_keys.0, one_time_keys.1); - container.insert(device_id.clone(), c); - } - } - one_time_keys.insert(user_id.clone(), container); - } + let response = claim_keys_helper(&body.one_time_keys, &db)?; db.flush().await?; - Ok(claim_keys::Response { - failures: BTreeMap::new(), - one_time_keys, - } - .into()) + Ok(response.into()) } #[cfg_attr( @@ -375,3 +357,29 @@ pub fn get_keys_helper bool>( failures: BTreeMap::new(), }) } + +pub fn claim_keys_helper( + one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, + db: &Database, +) -> Result { + let mut one_time_keys = BTreeMap::new(); + for (user_id, map) in one_time_keys_input { + let mut container = BTreeMap::new(); + for (device_id, key_algorithm) in map { + if let Some(one_time_keys) = + db.users + .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? 
+ { + let mut c = BTreeMap::new(); + c.insert(one_time_keys.0, one_time_keys.1); + container.insert(device_id.clone(), c); + } + } + one_time_keys.insert(user_id.clone(), container); + } + + Ok(claim_keys::Response { + failures: BTreeMap::new(), + one_time_keys, + }) +} diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index b0a22dd..63ad590 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,6 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; +use log::error; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -71,7 +72,12 @@ pub async fn sync_events_route( let mut non_timeline_pdus = db .rooms .pdus_since(&sender_user, &room_id, since)? - .filter_map(|r| r.ok()); // Filter out buggy events + .filter_map(|r| { + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }); // Filter out buggy events // Take the last 10 events for the timeline let timeline_pdus = non_timeline_pdus diff --git a/src/database/globals.rs b/src/database/globals.rs index 0dd73b2..5d91d37 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -94,7 +94,7 @@ impl Globals { .map(|key| (version, key)) }) .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::new(&key, version) + ruma::signatures::Ed25519KeyPair::from_der(&key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ede8589..6b17f39 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1494,7 +1494,12 @@ impl Rooms { Ok(self .pduid_pdu .range(first_pdu_id..last_pdu_id) - .filter_map(|r| r.ok()) + .filter_map(|r| { + if r.is_err() { + error!("Bad pdu in pduid_pdu: {:?}", r); + } + r.ok() + }) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; diff --git a/src/main.rs b/src/main.rs index fc61a0b..e76cea4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -159,6 +159,7 @@ fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket( .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; } } - Edu::DeviceListUpdate(_) => {} - Edu::DirectToDevice(_) => {} + Edu::DeviceListUpdate(_) => { + // TODO: Instead of worrying about stream ids we can just fetch all devices again + } + Edu::DirectToDevice(DirectDeviceContent { + sender, + ev_type, + message_id, + messages, + }) => { + // Check if this is a new transaction id + if db + .transaction_ids + .existing_txnid(&sender, None, &message_id)? + .is_some() + { + continue; + } + + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { + db.users.add_to_device_event( + &sender, + &target_user_id, + &target_device_id, + &ev_type, + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )? 
+ } + + to_device::DeviceIdOrAllDevices::AllDevices => { + for target_device_id in db.users.all_device_ids(&target_user_id) { + db.users.add_to_device_event( + &sender, + &target_user_id, + &target_device_id?, + &ev_type, + serde_json::from_str(event.get()).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )?; + } + } + } + } + } + + // Save transaction id with empty data + db.transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + } Edu::_Custom(_) => {} } } @@ -2335,6 +2401,29 @@ pub fn get_keys_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/federation/v1/user/keys/claim", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn claim_keys_route<'a>( + db: State<'a, Database>, + body: Ruma, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = claim_keys_helper(&body.one_time_keys, &db)?; + + db.flush().await?; + + Ok(claim_keys::v1::Response { + one_time_keys: result.one_time_keys, + } + .into()) +} + pub async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, From deacdf6f58458167454359f60fb3d9cc04cfbe8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 28 May 2021 22:53:00 +0200 Subject: [PATCH 0601/1727] fix: is_direct for locally invited users --- src/client_server/membership.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 206ea9d..96fe800 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -945,7 +945,7 @@ pub async fn invite_helper( membership: member::MembershipState::Invite, displayname: db.users.displayname(&user_id)?, avatar_url: db.users.avatar_url(&user_id)?, - is_direct: None, + is_direct: Some(is_direct), third_party_invite: None, }) .expect("event is valid, we just created it"), From 59dd3676d50cb8db76fffa0f6a1286fd7c12a3a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 29 May 2021 10:49:13 +0200 Subject: [PATCH 0602/1727] fix: putting global account data works now --- src/client_server/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index e7daa9e..ce437ef 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -63,7 +63,7 @@ pub async fn set_room_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = serde_json::from_str(body.data.get()) + let data = serde_json::from_str::(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); From 88cf043f944f73f3c31d30c7f8cb5e773f4f3c73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 30 May 2021 21:55:43 +0200 Subject: [PATCH 0603/1727] fix: deactivate accounts that should be deactivated --- src/client_server/account.rs | 12 +++---- src/client_server/media.rs | 10 ++++-- src/database.rs | 21 ++++++++++- src/database/media.rs | 69 +++++++++++++++++++++++------------- src/database/users.rs | 23 +++++++----- 5 files changed, 91 insertions(+), 44 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 24b04d5..0cf30a0 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -179,12 +179,11 @@ pub 
async fn register_route( let password = if is_guest { None } else { - body.password.clone() - } - .unwrap_or_default(); + body.password.as_deref() + }; // Create user - db.users.create(&user_id, &password)?; + db.users.create(&user_id, password)?; // Initial data db.account_data.update( @@ -233,7 +232,7 @@ pub async fn register_route( let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) .expect("@conduit:server_name is valid"); - db.users.create(&conduit_user, "")?; + db.users.create(&conduit_user, None)?; let room_id = RoomId::new(db.globals.server_name()); @@ -547,7 +546,8 @@ pub async fn change_password_route( return Err(Error::Uiaa(uiaainfo)); } - db.users.set_password(&sender_user, &body.new_password)?; + db.users + .set_password(&sender_user, Some(&body.new_password))?; if body.logout_devices { // Logout all devices except the current one diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f9350e0..74ca6c8 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -38,7 +38,11 @@ pub async fn create_content_route( ); db.media.create( mxc.clone(), - &body.filename.as_deref(), + &body + .filename + .as_ref() + .map(|filename| "inline; filename=".to_owned() + filename) + .as_deref(), &body.content_type.as_deref(), &body.file, )?; @@ -64,7 +68,7 @@ pub async fn get_content_route( let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { - filename, + content_disposition, content_type, file, }) = db.media.get(&mxc)? @@ -72,7 +76,7 @@ pub async fn get_content_route( Ok(get_content::Response { file, content_type, - content_disposition: filename, + content_disposition, } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { diff --git a/src/database.rs b/src/database.rs index 561b561..7a55b03 100644 --- a/src/database.rs +++ b/src/database.rs @@ -11,7 +11,7 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; -use crate::{Error, Result}; +use crate::{utils, Error, Result}; use directories::ProjectDirs; use futures::StreamExt; use log::{error, info}; @@ -246,6 +246,25 @@ impl Database { info!("Migration: 0 -> 1 finished"); } + if db.globals.database_version()? 
< 2 { + // We accidentally inserted hashed versions of "" into the db instead of just "" + for userid_password in db.users.userid_password.iter() { + let (userid, password) = userid_password?; + + let password = utils::string_from_bytes(&password); + + if password.map_or(false, |password| { + argon2::verify_encoded(&password, b"").unwrap_or(false) + }) { + db.users.userid_password.insert(userid, b"")?; + } + } + + db.globals.bump_database_version(2)?; + + info!("Migration: 1 -> 2 finished"); + } + // This data is probably outdated db.rooms.edus.presenceid_presence.clear()?; diff --git a/src/database/media.rs b/src/database/media.rs index 37fcb74..28ef88a 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -4,14 +4,14 @@ use crate::{utils, Error, Result}; use std::mem; pub struct FileMeta { - pub filename: Option, + pub content_disposition: Option, pub content_type: Option, pub file: Vec, } #[derive(Clone)] pub struct Media { - pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + Filename + ContentType + pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType } impl Media { @@ -19,7 +19,7 @@ impl Media { pub fn create( &self, mxc: String, - filename: &Option<&str>, + content_disposition: &Option<&str>, content_type: &Option<&str>, file: &[u8], ) -> Result<()> { @@ -28,7 +28,12 @@ impl Media { key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail key.push(0xff); - key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); + key.extend_from_slice( + content_disposition + .as_ref() + .map(|f| f.as_bytes()) + .unwrap_or_default(), + ); key.push(0xff); key.extend_from_slice( content_type @@ -46,7 +51,7 @@ impl Media { pub fn upload_thumbnail( &self, mxc: String, - filename: &Option, + content_disposition: &Option, content_type: &Option, width: u32, height: u32, @@ -57,7 +62,12 @@ impl Media { key.extend_from_slice(&width.to_be_bytes()); key.extend_from_slice(&height.to_be_bytes()); key.push(0xff); - key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); + key.extend_from_slice( + content_disposition + .as_ref() + .map(|f| f.as_bytes()) + .unwrap_or_default(), + ); key.push(0xff); key.extend_from_slice( content_type @@ -92,20 +102,24 @@ impl Media { }) .transpose()?; - let filename_bytes = parts + let content_disposition_bytes = parts .next() .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let filename = if filename_bytes.is_empty() { + let content_disposition = if content_disposition_bytes.is_empty() { None } else { - Some(utils::string_from_bytes(filename_bytes).map_err(|_| { - Error::bad_database("Filename in mediaid_file is invalid unicode.") - })?) 
+ Some( + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database( + "Content Disposition in mediaid_file is invalid unicode.", + ) + })?, + ) }; Ok(Some(FileMeta { - filename, + content_disposition, content_type, file: file.to_vec(), })) @@ -169,21 +183,22 @@ impl Media { }) .transpose()?; - let filename_bytes = parts + let content_disposition_bytes = parts .next() .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let filename = if filename_bytes.is_empty() { + let content_disposition = if content_disposition_bytes.is_empty() { None } else { Some( - utils::string_from_bytes(filename_bytes) - .map_err(|_| Error::bad_database("Filename in db is invalid."))?, + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database("Content Disposition in db is invalid.") + })?, ) }; Ok(Some(FileMeta { - filename, + content_disposition, content_type, file: file.to_vec(), })) @@ -202,16 +217,20 @@ impl Media { }) .transpose()?; - let filename_bytes = parts + let content_disposition_bytes = parts .next() .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let filename = if filename_bytes.is_empty() { + let content_disposition = if content_disposition_bytes.is_empty() { None } else { - Some(utils::string_from_bytes(filename_bytes).map_err(|_| { - Error::bad_database("Filename in mediaid_file is invalid unicode.") - })?) + Some( + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database( + "Content Disposition in mediaid_file is invalid unicode.", + ) + })?, + ) }; if let Ok(image) = image::load_from_memory(&file) { @@ -219,7 +238,7 @@ impl Media { let original_height = image.height(); if width > original_width || height > original_height { return Ok(Some(FileMeta { - filename, + content_disposition, content_type, file: file.to_vec(), })); @@ -286,14 +305,14 @@ impl Media { self.mediaid_file.insert(thumbnail_key, &*thumbnail_bytes)?; Ok(Some(FileMeta { - filename, + content_disposition, content_type, file: thumbnail_bytes.to_vec(), })) } else { // Couldn't parse file to generate thumbnail, send original Ok(Some(FileMeta { - filename, + content_disposition, content_type, file: file.to_vec(), })) diff --git a/src/database/users.rs b/src/database/users.rs index a5b8775..52e6e33 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -49,7 +49,7 @@ impl Users { } /// Create a new user account on this homeserver. 
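
The `src/database/users.rs` hunk that continues below changes `create` and `set_password` to take `Option<&str>`: a `None` password is now stored as a literal empty string instead of being run through Argon2, which is exactly the inconsistency the 1 → 2 migration above detects with `argon2::verify_encoded(&password, b"")`. As a minimal sketch of the crate calls involved — Conduit's own `utils::calculate_hash` wrapper is not part of this excerpt, and the salt, config, and standalone `main` here are illustrative rather than Conduit's actual choices — using the `rust-argon2` crate already listed in the lockfile:

```rust
use argon2::{hash_encoded, verify_encoded, Config};

fn main() -> Result<(), argon2::Error> {
    let config = Config::default();

    // A real password round-trips as expected.
    let hash = hash_encoded(b"correct horse battery staple", b"somesalt", &config)?;
    assert!(verify_encoded(&hash, b"correct horse battery staple")?);
    assert!(!verify_encoded(&hash, b"wrong password")?);

    // The case the migration cleans up: hashing an empty password also yields a
    // valid encoded hash, so such a row looks like a passworded account unless
    // the verifier is run against "". Storing a plain empty string for
    // passwordless accounts removes that ambiguity.
    let empty_hash = hash_encoded(b"", b"somesalt", &config)?;
    assert!(verify_encoded(&empty_hash, b"")?);

    Ok(())
}
```
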
- pub fn create(&self, user_id: &UserId, password: &str) -> Result<()> { + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.set_password(user_id, password)?; Ok(()) } @@ -110,15 +110,20 @@ impl Users { } /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: &str) -> Result<()> { - if let Ok(hash) = utils::calculate_hash(&password) { - self.userid_password.insert(user_id.to_string(), &*hash)?; - Ok(()) + pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + if let Some(password) = password { + if let Ok(hash) = utils::calculate_hash(&password) { + self.userid_password.insert(user_id.to_string(), &*hash)?; + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Password does not meet the requirements.", + )) + } } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) + self.userid_password.insert(user_id.to_string(), "")?; + Ok(()) } } From e1e529d818510b335cba3db02656616ee4eb7267 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 30 May 2021 22:01:12 +0200 Subject: [PATCH 0604/1727] fix: don't apply push rules for users of other homeservers --- src/database/rooms.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6b17f39..703314e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -725,8 +725,9 @@ impl Rooms { .users .iter() .filter_map(|r| r.ok()) - .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + .filter(|user_id| user_id.server_name() == db.globals.server_name()) .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false)) + .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) { // Don't notify the user of their own events if user == pdu.sender { From b84c66dabaa49c65f126efa537f57ea658e8234f Mon Sep 17 00:00:00 2001 From: Jonas Fowl Date: Tue, 1 Jun 2021 00:58:50 +0000 Subject: [PATCH 0605/1727] Generate binaries for 3 architectures in the CI The result is stored in the gitlab package registry --- .gitlab-ci.yml | 90 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 16 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e80d27e..ae9b32b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,28 +1,86 @@ -image: "rust:latest" - -default: - tags: [docker] - -cache: - paths: - - target - - cargohome - +stages: + - test + - build + - release variables: GIT_SUBMODULE_STRATEGY: recursive - CARGO_HOME: "cargohome" FF_USE_FASTZIP: 1 -before_script: - - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - - rustup component add clippy rustfmt test:cargo: + stage: "test" + needs: [] + image: "rust:latest" + variables: + CARGO_HOME: "cargohome" + cache: + paths: + - target + - cargohome + key: test_cache + interruptible: true + before_script: + - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" + - apt-get update -yqq + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - rustup component add clippy rustfmt script: - rustc --version && cargo --version # Print version info for debugging - cargo test --workspace --verbose --locked - cargo fmt --all -- --check - cargo clippy + + +# Compile conduit for different 
linux target architectures +build:cargo: + stage: "build" + needs: [] + interruptible: true + parallel: + matrix: + - TARGET: "x86_64-unknown-linux-gnu" + - TARGET: "armv7-unknown-linux-gnueabihf" + NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" + - TARGET: "aarch64-unknown-linux-gnu" + NEEDED_PACKAGES: "build-essential gcc-8-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" + TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" + TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" + image: "rust:latest" + cache: + paths: + - target/ + key: "build_cache-$TARGET" + variables: + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER: "/linux-runner armv7" + CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc + CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER: "/linux-runner aarch64" + CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc + CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ + before_script: + - apt-get update -yqq + - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" + - "rustup target add $TARGET" + script: + - rustc --version && cargo --version # Print version info for debugging + - cargo build --target $TARGET --release + - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + artifacts: + name: "conduit-$TARGET" + paths: + - "conduit-$TARGET" + + +# Store the resulting binaries into the GitLab package registry, so they can be downloaded +publish:package: + stage: release + image: curlimages/curl:latest + script: + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-x86_64"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-armv7"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-aarch64"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file README.md "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/README.md"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file LICENSE "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/LICENSE"' From bb7a4220d3dfe218395da62141f00960f80361d5 Mon Sep 17 00:00:00 2001 From: Damian Poddebniak Date: Fri, 4 Jun 2021 19:42:08 +0200 Subject: [PATCH 0606/1727] Fix ruma dependency --- Cargo.lock | 46 +++++++++++++++++++++++----------------------- Cargo.toml | 2 +- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a07d5c3..36a6659 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1779,8 +1779,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +version = "0.1.2" +source = 
"git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "assign", "js_int", @@ -1801,7 +1801,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "bytes", "http", @@ -1817,7 +1817,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1828,7 +1828,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "ruma-api", "ruma-common", @@ -1841,8 +1841,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.1" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +version = "0.10.2" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "assign", "bytes", @@ -1861,8 +1861,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.5.1" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +version = "0.5.2" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "indexmap", "js_int", @@ -1877,7 +1877,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "indoc", "js_int", @@ -1892,7 +1892,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1903,7 +1903,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "js_int", "ruma-api", @@ -1918,7 +1918,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.1" -source = 
"git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "paste", "rand 0.8.3", @@ -1932,7 +1932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "quote", "ruma-identifiers-validation", @@ -1941,13 +1941,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" [[package]] name = "ruma-identity-service-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "js_int", "ruma-api", @@ -1960,7 +1960,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "js_int", "ruma-api", @@ -1975,7 +1975,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "bytes", "form_urlencoded", @@ -1989,7 +1989,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1999,8 +1999,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +version = "0.7.2" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2018,7 +2018,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9#e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9" +source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" dependencies = [ "itertools 0.10.0", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 
6ae9077..f36d838 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ rust = "1.50" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "e30c4a6d2071b2a93e8bd6aba52e07c9bb191fc9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "b39537812c12caafcbf8b7bd744a06d196d45281", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio From d0ee82325449bc935fd6b39eee50191cc638ec77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 8 Jun 2021 18:10:00 +0200 Subject: [PATCH 0607/1727] feat: swappable database backend --- Cargo.lock | 106 ++++++ Cargo.toml | 1 + src/client_server/account.rs | 12 +- src/client_server/alias.rs | 11 +- src/client_server/backup.rs | 32 +- src/client_server/config.rs | 10 +- src/client_server/context.rs | 4 +- src/client_server/device.rs | 12 +- src/client_server/directory.rs | 10 +- src/client_server/keys.rs | 17 +- src/client_server/media.rs | 10 +- src/client_server/membership.rs | 22 +- src/client_server/message.rs | 5 +- src/client_server/presence.rs | 8 +- src/client_server/profile.rs | 12 +- src/client_server/push.rs | 22 +- src/client_server/read_marker.rs | 6 +- src/client_server/redact.rs | 3 +- src/client_server/room.rs | 8 +- src/client_server/search.rs | 3 +- src/client_server/session.rs | 8 +- src/client_server/state.rs | 12 +- src/client_server/sync.rs | 34 +- src/client_server/tag.rs | 8 +- src/client_server/to_device.rs | 4 +- src/client_server/typing.rs | 4 +- src/client_server/user_directory.rs | 4 +- src/database.rs | 173 ++++----- src/database/abstraction.rs | 309 ++++++++++++++++ src/database/account_data.rs | 57 ++- src/database/admin.rs | 30 +- src/database/appservice.rs | 31 +- src/database/globals.rs | 81 ++-- src/database/key_backups.rs | 141 ++++--- src/database/media.rs | 23 +- src/database/pusher.rs | 34 +- src/database/rooms.rs | 555 ++++++++++++++-------------- src/database/rooms/edus.rs | 103 +++--- src/database/sending.rs | 122 +++--- src/database/transaction_ids.rs | 14 +- src/database/uiaa.rs | 13 +- src/database/users.rs | 230 ++++++------ src/error.rs | 7 +- src/main.rs | 6 +- src/ruma_wrapper.rs | 7 +- src/server_server.rs | 64 ++-- src/utils.rs | 27 +- 47 files changed, 1434 insertions(+), 981 deletions(-) create mode 100644 src/database/abstraction.rs diff --git a/Cargo.lock b/Cargo.lock index 36a6659..630d414 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -103,6 +103,25 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +[[package]] +name = "bindgen" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" 
+dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + [[package]] name = "bitflags" version = "1.2.1" @@ -162,6 +181,15 @@ dependencies = [ "jobserver", ] +[[package]] +name = "cexpr" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -187,6 +215,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "clang-sys" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -212,6 +251,7 @@ dependencies = [ "reqwest", "ring", "rocket", + "rocksdb", "ruma", "rust-argon2", "rustls", @@ -1008,12 +1048,40 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +[[package]] +name = "libloading" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "6.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] + [[package]] name = "linked-hash-map" version = "0.5.4" @@ -1158,6 +1226,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "nom" +version = "5.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +dependencies = [ + "memchr", + "version_check", +] + [[package]] name = "ntapi" version = "0.3.6" @@ -1339,6 +1417,12 @@ dependencies = [ "syn", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" version = "0.8.3" @@ -1777,6 +1861,16 @@ dependencies = [ "uncased", ] +[[package]] +name = "rocksdb" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "ruma" version = "0.1.2" @@ -2046,6 +2140,12 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.2.3" @@ -2245,6 +2345,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name 
= "shlex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" + [[package]] name = "signal-hook-registry" version = "1.3.0" diff --git a/Cargo.toml b/Cargo.toml index f36d838..eb43da5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "b39537812c12caafcbf8b7bd74 tokio = "1.2.0" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"] } +rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"] } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for the http request / response body type for Ruma endpoints used with reqwest diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 0cf30a0..56de5fc 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, convert::TryInto}; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; @@ -42,7 +42,7 @@ const GUEST_NAME_LENGTH: usize = 10; )] #[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { // Validate user id @@ -85,7 +85,7 @@ pub async fn get_register_available_route( )] #[tracing::instrument(skip(db, body))] pub async fn register_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_registration() { @@ -227,7 +227,7 @@ pub async fn register_route( )?; // If this is the first user on this server, create the admins room - if db.users.count() == 1 { + if db.users.count()? 
== 1 { // Create a user for the server let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) .expect("@conduit:server_name is valid"); @@ -506,7 +506,7 @@ pub async fn register_route( )] #[tracing::instrument(skip(db, body))] pub async fn change_password_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -598,7 +598,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 07b4977..40252af 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use regex::Regex; @@ -22,7 +24,7 @@ use rocket::{delete, get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn create_alias_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if db.rooms.id_from_alias(&body.room_alias)?.is_some() { @@ -43,7 +45,7 @@ pub async fn create_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; @@ -59,7 +61,7 @@ pub async fn delete_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_alias_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { get_alias_helper(&db, &body.room_alias).await @@ -86,7 +88,8 @@ pub async fn get_alias_helper( match db.rooms.id_from_alias(&room_alias)? 
{ Some(r) => room_id = Some(r), None => { - for (_id, registration) in db.appservice.iter_all().filter_map(|r| r.ok()) { + let iter = db.appservice.iter_all()?; + for (_id, registration) in iter.filter_map(|r| r.ok()) { let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 12f3bfd..fcca676 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ @@ -19,7 +21,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn create_backup_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -38,7 +40,7 @@ pub async fn create_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn update_backup_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -56,7 +58,7 @@ pub async fn update_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -84,7 +86,7 @@ pub async fn get_latest_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -111,7 +113,7 @@ pub async fn get_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -130,7 +132,7 @@ pub async fn delete_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -164,7 +166,7 @@ pub async fn add_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -196,7 +198,7 @@ pub async fn add_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -225,7 +227,7 @@ pub async fn add_backup_key_session_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -241,14 +243,14 @@ pub async fn get_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is 
authenticated"); let sessions = db .key_backups - .get_room(&sender_user, &body.version, &body.room_id); + .get_room(&sender_user, &body.version, &body.room_id)?; Ok(get_backup_key_sessions::Response { sessions }.into()) } @@ -259,7 +261,7 @@ pub async fn get_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -281,7 +283,7 @@ pub async fn get_backup_key_session_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -304,7 +306,7 @@ pub async fn delete_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -327,7 +329,7 @@ pub async fn delete_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/config.rs b/src/client_server/config.rs index ce437ef..829bf94 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ @@ -23,7 +25,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -58,7 +60,7 @@ pub async fn set_global_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -90,7 +92,7 @@ pub async fn set_room_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -117,7 +119,7 @@ pub async fn get_global_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 1fee2f2..b86fd0b 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,7 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::api::client::{error::ErrorKind, r0::context::get_context}; -use std::convert::TryFrom; +use std::{convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -12,7 +12,7 @@ use rocket::get; )] #[tracing::instrument(skip(db, body))] pub async fn get_context_route( - db: State<'_, Database>, + db: State<'_, 
Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 961ba97..2441524 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{utils, ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ @@ -18,7 +20,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_devices_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -38,7 +40,7 @@ pub async fn get_devices_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_device_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -57,7 +59,7 @@ pub async fn get_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn update_device_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -83,7 +85,7 @@ pub async fn update_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_device_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -137,7 +139,7 @@ pub async fn delete_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 9864a5e..ad609cd 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Result, Ruma}; use log::info; @@ -33,7 +35,7 @@ use rocket::{get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { get_public_rooms_filtered_helper( @@ -53,7 +55,7 @@ pub async fn get_public_rooms_filtered_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let response = get_public_rooms_filtered_helper( @@ -82,7 +84,7 @@ pub async fn get_public_rooms_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -112,7 +114,7 @@ pub async fn set_room_visibility_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { Ok(get_room_visibility::Response { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index d856bf3..f80a329 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -14,7 +14,10 @@ use ruma::{ encryption::UnsignedDeviceInfo, DeviceId, DeviceKeyAlgorithm, UserId, }; -use 
std::collections::{BTreeMap, HashSet}; +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -25,7 +28,7 @@ use rocket::{get, post}; )] #[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -74,7 +77,7 @@ pub async fn upload_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -95,7 +98,7 @@ pub async fn get_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let response = claim_keys_helper(&body.one_time_keys, &db)?; @@ -111,7 +114,7 @@ pub async fn claim_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -174,7 +177,7 @@ pub async fn upload_signing_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -235,7 +238,7 @@ pub async fn upload_signatures_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 74ca6c8..0673787 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -7,14 +7,14 @@ use ruma::api::client::{ #[cfg(feature = "conduit_bin")] use rocket::{get, post}; -use std::convert::TryInto; +use std::{convert::TryInto, sync::Arc}; const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] #[tracing::instrument(skip(db))] pub async fn get_media_config_route( - db: State<'_, Database>, + db: State<'_, Arc>, ) -> ConduitResult { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), @@ -28,7 +28,7 @@ pub async fn get_media_config_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_content_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let mxc = format!( @@ -62,7 +62,7 @@ pub async fn create_content_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_content_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -112,7 +112,7 @@ pub async fn get_content_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 96fe800..92d7ace 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -44,7 +44,7 @@ use rocket::{get, post}; )] 
#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -81,7 +81,7 @@ pub async fn join_room_by_id_route( )] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -135,7 +135,7 @@ pub async fn join_room_by_id_or_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn leave_room_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -153,7 +153,7 @@ pub async fn leave_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn invite_user_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -173,7 +173,7 @@ pub async fn invite_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn kick_user_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -222,7 +222,7 @@ pub async fn kick_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn ban_user_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -279,7 +279,7 @@ pub async fn ban_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn unban_user_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -327,7 +327,7 @@ pub async fn unban_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn forget_room_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -345,7 +345,7 @@ pub async fn forget_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -366,7 +366,7 @@ pub async fn joined_rooms_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -396,7 +396,7 @@ pub async fn get_member_events_route( )] #[tracing::instrument(skip(db, body))] pub async fn joined_members_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 96de93d..0d19f34 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -11,6 +11,7 @@ use ruma::{ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, + sync::Arc, }; #[cfg(feature = "conduit_bin")] @@ -22,7 +23,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn 
send_message_event_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -85,7 +86,7 @@ pub async fn send_message_event_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 9f4f7a3..ce80dfd 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,7 +1,7 @@ use super::State; use crate::{utils, ConduitResult, Database, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; -use std::{convert::TryInto, time::Duration}; +use std::{convert::TryInto, sync::Arc, time::Duration}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -12,7 +12,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn set_presence_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -53,7 +53,7 @@ pub async fn set_presence_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_presence_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -62,7 +62,7 @@ pub async fn get_presence_route( for room_id in db .rooms - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()]) + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 882b02e..32bb608 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -13,7 +13,7 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::{get, put}; -use std::convert::TryInto; +use std::{convert::TryInto, sync::Arc}; #[cfg_attr( feature = "conduit_bin", @@ -21,7 +21,7 @@ use std::convert::TryInto; )] #[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -107,7 +107,7 @@ pub async fn set_displayname_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { Ok(get_display_name::Response { @@ -122,7 +122,7 @@ pub async fn get_displayname_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -208,7 +208,7 @@ pub async fn set_avatar_url_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { Ok(get_avatar_url::Response { @@ -223,7 +223,7 @@ pub async fn get_avatar_url_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_profile_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.users.exists(&body.user_id)? 
{ diff --git a/src/client_server/push.rs b/src/client_server/push.rs index e37e660..d6f6212 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ @@ -22,7 +24,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,7 +49,7 @@ pub async fn get_pushrules_all_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -101,7 +103,7 @@ pub async fn get_pushrule_route( )] #[tracing::instrument(skip(db, req))] pub async fn set_pushrule_route( - db: State<'_, Database>, + db: State<'_, Arc>, req: Ruma>, ) -> ConduitResult { let sender_user = req.sender_user.as_ref().expect("user is authenticated"); @@ -204,7 +206,7 @@ pub async fn set_pushrule_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -263,7 +265,7 @@ pub async fn get_pushrule_actions_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -337,7 +339,7 @@ pub async fn set_pushrule_actions_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -398,7 +400,7 @@ pub async fn get_pushrule_enabled_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -477,7 +479,7 @@ pub async fn set_pushrule_enabled_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -546,7 +548,7 @@ pub async fn delete_pushrule_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -563,7 +565,7 @@ pub async fn get_pushers_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 1b7ea0b..837170f 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -12,7 +12,7 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::post; -use std::collections::BTreeMap; +use 
std::{collections::BTreeMap, sync::Arc}; #[cfg_attr( feature = "conduit_bin", @@ -20,7 +20,7 @@ use std::collections::BTreeMap; )] #[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -87,7 +87,7 @@ pub async fn set_read_marker_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index be5d3b1..e193082 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -4,6 +4,7 @@ use ruma::{ api::client::r0::redact::redact_event, events::{room::redaction, EventType}, }; +use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::put; @@ -14,7 +15,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub async fn redact_event_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 0bc67d4..7c50775 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -13,7 +13,7 @@ use ruma::{ serde::Raw, RoomAliasId, RoomId, RoomVersionId, }; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom}; +use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -24,7 +24,7 @@ use rocket::{get, post}; )] #[tracing::instrument(skip(db, body))] pub async fn create_room_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -304,7 +304,7 @@ pub async fn create_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -332,7 +332,7 @@ pub async fn get_room_event_route( )] #[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, _room_id: String, ) -> ConduitResult { diff --git a/src/client_server/search.rs b/src/client_server/search.rs index a668a0d..ef5ddc2 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,6 +1,7 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; +use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::post; @@ -13,7 +14,7 @@ use std::collections::BTreeMap; )] #[tracing::instrument(skip(db, body))] pub async fn search_events_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 3718003..9a75ae2 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, ConduitResult, Database, Error, Ruma}; use log::info; @@ -50,7 +52,7 @@ pub async 
fn get_login_types_route() -> ConduitResult )] #[tracing::instrument(skip(db, body))] pub async fn login_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { // Validate login method @@ -167,7 +169,7 @@ pub async fn login_route( )] #[tracing::instrument(skip(db, body))] pub async fn logout_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -195,7 +197,7 @@ pub async fn logout_route( )] #[tracing::instrument(skip(db, body))] pub async fn logout_all_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 68e0c7f..c431ac0 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ @@ -25,7 +27,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -51,7 +53,7 @@ pub async fn send_state_event_for_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -77,7 +79,7 @@ pub async fn send_state_event_for_empty_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -124,7 +126,7 @@ pub async fn get_state_events_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -175,7 +177,7 @@ pub async fn get_state_events_for_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 63ad590..2f28706 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,5 @@ use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{ConduitResult, Database, Error, Result, Ruma}; use log::error; use ruma::{ api::client::r0::sync::sync_events, @@ -13,6 +13,7 @@ use rocket::{get, tokio}; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, + sync::Arc, time::Duration, }; @@ -33,7 +34,7 @@ use std::{ )] #[tracing::instrument(skip(db, body))] pub async fn sync_events_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -71,18 +72,23 @@ pub async fn sync_events_route( let mut non_timeline_pdus = db .rooms - .pdus_since(&sender_user, &room_id, 
since)? + .pdus_until(&sender_user, &room_id, u64::MAX) .filter_map(|r| { + // Filter out buggy events if r.is_err() { error!("Bad pdu in pdus_since: {:?}", r); } r.ok() - }); // Filter out buggy events + }) + .take_while(|(pduid, _)| { + db.rooms + .pdu_count(pduid) + .map_or(false, |count| count > since) + }); // Take the last 10 events for the timeline let timeline_pdus = non_timeline_pdus .by_ref() - .rev() .take(10) .collect::>() .into_iter() @@ -226,7 +232,7 @@ pub async fn sync_events_route( match (since_membership, current_membership) { (MembershipState::Leave, MembershipState::Join) => { // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { + if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { device_list_updates.insert(user_id); } } @@ -257,6 +263,7 @@ pub async fn sync_events_route( .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already !share_encrypted_room(&db, sender_user, user_id, &room_id) + .unwrap_or(false) }), ); } @@ -274,7 +281,7 @@ pub async fn sync_events_route( for hero in db .rooms - .all_pdus(&sender_user, &room_id)? + .all_pdus(&sender_user, &room_id) .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { @@ -411,7 +418,7 @@ pub async fn sync_events_route( let mut edus = db .rooms .edus - .readreceipts_since(&room_id, since)? + .readreceipts_since(&room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect::>(); @@ -549,7 +556,7 @@ pub async fn sync_events_route( for user_id in left_encrypted_users { let still_share_encrypted_room = db .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( @@ -639,9 +646,10 @@ fn share_encrypted_room( sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId, -) -> bool { - db.rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) +) -> Result { + Ok(db + .rooms + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
.filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { @@ -652,5 +660,5 @@ fn share_encrypted_room( .is_some(), ) }) - .any(|encrypted| encrypted) + .any(|encrypted| encrypted)) } diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 63e70ff..2382fe0 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -4,7 +4,7 @@ use ruma::{ api::client::r0::tag::{create_tag, delete_tag, get_tags}, events::EventType, }; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, put}; @@ -15,7 +15,7 @@ use rocket::{delete, get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn update_tag_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -52,7 +52,7 @@ pub async fn update_tag_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -86,7 +86,7 @@ pub async fn delete_tag_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_tags_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 460bd05..f2a97ab 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::api::client::{ @@ -14,7 +16,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 4b7feb7..a0a5d43 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{utils, ConduitResult, Database, Ruma}; use create_typing_event::Typing; @@ -12,7 +14,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub fn create_typing_event_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index b358274..0ddc7e8 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use super::State; use crate::{ConduitResult, Database, Ruma}; use ruma::api::client::r0::user_directory::search_users; @@ -11,7 +13,7 @@ use rocket::post; )] #[tracing::instrument(skip(db, body))] pub async fn search_users_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { let limit = u64::from(body.limit) as usize; diff --git a/src/database.rs b/src/database.rs index 7a55b03..e3b954e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,3 +1,5 @@ +pub mod abstraction; + pub mod account_data; pub mod admin; pub mod appservice; @@ -12,10 +14,10 @@ pub mod uiaa; pub mod users; use crate::{utils, Error, Result}; +use 
abstraction::DatabaseEngine; use directories::ProjectDirs; -use futures::StreamExt; use log::{error, info}; -use rocket::futures::{self, channel::mpsc}; +use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; use std::{ @@ -74,7 +76,8 @@ fn default_log() -> String { "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } -#[derive(Clone)] +pub type Engine = abstraction::SledEngine; + pub struct Database { pub globals: globals::Globals, pub users: users::Users, @@ -88,7 +91,6 @@ pub struct Database { pub admin: admin::Admin, pub appservice: appservice::Appservice, pub pusher: pusher::PushData, - pub _db: sled::Db, } impl Database { @@ -105,126 +107,126 @@ impl Database { } /// Load an existing database or create a new one. - pub async fn load_or_create(config: Config) -> Result { - let db = sled::Config::default() - .path(&config.database_path) - .cache_capacity(config.cache_capacity as u64) - .use_compression(true) - .open()?; + pub async fn load_or_create(config: Config) -> Result> { + let builder = Engine::open(&config)?; if config.max_request_size < 1024 { eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); } let (admin_sender, admin_receiver) = mpsc::unbounded(); + let (sending_sender, sending_receiver) = mpsc::unbounded(); - let db = Self { + let db = Arc::new(Self { users: users::Users { - userid_password: db.open_tree("userid_password")?, - userid_displayname: db.open_tree("userid_displayname")?, - userid_avatarurl: db.open_tree("userid_avatarurl")?, - userdeviceid_token: db.open_tree("userdeviceid_token")?, - userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?, - userid_devicelistversion: db.open_tree("userid_devicelistversion")?, - token_userdeviceid: db.open_tree("token_userdeviceid")?, - onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, - userid_lastonetimekeyupdate: db.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: db.open_tree("keychangeid_userid")?, - keyid_key: db.open_tree("keyid_key")?, - userid_masterkeyid: db.open_tree("userid_masterkeyid")?, - userid_selfsigningkeyid: db.open_tree("userid_selfsigningkeyid")?, - userid_usersigningkeyid: db.open_tree("userid_usersigningkeyid")?, - todeviceid_events: db.open_tree("todeviceid_events")?, + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { - userdevicesessionid_uiaainfo: db.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: db.open_tree("userdevicesessionid_uiaarequest")?, + 
userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: builder + .open_tree("userdevicesessionid_uiaarequest")?, }, rooms: rooms::Rooms { edus: rooms::RoomEdus { - readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?, - roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: db + readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: builder .open_tree("roomuserid_lastprivatereadupdate")?, - typingid_userid: db.open_tree("typingid_userid")?, - roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: db.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: db.open_tree("userid_lastpresenceupdate")?, + typingid_userid: builder.open_tree("typingid_userid")?, + roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, }, - pduid_pdu: db.open_tree("pduid_pdu")?, - eventid_pduid: db.open_tree("eventid_pduid")?, - roomid_pduleaves: db.open_tree("roomid_pduleaves")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, - alias_roomid: db.open_tree("alias_roomid")?, - aliasid_alias: db.open_tree("aliasid_alias")?, - publicroomids: db.open_tree("publicroomids")?, + alias_roomid: builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: builder.open_tree("publicroomids")?, - tokenids: db.open_tree("tokenids")?, + tokenids: builder.open_tree("tokenids")?, - roomserverids: db.open_tree("roomserverids")?, - serverroomids: db.open_tree("serverroomids")?, - userroomid_joined: db.open_tree("userroomid_joined")?, - roomuserid_joined: db.open_tree("roomuserid_joined")?, - roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, - userroomid_invitestate: db.open_tree("userroomid_invitestate")?, - roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, - userroomid_leftstate: db.open_tree("userroomid_leftstate")?, - roomuserid_leftcount: db.open_tree("roomuserid_leftcount")?, + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, - userroomid_notificationcount: db.open_tree("userroomid_notificationcount")?, - userroomid_highlightcount: db.open_tree("userroomid_highlightcount")?, + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, - statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, - stateid_shorteventid: db.open_tree("stateid_shorteventid")?, - eventid_shorteventid: db.open_tree("eventid_shorteventid")?, - 
shorteventid_eventid: db.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: db.open_tree("shorteventid_shortstatehash")?, - roomid_shortstatehash: db.open_tree("roomid_shortstatehash")?, - statehash_shortstatehash: db.open_tree("statehash_shortstatehash")?, + statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + stateid_shorteventid: builder.open_tree("stateid_shorteventid")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, + statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, - prevevent_parent: db.open_tree("prevevent_parent")?, + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + prevevent_parent: builder.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { - roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, + roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, }, media: media::Media { - mediaid_file: db.open_tree("mediaid_file")?, + mediaid_file: builder.open_tree("mediaid_file")?, }, key_backups: key_backups::KeyBackups { - backupid_algorithm: db.open_tree("backupid_algorithm")?, - backupid_etag: db.open_tree("backupid_etag")?, - backupkeyid_backup: db.open_tree("backupkeyid_backup")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, }, transaction_ids: transaction_ids::TransactionIds { - userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, + userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, }, sending: sending::Sending { - servername_educount: db.open_tree("servername_educount")?, - servernamepduids: db.open_tree("servernamepduids")?, - servercurrentevents: db.open_tree("servercurrentevents")?, + servername_educount: builder.open_tree("servername_educount")?, + servernamepduids: builder.open_tree("servernamepduids")?, + servercurrentevents: builder.open_tree("servercurrentevents")?, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), + sender: sending_sender, }, admin: admin::Admin { sender: admin_sender, }, appservice: appservice::Appservice { cached_registrations: Arc::new(RwLock::new(HashMap::new())), - id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, + id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, + }, + pusher: pusher::PushData { + senderkey_pusher: builder.open_tree("senderkey_pusher")?, }, - pusher: pusher::PushData::new(&db)?, globals: globals::Globals::load( - db.open_tree("global")?, - db.open_tree("server_signingkeys")?, + builder.open_tree("global")?, + builder.open_tree("server_signingkeys")?, config, )?, - _db: db, - }; + }); // MIGRATIONS + // TODO: database versions of new dbs should probably not be 0 if db.globals.database_version()? 
< 1 { - for roomserverid in db.rooms.roomserverids.iter().keys() { - let roomserverid = roomserverid?; + for (roomserverid, _) in db.rooms.roomserverids.iter() { let mut parts = roomserverid.split(|&b| b == 0xff); let room_id = parts.next().expect("split always returns one element"); let servername = match parts.next() { @@ -238,7 +240,7 @@ impl Database { serverroomid.push(0xff); serverroomid.extend_from_slice(room_id); - db.rooms.serverroomids.insert(serverroomid, &[])?; + db.rooms.serverroomids.insert(&serverroomid, &[])?; } db.globals.bump_database_version(1)?; @@ -248,15 +250,13 @@ impl Database { if db.globals.database_version()? < 2 { // We accidentally inserted hashed versions of "" into the db instead of just "" - for userid_password in db.users.userid_password.iter() { - let (userid, password) = userid_password?; - + for (userid, password) in db.users.userid_password.iter() { let password = utils::string_from_bytes(&password); if password.map_or(false, |password| { argon2::verify_encoded(&password, b"").unwrap_or(false) }) { - db.users.userid_password.insert(userid, b"")?; + db.users.userid_password.insert(&userid, b"")?; } } @@ -268,7 +268,8 @@ impl Database { // This data is probably outdated db.rooms.edus.presenceid_presence.clear()?; - db.admin.start_handler(db.clone(), admin_receiver); + db.admin.start_handler(Arc::clone(&db), admin_receiver); + db.sending.start_handler(Arc::clone(&db), sending_receiver); Ok(db) } @@ -282,7 +283,7 @@ impl Database { userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); userdeviceid_prefix.push(0xff); - let mut futures = futures::stream::FuturesUnordered::new(); + let mut futures = FuturesUnordered::new(); // Return when *any* user changed his key // TODO: only send for user they share a room with diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs new file mode 100644 index 0000000..5a2afd5 --- /dev/null +++ b/src/database/abstraction.rs @@ -0,0 +1,309 @@ +use std::{ + collections::BTreeMap, + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; + +use log::warn; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Direction, MultiThreaded, Options, +}; + +use super::Config; +use crate::{utils, Result}; + +pub struct SledEngine(sled::Db); +pub struct SledEngineTree(sled::Tree); +pub struct RocksDbEngine(rocksdb::DBWithThreadMode); +pub struct RocksDbEngineTree<'a> { + db: Arc, + name: &'a str, + watchers: RwLock, Vec>>>, +} + +pub trait DatabaseEngine: Sized { + fn open(config: &Config) -> Result>; + fn open_tree(self: &Arc, name: &'static str) -> Result>; +} + +pub trait Tree: Send + Sync { + fn get(&self, key: &[u8]) -> Result>>; + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; + + fn remove(&self, key: &[u8]) -> Result<()>; + + fn iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a>; + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Box<[u8]>)> + 'a>; + + fn increment(&self, key: &[u8]) -> Result>; + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Box<[u8]>)> + 'a>; + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; + + fn clear(&self) -> Result<()> { + for (key, _) in self.iter() { + self.remove(&key)?; + } + + Ok(()) + } +} + +impl DatabaseEngine for SledEngine { + fn open(config: &Config) -> Result> { + Ok(Arc::new(SledEngine( + sled::Config::default() + .path(&config.database_path) + .cache_capacity(config.cache_capacity as u64) + .use_compression(true) + .open()?, + ))) + } + + fn open_tree(self: 
&Arc, name: &'static str) -> Result> { + Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?))) + } +} + +impl Tree for SledEngineTree { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.0.get(key)?.map(|v| v.to_vec())) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.0.insert(key, value)?; + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + self.0.remove(key)?; + Ok(()) + } + + fn iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a> { + Box::new( + self.0 + .iter() + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())), + ) + } + + fn iter_from( + &self, + from: &[u8], + backwards: bool, + ) -> Box, Box<[u8]>)>> { + let iter = if backwards { + self.0.range(..from) + } else { + self.0.range(from..) + }; + + let iter = iter + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); + + if backwards { + Box::new(iter.rev()) + } else { + Box::new(iter) + } + } + + fn increment(&self, key: &[u8]) -> Result> { + Ok(self + .0 + .update_and_fetch(key, utils::increment) + .map(|o| o.expect("increment always sets a value").to_vec())?) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Box<[u8]>)> + 'a> { + let iter = self + .0 + .scan_prefix(prefix) + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); + + Box::new(iter) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let prefix = prefix.to_vec(); + Box::pin(async move { + self.0.watch_prefix(prefix).await; + }) + } +} + +impl DatabaseEngine for RocksDbEngine { + fn open(config: &Config) -> Result> { + let mut db_opts = Options::default(); + db_opts.create_if_missing(true); + + let cfs = DBWithThreadMode::::list_cf(&db_opts, &config.database_path) + .unwrap_or_default(); + + let mut options = Options::default(); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + let db = DBWithThreadMode::::open_cf_descriptors( + &db_opts, + &config.database_path, + cfs.iter() + .map(|name| ColumnFamilyDescriptor::new(name, options.clone())), + )?; + + Ok(Arc::new(RocksDbEngine(db))) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + let mut options = Options::default(); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + // Create if it doesn't exist + let _ = self.0.create_cf(name, &options); + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: RwLock::new(BTreeMap::new()), + })) + } +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> BoundColumnFamily<'_> { + self.db.0.cf_handle(self.name).unwrap() + } +} + +impl Tree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.db.0.get_cf(self.cf(), key)?) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + } + + Ok(self.db.0.put_cf(self.cf(), key, value)?) 
+ } + + fn remove(&self, key: &[u8]) -> Result<()> { + Ok(self.db.0.delete_cf(self.cf(), key)?) + } + + fn iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a> { + Box::new( + self.db + .0 + .iterator_cf(self.cf(), rocksdb::IteratorMode::Start), + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Box<[u8]>)> + 'a> { + Box::new(self.db.0.iterator_cf( + self.cf(), + rocksdb::IteratorMode::From( + from, + if backwards { + Direction::Reverse + } else { + Direction::Forward + }, + ), + )) + } + + fn increment(&self, key: &[u8]) -> Result> { + // TODO: atomic? + let old = self.get(key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.insert(key, &new)?; + Ok(new) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Box<[u8]>)> + 'a> { + Box::new( + self.db + .0 + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From(&prefix, Direction::Forward), + ) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .unwrap() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } +} diff --git a/src/database/account_data.rs b/src/database/account_data.rs index bb970c3..2ba7bc3 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -6,12 +6,12 @@ use ruma::{ RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; -use sled::IVec; -use std::{collections::HashMap, convert::TryFrom}; +use std::{collections::HashMap, convert::TryFrom, sync::Arc}; + +use super::abstraction::Tree; -#[derive(Clone)] pub struct AccountData { - pub(super) roomuserdataid_accountdata: sled::Tree, // RoomUserDataId = Room + User + Count + Type + pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type } impl AccountData { @@ -34,9 +34,8 @@ impl AccountData { prefix.push(0xff); // Remove old entry - if let Some(previous) = self.find_event(room_id, user_id, &event_type) { - let (old_key, _) = previous?; - self.roomuserdataid_accountdata.remove(old_key)?; + if let Some((old_key, _)) = self.find_event(room_id, user_id, &event_type)? { + self.roomuserdataid_accountdata.remove(&old_key)?; } let mut key = prefix; @@ -52,8 +51,10 @@ impl AccountData { )); } - self.roomuserdataid_accountdata - .insert(key, &*json.to_string())?; + self.roomuserdataid_accountdata.insert( + &key, + &serde_json::to_vec(&json).expect("to_vec always works on json values"), + )?; Ok(()) } @@ -65,9 +66,8 @@ impl AccountData { user_id: &UserId, kind: EventType, ) -> Result> { - self.find_event(room_id, user_id, &kind) - .map(|r| { - let (_, v) = r?; + self.find_event(room_id, user_id, &kind)? + .map(|(_, v)| { serde_json::from_slice(&v).map_err(|_| Error::bad_database("could not deserialize")) }) .transpose() @@ -98,8 +98,7 @@ impl AccountData { for r in self .roomuserdataid_accountdata - .range(&*first_possible..) 
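// A minimal sketch of the prefix-bounded scan pattern the new `Tree` trait encourages,
// assuming a `tree: &dyn Tree` (the trait defined in abstraction.rs above) and the
// hypothetical helper name `entries_after`; the sled-specific `range(..)` + `Result`
// iteration removed in the surrounding hunk becomes infallible (Box<[u8]>, Box<[u8]>)
// pairs bounded by a `take_while` on the key prefix.
fn entries_after(tree: &dyn Tree, prefix: Vec<u8>, since: u64) -> Vec<(Box<[u8]>, Box<[u8]>)> {
    // First possible key strictly after `since`: prefix ++ big-endian count.
    let mut first_possible = prefix.clone();
    first_possible.extend_from_slice(&(since + 1).to_be_bytes());

    tree.iter_from(&first_possible, false) // forward scan starting at the bound
        .take_while(move |(k, _)| k.starts_with(&prefix)) // stop at the end of this prefix
        .collect()
}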
- .filter_map(|r| r.ok()) + .iter_from(&first_possible, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( @@ -128,7 +127,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, kind: &EventType, - ) -> Option> { + ) -> Result, Box<[u8]>)>> { let mut prefix = room_id .map(|r| r.to_string()) .unwrap_or_default() @@ -137,23 +136,21 @@ impl AccountData { prefix.push(0xff); prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + let kind = kind.clone(); - self.roomuserdataid_accountdata - .scan_prefix(prefix) - .rev() - .find(move |r| { - r.as_ref() - .map(|(k, _)| { - k.rsplit(|&b| b == 0xff) - .next() - .map(|current_event_type| { - current_event_type == kind.as_ref().as_bytes() - }) - .unwrap_or(false) - }) + Ok(self + .roomuserdataid_accountdata + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .find(move |(k, _)| { + k.rsplit(|&b| b == 0xff) + .next() + .map(|current_event_type| current_event_type == kind.as_ref().as_bytes()) .unwrap_or(false) - }) - .map(|r| Ok(r?)) + })) } } diff --git a/src/database/admin.rs b/src/database/admin.rs index 3014385..7826cfe 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,6 +1,9 @@ -use std::convert::{TryFrom, TryInto}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; -use crate::pdu::PduBuilder; +use crate::{pdu::PduBuilder, Database}; use log::warn; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ @@ -22,7 +25,7 @@ pub struct Admin { impl Admin { pub fn start_handler( &self, - db: super::Database, + db: Arc, mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { @@ -73,14 +76,17 @@ impl Admin { db.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::ListAppservices => { - let appservices = db.appservice.iter_ids().collect::>(); - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") - ); - send_message(message::MessageEventContent::text_plain(output)); + if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") + ); + send_message(message::MessageEventContent::text_plain(output)); + } else { + send_message(message::MessageEventContent::text_plain("Failed to get appservices.")); + } } AdminCommand::SendMessage(message) => { send_message(message); @@ -93,6 +99,6 @@ impl Admin { } pub fn send(&self, command: AdminCommand) { - self.sender.unbounded_send(command).unwrap() + self.sender.unbounded_send(command).unwrap(); } } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 222eb18..21b18a7 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -4,18 +4,21 @@ use std::{ sync::{Arc, RwLock}, }; -#[derive(Clone)] +use super::abstraction::Tree; + pub struct Appservice { pub(super) cached_registrations: Arc>>, - pub(super) id_appserviceregistrations: sled::Tree, + pub(super) id_appserviceregistrations: Arc, } impl Appservice { pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); - self.id_appserviceregistrations - .insert(id, 
serde_yaml::to_string(&yaml).unwrap().as_bytes())?; + self.id_appserviceregistrations.insert( + id.as_bytes(), + serde_yaml::to_string(&yaml).unwrap().as_bytes(), + )?; self.cached_registrations .write() .unwrap() @@ -33,7 +36,7 @@ impl Appservice { || { Ok(self .id_appserviceregistrations - .get(id)? + .get(id.as_bytes())? .map(|bytes| { Ok::<_, Error>(serde_yaml::from_slice(&bytes).map_err(|_| { Error::bad_database( @@ -47,21 +50,25 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> impl Iterator> { - self.id_appserviceregistrations.iter().keys().map(|id| { - Ok(utils::string_from_bytes(&id?).map_err(|_| { + pub fn iter_ids<'a>( + &'a self, + ) -> Result> + Send + Sync + 'a> { + Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { + Ok(utils::string_from_bytes(&id).map_err(|_| { Error::bad_database("Invalid id bytes in id_appserviceregistrations.") })?) - }) + })) } - pub fn iter_all(&self) -> impl Iterator> + '_ { - self.iter_ids().filter_map(|id| id.ok()).map(move |id| { + pub fn iter_all( + &self, + ) -> Result> + '_ + Send + Sync> { + Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| { Ok(( id.clone(), self.get_registration(&id)? .expect("iter_ids only returns appservices that exist"), )) - }) + })) } } diff --git a/src/database/globals.rs b/src/database/globals.rs index 5d91d37..37ebf13 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -13,22 +13,23 @@ use std::{ use tokio::sync::Semaphore; use trust_dns_resolver::TokioAsyncResolver; -pub const COUNTER: &str = "c"; +use super::abstraction::Tree; + +pub const COUNTER: &[u8] = b"c"; type WellKnownMap = HashMap, (String, String)>; type TlsNameMap = HashMap; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries -#[derive(Clone)] pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, - pub(super) globals: sled::Tree, + pub(super) globals: Arc, config: Config, keypair: Arc, reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - pub(super) server_signingkeys: sled::Tree, + pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, @@ -69,15 +70,20 @@ impl ServerCertVerifier for MatrixServerVerifier { impl Globals { pub fn load( - globals: sled::Tree, - server_signingkeys: sled::Tree, + globals: Arc, + server_signingkeys: Arc, config: Config, ) -> Result { - let bytes = &*globals - .update_and_fetch("keypair", utils::generate_keypair)? - .expect("utils::generate_keypair always returns Some"); + let keypair_bytes = globals.get(b"keypair")?.map_or_else( + || { + let keypair = utils::generate_keypair(); + globals.insert(b"keypair", &keypair)?; + Ok::<_, Error>(keypair) + }, + |s| Ok(s.to_vec()), + )?; - let mut parts = bytes.splitn(2, |&b| b == 0xff); + let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); let keypair = utils::string_from_bytes( // 1. version @@ -102,7 +108,7 @@ impl Globals { Ok(k) => k, Err(e) => { error!("Keypair invalid. Deleting..."); - globals.remove("keypair")?; + globals.remove(b"keypair")?; return Err(e); } }; @@ -159,13 +165,8 @@ impl Globals { } pub fn next_count(&self) -> Result { - Ok(utils::u64_from_bytes( - &self - .globals - .update_and_fetch(COUNTER, utils::increment)? - .expect("utils::increment will always put in a value"), - ) - .map_err(|_| Error::bad_database("Count has invalid bytes."))?) 
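// A minimal sketch of a big-endian u64 increment helper compatible with the new
// `Tree::increment`; the exact shape of `utils::increment` is an assumption here, not
// taken from this patch. The sled tree applies such a helper through `update_and_fetch`,
// while the RocksDB tree's `increment` above does a read-modify-write.
use std::convert::TryInto;

fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old.map(|bytes| bytes.try_into()) {
        Some(Ok(bytes)) => u64::from_be_bytes(bytes) + 1, // bump the existing counter
        _ => 1, // missing or malformed value: restart at 1
    };
    Some(number.to_be_bytes().to_vec())
}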
+ Ok(utils::u64_from_bytes(&self.globals.increment(COUNTER)?) + .map_err(|_| Error::bad_database("Count has invalid bytes."))?) } pub fn current_count(&self) -> Result { @@ -211,21 +212,30 @@ impl Globals { /// Remove the outdated keys and insert the new ones. /// /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key(&self, origin: &ServerName, new_keys: &ServerSigningKeys) -> Result<()> { - self.server_signingkeys - .update_and_fetch(origin.as_bytes(), |signingkeys| { - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - keys.verify_keys - .extend(new_keys.verify_keys.clone().into_iter()); - keys.old_verify_keys - .extend(new_keys.old_verify_keys.clone().into_iter()); - Some(serde_json::to_vec(&keys).expect("serversigningkeys can be serialized")) - })?; + pub fn add_signing_key(&self, origin: &ServerName, new_keys: ServerSigningKeys) -> Result<()> { + // Not atomic, but this is not critical + let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(&keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + keys.verify_keys.extend(verify_keys.into_iter()); + keys.old_verify_keys.extend(old_verify_keys.into_iter()); + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), + )?; Ok(()) } @@ -254,14 +264,15 @@ impl Globals { } pub fn database_version(&self) -> Result { - self.globals.get("version")?.map_or(Ok(0), |version| { + self.globals.get(b"version")?.map_or(Ok(0), |version| { utils::u64_from_bytes(&version) .map_err(|_| Error::bad_database("Database version id is invalid.")) }) } pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals.insert("version", &new_version.to_be_bytes())?; + self.globals + .insert(b"version", &new_version.to_be_bytes())?; Ok(()) } } diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 0f9af2e..0685c48 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -6,13 +6,14 @@ use ruma::{ }, RoomId, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom}; +use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; + +use super::abstraction::Tree; -#[derive(Clone)] pub struct KeyBackups { - pub(super) backupid_algorithm: sled::Tree, // BackupId = UserId + Version(Count) - pub(super) backupid_etag: sled::Tree, // BackupId = UserId + Version(Count) - pub(super) backupkeyid_backup: sled::Tree, // BackupKeyId = UserId + Version + RoomId + SessionId + pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId } impl KeyBackups { @@ -30,8 +31,7 @@ impl KeyBackups { self.backupid_algorithm.insert( &key, - &*serde_json::to_string(backup_metadata) - .expect("BackupAlgorithm::to_string always works"), + &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), )?; self.backupid_etag .insert(&key, 
&globals.next_count()?.to_be_bytes())?; @@ -48,13 +48,8 @@ impl KeyBackups { key.push(0xff); - for outdated_key in self - .backupkeyid_backup - .scan_prefix(&key) - .keys() - .filter_map(|r| r.ok()) - { - self.backupkeyid_backup.remove(outdated_key)?; + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; } Ok(()) @@ -80,8 +75,9 @@ impl KeyBackups { self.backupid_algorithm.insert( &key, - &*serde_json::to_string(backup_metadata) - .expect("BackupAlgorithm::to_string always works"), + &serde_json::to_string(backup_metadata) + .expect("BackupAlgorithm::to_string always works") + .as_bytes(), )?; self.backupid_etag .insert(&key, &globals.next_count()?.to_be_bytes())?; @@ -91,11 +87,14 @@ impl KeyBackups { pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + self.backupid_algorithm - .scan_prefix(&prefix) - .last() - .map_or(Ok(None), |r| { - let (key, value) = r?; + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map_or(Ok(None), |(key, value)| { let version = utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -117,10 +116,13 @@ impl KeyBackups { key.push(0xff); key.extend_from_slice(version.as_bytes()); - self.backupid_algorithm.get(key)?.map_or(Ok(None), |bytes| { - Ok(serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?) - }) + self.backupid_algorithm + .get(&key)? + .map_or(Ok(None), |bytes| { + Ok(serde_json::from_slice(&bytes).map_err(|_| { + Error::bad_database("Algorithm in backupid_algorithm is invalid.") + })?) + }) } pub fn add_key( @@ -153,7 +155,7 @@ impl KeyBackups { self.backupkeyid_backup.insert( &key, - &*serde_json::to_string(&key_data).expect("KeyBackupData::to_string always works"), + &serde_json::to_vec(&key_data).expect("KeyBackupData::to_vec always works"), )?; Ok(()) @@ -164,7 +166,7 @@ impl KeyBackups { prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); - Ok(self.backupkeyid_backup.scan_prefix(&prefix).count()) + Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) } pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { @@ -194,33 +196,37 @@ impl KeyBackups { let mut rooms = BTreeMap::::new(); - for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| { - let (key, value) = r?; - let mut parts = key.rsplit(|&b| b == 0xff); + for result in self + .backupkeyid_backup + .scan_prefix(prefix) + .map(|(key, value)| { + let mut parts = key.rsplit(|&b| b == 0xff); - let session_id = utils::string_from_bytes( - &parts - .next() - .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; + let session_id = + utils::string_from_bytes(&parts.next().ok_or_else(|| { + Error::bad_database("backupkeyid_backup key is invalid.") + })?) + .map_err(|_| { + Error::bad_database("backupkeyid_backup session_id is invalid.") + })?; - let room_id = RoomId::try_from( - utils::string_from_bytes( - &parts - .next() - .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + let room_id = RoomId::try_from( + utils::string_from_bytes(&parts.next().ok_or_else(|| { + Error::bad_database("backupkeyid_backup key is invalid.") + })?) 
+ .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, ) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; + .map_err(|_| { + Error::bad_database("backupkeyid_backup room_id is invalid room id.") + })?; - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; + let key_data = serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + })?; - Ok::<_, Error>((room_id, session_id, key_data)) - }) { + Ok::<_, Error>((room_id, session_id, key_data)) + }) + { let (room_id, session_id, key_data) = result?; rooms .entry(room_id) @@ -239,7 +245,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> BTreeMap { + ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -247,10 +253,10 @@ impl KeyBackups { prefix.extend_from_slice(room_id.as_bytes()); prefix.push(0xff); - self.backupkeyid_backup - .scan_prefix(&prefix) - .map(|r| { - let (key, value) = r?; + Ok(self + .backupkeyid_backup + .scan_prefix(prefix) + .map(|(key, value)| { let mut parts = key.rsplit(|&b| b == 0xff); let session_id = @@ -268,7 +274,7 @@ impl KeyBackups { Ok::<_, Error>((session_id, key_data)) }) .filter_map(|r| r.ok()) - .collect() + .collect()) } pub fn get_session( @@ -302,13 +308,8 @@ impl KeyBackups { key.extend_from_slice(&version.as_bytes()); key.push(0xff); - for outdated_key in self - .backupkeyid_backup - .scan_prefix(&key) - .keys() - .filter_map(|r| r.ok()) - { - self.backupkeyid_backup.remove(outdated_key)?; + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; } Ok(()) @@ -327,13 +328,8 @@ impl KeyBackups { key.extend_from_slice(&room_id.as_bytes()); key.push(0xff); - for outdated_key in self - .backupkeyid_backup - .scan_prefix(&key) - .keys() - .filter_map(|r| r.ok()) - { - self.backupkeyid_backup.remove(outdated_key)?; + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; } Ok(()) @@ -354,13 +350,8 @@ impl KeyBackups { key.push(0xff); key.extend_from_slice(&session_id.as_bytes()); - for outdated_key in self - .backupkeyid_backup - .scan_prefix(&key) - .keys() - .filter_map(|r| r.ok()) - { - self.backupkeyid_backup.remove(outdated_key)?; + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; } Ok(()) diff --git a/src/database/media.rs b/src/database/media.rs index 28ef88a..ca45484 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,7 +1,9 @@ use image::{imageops::FilterType, GenericImageView}; use crate::{utils, Error, Result}; -use std::mem; +use std::{mem, sync::Arc}; + +use super::abstraction::Tree; pub struct FileMeta { pub content_disposition: Option, @@ -9,9 +11,8 @@ pub struct FileMeta { pub file: Vec, } -#[derive(Clone)] pub struct Media { - pub(super) mediaid_file: sled::Tree, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType } impl Media { @@ -42,7 +43,7 @@ impl Media { .unwrap_or_default(), ); - self.mediaid_file.insert(key, file)?; + self.mediaid_file.insert(&key, file)?; Ok(()) } @@ -76,7 
+77,7 @@ impl Media { .unwrap_or_default(), ); - self.mediaid_file.insert(key, file)?; + self.mediaid_file.insert(&key, file)?; Ok(()) } @@ -89,8 +90,7 @@ impl Media { prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - if let Some(r) = self.mediaid_file.scan_prefix(&prefix).next() { - let (key, file) = r?; + if let Some((key, file)) = self.mediaid_file.scan_prefix(prefix).next() { let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -169,9 +169,8 @@ impl Media { original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail original_prefix.push(0xff); - if let Some(r) = self.mediaid_file.scan_prefix(&thumbnail_prefix).next() { + if let Some((key, file)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { // Using saved thumbnail - let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -202,10 +201,8 @@ impl Media { content_type, file: file.to_vec(), })) - } else if let Some(r) = self.mediaid_file.scan_prefix(&original_prefix).next() { + } else if let Some((key, file)) = self.mediaid_file.scan_prefix(original_prefix).next() { // Generate a thumbnail - - let (key, file) = r?; let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -302,7 +299,7 @@ impl Media { widthheight, ); - self.mediaid_file.insert(thumbnail_key, &*thumbnail_bytes)?; + self.mediaid_file.insert(&thumbnail_key, &thumbnail_bytes)?; Ok(Some(FileMeta { content_disposition, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 51f55a1..39b631d 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -14,23 +14,17 @@ use ruma::{ push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; -use sled::IVec; -use std::{convert::TryFrom, fmt::Debug, mem}; +use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc}; + +use super::abstraction::Tree; -#[derive(Debug, Clone)] pub struct PushData { /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: sled::Tree, + pub(super) senderkey_pusher: Arc, } impl PushData { - pub fn new(db: &sled::Db) -> Result { - Ok(Self { - senderkey_pusher: db.open_tree("senderkey_pusher")?, - }) - } - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); @@ -40,14 +34,14 @@ impl PushData { if pusher.kind.is_none() { return self .senderkey_pusher - .remove(key) + .remove(&key) .map(|_| ()) .map_err(Into::into); } self.senderkey_pusher.insert( - key, - &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), + &key, + &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), )?; Ok(()) @@ -69,23 +63,21 @@ impl PushData { self.senderkey_pusher .scan_prefix(prefix) - .values() - .map(|push| { - let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + .map(|(_, push)| { Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) 
}) .collect() } - pub fn get_pusher_senderkeys(&self, sender: &UserId) -> impl Iterator> { + pub fn get_pusher_senderkeys<'a>( + &'a self, + sender: &UserId, + ) -> impl Iterator> + 'a { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); - self.senderkey_pusher - .scan_prefix(prefix) - .keys() - .map(|r| Ok(r?)) + self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 703314e..0a8239d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -28,7 +28,7 @@ use std::{ sync::Arc, }; -use super::{admin::AdminCommand, pusher}; +use super::{abstraction::Tree, admin::AdminCommand, pusher}; /// The unique identifier of each state group. /// @@ -36,54 +36,53 @@ use super::{admin::AdminCommand, pusher}; /// hashing the entire state. pub type StateHashId = IVec; -#[derive(Clone)] pub struct Rooms { pub edus: edus::RoomEdus, - pub(super) pduid_pdu: sled::Tree, // PduId = RoomId + Count - pub(super) eventid_pduid: sled::Tree, - pub(super) roomid_pduleaves: sled::Tree, - pub(super) alias_roomid: sled::Tree, - pub(super) aliasid_alias: sled::Tree, // AliasId = RoomId + Count - pub(super) publicroomids: sled::Tree, + pub(super) pduid_pdu: Arc, // PduId = RoomId + Count + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count + pub(super) publicroomids: Arc, - pub(super) tokenids: sled::Tree, // TokenId = RoomId + Token + PduId + pub(super) tokenids: Arc, // TokenId = RoomId + Token + PduId /// Participating servers in a room. - pub(super) roomserverids: sled::Tree, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: sled::Tree, // ServerRoomId = ServerName + RoomId + pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - pub(super) userroomid_joined: sled::Tree, - pub(super) roomuserid_joined: sled::Tree, - pub(super) roomuseroncejoinedids: sled::Tree, - pub(super) userroomid_invitestate: sled::Tree, // InviteState = Vec> - pub(super) roomuserid_invitecount: sled::Tree, // InviteCount = Count - pub(super) userroomid_leftstate: sled::Tree, - pub(super) roomuserid_leftcount: sled::Tree, + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) userroomid_invitestate: Arc, // InviteState = Vec> + pub(super) roomuserid_invitecount: Arc, // InviteCount = Count + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, - pub(super) userroomid_notificationcount: sled::Tree, // NotifyCount = u64 - pub(super) userroomid_highlightcount: sled::Tree, // HightlightCount = u64 + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 + pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: sled::Tree, + pub(super) roomid_shortstatehash: Arc, /// Remember the state hash at events in the past. 
- pub(super) shorteventid_shortstatehash: sled::Tree, + pub(super) shorteventid_shortstatehash: Arc, /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: sled::Tree, - pub(super) shorteventid_eventid: sled::Tree, + pub(super) statekey_shortstatekey: Arc, + pub(super) shorteventid_eventid: Arc, /// ShortEventId = Count - pub(super) eventid_shorteventid: sled::Tree, + pub(super) eventid_shorteventid: Arc, /// ShortEventId = Count - pub(super) statehash_shortstatehash: sled::Tree, + pub(super) statehash_shortstatehash: Arc, /// ShortStateHash = Count /// StateId = ShortStateHash + ShortStateKey - pub(super) stateid_shorteventid: sled::Tree, + pub(super) stateid_shorteventid: Arc, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) eventid_outlierpdu: Arc, /// RoomId + EventId -> Parent PDU EventId. - pub(super) prevevent_parent: sled::Tree, + pub(super) prevevent_parent: Arc, } impl Rooms { @@ -92,10 +91,8 @@ impl Rooms { pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid - .scan_prefix(&shortstatehash.to_be_bytes()) - .values() - .filter_map(|r| r.ok()) - .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .scan_prefix(shortstatehash.to_be_bytes().to_vec()) + .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() .map(|bytes| { Ok::<_, Error>( @@ -117,10 +114,8 @@ impl Rooms { ) -> Result> { Ok(self .stateid_shorteventid - .scan_prefix(shortstatehash.to_be_bytes()) - .values() - .filter_map(|r| r.ok()) - .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .scan_prefix(shortstatehash.to_be_bytes().to_vec()) + .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() .map(|bytes| { Ok::<_, Error>( @@ -211,16 +206,16 @@ impl Rooms { self.eventid_shorteventid .get(event_id.as_bytes())? .map_or(Ok(None), |shorteventid| { - Ok(self.shorteventid_shortstatehash.get(shorteventid)?.map_or( - Ok::<_, Error>(None), - |bytes| { + Ok(self + .shorteventid_shortstatehash + .get(&shorteventid)? + .map_or(Ok::<_, Error>(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database( "Invalid shortstatehash bytes in shorteventid_shortstatehash", ) })?)) - }, - )?) + })?) }) } @@ -285,7 +280,8 @@ impl Rooms { // Look for PDUs in that room. Ok(self .pduid_pdu - .get_gt(&prefix)? + .iter_from(&prefix, false) + .next() .filter(|(k, _)| k.starts_with(&prefix)) .is_some()) } @@ -471,10 +467,17 @@ impl Rooms { } pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + self.pduid_pdu - .scan_prefix(room_id.as_bytes()) - .last() - .map(|b| self.pdu_count(&b?.0)) + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|b| self.pdu_count(&b.0)) .transpose() .map(|op| op.unwrap_or_default()) } @@ -499,7 +502,7 @@ impl Rooms { } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result> { + pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) @@ -570,11 +573,11 @@ impl Rooms { } /// Removes a pdu and creates a new one with the same id. 
- fn replace_pdu(&self, pdu_id: &IVec, pdu: &PduEvent) -> Result<()> { + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(&pdu_id)?.is_some() { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(pdu).expect("PduEvent::to_string always works"), + &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), )?; Ok(()) } else { @@ -591,11 +594,11 @@ impl Rooms { prefix.push(0xff); self.roomid_pduleaves - .scan_prefix(prefix) - .values() - .map(|bytes| { + .scan_prefix(dbg!(prefix)) + .map(|(key, bytes)| { + dbg!(key); Ok::<_, Error>( - EventId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?, @@ -612,8 +615,8 @@ impl Rooms { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; + for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { + self.roomid_pduleaves.remove(&key)?; } for event_id in event_ids { @@ -628,7 +631,7 @@ impl Rooms { pub fn is_pdu_referenced(&self, pdu: &PduEvent) -> Result { let mut key = pdu.room_id().as_bytes().to_vec(); key.extend_from_slice(pdu.event_id().as_bytes()); - self.prevevent_parent.contains_key(key).map_err(Into::into) + Ok(self.prevevent_parent.get(&key)?.is_some()) } /// Returns the pdu from the outlier tree. @@ -646,7 +649,7 @@ impl Rooms { pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( &event_id.as_bytes(), - &*serde_json::to_string(&pdu).expect("CanonicalJsonObject is valid string"), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), )?; Ok(()) @@ -698,7 +701,7 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.extend_from_slice(leaf.as_bytes()); self.prevevent_parent - .insert(key, pdu.event_id().as_bytes())?; + .insert(&key, pdu.event_id().as_bytes())?; } self.replace_pdu_leaves(&pdu.room_id, leaves)?; @@ -711,8 +714,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(&pdu_json) - .expect("CanonicalJsonObject is always a valid String"), + &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; // This also replaces the eventid of any outliers with the correct @@ -760,22 +762,14 @@ impl Rooms { userroom_id.extend_from_slice(pdu.room_id.as_bytes()); if notify { - self.userroomid_notificationcount - .update_and_fetch(&userroom_id, utils::increment)? - .expect("utils::increment will always put in a value"); + self.userroomid_notificationcount.increment(&userroom_id)?; } if highlight { - self.userroomid_highlightcount - .update_and_fetch(&userroom_id, utils::increment)? 
- .expect("utils::increment will always put in a value"); + self.userroomid_highlightcount.increment(&userroom_id)?; } - for senderkey in db - .pusher - .get_pusher_senderkeys(&user) - .filter_map(|r| r.ok()) - { + for senderkey in db.pusher.get_pusher_senderkeys(&user) { db.sending.send_push_pdu(&*pdu_id, senderkey)?; } } @@ -840,7 +834,7 @@ impl Rooms { key.extend_from_slice(word.as_bytes()); key.push(0xff); key.extend_from_slice(&pdu_id); - self.tokenids.insert(key, &[])?; + self.tokenids.insert(&key, &[])?; } if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) @@ -991,7 +985,7 @@ impl Rooms { Some(shortstatehash) => { // State already existed in db self.shorteventid_shortstatehash - .insert(shorteventid, &*shortstatehash)?; + .insert(&shorteventid, &*shortstatehash)?; return Ok(()); } None => { @@ -1037,7 +1031,7 @@ impl Rooms { } self.shorteventid_shortstatehash - .insert(shorteventid, &*shortstatehash)?; + .insert(&shorteventid, &*shortstatehash)?; Ok(()) } @@ -1070,7 +1064,7 @@ impl Rooms { }; self.shorteventid_shortstatehash - .insert(shorteventid, &old_shortstatehash)?; + .insert(&shorteventid, &old_shortstatehash)?; if new_pdu.state_key.is_none() { return utils::u64_from_bytes(&old_shortstatehash).map_err(|_| { Error::bad_database("Invalid shortstatehash in roomid_shortstatehash.") @@ -1078,17 +1072,16 @@ impl Rooms { } self.stateid_shorteventid - .scan_prefix(&old_shortstatehash) - .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) + .scan_prefix(old_shortstatehash.clone()) // Chop the old_shortstatehash out leaving behind the short state key .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) - .collect::, IVec>>() + .collect::, Box<[u8]>>>() } else { HashMap::new() }; if let Some(state_key) = &new_pdu.state_key { - let mut new_state: HashMap, IVec> = old_state; + let mut new_state: HashMap, Box<[u8]>> = old_state; let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); new_state_key.push(0xff); @@ -1205,6 +1198,7 @@ impl Rooms { room_id: &RoomId, db: &Database, ) -> Result { + dbg!(&pdu_builder); let PduBuilder { event_type, content, @@ -1385,7 +1379,7 @@ impl Rooms { db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") @@ -1464,23 +1458,23 @@ impl Rooms { /// Returns an iterator over all PDUs in a room. #[tracing::instrument(skip(self))] - pub fn all_pdus( - &self, + pub fn all_pdus<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, - ) -> Result>> { + ) -> impl Iterator, PduEvent)>> + 'a { self.pdus_since(user_id, room_id, 0) } - /// Returns a double-ended iterator over all events in a room that happened after the event with id `since` + /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. 
#[tracing::instrument(skip(self))] - pub fn pdus_since( - &self, + pub fn pdus_since<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result>> { + ) -> impl Iterator, PduEvent)>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1488,19 +1482,10 @@ impl Rooms { let mut first_pdu_id = prefix.clone(); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - let mut last_pdu_id = prefix; - last_pdu_id.extend_from_slice(&u64::MAX.to_be_bytes()); - let user_id = user_id.clone(); - Ok(self - .pduid_pdu - .range(first_pdu_id..last_pdu_id) - .filter_map(|r| { - if r.is_err() { - error!("Bad pdu in pduid_pdu: {:?}", r); - } - r.ok() - }) + self.pduid_pdu + .iter_from(&first_pdu_id, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; @@ -1508,17 +1493,17 @@ impl Rooms { pdu.unsigned.remove("transaction_id"); } Ok((pdu_id, pdu)) - })) + }) } /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. - pub fn pdus_until( - &self, + pub fn pdus_until<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator> { + ) -> impl Iterator, PduEvent)>> + 'a { // Create the first part of the full pdu id let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1530,9 +1515,7 @@ impl Rooms { let user_id = user_id.clone(); self.pduid_pdu - .range(..current) - .rev() - .filter_map(|r| r.ok()) + .iter_from(current, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) @@ -1547,12 +1530,12 @@ impl Rooms { /// Returns an iterator over all events and their token in a room that happened after the event /// with id `from` in chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_after( - &self, + pub fn pdus_after<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator> { + ) -> impl Iterator, PduEvent)>> + 'a { // Create the first part of the full pdu id let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1564,8 +1547,7 @@ impl Rooms { let user_id = user_id.clone(); self.pduid_pdu - .range(current..) 
- .filter_map(|r| r.ok()) + .iter_from(current, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) @@ -1744,7 +1726,7 @@ impl Rooms { self.serverroomids.insert(&serverroom_id, &[])?; self.userroomid_invitestate.insert( &userroom_id, - serde_json::to_vec(&last_state.unwrap_or_default()) + &serde_json::to_vec(&last_state.unwrap_or_default()) .expect("state to bytes always works"), )?; self.roomuserid_invitecount @@ -1766,7 +1748,7 @@ impl Rooms { } self.userroomid_leftstate.insert( &userroom_id, - serde_json::to_vec(&Vec::>::new()).unwrap(), + &serde_json::to_vec(&Vec::>::new()).unwrap(), )?; // TODO self.roomuserid_leftcount .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; @@ -1966,8 +1948,8 @@ impl Rooms { roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.as_bytes()); - self.userroomid_leftstate.remove(userroom_id)?; - self.roomuserid_leftcount.remove(roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; Ok(()) } @@ -1981,26 +1963,26 @@ impl Rooms { if let Some(room_id) = room_id { // New alias self.alias_roomid - .insert(alias.alias(), room_id.as_bytes())?; + .insert(&alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(aliasid, &*alias.as_bytes())?; + self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; } else { // room_id=None means remove alias - let room_id = self - .alias_roomid - .remove(alias.alias())? - .ok_or(Error::BadRequest( + if let Some(room_id) = self.alias_roomid.get(&alias.alias().as_bytes())? { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(&alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest( ErrorKind::NotFound, "Alias does not exist.", - ))?; - - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for key in self.aliasid_alias.scan_prefix(prefix).keys() { - self.aliasid_alias.remove(key?)?; + )); } } @@ -2009,7 +1991,7 @@ impl Rooms { pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { self.alias_roomid - .get(alias.alias())? + .get(alias.alias().as_bytes())? .map_or(Ok(None), |bytes| { Ok(Some( RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { @@ -2020,19 +2002,19 @@ impl Rooms { }) } - pub fn room_aliases(&self, room_id: &RoomId) -> impl Iterator> { + pub fn room_aliases<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.aliasid_alias - .scan_prefix(prefix) - .values() - .map(|bytes| { - Ok(utils::string_from_bytes(&bytes?) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))?) - }) + self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + Ok(utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))?) 
+ }) } pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { @@ -2046,13 +2028,13 @@ impl Rooms { } pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.contains_key(room_id.as_bytes())?) + Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - pub fn public_rooms(&self) -> impl Iterator> { - self.publicroomids.iter().keys().map(|bytes| { + pub fn public_rooms<'a>(&'a self) -> impl Iterator> + 'a { + self.publicroomids.iter().map(|(bytes, _)| { Ok( - RoomId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?) .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))?, @@ -2073,31 +2055,39 @@ impl Rooms { .map(str::to_lowercase) .collect::>(); - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - self.tokenids - .scan_prefix(&prefix2) - .keys() - .rev() // Newest pdus first - .filter_map(|r| r.ok()) - .map(|key| { - let pduid_index = key - .iter() - .enumerate() - .filter(|(_, &b)| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? - .0 - + 1; // +1 because the pdu id starts AFTER the separator + let iterators = words + .clone() + .into_iter() + .map(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xff); - let pdu_id = key[pduid_index..].to_vec(); + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - Ok::<_, Error>(pdu_id) - }) - .filter_map(|r| r.ok()) - }); + Ok::<_, Error>( + self.tokenids + .iter_from(&last_possible_id, true) // Newest pdus first + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(|(key, _)| { + let pduid_index = key + .iter() + .enumerate() + .filter(|(_, &b)| b == 0xff) + .nth(1) + .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? + .0 + + 1; // +1 because the pdu id starts AFTER the separator + + let pdu_id = key[pduid_index..].to_vec(); + + Ok::<_, Error>(pdu_id) + }) + .filter_map(|r| r.ok()), + ) + }) + .filter_map(|r| r.ok()); Ok(( utils::common_elements(iterators, |a, b| { @@ -2113,52 +2103,59 @@ impl Rooms { pub fn get_shared_rooms<'a>( &'a self, users: Vec, - ) -> impl Iterator> + 'a { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + ) -> Result> + 'a> { + let iterators = users + .into_iter() + .map(move |user_id| { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); - self.userroomid_joined - .scan_prefix(&prefix) - .keys() - .filter_map(|r| r.ok()) - .map(|key| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator + Ok::<_, Error>( + self.userroomid_joined + .scan_prefix(prefix) + .map(|(key, _)| { + let roomid_index = key + .iter() + .enumerate() + .find(|(_, &b)| b == 0xff) + .ok_or_else(|| { + Error::bad_database("Invalid userroomid_joined in db.") + })? 
+ .0 + + 1; // +1 because the room id starts AFTER the separator - let room_id = key[roomid_index..].to_vec(); + let room_id = key[roomid_index..].to_vec(); - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); + Ok::<_, Error>(room_id) + }) + .filter_map(|r| r.ok()), + ) + }) + .filter_map(|r| r.ok()); // We use the default compare function because keys are sorted correctly (not reversed) - utils::common_elements(iterators, Ord::cmp) + Ok(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - }) + })) } /// Returns an iterator of all servers participating in this room. - pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator>> { + pub fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.roomserverids.scan_prefix(prefix).keys().map(|key| { + self.roomserverids.scan_prefix(prefix).map(|(key, _)| { Ok(Box::::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -2171,15 +2168,17 @@ impl Rooms { } /// Returns an iterator of all rooms a server participates in (as far as we know). - pub fn server_rooms(&self, server: &ServerName) -> impl Iterator> { + pub fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> impl Iterator> + 'a { let mut prefix = server.as_bytes().to_vec(); prefix.push(0xff); - self.serverroomids.scan_prefix(prefix).keys().map(|key| { + self.serverroomids.scan_prefix(prefix).map(|(key, _)| { Ok(RoomId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -2191,42 +2190,42 @@ impl Rooms { /// Returns an iterator over all joined members of a room. #[tracing::instrument(skip(self))] - pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { + pub fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.roomuserid_joined - .scan_prefix(prefix) - .keys() - .map(|key| { - Ok(UserId::try_from( - utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, + self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { + Ok(UserId::try_from( + utils::string_from_bytes( + &key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))?) - }) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))?) + }) } /// Returns an iterator over all User IDs who ever joined a room. 
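// A minimal sketch of the composite-key convention these membership iterators rely on
// (the helper names here are illustrative, not part of the patch): keys are
// "room id ++ 0xff ++ user id", so the user is recovered by splitting on the 0xff
// separator from the right, exactly as the `rsplit` calls in the surrounding hunks do.
fn roomuser_key(room_id: &RoomId, user_id: &UserId) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // separator byte, outside the ASCII range used by Matrix identifiers
    key.extend_from_slice(user_id.as_bytes());
    key
}

fn user_part(key: &[u8]) -> Option<&[u8]> {
    key.rsplit(|&b| b == 0xff).next() // trailing segment = user id bytes
}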
- pub fn room_useroncejoined(&self, room_id: &RoomId) -> impl Iterator> { + pub fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuseroncejoinedids .scan_prefix(prefix) - .keys() - .map(|key| { + .map(|(key, _)| { Ok(UserId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -2240,18 +2239,19 @@ impl Rooms { /// Returns an iterator over all invited members of a room. #[tracing::instrument(skip(self))] - pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { + pub fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuserid_invitecount .scan_prefix(prefix) - .keys() - .map(|key| { + .map(|(key, _)| { Ok(UserId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -2270,7 +2270,7 @@ impl Rooms { key.extend_from_slice(user_id.as_bytes()); self.roomuserid_invitecount - .get(key)? + .get(&key)? .map_or(Ok(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid invitecount in db.") @@ -2285,7 +2285,7 @@ impl Rooms { key.extend_from_slice(user_id.as_bytes()); self.roomuserid_leftcount - .get(key)? + .get(&key)? .map_or(Ok(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid leftcount in db.") @@ -2295,15 +2295,16 @@ impl Rooms { /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] - pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { + pub fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { self.userroomid_joined - .scan_prefix(user_id.as_bytes()) - .keys() - .map(|key| { + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { Ok(RoomId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -2317,32 +2318,33 @@ impl Rooms { /// Returns an iterator over all rooms a user was invited to. 
#[tracing::instrument(skip(self))] - pub fn rooms_invited( - &self, + pub fn rooms_invited<'a>( + &'a self, user_id: &UserId, - ) -> impl Iterator>)>> { + ) -> impl Iterator>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_invitestate.scan_prefix(prefix).map(|r| { - let (key, state) = r?; - let room_id = RoomId::try_from( - utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::try_from( + utils::string_from_bytes( + &key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - Ok((room_id, state)) - }) + Ok((room_id, state)) + }) } #[tracing::instrument(skip(self))] @@ -2356,7 +2358,7 @@ impl Rooms { key.extend_from_slice(&room_id.as_bytes()); self.userroomid_invitestate - .get(key)? + .get(&key)? .map(|state| { let state = serde_json::from_slice(&state) .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; @@ -2377,7 +2379,7 @@ impl Rooms { key.extend_from_slice(&room_id.as_bytes()); self.userroomid_leftstate - .get(key)? + .get(&key)? .map(|state| { let state = serde_json::from_slice(&state) .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; @@ -2389,32 +2391,33 @@ impl Rooms { /// Returns an iterator over all rooms a user left. 
#[tracing::instrument(skip(self))] - pub fn rooms_left( - &self, + pub fn rooms_left<'a>( + &'a self, user_id: &UserId, - ) -> impl Iterator>)>> { + ) -> impl Iterator>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_leftstate.scan_prefix(prefix).map(|r| { - let (key, state) = r?; - let room_id = RoomId::try_from( - utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::try_from( + utils::string_from_bytes( + &key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - Ok((room_id, state)) - }) + Ok((room_id, state)) + }) } pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { @@ -2422,7 +2425,7 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.roomuseroncejoinedids.get(userroom_id)?.is_some()) + Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) } pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { @@ -2430,7 +2433,7 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_joined.get(userroom_id)?.is_some()) + Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) } pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { @@ -2438,7 +2441,7 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_invitestate.get(userroom_id)?.is_some()) + Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) } pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { @@ -2446,6 +2449,6 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_leftstate.get(userroom_id)?.is_some()) + Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index f4c7075..677d26e 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result}; +use crate::{database::abstraction::Tree, utils, Error, Result}; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -13,17 +13,17 @@ use std::{ collections::{HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, + sync::Arc, }; -#[derive(Clone)] pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: sled::Tree, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: sled::Tree, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: sled::Tree, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: sled::Tree, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) 
roomid_lasttypingupdate: sled::Tree, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: sled::Tree, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: sled::Tree, // LastPresenceUpdate = Count + pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count } impl RoomEdus { @@ -38,15 +38,15 @@ impl RoomEdus { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + // Remove old entry - if let Some(old) = self + if let Some((old, _)) = self .readreceiptid_readreceipt - .scan_prefix(&prefix) - .keys() - .rev() - .filter_map(|r| r.ok()) - .take_while(|key| key.starts_with(&prefix)) - .find(|key| { + .iter_from(&last_possible_key, true) + .take_while(|(key, _)| key.starts_with(&prefix)) + .find(|(key, _)| { key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element") @@ -54,7 +54,7 @@ impl RoomEdus { }) { // This is the old room_latest - self.readreceiptid_readreceipt.remove(old)?; + self.readreceiptid_readreceipt.remove(&old)?; } let mut room_latest_id = prefix; @@ -63,8 +63,8 @@ impl RoomEdus { room_latest_id.extend_from_slice(&user_id.as_bytes()); self.readreceiptid_readreceipt.insert( - room_latest_id, - &*serde_json::to_string(&event).expect("EduEvent::to_string always works"), + &room_latest_id, + &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), )?; Ok(()) @@ -72,13 +72,12 @@ impl RoomEdus { /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. #[tracing::instrument(skip(self))] - pub fn readreceipts_since( - &self, + pub fn readreceipts_since<'a>( + &'a self, room_id: &RoomId, since: u64, - ) -> Result< - impl Iterator)>>, - > { + ) -> impl Iterator)>> + 'a + { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); @@ -86,10 +85,8 @@ impl RoomEdus { let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - Ok(self - .readreceiptid_readreceipt - .range(&*first_possible_edu..) - .filter_map(|r| r.ok()) + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) .take_while(move |(k, _)| k.starts_with(&prefix2)) .map(move |(k, v)| { let count = @@ -115,7 +112,7 @@ impl RoomEdus { serde_json::value::to_raw_value(&json).expect("json is valid raw value"), ), )) - })) + }) } /// Sets a private read marker at `count`. @@ -146,11 +143,13 @@ impl RoomEdus { key.push(0xff); key.extend_from_slice(&user_id.as_bytes()); - self.roomuserid_privateread.get(key)?.map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) + self.roomuserid_privateread + .get(&key)? 
+ .map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) } /// Returns the count of the last typing update in this room. @@ -215,11 +214,10 @@ impl RoomEdus { // Maybe there are multiple ones from calling roomtyping_add multiple times for outdated_edu in self .typingid_userid - .scan_prefix(&prefix) - .filter_map(|r| r.ok()) - .filter(|(_, v)| v == user_id.as_bytes()) + .scan_prefix(prefix) + .filter(|(_, v)| &**v == user_id.as_bytes()) { - self.typingid_userid.remove(outdated_edu.0)?; + self.typingid_userid.remove(&outdated_edu.0)?; found_outdated = true; } @@ -247,10 +245,8 @@ impl RoomEdus { // Find all outdated edus before inserting a new one for outdated_edu in self .typingid_userid - .scan_prefix(&prefix) - .keys() - .map(|key| { - let key = key?; + .scan_prefix(prefix) + .map(|(key, _)| { Ok::<_, Error>(( key.clone(), utils::u64_from_bytes( @@ -265,7 +261,7 @@ impl RoomEdus { .take_while(|&(_, timestamp)| timestamp < current_timestamp) { // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(outdated_edu.0)?; + self.typingid_userid.remove(&outdated_edu.0)?; found_outdated = true; } @@ -309,10 +305,9 @@ impl RoomEdus { for user_id in self .typingid_userid .scan_prefix(prefix) - .values() - .map(|user_id| { + .map(|(_, user_id)| { Ok::<_, Error>( - UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| { + UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?, @@ -351,12 +346,12 @@ impl RoomEdus { presence_id.extend_from_slice(&presence.sender.as_bytes()); self.presenceid_presence.insert( - presence_id, - &*serde_json::to_string(&presence).expect("PresenceEvent can be serialized"), + &presence_id, + &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), )?; self.userid_lastpresenceupdate.insert( - &user_id.as_bytes(), + user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -403,7 +398,7 @@ impl RoomEdus { presence_id.extend_from_slice(&user_id.as_bytes()); self.presenceid_presence - .get(presence_id)? + .get(&presence_id)? .map(|value| { let mut presence = serde_json::from_slice::(&value) .map_err(|_| Error::bad_database("Invalid presence event in db."))?; @@ -438,7 +433,6 @@ impl RoomEdus { for (user_id_bytes, last_timestamp) in self .userid_lastpresenceupdate .iter() - .filter_map(|r| r.ok()) .filter_map(|(k, bytes)| { Some(( k, @@ -468,8 +462,8 @@ impl RoomEdus { presence_id.extend_from_slice(&user_id_bytes); self.presenceid_presence.insert( - presence_id, - &*serde_json::to_string(&PresenceEvent { + &presence_id, + &serde_json::to_vec(&PresenceEvent { content: PresenceEventContent { avatar_url: None, currently_active: None, @@ -515,8 +509,7 @@ impl RoomEdus { for (key, value) in self .presenceid_presence - .range(&*first_possible_edu..) 
- .filter_map(|r| r.ok()) + .iter_from(&*first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { let user_id = UserId::try_from( diff --git a/src/database/sending.rs b/src/database/sending.rs index ed5b5ef..77f6ed7 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -12,7 +12,10 @@ use crate::{ use federation::transactions::send_transaction_message; use log::{error, warn}; use ring::digest; -use rocket::futures::stream::{FuturesUnordered, StreamExt}; +use rocket::futures::{ + channel::mpsc, + stream::{FuturesUnordered, StreamExt}, +}; use ruma::{ api::{ appservice, @@ -27,9 +30,10 @@ use ruma::{ receipt::ReceiptType, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, }; -use sled::IVec; use tokio::{select, sync::Semaphore}; +use super::abstraction::Tree; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(Box), @@ -70,13 +74,13 @@ pub enum SendingEventType { Edu(Vec), } -#[derive(Clone)] pub struct Sending { /// The state for a given state hash. - pub(super) servername_educount: sled::Tree, // EduCount: Count of last EDU sync - pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId - pub(super) servercurrentevents: sled::Tree, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent + pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync + pub(super) servernamepduids: Arc, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId + pub(super) servercurrentevents: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent pub(super) maximum_requests: Arc, + pub sender: mpsc::UnboundedSender>, } enum TransactionStatus { @@ -86,28 +90,25 @@ enum TransactionStatus { } impl Sending { - pub fn start_handler(&self, db: &Database) { - let servernamepduids = self.servernamepduids.clone(); - let servercurrentevents = self.servercurrentevents.clone(); - + pub fn start_handler(&self, db: Arc, mut receiver: mpsc::UnboundedReceiver>) { let db = db.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); - // Retry requests we could not finish yet - let mut subscriber = servernamepduids.watch_prefix(b""); let mut current_transaction_status = HashMap::, TransactionStatus>::new(); + // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); - for (key, outgoing_kind, event) in servercurrentevents - .iter() - .filter_map(|r| r.ok()) - .filter_map(|(key, _)| { - Self::parse_servercurrentevent(&key) - .ok() - .map(|(k, e)| (key, k, e)) - }) + for (key, outgoing_kind, event) in + db.sending + .servercurrentevents + .iter() + .filter_map(|(key, _)| { + Self::parse_servercurrentevent(&key) + .ok() + .map(|(k, e)| (key, k, e)) + }) { let entry = initial_transactions .entry(outgoing_kind.clone()) @@ -118,7 +119,7 @@ impl Sending { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - servercurrentevents.remove(key).unwrap(); + db.sending.servercurrentevents.remove(&key).unwrap(); continue; } @@ -137,20 +138,16 @@ impl Sending { match response { Ok(outgoing_kind) => { let prefix = outgoing_kind.get_prefix(); - for key in servercurrentevents - .scan_prefix(&prefix) - .keys() - .filter_map(|r| r.ok()) + for (key, _) in db.sending.servercurrentevents + .scan_prefix(prefix.clone()) { - servercurrentevents.remove(key).unwrap(); + db.sending.servercurrentevents.remove(&key).unwrap(); } // Find events that have been added since starting the last request - 
let new_events = servernamepduids - .scan_prefix(&prefix) - .keys() - .filter_map(|r| r.ok()) - .map(|k| { + let new_events = db.sending.servernamepduids + .scan_prefix(prefix.clone()) + .map(|(k, _)| { SendingEventType::Pdu(k[prefix.len()..].to_vec()) }) .take(30) @@ -166,8 +163,8 @@ impl Sending { SendingEventType::Pdu(b) | SendingEventType::Edu(b) => { current_key.extend_from_slice(&b); - servercurrentevents.insert(¤t_key, &[]).unwrap(); - servernamepduids.remove(¤t_key).unwrap(); + db.sending.servercurrentevents.insert(¤t_key, &[]).unwrap(); + db.sending.servernamepduids.remove(¤t_key).unwrap(); } } } @@ -195,18 +192,15 @@ impl Sending { } }; }, - Some(event) = &mut subscriber => { - // New sled version: - //for (_tree, key, value_opt) in &event { - // if value_opt.is_none() { - // continue; - // } - - if let sled::Event::Insert { key, .. } = event { - if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { - if let Some(events) = Self::select_events(&outgoing_kind, vec![(event, key)], &mut current_transaction_status, &servercurrentevents, &servernamepduids, &db) { - futures.push(Self::handle_events(outgoing_kind, events, &db)); - } + Some(key) = receiver.next() => { + if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { + if let Ok(Some(events)) = Self::select_events( + &outgoing_kind, + vec![(event, key)], + &mut current_transaction_status, + &db + ) { + futures.push(Self::handle_events(outgoing_kind, events, &db)); } } } @@ -217,12 +211,10 @@ impl Sending { fn select_events( outgoing_kind: &OutgoingKind, - new_events: Vec<(SendingEventType, IVec)>, // Events we want to send: event and full key + new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key current_transaction_status: &mut HashMap, TransactionStatus>, - servercurrentevents: &sled::Tree, - servernamepduids: &sled::Tree, db: &Database, - ) -> Option> { + ) -> Result>> { let mut retry = false; let mut allow = true; @@ -252,29 +244,25 @@ impl Sending { .or_insert(TransactionStatus::Running); if !allow { - return None; + return Ok(None); } let mut events = Vec::new(); if retry { // We retry the previous transaction - for key in servercurrentevents - .scan_prefix(&prefix) - .keys() - .filter_map(|r| r.ok()) - { + for (key, _) in db.sending.servercurrentevents.scan_prefix(prefix) { if let Ok((_, e)) = Self::parse_servercurrentevent(&key) { events.push(e); } } } else { for (e, full_key) in new_events { - servercurrentevents.insert(&full_key, &[]).unwrap(); + db.sending.servercurrentevents.insert(&full_key, &[])?; // If it was a PDU we have to unqueue it // TODO: don't try to unqueue EDUs - servernamepduids.remove(&full_key).unwrap(); + db.sending.servernamepduids.remove(&full_key)?; events.push(e); } @@ -284,13 +272,12 @@ impl Sending { events.extend_from_slice(&select_edus); db.sending .servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes()) - .unwrap(); + .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; } } } - Some(events) + Ok(Some(events)) } pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec, u64)> { @@ -307,7 +294,7 @@ impl Sending { let mut max_edu_count = since; 'outer: for room_id in db.rooms.server_rooms(server) { let room_id = room_id?; - for r in db.rooms.edus.readreceipts_since(&room_id, since)? 
{ + for r in db.rooms.edus.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; if count > max_edu_count { @@ -372,12 +359,13 @@ impl Sending { } #[tracing::instrument(skip(self))] - pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: IVec) -> Result<()> { + pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Box<[u8]>) -> Result<()> { let mut key = b"$".to_vec(); key.extend_from_slice(&senderkey); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(key, b"")?; + self.servernamepduids.insert(&key, b"")?; + self.sender.unbounded_send(key).unwrap(); Ok(()) } @@ -387,7 +375,8 @@ impl Sending { let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(key, b"")?; + self.servernamepduids.insert(&key, b"")?; + self.sender.unbounded_send(key).unwrap(); Ok(()) } @@ -398,7 +387,8 @@ impl Sending { key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(key, b"")?; + self.servernamepduids.insert(&key, b"")?; + self.sender.unbounded_send(key).unwrap(); Ok(()) } @@ -641,7 +631,7 @@ impl Sending { } } - fn parse_servercurrentevent(key: &IVec) -> Result<(OutgoingKind, SendingEventType)> { + fn parse_servercurrentevent(key: &[u8]) -> Result<(OutgoingKind, SendingEventType)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { let mut parts = key[1..].splitn(2, |&b| b == 0xff); diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 1f8ba7d..3e37779 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,10 +1,12 @@ +use std::sync::Arc; + use crate::Result; use ruma::{DeviceId, UserId}; -use sled::IVec; -#[derive(Clone)] +use super::abstraction::Tree; + pub struct TransactionIds { - pub(super) userdevicetxnid_response: sled::Tree, // Response can be empty (/sendToDevice) or the event id (/send) + pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) } impl TransactionIds { @@ -21,7 +23,7 @@ impl TransactionIds { key.push(0xff); key.extend_from_slice(txn_id.as_bytes()); - self.userdevicetxnid_response.insert(key, data)?; + self.userdevicetxnid_response.insert(&key, data)?; Ok(()) } @@ -31,7 +33,7 @@ impl TransactionIds { user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &str, - ) -> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); @@ -39,6 +41,6 @@ impl TransactionIds { key.extend_from_slice(txn_id.as_bytes()); // If there's no entry, this is a new transaction - Ok(self.userdevicetxnid_response.get(key)?) + Ok(self.userdevicetxnid_response.get(&key)?) 
} } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 3b77840..f7f3d1f 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ api::client::{ @@ -8,10 +10,11 @@ use ruma::{ DeviceId, UserId, }; -#[derive(Clone)] +use super::abstraction::Tree; + pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: sled::Tree, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: sled::Tree, // UiaaRequest = canonical json value + pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: Arc, // UiaaRequest = canonical json value } impl Uiaa { @@ -185,7 +188,7 @@ impl Uiaa { self.userdevicesessionid_uiaarequest.insert( &userdevicesessionid, - &*serde_json::to_string(request).expect("json value to string always works"), + &serde_json::to_vec(request).expect("json value to vec always works"), )?; Ok(()) @@ -233,7 +236,7 @@ impl Uiaa { if let Some(uiaainfo) = uiaainfo { self.userdevicesessionid_uiaainfo.insert( &userdevicesessionid, - &*serde_json::to_string(&uiaainfo).expect("UiaaInfo::to_string always works"), + &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), )?; } else { self.userdevicesessionid_uiaainfo diff --git a/src/database/users.rs b/src/database/users.rs index 52e6e33..b6d3b3c 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -7,40 +7,41 @@ use ruma::{ serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, mem}; +use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; + +use super::abstraction::Tree; -#[derive(Clone)] pub struct Users { - pub(super) userid_password: sled::Tree, - pub(super) userid_displayname: sled::Tree, - pub(super) userid_avatarurl: sled::Tree, - pub(super) userdeviceid_token: sled::Tree, - pub(super) userdeviceid_metadata: sled::Tree, // This is also used to check if a device exists - pub(super) userid_devicelistversion: sled::Tree, // DevicelistVersion = u64 - pub(super) token_userdeviceid: sled::Tree, + pub(super) userid_password: Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userdeviceid_token: Arc, + pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists + pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 + pub(super) token_userdeviceid: Arc, - pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: sled::Tree, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: sled::Tree, - pub(super) userid_selfsigningkeyid: sled::Tree, - pub(super) userid_usersigningkeyid: sled::Tree, + pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId + pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count + pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count + pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, - pub(super) todeviceid_events: sled::Tree, 
// ToDeviceId = UserId + DeviceId + Count + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count } impl Users { /// Check if a user has an account on this homeserver. pub fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.contains_key(user_id.to_string())?) + Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated pub fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password - .get(user_id.to_string())? + .get(user_id.as_bytes())? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User does not exist.", @@ -55,14 +56,14 @@ impl Users { } /// Returns the number of users registered on this server. - pub fn count(&self) -> usize { - self.userid_password.iter().count() + pub fn count(&self) -> Result { + Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. pub fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid - .get(token)? + .get(token.as_bytes())? .map_or(Ok(None), |bytes| { let mut parts = bytes.split(|&b| b == 0xff); let user_bytes = parts.next().ok_or_else(|| { @@ -87,10 +88,10 @@ impl Users { } /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator> { - self.userid_password.iter().keys().map(|bytes| { + pub fn iter<'a>(&'a self) -> impl Iterator> + 'a { + self.userid_password.iter().map(|(bytes, _)| { Ok( - UserId::try_from(utils::string_from_bytes(&bytes?).map_err(|_| { + UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))?, @@ -101,7 +102,7 @@ impl Users { /// Returns the password hash for the given user. pub fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Password hash in db is not valid string.") @@ -113,7 +114,8 @@ impl Users { pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(&password) { - self.userid_password.insert(user_id.to_string(), &*hash)?; + self.userid_password + .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) } else { Err(Error::BadRequest( @@ -122,7 +124,7 @@ impl Users { )) } } else { - self.userid_password.insert(user_id.to_string(), "")?; + self.userid_password.insert(user_id.as_bytes(), b"")?; Ok(()) } } @@ -130,7 +132,7 @@ impl Users { /// Returns the displayname of a user on this homeserver. pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Displayname in db is invalid.") @@ -142,9 +144,9 @@ impl Users { pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname - .insert(user_id.to_string(), &*displayname)?; + .insert(user_id.as_bytes(), displayname.as_bytes())?; } else { - self.userid_displayname.remove(user_id.to_string())?; + self.userid_displayname.remove(user_id.as_bytes())?; } Ok(()) @@ -153,7 +155,7 @@ impl Users { /// Get a the avatar_url of a user. 
pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; @@ -166,9 +168,9 @@ impl Users { pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl - .insert(user_id.to_string(), avatar_url.to_string().as_str())?; + .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; } else { - self.userid_avatarurl.remove(user_id.to_string())?; + self.userid_avatarurl.remove(user_id.as_bytes())?; } Ok(()) @@ -190,19 +192,17 @@ impl Users { userdeviceid.extend_from_slice(device_id.as_bytes()); self.userid_devicelistversion - .update_and_fetch(&user_id.as_bytes(), utils::increment)? - .expect("utils::increment will always put in a value"); + .increment(user_id.as_bytes())?; self.userdeviceid_metadata.insert( - userdeviceid, - serde_json::to_string(&Device { + &userdeviceid, + &serde_json::to_vec(&Device { device_id: device_id.into(), display_name: initial_device_display_name, last_seen_ip: None, // TODO last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), }) - .expect("Device::to_string never fails.") - .as_bytes(), + .expect("Device::to_string never fails."), )?; self.set_token(user_id, &device_id, token)?; @@ -217,7 +217,8 @@ impl Users { userdeviceid.extend_from_slice(device_id.as_bytes()); // Remove tokens - if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? { + if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { + self.userdeviceid_token.remove(&userdeviceid)?; self.token_userdeviceid.remove(&old_token)?; } @@ -225,15 +226,14 @@ impl Users { let mut prefix = userdeviceid.clone(); prefix.push(0xff); - for key in self.todeviceid_events.scan_prefix(&prefix).keys() { - self.todeviceid_events.remove(key?)?; + for (key, _) in self.todeviceid_events.scan_prefix(prefix) { + self.todeviceid_events.remove(&key)?; } // TODO: Remove onetimekeys self.userid_devicelistversion - .update_and_fetch(&user_id.as_bytes(), utils::increment)? - .expect("utils::increment will always put in a value"); + .increment(user_id.as_bytes())?; self.userdeviceid_metadata.remove(&userdeviceid)?; @@ -241,16 +241,18 @@ impl Users { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator>> { + pub fn all_device_ids<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata self.userdeviceid_metadata .scan_prefix(prefix) - .keys() - .map(|bytes| { + .map(|(bytes, _)| { Ok(utils::string_from_bytes( - &*bytes? + &bytes .rsplit(|&b| b == 0xff) .next() .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, @@ -271,13 +273,15 @@ impl Users { // Remove old token if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? 
{ - self.token_userdeviceid.remove(old_token)?; + self.token_userdeviceid.remove(&old_token)?; // It will be removed from userdeviceid_token by the insert later } // Assign token to user device combination - self.userdeviceid_token.insert(&userdeviceid, &*token)?; - self.token_userdeviceid.insert(token, userdeviceid)?; + self.userdeviceid_token + .insert(&userdeviceid, token.as_bytes())?; + self.token_userdeviceid + .insert(token.as_bytes(), &userdeviceid)?; Ok(()) } @@ -309,8 +313,7 @@ impl Users { self.onetimekeyid_onetimekeys.insert( &key, - &*serde_json::to_string(&one_time_key_value) - .expect("OneTimeKey::to_string always works"), + &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), )?; self.userid_lastonetimekeyupdate @@ -350,10 +353,9 @@ impl Users { .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys - .scan_prefix(&prefix) + .scan_prefix(prefix) .next() - .map(|r| { - let (key, value) = r?; + .map(|(key, value)| { self.onetimekeyid_onetimekeys.remove(&key)?; Ok(( @@ -383,21 +385,20 @@ impl Users { let mut counts = BTreeMap::new(); - for algorithm in self - .onetimekeyid_onetimekeys - .scan_prefix(&userdeviceid) - .keys() - .map(|bytes| { - Ok::<_, Error>( - serde_json::from_slice::( - &*bytes?.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("OneTimeKey ID in db is invalid.") - })?, + for algorithm in + self.onetimekeyid_onetimekeys + .scan_prefix(userdeviceid) + .map(|(bytes, _)| { + Ok::<_, Error>( + serde_json::from_slice::( + &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + Error::bad_database("OneTimeKey ID in db is invalid.") + })?, + ) + .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? + .algorithm(), ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? 
- .algorithm(), - ) - }) + }) { *counts.entry(algorithm?).or_default() += UInt::from(1_u32); } @@ -419,7 +420,7 @@ impl Users { self.keyid_key.insert( &userdeviceid, - &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"), + &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), )?; self.mark_device_key_update(user_id, rooms, globals)?; @@ -460,11 +461,11 @@ impl Users { self.keyid_key.insert( &master_key_key, - &*serde_json::to_string(&master_key).expect("CrossSigningKey::to_string always works"), + &serde_json::to_vec(&master_key).expect("CrossSigningKey::to_vec always works"), )?; self.userid_masterkeyid - .insert(&*user_id.to_string(), master_key_key)?; + .insert(user_id.as_bytes(), &master_key_key)?; // Self-signing key if let Some(self_signing_key) = self_signing_key { @@ -486,12 +487,12 @@ impl Users { self.keyid_key.insert( &self_signing_key_key, - &*serde_json::to_string(&self_signing_key) - .expect("CrossSigningKey::to_string always works"), + &serde_json::to_vec(&self_signing_key) + .expect("CrossSigningKey::to_vec always works"), )?; self.userid_selfsigningkeyid - .insert(&*user_id.to_string(), self_signing_key_key)?; + .insert(user_id.as_bytes(), &self_signing_key_key)?; } // User-signing key @@ -514,12 +515,12 @@ impl Users { self.keyid_key.insert( &user_signing_key_key, - &*serde_json::to_string(&user_signing_key) - .expect("CrossSigningKey::to_string always works"), + &serde_json::to_vec(&user_signing_key) + .expect("CrossSigningKey::to_vec always works"), )?; self.userid_usersigningkeyid - .insert(&*user_id.to_string(), user_signing_key_key)?; + .insert(user_id.as_bytes(), &user_signing_key_key)?; } self.mark_device_key_update(user_id, rooms, globals)?; @@ -561,8 +562,7 @@ impl Users { self.keyid_key.insert( &key, - &*serde_json::to_string(&cross_signing_key) - .expect("CrossSigningKey::to_string always works"), + &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), )?; // TODO: Should we notify about this change? 
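// The hunks above and below swap serde_json::to_string for serde_json::to_vec
// because the new Tree abstraction stores plain byte slices. A minimal
// round-trip sketch of that convention; the Device type here is a stand-in
// for illustration, not the patch's own definition:
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Device {
    device_id: String,
    display_name: Option<String>,
}

fn main() -> Result<(), serde_json::Error> {
    let device = Device {
        device_id: "ABCDEFGH".into(),
        display_name: Some("laptop".into()),
    };
    let bytes = serde_json::to_vec(&device)?; // what gets handed to Tree::insert
    let back: Device = serde_json::from_slice(&bytes)?; // what a reader reconstructs
    assert_eq!(device, back);
    Ok(())
}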
@@ -572,24 +572,20 @@ impl Users { } #[tracing::instrument(skip(self))] - pub fn keys_changed( - &self, + pub fn keys_changed<'a>( + &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator> { + ) -> impl Iterator> + 'a { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); let mut start = prefix.clone(); start.extend_from_slice(&(from + 1).to_be_bytes()); - let mut end = prefix.clone(); - end.extend_from_slice(&to.unwrap_or(u64::MAX).to_be_bytes()); - self.keychangeid_userid - .range(start..end) - .filter_map(|r| r.ok()) + .iter_from(&start, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, bytes)| { Ok( @@ -625,13 +621,13 @@ impl Users { key.push(0xff); key.extend_from_slice(&count); - self.keychangeid_userid.insert(key, &*user_id.to_string())?; + self.keychangeid_userid.insert(&key, user_id.as_bytes())?; } let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&count); - self.keychangeid_userid.insert(key, &*user_id.to_string())?; + self.keychangeid_userid.insert(&key, user_id.as_bytes())?; Ok(()) } @@ -645,7 +641,7 @@ impl Users { key.push(0xff); key.extend_from_slice(device_id.as_bytes()); - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { Error::bad_database("DeviceKeys in db are invalid.") })?)) @@ -658,9 +654,9 @@ impl Users { allowed_signatures: F, ) -> Result> { self.userid_masterkeyid - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { let mut cross_signing_key = serde_json::from_slice::(&bytes) .map_err(|_| { Error::bad_database("CrossSigningKey in db is invalid.") @@ -685,9 +681,9 @@ impl Users { allowed_signatures: F, ) -> Result> { self.userid_selfsigningkeyid - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { let mut cross_signing_key = serde_json::from_slice::(&bytes) .map_err(|_| { Error::bad_database("CrossSigningKey in db is invalid.") @@ -708,9 +704,9 @@ impl Users { pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { self.userid_usersigningkeyid - .get(user_id.to_string())? + .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { Error::bad_database("CrossSigningKey in db is invalid.") })?)) @@ -740,7 +736,7 @@ impl Users { self.todeviceid_events.insert( &key, - &*serde_json::to_string(&json).expect("Map::to_string always works"), + &serde_json::to_vec(&json).expect("Map::to_vec always works"), )?; Ok(()) @@ -759,9 +755,9 @@ impl Users { prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); - for value in self.todeviceid_events.scan_prefix(&prefix).values() { + for (_, value) in self.todeviceid_events.scan_prefix(prefix) { events.push( - serde_json::from_slice(&*value?) 
+ serde_json::from_slice(&value) .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, ); } @@ -786,10 +782,9 @@ impl Users { for (key, _) in self .todeviceid_events - .range(&*prefix..=&*last) - .keys() - .map(|key| { - let key = key?; + .iter_from(&last, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(key, _)| { Ok::<_, Error>(( key.clone(), utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) @@ -799,7 +794,7 @@ impl Users { .filter_map(|r| r.ok()) .take_while(|&(_, count)| count <= until) { - self.todeviceid_events.remove(key)?; + self.todeviceid_events.remove(&key)?; } Ok(()) @@ -819,14 +814,11 @@ impl Users { assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); self.userid_devicelistversion - .update_and_fetch(&user_id.as_bytes(), utils::increment)? - .expect("utils::increment will always put in a value"); + .increment(user_id.as_bytes())?; self.userdeviceid_metadata.insert( - userdeviceid, - serde_json::to_string(device) - .expect("Device::to_string always works") - .as_bytes(), + &userdeviceid, + &serde_json::to_vec(device).expect("Device::to_string always works"), )?; Ok(()) @@ -861,15 +853,17 @@ impl Users { }) } - pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { + pub fn all_devices_metadata<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); self.userdeviceid_metadata .scan_prefix(key) - .values() - .map(|bytes| { - Ok(serde_json::from_slice::(&bytes?).map_err(|_| { + .map(|(_, bytes)| { + Ok(serde_json::from_slice::(&bytes).map_err(|_| { Error::bad_database("Device in userdeviceid_metadata is invalid.") })?) }) @@ -885,7 +879,7 @@ impl Users { // Set the password to "" to indicate a deactivated account. Hashes will never result in an // empty string, so the user will not be able to log in again. Systems like changing the // password without logging in should check if the account is deactivated. 
- self.userid_password.insert(user_id.to_string(), "")?; + self.userid_password.insert(user_id.as_bytes(), &[])?; // TODO: Unhook 3PID Ok(()) diff --git a/src/error.rs b/src/error.rs index e2664e2..93c67c1 100644 --- a/src/error.rs +++ b/src/error.rs @@ -23,11 +23,16 @@ pub type Result = std::result::Result; #[derive(Error, Debug)] pub enum Error { - #[error("There was a problem with the connection to the database.")] + #[error("There was a problem with the connection to the sled database.")] SledError { #[from] source: sled::Error, }, + #[error("There was a problem with the connection to the rocksdb database: {source}")] + RocksDbError { + #[from] + source: rocksdb::Error, + }, #[error("Could not generate an image.")] ImageError { #[from] diff --git a/src/main.rs b/src/main.rs index e76cea4..8b63d1d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,6 +12,8 @@ mod pdu; mod ruma_wrapper; mod utils; +use std::sync::Arc; + use database::Config; pub use database::Database; pub use error::{Error, Result}; @@ -31,7 +33,7 @@ use rocket::{ use tracing::span; use tracing_subscriber::{prelude::*, Registry}; -fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket { +fn setup_rocket(config: Figment, data: Arc) -> rocket::Rocket { rocket::custom(config) .manage(data) .mount( @@ -197,8 +199,6 @@ async fn main() { .await .expect("config is valid"); - db.sending.start_handler(&db); - if config.allow_jaeger { let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() .with_service_name("conduit") diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 147df3c..ba2c37e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,10 +1,10 @@ -use crate::Error; +use crate::{Database, Error}; use ruma::{ api::OutgoingResponse, identifiers::{DeviceId, UserId}, Outgoing, }; -use std::ops::Deref; +use std::{ops::Deref, sync::Arc}; #[cfg(feature = "conduit_bin")] use { @@ -51,7 +51,7 @@ where async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome { let metadata = T::Incoming::METADATA; let db = request - .guard::>() + .guard::>>() .await .expect("database was loaded"); @@ -75,6 +75,7 @@ where )) = db .appservice .iter_all() + .unwrap() .filter_map(|r| r.ok()) .find(|(_id, registration)| { registration diff --git a/src/server_server.rs b/src/server_server.rs index b405c1a..7a338dc 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -433,7 +433,7 @@ pub async fn request_well_known( #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] #[tracing::instrument(skip(db))] pub fn get_server_version_route( - db: State<'_, Database>, + db: State<'_, Arc>, ) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -451,7 +451,7 @@ pub fn get_server_version_route( // Response type for this endpoint is Json because we need to calculate a signature for the response #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_route(db: State<'_, Database>) -> Json { +pub fn get_server_keys_route(db: State<'_, Arc>) -> Json { if !db.globals.allow_federation() { // TODO: Use proper types return Json("Federation is disabled.".to_owned()); @@ -498,7 +498,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json { +pub fn 
get_server_keys_deprecated_route(db: State<'_, Arc>) -> Json { get_server_keys_route(db) } @@ -508,7 +508,7 @@ pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -556,7 +556,7 @@ pub async fn get_public_rooms_filtered_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( - db: State<'_, Database>, + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -603,8 +603,8 @@ pub async fn get_public_rooms_route( put("/_matrix/federation/v1/send/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub async fn send_transaction_message_route<'a>( - db: State<'a, Database>, +pub async fn send_transaction_message_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1585,7 +1585,7 @@ pub(crate) async fn fetch_signing_keys( .await { db.globals - .add_signing_key(origin, &get_keys_response.server_key)?; + .add_signing_key(origin, get_keys_response.server_key.clone())?; result.extend( get_keys_response @@ -1628,7 +1628,7 @@ pub(crate) async fn fetch_signing_keys( { trace!("Got signing keys: {:?}", keys); for k in keys.server_keys { - db.globals.add_signing_key(origin, &k)?; + db.globals.add_signing_key(origin, k.clone())?; result.extend( k.verify_keys .into_iter() @@ -1686,7 +1686,7 @@ pub(crate) fn append_incoming_pdu( &db, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") @@ -1758,8 +1758,8 @@ pub(crate) fn append_incoming_pdu( get("/_matrix/federation/v1/event/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_event_route<'a>( - db: State<'a, Database>, +pub fn get_event_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1783,8 +1783,8 @@ pub fn get_event_route<'a>( post("/_matrix/federation/v1/get_missing_events/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_missing_events_route<'a>( - db: State<'a, Database>, +pub fn get_missing_events_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1832,8 +1832,8 @@ pub fn get_missing_events_route<'a>( get("/_matrix/federation/v1/state_ids/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_room_state_ids_route<'a>( - db: State<'a, Database>, +pub fn get_room_state_ids_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1884,8 +1884,8 @@ pub fn get_room_state_ids_route<'a>( get("/_matrix/federation/v1/make_join/<_>/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn create_join_event_template_route<'a>( - db: State<'a, Database>, +pub fn create_join_event_template_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2055,8 +2055,8 @@ pub fn create_join_event_template_route<'a>( put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub async fn create_join_event_route<'a>( - db: State<'a, Database>, +pub async fn create_join_event_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if 
!db.globals.allow_federation() { @@ -2171,8 +2171,8 @@ pub async fn create_join_event_route<'a>( put("/_matrix/federation/v2/invite/<_>/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub async fn create_invite_route<'a>( - db: State<'a, Database>, +pub async fn create_invite_route( + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2276,8 +2276,8 @@ pub async fn create_invite_route<'a>( get("/_matrix/federation/v1/user/devices/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_devices_route<'a>( - db: State<'a, Database>, +pub fn get_devices_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2316,8 +2316,8 @@ pub fn get_devices_route<'a>( get("/_matrix/federation/v1/query/directory", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_room_information_route<'a>( - db: State<'a, Database>, +pub fn get_room_information_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2344,8 +2344,8 @@ pub fn get_room_information_route<'a>( get("/_matrix/federation/v1/query/profile", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_profile_information_route<'a>( - db: State<'a, Database>, +pub fn get_profile_information_route( + db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2378,8 +2378,8 @@ pub fn get_profile_information_route<'a>( post("/_matrix/federation/v1/user/keys/query", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_keys_route<'a>( - db: State<'a, Database>, +pub fn get_keys_route( + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2406,8 +2406,8 @@ pub fn get_keys_route<'a>( post("/_matrix/federation/v1/user/keys/claim", data = "") )] #[tracing::instrument(skip(db, body))] -pub async fn claim_keys_route<'a>( - db: State<'a, Database>, +pub async fn claim_keys_route( + db: State<'_, Arc>, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { diff --git a/src/utils.rs b/src/utils.rs index 106baff..f59afb3 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,6 +1,7 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; +use rocksdb::MergeOperands; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, @@ -15,6 +16,14 @@ pub fn millis_since_unix_epoch() -> u64 { .as_millis() as u64 } +pub fn increment_rocksdb( + _new_key: &[u8], + old: Option<&[u8]>, + _operands: &mut MergeOperands, +) -> Option> { + increment(old) +} + pub fn increment(old: Option<&[u8]>) -> Option> { let number = match old.map(|bytes| bytes.try_into()) { Some(Ok(bytes)) => { @@ -27,16 +36,14 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } -pub fn generate_keypair(old: Option<&[u8]>) -> Option> { - Some(old.map(|s| s.to_vec()).unwrap_or_else(|| { - let mut value = random_string(8).as_bytes().to_vec(); - value.push(0xff); - value.extend_from_slice( - &ruma::signatures::Ed25519KeyPair::generate() - .expect("Ed25519KeyPair generation always works (?)"), - ); - value - })) +pub fn generate_keypair() -> Vec { + let mut value = random_string(8).as_bytes().to_vec(); + value.push(0xff); + value.extend_from_slice( + &ruma::signatures::Ed25519KeyPair::generate() + .expect("Ed25519KeyPair generation always works (?)"), + ); + value } /// Parses the bytes into an u64. 
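Two conventions recur throughout the storage rewrite above: composite keys are built by joining the fixed parts with a 0xff separator (0xff is never a valid byte in UTF-8 text, so it cannot collide with identifier bytes), and counters are stored as 8 big-endian bytes so that byte-wise key order matches numeric order. A minimal standalone sketch of both, mirroring utils::increment from the diff but with simplified signatures and illustrative names, not code taken from the patch series itself:

use std::convert::TryFrom;

// 0xff cannot appear inside a UTF-8 identifier, so it is safe as a field separator.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key
}

// Counters are stored as 8 big-endian bytes; a missing or malformed value counts as 0.
fn increment(old: Option<&[u8]>) -> Vec<u8> {
    let number = match old.and_then(|bytes| <[u8; 8]>::try_from(bytes).ok()) {
        Some(bytes) => u64::from_be_bytes(bytes) + 1,
        None => 1,
    };
    number.to_be_bytes().to_vec()
}

fn main() {
    let key = userroom_key("@alice:example.org", "!room:example.org");
    assert_eq!(key.iter().filter(|&&b| b == 0xff).count(), 1);

    let one = increment(None);
    let two = increment(Some(&one));
    assert_eq!(u64::from_be_bytes(<[u8; 8]>::try_from(two).unwrap()), 2);
}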
From 972caacdc2de95183ddddd4084282a069a75b89a Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Fri, 4 Jun 2021 08:06:12 +0430 Subject: [PATCH 0608/1727] put media in filesystem --- src/client_server/media.rs | 14 +++++---- src/database.rs | 4 +-- src/database/abstraction.rs | 6 ++-- src/database/globals.rs | 21 ++++++++++---- src/database/media.rs | 58 +++++++++++++++++++++++++++---------- src/error.rs | 5 ++++ 6 files changed, 78 insertions(+), 30 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0673787..14ab6db 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -38,6 +38,7 @@ pub async fn create_content_route( ); db.media.create( mxc.clone(), + &db.globals, &body .filename .as_ref() @@ -45,7 +46,7 @@ pub async fn create_content_route( .as_deref(), &body.content_type.as_deref(), &body.file, - )?; + ).await?; db.flush().await?; @@ -71,7 +72,7 @@ pub async fn get_content_route( content_disposition, content_type, file, - }) = db.media.get(&mxc)? + }) = db.media.get(&db.globals, &mxc).await? { Ok(get_content::Response { file, @@ -95,10 +96,11 @@ pub async fn get_content_route( db.media.create( mxc, + &db.globals, &get_content_response.content_disposition.as_deref(), &get_content_response.content_type.as_deref(), &get_content_response.file, - )?; + ).await?; Ok(get_content_response.into()) } else { @@ -121,13 +123,14 @@ pub async fn get_content_thumbnail_route( content_type, file, .. }) = db.media.get_thumbnail( mxc.clone(), + &db.globals, body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, body.height .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - )? { + ).await? { Ok(get_content_thumbnail::Response { file, content_type }.into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db @@ -148,12 +151,13 @@ pub async fn get_content_thumbnail_route( db.media.upload_thumbnail( mxc, + &db.globals, &None, &get_thumbnail_response.content_type, body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, - )?; + ).await?; Ok(get_thumbnail_response.into()) } else { diff --git a/src/database.rs b/src/database.rs index e3b954e..04d3735 100644 --- a/src/database.rs +++ b/src/database.rs @@ -245,7 +245,7 @@ impl Database { db.globals.bump_database_version(1)?; - info!("Migration: 0 -> 1 finished"); + println!("Migration: 0 -> 1 finished"); } if db.globals.database_version()? 
< 2 { @@ -262,7 +262,7 @@ impl Database { db.globals.bump_database_version(2)?; - info!("Migration: 1 -> 2 finished"); + println!("Migration: 1 -> 2 finished"); } // This data is probably outdated diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 5a2afd5..ad032fb 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -47,7 +47,7 @@ pub trait Tree: Send + Sync { fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Box<[u8]>)> + 'a>; + ) -> Box, Box<[u8]>)> + Send + 'a>; fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; @@ -142,7 +142,7 @@ impl Tree for SledEngineTree { fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Box<[u8]>)> + 'a> { + ) -> Box, Box<[u8]>)> + Send + 'a> { let iter = self .0 .scan_prefix(prefix) @@ -279,7 +279,7 @@ impl Tree for RocksDbEngineTree<'_> { fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Box<[u8]>)> + 'a> { + ) -> Box, Box<[u8]>)> + Send + 'a> { Box::new( self.db .0 diff --git a/src/database/globals.rs b/src/database/globals.rs index 37ebf13..1ca64de 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -5,11 +5,7 @@ use ruma::{ EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, }; use rustls::{ServerCertVerifier, WebPKIVerifier}; -use std::{ - collections::{BTreeMap, HashMap}, - sync::{Arc, RwLock}, - time::{Duration, Instant}, -}; +use std::{collections::{BTreeMap, HashMap}, path::{PathBuf}, sync::{Arc, RwLock}, time::{Duration, Instant}}; use tokio::sync::Semaphore; use trust_dns_resolver::TokioAsyncResolver; @@ -275,4 +271,19 @@ impl Globals { .insert(b"version", &new_version.to_be_bytes())?; Ok(()) } + + pub fn get_media_folder(&self) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r + } + + pub fn get_media_file(&self, key: &Vec) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); + r + } } diff --git a/src/database/media.rs b/src/database/media.rs index ca45484..666a494 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,9 +1,10 @@ use image::{imageops::FilterType, GenericImageView}; +use crate::database::globals::Globals; use crate::{utils, Error, Result}; use std::{mem, sync::Arc}; - use super::abstraction::Tree; +use tokio::{fs::{self, File}, io::AsyncWriteExt, io::AsyncReadExt}; pub struct FileMeta { pub content_disposition: Option, @@ -16,10 +17,11 @@ pub struct Media { } impl Media { - /// Uploads or replaces a file. - pub fn create( + /// Uploads a file. + pub async fn create( &self, mxc: String, + globals: &Globals, content_disposition: &Option<&str>, content_type: &Option<&str>, file: &[u8], @@ -43,15 +45,20 @@ impl Media { .unwrap_or_default(), ); - self.mediaid_file.insert(&key, file)?; + let path = globals.get_media_file(&key); + fs::create_dir_all(path.parent().unwrap()).await?; + let mut f = File::create(path).await?; + f.write_all(file).await?; + self.mediaid_file.insert(&key, &[])?; Ok(()) } /// Uploads or replaces a file thumbnail. 
- pub fn upload_thumbnail( + pub async fn upload_thumbnail( &self, mxc: String, + globals: &Globals, content_disposition: &Option, content_type: &Option, width: u32, @@ -77,20 +84,29 @@ impl Media { .unwrap_or_default(), ); - self.mediaid_file.insert(&key, file)?; + let path = globals.get_media_file(&key); + fs::create_dir_all(path.parent().unwrap()).await?; + let mut f = File::create(path).await?; + f.write_all(file).await?; + + self.mediaid_file.insert(&key, &[])?; Ok(()) } /// Downloads a file. - pub fn get(&self, mxc: &str) -> Result> { + pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - if let Some((key, file)) = self.mediaid_file.scan_prefix(prefix).next() { + let mut iter = self.mediaid_file.scan_prefix(prefix); + if let Some((key, _)) = iter.next() { + let path = globals.get_media_file(&key.to_vec()); + let mut file = vec![]; + File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -121,7 +137,7 @@ impl Media { Ok(Some(FileMeta { content_disposition, content_type, - file: file.to_vec(), + file, })) } else { Ok(None) @@ -151,7 +167,7 @@ impl Media { /// - Server creates the thumbnail and sends it to the user /// /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. - pub fn get_thumbnail(&self, mxc: String, width: u32, height: u32) -> Result> { + pub async fn get_thumbnail(&self, mxc: String, globals: &Globals, width: u32, height: u32) -> Result> { let (width, height, crop) = self .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file @@ -169,8 +185,11 @@ impl Media { original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail original_prefix.push(0xff); - if let Some((key, file)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { + if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { // Using saved thumbnail + let path = globals.get_media_file(&key.to_vec()); + let mut file = vec![]; + File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -201,8 +220,12 @@ impl Media { content_type, file: file.to_vec(), })) - } else if let Some((key, file)) = self.mediaid_file.scan_prefix(original_prefix).next() { + } else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() { // Generate a thumbnail + let path = globals.get_media_file(&key.to_vec()); + let mut file = vec![]; + File::open(path).await?.read_to_end(&mut file).await?; + let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -299,19 +322,24 @@ impl Media { widthheight, ); - self.mediaid_file.insert(&thumbnail_key, &thumbnail_bytes)?; + let path = globals.get_media_file(&thumbnail_key); + fs::create_dir_all(path.parent().unwrap()).await?; + let mut f = File::create(path).await?; + f.write_all(&thumbnail_bytes).await?; + + self.mediaid_file.insert(&thumbnail_key, &[])?; Ok(Some(FileMeta { content_disposition, content_type, - file: thumbnail_bytes.to_vec(), + file: thumbnail_bytes.to_vec() })) } else { // Couldn't parse file to generate thumbnail, send original Ok(Some(FileMeta { content_disposition, content_type, - 
file: file.to_vec(), + file: file.to_vec() })) } } else { diff --git a/src/error.rs b/src/error.rs index 93c67c1..10a48b7 100644 --- a/src/error.rs +++ b/src/error.rs @@ -45,6 +45,11 @@ pub enum Error { }, #[error("{0}")] FederationError(Box, RumaError), + #[error("Could not do this io: {source}")] + IoError { + #[from] + source: std::io::Error, + }, #[error("{0}")] BadServerResponse(&'static str), #[error("{0}")] From 804105479c7ae275a79c9fdeeb32e7a20b22fa3b Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Sun, 6 Jun 2021 16:58:32 +0430 Subject: [PATCH 0609/1727] fix fmt and clippy warnings --- src/client_server/media.rs | 85 +++++++++++++++++++++----------------- src/database/globals.rs | 9 +++- src/database/media.rs | 26 ++++++++---- 3 files changed, 73 insertions(+), 47 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 14ab6db..0b1fbd7 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -36,17 +36,20 @@ pub async fn create_content_route( db.globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media.create( - mxc.clone(), - &db.globals, - &body - .filename - .as_ref() - .map(|filename| "inline; filename=".to_owned() + filename) - .as_deref(), - &body.content_type.as_deref(), - &body.file, - ).await?; + + db.media + .create( + mxc.clone(), + &db.globals, + &body + .filename + .as_ref() + .map(|filename| "inline; filename=".to_owned() + filename) + .as_deref(), + &body.content_type.as_deref(), + &body.file, + ) + .await?; db.flush().await?; @@ -94,13 +97,15 @@ pub async fn get_content_route( ) .await?; - db.media.create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ).await?; + db.media + .create( + mxc, + &db.globals, + &get_content_response.content_disposition.as_deref(), + &get_content_response.content_type.as_deref(), + &get_content_response.file, + ) + .await?; Ok(get_content_response.into()) } else { @@ -121,16 +126,20 @@ pub async fn get_content_thumbnail_route( if let Some(FileMeta { content_type, file, .. - }) = db.media.get_thumbnail( - mxc.clone(), - &db.globals, - body.width - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - body.height - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - ).await? { + }) = db + .media + .get_thumbnail( + mxc.clone(), + &db.globals, + body.width + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + body.height + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + ) + .await? 
+ { Ok(get_content_thumbnail::Response { file, content_type }.into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db @@ -149,15 +158,17 @@ pub async fn get_content_thumbnail_route( ) .await?; - db.media.upload_thumbnail( - mxc, - &db.globals, - &None, - &get_thumbnail_response.content_type, - body.width.try_into().expect("all UInts are valid u32s"), - body.height.try_into().expect("all UInts are valid u32s"), - &get_thumbnail_response.file, - ).await?; + db.media + .upload_thumbnail( + mxc, + &db.globals, + &None, + &get_thumbnail_response.content_type, + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &get_thumbnail_response.file, + ) + .await?; Ok(get_thumbnail_response.into()) } else { diff --git a/src/database/globals.rs b/src/database/globals.rs index 1ca64de..de97578 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -5,7 +5,12 @@ use ruma::{ EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, }; use rustls::{ServerCertVerifier, WebPKIVerifier}; -use std::{collections::{BTreeMap, HashMap}, path::{PathBuf}, sync::{Arc, RwLock}, time::{Duration, Instant}}; +use std::{ + collections::{BTreeMap, HashMap}, + path::PathBuf, + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; use tokio::sync::Semaphore; use trust_dns_resolver::TokioAsyncResolver; @@ -279,7 +284,7 @@ impl Globals { r } - pub fn get_media_file(&self, key: &Vec) -> PathBuf { + pub fn get_media_file(&self, key: &[u8]) -> PathBuf { let mut r = PathBuf::new(); r.push(self.config.database_path.clone()); r.push("media"); diff --git a/src/database/media.rs b/src/database/media.rs index 666a494..6fd142d 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -1,10 +1,14 @@ -use image::{imageops::FilterType, GenericImageView}; use crate::database::globals::Globals; +use image::{imageops::FilterType, GenericImageView}; +use super::abstraction::Tree; use crate::{utils, Error, Result}; use std::{mem, sync::Arc}; -use super::abstraction::Tree; -use tokio::{fs::{self, File}, io::AsyncWriteExt, io::AsyncReadExt}; +use tokio::{ + fs::{self, File}, + io::AsyncReadExt, + io::AsyncWriteExt, +}; pub struct FileMeta { pub content_disposition: Option, @@ -167,7 +171,13 @@ impl Media { /// - Server creates the thumbnail and sends it to the user /// /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
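// Illustrative sketch (not from the patch) of the bucketing that
// thumbnail_properties() performs before the lookups below. The exact bucket
// sizes are an assumption based on the Matrix-spec thumbnail sizes; the diff
// itself only states that width,height <= 96 means a cropped thumbnail and
// that (0, 0, false) stands for the original file.
fn thumbnail_properties_sketch(width: u32, height: u32) -> Option<(u32, u32, bool)> {
    // (target width, target height, crop?)
    match (width, height) {
        (0..=32, 0..=32) => Some((32, 32, true)),      // cropped
        (0..=96, 0..=96) => Some((96, 96, true)),      // cropped
        (0..=320, 0..=240) => Some((320, 240, false)), // scaled
        (0..=640, 0..=480) => Some((640, 480, false)), // scaled
        (0..=800, 0..=600) => Some((800, 600, false)), // scaled
        _ => None, // too large: serve the original upload instead
    }
}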
- pub async fn get_thumbnail(&self, mxc: String, globals: &Globals, width: u32, height: u32) -> Result> { + pub async fn get_thumbnail( + &self, + mxc: String, + globals: &Globals, + width: u32, + height: u32, + ) -> Result> { let (width, height, crop) = self .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file @@ -225,7 +235,7 @@ impl Media { let path = globals.get_media_file(&key.to_vec()); let mut file = vec![]; File::open(path).await?.read_to_end(&mut file).await?; - + let mut parts = key.rsplit(|&b| b == 0xff); let content_type = parts @@ -326,20 +336,20 @@ impl Media { fs::create_dir_all(path.parent().unwrap()).await?; let mut f = File::create(path).await?; f.write_all(&thumbnail_bytes).await?; - + self.mediaid_file.insert(&thumbnail_key, &[])?; Ok(Some(FileMeta { content_disposition, content_type, - file: thumbnail_bytes.to_vec() + file: thumbnail_bytes.to_vec(), })) } else { // Couldn't parse file to generate thumbnail, send original Ok(Some(FileMeta { content_disposition, content_type, - file: file.to_vec() + file: file.to_vec(), })) } } else { From ff841b73c54fca06a604f7c99d48321965da053b Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Tue, 8 Jun 2021 17:05:13 +0430 Subject: [PATCH 0610/1727] use .keys() and remove unneccery .to_vec() --- src/database.rs | 2 +- src/database/media.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database.rs b/src/database.rs index 04d3735..2870e2d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -16,7 +16,7 @@ pub mod users; use crate::{utils, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; -use log::{error, info}; +use log::error; use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; diff --git a/src/database/media.rs b/src/database/media.rs index 6fd142d..205f3a6 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -108,7 +108,7 @@ impl Media { let mut iter = self.mediaid_file.scan_prefix(prefix); if let Some((key, _)) = iter.next() { - let path = globals.get_media_file(&key.to_vec()); + let path = globals.get_media_file(&key); let mut file = vec![]; File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -197,7 +197,7 @@ impl Media { if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { // Using saved thumbnail - let path = globals.get_media_file(&key.to_vec()); + let path = globals.get_media_file(&key); let mut file = vec![]; File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -232,7 +232,7 @@ impl Media { })) } else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() { // Generate a thumbnail - let path = globals.get_media_file(&key.to_vec()); + let path = globals.get_media_file(&key); let mut file = vec![]; File::open(path).await?.read_to_end(&mut file).await?; From affa124864d30eed4453416077db40b63b98e479 Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Tue, 8 Jun 2021 17:50:06 +0430 Subject: [PATCH 0611/1727] create media folder in init --- src/database/globals.rs | 9 +++++++-- src/database/media.rs | 9 +-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index de97578..5525644 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -7,6 +7,7 @@ use ruma::{ use 
rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ collections::{BTreeMap, HashMap}, + fs, path::PathBuf, sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -137,7 +138,7 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - Ok(Self { + let s = Self { globals, config, keypair: Arc::new(keypair), @@ -152,7 +153,11 @@ impl Globals { bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), - }) + }; + + fs::create_dir_all(s.get_media_folder())?; + + Ok(s) } /// Returns this server's keypair. diff --git a/src/database/media.rs b/src/database/media.rs index 205f3a6..b81e863 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -4,11 +4,7 @@ use image::{imageops::FilterType, GenericImageView}; use super::abstraction::Tree; use crate::{utils, Error, Result}; use std::{mem, sync::Arc}; -use tokio::{ - fs::{self, File}, - io::AsyncReadExt, - io::AsyncWriteExt, -}; +use tokio::{fs::File, io::AsyncReadExt, io::AsyncWriteExt}; pub struct FileMeta { pub content_disposition: Option, @@ -50,7 +46,6 @@ impl Media { ); let path = globals.get_media_file(&key); - fs::create_dir_all(path.parent().unwrap()).await?; let mut f = File::create(path).await?; f.write_all(file).await?; @@ -89,7 +84,6 @@ impl Media { ); let path = globals.get_media_file(&key); - fs::create_dir_all(path.parent().unwrap()).await?; let mut f = File::create(path).await?; f.write_all(file).await?; @@ -333,7 +327,6 @@ impl Media { ); let path = globals.get_media_file(&thumbnail_key); - fs::create_dir_all(path.parent().unwrap()).await?; let mut f = File::create(path).await?; f.write_all(&thumbnail_bytes).await?; From 2385bd1cfd41fe14e8a73b9d6b556b5f8bb55ff1 Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Tue, 8 Jun 2021 20:53:24 +0430 Subject: [PATCH 0612/1727] add migrations --- src/database.rs | 29 ++++++++++++++++++++++------- src/database/media.rs | 6 +++--- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/database.rs b/src/database.rs index 2870e2d..fa84881 100644 --- a/src/database.rs +++ b/src/database.rs @@ -20,11 +20,7 @@ use log::error; use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::{ - collections::HashMap, - fs::remove_dir_all, - sync::{Arc, RwLock}, -}; +use std::{collections::HashMap, fs::{self, remove_dir_all}, io::Write, sync::{Arc, RwLock}}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] @@ -253,9 +249,11 @@ impl Database { for (userid, password) in db.users.userid_password.iter() { let password = utils::string_from_bytes(&password); - if password.map_or(false, |password| { + let empty_hashed_password = password.map_or(false, |password| { argon2::verify_encoded(&password, b"").unwrap_or(false) - }) { + }); + + if empty_hashed_password { db.users.userid_password.insert(&userid, b"")?; } } @@ -265,6 +263,23 @@ impl Database { println!("Migration: 1 -> 2 finished"); } + if db.globals.database_version()? 
< 3 { + // Move media to filesystem + for (key, content) in db.media.mediaid_file.iter() { + if content.len() == 0 { + continue; + } + + let path = db.globals.get_media_file(&key); + let mut file = fs::File::create(path)?; + file.write_all(&content)?; + db.media.mediaid_file.insert(&key, &[])?; + } + + db.globals.bump_database_version(3)?; + + println!("Migration: 2 -> 3 finished"); + } // This data is probably outdated db.rooms.edus.presenceid_presence.clear()?; diff --git a/src/database/media.rs b/src/database/media.rs index b81e863..944c5bd 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -103,7 +103,7 @@ impl Media { let mut iter = self.mediaid_file.scan_prefix(prefix); if let Some((key, _)) = iter.next() { let path = globals.get_media_file(&key); - let mut file = vec![]; + let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -192,7 +192,7 @@ impl Media { if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { // Using saved thumbnail let path = globals.get_media_file(&key); - let mut file = vec![]; + let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -227,7 +227,7 @@ impl Media { } else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() { // Generate a thumbnail let path = globals.get_media_file(&key); - let mut file = vec![]; + let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; let mut parts = key.rsplit(|&b| b == 0xff); From 2078af59d8b2a95fbaf522962265c222d3f17b1b Mon Sep 17 00:00:00 2001 From: hamidreza kalbasi Date: Tue, 8 Jun 2021 20:54:36 +0430 Subject: [PATCH 0613/1727] fix fmt problems --- src/database.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index fa84881..b5a25ea 100644 --- a/src/database.rs +++ b/src/database.rs @@ -20,7 +20,12 @@ use log::error; use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::{collections::HashMap, fs::{self, remove_dir_all}, io::Write, sync::{Arc, RwLock}}; +use std::{ + collections::HashMap, + fs::{self, remove_dir_all}, + io::Write, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] From cd4bc520d8ef5410870cc82e967a3d480c5ca8c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Jun 2021 15:04:28 +0200 Subject: [PATCH 0614/1727] improvement: feature flags for sled, rocksdb --- Cargo.toml | 8 ++-- src/client_server/membership.rs | 2 +- src/database.rs | 4 ++ src/database/abstraction.rs | 68 +++++++++++++++++++++------------ src/database/rooms.rs | 13 +++---- src/database/sending.rs | 2 - src/error.rs | 2 + src/ruma_wrapper.rs | 16 ++++---- src/server_server.rs | 2 +- src/utils.rs | 4 +- 10 files changed, 71 insertions(+), 50 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eb43da5..e7ebadf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,8 +24,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "b39537812c12caafcbf8b7bd74 # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", features = ["compression", "no_metrics"] } -rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"] } +sled = { version = "0.34.6", features = ["compression", 
"no_metrics"], optional = true } +rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for the http request / response body type for Ruma endpoints used with reqwest @@ -75,7 +75,9 @@ opentelemetry-jaeger = "0.11.0" pretty_env_logger = "0.4.0" [features] -default = ["conduit_bin"] +default = ["conduit_bin", "backend_sled"] +backend_sled = ["sled"] +backend_rocksdb = ["rocksdb"] conduit_bin = [] # TODO: add rocket to this when it is optional [[bin]] diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 92d7ace..a3f1389 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -621,7 +621,7 @@ async fn join_room_by_id_helper( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), count, - pdu_id.into(), + &pdu_id, &[pdu.event_id.clone()], db, )?; diff --git a/src/database.rs b/src/database.rs index b5a25ea..e00bdcd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -77,8 +77,12 @@ fn default_log() -> String { "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } +#[cfg(feature = "sled")] pub type Engine = abstraction::SledEngine; +#[cfg(feature = "rocksdb")] +pub type Engine = abstraction::RocksDbEngine; + pub struct Database { pub globals: globals::Globals, pub users: users::Users, diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index ad032fb..f81c9de 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -1,21 +1,19 @@ -use std::{ - collections::BTreeMap, - future::Future, - pin::Pin, - sync::{Arc, RwLock}, -}; - -use log::warn; -use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Direction, MultiThreaded, Options, -}; - use super::Config; use crate::{utils, Result}; +use log::warn; +use std::{future::Future, pin::Pin, sync::Arc}; +#[cfg(feature = "rocksdb")] +use std::{collections::BTreeMap, sync::RwLock}; + +#[cfg(feature = "sled")] pub struct SledEngine(sled::Db); +#[cfg(feature = "sled")] pub struct SledEngineTree(sled::Tree); -pub struct RocksDbEngine(rocksdb::DBWithThreadMode); + +#[cfg(feature = "rocksdb")] +pub struct RocksDbEngine(rocksdb::DBWithThreadMode); +#[cfg(feature = "rocksdb")] pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, @@ -60,6 +58,7 @@ pub trait Tree: Send + Sync { } } +#[cfg(feature = "sled")] impl DatabaseEngine for SledEngine { fn open(config: &Config) -> Result> { Ok(Arc::new(SledEngine( @@ -76,6 +75,7 @@ impl DatabaseEngine for SledEngine { } } +#[cfg(feature = "sled")] impl Tree for SledEngineTree { fn get(&self, key: &[u8]) -> Result>> { Ok(self.0.get(key)?.map(|v| v.to_vec())) @@ -165,29 +165,42 @@ impl Tree for SledEngineTree { } } +#[cfg(feature = "rocksdb")] impl DatabaseEngine for RocksDbEngine { fn open(config: &Config) -> Result> { - let mut db_opts = Options::default(); + let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); + db_opts.set_max_open_files(16); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); + db_opts.set_target_file_size_base(256 << 20); + db_opts.set_write_buffer_size(256 << 20); - let cfs = DBWithThreadMode::::list_cf(&db_opts, &config.database_path) - .unwrap_or_default(); + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + 
block_based_options.set_block_size(512 << 10); + db_opts.set_block_based_table_factory(&block_based_options); - let mut options = Options::default(); + let cfs = rocksdb::DBWithThreadMode::::list_cf( + &db_opts, + &config.database_path, + ) + .unwrap_or_default(); + + let mut options = rocksdb::Options::default(); options.set_merge_operator_associative("increment", utils::increment_rocksdb); - let db = DBWithThreadMode::::open_cf_descriptors( + let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( &db_opts, &config.database_path, cfs.iter() - .map(|name| ColumnFamilyDescriptor::new(name, options.clone())), + .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())), )?; Ok(Arc::new(RocksDbEngine(db))) } fn open_tree(self: &Arc, name: &'static str) -> Result> { - let mut options = Options::default(); + let mut options = rocksdb::Options::default(); options.set_merge_operator_associative("increment", utils::increment_rocksdb); // Create if it doesn't exist @@ -201,12 +214,14 @@ impl DatabaseEngine for RocksDbEngine { } } +#[cfg(feature = "rocksdb")] impl RocksDbEngineTree<'_> { - fn cf(&self) -> BoundColumnFamily<'_> { + fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { self.db.0.cf_handle(self.name).unwrap() } } +#[cfg(feature = "rocksdb")] impl Tree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { Ok(self.db.0.get_cf(self.cf(), key)?) @@ -260,15 +275,20 @@ impl Tree for RocksDbEngineTree<'_> { rocksdb::IteratorMode::From( from, if backwards { - Direction::Reverse + rocksdb::Direction::Reverse } else { - Direction::Forward + rocksdb::Direction::Forward }, ), )) } fn increment(&self, key: &[u8]) -> Result> { + let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap(); + dbg!(stats.mem_table_total); + dbg!(stats.mem_table_unflushed); + dbg!(stats.mem_table_readers_total); + dbg!(stats.cache_total); // TODO: atomic? let old = self.get(key)?; let new = utils::increment(old.as_deref()).unwrap(); @@ -285,7 +305,7 @@ impl Tree for RocksDbEngineTree<'_> { .0 .iterator_cf( self.cf(), - rocksdb::IteratorMode::From(&prefix, Direction::Forward), + rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) .take_while(move |(k, _)| k.starts_with(&prefix)), ) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0a8239d..736ff4d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -19,8 +19,6 @@ use ruma::{ state_res::{self, Event, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; -use sled::IVec; - use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, @@ -34,7 +32,7 @@ use super::{abstraction::Tree, admin::AdminCommand, pusher}; /// /// This is created when a state group is added to the database by /// hashing the entire state. -pub type StateHashId = IVec; +pub type StateHashId = Vec; pub struct Rooms { pub edus: edus::RoomEdus, @@ -665,7 +663,7 @@ impl Rooms { pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, count: u64, - pdu_id: IVec, + pdu_id: &[u8], leaves: &[EventId], db: &Database, ) -> Result<()> { @@ -713,14 +711,13 @@ impl Rooms { self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; self.pduid_pdu.insert( - &pdu_id, + pdu_id, &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; // This also replaces the eventid of any outliers with the correct // pduid, removing the place holder. 
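// Illustrative sketch (not from the patch) of the lookup these two inserts
// enable: an EventId resolves to its PDU JSON via eventid_pduid
// (event id -> internal pdu id) and then pduid_pdu (pdu id -> canonical JSON).
// The free function and its parameters are hypothetical; in Rooms these trees
// are fields and error handling is richer. Tree, EventId, CanonicalJsonObject,
// Error and Result are the types already in scope in this module.
fn pdu_json_by_event_id(
    eventid_pduid: &dyn Tree,
    pduid_pdu: &dyn Tree,
    event_id: &EventId,
) -> Result<Option<CanonicalJsonObject>> {
    let pdu_id = match eventid_pduid.get(event_id.as_bytes())? {
        Some(id) => id,
        None => return Ok(None), // no pdu id recorded for this event
    };
    let bytes = match pduid_pdu.get(&pdu_id)? {
        Some(b) => b,
        None => return Ok(None),
    };
    serde_json::from_slice(&bytes)
        .map(Some)
        .map_err(|_| Error::bad_database("Invalid PDU JSON in pduid_pdu."))
}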
- self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; // See if the event matches any known pushers for user in db @@ -1360,7 +1357,7 @@ impl Rooms { &pdu, pdu_json, count, - pdu_id.clone().into(), + &pdu_id, // Since this PDU references all pdu_leaves we can update the leaves // of the room &[pdu.event_id.clone()], diff --git a/src/database/sending.rs b/src/database/sending.rs index 77f6ed7..ecf0761 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -91,8 +91,6 @@ enum TransactionStatus { impl Sending { pub fn start_handler(&self, db: Arc, mut receiver: mpsc::UnboundedReceiver>) { - let db = db.clone(); - tokio::spawn(async move { let mut futures = FuturesUnordered::new(); diff --git a/src/error.rs b/src/error.rs index 10a48b7..4f363ff 100644 --- a/src/error.rs +++ b/src/error.rs @@ -23,11 +23,13 @@ pub type Result = std::result::Result; #[derive(Error, Debug)] pub enum Error { + #[cfg(feature = "sled")] #[error("There was a problem with the connection to the sled database.")] SledError { #[from] source: sled::Error, }, + #[cfg(feature = "rocksdb")] #[error("There was a problem with the connection to the rocksdb database: {source}")] RocksDbError { #[from] diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index ba2c37e..2912a57 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,14 +1,15 @@ -use crate::{Database, Error}; +use crate::Error; use ruma::{ api::OutgoingResponse, identifiers::{DeviceId, UserId}, - Outgoing, + signatures::CanonicalJsonValue, + Outgoing, ServerName, }; -use std::{ops::Deref, sync::Arc}; +use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { - crate::server_server, + crate::{server_server, Database}, log::{debug, warn}, rocket::{ data::{self, ByteUnit, Data, FromData}, @@ -18,14 +19,11 @@ use { tokio::io::AsyncReadExt, Request, State, }, - ruma::{ - api::{AuthScheme, IncomingRequest}, - signatures::CanonicalJsonValue, - ServerName, - }, + ruma::api::{AuthScheme, IncomingRequest}, std::collections::BTreeMap, std::convert::TryFrom, std::io::Cursor, + std::sync::Arc, }; /// This struct converts rocket requests into ruma structs by converting them into http requests diff --git a/src/server_server.rs b/src/server_server.rs index 7a338dc..2a445c2 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1681,7 +1681,7 @@ pub(crate) fn append_incoming_pdu( pdu, pdu_json, count, - pdu_id.clone().into(), + &pdu_id, &new_room_leaves.into_iter().collect::>(), &db, )?; diff --git a/src/utils.rs b/src/utils.rs index f59afb3..0c8fb5c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,7 +1,6 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; -use rocksdb::MergeOperands; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, @@ -16,10 +15,11 @@ pub fn millis_since_unix_epoch() -> u64 { .as_millis() as u64 } +#[cfg(feature = "rocksdb")] pub fn increment_rocksdb( _new_key: &[u8], old: Option<&[u8]>, - _operands: &mut MergeOperands, + _operands: &mut rocksdb::MergeOperands, ) -> Option> { increment(old) } From e8f67089272bb9bf57130972c0031b5c28599df7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Jun 2021 18:40:33 +0200 Subject: [PATCH 0615/1727] improvement: show more users in our user directory --- src/client_server/user_directory.rs | 14 +++++++++----- src/database.rs | 24 ++++++++++++++++++++++++ src/database/rooms.rs | 12 ++++++++---- 3 files changed, 41 
insertions(+), 9 deletions(-) diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 0ddc7e8..d7c16d7 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -21,9 +21,6 @@ pub async fn search_users_route( let mut users = db.users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; - if db.users.is_deactivated(&user_id).ok()? { - return None; - } let user = search_users::User { user_id: user_id.clone(), @@ -31,11 +28,18 @@ pub async fn search_users_route( avatar_url: db.users.avatar_url(&user_id).ok()?, }; - if !user.user_id.to_string().contains(&body.search_term) + if !user + .user_id + .to_string() + .to_lowercase() + .contains(&body.search_term.to_lowercase()) && user .display_name .as_ref() - .filter(|name| name.contains(&body.search_term)) + .filter(|name| { + name.to_lowercase() + .contains(&body.search_term.to_lowercase()) + }) .is_none() { return None; diff --git a/src/database.rs b/src/database.rs index e00bdcd..2846928 100644 --- a/src/database.rs +++ b/src/database.rs @@ -289,6 +289,30 @@ impl Database { println!("Migration: 2 -> 3 finished"); } + + if db.globals.database_version()? < 4 { + // Add federated users to db as deactivated + for our_user in db.users.iter() { + let our_user = our_user?; + if db.users.is_deactivated(&our_user)? { + continue; + } + for room in db.rooms.rooms_joined(&our_user) { + for user in db.rooms.room_members(&room?) { + let user = user?; + if user.server_name() != db.globals.server_name() { + println!("Migration: Creating user {}", user); + db.users.create(&user, None)?; + } + } + } + } + + db.globals.bump_database_version(4)?; + + println!("Migration: 3 -> 4 finished"); + } + // This data is probably outdated db.rooms.edus.presenceid_presence.clear()?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 736ff4d..0820395 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -592,9 +592,8 @@ impl Rooms { prefix.push(0xff); self.roomid_pduleaves - .scan_prefix(dbg!(prefix)) - .map(|(key, bytes)| { - dbg!(key); + .scan_prefix(prefix) + .map(|(_, bytes)| { Ok::<_, Error>( EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") @@ -1195,7 +1194,6 @@ impl Rooms { room_id: &RoomId, db: &Database, ) -> Result { - dbg!(&pdu_builder); let PduBuilder { event_type, content, @@ -1583,6 +1581,12 @@ impl Rooms { last_state: Option>>, db: &Database, ) -> Result<()> { + // Keep track what remote users exist by adding them as "deactivated" users + if user_id.server_name() != db.globals.server_name() { + db.users.create(user_id, None)?; + // TODO: displayname, avatar url + } + let mut roomserver_id = room_id.as_bytes().to_vec(); roomserver_id.push(0xff); roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); From 3c9ea55938df9ec3ca52d00babfd8d86014b8b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Jun 2021 10:52:27 +0200 Subject: [PATCH 0616/1727] feat: /state --- src/main.rs | 1 + src/server_server.rs | 67 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 8b63d1d..8a19f13 100644 --- a/src/main.rs +++ b/src/main.rs @@ -154,6 +154,7 @@ fn setup_rocket(config: Figment, data: Arc) -> rocket::Rocket", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_room_state_route( 
+ db: State<'_, Arc>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdus = db + .rooms + .state_full_ids(shortstatehash)? + .into_iter() + .map(|id| { + PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&id).unwrap().unwrap(), + ) + }) + .collect(); + + let mut auth_chain = Vec::new(); + let mut auth_chain_ids = BTreeSet::::new(); + let mut todo = BTreeSet::new(); + todo.insert(body.event_id.clone()); + + while let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? { + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); + + let pdu_json = PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&event_id)?.unwrap(), + ); + auth_chain.push(pdu_json); + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + todo.remove(&event_id); + } + + Ok(get_room_state::v1::Response { auth_chain, pdus }.into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/state_ids/<_>", data = "") From 77a23f89698d4845a21cb2681197ca02f064bc7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Jun 2021 11:24:32 +0200 Subject: [PATCH 0617/1727] improvement: filter our room directory Fixes #35 --- src/client_server/directory.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index ad609cd..c41e81e 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -315,6 +315,36 @@ pub async fn get_public_rooms_filtered_helper( Ok(chunk) }) .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + .filter(|chunk| { + if let Some(query) = filter + .generic_search_term + .as_ref() + .map(|q| q.to_lowercase()) + { + if let Some(name) = &chunk.name { + if name.to_lowercase().contains(&query) { + return true; + } + } + + if let Some(topic) = &chunk.topic { + if topic.to_lowercase().contains(&query) { + return true; + } + } + + if let Some(canonical_alias) = &chunk.canonical_alias { + if canonical_alias.as_str().to_lowercase().contains(&query) { + return true; + } + } + + false + } else { + // No search term + true + } + }) // We need to collect all, so we can sort by member count .collect::>(); From 67f9592b177b41537e50cfb624d56f15b84a6875 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 14 Jun 2021 11:36:18 +0200 Subject: [PATCH 0618/1727] feat: /event_auth --- src/main.rs | 3 ++- src/server_server.rs | 45 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 8a19f13..99d4560 100644 --- a/src/main.rs +++ b/src/main.rs @@ -153,8 +153,9 @@ fn setup_rocket(config: Figment, data: Arc) -> rocket::Rocket/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_event_authorization_route( + db: State<'_, Arc>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut auth_chain = Vec::new(); + let mut auth_chain_ids = BTreeSet::::new(); + let mut todo = BTreeSet::new(); + 
todo.insert(body.event_id.clone()); + + while let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? { + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); + + let pdu_json = PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&event_id)?.unwrap(), + ); + auth_chain.push(pdu_json); + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + todo.remove(&event_id); + } + + Ok(get_event_authorization::v1::Response { auth_chain }.into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/state/<_>", data = "") From f6046871f42beca8b21c22807e306b8fafea49d5 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:12:36 +0200 Subject: [PATCH 0619/1727] Upgrade Ruma --- Cargo.lock | 301 +++++++++++++++++---------------- Cargo.toml | 2 +- src/client_server/account.rs | 2 +- src/client_server/room.rs | 4 +- src/client_server/to_device.rs | 32 ++-- src/database/users.rs | 4 +- src/server_server.rs | 18 +- 7 files changed, 183 insertions(+), 180 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 630d414..c3d7408 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,9 +8,9 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] @@ -174,9 +174,9 @@ checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.68" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" dependencies = [ "jobserver", ] @@ -272,13 +272,14 @@ dependencies = [ [[package]] name = "const-oid" version = "0.5.2" -source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" [[package]] name = "const_fn" -version = "0.4.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" +checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" [[package]] name = "constant_time_eq" @@ -339,9 +340,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.4" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -352,9 +353,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" +checksum = 
"e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -392,17 +393,18 @@ dependencies = [ [[package]] name = "der" -version = "0.3.4" -source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ "const-oid", ] [[package]] name = "derive_more" -version = "0.99.14" +version = "0.99.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" +checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" dependencies = [ "convert_case", "proc-macro2", @@ -451,18 +453,18 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.2" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", @@ -586,9 +588,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" dependencies = [ "futures-channel", "futures-core", @@ -601,9 +603,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" dependencies = [ "futures-core", "futures-sink", @@ -611,15 +613,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" [[package]] name = "futures-executor" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" dependencies = [ "futures-core", "futures-task", @@ -628,17 +630,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" dependencies = [ - "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -647,23 +648,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -723,9 +723,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", @@ -750,9 +750,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" dependencies = [ "bytes", "fnv", @@ -815,9 +815,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" dependencies = [ "bytes", "http", @@ -826,15 +826,15 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" [[package]] name = "humantime" @@ -847,9 +847,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.8" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3f71a7eea53a3f8257a7b4795373ff886397178cd634430ea94e12d7fe4fe34" +checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" dependencies = [ "bytes", "futures-channel", @@ -1012,18 +1012,18 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.51" 
+version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] [[package]] name = "js_int" -version = "0.2.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" dependencies = [ "serde", ] @@ -1056,9 +1056,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" [[package]] name = "libloading" @@ -1090,9 +1090,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" dependencies = [ "scopeguard", ] @@ -1157,9 +1157,9 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "memchr" -version = "2.4.0" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" @@ -1321,9 +1321,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "opentelemetry" @@ -1383,7 +1383,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.6", "smallvec", "winapi", ] @@ -1396,9 +1396,9 @@ checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = "pear" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" +checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" dependencies = [ "inlinable_string", "pear_codegen", @@ -1407,9 +1407,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" +checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1474,8 +1474,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.6.0" -source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" 
+version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ "der", "spki", @@ -1534,9 +1535,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -1629,7 +1630,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.2", ] [[package]] @@ -1652,21 +1653,28 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.8" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "redox_syscall" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.2.3", - "redox_syscall", + "getrandom 0.1.16", + "redox_syscall 0.1.57", + "rust-argon2", ] [[package]] @@ -1691,9 +1699,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.4" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" dependencies = [ "aho-corasick", "memchr", @@ -1712,9 +1720,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" [[package]] name = "remove_dir_all" @@ -1874,7 +1882,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.1.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "assign", "js_int", @@ -1895,7 +1903,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "bytes", "http", @@ -1911,7 +1919,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0" -source = 
"git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1922,7 +1930,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "ruma-api", "ruma-common", @@ -1936,7 +1944,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "assign", "bytes", @@ -1955,8 +1963,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.5.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +version = "0.5.3" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "indexmap", "js_int", @@ -1971,7 +1979,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "indoc", "js_int", @@ -1986,7 +1994,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1997,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "js_int", "ruma-api", @@ -2011,8 +2019,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +version = "0.19.2" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "paste", "rand 0.8.3", @@ -2025,8 +2033,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.19.1" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +version = "0.19.2" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "quote", 
"ruma-identifiers-validation", @@ -2036,12 +2044,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" [[package]] name = "ruma-identity-service-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "js_int", "ruma-api", @@ -2054,7 +2062,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "js_int", "ruma-api", @@ -2069,7 +2077,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "bytes", "form_urlencoded", @@ -2083,7 +2091,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2094,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.7.2" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2112,7 +2120,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=b39537812c12caafcbf8b7bd744a06d196d45281#b39537812c12caafcbf8b7bd744a06d196d45281" +source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ "itertools 0.10.0", "js_int", @@ -2182,9 +2190,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.5" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" [[package]] name = "ryu" @@ -2436,7 +2444,8 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" version = "0.3.0" -source = "git+https://github.com/RustCrypto/utils?rev=51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0#51e7c9d734e4d3c5279ba1c181c65b1bd77bcad0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ "der", ] @@ -2524,9 +2533,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.72" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +checksum = "b9505f307c872bab8eb46f77ae357c8eba1fdacead58ee5a850116b1d7f82883" dependencies = [ "proc-macro2", "quote", @@ -2554,7 +2563,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "rand 0.8.3", - "redox_syscall", + "redox_syscall 0.2.6", "remove_dir_all", "winapi", ] @@ -2570,18 +2579,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.25" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.25" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", @@ -2684,9 +2693,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.6.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd3076b5c8cc18138b8f8814895c11eb4de37114a5d127bafdc5e55798ceef37" +checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" dependencies = [ "autocfg", "bytes", @@ -2703,9 +2712,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2725,9 +2734,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" dependencies = [ "bytes", "futures-core", @@ -2820,9 +2829,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.18" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" +checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" dependencies = [ "ansi_term", "chrono", @@ -2842,9 +2851,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.3" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = "952a078337565ba39007de99b151770f41039253a31846f0a3d5cd5a4ac8eedf" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -2867,9 +2876,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.3" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = "da9c97f7d103e0f94dbe384a57908833505ae5870126492f166821b7cf685589" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -2893,9 +2902,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "twoway" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" +checksum = "6b40075910de3a912adbd80b5d8bad6ad10a23eeb1f5bf9d4006839e899ba5bc" dependencies = [ "memchr", "unchecked-index", @@ -2958,9 +2967,9 @@ checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "unindent" @@ -2976,9 +2985,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", "idna", @@ -3016,9 +3025,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if 1.0.0", "serde", @@ -3028,9 +3037,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -3043,9 +3052,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3055,9 +3064,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3065,9 +3074,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -3078,15 +3087,15 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index e7ebadf..96260ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ rust = "1.50" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "b39537812c12caafcbf8b7bd744a06d196d45281", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "5a7e2cddcf257e367465cced51442c91e8f557c9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 56de5fc..f43f73e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -295,7 +295,7 @@ pub async fn register_route( state_default: 50.into(), users, users_default: 0.into(), - notifications: ruma::events::room::power_levels::NotificationPowerLevels { + notifications: ruma::power_levels::NotificationPowerLevels { room: 50.into(), }, }, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 7c50775..72b2d60 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -122,9 +122,7 @@ pub async fn create_room_route( state_default: 50.into(), users, users_default: 0.into(), - notifications: ruma::events::room::power_levels::NotificationPowerLevels { - room: 50.into(), - }, + notifications: ruma::power_levels::NotificationPowerLevels { room: 50.into() }, }) .expect("event is valid, we just created it"); diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index f2a97ab..ada0c9a 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - r0::to_device::{self, send_event_to_device}, +use ruma::{ + api::client::{error::ErrorKind, r0::to_device::send_event_to_device}, + to_device::DeviceIdOrAllDevices, }; #[cfg(feature = "conduit_bin")] @@ -34,27 +34,25 @@ pub async fn send_event_to_device_route( for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { match target_device_id_maybe { - to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( 
- sender_user, - &target_user_id, - &target_device_id, - &body.event_type, - serde_json::from_str(event.get()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - &db.globals, - )? - } + DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( + sender_user, + &target_user_id, + &target_device_id, + &body.event_type, + event.deserialize_as().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + &db.globals, + )?, - to_device::DeviceIdOrAllDevices::AllDevices => { + DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { db.users.add_to_device_event( sender_user, &target_user_id, &target_device_id?, &body.event_type, - serde_json::from_str(event.get()).map_err(|_| { + event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, &db.globals, diff --git a/src/database/users.rs b/src/database/users.rs index b6d3b3c..d89569a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -719,7 +719,7 @@ impl Users { sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, - event_type: &EventType, + event_type: &str, content: serde_json::Value, globals: &super::globals::Globals, ) -> Result<()> { @@ -730,7 +730,7 @@ impl Users { key.extend_from_slice(&globals.next_count()?.to_be_bytes()); let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_string().into()); + json.insert("type".to_owned(), event_type.to_owned().into()); json.insert("sender".to_owned(), sender.to_string().into()); json.insert("content".to_owned(), content); diff --git a/src/server_server.rs b/src/server_server.rs index 8384a9e..f6d6594 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -9,10 +9,7 @@ use regex::Regex; use rocket::{response::content::Json, State}; use ruma::{ api::{ - client::{ - error::{Error as RumaError, ErrorKind}, - r0::to_device, - }, + client::error::{Error as RumaError, ErrorKind}, federation::{ authorization::get_event_authorization, device::get_devices::{self, v1::UserDevice}, @@ -49,6 +46,7 @@ use ruma::{ serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, Event, EventMap, RoomVersion, StateMap}, + to_device::DeviceIdOrAllDevices, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -748,13 +746,13 @@ pub async fn send_transaction_message_route( for (target_user_id, map) in &messages { for (target_device_id_maybe, event) in map { match target_device_id_maybe { - to_device::DeviceIdOrAllDevices::DeviceId(target_device_id) => { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { db.users.add_to_device_event( &sender, &target_user_id, &target_device_id, - &ev_type, - serde_json::from_str(event.get()).map_err(|_| { + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", @@ -764,14 +762,14 @@ pub async fn send_transaction_message_route( )? 
} - to_device::DeviceIdOrAllDevices::AllDevices => { + DeviceIdOrAllDevices::AllDevices => { for target_device_id in db.users.all_device_ids(&target_user_id) { db.users.add_to_device_event( &sender, &target_user_id, &target_device_id?, - &ev_type, - serde_json::from_str(event.get()).map_err(|_| { + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", From 7fa54e44113054818ad81030a988be08582c27d7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:14:37 +0200 Subject: [PATCH 0620/1727] Use Ruma-provided default power levels for shorter code --- src/client_server/account.rs | 12 +----------- src/client_server/room.rs | 10 +--------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index f43f73e..2fc8b19 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -286,18 +286,8 @@ pub async fn register_route( event_type: EventType::RoomPowerLevels, content: serde_json::to_value( ruma::events::room::power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 50.into(), users, - users_default: 0.into(), - notifications: ruma::power_levels::NotificationPowerLevels { - room: 50.into(), - }, + ..Default::default() }, ) .expect("event is valid, we just created it"), diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 72b2d60..3f91324 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -113,16 +113,8 @@ pub async fn create_room_route( let mut power_levels_content = serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 50.into(), users, - users_default: 0.into(), - notifications: ruma::power_levels::NotificationPowerLevels { room: 50.into() }, + ..Default::default() }) .expect("event is valid, we just created it"); From af2ce5803ebabb8b4b03ab24952878d997db05a1 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:29:02 +0200 Subject: [PATCH 0621/1727] Remove pointless double deserialization --- src/client_server/sync.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2f28706..dc92fb2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -286,10 +286,8 @@ pub async fn sync_events_route( .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { let content = serde_json::from_value::< - Raw, + ruma::events::room::member::MemberEventContent, >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() .map_err(|_| { Error::bad_database("Invalid member event in database.") })?; From f3e630c064c255bd8228de50363aa6a7fc720996 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:34:14 +0200 Subject: [PATCH 0622/1727] Fix lots of clippy lints --- src/client_server/alias.rs | 3 +- src/client_server/directory.rs | 4 +- src/client_server/membership.rs | 10 +- src/client_server/session.rs | 6 +- src/client_server/sync.rs | 15 ++- src/database/appservice.rs | 18 ++-- src/database/globals.rs | 8 +- src/database/key_backups.rs | 5 +- src/database/media.rs | 12 +-- src/database/pusher.rs | 8 +- src/database/rooms.rs | 168 
+++++++++++++------------------- src/database/rooms/edus.rs | 10 +- src/database/transaction_ids.rs | 2 +- src/database/uiaa.rs | 4 +- src/database/users.rs | 31 +++--- src/server_server.rs | 13 +-- src/utils.rs | 25 +++-- 17 files changed, 140 insertions(+), 202 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 40252af..a2ca0c8 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -97,13 +97,12 @@ pub async fn get_alias_helper( .map_or_else(Vec::new, |aliases| { aliases .iter() - .map(|aliases| { + .filter_map(|aliases| { aliases .get("regex") .and_then(|regex| regex.as_str()) .and_then(|regex| Regex::new(regex).ok()) }) - .filter_map(|o| o) .collect::>() }); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index c41e81e..dfef34d 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -135,9 +135,7 @@ pub async fn get_public_rooms_filtered_helper( filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> ConduitResult { - if let Some(other_server) = server - .clone() - .filter(|server| *server != db.globals.server_name().as_str()) + if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) { let response = db .sending diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index a3f1389..2dfa077 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -743,12 +743,10 @@ pub async fn invite_helper( let create_event_content = create_event .as_ref() .map(|create_event| { - Ok::<_, Error>( - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, - ) + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) }) .transpose()?; diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 9a75ae2..dd504f1 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -202,10 +202,8 @@ pub async fn logout_all_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(sender_user) { - if let Ok(device_id) = device_id { - db.users.remove_device(&sender_user, &device_id)?; - } + for device_id in db.users.all_device_ids(sender_user).flatten() { + db.users.remove_device(&sender_user, &device_id)?; } db.flush().await?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index dc92fb2..1c078e9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -146,11 +146,9 @@ pub async fn sync_events_route( let since_state = since_shortstatehash .as_ref() .map(|since_shortstatehash| { - Ok::<_, Error>( - since_shortstatehash - .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash)) - .transpose()?, - ) + since_shortstatehash + .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash)) + .transpose() }) .transpose()?; @@ -255,7 +253,7 @@ pub async fn sync_events_route( device_list_updates.extend( db.rooms .room_members(&room_id) - .filter_map(|user_id| Some(user_id.ok()?)) + .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender sender_user != user_id @@ -313,9 +311,10 @@ pub async fn sync_events_route( Ok(None) } }) - .filter_map(|u| 
u.ok()) // Filter out buggy users + // Filter out buggy users + .filter_map(|u| u.ok()) // Filter for possible heroes - .filter_map(|u| u) + .flatten() { if heroes.contains(&hero) || hero == sender_user.as_str() { continue; diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 21b18a7..4bf3a21 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -34,29 +34,25 @@ impl Appservice { .get(id) .map_or_else( || { - Ok(self - .id_appserviceregistrations + self.id_appserviceregistrations .get(id.as_bytes())? .map(|bytes| { - Ok::<_, Error>(serde_yaml::from_slice(&bytes).map_err(|_| { + serde_yaml::from_slice(&bytes).map_err(|_| { Error::bad_database( "Invalid registration bytes in id_appserviceregistrations.", ) - })?) + }) }) - .transpose()?) + .transpose() }, |r| Ok(Some(r.clone())), ) } - pub fn iter_ids<'a>( - &'a self, - ) -> Result> + Send + Sync + 'a> { + pub fn iter_ids(&self) -> Result> + Send + Sync + '_> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { - Ok(utils::string_from_bytes(&id).map_err(|_| { - Error::bad_database("Invalid id bytes in id_appserviceregistrations.") - })?) + utils::string_from_bytes(&id) + .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) })) } diff --git a/src/database/globals.rs b/src/database/globals.rs index 5525644..1ce87bd 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -171,14 +171,14 @@ impl Globals { } pub fn next_count(&self) -> Result { - Ok(utils::u64_from_bytes(&self.globals.increment(COUNTER)?) - .map_err(|_| Error::bad_database("Count has invalid bytes."))?) + utils::u64_from_bytes(&self.globals.increment(COUNTER)?) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) } pub fn current_count(&self) -> Result { self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - Ok(utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes."))?) + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) }) } diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 0685c48..2bb3b6d 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -119,9 +119,8 @@ impl KeyBackups { self.backupid_algorithm .get(&key)? .map_or(Ok(None), |bytes| { - Ok(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") - })?) + serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) }) } diff --git a/src/database/media.rs b/src/database/media.rs index 944c5bd..a1fe26e 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -110,9 +110,9 @@ impl Media { let content_type = parts .next() .map(|bytes| { - Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + utils::string_from_bytes(bytes).map_err(|_| { Error::bad_database("Content type in mediaid_file is invalid unicode.") - })?) + }) }) .transpose()?; @@ -199,9 +199,9 @@ impl Media { let content_type = parts .next() .map(|bytes| { - Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + utils::string_from_bytes(bytes).map_err(|_| { Error::bad_database("Content type in mediaid_file is invalid unicode.") - })?) 
+ }) }) .transpose()?; @@ -235,9 +235,9 @@ impl Media { let content_type = parts .next() .map(|bytes| { - Ok::<_, Error>(utils::string_from_bytes(bytes).map_err(|_| { + utils::string_from_bytes(bytes).map_err(|_| { Error::bad_database("Content type in mediaid_file is invalid unicode.") - })?) + }) }) .transpose()?; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 39b631d..358c3c9 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -51,8 +51,8 @@ impl PushData { self.senderkey_pusher .get(senderkey)? .map(|push| { - Ok(serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .transpose() } @@ -64,8 +64,8 @@ impl PushData { self.senderkey_pusher .scan_prefix(prefix) .map(|(_, push)| { - Ok(serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .collect() } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0820395..9e1245f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -93,14 +93,10 @@ impl Rooms { .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() .map(|bytes| { - Ok::<_, Error>( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("EventId in stateid_shorteventid is invalid.") - })?, - ) + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) }) .filter_map(|r| r.ok()) .collect()) @@ -116,14 +112,10 @@ impl Rooms { .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() .map(|bytes| { - Ok::<_, Error>( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("EventId in stateid_shorteventid is invalid.") - })?, - ) + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) }) .filter_map(|r| r.ok()) .map(|eventid| self.get_pdu(&eventid)) @@ -168,16 +160,10 @@ impl Rooms { .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() .map(|bytes| { - Ok::<_, Error>( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "EventID in stateid_shorteventid is invalid unicode.", - ) - })?) - .map_err(|_| { - Error::bad_database("EventId in stateid_shorteventid is invalid.") - })?, - ) + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) }) .map(|r| r.ok()) .flatten()) @@ -204,16 +190,16 @@ impl Rooms { self.eventid_shorteventid .get(event_id.as_bytes())? .map_or(Ok(None), |shorteventid| { - Ok(self - .shorteventid_shortstatehash - .get(&shorteventid)? 
- .map_or(Ok::<_, Error>(None), |bytes| { + self.shorteventid_shortstatehash.get(&shorteventid)?.map_or( + Ok::<_, Error>(None), + |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database( "Invalid shortstatehash bytes in shorteventid_shortstatehash", ) })?)) - })?) + }, + ) }) } @@ -485,7 +471,7 @@ impl Rooms { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else::, _, _>( - || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + || self.eventid_outlierpdu.get(event_id.as_bytes()), |pduid| { Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { Error::bad_database("Invalid pduid in eventid_pduid.") @@ -493,8 +479,7 @@ impl Rooms { }, )? .map(|pdu| { - Ok(serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?) + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } @@ -521,8 +506,7 @@ impl Rooms { }, )? .map(|pdu| { - Ok(serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?) + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } @@ -534,7 +518,7 @@ impl Rooms { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else::, _, _>( - || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + || self.eventid_outlierpdu.get(event_id.as_bytes()), |pduid| { Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { Error::bad_database("Invalid pduid in eventid_pduid.") @@ -542,8 +526,7 @@ impl Rooms { }, )? .map(|pdu| { - Ok(serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?) + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } @@ -594,12 +577,10 @@ impl Rooms { self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - Ok::<_, Error>( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?, - ) + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) }) .collect() } @@ -1213,12 +1194,10 @@ impl Rooms { let create_event_content = create_event .as_ref() .map(|create_event| { - Ok::<_, Error>( - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, - ) + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) }) .transpose()?; @@ -1382,13 +1361,12 @@ impl Rooms { .map_or_else(Vec::new, |users| { users .iter() - .map(|users| { + .filter_map(|users| { users .get("regex") .and_then(|regex| regex.as_str()) .and_then(|regex| Regex::new(regex).ok()) }) - .filter_map(|o| o) .collect::>() }); let aliases = namespaces @@ -1397,13 +1375,12 @@ impl Rooms { .map_or_else(Vec::new, |aliases| { aliases .iter() - .map(|aliases| { + .filter_map(|aliases| { aliases .get("regex") .and_then(|regex| regex.as_str()) .and_then(|regex| Regex::new(regex).ok()) }) - .filter_map(|o| o) .collect::>() }); let rooms = namespaces @@ -2011,10 +1988,10 @@ impl Rooms { prefix.push(0xff); self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - Ok(utils::string_from_bytes(&bytes) + utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))?) + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) }) } @@ -2032,14 +2009,14 @@ impl Rooms { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - pub fn public_rooms<'a>(&'a self) -> impl Iterator> + 'a { + pub fn public_rooms(&self) -> impl Iterator> + '_ { self.publicroomids.iter().map(|(bytes, _)| { - Ok( - RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + RoomId::try_from( + utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))?, + })?, ) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) }) } @@ -2105,34 +2082,27 @@ impl Rooms { &'a self, users: Vec, ) -> Result> + 'a> { - let iterators = users - .into_iter() - .map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + let iterators = users.into_iter().map(move |user_id| { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); - Ok::<_, Error>( - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| { - Error::bad_database("Invalid userroomid_joined in db.") - })? - .0 - + 1; // +1 because the room id starts AFTER the separator + self.userroomid_joined + .scan_prefix(prefix) + .map(|(key, _)| { + let roomid_index = key + .iter() + .enumerate() + .find(|(_, &b)| b == 0xff) + .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
+ .0 + + 1; // +1 because the room id starts AFTER the separator - let room_id = key[roomid_index..].to_vec(); + let room_id = key[roomid_index..].to_vec(); - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()), - ) - }) - .filter_map(|r| r.ok()); + Ok::<_, Error>(room_id) + }) + .filter_map(|r| r.ok()) + }); // We use the default compare function because keys are sorted correctly (not reversed) Ok(utils::common_elements(iterators, Ord::cmp) @@ -2154,7 +2124,7 @@ impl Rooms { prefix.push(0xff); self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - Ok(Box::::try_from( + Box::::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2164,7 +2134,7 @@ impl Rooms { Error::bad_database("Server name in roomserverids is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid."))?) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) }) } @@ -2177,7 +2147,7 @@ impl Rooms { prefix.push(0xff); self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - Ok(RoomId::try_from( + RoomId::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2185,7 +2155,7 @@ impl Rooms { ) .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid."))?) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) }) } @@ -2199,7 +2169,7 @@ impl Rooms { prefix.push(0xff); self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - Ok(UserId::try_from( + UserId::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2209,7 +2179,7 @@ impl Rooms { Error::bad_database("User ID in roomuserid_joined is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))?) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) }) } @@ -2224,7 +2194,7 @@ impl Rooms { self.roomuseroncejoinedids .scan_prefix(prefix) .map(|(key, _)| { - Ok(UserId::try_from( + UserId::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2234,7 +2204,7 @@ impl Rooms { Error::bad_database("User ID in room_useroncejoined is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid."))?) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) }) } @@ -2250,7 +2220,7 @@ impl Rooms { self.roomuserid_invitecount .scan_prefix(prefix) .map(|(key, _)| { - Ok(UserId::try_from( + UserId::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2260,7 +2230,7 @@ impl Rooms { Error::bad_database("User ID in roomuserid_invited is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid."))?) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) }) } @@ -2303,7 +2273,7 @@ impl Rooms { self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) .map(|(key, _)| { - Ok(RoomId::try_from( + RoomId::try_from( utils::string_from_bytes( &key.rsplit(|&b| b == 0xff) .next() @@ -2313,7 +2283,7 @@ impl Rooms { Error::bad_database("Room ID in userroomid_joined is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid."))?) 
+ .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) }) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 677d26e..9a5cdeb 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -306,12 +306,10 @@ impl RoomEdus { .typingid_userid .scan_prefix(prefix) .map(|(_, user_id)| { - Ok::<_, Error>( - UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?, - ) + UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) }) { user_ids.insert(user_id?); diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 3e37779..f346757 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -41,6 +41,6 @@ impl TransactionIds { key.extend_from_slice(txn_id.as_bytes()); // If there's no entry, this is a new transaction - Ok(self.userdevicetxnid_response.get(&key)?) + self.userdevicetxnid_response.get(&key) } } diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index f7f3d1f..1372fef 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -57,9 +57,7 @@ impl Uiaa { { let mut uiaainfo = session .as_ref() - .map(|session| { - Ok::<_, Error>(self.get_uiaa_session(&user_id, &device_id, session)?) - }) + .map(|session| self.get_uiaa_session(&user_id, &device_id, session)) .unwrap_or_else(|| Ok(uiaainfo.clone()))?; if uiaainfo.session.is_none() { diff --git a/src/database/users.rs b/src/database/users.rs index d89569a..f99084f 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -88,14 +88,12 @@ impl Users { } /// Returns an iterator over all users on this homeserver. - pub fn iter<'a>(&'a self) -> impl Iterator> + 'a { + pub fn iter(&self) -> impl Iterator> + '_ { self.userid_password.iter().map(|(bytes, _)| { - Ok( - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))?, - ) + UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("User ID in userid_password is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) }) } @@ -588,16 +586,10 @@ impl Users { .iter_from(&start, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(_, bytes)| { - Ok( - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "User ID in devicekeychangeid_userid is invalid unicode.", - ) - })?) - .map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid.") - })?, - ) + UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) }) } @@ -863,9 +855,8 @@ impl Users { self.userdeviceid_metadata .scan_prefix(key) .map(|(_, bytes)| { - Ok(serde_json::from_slice::(&bytes).map_err(|_| { - Error::bad_database("Device in userdeviceid_metadata is invalid.") - })?) 
+ serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) }) } diff --git a/src/server_server.rs b/src/server_server.rs index f6d6594..f34633a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1691,13 +1691,12 @@ pub(crate) fn append_incoming_pdu( .map_or_else(Vec::new, |users| { users .iter() - .map(|users| { + .filter_map(|users| { users .get("regex") .and_then(|regex| regex.as_str()) .and_then(|regex| Regex::new(regex).ok()) }) - .filter_map(|o| o) .collect::>() }); let aliases = namespaces @@ -2026,12 +2025,10 @@ pub fn create_join_event_template_route( let create_event_content = create_event .as_ref() .map(|create_event| { - Ok::<_, Error>( - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, - ) + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) }) .transpose()?; diff --git a/src/utils.rs b/src/utils.rs index 0c8fb5c..2b5336c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -84,22 +84,19 @@ pub fn common_elements( let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); Some(first_iterator.filter(move |target| { - other_iterators - .iter_mut() - .map(|it| { - while let Some(element) = it.peek() { - match check_order(element, target) { - Ordering::Greater => return false, // We went too far - Ordering::Equal => return true, // Element is in both iters - Ordering::Less => { - // Keep searching - it.next(); - } + other_iterators.iter_mut().all(|it| { + while let Some(element) = it.peek() { + match check_order(element, target) { + Ordering::Greater => return false, // We went too far + Ordering::Equal => return true, // Element is in both iters + Ordering::Less => { + // Keep searching + it.next(); } } - false - }) - .all(|b| b) + } + false + }) })) } From b291e765729c5e367841de855578e38cdec43f21 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:37:07 +0200 Subject: [PATCH 0623/1727] Use try operator for Option more --- src/client_server/alias.rs | 7 +------ src/database/rooms.rs | 14 ++------------ src/server_server.rs | 7 +------ 3 files changed, 4 insertions(+), 24 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index a2ca0c8..a54bd36 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -97,12 +97,7 @@ pub async fn get_alias_helper( .map_or_else(Vec::new, |aliases| { aliases .iter() - .filter_map(|aliases| { - aliases - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) .collect::>() }); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 9e1245f..7bb019e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1361,12 +1361,7 @@ impl Rooms { .map_or_else(Vec::new, |users| { users .iter() - .filter_map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) .collect::>() }); let aliases = namespaces @@ -1375,12 +1370,7 @@ impl Rooms { .map_or_else(Vec::new, |aliases| { aliases .iter() - .filter_map(|aliases| { - aliases - .get("regex") - .and_then(|regex| regex.as_str()) - 
.and_then(|regex| Regex::new(regex).ok()) - }) + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) .collect::>() }); let rooms = namespaces diff --git a/src/server_server.rs b/src/server_server.rs index f34633a..6d074ec 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1691,12 +1691,7 @@ pub(crate) fn append_incoming_pdu( .map_or_else(Vec::new, |users| { users .iter() - .filter_map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) .collect::>() }); let aliases = namespaces From 808741bcb685820fb7aa237f7f803a2e5c92ca49 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 17 Jun 2021 20:44:29 +0200 Subject: [PATCH 0624/1727] Remove unnecessary Option-wrapping and successive unwrapping --- src/client_server/directory.rs | 11 +++---- src/database/rooms.rs | 52 +++++++++++++++------------------- src/server_server.rs | 20 ++++--------- 3 files changed, 33 insertions(+), 50 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index dfef34d..be5501a 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -160,15 +160,12 @@ pub async fn get_public_rooms_filtered_helper( .map(|c| { // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk // to ruma::api::client::r0::directory::PublicRoomsChunk - Ok::<_, Error>( - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type"), + serde_json::from_str( + &serde_json::to_string(&c) + .expect("PublicRoomsChunk::to_string always works"), ) + .expect("federation and client-server PublicRoomsChunk are the same type") }) - .filter_map(|r| r.ok()) .collect(), prev_batch: response.prev_batch, next_batch: response.next_batch, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7bb019e..f19d4b9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2023,39 +2023,33 @@ impl Rooms { .map(str::to_lowercase) .collect::>(); - let iterators = words - .clone() - .into_iter() - .map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); + let iterators = words.clone().into_iter().map(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xff); - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - Ok::<_, Error>( - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| { - let pduid_index = key - .iter() - .enumerate() - .filter(|(_, &b)| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? - .0 - + 1; // +1 because the pdu id starts AFTER the separator + self.tokenids + .iter_from(&last_possible_id, true) // Newest pdus first + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(|(key, _)| { + let pduid_index = key + .iter() + .enumerate() + .filter(|(_, &b)| b == 0xff) + .nth(1) + .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? 
+ .0 + + 1; // +1 because the pdu id starts AFTER the separator - let pdu_id = key[pduid_index..].to_vec(); + let pdu_id = key[pduid_index..].to_vec(); - Ok::<_, Error>(pdu_id) - }) - .filter_map(|r| r.ok()), - ) - }) - .filter_map(|r| r.ok()); + Ok::<_, Error>(pdu_id) + }) + .filter_map(|r| r.ok()) + }); Ok(( utils::common_elements(iterators, |a, b| { diff --git a/src/server_server.rs b/src/server_server.rs index 6d074ec..961cc9d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -532,15 +532,11 @@ pub async fn get_public_rooms_filtered_route( .map(|c| { // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk // to ruma::api::client::r0::directory::PublicRoomsChunk - Ok::<_, Error>( - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type"), + serde_json::from_str( + &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), ) + .expect("federation and client-server PublicRoomsChunk are the same type") }) - .filter_map(|r| r.ok()) .collect(), prev_batch: response.prev_batch, next_batch: response.next_batch, @@ -580,15 +576,11 @@ pub async fn get_public_rooms_route( .map(|c| { // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk // to ruma::api::client::r0::directory::PublicRoomsChunk - Ok::<_, Error>( - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type"), + serde_json::from_str( + &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), ) + .expect("federation and client-server PublicRoomsChunk are the same type") }) - .filter_map(|r| r.ok()) .collect(), prev_batch: response.prev_batch, next_batch: response.next_batch, From 637d9d3b6f0f9cf7e4f35d08a11e1b1e69ef1e96 Mon Sep 17 00:00:00 2001 From: phesch Date: Sat, 19 Jun 2021 16:12:05 +0200 Subject: [PATCH 0625/1727] Always allow appservices to register new users --- src/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 56de5fc..9de482c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -88,7 +88,7 @@ pub async fn register_route( db: State<'_, Arc>, body: Ruma>, ) -> ConduitResult { - if !db.globals.allow_registration() { + if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", From dcac1361ec1eab9b82398888ef924140de3ef68d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 21 Jun 2021 15:20:37 +0200 Subject: [PATCH 0626/1727] improvement: /search works for multiple rooms --- src/client_server/search.rs | 53 ++++++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index ef5ddc2..5fc64d0 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -20,17 +20,32 @@ pub async fn search_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = search_criteria.filter.as_ref().unwrap(); + let filter = search_criteria.filter.clone().unwrap_or_default(); - let room_id = 
filter.rooms.as_ref().unwrap().first().unwrap(); + let room_ids = filter.rooms.clone().unwrap_or_else(|| { + db.rooms + .rooms_joined(&sender_user) + .filter_map(|r| r.ok()) + .collect() + }); let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); - if !db.rooms.is_joined(sender_user, &room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); + let mut searches = Vec::new(); + + for room_id in room_ids { + if !db.rooms.is_joined(sender_user, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + let search = db + .rooms + .search_pdus(&room_id, &search_criteria.search_term)?; + + searches.push(search.0.peekable()); } let skip = match body.next_batch.as_ref().map(|s| s.parse()) { @@ -44,12 +59,20 @@ pub async fn search_events_route( None => 0, // Default to the start }; - let search = db - .rooms - .search_pdus(&room_id, &search_criteria.search_term)?; + let mut results = Vec::new(); + for _ in 0..skip + limit { + if let Some(s) = searches + .iter_mut() + .map(|s| (s.peek().cloned(), s)) + .max_by_key(|(peek, _)| peek.clone()) + .and_then(|(_, i)| i.next()) + { + results.push(s); + } + } - let results = search - .0 + let results = results + .iter() .map(|result| { Ok::<_, Error>(SearchResult { context: EventContextResult { @@ -84,7 +107,11 @@ pub async fn search_events_route( next_batch, results, state: BTreeMap::new(), // TODO - highlights: search.1, + highlights: search_criteria + .search_term + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + .collect::>(), }, }) .into()) From 09a8737f242f6ea7c709ef4afb5a6d6c6a6c8536 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 11 Jun 2021 15:47:53 -0400 Subject: [PATCH 0627/1727] Export conduits Config struct and fix clipp warningsy --- Cargo.lock | 6 +++--- src/error.rs | 3 ++- src/lib.rs | 4 ++-- src/server_server.rs | 3 +-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3d7408..1bf8a3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -982,9 +982,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] @@ -2122,7 +2122,7 @@ name = "ruma-state-res" version = "0.1.0" source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" dependencies = [ - "itertools 0.10.0", + "itertools 0.10.1", "js_int", "maplit", "ruma-common", diff --git a/src/error.rs b/src/error.rs index 4f363ff..e139386 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use log::{error, warn}; +use log::warn; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, @@ -12,6 +12,7 @@ use thiserror::Error; use { crate::RumaResponse, http::StatusCode, + log::error, rocket::{ response::{self, Responder}, Request, diff --git a/src/lib.rs b/src/lib.rs index 50ca6ea..fbffb7e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,10 +10,10 @@ mod ruma_wrapper; pub mod server_server; mod utils; -pub use database::Database; +pub use database::{Config, Database}; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use rocket::Config; +pub use rocket::Config as RocketConfig; pub use 
ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use std::ops::Deref; diff --git a/src/server_server.rs b/src/server_server.rs index 961cc9d..20e24ad 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -662,8 +662,7 @@ pub async fn send_transaction_message_route( for edu in body .edus .iter() - .map(|edu| serde_json::from_str::(edu.json().get())) - .filter_map(|r| r.ok()) + .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { Edu::Presence(_) => {} From 05821d6fd5bdf96b2e8615bb527cb07ec87f4c6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 30 Jun 2021 09:52:01 +0200 Subject: [PATCH 0628/1727] improvement: pdu cache, /sync cache --- Cargo.lock | 1 + Cargo.toml | 1 + src/client_server/directory.rs | 216 ++++++++++++++++---------------- src/client_server/membership.rs | 23 ++-- src/client_server/profile.rs | 6 +- src/client_server/room.rs | 6 +- src/client_server/state.rs | 10 +- src/client_server/sync.rs | 172 +++++++++++++++++++++---- src/database.rs | 2 + src/database/abstraction.rs | 2 +- src/database/globals.rs | 19 ++- src/database/pusher.rs | 2 +- src/database/rooms.rs | 69 ++++++---- src/error.rs | 31 +++-- src/ruma_wrapper.rs | 77 +++++++----- src/server_server.rs | 30 ++--- 16 files changed, 424 insertions(+), 243 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3d7408..c9bce96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -243,6 +243,7 @@ dependencies = [ "image", "jsonwebtoken", "log", + "lru-cache", "opentelemetry", "opentelemetry-jaeger", "pretty_env_logger", diff --git a/Cargo.toml b/Cargo.toml index 96260ec..bb44918 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,6 +73,7 @@ tracing-subscriber = "0.2.16" tracing-opentelemetry = "0.11.0" opentelemetry-jaeger = "0.11.0" pretty_env_logger = "0.4.0" +lru-cache = "0.1.2" [features] default = ["conduit_bin", "backend_sled"] diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index be5501a..1b6b1d7 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -200,84 +200,84 @@ pub async fn get_public_rooms_filtered_helper( } } - let mut all_rooms = db - .rooms - .public_rooms() - .map(|room_id| { - let room_id = room_id?; + let mut all_rooms = + db.rooms + .public_rooms() + .map(|room_id| { + let room_id = room_id?; - let chunk = PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: db - .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok( - serde_json::from_value::< + let chunk = PublicRoomsChunk { + aliases: Vec::new(), + canonical_alias: db + .rooms + .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::< Raw, - >(s.content) + >(s.content.clone()) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid canonical alias event in database.") })? - .alias, - ) - })?, - name: db - .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok( - serde_json::from_value::>(s.content) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - })? - .name() - .map(|n| n.to_owned()), - ) - })?, - num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), - topic: db - .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? 
- .map_or(Ok::<_, Error>(None), |s| { - Ok(Some( - serde_json::from_value::>(s.content) + .alias) + })?, + name: db + .rooms + .room_state_get(&room_id, &EventType::RoomName, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok(serde_json::from_value::>( + s.content.clone(), + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? + .name() + .map(|n| n.to_owned())) + })?, + num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), + topic: db + .rooms + .room_state_get(&room_id, &EventType::RoomTopic, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok(Some( + serde_json::from_value::>( + s.content.clone(), + ) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room topic event in database.") })? .topic, - )) - })?, - world_readable: db - .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? - .history_visibility - == history_visibility::HistoryVisibility::WorldReadable) - })?, - guest_can_join: db - .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok( + )) + })?, + world_readable: db + .rooms + .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? + .map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_value::< + Raw, + >(s.content.clone()) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? + .history_visibility + == history_visibility::HistoryVisibility::WorldReadable) + })?, + guest_can_join: db + .rooms + .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? + .map_or(Ok::<_, Error>(false), |s| { + Ok( serde_json::from_value::>( - s.content, + s.content.clone(), ) .expect("from_value::> can never fail") .deserialize() @@ -287,61 +287,63 @@ pub async fn get_public_rooms_filtered_helper( .guest_access == guest_access::GuestAccess::CanJoin, ) - })?, - avatar_url: db - .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? - .map(|s| { - Ok::<_, Error>( - serde_json::from_value::>(s.content) + })?, + avatar_url: db + .rooms + .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .map(|s| { + Ok::<_, Error>( + serde_json::from_value::>( + s.content.clone(), + ) .expect("from_value::> can never fail") .deserialize() .map_err(|_| { Error::bad_database("Invalid room avatar event in database.") })? .url, - ) - }) - .transpose()? - // url is now an Option so we must flatten - .flatten(), - room_id, - }; - Ok(chunk) - }) - .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms - .filter(|chunk| { - if let Some(query) = filter - .generic_search_term - .as_ref() - .map(|q| q.to_lowercase()) - { - if let Some(name) = &chunk.name { - if name.to_lowercase().contains(&query) { - return true; + ) + }) + .transpose()? 
+ // url is now an Option so we must flatten + .flatten(), + room_id, + }; + Ok(chunk) + }) + .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + .filter(|chunk| { + if let Some(query) = filter + .generic_search_term + .as_ref() + .map(|q| q.to_lowercase()) + { + if let Some(name) = &chunk.name { + if name.to_lowercase().contains(&query) { + return true; + } } - } - if let Some(topic) = &chunk.topic { - if topic.to_lowercase().contains(&query) { - return true; + if let Some(topic) = &chunk.topic { + if topic.to_lowercase().contains(&query) { + return true; + } } - } - if let Some(canonical_alias) = &chunk.canonical_alias { - if canonical_alias.as_str().to_lowercase().contains(&query) { - return true; + if let Some(canonical_alias) = &chunk.canonical_alias { + if canonical_alias.as_str().to_lowercase().contains(&query) { + return true; + } } - } - false - } else { - // No search term - true - } - }) - // We need to collect all, so we can sort by member count - .collect::>(); + false + } else { + // No search term + true + } + }) + // We need to collect all, so we can sort by member count + .collect::>(); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 2dfa077..87fead2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -189,7 +189,8 @@ pub async fn kick_user_route( ErrorKind::BadState, "Cannot kick member that's not in the room.", ))? - .content, + .content + .clone(), ) .expect("Raw::from_value always works") .deserialize() @@ -245,11 +246,12 @@ pub async fn ban_user_route( third_party_invite: None, }), |event| { - let mut event = - serde_json::from_value::>(event.content) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let mut event = serde_json::from_value::>( + event.content.clone(), + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = ruma::events::room::member::MembershipState::Ban; Ok(event) }, @@ -295,7 +297,8 @@ pub async fn unban_user_route( ErrorKind::BadState, "Cannot unban a user who is not banned.", ))? - .content, + .content + .clone(), ) .expect("from_value::> can never fail") .deserialize() @@ -753,7 +756,7 @@ pub async fn invite_helper( let create_prev_event = if prev_events.len() == 1 && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) { - create_event.map(Arc::new) + create_event } else { None }; @@ -792,10 +795,10 @@ pub async fn invite_helper( let mut unsigned = BTreeMap::new(); if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content); + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), ); } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 32bb608..4e9a37b 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -53,7 +53,8 @@ pub async fn set_displayname_route( room.", ) })? - .content, + .content + .clone(), ) .expect("from_value::> can never fail") .deserialize() @@ -154,7 +155,8 @@ pub async fn set_avatar_url_route( room.", ) })? 
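The directory, membership, and profile hunks in this patch all repeat one pattern: look up the current state event for an (event type, state key) pair and deserialize its JSON content; the newly added `.clone()` calls are needed because PDUs are now handed out as `Arc<PduEvent>` (see the rooms.rs hunk further down), so the content value can no longer be moved out of the shared event. A minimal sketch of that pattern, with a hypothetical `room_state_get` and a simplified content type standing in for ruma's `Raw<...>` indirection:

    use std::sync::Arc;
    use serde::Deserialize;
    use serde_json::Value;

    #[derive(Deserialize)]
    struct NameContent {
        name: Option<String>,
    }

    struct PduEvent {
        content: Value,
    }

    // Hypothetical lookup: the current state event for (event type, state key), if any.
    fn room_state_get(_event_type: &str, _state_key: &str) -> Option<Arc<PduEvent>> {
        None
    }

    fn room_name() -> Option<String> {
        let pdu = room_state_get("m.room.name", "")?;
        // The PDU is shared behind an Arc, so clone the content instead of moving it out.
        serde_json::from_value::<NameContent>(pdu.content.clone())
            .ok()?
            .name
    }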
- .content, + .content + .clone(), ) .expect("from_value::> can never fail") .deserialize() diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 3f91324..b33b550 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -362,7 +362,8 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .content, + .content + .clone(), ) .expect("Raw::from_value always works") .deserialize() @@ -463,7 +464,8 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .content, + .content + .clone(), ) .expect("database contains invalid PDU") .deserialize() diff --git a/src/client_server/state.rs b/src/client_server/state.rs index c431ac0..be52834 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -92,7 +92,7 @@ pub async fn get_state_events_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_value::(event.content) + serde_json::from_value::(event.content.clone()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -139,7 +139,7 @@ pub async fn get_state_events_for_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_value::(event.content) + serde_json::from_value::(event.content.clone()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -165,7 +165,7 @@ pub async fn get_state_events_for_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::from_value(event.content) + content: serde_json::from_value(event.content.clone()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -190,7 +190,7 @@ pub async fn get_state_events_for_empty_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? 
.map(|event| { - serde_json::from_value::(event.content) + serde_json::from_value::(event.content.clone()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -216,7 +216,7 @@ pub async fn get_state_events_for_empty_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::from_value(event.content) + content: serde_json::from_value(event.content.clone()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 1c078e9..69511fa 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,21 +1,22 @@ use super::State; -use crate::{ConduitResult, Database, Error, Result, Ruma}; +use crate::{ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use log::error; use ruma::{ - api::client::r0::sync::sync_events, + api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, serde::Raw, - RoomId, UserId, + DeviceId, RoomId, UserId, }; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, tokio}; use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, + collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, sync::Arc, time::Duration, }; +use tokio::sync::watch::Sender; + +#[cfg(feature = "conduit_bin")] +use rocket::{get, tokio}; /// # `GET /_matrix/client/r0/sync` /// @@ -36,21 +37,134 @@ use std::{ pub async fn sync_events_route( db: State<'_, Arc>, body: Ruma>, -) -> ConduitResult { +) -> std::result::Result, RumaResponse> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let mut rx = match db + .globals + .sync_receivers + .write() + .unwrap() + .entry((sender_user.clone(), sender_device.clone())) + { + Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(None); + + tokio::spawn(sync_helper_wrapper( + Arc::clone(&db), + sender_user.clone(), + sender_device.clone(), + body.since.clone(), + body.full_state, + body.timeout, + tx, + )); + + v.insert((body.since.clone(), rx)).1.clone() + } + Entry::Occupied(mut o) => { + if o.get().0 != body.since { + let (tx, rx) = tokio::sync::watch::channel(None); + + tokio::spawn(sync_helper_wrapper( + Arc::clone(&db), + sender_user.clone(), + sender_device.clone(), + body.since.clone(), + body.full_state, + body.timeout, + tx, + )); + + o.insert((body.since.clone(), rx.clone())); + + rx + } else { + o.get().1.clone() + } + } + }; + + let we_have_to_wait = rx.borrow().is_none(); + if we_have_to_wait { + let _ = rx.changed().await; + } + + let result = match rx + .borrow() + .as_ref() + .expect("When sync channel changes it's always set to some") + { + Ok(response) => Ok(response.clone()), + Err(error) => Err(error.to_response()), + }; + + result +} + +pub async fn sync_helper_wrapper( + db: Arc, + sender_user: UserId, + sender_device: Box, + since: Option, + full_state: bool, + timeout: Option, + tx: Sender>>, +) { + let r = sync_helper( + Arc::clone(&db), + sender_user.clone(), + sender_device.clone(), + since.clone(), + full_state, + timeout, + ) + .await; + + if let Ok((_, caching_allowed)) = r { + if !caching_allowed { + match db + .globals + .sync_receivers + .write() + .unwrap() + .entry((sender_user, sender_device)) + { + Entry::Occupied(o) => { + // Only remove if the device didn't start a different /sync already + if 
o.get().0 == since { + o.remove(); + } + } + Entry::Vacant(_) => {} + } + } + } + + let _ = tx.send(Some(r.map(|(r, _)| r.into()))); +} + +async fn sync_helper( + db: Arc, + sender_user: UserId, + sender_device: Box, + since: Option, + full_state: bool, + timeout: Option, + // bool = caching allowed +) -> std::result::Result<(sync_events::Response, bool), Error> { // TODO: match body.set_presence { db.rooms.edus.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(sender_user, sender_device); + let watcher = db.watch(&sender_user, &sender_device); - let next_batch = db.globals.current_count()?.to_string(); + let next_batch = db.globals.current_count()?; + let next_batch_string = next_batch.to_string(); let mut joined_rooms = BTreeMap::new(); - let since = body - .since + let since = since .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); @@ -114,10 +228,11 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. - let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); + let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next(); + let pdus_after_since = db .rooms - .pdus_after(sender_user, &room_id, since) + .pdus_after(&sender_user, &room_id, since) .next() .is_some(); @@ -256,11 +371,11 @@ pub async fn sync_events_route( .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender - sender_user != user_id + &sender_user != user_id }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, sender_user, user_id, &room_id) + !share_encrypted_room(&db, &sender_user, user_id, &room_id) .unwrap_or(false) }), ); @@ -335,7 +450,7 @@ pub async fn sync_events_route( let state_events = if joined_since_last_sync { current_state - .into_iter() + .iter() .map(|(_, pdu)| pdu.to_sync_state_event()) .collect() } else { @@ -520,7 +635,7 @@ pub async fn sync_events_route( account_data: sync_events::RoomAccountData { events: Vec::new() }, timeline: sync_events::Timeline { limited: false, - prev_batch: Some(next_batch.clone()), + prev_batch: Some(next_batch_string.clone()), events: Vec::new(), }, state: sync_events::State { @@ -573,10 +688,10 @@ pub async fn sync_events_route( // Remove all to-device events the device received *last time* db.users - .remove_to_device_events(sender_user, sender_device, since)?; + .remove_to_device_events(&sender_user, &sender_device, since)?; let response = sync_events::Response { - next_batch, + next_batch: next_batch_string, rooms: sync_events::Rooms { leave: left_rooms, join: joined_rooms, @@ -604,20 +719,22 @@ pub async fn sync_events_route( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_user)? > since + device_one_time_keys_count: if db.users.last_one_time_keys_update(&sender_user)? > since || since == 0 { - db.users.count_one_time_keys(sender_user, sender_device)? + db.users.count_one_time_keys(&sender_user, &sender_device)? 
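The sync rewrite above caches in-flight work per (user, device): the first request for a given `since` token spawns `sync_helper_wrapper` and stores a `tokio::sync::watch` receiver in `globals.sync_receivers`; an identical request arriving while that task runs simply awaits the same receiver. A stripped-down sketch of the deduplication, with a hypothetical `compute` standing in for the real sync helper:

    use std::{
        collections::BTreeMap,
        sync::{Arc, RwLock},
    };
    use tokio::sync::watch;

    type Key = (String, String); // (user id, device id) in the real code
    type Shared = Arc<RwLock<BTreeMap<Key, (Option<String>, watch::Receiver<Option<String>>)>>>;

    // Stand-in for sync_helper(); the real function builds a sync_events::Response.
    async fn compute(since: Option<String>) -> String {
        format!("response after {:?}", since)
    }

    async fn cached_sync(map: Shared, key: Key, since: Option<String>) -> String {
        let mut rx = {
            let mut guard = map.write().unwrap();
            let reuse = match guard.get(&key) {
                // An identical request is already running: share its receiver.
                Some((prev_since, rx)) if *prev_since == since => Some(rx.clone()),
                _ => None,
            };
            match reuse {
                Some(rx) => rx,
                None => {
                    let (tx, rx) = watch::channel(None);
                    let since_for_task = since.clone();
                    tokio::spawn(async move {
                        let _ = tx.send(Some(compute(since_for_task).await));
                    });
                    guard.insert(key, (since, rx.clone()));
                    rx
                }
            }
        };
        // Wait until the worker has published a result, then hand back a copy.
        if rx.borrow().is_none() {
            let _ = rx.changed().await;
        }
        rx.borrow().clone().expect("set before the channel changes")
    }

The std `RwLock` is only held while the map is inspected or updated, never across an await point; the real code additionally removes the entry again in `sync_helper_wrapper` when the result is not allowed to be cached.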
} else { BTreeMap::new() }, to_device: sync_events::ToDevice { - events: db.users.get_to_device_events(sender_user, sender_device)?, + events: db + .users + .get_to_device_events(&sender_user, &sender_device)?, }, }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !body.full_state + if !full_state && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() @@ -627,14 +744,15 @@ pub async fn sync_events_route( { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or_default(); + let mut duration = timeout.unwrap_or_default(); if duration.as_secs() > 30 { duration = Duration::from_secs(30); } let _ = tokio::time::timeout(duration, watcher).await; + Ok((response, false)) + } else { + Ok((response, since != next_batch)) // Only cache if we made progress } - - Ok(response.into()) } #[tracing::instrument(skip(db))] diff --git a/src/database.rs b/src/database.rs index 2846928..8968010 100644 --- a/src/database.rs +++ b/src/database.rs @@ -17,6 +17,7 @@ use crate::{utils, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; use log::error; +use lru_cache::LruCache; use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; @@ -189,6 +190,7 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, prevevent_parent: builder.open_tree("prevevent_parent")?, + pdu_cache: RwLock::new(LruCache::new(1_000_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index f81c9de..bf292eb 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -65,7 +65,7 @@ impl DatabaseEngine for SledEngine { sled::Config::default() .path(&config.database_path) .cache_capacity(config.cache_capacity as u64) - .use_compression(true) + .use_compression(false) .open()?, ))) } diff --git a/src/database/globals.rs b/src/database/globals.rs index 1ce87bd..4859ef4 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,8 +1,11 @@ -use crate::{database::Config, utils, Error, Result}; +use crate::{database::Config, utils, ConduitResult, Error, Result}; use log::{error, info}; use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, - EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, + api::{ + client::r0::sync::sync_events, + federation::discovery::{ServerSigningKeys, VerifyKey}, + }, + DeviceId, EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, }; use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ @@ -35,6 +38,15 @@ pub struct Globals { pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, + pub sync_receivers: RwLock< + BTreeMap< + (UserId, Box), + ( + Option, + tokio::sync::watch::Receiver>>, + ), // since, rx + >, + >, } struct MatrixServerVerifier { @@ -153,6 +165,7 @@ impl Globals { bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), + sync_receivers: RwLock::new(BTreeMap::new()), }; fs::create_dir_all(s.get_media_folder())?; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 
358c3c9..a27bf2c 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -203,7 +203,7 @@ pub fn get_actions<'a>( .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? .map(|ev| { - serde_json::from_value(ev.content) + serde_json::from_value(ev.content.clone()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) }) .transpose()? diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f19d4b9..e23b804 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -5,6 +5,7 @@ use member::MembershipState; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::{debug, error, warn}; +use lru_cache::LruCache; use regex::Regex; use ring::digest; use ruma::{ @@ -23,7 +24,7 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, - sync::Arc, + sync::{Arc, RwLock}, }; use super::{abstraction::Tree, admin::AdminCommand, pusher}; @@ -81,6 +82,8 @@ pub struct Rooms { /// RoomId + EventId -> Parent PDU EventId. pub(super) prevevent_parent: Arc, + + pub(super) pdu_cache: RwLock>>, } impl Rooms { @@ -105,8 +108,8 @@ impl Rooms { pub fn state_full( &self, shortstatehash: u64, - ) -> Result> { - Ok(self + ) -> Result>> { + let state = self .stateid_shorteventid .scan_prefix(shortstatehash.to_be_bytes().to_vec()) .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) @@ -133,7 +136,9 @@ impl Rooms { )) }) .filter_map(|r| r.ok()) - .collect()) + .collect(); + + Ok(state) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -179,7 +184,7 @@ impl Rooms { shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) } @@ -234,7 +239,7 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { - events.insert((event_type, state_key), Arc::new(pdu)); + events.insert((event_type, state_key), pdu); } else { // This is okay because when creating a new room some events were not created yet debug!( @@ -396,7 +401,7 @@ impl Rooms { pub fn room_state_full( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_full(current_shortstatehash) } else { @@ -426,7 +431,7 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_get(current_shortstatehash, event_type, state_key) } else { @@ -514,21 +519,42 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid + pub fn get_pdu(&self, event_id: &EventId) -> Result>> { + if let Some(p) = self.pdu_cache.write().unwrap().get_mut(&event_id) { + return Ok(Some(Arc::clone(p))); + } + + if let Some(pdu) = self + .eventid_pduid .get(event_id.as_bytes())? 
.map_or_else::, _, _>( - || self.eventid_outlierpdu.get(event_id.as_bytes()), + || { + let r = self.eventid_outlierpdu.get(event_id.as_bytes()); + r + }, |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + let r = Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) + })?)); + r }, )? .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + let r = serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + .map(Arc::new); + r }) - .transpose() + .transpose()? + { + self.pdu_cache + .write() + .unwrap() + .insert(event_id.clone(), Arc::clone(&pdu)); + Ok(Some(pdu)) + } else { + Ok(None) + } } /// Returns the pdu. @@ -663,7 +689,7 @@ impl Rooms { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content) + utils::to_canonical_object(prev_state.content.clone()) .expect("event is valid, we just created it"), ), ); @@ -1204,7 +1230,7 @@ impl Rooms { let create_prev_event = if prev_events.len() == 1 && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) { - create_event.map(Arc::new) + create_event } else { None }; @@ -1235,10 +1261,10 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content); + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), ); } } @@ -1583,7 +1609,7 @@ impl Rooms { .and_then(|create| { serde_json::from_value::< Raw, - >(create.content) + >(create.content.clone()) .expect("Raw::from_value always works") .deserialize() .ok() @@ -1764,7 +1790,8 @@ impl Rooms { ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? 
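`get_pdu` now consults an in-memory `lru_cache::LruCache` (guarded by an `RwLock`) before it touches the `eventid_pduid` and `eventid_outlierpdu` trees, and it stores `Arc<PduEvent>` so every caller shares one deserialized copy. A minimal read-through sketch of the same idea, with a hypothetical `load_from_db` standing in for the tree lookups:

    use std::sync::{Arc, RwLock};

    use lru_cache::LruCache;

    struct PduEvent; // stand-in for the real event type

    struct Store {
        pdu_cache: RwLock<LruCache<String, Arc<PduEvent>>>,
    }

    impl Store {
        fn new(capacity: usize) -> Self {
            Self {
                pdu_cache: RwLock::new(LruCache::new(capacity)),
            }
        }

        // Hypothetical slow path that would deserialize the PDU from the database trees.
        fn load_from_db(&self, _event_id: &str) -> Option<PduEvent> {
            None
        }

        fn get_pdu(&self, event_id: &str) -> Option<Arc<PduEvent>> {
            // `get_mut` (not `get`) because a cache hit also updates the LRU order.
            if let Some(p) = self.pdu_cache.write().unwrap().get_mut(event_id) {
                return Some(Arc::clone(p));
            }
            let pdu = Arc::new(self.load_from_db(event_id)?);
            self.pdu_cache
                .write()
                .unwrap()
                .insert(event_id.to_owned(), Arc::clone(&pdu));
            Some(pdu)
        }
    }

Two threads can still race to fill the same entry, but the worst case is a redundant database read, which is why a simple lock around the cache is enough here.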
- .content, + .content + .clone(), ) .expect("from_value::> can never fail") .deserialize() diff --git a/src/error.rs b/src/error.rs index 4f363ff..501c77d 100644 --- a/src/error.rs +++ b/src/error.rs @@ -61,7 +61,6 @@ pub enum Error { BadDatabase(&'static str), #[error("uiaa")] Uiaa(UiaaInfo), - #[error("{0}: {1}")] BadRequest(ErrorKind, &'static str), #[error("{0}")] @@ -80,19 +79,16 @@ impl Error { } } -#[cfg(feature = "conduit_bin")] -impl<'r, 'o> Responder<'r, 'o> for Error -where - 'o: 'r, -{ - fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { +impl Error { + pub fn to_response(&self) -> RumaResponse { if let Self::Uiaa(uiaainfo) = self { - return RumaResponse::from(UiaaResponse::AuthResponse(uiaainfo)).respond_to(r); + return RumaResponse(UiaaResponse::AuthResponse(uiaainfo.clone())); } - if let Self::FederationError(origin, mut error) = self { + if let Self::FederationError(origin, error) = self { + let mut error = error.clone(); error.message = format!("Answer from {}: {}", origin, error.message); - return RumaResponse::from(error).respond_to(r); + return RumaResponse(UiaaResponse::MatrixError(error)); } let message = format!("{}", self); @@ -119,11 +115,20 @@ where warn!("{}: {}", status_code, message); - RumaResponse::from(RumaError { + RumaResponse(UiaaResponse::MatrixError(RumaError { kind, message, status_code, - }) - .respond_to(r) + })) + } +} + +#[cfg(feature = "conduit_bin")] +impl<'r, 'o> Responder<'r, 'o> for Error +where + 'o: 'r, +{ + fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { + self.to_response().respond_to(r) } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 2912a57..8c22f79 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::OutgoingResponse, + api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, identifiers::{DeviceId, UserId}, signatures::CanonicalJsonValue, Outgoing, ServerName, @@ -335,49 +335,60 @@ impl Deref for Ruma { /// This struct converts ruma responses into rocket http responses. 
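Splitting `to_response` out of the `Responder` impl makes the Matrix error mapping usable where no rocket `Request` is in scope, which is exactly what the cached /sync path needs when it stores a `Result` and later turns the error into a response. A small sketch of that shape, with hypothetical types in place of `UiaaResponse` and `RumaResponse`:

    // Hypothetical, simplified error and response types.
    enum AppError {
        Forbidden(&'static str),
        Internal(String),
    }

    struct SimpleResponse {
        status: u16,
        body: String,
    }

    impl AppError {
        // A pure conversion: callable from request handlers and from background tasks alike.
        fn to_response(&self) -> SimpleResponse {
            match self {
                AppError::Forbidden(msg) => SimpleResponse {
                    status: 403,
                    body: (*msg).to_owned(),
                },
                AppError::Internal(msg) => SimpleResponse {
                    status: 500,
                    body: msg.clone(),
                },
            }
        }
    }

    fn main() {
        let err = AppError::Forbidden("Registration has been disabled.");
        let res = err.to_response();
        println!("{} {}", res.status, res.body);
    }

The framework-facing `Responder` impl then shrinks to a one-liner that delegates to `to_response`, as the hunk above shows.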
pub type ConduitResult = std::result::Result, Error>; -pub struct RumaResponse(pub T); +pub fn response(response: RumaResponse) -> response::Result<'static> { + let http_response = response + .0 + .try_into_http_response::>() + .map_err(|_| Status::InternalServerError)?; -impl From for RumaResponse { + let mut response = rocket::response::Response::build(); + + let status = http_response.status(); + response.raw_status(status.into(), ""); + + for header in http_response.headers() { + response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); + } + + let http_body = http_response.into_body(); + + response.sized_body(http_body.len(), Cursor::new(http_body)); + + response.raw_header("Access-Control-Allow-Origin", "*"); + response.raw_header( + "Access-Control-Allow-Methods", + "GET, POST, PUT, DELETE, OPTIONS", + ); + response.raw_header( + "Access-Control-Allow-Headers", + "Origin, X-Requested-With, Content-Type, Accept, Authorization", + ); + response.raw_header("Access-Control-Max-Age", "86400"); + response.ok() +} + +#[derive(Clone)] +pub struct RumaResponse(pub T); + +impl From for RumaResponse { fn from(t: T) -> Self { Self(t) } } +impl From for RumaResponse { + fn from(t: Error) -> Self { + t.to_response() + } +} + #[cfg(feature = "conduit_bin")] impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where - T: Send + OutgoingResponse, 'o: 'r, + T: OutgoingResponse, { fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - let http_response = self - .0 - .try_into_http_response::>() - .map_err(|_| Status::InternalServerError)?; - - let mut response = rocket::response::Response::build(); - - let status = http_response.status(); - response.raw_status(status.into(), ""); - - for header in http_response.headers() { - response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); - } - - let http_body = http_response.into_body(); - - response.sized_body(http_body.len(), Cursor::new(http_body)); - - response.raw_header("Access-Control-Allow-Origin", "*"); - response.raw_header( - "Access-Control-Allow-Methods", - "GET, POST, PUT, DELETE, OPTIONS", - ); - response.raw_header( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization", - ); - response.raw_header("Access-Control-Max-Age", "86400"); - response.ok() + response(self) } } diff --git a/src/server_server.rs b/src/server_server.rs index 961cc9d..a9d8b8c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -966,7 +966,7 @@ pub fn handle_incoming_pdu<'a>( auth_cache .get(&incoming_pdu.auth_events[0]) .cloned() - .filter(|maybe_create| **maybe_create == create_event) + .filter(|maybe_create| **maybe_create == *create_event) } else { None }; @@ -1181,15 +1181,12 @@ pub fn handle_incoming_pdu<'a>( let mut leaf_state = db .rooms .state_full(pdu_shortstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())? - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect::>(); + .map_err(|_| "Failed to ask db for room state.".to_owned())?; if let Some(state_key) = &leaf_pdu.state_key { // Now it's the state after let key = (leaf_pdu.kind.clone(), state_key.clone()); - leaf_state.insert(key, Arc::new(leaf_pdu)); + leaf_state.insert(key, leaf_pdu); } fork_states.insert(leaf_state); @@ -1209,10 +1206,7 @@ pub fn handle_incoming_pdu<'a>( let current_state = db .rooms .room_state_full(&room_id) - .map_err(|_| "Failed to load room state.".to_owned())? 
- .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect::>(); + .map_err(|_| "Failed to load room state.".to_owned())?; fork_states.insert(current_state.clone()); @@ -1424,7 +1418,7 @@ pub(crate) fn fetch_and_handle_events<'a>( auth_cache, ) .await?; - Arc::new(pdu) + pdu } None => { // d. Ask origin server over federation @@ -1838,7 +1832,7 @@ pub fn get_event_authorization_route( .difference(&auth_chain_ids) .cloned(), ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); + auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); let pdu_json = PduEvent::convert_to_outgoing_federation_event( db.rooms.get_pdu_json(&event_id)?.unwrap(), @@ -1901,7 +1895,7 @@ pub fn get_room_state_route( .difference(&auth_chain_ids) .cloned(), ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); + auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); let pdu_json = PduEvent::convert_to_outgoing_federation_event( db.rooms.get_pdu_json(&event_id)?.unwrap(), @@ -1954,7 +1948,7 @@ pub fn get_room_state_ids_route( .difference(&auth_chain_ids) .cloned(), ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); + auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); } else { warn!("Could not find pdu mentioned in auth events."); } @@ -2022,7 +2016,7 @@ pub fn create_join_event_template_route( let create_prev_event = if prev_events.len() == 1 && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) { - create_event.map(Arc::new) + create_event } else { None }; @@ -2066,10 +2060,10 @@ pub fn create_join_event_template_route( let mut unsigned = BTreeMap::new(); if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content); + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), ); } @@ -2220,7 +2214,7 @@ pub async fn create_join_event_route( .difference(&auth_chain_ids) .cloned(), ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); + auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); } else { warn!("Could not find pdu mentioned in auth events."); } From 98f1480e2b0900d02d92c6bcb8a872cd966d9205 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 29 Jun 2021 20:18:52 -0400 Subject: [PATCH 0629/1727] Remove auth_cache using a closure to fetch events in state-res --- Cargo.lock | 88 +++++------ Cargo.toml | 4 +- rust-toolchain | 2 +- src/client_server/membership.rs | 36 ++--- src/database/abstraction.rs | 2 +- src/server_server.rs | 270 ++++++++++++-------------------- 6 files changed, 163 insertions(+), 239 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9bce96..3c9de4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -272,9 +272,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +checksum = "44c32f031ea41b4291d695026c023b95d59db2d8a2c7640800ed56bc8f510f22" [[package]] name = "const_fn" @@ -394,9 +394,9 @@ dependencies = [ [[package]] name = "der" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" +checksum = 
"49f215f706081a44cb702c71c39a52c05da637822e9c1645a50b7202689e982d" dependencies = [ "const-oid", ] @@ -1475,9 +1475,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" +checksum = "09d156817ae0125e8aa5067710b0db24f0984830614f99875a70aa5e3b74db69" dependencies = [ "der", "spki", @@ -1882,8 +1882,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.1.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "assign", "js_int", @@ -1903,8 +1903,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.17.1" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "bytes", "http", @@ -1919,8 +1919,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.17.1" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1930,8 +1930,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.3.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "ruma-api", "ruma-common", @@ -1944,8 +1944,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.11.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "assign", "bytes", @@ -1964,8 +1964,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.5.3" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.5.4" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "indexmap", "js_int", @@ -1979,8 +1979,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.23.2" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "indoc", "js_int", @@ -1994,8 +1994,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.23.2" +source = 
"git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2005,8 +2005,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "js_int", "ruma-api", @@ -2020,8 +2020,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.19.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.19.4" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "paste", "rand 0.8.3", @@ -2034,8 +2034,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.19.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.19.4" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2045,12 +2045,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" [[package]] name = "ruma-identity-service-api" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "js_int", "ruma-api", @@ -2062,8 +2062,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "js_int", "ruma-api", @@ -2077,8 +2077,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.4.1" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "bytes", "form_urlencoded", @@ -2091,8 +2091,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.4.1" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2102,8 +2102,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.7.2" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.8.0" 
+source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2115,13 +2115,12 @@ dependencies = [ "sha2", "thiserror", "tracing", - "untrusted", ] [[package]] name = "ruma-state-res" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=5a7e2cddcf257e367465cced51442c91e8f557c9#5a7e2cddcf257e367465cced51442c91e8f557c9" +version = "0.2.0" +source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" dependencies = [ "itertools 0.10.0", "js_int", @@ -2130,7 +2129,6 @@ dependencies = [ "ruma-events", "ruma-identifiers", "ruma-serde", - "ruma-signatures", "serde", "serde_json", "thiserror", @@ -2444,9 +2442,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" +checksum = "987637c5ae6b3121aba9d513f869bd2bff11c4cc086c22473befd6649c0bd521" dependencies = [ "der", ] diff --git a/Cargo.toml b/Cargo.toml index bb44918..c9a8143 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,6 @@ repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.1.0" edition = "2018" -rust = "1.50" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -18,7 +17,8 @@ rust = "1.50" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "5a7e2cddcf257e367465cced51442c91e8f557c9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "5a7e2cddcf257e367465cced51442c91e8f557c9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/DevinR528/ruma", branch = "state-closure", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/rust-toolchain b/rust-toolchain index 5a5c721..ba0a719 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.50.0 +1.51.0 diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 87fead2..5c57b68 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -25,7 +25,7 @@ use ruma::{ EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, EventMap, RoomVersion}, + state_res::{self, RoomVersion}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ @@ -883,7 +883,6 @@ pub async fn invite_helper( .await?; let pub_key_map = RwLock::new(BTreeMap::new()); - let mut auth_cache 
= EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { @@ -906,26 +905,19 @@ pub async fn invite_helper( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = server_server::handle_incoming_pdu( - &origin, - &event_id, - value, - true, - &db, - &pub_key_map, - &mut auth_cache, - ) - .await - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + let pdu_id = + server_server::handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) + .await + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; for server in db .rooms diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index bf292eb..f81c9de 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -65,7 +65,7 @@ impl DatabaseEngine for SledEngine { sled::Config::default() .path(&config.database_path) .cache_capacity(config.cache_capacity as u64) - .use_compression(false) + .use_compression(true) .open()?, ))) } diff --git a/src/server_server.rs b/src/server_server.rs index a9d8b8c..fa91758 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -45,7 +45,7 @@ use ruma::{ receipt::ReceiptType, serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, Event, EventMap, RoomVersion, StateMap}, + state_res::{self, Event, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -612,7 +612,7 @@ pub async fn send_transaction_message_route( // TODO: This could potentially also be some sort of trie (suffix tree) like structure so // that once an auth event is known it would know (using indexes maybe) all of the auth // events that it references. - let mut auth_cache = EventMap::new(); + // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -627,17 +627,9 @@ pub async fn send_transaction_message_route( let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - handle_incoming_pdu( - &body.origin, - &event_id, - value, - true, - &db, - &pub_key_map, - &mut auth_cache, - ) - .await - .map(|_| ()), + handle_incoming_pdu(&body.origin, &event_id, value, true, &db, &pub_key_map) + .await + .map(|_| ()), ); let elapsed = start_time.elapsed(); @@ -820,7 +812,6 @@ pub fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, - auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Option>, String> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -908,15 +899,9 @@ pub fn handle_incoming_pdu<'a>( // 5. 
Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // EDIT: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_events( - db, - origin, - &incoming_pdu.auth_events, - pub_key_map, - auth_cache, - ) - .await - .map_err(|e| e.to_string())?; + fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, pub_key_map) + .await + .map_err(|e| e.to_string())?; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events debug!( @@ -927,9 +912,13 @@ pub fn handle_incoming_pdu<'a>( // Build map of auth events let mut auth_events = BTreeMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = auth_cache.get(id).ok_or_else(|| { - "Auth event not found, event failed recursive auth checks.".to_string() - })?; + let auth_event = db + .rooms + .get_pdu(id) + .map_err(|e| e.to_string())? + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + })?; match auth_events.entry(( auth_event.kind.clone(), @@ -963,9 +952,9 @@ pub fn handle_incoming_pdu<'a>( let previous_create = if incoming_pdu.auth_events.len() == 1 && incoming_pdu.prev_events == incoming_pdu.auth_events { - auth_cache - .get(&incoming_pdu.auth_events[0]) - .cloned() + db.rooms + .get_pdu(&incoming_pdu.auth_events[0]) + .map_err(|e| e.to_string())? .filter(|maybe_create| **maybe_create == *create_event) } else { None @@ -1008,7 +997,6 @@ pub fn handle_incoming_pdu<'a>( debug!("Requesting state at event."); let mut state_at_incoming_event = None; - let mut incoming_auth_events = Vec::new(); if incoming_pdu.prev_events.len() == 1 { let prev_event = &incoming_pdu.prev_events[0]; @@ -1031,7 +1019,7 @@ pub fn handle_incoming_pdu<'a>( state_vec.push(prev_event.clone()); } state_at_incoming_event = Some( - fetch_and_handle_events(db, origin, &state_vec, pub_key_map, auth_cache) + fetch_and_handle_events(db, origin, &state_vec, pub_key_map) .await .map_err(|_| "Failed to fetch state events locally".to_owned())? 
.into_iter() @@ -1069,18 +1057,12 @@ pub fn handle_incoming_pdu<'a>( { Ok(res) => { debug!("Fetching state events at event."); - let state_vec = match fetch_and_handle_events( - &db, - origin, - &res.pdu_ids, - pub_key_map, - auth_cache, - ) - .await - { - Ok(state) => state, - Err(_) => return Err("Failed to fetch state events.".to_owned()), - }; + let state_vec = + match fetch_and_handle_events(&db, origin, &res.pdu_ids, pub_key_map).await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch state events.".to_owned()), + }; let mut state = BTreeMap::new(); for pdu in state_vec { @@ -1106,14 +1088,8 @@ pub fn handle_incoming_pdu<'a>( } debug!("Fetching auth chain events at event."); - incoming_auth_events = match fetch_and_handle_events( - &db, - origin, - &res.auth_chain_ids, - pub_key_map, - auth_cache, - ) - .await + match fetch_and_handle_events(&db, origin, &res.auth_chain_ids, pub_key_map) + .await { Ok(state) => state, Err(_) => return Err("Failed to fetch auth chain.".to_owned()), @@ -1243,14 +1219,8 @@ pub fn handle_incoming_pdu<'a>( for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_and_handle_events( - &db, - origin, - &[auth_id.clone()], - pub_key_map, - auth_cache, - ) - .await + match fetch_and_handle_events(&db, origin, &[auth_id.clone()], pub_key_map) + .await { // This should always contain exactly one element when Ok Ok(events) => state_auth.extend_from_slice(&events), @@ -1259,31 +1229,9 @@ pub fn handle_incoming_pdu<'a>( } } } - auth_cache.extend( - map.iter() - .map(|pdu| (pdu.1.event_id.clone(), pdu.1.clone())), - ); auth_events.push(state_auth); } - // Add everything we will need to event_map - auth_cache.extend( - auth_events - .iter() - .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id.clone(), pdu.clone()))) - .flatten(), - ); - auth_cache.extend( - incoming_auth_events - .into_iter() - .map(|pdu| (pdu.event_id().clone(), pdu)), - ); - auth_cache.extend( - state_after - .into_iter() - .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), - ); - match state_res::StateResolution::resolve( &room_id, room_version_id, @@ -1299,7 +1247,13 @@ pub fn handle_incoming_pdu<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - auth_cache, + &|id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, ) { Ok(new_state) => new_state, Err(_) => { @@ -1373,7 +1327,6 @@ pub(crate) fn fetch_and_handle_events<'a>( origin: &'a ServerName, events: &'a [EventId], pub_key_map: &'a RwLock>>, - auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Vec>, Error> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { @@ -1397,84 +1350,73 @@ pub(crate) fn fetch_and_handle_events<'a>( continue; } } - // a. Look at auth cache - let pdu = match auth_cache.get(id) { + + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu checks both) + let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => { - // We already have the auth chain for events in cache - pdu.clone() + trace!("Found {} in db", id); + pdu } - // b. Look in the main timeline (pduid_pdu tree) - // c. Look at outlier pdu tree - // (get_pdu checks both) - None => match db.rooms.get_pdu(&id)? 
{ - Some(pdu) => { - trace!("Found {} in db", id); - // We need to fetch the auth chain - let _ = fetch_and_handle_events( - db, + None => { + // c. Ask origin server over federation + debug!("Fetching {} over federation.", id); + match db + .sending + .send_federation_request( + &db.globals, origin, - &pdu.auth_events, - pub_key_map, - auth_cache, + get_event::v1::Request { event_id: &id }, ) - .await?; - pdu - } - None => { - // d. Ask origin server over federation - debug!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, + .await + { + Ok(res) => { + debug!("Got {} over federation", id); + let (event_id, mut value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu)?; + // This will also fetch the auth chain + match handle_incoming_pdu( origin, - get_event::v1::Request { event_id: &id }, + &event_id, + value.clone(), + false, + db, + pub_key_map, ) .await - { - Ok(res) => { - debug!("Got {} over federation", id); - let (event_id, mut value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - // This will also fetch the auth chain - match handle_incoming_pdu( - origin, - &event_id, - value.clone(), - false, - db, - pub_key_map, - auth_cache, - ) - .await - { - Ok(_) => { - value.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), - ); + { + Ok(_) => { + value.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.into()), + ); - Arc::new(serde_json::from_value( - serde_json::to_value(value).expect("canonicaljsonobject is valid value"), - ).expect("This is possible because handle_incoming_pdu worked")) - } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", id, e); - back_off(id.clone()); - continue; - } + Arc::new( + serde_json::from_value( + serde_json::to_value(value) + .expect("canonicaljsonobject is valid value"), + ) + .expect( + "This is possible because handle_incoming_pdu worked", + ), + ) + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", id, e); + back_off(id.clone()); + continue; } } - Err(_) => { - warn!("Failed to fetch event: {}", id); - back_off(id.clone()); - continue; - } + } + Err(_) => { + warn!("Failed to fetch event: {}", id); + back_off(id.clone()); + continue; } } - }, + } }; - auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); pdus.push(pdu); } Ok(pdus) @@ -2155,7 +2097,7 @@ pub async fn create_join_event_route( ))?; let pub_key_map = RwLock::new(BTreeMap::new()); - let mut auth_cache = EventMap::new(); + // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&body.pdu) { @@ -2178,26 +2120,18 @@ pub async fn create_join_event_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = handle_incoming_pdu( - &origin, - &event_id, - value, - true, - &db, - &pub_key_map, - &mut auth_cache, - ) - .await - .map_err(|_| { - Error::BadRequest( + let pdu_id = handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) + .await + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? 
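The state-resolution call above no longer receives a pre-built `auth_cache`; it gets a closure that resolves event IDs on demand through `db.rooms.get_pdu`, which commit 0628 made cheap via the PDU cache. A minimal sketch of that inversion of control, assuming a much simpler `resolve` signature than ruma's real one:

    use std::{collections::HashMap, sync::Arc};

    #[derive(Debug)]
    struct PduEvent {
        event_id: String,
    }

    // Simplified stand-in for state-res: the algorithm pulls events lazily via `fetch`
    // instead of requiring the caller to pre-collect them into a shared map.
    fn resolve<F>(conflicted: &[String], fetch: F) -> Vec<Arc<PduEvent>>
    where
        F: Fn(&str) -> Option<Arc<PduEvent>>,
    {
        conflicted
            .iter()
            .filter_map(|id| fetch(id.as_str()))
            .collect()
    }

    fn main() {
        // Pretend this map is the PDU store behind get_pdu.
        let mut store = HashMap::new();
        store.insert(
            "$a".to_owned(),
            Arc::new(PduEvent {
                event_id: "$a".to_owned(),
            }),
        );

        // The caller only supplies a lookup closure; missing events simply come back as None.
        let resolved = resolve(&["$a".to_owned(), "$missing".to_owned()], |id| {
            store.get(id).cloned()
        });
        println!("resolved {} of 2 events: {:?}", resolved.len(), resolved);
    }

The same shape shows up in the error handling of the real call: a failed lookup is logged inside the closure and turned into `None`, so state resolution proceeds with whatever subset of events is actually available.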
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + "Could not accept incoming PDU as timeline event.", + ))?; let state_ids = db.rooms.state_full_ids(shortstatehash)?; From 1bb84a8e2dc08e29682f8ef322da6dbf071f1646 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 30 Jun 2021 07:40:06 -0400 Subject: [PATCH 0630/1727] Fix docs for fetch_and_handle_events --- src/server_server.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index fa91758..2bcfd2b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1313,14 +1313,10 @@ pub fn handle_incoming_pdu<'a>( /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// -/// a. Look in the auth_cache -/// b. Look in the main timeline (pduid_pdu tree) -/// c. Look at outlier pdu tree -/// d. Ask origin server over federation -/// e. TODO: Ask other servers over federation? -/// -/// If the event is unknown to the `auth_cache` it is added. This guarantees that any -/// event we need to know of will be present. +/// a. Look in the main timeline (pduid_pdu tree) +/// b. Look at outlier pdu tree +/// c. Ask origin server over federation +/// d. TODO: Ask other servers over federation? //#[tracing::instrument(skip(db, key_map, auth_cache))] pub(crate) fn fetch_and_handle_events<'a>( db: &'a Database, From b2d55160585f15c93695f1dc60f3ca3eb7967911 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Tue, 13 Apr 2021 12:15:58 -0600 Subject: [PATCH 0631/1727] add support for arbitrary proxies --- Cargo.lock | 13 +++++ Cargo.toml | 2 +- src/database.rs | 121 ++++++++++++++++++++++++++++++++++++++++ src/database/globals.rs | 10 ++-- src/utils.rs | 27 +++++++++ 5 files changed, 168 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3d7408..c31894a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1761,6 +1761,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-rustls", + "tokio-socks", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2732,6 +2733,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "tokio-socks" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51165dfa029d2a65969413a6cc96f354b86b464498702f174a4efa13608fd8c0" +dependencies = [ + "either", + "futures-util", + "thiserror", + "tokio", +] + [[package]] name = "tokio-util" version = "0.6.6" diff --git a/Cargo.toml b/Cargo.toml index 96260ec..4f7095d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.3", default-features = false, features = ["rustls-tls-native-roots"] } +reqwest = { version = "0.11.3", default-features = false, features = ["rustls-tls-native-roots", "socks"] } # Custom TLS verifier rustls = { version = "0.19", features = ["dangerous_configuration"] } rustls-native-certs = "0.5.0" diff --git a/src/database.rs b/src/database.rs index 2846928..52d92a5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -46,6 +46,8 @@ pub struct Config { allow_federation: bool, #[serde(default = "false_fn")] pub allow_jaeger: bool, + #[serde(default)] + proxy: ProxyConfig, jwt_secret: Option, #[serde(default = "Vec::new")] trusted_servers: Vec>, @@ -83,6 +85,125 @@ pub type Engine = abstraction::SledEngine; #[cfg(feature = "rocksdb")] pub type Engine = abstraction::RocksDbEngine; 
+#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ProxyConfig { + None, + Global { + #[serde(deserialize_with = "crate::utils::deserialize_from_str")] + url: reqwest::Url, + }, + ByDomain(Vec), +} +impl ProxyConfig { + pub fn to_proxy(&self) -> Result> { + Ok(match self.clone() { + ProxyConfig::None => None, + ProxyConfig::Global { url } => Some(reqwest::Proxy::all(url)?), + ProxyConfig::ByDomain(proxies) => Some(reqwest::Proxy::custom(move |url| { + proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy + })), + }) + } +} +impl Default for ProxyConfig { + fn default() -> Self { + ProxyConfig::None + } +} + +#[derive(Clone, Debug, Deserialize)] +pub struct PartialProxyConfig { + #[serde(deserialize_with = "crate::utils::deserialize_from_str")] + url: reqwest::Url, + #[serde(default)] + include: Vec, + #[serde(default)] + exclude: Vec, +} +impl PartialProxyConfig { + pub fn for_url(&self, url: &reqwest::Url) -> Option<&reqwest::Url> { + let domain = url.domain()?; + let mut included_because = None; // most specific reason it was included + let mut excluded_because = None; // most specific reason it was excluded + if self.include.is_empty() { + // treat empty include list as `*` + included_because = Some(&WildCardedDomain::WildCard) + } + for wc_domain in &self.include { + if wc_domain.matches(domain) { + match included_because { + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => included_because = Some(wc_domain), + } + } + } + for wc_domain in &self.exclude { + if wc_domain.matches(domain) { + match excluded_because { + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => excluded_because = Some(wc_domain), + } + } + } + match (included_because, excluded_because) { + (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded + (Some(_), None) => Some(&self.url), + _ => None, + } + } +} + +/// A domain name, that optionally allows a * as its first subdomain. +#[derive(Clone, Debug)] +pub enum WildCardedDomain { + WildCard, + WildCarded(String), + Exact(String), +} +impl WildCardedDomain { + pub fn matches(&self, domain: &str) -> bool { + match self { + WildCardedDomain::WildCard => true, + WildCardedDomain::WildCarded(d) => domain.ends_with(d), + WildCardedDomain::Exact(d) => domain == d, + } + } + pub fn more_specific_than(&self, other: &Self) -> bool { + match (self, other) { + (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false, + (_, WildCardedDomain::WildCard) => true, + (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a), + (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => { + a != b && a.ends_with(b) + } + _ => false, + } + } +} +impl std::str::FromStr for WildCardedDomain { + type Err = std::convert::Infallible; + fn from_str(s: &str) -> std::result::Result { + // maybe do some domain validation? 
+ Ok(if s.starts_with("*.") { + WildCardedDomain::WildCarded(s[1..].to_owned()) + } else if s == "*" { + WildCardedDomain::WildCarded("".to_owned()) + } else { + WildCardedDomain::Exact(s.to_owned()) + }) + } +} +impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::de::Deserializer<'de>, + { + crate::utils::deserialize_from_str(deserializer) + } +} + +#[derive(Clone)] pub struct Database { pub globals: globals::Globals, pub users: users::Users, diff --git a/src/database/globals.rs b/src/database/globals.rs index 1ce87bd..db166e9 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -125,13 +125,15 @@ impl Globals { tlsconfig.root_store = rustls_native_certs::load_native_certs().expect("Error loading system certificates"); - let reqwest_client = reqwest::Client::builder() + let mut reqwest_client_builder = reqwest::Client::builder() .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)) .pool_max_idle_per_host(1) - .use_preconfigured_tls(tlsconfig) - .build() - .unwrap(); + .use_preconfigured_tls(tlsconfig); + if let Some(proxy) = config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + let reqwest_client = reqwest_client_builder.build().unwrap(); let jwt_decoding_key = config .jwt_secret diff --git a/src/utils.rs b/src/utils.rs index 2b5336c..b8ce303 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -5,6 +5,7 @@ use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, convert::TryInto, + str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; @@ -115,3 +116,29 @@ pub fn to_canonical_object( ))), } } + +pub fn deserialize_from_str< + 'de, + D: serde::de::Deserializer<'de>, + T: FromStr, + E: std::fmt::Display, +>( + deserializer: D, +) -> std::result::Result { + struct Visitor, E>(std::marker::PhantomData); + impl<'de, T: FromStr, Err: std::fmt::Display> serde::de::Visitor<'de> + for Visitor + { + type Value = T; + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a parsable string") + } + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + v.parse().map_err(|e| serde::de::Error::custom(e)) + } + } + deserializer.deserialize_str(Visitor(std::marker::PhantomData)) +} From f25f61d4a9e42d29704c357868074e45d24bd4df Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Thu, 1 Jul 2021 12:48:12 -0600 Subject: [PATCH 0632/1727] fix errors introduced by rebase --- src/database.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 52d92a5..64b5ee3 100644 --- a/src/database.rs +++ b/src/database.rs @@ -203,7 +203,6 @@ impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { } } -#[derive(Clone)] pub struct Database { pub globals: globals::Globals, pub users: users::Users, From c53cc03ff8db65b6b447a852eee85e540ad38cb1 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Thu, 1 Jul 2021 13:38:25 -0600 Subject: [PATCH 0633/1727] address pr comments --- conduit-example.toml | 2 + src/database.rs | 121 +--------------------------------- src/database/proxy.rs | 146 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+), 118 deletions(-) create mode 100644 src/database/proxy.rs diff --git a/conduit-example.toml b/conduit-example.toml index 66c105b..db0bbb7 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -41,3 +41,5 @@ trusted_servers = ["matrix.org"] #workers 
= 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy + +proxy = "none" # more examples can be found at src/database/proxy.rs:6 diff --git a/src/database.rs b/src/database.rs index 64b5ee3..0ea4d78 100644 --- a/src/database.rs +++ b/src/database.rs @@ -6,6 +6,7 @@ pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; +pub mod proxy; pub mod pusher; pub mod rooms; pub mod sending; @@ -28,6 +29,8 @@ use std::{ }; use tokio::sync::Semaphore; +use self::proxy::ProxyConfig; + #[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, @@ -85,124 +88,6 @@ pub type Engine = abstraction::SledEngine; #[cfg(feature = "rocksdb")] pub type Engine = abstraction::RocksDbEngine; -#[derive(Clone, Debug, Deserialize)] -#[serde(rename_all = "snake_case")] -pub enum ProxyConfig { - None, - Global { - #[serde(deserialize_with = "crate::utils::deserialize_from_str")] - url: reqwest::Url, - }, - ByDomain(Vec), -} -impl ProxyConfig { - pub fn to_proxy(&self) -> Result> { - Ok(match self.clone() { - ProxyConfig::None => None, - ProxyConfig::Global { url } => Some(reqwest::Proxy::all(url)?), - ProxyConfig::ByDomain(proxies) => Some(reqwest::Proxy::custom(move |url| { - proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy - })), - }) - } -} -impl Default for ProxyConfig { - fn default() -> Self { - ProxyConfig::None - } -} - -#[derive(Clone, Debug, Deserialize)] -pub struct PartialProxyConfig { - #[serde(deserialize_with = "crate::utils::deserialize_from_str")] - url: reqwest::Url, - #[serde(default)] - include: Vec, - #[serde(default)] - exclude: Vec, -} -impl PartialProxyConfig { - pub fn for_url(&self, url: &reqwest::Url) -> Option<&reqwest::Url> { - let domain = url.domain()?; - let mut included_because = None; // most specific reason it was included - let mut excluded_because = None; // most specific reason it was excluded - if self.include.is_empty() { - // treat empty include list as `*` - included_because = Some(&WildCardedDomain::WildCard) - } - for wc_domain in &self.include { - if wc_domain.matches(domain) { - match included_because { - Some(prev) if !wc_domain.more_specific_than(prev) => (), - _ => included_because = Some(wc_domain), - } - } - } - for wc_domain in &self.exclude { - if wc_domain.matches(domain) { - match excluded_because { - Some(prev) if !wc_domain.more_specific_than(prev) => (), - _ => excluded_because = Some(wc_domain), - } - } - } - match (included_because, excluded_because) { - (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded - (Some(_), None) => Some(&self.url), - _ => None, - } - } -} - -/// A domain name, that optionally allows a * as its first subdomain. 
-#[derive(Clone, Debug)] -pub enum WildCardedDomain { - WildCard, - WildCarded(String), - Exact(String), -} -impl WildCardedDomain { - pub fn matches(&self, domain: &str) -> bool { - match self { - WildCardedDomain::WildCard => true, - WildCardedDomain::WildCarded(d) => domain.ends_with(d), - WildCardedDomain::Exact(d) => domain == d, - } - } - pub fn more_specific_than(&self, other: &Self) -> bool { - match (self, other) { - (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false, - (_, WildCardedDomain::WildCard) => true, - (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a), - (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => { - a != b && a.ends_with(b) - } - _ => false, - } - } -} -impl std::str::FromStr for WildCardedDomain { - type Err = std::convert::Infallible; - fn from_str(s: &str) -> std::result::Result { - // maybe do some domain validation? - Ok(if s.starts_with("*.") { - WildCardedDomain::WildCarded(s[1..].to_owned()) - } else if s == "*" { - WildCardedDomain::WildCarded("".to_owned()) - } else { - WildCardedDomain::Exact(s.to_owned()) - }) - } -} -impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::de::Deserializer<'de>, - { - crate::utils::deserialize_from_str(deserializer) - } -} - pub struct Database { pub globals: globals::Globals, pub users: users::Users, diff --git a/src/database/proxy.rs b/src/database/proxy.rs new file mode 100644 index 0000000..78e9d2b --- /dev/null +++ b/src/database/proxy.rs @@ -0,0 +1,146 @@ +use reqwest::{Proxy, Url}; +use serde::Deserialize; + +use crate::Result; + +/// ## Examples: +/// - No proxy (default): +/// ```toml +/// proxy ="none" +/// ``` +/// - Global proxy +/// ```toml +/// [proxy] +/// global = { url = "socks5h://localhost:9050" } +/// ``` +/// - Proxy some domains +/// ```toml +/// [proxy] +/// [[proxy.by_domain]] +/// url = "socks5h://localhost:9050" +/// include = ["*.onion", "matrix.myspecial.onion"] +/// exclude = ["*.myspecial.onion"] +/// ``` +/// ## Include vs. Exclude +/// If include is an empty list, it is assumed to be `["*"]`. +/// +/// If a domain matches both the exclude and include list, the proxy will only be used if it was +/// included because of a more specific rule than it was excluded. In the above example, the proxy +/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. 
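As a rough illustration of the include/exclude precedence described in the doc comment above, here is a small self-contained sketch. It is not the code from this patch: plain rule strings stand in for `WildCardedDomain`/`PartialProxyConfig`, and "more specific" is reduced to the length of the matched suffix (which agrees with the `ends_with` ordering whenever two wildcard rules match the same domain).

```rust
/// Specificity of a rule for a given domain: higher = more specific, `None` = no match.
fn specificity(rule: &str, domain: &str) -> Option<usize> {
    if rule == "*" {
        Some(0) // global wildcard: matches everything, least specific
    } else if let Some(suffix) = rule.strip_prefix('*') {
        // "*.onion" matches any domain ending in ".onion"
        domain.ends_with(suffix).then(|| suffix.len())
    } else {
        // exact rule: matches only the domain itself and beats any wildcard
        (rule == domain).then(|| domain.len() + 1)
    }
}

/// Most specific matching rule out of a rule list, if any.
fn best_match(rules: &[&str], domain: &str) -> Option<usize> {
    rules.iter().filter_map(|&rule| specificity(rule, domain)).max()
}

/// Should the proxy be used for `domain`?
fn use_proxy(include: &[&str], exclude: &[&str], domain: &str) -> bool {
    // An empty include list behaves like ["*"].
    let included = if include.is_empty() {
        Some(0)
    } else {
        best_match(include, domain)
    };
    let excluded = best_match(exclude, domain);
    match (included, excluded) {
        // proxied only if included for a more specific reason than excluded
        (Some(inc), Some(exc)) => inc > exc,
        (Some(_), None) => true,
        _ => false,
    }
}

fn main() {
    let include = ["*.onion", "matrix.myspecial.onion"];
    let exclude = ["*.myspecial.onion"];
    assert!(use_proxy(&include, &exclude, "ordinary.onion"));
    assert!(use_proxy(&include, &exclude, "matrix.myspecial.onion"));
    assert!(!use_proxy(&include, &exclude, "hello.myspecial.onion"));
    println!("matches the behaviour described in the doc comment");
}
```

Running the sketch reproduces the documented outcome: `ordinary.onion` and `matrix.myspecial.onion` go through the proxy, `hello.myspecial.onion` does not.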
+#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ProxyConfig { + None, + Global { + #[serde(deserialize_with = "crate::utils::deserialize_from_str")] + url: Url, + }, + ByDomain(Vec), +} +impl ProxyConfig { + pub fn to_proxy(&self) -> Result> { + Ok(match self.clone() { + ProxyConfig::None => None, + ProxyConfig::Global { url } => Some(Proxy::all(url)?), + ProxyConfig::ByDomain(proxies) => Some(Proxy::custom(move |url| { + proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy + })), + }) + } +} +impl Default for ProxyConfig { + fn default() -> Self { + ProxyConfig::None + } +} + +#[derive(Clone, Debug, Deserialize)] +pub struct PartialProxyConfig { + #[serde(deserialize_with = "crate::utils::deserialize_from_str")] + url: Url, + #[serde(default)] + include: Vec, + #[serde(default)] + exclude: Vec, +} +impl PartialProxyConfig { + pub fn for_url(&self, url: &Url) -> Option<&Url> { + let domain = url.domain()?; + let mut included_because = None; // most specific reason it was included + let mut excluded_because = None; // most specific reason it was excluded + if self.include.is_empty() { + // treat empty include list as `*` + included_because = Some(&WildCardedDomain::WildCard) + } + for wc_domain in &self.include { + if wc_domain.matches(domain) { + match included_because { + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => included_because = Some(wc_domain), + } + } + } + for wc_domain in &self.exclude { + if wc_domain.matches(domain) { + match excluded_because { + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => excluded_because = Some(wc_domain), + } + } + } + match (included_because, excluded_because) { + (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded + (Some(_), None) => Some(&self.url), + _ => None, + } + } +} + +/// A domain name, that optionally allows a * as its first subdomain. +#[derive(Clone, Debug)] +pub enum WildCardedDomain { + WildCard, + WildCarded(String), + Exact(String), +} +impl WildCardedDomain { + pub fn matches(&self, domain: &str) -> bool { + match self { + WildCardedDomain::WildCard => true, + WildCardedDomain::WildCarded(d) => domain.ends_with(d), + WildCardedDomain::Exact(d) => domain == d, + } + } + pub fn more_specific_than(&self, other: &Self) -> bool { + match (self, other) { + (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false, + (_, WildCardedDomain::WildCard) => true, + (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a), + (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => { + a != b && a.ends_with(b) + } + _ => false, + } + } +} +impl std::str::FromStr for WildCardedDomain { + type Err = std::convert::Infallible; + fn from_str(s: &str) -> std::result::Result { + // maybe do some domain validation? 
+ Ok(if s.starts_with("*.") { + WildCardedDomain::WildCarded(s[1..].to_owned()) + } else if s == "*" { + WildCardedDomain::WildCarded("".to_owned()) + } else { + WildCardedDomain::Exact(s.to_owned()) + }) + } +} +impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::de::Deserializer<'de>, + { + crate::utils::deserialize_from_str(deserializer) + } +} From c30cc50a0b4d3ef8bd314521200ce12e5a23db27 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 2 Jul 2021 06:40:40 -0400 Subject: [PATCH 0634/1727] Switch ruma to a commit from next --- Cargo.lock | 40 ++++++++++++++++++++-------------------- Cargo.toml | 3 +-- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c9de4e..1ffd0ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1883,7 +1883,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "assign", "js_int", @@ -1904,7 +1904,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "bytes", "http", @@ -1920,7 +1920,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1931,7 +1931,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "ruma-api", "ruma-common", @@ -1945,7 +1945,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "assign", "bytes", @@ -1965,7 +1965,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "indexmap", "js_int", @@ -1979,8 +1979,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.23.2" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +version = "0.23.1" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "indoc", "js_int", @@ -1994,8 +1994,8 @@ dependencies = [ [[package]] name = 
"ruma-events-macros" -version = "0.23.2" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +version = "0.23.1" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2006,7 +2006,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2021,7 +2021,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "paste", "rand 0.8.3", @@ -2035,7 +2035,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2045,12 +2045,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2063,7 +2063,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2078,7 +2078,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "bytes", "form_urlencoded", @@ -2092,7 +2092,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2103,7 +2103,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = 
"git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2120,7 +2120,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/DevinR528/ruma?branch=state-closure#95208b9d03876e7c85543fe8655ceb2f7dc76363" +source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "itertools 0.10.0", "js_int", diff --git a/Cargo.toml b/Cargo.toml index c9a8143..5ec696a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,8 +17,7 @@ edition = "2018" rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "5a7e2cddcf257e367465cced51442c91e8f557c9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/DevinR528/ruma", branch = "state-closure", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "174555857ef90d49e4b9a672be9e2fe0acdc2687", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio From a7cb1c999ae42d199e1e1ee342031ab29d64d7f8 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 2 Jul 2021 12:26:26 +0000 Subject: [PATCH 0635/1727] Publish master builds as nightly releases & also build debs --- .gitlab-ci.yml | 168 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 135 insertions(+), 33 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ae9b32b..930e8f6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,11 +1,13 @@ stages: - test - build - - release + - publish artifacts + - release nightly variables: GIT_SUBMODULE_STRATEGY: recursive FF_USE_FASTZIP: 1 + CACHE_COMPRESSION_LEVEL: fastest test:cargo: @@ -31,56 +33,156 @@ test:cargo: - cargo fmt --all -- --check - cargo clippy +# --------------------------------------------------------------------- # +# Cargo: Compiling for different architectures # +# --------------------------------------------------------------------- # -# Compile conduit for different linux target architectures -build:cargo: +.build-cargo-shared-settings: stage: "build" needs: [] + rules: + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' interruptible: true - parallel: - matrix: - - TARGET: "x86_64-unknown-linux-gnu" - - TARGET: "armv7-unknown-linux-gnueabihf" - NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" - - TARGET: "aarch64-unknown-linux-gnu" - NEEDED_PACKAGES: "build-essential gcc-8-aarch64-linux-gnu 
g++-aarch64-linux-gnu libc6-dev-arm64-cross" - TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" - TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" image: "rust:latest" cache: paths: + - cargohome - target/ key: "build_cache-$TARGET" - variables: - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER: "/linux-runner armv7" - CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc - CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER: "/linux-runner aarch64" - CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc - CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ before_script: - - apt-get update -yqq + - 'echo "Building for target $TARGET"' + - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging + - 'apt-get update -yqq' + - 'echo "Installing packages: $NEEDED_PACKAGES"' - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - "rustup target add $TARGET" script: - - rustc --version && cargo --version # Print version info for debugging - - cargo build --target $TARGET --release + - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' artifacts: name: "conduit-$TARGET" + expose_as: "Binary" paths: - "conduit-$TARGET" +build:cargo:x86_64-unknown-linux-gnu: + extends: .build-cargo-shared-settings + variables: + TARGET: "x86_64-unknown-linux-gnu" -# Store the resulting binaries into the GitLab package registry, so they can be downloaded -publish:package: - stage: release - image: curlimages/curl:latest +build:cargo:armv7-unknown-linux-gnueabihf: + extends: .build-cargo-shared-settings + variables: + TARGET: "armv7-unknown-linux-gnueabihf" + NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc + CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc + CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ + +build:cargo:aarch64-unknown-linux-gnu: + extends: .build-cargo-shared-settings + variables: + TARGET: "aarch64-unknown-linux-gnu" + NEEDED_PACKAGES: "build-essential gcc-8-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc + CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc + CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ + TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" + TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" + + +# --------------------------------------------------------------------- # +# Cargo: Compiling deb packages for different architectures # +# --------------------------------------------------------------------- # + + +.build-cargo-deb-shared-settings: + stage: "build" + needs: [] + rules: + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + interruptible: true + image: "rust:latest" + cache: + paths: + - cargohome + - target/ + key: "build_cache-deb-$TARGET" + before_script: + - 'echo "Building debian package for target $TARGET"' + - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging + - 'apt-get update -yqq' + - 'echo "Installing packages: $NEEDED_PACKAGES"' + - "apt-get install -yqq --no-install-recommends 
$NEEDED_PACKAGES" + - "rustup target add $TARGET" + - "cargo install cargo-deb" script: - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-x86_64"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-armv7"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/conduit-aarch64"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file README.md "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/README.md"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file LICENSE "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_JOB_ID}/LICENSE"' + - time cargo deb --target $TARGET + - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"' + artifacts: + name: "conduit-$TARGET.deb" + expose_as: "Debian Package" + paths: + - "conduit-$TARGET.deb" + +build:cargo-deb:x86_64-unknown-linux-gnu: + extends: .build-cargo-deb-shared-settings + variables: + TARGET: "x86_64-unknown-linux-gnu" + NEEDED_PACKAGES: "" + + + + +# --------------------------------------------------------------------- # +# Storing and releasing compiled binaries # +# --------------------------------------------------------------------- # + +publish:package: + stage: "publish artifacts" + needs: + - "build:cargo:x86_64-unknown-linux-gnu" + - "build:cargo:armv7-unknown-linux-gnueabihf" + - "build:cargo:aarch64-unknown-linux-gnu" + - "build:cargo-deb:x86_64-unknown-linux-gnu" + rules: + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + image: curlimages/curl:latest + variables: + GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts + script: + - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' + + + +publish:nightly: + stage: "release nightly" + image: registry.gitlab.com/gitlab-org/release-cli:latest + needs: + - job: "publish:package" + artifacts: false + variables: + GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts + script: "echo 'Releasing current state as release'" + rules: + - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH" # Run this job on the main branch + release: + name: 'Nightly' + description: '⚠️ The latest development version of the day, fresh from the repository. Use at your own risk!' 
+ tag_name: '$CI_COMMIT_REF_SLUG' + assets: + links: + - name: 'conduit-x86_64-unknown-linux-gnu' + url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-x86_64-unknown-linux-gnu" + - name: 'conduit-armv7-unknown-linux-gnueabihf' + url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-armv7-unknown-linux-gnueabihf" + - name: 'conduit-aarch64-unknown-linux-gnu' + url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-aarch64-unknown-linux-gnu" + - name: 'conduit-x86_64-unknown-linux-gnu.deb' + url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-x86_64-unknown-linux-gnu.deb" From fcc30f059e3ca624497f15f8abe0150e5c90fe59 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 2 Jul 2021 14:58:00 +0000 Subject: [PATCH 0636/1727] Fix: Nightly release tag name should not be a branch name According to tulir this breaks the GitLab Matrix bot, and nightly is a better match anyway --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 930e8f6..66cdf5c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -175,7 +175,7 @@ publish:nightly: release: name: 'Nightly' description: '⚠️ The latest development version of the day, fresh from the repository. Use at your own risk!' - tag_name: '$CI_COMMIT_REF_SLUG' + tag_name: 'nightly' assets: links: - name: 'conduit-x86_64-unknown-linux-gnu' From 6a96cfaac1fad23ae689ecc80ea2ed0a8af097e5 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 6 Jul 2021 10:40:57 +0200 Subject: [PATCH 0637/1727] Change default port in docker to the new conduit default port 6167 and fix the docker healthcheck --- Dockerfile | 9 ++++++--- docker-compose.yml | 6 +++--- docker/docker-compose.traefik.yml | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 51f146d..0eae25a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,8 +61,8 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ maintainer="Weasy666" -# Standard port on which Rocket launches -EXPOSE 8000 +# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. 
+EXPOSE 6167 # Copy config files from context and the binary from # the "builder" stage to the current stage into folder @@ -90,7 +90,10 @@ RUN apk add --no-cache \ VOLUME ["/srv/conduit/.local/share/conduit"] # Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=2s CMD curl --fail -s http://localhost:8000/_matrix/client/versions || curl -k --fail -s https://localhost:8000/_matrix/client/versions || exit 1 +HEALTHCHECK --start-period=5s \ + CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ + curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ + exit 1 # Set user to www-data USER www-data diff --git a/docker-compose.yml b/docker-compose.yml index cfc2462..cf0d2c1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -18,18 +18,18 @@ services: GIT_REF: origin/master restart: unless-stopped ports: - - 8448:8000 + - 8448:6167 volumes: - db:/srv/conduit/.local/share/conduit ### Uncomment if you want to use conduit.toml to configure Conduit ### Note: Set env vars will override conduit.toml values # - ./conduit.toml:/srv/conduit/conduit.toml environment: - CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 - # CONDUIT_PORT: 8000 + # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 943cf3c..3b36d10 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -25,11 +25,11 @@ services: networks: - proxy environment: - CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name + CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 127.0.0.1 - # CONDUIT_PORT: 8000 + # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" From 36681dd3acdca1eb716c689aa87f8d45a1b022a0 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 11 Jul 2021 11:43:48 +0000 Subject: [PATCH 0638/1727] Fix: Duplicate releases don't work, remove nightly --- .gitlab-ci.yml | 35 ++++------------------------------- DEPLOY.md | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 35 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 66cdf5c..005f0ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,8 +1,7 @@ stages: - test - build - - publish artifacts - - release nightly + - upload artifacts variables: GIT_SUBMODULE_STRATEGY: recursive @@ -137,11 +136,11 @@ build:cargo-deb:x86_64-unknown-linux-gnu: # --------------------------------------------------------------------- # -# Storing and releasing compiled 
binaries # +# Store binaries as package so they have download urls # # --------------------------------------------------------------------- # publish:package: - stage: "publish artifacts" + stage: "upload artifacts" needs: - "build:cargo:x86_64-unknown-linux-gnu" - "build:cargo:armv7-unknown-linux-gnueabihf" @@ -159,30 +158,4 @@ publish:package: - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - - -publish:nightly: - stage: "release nightly" - image: registry.gitlab.com/gitlab-org/release-cli:latest - needs: - - job: "publish:package" - artifacts: false - variables: - GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts - script: "echo 'Releasing current state as release'" - rules: - - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH" # Run this job on the main branch - release: - name: 'Nightly' - description: '⚠️ The latest development version of the day, fresh from the repository. Use at your own risk!' - tag_name: 'nightly' - assets: - links: - - name: 'conduit-x86_64-unknown-linux-gnu' - url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-x86_64-unknown-linux-gnu" - - name: 'conduit-armv7-unknown-linux-gnueabihf' - url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-armv7-unknown-linux-gnueabihf" - - name: 'conduit-aarch64-unknown-linux-gnu' - url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-aarch64-unknown-linux-gnu" - - name: 'conduit-x86_64-unknown-linux-gnu.deb' - url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}/conduit-x86_64-unknown-linux-gnu.deb" + diff --git a/DEPLOY.md b/DEPLOY.md index fe8c331..778d0e0 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -8,10 +8,18 @@ If you run into any problems while setting up Conduit, write an email to `timo@k You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -- x84_64: `https://conduit.rs/master/x86_64/conduit-bin` -- armv7: `https://conduit.rs/master/armv7/conduit-bin` -- armv8: `https://conduit.rs/master/armv8/conduit-bin` -- arm: `https://conduit.rs/master/arm/conduit-bin` + +| CPU Architecture | GNU (Debian, ArchLinux, ...) | MUSL (Alpine, ... 
) | +| ------------------- | ---------------------------- | ----------------------- | +| x84_64 / amd64 | [Download][x84_64-gnu] | - | +| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | +| armv8 / aarch64 | [Download][armv8-gnu] | - | +| arm | [Download][arm] | - | + +[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:cargo:x86_64-unknown-linux-gnu +[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:cargo:armv7-unknown-linux-gnueabihf +[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:cargo:aarch64-unknown-linux-gnu +[arm]: https://conduit.rs/master/arm/conduit-bin ```bash $ sudo wget -O /usr/local/bin/matrix-conduit From 699f77671fa762e5ee5455d281cba6abc0a5e184 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Wed, 30 Jun 2021 23:12:22 +0200 Subject: [PATCH 0639/1727] Return proper error in case of invalid UTF-8 in json_body json_body is used in places that need authentication. In case an unknown field is set, Ruma doesn't parse the field and so doesn't give an error on invalid UTF-8. But Conduit has parsed and on error makes json_body None. Return an error to the client instead of generating an internal error. --- src/client_server/account.rs | 54 +++++++++++++++++++----------------- src/client_server/device.rs | 32 ++++++++++----------- src/client_server/keys.rs | 16 +++++------ 3 files changed, 53 insertions(+), 49 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index f495e28..5326a79 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -157,15 +157,19 @@ pub async fn register_route( } // Success! } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create( + &UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + )?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } } @@ -526,14 +530,14 @@ pub async fn change_password_route( } // Success! } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &sender_user, - &sender_device, - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } db.users @@ -618,14 +622,14 @@ pub async fn deactivate_route( } // Success! 
} else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &sender_user, - &sender_device, - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } // Leave all joined rooms and reject all invitations diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 2441524..2c4b527 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -116,14 +116,14 @@ pub async fn delete_device_route( } // Success! } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &sender_user, - &sender_device, - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } db.users.remove_device(&sender_user, &body.device_id)?; @@ -170,14 +170,14 @@ pub async fn delete_devices_route( } // Success! } else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &sender_user, - &sender_device, - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } for device_id in &body.devices { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index f80a329..6026981 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -145,14 +145,14 @@ pub async fn upload_signing_keys_route( } // Success! 
} else { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &sender_user, - &sender_device, - &uiaainfo, - &body.json_body.expect("body is json"), - )?; - return Err(Error::Uiaa(uiaainfo)); + if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } } if let Some(master_key) = &body.master_key { From dcb5e5900f85677371a7bef337292ffe45c2926e Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sun, 11 Jul 2021 22:07:10 +0200 Subject: [PATCH 0640/1727] Getting capabilities requires authentication --- src/client_server/capabilities.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index ddc213d..8740928 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,4 +1,5 @@ use crate::ConduitResult; +use crate::Ruma; use ruma::{ api::client::r0::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, @@ -13,9 +14,14 @@ use rocket::get; /// # `GET /_matrix/client/r0/capabilities` /// /// Get information on this server's supported feature set and other relevent capabilities. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] -#[tracing::instrument] -pub async fn get_capabilities_route() -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/capabilities", data = "<_body>") +)] +#[tracing::instrument(skip(_body))] +pub async fn get_capabilities_route( + _body: Ruma, +) -> ConduitResult { let mut available = BTreeMap::new(); available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); From 888a2f7fa169d6f0fd13d0a36633814d10d42190 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 12 Jul 2021 19:58:35 +0000 Subject: [PATCH 0641/1727] Make CI run on famedly runners By default, jobs without tags only run on CI runners configured to do so [1]. Conduit can use famedly runners, which are more powerfull than gitlab's runners, but require a tag on the job to run it there. This commit tags each job with the "docker" tag. On the famedly/conduit repo this means faster CI. On other gitlab.com forks the normal ci. Selfhosted gitlab's might need to add a "docker" tag to their runner. 
[1]: https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-limit-the-number-of-jobs-using-the-runner --- .gitlab-ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 005f0ee..14c10ed 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,10 +9,12 @@ variables: CACHE_COMPRESSION_LEVEL: fastest + test:cargo: stage: "test" needs: [] image: "rust:latest" + tags: ["docker"] variables: CARGO_HOME: "cargohome" cache: @@ -43,6 +45,7 @@ test:cargo: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' interruptible: true image: "rust:latest" + tags: ["docker"] cache: paths: - cargohome @@ -103,6 +106,7 @@ build:cargo:aarch64-unknown-linux-gnu: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' interruptible: true image: "rust:latest" + tags: ["docker"] cache: paths: - cargohome @@ -149,6 +153,7 @@ publish:package: rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' image: curlimages/curl:latest + tags: ["docker"] variables: GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts script: From 0080932aef8effc75ff6ca508eaa0fc3da23c16a Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 12 Jul 2021 20:18:14 +0000 Subject: [PATCH 0642/1727] Speed up release builds Setting cargo to run incremental builds means partial build results should be cached. This is not enabled by default in release mode. Incremental builds use 256 codegen units by default [1]. We set them to 16 (release default) again for somewhat faster code but slightly slower builds. [1]: https://doc.rust-lang.org/cargo/reference/profiles.html#codegen-units --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 14c10ed..2fb9d54 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -60,6 +60,9 @@ test:cargo: - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - "rustup target add $TARGET" script: + # Set some cargo tuning here, because targets overwrite the 'variables' + - "export CARGO_INCREMENTAL=true" + - "export CARGO_PROFILE_RELEASE_CODEGEN_UNITS=16" - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' artifacts: From 3fa09ff57de49ccbd5c0ad48fbcdf88837b743b2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 12 Jul 2021 20:21:13 +0000 Subject: [PATCH 0643/1727] Use thin-lto [1] for "better" release builds. This performs a rather quick variant of Link Time Optimization [2]. It should add negligible build time but also more optimized binaries. 
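The same tuning could also be expressed as ordinary Cargo profile settings instead of CI environment variables. A sketch of the equivalent `[profile.release]` entries follows (an alternative, not what these patches do; the keys are the documented Cargo profile options):

```toml
# Hypothetical Cargo.toml equivalent of the CARGO_* variables exported in CI:
[profile.release]
incremental = true   # keep partial build results between runs
codegen-units = 16   # release default; incremental builds otherwise use 256
lto = "thin"         # the quick link-time-optimization variant described above
```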
[1]: https://doc.rust-lang.org/cargo/reference/profiles.html#lto [2]: https://llvm.org/docs/LinkTimeOptimization.html --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2fb9d54..424dc96 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -63,6 +63,7 @@ test:cargo: # Set some cargo tuning here, because targets overwrite the 'variables' - "export CARGO_INCREMENTAL=true" - "export CARGO_PROFILE_RELEASE_CODEGEN_UNITS=16" + - "export CARGO_PROFILE_RELEASE_LTO=thin" - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' artifacts: From 9d4fa9a2201399a652e52cc1d76f0e2a3a5608d2 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Jul 2021 07:07:08 +0000 Subject: [PATCH 0644/1727] Sqlite --- .gitignore | 1 + Cargo.lock | 143 ++++++++- Cargo.toml | 8 +- DEPLOY.md | 4 +- conduit-example.toml | 4 +- debian/postinst | 4 +- docker-compose.yml | 2 +- src/client_server/account.rs | 14 +- src/client_server/alias.rs | 11 +- src/client_server/backup.rs | 33 +-- src/client_server/config.rs | 13 +- src/client_server/context.rs | 7 +- src/client_server/device.rs | 15 +- src/client_server/directory.rs | 13 +- src/client_server/keys.rs | 21 +- src/client_server/media.rs | 15 +- src/client_server/membership.rs | 32 +- src/client_server/message.rs | 8 +- src/client_server/mod.rs | 4 +- src/client_server/presence.rs | 9 +- src/client_server/profile.rs | 15 +- src/client_server/push.rs | 25 +- src/client_server/read_marker.rs | 9 +- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 14 +- src/client_server/search.rs | 6 +- src/client_server/session.rs | 12 +- src/client_server/state.rs | 17 +- src/client_server/sync.rs | 19 +- src/client_server/tag.rs | 11 +- src/client_server/to_device.rs | 7 +- src/client_server/typing.rs | 7 +- src/client_server/user_directory.rs | 7 +- src/database.rs | 382 ++++++++++++++++++------ src/database/abstraction.rs | 297 +------------------ src/database/abstraction/rocksdb.rs | 176 +++++++++++ src/database/abstraction/sled.rs | 119 ++++++++ src/database/abstraction/sqlite.rs | 444 ++++++++++++++++++++++++++++ src/database/account_data.rs | 2 +- src/database/admin.rs | 68 +++-- src/database/appservice.rs | 4 +- src/database/globals.rs | 30 +- src/database/pusher.rs | 2 +- src/database/rooms.rs | 12 +- src/database/sending.rs | 57 +++- src/error.rs | 6 + src/main.rs | 31 +- src/ruma_wrapper.rs | 9 +- src/server_server.rs | 51 ++-- 49 files changed, 1525 insertions(+), 681 deletions(-) create mode 100644 src/database/abstraction/rocksdb.rs create mode 100644 src/database/abstraction/sled.rs create mode 100644 src/database/abstraction/sqlite.rs diff --git a/.gitignore b/.gitignore index e2f4e88..1f5f395 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,7 @@ $RECYCLE.BIN/ # Conduit Rocket.toml conduit.toml +conduit.db # Etc. 
**/*.rs.bk diff --git a/Cargo.lock b/Cargo.lock index 7efeeac..a0d7a70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6,6 +6,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "ahash" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +dependencies = [ + "getrandom 0.2.2", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.15" @@ -238,14 +249,17 @@ version = "0.1.0" dependencies = [ "base64 0.13.0", "bytes", + "crossbeam", "directories", "http", "image", "jsonwebtoken", "log", "lru-cache", + "num_cpus", "opentelemetry", "opentelemetry-jaeger", + "parking_lot", "pretty_env_logger", "rand 0.8.3", "regex", @@ -254,6 +268,7 @@ dependencies = [ "rocket", "rocksdb", "ruma", + "rusqlite", "rust-argon2", "rustls", "rustls-native-certs", @@ -340,10 +355,45 @@ dependencies = [ ] [[package]] -name = "crossbeam-epoch" -version = "0.9.3" +name = "crossbeam" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -353,12 +403,21 @@ dependencies = [ ] [[package]] -name = "crossbeam-utils" -version = "0.8.3" +name = "crossbeam-queue" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg", "cfg-if 1.0.0", "lazy_static", ] @@ -547,6 +606,18 @@ dependencies = [ "termcolor", ] +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "figment" version = 
"0.10.5" @@ -774,6 +845,24 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashlink" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +dependencies = [ + "hashbrown 0.11.2", +] + [[package]] name = "heck" version = "0.3.2" @@ -920,7 +1009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.9.1", "serde", ] @@ -1083,6 +1172,17 @@ dependencies = [ "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.4" @@ -1484,6 +1584,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "pkg-config" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" + [[package]] name = "png" version = "0.16.8" @@ -2136,6 +2242,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "rusqlite" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3" +dependencies = [ + "bitflags", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "memchr", + "smallvec", +] + [[package]] name = "rust-argon2" version = "0.8.3" @@ -3007,6 +3128,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "vcpkg" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa" + [[package]] name = "version_check" version = "0.9.3" diff --git a/Cargo.toml b/Cargo.toml index 426d242..896140c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,11 +73,17 @@ tracing-opentelemetry = "0.11.0" opentelemetry-jaeger = "0.11.0" pretty_env_logger = "0.4.0" lru-cache = "0.1.2" +rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } +parking_lot = { version = "0.11.1", optional = true } +crossbeam = { version = "0.8.1", optional = true } +num_cpus = { version = "1.13.0", optional = true } [features] -default = ["conduit_bin", "backend_sled"] +default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_rocksdb = ["rocksdb"] +backend_sqlite = ["sqlite"] +sqlite = ["rusqlite", "parking_lot", "crossbeam", "num_cpus", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional [[bin]] diff --git a/DEPLOY.md b/DEPLOY.md index 778d0e0..8e16c19 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -114,11 +114,13 @@ allow_federation = true trusted_servers = ["matrix.org"] -#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #workers = 4 # default: cpu core count * 
2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy + +# The total amount of memory that the database will use. +#db_cache_capacity_mb = 200 ``` ## Setting the correct file permissions diff --git a/conduit-example.toml b/conduit-example.toml index db0bbb7..d184991 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -35,7 +35,6 @@ max_request_size = 20_000_000 # in bytes trusted_servers = ["matrix.org"] -#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 @@ -43,3 +42,6 @@ trusted_servers = ["matrix.org"] address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy proxy = "none" # more examples can be found at src/database/proxy.rs:6 + +# The total amount of memory that the database will use. +#db_cache_capacity_mb = 200 \ No newline at end of file diff --git a/debian/postinst b/debian/postinst index 6a4cdb8..824fd64 100644 --- a/debian/postinst +++ b/debian/postinst @@ -73,10 +73,12 @@ max_request_size = 20_000_000 # in bytes # Enable jaeger to support monitoring and troubleshooting through jaeger. #allow_jaeger = false -#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 + +# The total amount of memory that the database will use. +#db_cache_capacity_mb = 200 EOF fi ;; diff --git a/docker-compose.yml b/docker-compose.yml index cf0d2c1..d643709 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -56,4 +56,4 @@ services: # - homeserver volumes: - db: + db: diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 5326a79..7f38eb1 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,7 +1,7 @@ -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{collections::BTreeMap, convert::TryInto}; -use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use log::info; use ruma::{ api::client::{ @@ -42,7 +42,7 @@ const GUEST_NAME_LENGTH: usize = 10; )] #[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { // Validate user id @@ -85,7 +85,7 @@ pub async fn get_register_available_route( )] #[tracing::instrument(skip(db, body))] pub async fn register_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_registration() && !body.from_appservice { @@ -500,7 +500,7 @@ pub async fn register_route( )] #[tracing::instrument(skip(db, body))] pub async fn change_password_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -592,7 +592,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git 
a/src/client_server/alias.rs b/src/client_server/alias.rs index a54bd36..f5d9f64 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma}; use regex::Regex; use ruma::{ api::{ @@ -24,7 +21,7 @@ use rocket::{delete, get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn create_alias_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if db.rooms.id_from_alias(&body.room_alias)?.is_some() { @@ -45,7 +42,7 @@ pub async fn create_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; @@ -61,7 +58,7 @@ pub async fn delete_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_alias_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { get_alias_helper(&db, &body.room_alias).await diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index fcca676..ccb17fa 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::backup::{ @@ -21,7 +18,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn create_backup_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -40,7 +37,7 @@ pub async fn create_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn update_backup_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -58,7 +55,7 @@ pub async fn update_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -86,7 +83,7 @@ pub async fn get_latest_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -113,7 +110,7 @@ pub async fn get_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -132,7 +129,7 @@ pub async fn delete_backup_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -166,7 +163,7 @@ pub async fn add_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -198,7 
+195,7 @@ pub async fn add_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -227,7 +224,7 @@ pub async fn add_backup_key_session_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -243,7 +240,7 @@ pub async fn get_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -261,7 +258,7 @@ pub async fn get_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -283,7 +280,7 @@ pub async fn get_backup_key_session_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -306,7 +303,7 @@ pub async fn delete_backup_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -329,7 +326,7 @@ pub async fn delete_backup_key_sessions_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 829bf94..4f33689 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -25,7 +22,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -60,7 +57,7 @@ pub async fn set_global_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -92,7 +89,7 @@ pub async fn set_room_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -119,7 +116,7 @@ pub async fn get_global_account_data_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( - db: State<'_, Arc>, + db: 
DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/context.rs b/src/client_server/context.rs index b86fd0b..dbc121e 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,6 @@ -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::api::client::{error::ErrorKind, r0::context::get_context}; -use std::{convert::TryFrom, sync::Arc}; +use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -12,7 +11,7 @@ use rocket::get; )] #[tracing::instrument(skip(db, body))] pub async fn get_context_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 2c4b527..a10d788 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{utils, ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::{ @@ -20,7 +17,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_devices_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -40,7 +37,7 @@ pub async fn get_devices_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_device_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -59,7 +56,7 @@ pub async fn get_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn update_device_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -85,7 +82,7 @@ pub async fn update_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_device_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -139,7 +136,7 @@ pub async fn delete_device_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 1b6b1d7..4a440fd 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Error, Result, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; use log::info; use ruma::{ api::{ @@ -35,7 +32,7 @@ use rocket::{get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { get_public_rooms_filtered_helper( @@ -55,7 +52,7 @@ pub async fn get_public_rooms_filtered_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( - db: State<'_, Arc>, + db: 
DatabaseGuard, body: Ruma>, ) -> ConduitResult { let response = get_public_rooms_filtered_helper( @@ -84,7 +81,7 @@ pub async fn get_public_rooms_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -114,7 +111,7 @@ pub async fn set_room_visibility_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { Ok(get_room_visibility::Response { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 6026981..621e5dd 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,5 +1,5 @@ -use super::{State, SESSION_ID_LENGTH}; -use crate::{utils, ConduitResult, Database, Error, Result, Ruma}; +use super::SESSION_ID_LENGTH; +use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -14,10 +14,7 @@ use ruma::{ encryption::UnsignedDeviceInfo, DeviceId, DeviceKeyAlgorithm, UserId, }; -use std::{ - collections::{BTreeMap, HashSet}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashSet}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -28,7 +25,7 @@ use rocket::{get, post}; )] #[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -77,7 +74,7 @@ pub async fn upload_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -98,7 +95,7 @@ pub async fn get_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let response = claim_keys_helper(&body.one_time_keys, &db)?; @@ -114,7 +111,7 @@ pub async fn claim_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -177,7 +174,7 @@ pub async fn upload_signing_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -238,7 +235,7 @@ pub async fn upload_signatures_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0b1fbd7..eaaf939 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,20 +1,21 @@ -use super::State; -use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma}; +use crate::{ + database::media::FileMeta, database::DatabaseGuard, utils, ConduitResult, Error, Ruma, +}; use ruma::api::client::{ error::ErrorKind, r0::media::{create_content, get_content, get_content_thumbnail, 
get_media_config}, }; +use std::convert::TryInto; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; -use std::{convert::TryInto, sync::Arc}; const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] #[tracing::instrument(skip(db))] pub async fn get_media_config_route( - db: State<'_, Arc>, + db: DatabaseGuard, ) -> ConduitResult { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), @@ -28,7 +29,7 @@ pub async fn get_media_config_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_content_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let mxc = format!( @@ -66,7 +67,7 @@ pub async fn create_content_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_content_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -119,7 +120,7 @@ pub async fn get_content_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 5c57b68..4667f25 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -1,6 +1,6 @@ -use super::State; use crate::{ client_server, + database::DatabaseGuard, pdu::{PduBuilder, PduEvent}, server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; @@ -44,7 +44,7 @@ use rocket::{get, post}; )] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -65,14 +65,18 @@ pub async fn join_room_by_id_route( servers.insert(body.room_id.server_name().to_owned()); - join_room_by_id_helper( + let ret = join_room_by_id_helper( &db, body.sender_user.as_ref(), &body.room_id, &servers, body.third_party_signed.as_ref(), ) - .await + .await; + + db.flush().await?; + + ret } #[cfg_attr( @@ -81,7 +85,7 @@ pub async fn join_room_by_id_route( )] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -135,7 +139,7 @@ pub async fn join_room_by_id_or_alias_route( )] #[tracing::instrument(skip(db, body))] pub async fn leave_room_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -153,7 +157,7 @@ pub async fn leave_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn invite_user_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -173,7 +177,7 @@ pub async fn invite_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn kick_user_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -223,7 +227,7 @@ pub async fn kick_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn ban_user_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> 
ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -281,7 +285,7 @@ pub async fn ban_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn unban_user_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -330,7 +334,7 @@ pub async fn unban_user_route( )] #[tracing::instrument(skip(db, body))] pub async fn forget_room_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -348,7 +352,7 @@ pub async fn forget_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -369,7 +373,7 @@ pub async fn joined_rooms_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -399,7 +403,7 @@ pub async fn get_member_events_route( )] #[tracing::instrument(skip(db, body))] pub async fn joined_members_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 0d19f34..7e898b1 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -1,5 +1,4 @@ -use super::State; -use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,6 @@ use ruma::{ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, - sync::Arc, }; #[cfg(feature = "conduit_bin")] @@ -23,7 +21,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -86,7 +84,7 @@ pub async fn send_message_event_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 825dbbb..f211a57 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -64,9 +64,7 @@ pub use voip::*; use super::State; #[cfg(feature = "conduit_bin")] use { - crate::ConduitResult, - rocket::{options, State}, - ruma::api::client::r0::to_device::send_event_to_device, + crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device, }; pub const DEVICE_ID_LENGTH: usize = 10; diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index ce80dfd..bfe638f 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,7 +1,6 @@ -use super::State; -use crate::{utils, ConduitResult, Database, Ruma}; +use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; -use 
std::{convert::TryInto, sync::Arc, time::Duration}; +use std::{convert::TryInto, time::Duration}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -12,7 +11,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn set_presence_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -53,7 +52,7 @@ pub async fn set_presence_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_presence_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 4e9a37b..5281a4a 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -1,5 +1,4 @@ -use super::State; -use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -10,10 +9,10 @@ use ruma::{ events::EventType, serde::Raw, }; +use std::convert::TryInto; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; -use std::{convert::TryInto, sync::Arc}; #[cfg_attr( feature = "conduit_bin", @@ -21,7 +20,7 @@ use std::{convert::TryInto, sync::Arc}; )] #[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -108,7 +107,7 @@ pub async fn set_displayname_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { Ok(get_display_name::Response { @@ -123,7 +122,7 @@ pub async fn get_displayname_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -210,7 +209,7 @@ pub async fn set_avatar_url_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { Ok(get_avatar_url::Response { @@ -225,7 +224,7 @@ pub async fn get_avatar_url_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_profile_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.users.exists(&body.user_id)? 
{ diff --git a/src/client_server/push.rs b/src/client_server/push.rs index d6f6212..794cbce 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -24,7 +21,7 @@ use rocket::{delete, get, post, put}; )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -49,7 +46,7 @@ pub async fn get_pushrules_all_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -103,7 +100,7 @@ pub async fn get_pushrule_route( )] #[tracing::instrument(skip(db, req))] pub async fn set_pushrule_route( - db: State<'_, Arc>, + db: DatabaseGuard, req: Ruma>, ) -> ConduitResult { let sender_user = req.sender_user.as_ref().expect("user is authenticated"); @@ -206,7 +203,7 @@ pub async fn set_pushrule_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -265,7 +262,7 @@ pub async fn get_pushrule_actions_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -339,7 +336,7 @@ pub async fn set_pushrule_actions_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -400,7 +397,7 @@ pub async fn get_pushrule_enabled_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -479,7 +476,7 @@ pub async fn set_pushrule_enabled_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -548,7 +545,7 @@ pub async fn delete_pushrule_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -565,7 +562,7 @@ pub async fn get_pushers_route( )] #[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 837170f..fe49af9 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,5 +1,4 @@ -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; 
+use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -9,10 +8,10 @@ use ruma::{ receipt::ReceiptType, MilliSecondsSinceUnixEpoch, }; +use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::post; -use std::{collections::BTreeMap, sync::Arc}; #[cfg_attr( feature = "conduit_bin", @@ -20,7 +19,7 @@ use std::{collections::BTreeMap, sync::Arc}; )] #[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -87,7 +86,7 @@ pub async fn set_read_marker_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index e193082..3db2771 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -1,10 +1,8 @@ -use super::State; -use crate::{pdu::PduBuilder, ConduitResult, Database, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; use ruma::{ api::client::r0::redact::redact_event, events::{room::redaction, EventType}, }; -use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::put; @@ -15,7 +13,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub async fn redact_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index b33b550..43625fe 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,5 +1,7 @@ -use super::State; -use crate::{client_server::invite_helper, pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; +use crate::{ + client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error, + Ruma, +}; use log::info; use ruma::{ api::client::{ @@ -13,7 +15,7 @@ use ruma::{ serde::Raw, RoomAliasId, RoomId, RoomVersionId, }; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{cmp::max, collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -24,7 +26,7 @@ use rocket::{get, post}; )] #[tracing::instrument(skip(db, body))] pub async fn create_room_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -294,7 +296,7 @@ pub async fn create_room_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -322,7 +324,7 @@ pub async fn get_room_event_route( )] #[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, _room_id: String, ) -> ConduitResult { diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 5fc64d0..ec23dd4 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,7 +1,5 @@ -use super::State; -use crate::{ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use 
ruma::api::client::{error::ErrorKind, r0::search::search_events}; -use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::post; @@ -14,7 +12,7 @@ use std::collections::BTreeMap; )] #[tracing::instrument(skip(db, body))] pub async fn search_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/session.rs b/src/client_server/session.rs index dd504f1..7ad792b 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,7 +1,5 @@ -use std::sync::Arc; - -use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, ConduitResult, Database, Error, Ruma}; +use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; +use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; use log::info; use ruma::{ api::client::{ @@ -52,7 +50,7 @@ pub async fn get_login_types_route() -> ConduitResult )] #[tracing::instrument(skip(db, body))] pub async fn login_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { // Validate login method @@ -169,7 +167,7 @@ pub async fn login_route( )] #[tracing::instrument(skip(db, body))] pub async fn logout_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -197,7 +195,7 @@ pub async fn logout_route( )] #[tracing::instrument(skip(db, body))] pub async fn logout_all_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/state.rs b/src/client_server/state.rs index be52834..68246d5 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,7 +1,6 @@ -use std::sync::Arc; - -use super::State; -use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; +use crate::{ + database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, +}; use ruma::{ api::client::{ error::ErrorKind, @@ -27,7 +26,7 @@ use rocket::{get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -53,7 +52,7 @@ pub async fn send_state_event_for_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -79,7 +78,7 @@ pub async fn send_state_event_for_empty_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -126,7 +125,7 @@ pub async fn get_state_events_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -177,7 +176,7 @@ pub async fn get_state_events_for_key_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> 
ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 69511fa..c57f1da 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,4 @@ -use super::State; -use crate::{ConduitResult, Database, Error, Result, Ruma, RumaResponse}; +use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use log::error; use ruma::{ api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, @@ -35,13 +34,15 @@ use rocket::{get, tokio}; )] #[tracing::instrument(skip(db, body))] pub async fn sync_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> std::result::Result, RumaResponse> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - let mut rx = match db + let arc_db = Arc::new(db); + + let mut rx = match arc_db .globals .sync_receivers .write() @@ -52,7 +53,7 @@ pub async fn sync_events_route( let (tx, rx) = tokio::sync::watch::channel(None); tokio::spawn(sync_helper_wrapper( - Arc::clone(&db), + Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body.since.clone(), @@ -68,7 +69,7 @@ pub async fn sync_events_route( let (tx, rx) = tokio::sync::watch::channel(None); tokio::spawn(sync_helper_wrapper( - Arc::clone(&db), + Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body.since.clone(), @@ -104,7 +105,7 @@ pub async fn sync_events_route( } pub async fn sync_helper_wrapper( - db: Arc, + db: Arc, sender_user: UserId, sender_device: Box, since: Option, @@ -142,11 +143,13 @@ pub async fn sync_helper_wrapper( } } + drop(db); + let _ = tx.send(Some(r.map(|(r, _)| r.into()))); } async fn sync_helper( - db: Arc, + db: Arc, sender_user: UserId, sender_device: Box, since: Option, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 2382fe0..17df2c2 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,10 +1,9 @@ -use super::State; -use crate::{ConduitResult, Database, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use ruma::{ api::client::r0::tag::{create_tag, delete_tag, get_tags}, events::EventType, }; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, put}; @@ -15,7 +14,7 @@ use rocket::{delete, get, put}; )] #[tracing::instrument(skip(db, body))] pub async fn update_tag_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -52,7 +51,7 @@ pub async fn update_tag_route( )] #[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -86,7 +85,7 @@ pub async fn delete_tag_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_tags_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index ada0c9a..3bb135e 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, 
Error, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::to_device::send_event_to_device}, to_device::DeviceIdOrAllDevices, @@ -16,7 +13,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index a0a5d43..7a590af 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{utils, ConduitResult, Database, Ruma}; +use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; @@ -14,7 +11,7 @@ use rocket::put; )] #[tracing::instrument(skip(db, body))] pub fn create_typing_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index d7c16d7..14b85a6 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,7 +1,4 @@ -use std::sync::Arc; - -use super::State; -use crate::{ConduitResult, Database, Ruma}; +use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use ruma::api::client::r0::user_directory::search_users; #[cfg(feature = "conduit_bin")] @@ -13,7 +10,7 @@ use rocket::post; )] #[tracing::instrument(skip(db, body))] pub async fn search_users_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { let limit = u64::from(body.limit) as usize; diff --git a/src/database.rs b/src/database.rs index ec4052c..ac17372 100644 --- a/src/database.rs +++ b/src/database.rs @@ -19,16 +19,23 @@ use abstraction::DatabaseEngine; use directories::ProjectDirs; use log::error; use lru_cache::LruCache; -use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; +use rocket::{ + futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, + outcome::IntoOutcome, + request::{FromRequest, Request}, + try_outcome, State, +}; use ruma::{DeviceId, ServerName, UserId}; -use serde::Deserialize; +use serde::{de::IgnoredAny, Deserialize}; use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, fs::{self, remove_dir_all}, io::Write, + ops::Deref, + path::Path, sync::{Arc, RwLock}, }; -use tokio::sync::Semaphore; +use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use self::proxy::ProxyConfig; @@ -36,8 +43,16 @@ use self::proxy::ProxyConfig; pub struct Config { server_name: Box, database_path: String, - #[serde(default = "default_cache_capacity")] - cache_capacity: u32, + #[serde(default = "default_db_cache_capacity_mb")] + db_cache_capacity_mb: f64, + #[serde(default = "default_sqlite_read_pool_size")] + sqlite_read_pool_size: usize, + #[serde(default = "true_fn")] + sqlite_wal_clean_timer: bool, + #[serde(default = "default_sqlite_wal_clean_second_interval")] + sqlite_wal_clean_second_interval: u32, + #[serde(default = "default_sqlite_wal_clean_second_timeout")] + sqlite_wal_clean_second_timeout: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -57,6 +72,29 @@ pub struct Config { trusted_servers: Vec>, 
#[serde(default = "default_log")] pub log: String, + + #[serde(flatten)] + catchall: BTreeMap, +} + +const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; + +impl Config { + pub fn warn_deprecated(&self) { + let mut was_deprecated = false; + for key in self + .catchall + .keys() + .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) + { + log::warn!("Config parameter {} is deprecated", key); + was_deprecated = true; + } + + if was_deprecated { + log::warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); + } + } } fn false_fn() -> bool { @@ -67,8 +105,20 @@ fn true_fn() -> bool { true } -fn default_cache_capacity() -> u32 { - 1024 * 1024 * 1024 +fn default_db_cache_capacity_mb() -> f64 { + 200.0 +} + +fn default_sqlite_read_pool_size() -> usize { + num_cpus::get().max(1) +} + +fn default_sqlite_wal_clean_second_interval() -> u32 { + 60 * 60 +} + +fn default_sqlite_wal_clean_second_timeout() -> u32 { + 2 } fn default_max_request_size() -> u32 { @@ -84,12 +134,16 @@ fn default_log() -> String { } #[cfg(feature = "sled")] -pub type Engine = abstraction::SledEngine; +pub type Engine = abstraction::sled::Engine; #[cfg(feature = "rocksdb")] -pub type Engine = abstraction::RocksDbEngine; +pub type Engine = abstraction::rocksdb::Engine; + +#[cfg(feature = "sqlite")] +pub type Engine = abstraction::sqlite::Engine; pub struct Database { + _db: Arc, pub globals: globals::Globals, pub users: users::Users, pub uiaa: uiaa::Uiaa, @@ -117,8 +171,37 @@ impl Database { Ok(()) } + fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { + let path = Path::new(&config.database_path); + + #[cfg(feature = "backend_sqlite")] + { + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + if sled_exists { + if sqlite_exists { + // most likely an in-place directory, only warn + log::warn!("Both sled and sqlite databases are detected in database directory"); + log::warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") + } else { + log::error!( + "Sled database detected, conduit now uses sqlite for database operations" + ); + log::error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); + return Err(Error::bad_config( + "sled database detected, migrate to sqlite", + )); + } + } + } + + Ok(()) + } + /// Load an existing database or create a new one. - pub async fn load_or_create(config: Config) -> Result> { + pub async fn load_or_create(config: Config) -> Result>> { + Self::check_sled_or_sqlite_db(&config)?; + let builder = Engine::open(&config)?; if config.max_request_size < 1024 { @@ -128,7 +211,8 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let (sending_sender, sending_receiver) = mpsc::unbounded(); - let db = Arc::new(Self { + let db = Arc::new(TokioRwLock::from(Self { + _db: builder.clone(), users: users::Users { userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -231,100 +315,112 @@ impl Database { globals: globals::Globals::load( builder.open_tree("global")?, builder.open_tree("server_signingkeys")?, - config, + config.clone(), )?, - }); + })); - // MIGRATIONS - // TODO: database versions of new dbs should probably not be 0 - if db.globals.database_version()? 
< 1 { - for (roomserverid, _) in db.rooms.roomserverids.iter() { - let mut parts = roomserverid.split(|&b| b == 0xff); - let room_id = parts.next().expect("split always returns one element"); - let servername = match parts.next() { - Some(s) => s, - None => { - error!("Migration: Invalid roomserverid in db."); + { + let db = db.read().await; + // MIGRATIONS + // TODO: database versions of new dbs should probably not be 0 + if db.globals.database_version()? < 1 { + for (roomserverid, _) in db.rooms.roomserverids.iter() { + let mut parts = roomserverid.split(|&b| b == 0xff); + let room_id = parts.next().expect("split always returns one element"); + let servername = match parts.next() { + Some(s) => s, + None => { + error!("Migration: Invalid roomserverid in db."); + continue; + } + }; + let mut serverroomid = servername.to_vec(); + serverroomid.push(0xff); + serverroomid.extend_from_slice(room_id); + + db.rooms.serverroomids.insert(&serverroomid, &[])?; + } + + db.globals.bump_database_version(1)?; + + println!("Migration: 0 -> 1 finished"); + } + + if db.globals.database_version()? < 2 { + // We accidentally inserted hashed versions of "" into the db instead of just "" + for (userid, password) in db.users.userid_password.iter() { + let password = utils::string_from_bytes(&password); + + let empty_hashed_password = password.map_or(false, |password| { + argon2::verify_encoded(&password, b"").unwrap_or(false) + }); + + if empty_hashed_password { + db.users.userid_password.insert(&userid, b"")?; + } + } + + db.globals.bump_database_version(2)?; + + println!("Migration: 1 -> 2 finished"); + } + + if db.globals.database_version()? < 3 { + // Move media to filesystem + for (key, content) in db.media.mediaid_file.iter() { + if content.len() == 0 { continue; } - }; - let mut serverroomid = servername.to_vec(); - serverroomid.push(0xff); - serverroomid.extend_from_slice(room_id); - db.rooms.serverroomids.insert(&serverroomid, &[])?; - } - - db.globals.bump_database_version(1)?; - - println!("Migration: 0 -> 1 finished"); - } - - if db.globals.database_version()? < 2 { - // We accidentally inserted hashed versions of "" into the db instead of just "" - for (userid, password) in db.users.userid_password.iter() { - let password = utils::string_from_bytes(&password); - - let empty_hashed_password = password.map_or(false, |password| { - argon2::verify_encoded(&password, b"").unwrap_or(false) - }); - - if empty_hashed_password { - db.users.userid_password.insert(&userid, b"")?; - } - } - - db.globals.bump_database_version(2)?; - - println!("Migration: 1 -> 2 finished"); - } - - if db.globals.database_version()? < 3 { - // Move media to filesystem - for (key, content) in db.media.mediaid_file.iter() { - if content.len() == 0 { - continue; + let path = db.globals.get_media_file(&key); + let mut file = fs::File::create(path)?; + file.write_all(&content)?; + db.media.mediaid_file.insert(&key, &[])?; } - let path = db.globals.get_media_file(&key); - let mut file = fs::File::create(path)?; - file.write_all(&content)?; - db.media.mediaid_file.insert(&key, &[])?; + db.globals.bump_database_version(3)?; + + println!("Migration: 2 -> 3 finished"); } - db.globals.bump_database_version(3)?; - - println!("Migration: 2 -> 3 finished"); - } - - if db.globals.database_version()? < 4 { - // Add federated users to db as deactivated - for our_user in db.users.iter() { - let our_user = our_user?; - if db.users.is_deactivated(&our_user)? 
{ - continue; - } - for room in db.rooms.rooms_joined(&our_user) { - for user in db.rooms.room_members(&room?) { - let user = user?; - if user.server_name() != db.globals.server_name() { - println!("Migration: Creating user {}", user); - db.users.create(&user, None)?; + if db.globals.database_version()? < 4 { + // Add federated users to db as deactivated + for our_user in db.users.iter() { + let our_user = our_user?; + if db.users.is_deactivated(&our_user)? { + continue; + } + for room in db.rooms.rooms_joined(&our_user) { + for user in db.rooms.room_members(&room?) { + let user = user?; + if user.server_name() != db.globals.server_name() { + println!("Migration: Creating user {}", user); + db.users.create(&user, None)?; + } } } } + + db.globals.bump_database_version(4)?; + + println!("Migration: 3 -> 4 finished"); } - - db.globals.bump_database_version(4)?; - - println!("Migration: 3 -> 4 finished"); } - // This data is probably outdated - db.rooms.edus.presenceid_presence.clear()?; + let guard = db.read().await; - db.admin.start_handler(Arc::clone(&db), admin_receiver); - db.sending.start_handler(Arc::clone(&db), sending_receiver); + // This data is probably outdated + guard.rooms.edus.presenceid_presence.clear()?; + + guard.admin.start_handler(Arc::clone(&db), admin_receiver); + guard + .sending + .start_handler(Arc::clone(&db), sending_receiver); + + drop(guard); + + #[cfg(feature = "sqlite")] + Self::start_wal_clean_task(&db, &config).await; Ok(db) } @@ -413,13 +509,113 @@ impl Database { .watch_prefix(&userid_bytes), ); + futures.push(Box::pin(self.globals.rotate.watch())); + // Wait until one of them finds something futures.next().await; } pub async fn flush(&self) -> Result<()> { - // noop while we don't use sled 1.0 - //self._db.flush_async().await?; - Ok(()) + let start = std::time::Instant::now(); + + let res = self._db.flush(); + + log::debug!("flush: took {:?}", start.elapsed()); + + res + } + + #[cfg(feature = "sqlite")] + pub fn flush_wal(&self) -> Result<()> { + self._db.flush_wal() + } + + #[cfg(feature = "sqlite")] + pub async fn start_wal_clean_task(lock: &Arc>, config: &Config) { + use tokio::{ + select, + signal::unix::{signal, SignalKind}, + time::{interval, timeout}, + }; + + use std::{ + sync::Weak, + time::{Duration, Instant}, + }; + + let weak: Weak> = Arc::downgrade(&lock); + + let lock_timeout = Duration::from_secs(config.sqlite_wal_clean_second_timeout as u64); + let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64); + let do_timer = config.sqlite_wal_clean_timer; + + tokio::spawn(async move { + let mut i = interval(timer_interval); + let mut s = signal(SignalKind::hangup()).unwrap(); + + loop { + select! { + _ = i.tick(), if do_timer => { + log::info!(target: "wal-trunc", "Timer ticked") + } + _ = s.recv() => { + log::info!(target: "wal-trunc", "Received SIGHUP") + } + }; + + if let Some(arc) = Weak::upgrade(&weak) { + log::info!(target: "wal-trunc", "Rotating sync helpers..."); + // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock. + // Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out. 
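+                    // `fire()` wakes any sync workers currently awaiting `globals.rotate.watch()`,
+                    // so they return early and drop their read locks on the database before this
+                    // task tries to acquire the write lock below.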
+ arc.read().await.globals.rotate.fire(); + + log::info!(target: "wal-trunc", "Locking..."); + let guard = { + if let Ok(guard) = timeout(lock_timeout, arc.write()).await { + guard + } else { + log::info!(target: "wal-trunc", "Lock failed in timeout, canceled."); + continue; + } + }; + log::info!(target: "wal-trunc", "Locked, flushing..."); + let start = Instant::now(); + if let Err(e) = guard.flush_wal() { + log::error!(target: "wal-trunc", "Errored: {}", e); + } else { + log::info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed()); + } + } else { + break; + } + } + }); + } +} + +pub struct DatabaseGuard(OwnedRwLockReadGuard); + +impl Deref for DatabaseGuard { + type Target = OwnedRwLockReadGuard; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[rocket::async_trait] +impl<'r> FromRequest<'r> for DatabaseGuard { + type Error = (); + + async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { + let db = try_outcome!(req.guard::>>>().await); + + Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(()) + } +} + +impl Into for OwnedRwLockReadGuard { + fn into(self) -> DatabaseGuard { + DatabaseGuard(self) } } diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index f81c9de..fb11ba0 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -1,28 +1,21 @@ use super::Config; -use crate::{utils, Result}; -use log::warn; +use crate::Result; + use std::{future::Future, pin::Pin, sync::Arc}; #[cfg(feature = "rocksdb")] -use std::{collections::BTreeMap, sync::RwLock}; +pub mod rocksdb; #[cfg(feature = "sled")] -pub struct SledEngine(sled::Db); -#[cfg(feature = "sled")] -pub struct SledEngineTree(sled::Tree); +pub mod sled; -#[cfg(feature = "rocksdb")] -pub struct RocksDbEngine(rocksdb::DBWithThreadMode); -#[cfg(feature = "rocksdb")] -pub struct RocksDbEngineTree<'a> { - db: Arc, - name: &'a str, - watchers: RwLock, Vec>>>, -} +#[cfg(feature = "sqlite")] +pub mod sqlite; pub trait DatabaseEngine: Sized { fn open(config: &Config) -> Result>; fn open_tree(self: &Arc, name: &'static str) -> Result>; + fn flush(self: &Arc) -> Result<()>; } pub trait Tree: Send + Sync { @@ -32,20 +25,20 @@ pub trait Tree: Send + Sync { fn remove(&self, key: &[u8]) -> Result<()>; - fn iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a>; + fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a>; fn iter_from<'a>( &'a self, from: &[u8], backwards: bool, - ) -> Box, Box<[u8]>)> + 'a>; + ) -> Box, Vec)> + Send + 'a>; fn increment(&self, key: &[u8]) -> Result>; fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Box<[u8]>)> + Send + 'a>; + ) -> Box, Vec)> + Send + 'a>; fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; @@ -57,273 +50,3 @@ pub trait Tree: Send + Sync { Ok(()) } } - -#[cfg(feature = "sled")] -impl DatabaseEngine for SledEngine { - fn open(config: &Config) -> Result> { - Ok(Arc::new(SledEngine( - sled::Config::default() - .path(&config.database_path) - .cache_capacity(config.cache_capacity as u64) - .use_compression(true) - .open()?, - ))) - } - - fn open_tree(self: &Arc, name: &'static str) -> Result> { - Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?))) - } -} - -#[cfg(feature = "sled")] -impl Tree for SledEngineTree { - fn get(&self, key: &[u8]) -> Result>> { - Ok(self.0.get(key)?.map(|v| v.to_vec())) - } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - self.0.insert(key, value)?; - Ok(()) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - self.0.remove(key)?; - Ok(()) - } - - fn 
iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a> { - Box::new( - self.0 - .iter() - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())), - ) - } - - fn iter_from( - &self, - from: &[u8], - backwards: bool, - ) -> Box, Box<[u8]>)>> { - let iter = if backwards { - self.0.range(..from) - } else { - self.0.range(from..) - }; - - let iter = iter - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); - - if backwards { - Box::new(iter.rev()) - } else { - Box::new(iter) - } - } - - fn increment(&self, key: &[u8]) -> Result> { - Ok(self - .0 - .update_and_fetch(key, utils::increment) - .map(|o| o.expect("increment always sets a value").to_vec())?) - } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Box<[u8]>)> + Send + 'a> { - let iter = self - .0 - .scan_prefix(prefix) - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); - - Box::new(iter) - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let prefix = prefix.to_vec(); - Box::pin(async move { - self.0.watch_prefix(prefix).await; - }) - } -} - -#[cfg(feature = "rocksdb")] -impl DatabaseEngine for RocksDbEngine { - fn open(config: &Config) -> Result> { - let mut db_opts = rocksdb::Options::default(); - db_opts.create_if_missing(true); - db_opts.set_max_open_files(16); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); - db_opts.set_target_file_size_base(256 << 20); - db_opts.set_write_buffer_size(256 << 20); - - let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(512 << 10); - db_opts.set_block_based_table_factory(&block_based_options); - - let cfs = rocksdb::DBWithThreadMode::::list_cf( - &db_opts, - &config.database_path, - ) - .unwrap_or_default(); - - let mut options = rocksdb::Options::default(); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); - - let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( - &db_opts, - &config.database_path, - cfs.iter() - .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())), - )?; - - Ok(Arc::new(RocksDbEngine(db))) - } - - fn open_tree(self: &Arc, name: &'static str) -> Result> { - let mut options = rocksdb::Options::default(); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); - - // Create if it doesn't exist - let _ = self.0.create_cf(name, &options); - - Ok(Arc::new(RocksDbEngineTree { - name, - db: Arc::clone(self), - watchers: RwLock::new(BTreeMap::new()), - })) - } -} - -#[cfg(feature = "rocksdb")] -impl RocksDbEngineTree<'_> { - fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { - self.db.0.cf_handle(self.name).unwrap() - } -} - -#[cfg(feature = "rocksdb")] -impl Tree for RocksDbEngineTree<'_> { - fn get(&self, key: &[u8]) -> Result>> { - Ok(self.db.0.get_cf(self.cf(), key)?) 
- } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - let watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - } - - Ok(self.db.0.put_cf(self.cf(), key, value)?) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - Ok(self.db.0.delete_cf(self.cf(), key)?) - } - - fn iter<'a>(&'a self) -> Box, Box<[u8]>)> + Send + Sync + 'a> { - Box::new( - self.db - .0 - .iterator_cf(self.cf(), rocksdb::IteratorMode::Start), - ) - } - - fn iter_from<'a>( - &'a self, - from: &[u8], - backwards: bool, - ) -> Box, Box<[u8]>)> + 'a> { - Box::new(self.db.0.iterator_cf( - self.cf(), - rocksdb::IteratorMode::From( - from, - if backwards { - rocksdb::Direction::Reverse - } else { - rocksdb::Direction::Forward - }, - ), - )) - } - - fn increment(&self, key: &[u8]) -> Result> { - let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap(); - dbg!(stats.mem_table_total); - dbg!(stats.mem_table_unflushed); - dbg!(stats.mem_table_readers_total); - dbg!(stats.cache_total); - // TODO: atomic? - let old = self.get(key)?; - let new = utils::increment(old.as_deref()).unwrap(); - self.insert(key, &new)?; - Ok(new) - } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Box<[u8]>)> + Send + 'a> { - Box::new( - self.db - .0 - .iterator_cf( - self.cf(), - rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), - ) - .take_while(move |(k, _)| k.starts_with(&prefix)), - ) - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) - } -} diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs new file mode 100644 index 0000000..b996130 --- /dev/null +++ b/src/database/abstraction/rocksdb.rs @@ -0,0 +1,176 @@ +use super::super::Config; +use crate::{utils, Result}; + +use std::{future::Future, pin::Pin, sync::Arc}; + +use super::{DatabaseEngine, Tree}; + +use std::{collections::BTreeMap, sync::RwLock}; + +pub struct Engine(rocksdb::DBWithThreadMode); + +pub struct RocksDbEngineTree<'a> { + db: Arc, + name: &'a str, + watchers: RwLock, Vec>>>, +} + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + let mut db_opts = rocksdb::Options::default(); + db_opts.create_if_missing(true); + db_opts.set_max_open_files(16); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); + db_opts.set_target_file_size_base(256 << 20); + db_opts.set_write_buffer_size(256 << 20); + + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_size(512 << 10); + db_opts.set_block_based_table_factory(&block_based_options); + + let cfs = rocksdb::DBWithThreadMode::::list_cf( + &db_opts, + &config.database_path, + ) + .unwrap_or_default(); + + let mut options = rocksdb::Options::default(); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + let db = 
rocksdb::DBWithThreadMode::::open_cf_descriptors( + &db_opts, + &config.database_path, + cfs.iter() + .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())), + )?; + + Ok(Arc::new(Engine(db))) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + let mut options = rocksdb::Options::default(); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + // Create if it doesn't exist + let _ = self.0.create_cf(name, &options); + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: RwLock::new(BTreeMap::new()), + })) + } +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { + self.db.0.cf_handle(self.name).unwrap() + } +} + +impl Tree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.db.0.get_cf(self.cf(), key)?) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + } + + Ok(self.db.0.put_cf(self.cf(), key, value)?) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + Ok(self.db.0.delete_cf(self.cf(), key)?) + } + + fn iter<'a>(&'a self) -> Box, Vec)> + Send + Sync + 'a> { + Box::new( + self.db + .0 + .iterator_cf(self.cf(), rocksdb::IteratorMode::Start), + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + Box::new(self.db.0.iterator_cf( + self.cf(), + rocksdb::IteratorMode::From( + from, + if backwards { + rocksdb::Direction::Reverse + } else { + rocksdb::Direction::Forward + }, + ), + )) + } + + fn increment(&self, key: &[u8]) -> Result> { + let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap(); + dbg!(stats.mem_table_total); + dbg!(stats.mem_table_unflushed); + dbg!(stats.mem_table_readers_total); + dbg!(stats.cache_total); + // TODO: atomic? 
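+        // The dbg! calls above print RocksDB memory-usage statistics for debugging.
+        // The following get-then-insert is not atomic; concurrent increments of the
+        // same key could race, which is what the TODO above refers to.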
+ let old = self.get(key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.insert(key, &new)?; + Ok(new) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + Send + 'a> { + Box::new( + self.db + .0 + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), + ) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .unwrap() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } +} diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs new file mode 100644 index 0000000..271be1e --- /dev/null +++ b/src/database/abstraction/sled.rs @@ -0,0 +1,119 @@ +use super::super::Config; +use crate::{utils, Result}; +use log::warn; +use std::{future::Future, pin::Pin, sync::Arc}; + +use super::{DatabaseEngine, Tree}; + +pub struct Engine(sled::Db); + +pub struct SledEngineTree(sled::Tree); + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + Ok(Arc::new(Engine( + sled::Config::default() + .path(&config.database_path) + .cache_capacity((config.db_cache_capacity_mb * 1024 * 1024) as u64) + .use_compression(true) + .open()?, + ))) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?))) + } + + fn flush(self: &Arc) -> Result<()> { + Ok(()) // noop + } +} + +impl Tree for SledEngineTree { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.0.get(key)?.map(|v| v.to_vec())) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.0.insert(key, value)?; + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + self.0.remove(key)?; + Ok(()) + } + + fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { + Box::new( + self.0 + .iter() + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())), + ) + } + + fn iter_from( + &self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + Send> { + let iter = if backwards { + self.0.range(..from) + } else { + self.0.range(from..) + }; + + let iter = iter + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); + + if backwards { + Box::new(iter.rev()) + } else { + Box::new(iter) + } + } + + fn increment(&self, key: &[u8]) -> Result> { + Ok(self + .0 + .update_and_fetch(key, utils::increment) + .map(|o| o.expect("increment always sets a value").to_vec())?) 
+ } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + Send + 'a> { + let iter = self + .0 + .scan_prefix(prefix) + .filter_map(|r| { + if let Err(e) = &r { + warn!("Error: {}", e); + } + r.ok() + }) + .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); + + Box::new(iter) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let prefix = prefix.to_vec(); + Box::pin(async move { + self.0.watch_prefix(prefix).await; + }) + } +} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs new file mode 100644 index 0000000..22a5559 --- /dev/null +++ b/src/database/abstraction/sqlite.rs @@ -0,0 +1,444 @@ +use std::{ + collections::BTreeMap, + future::Future, + ops::Deref, + path::{Path, PathBuf}, + pin::Pin, + sync::Arc, + thread, + time::{Duration, Instant}, +}; + +use crate::{database::Config, Result}; + +use super::{DatabaseEngine, Tree}; + +use log::debug; + +use crossbeam::channel::{bounded, Sender as ChannelSender}; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; + +use tokio::sync::oneshot::Sender; + +// const SQL_CREATE_TABLE: &str = +// "CREATE TABLE IF NOT EXISTS {} {{ \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL }}"; +// const SQL_SELECT: &str = "SELECT value FROM {} WHERE key = ?"; +// const SQL_INSERT: &str = "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)"; +// const SQL_DELETE: &str = "DELETE FROM {} WHERE key = ?"; +// const SQL_SELECT_ITER: &str = "SELECT key, value FROM {}"; +// const SQL_SELECT_PREFIX: &str = "SELECT key, value FROM {} WHERE key LIKE ?||'%' ORDER BY key ASC"; +// const SQL_SELECT_ITER_FROM_FORWARDS: &str = "SELECT key, value FROM {} WHERE key >= ? ORDER BY ASC"; +// const SQL_SELECT_ITER_FROM_BACKWARDS: &str = +// "SELECT key, value FROM {} WHERE key <= ? ORDER BY DESC"; + +struct Pool { + writer: Mutex, + readers: Vec>, + spill_tracker: Arc<()>, + path: PathBuf, +} + +pub const MILLI: Duration = Duration::from_millis(1); + +enum HoldingConn<'a> { + FromGuard(MutexGuard<'a, Connection>), + FromOwned(Connection, Arc<()>), +} + +impl<'a> Deref for HoldingConn<'a> { + type Target = Connection; + + fn deref(&self) -> &Self::Target { + match self { + HoldingConn::FromGuard(guard) => guard.deref(), + HoldingConn::FromOwned(conn, _) => conn, + } + } +} + +impl Pool { + fn new>(path: P, num_readers: usize, total_cache_size_mb: f64) -> Result { + // calculates cache-size per permanent connection + // 1. convert MB to KiB + // 2. divide by permanent connections + // 3. 
round down to nearest integer + let cache_size: u32 = ((total_cache_size_mb * 1024.0) / (num_readers + 1) as f64) as u32; + + let writer = Mutex::new(Self::prepare_conn(&path, Some(cache_size))?); + + let mut readers = Vec::new(); + + for _ in 0..num_readers { + readers.push(Mutex::new(Self::prepare_conn(&path, Some(cache_size))?)) + } + + Ok(Self { + writer, + readers, + spill_tracker: Arc::new(()), + path: path.as_ref().to_path_buf(), + }) + } + + fn prepare_conn>(path: P, cache_size: Option) -> Result { + let conn = Connection::open(path)?; + + conn.pragma_update(Some(Main), "journal_mode", &"WAL".to_owned())?; + + // conn.pragma_update(Some(Main), "wal_autocheckpoint", &250)?; + + // conn.pragma_update(Some(Main), "wal_checkpoint", &"FULL".to_owned())?; + + conn.pragma_update(Some(Main), "synchronous", &"OFF".to_owned())?; + + if let Some(cache_kib) = cache_size { + conn.pragma_update(Some(Main), "cache_size", &(-Into::::into(cache_kib)))?; + } + + Ok(conn) + } + + fn write_lock(&self) -> MutexGuard<'_, Connection> { + self.writer.lock() + } + + fn read_lock(&self) -> HoldingConn<'_> { + for r in &self.readers { + if let Some(reader) = r.try_lock() { + return HoldingConn::FromGuard(reader); + } + } + + let spill_arc = self.spill_tracker.clone(); + let now_count = Arc::strong_count(&spill_arc) - 1 /* because one is held by the pool */; + + log::warn!("read_lock: all readers locked, creating spillover reader..."); + + if now_count > 1 { + log::warn!("read_lock: now {} spillover readers exist", now_count); + } + + let spilled = Self::prepare_conn(&self.path, None).unwrap(); + + return HoldingConn::FromOwned(spilled, spill_arc); + } +} + +pub struct Engine { + pool: Pool, +} + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + let pool = Pool::new( + Path::new(&config.database_path).join("conduit.db"), + config.sqlite_read_pool_size, + config.db_cache_capacity_mb, + )?; + + pool.write_lock() + .execute("CREATE TABLE IF NOT EXISTS _noop (\"key\" INT)", params![])?; + + let arc = Arc::new(Engine { pool }); + + Ok(arc) + } + + fn open_tree(self: &Arc, name: &str) -> Result> { + self.pool.write_lock().execute(format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name).as_str(), [])?; + + Ok(Arc::new(SqliteTable { + engine: Arc::clone(self), + name: name.to_owned(), + watchers: RwLock::new(BTreeMap::new()), + })) + } + + fn flush(self: &Arc) -> Result<()> { + self.pool + .write_lock() + .execute_batch( + " + PRAGMA synchronous=FULL; + BEGIN; + DELETE FROM _noop; + INSERT INTO _noop VALUES (1); + COMMIT; + PRAGMA synchronous=OFF; + ", + ) + .map_err(Into::into) + } +} + +impl Engine { + pub fn flush_wal(self: &Arc) -> Result<()> { + self.pool + .write_lock() + .execute_batch( + " + PRAGMA synchronous=FULL; PRAGMA wal_checkpoint=TRUNCATE; + BEGIN; + DELETE FROM _noop; + INSERT INTO _noop VALUES (1); + COMMIT; + PRAGMA wal_checkpoint=PASSIVE; PRAGMA synchronous=OFF; + ", + ) + .map_err(Into::into) + } +} + +pub struct SqliteTable { + engine: Arc, + name: String, + watchers: RwLock, Vec>>>, +} + +type TupleOfBytes = (Vec, Vec); + +impl SqliteTable { + fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { + Ok(guard + .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? + .query_row([key], |row| row.get(0)) + .optional()?) + } + + fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { + guard.execute( + format!( + "INSERT INTO {} (key, value) VALUES (?, ?) 
ON CONFLICT(key) DO UPDATE SET value = excluded.value", + self.name + ) + .as_str(), + [key, value], + )?; + Ok(()) + } + + fn _iter_from_thread(&self, f: F) -> Box + Send> + where + F: (for<'a> FnOnce(&'a Connection, ChannelSender)) + Send + 'static, + { + let (s, r) = bounded::(5); + + let engine = self.engine.clone(); + + thread::spawn(move || { + let _ = f(&engine.pool.read_lock(), s); + }); + + Box::new(r.into_iter()) + } +} + +macro_rules! iter_from_thread { + ($self:expr, $sql:expr, $param:expr) => { + $self._iter_from_thread(move |guard, s| { + let _ = guard + .prepare($sql) + .unwrap() + .query_map($param, |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()) + .try_for_each(|bob| s.send(bob)); + }) + }; +} + +impl Tree for SqliteTable { + fn get(&self, key: &[u8]) -> Result>> { + let guard = self.engine.pool.read_lock(); + + // let start = Instant::now(); + + let val = self.get_with_guard(&guard, key); + + // debug!("get: took {:?}", start.elapsed()); + // debug!("get key: {:?}", &key) + + val + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let guard = self.engine.pool.write_lock(); + + let start = Instant::now(); + + self.insert_with_guard(&guard, key, value)?; + + let elapsed = start.elapsed(); + if elapsed > MILLI { + debug!("insert: took {:012?} : {}", elapsed, &self.name); + } + + drop(guard); + + let watchers = self.watchers.read(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + }; + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + let guard = self.engine.pool.write_lock(); + + let start = Instant::now(); + + guard.execute( + format!("DELETE FROM {} WHERE key = ?", self.name).as_str(), + [key], + )?; + + let elapsed = start.elapsed(); + + if elapsed > MILLI { + debug!("remove: took {:012?} : {}", elapsed, &self.name); + } + // debug!("remove key: {:?}", &key); + + Ok(()) + } + + fn iter<'a>(&'a self) -> Box + Send + 'a> { + let name = self.name.clone(); + iter_from_thread!( + self, + format!("SELECT key, value FROM {}", name).as_str(), + params![] + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box + Send + 'a> { + let name = self.name.clone(); + let from = from.to_vec(); // TODO change interface? + if backwards { + iter_from_thread!( + self, + format!( + "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", + name + ) + .as_str(), + [from] + ) + } else { + iter_from_thread!( + self, + format!( + "SELECT key, value FROM {} WHERE key >= ? 
ORDER BY key ASC", + name + ) + .as_str(), + [from] + ) + } + } + + fn increment(&self, key: &[u8]) -> Result> { + let guard = self.engine.pool.write_lock(); + + let start = Instant::now(); + + let old = self.get_with_guard(&guard, key)?; + + let new = + crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some"); + + self.insert_with_guard(&guard, key, &new)?; + + let elapsed = start.elapsed(); + + if elapsed > MILLI { + debug!("increment: took {:012?} : {}", elapsed, &self.name); + } + // debug!("increment key: {:?}", &key); + + Ok(new) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box + Send + 'a> { + // let name = self.name.clone(); + // iter_from_thread!( + // self, + // format!( + // "SELECT key, value FROM {} WHERE key BETWEEN ?1 AND ?1 || X'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' ORDER BY key ASC", + // name + // ) + // .as_str(), + // [prefix] + // ) + Box::new( + self.iter_from(&prefix, false) + .take_while(move |(key, _)| key.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } + + fn clear(&self) -> Result<()> { + debug!("clear: running"); + self.engine + .pool + .write_lock() + .execute(format!("DELETE FROM {}", self.name).as_str(), [])?; + debug!("clear: ran"); + Ok(()) + } +} + +// TODO +// struct Pool { +// writer: Mutex, +// readers: [Mutex; NUM_READERS], +// } + +// // then, to pick a reader: +// for r in &pool.readers { +// if let Ok(reader) = r.try_lock() { +// // use reader +// } +// } +// // none unlocked, pick the next reader +// pool.readers[pool.counter.fetch_add(1, Relaxed) % NUM_READERS].lock() diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 2ba7bc3..b1d5b6b 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -127,7 +127,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, kind: &EventType, - ) -> Result, Box<[u8]>)>> { + ) -> Result, Vec)>> { let mut prefix = room_id .map(|r| r.to_string()) .unwrap_or_default() diff --git a/src/database/admin.rs b/src/database/admin.rs index 7826cfe..cd5fa84 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -10,6 +10,7 @@ use ruma::{ events::{room::message, EventType}, UserId, }; +use tokio::sync::{RwLock, RwLockReadGuard}; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), @@ -25,20 +26,23 @@ pub struct Admin { impl Admin { pub fn start_handler( &self, - db: Arc, + db: Arc>, mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = UserId::try_from(format!("@conduit:{}", db.globals.server_name())) - .expect("@conduit:server_name is valid"); + let guard = db.read().await; - let conduit_room = db + let conduit_user = + UserId::try_from(format!("@conduit:{}", guard.globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = guard .rooms .id_from_alias( - &format!("#admins:{}", db.globals.server_name()) + &format!("#admins:{}", guard.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), ) @@ -48,48 +52,54 @@ impl Admin { warn!("Conduit instance does not have an #admins room. Logging to that room will not work. 
Restart Conduit after creating a user to fix this."); } - let send_message = |message: message::MessageEventContent| { - if let Some(conduit_room) = &conduit_room { - db.rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: serde_json::to_value(message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &db, - ) - .unwrap(); - } - }; + drop(guard); + + let send_message = + |message: message::MessageEventContent, guard: RwLockReadGuard<'_, Database>| { + if let Some(conduit_room) = &conduit_room { + guard + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &guard, + ) + .unwrap(); + } + }; loop { tokio::select! { Some(event) = receiver.next() => { + let guard = db.read().await; + match event { AdminCommand::RegisterAppservice(yaml) => { - db.appservice.register_appservice(yaml).unwrap(); // TODO handle error + guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::ListAppservices => { - if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { let count = appservices.len(); let output = format!( "Appservices ({}): {}", count, appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") ); - send_message(message::MessageEventContent::text_plain(output)); + send_message(message::MessageEventContent::text_plain(output), guard); } else { - send_message(message::MessageEventContent::text_plain("Failed to get appservices.")); + send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard); } } AdminCommand::SendMessage(message) => { - send_message(message); + send_message(message, guard) } } } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 4bf3a21..f39520c 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -49,7 +49,7 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> Result> + Send + Sync + '_> { + pub fn iter_ids(&self) -> Result> + Send + '_> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) @@ -58,7 +58,7 @@ impl Appservice { pub fn iter_all( &self, - ) -> Result> + '_ + Send + Sync> { + ) -> Result> + '_ + Send> { Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| { Ok(( id.clone(), diff --git a/src/database/globals.rs b/src/database/globals.rs index eef478a..4242cf5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -11,11 +11,12 @@ use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ collections::{BTreeMap, HashMap}, fs, + future::Future, path::PathBuf, sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tokio::sync::Semaphore; +use tokio::sync::{broadcast, Semaphore}; use trust_dns_resolver::TokioAsyncResolver; use super::abstraction::Tree; @@ -47,6 +48,7 @@ pub struct Globals { ), // since, rx >, >, + pub rotate: RotationHandler, } struct MatrixServerVerifier { @@ -82,6 +84,31 @@ impl ServerCertVerifier for MatrixServerVerifier { } } +/// Handles "rotation" of long-polling requests. 
"Rotation" in this context is similar to "rotation" of log files and the like. +/// +/// This is utilized to have sync workers return early and release read locks on the database. +pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); + +impl RotationHandler { + pub fn new() -> Self { + let (s, r) = broadcast::channel::<()>(1); + + Self(s, r) + } + + pub fn watch(&self) -> impl Future { + let mut r = self.0.subscribe(); + + async move { + let _ = r.recv().await; + } + } + + pub fn fire(&self) { + let _ = self.0.send(()); + } +} + impl Globals { pub fn load( globals: Arc, @@ -168,6 +195,7 @@ impl Globals { bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), sync_receivers: RwLock::new(BTreeMap::new()), + rotate: RotationHandler::new(), }; fs::create_dir_all(s.get_media_folder())?; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index a27bf2c..3210cb1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -73,7 +73,7 @@ impl PushData { pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e23b804..7b64c46 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1078,13 +1078,13 @@ impl Rooms { .scan_prefix(old_shortstatehash.clone()) // Chop the old_shortstatehash out leaving behind the short state key .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) - .collect::, Box<[u8]>>>() + .collect::, Vec>>() } else { HashMap::new() }; if let Some(state_key) = &new_pdu.state_key { - let mut new_state: HashMap, Box<[u8]>> = old_state; + let mut new_state: HashMap, Vec> = old_state; let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); new_state_key.push(0xff); @@ -1450,7 +1450,7 @@ impl Rooms { &'a self, user_id: &UserId, room_id: &RoomId, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> impl Iterator, PduEvent)>> + 'a { self.pdus_since(user_id, room_id, 0) } @@ -1462,7 +1462,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> impl Iterator, PduEvent)>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1491,7 +1491,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> impl Iterator, PduEvent)>> + 'a { // Create the first part of the full pdu id let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1523,7 +1523,7 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> impl Iterator, PduEvent)>> + 'a { // Create the first part of the full pdu id let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/sending.rs b/src/database/sending.rs index ecf0761..7c9cf64 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -30,7 +30,10 @@ use ruma::{ receipt::ReceiptType, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, }; -use tokio::{select, sync::Semaphore}; +use tokio::{ + select, + sync::{RwLock, Semaphore}, +}; use super::abstraction::Tree; @@ -90,7 +93,11 @@ enum TransactionStatus { } impl Sending { - pub fn start_handler(&self, db: Arc, mut receiver: mpsc::UnboundedReceiver>) { + pub fn start_handler( + &self, + db: Arc>, + mut receiver: mpsc::UnboundedReceiver>, + ) { tokio::spawn(async move 
{ let mut futures = FuturesUnordered::new(); @@ -98,8 +105,12 @@ impl Sending { // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); + + let guard = db.read().await; + for (key, outgoing_kind, event) in - db.sending + guard + .sending .servercurrentevents .iter() .filter_map(|(key, _)| { @@ -117,17 +128,23 @@ impl Sending { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - db.sending.servercurrentevents.remove(&key).unwrap(); + guard.sending.servercurrentevents.remove(&key).unwrap(); continue; } entry.push(event); } + drop(guard); + for (outgoing_kind, events) in initial_transactions { current_transaction_status .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events(outgoing_kind.clone(), events, &db)); + futures.push(Self::handle_events( + outgoing_kind.clone(), + events, + Arc::clone(&db), + )); } loop { @@ -135,15 +152,17 @@ impl Sending { Some(response) = futures.next() => { match response { Ok(outgoing_kind) => { + let guard = db.read().await; + let prefix = outgoing_kind.get_prefix(); - for (key, _) in db.sending.servercurrentevents + for (key, _) in guard.sending.servercurrentevents .scan_prefix(prefix.clone()) { - db.sending.servercurrentevents.remove(&key).unwrap(); + guard.sending.servercurrentevents.remove(&key).unwrap(); } // Find events that have been added since starting the last request - let new_events = db.sending.servernamepduids + let new_events = guard.sending.servernamepduids .scan_prefix(prefix.clone()) .map(|(k, _)| { SendingEventType::Pdu(k[prefix.len()..].to_vec()) @@ -161,17 +180,19 @@ impl Sending { SendingEventType::Pdu(b) | SendingEventType::Edu(b) => { current_key.extend_from_slice(&b); - db.sending.servercurrentevents.insert(¤t_key, &[]).unwrap(); - db.sending.servernamepduids.remove(¤t_key).unwrap(); + guard.sending.servercurrentevents.insert(¤t_key, &[]).unwrap(); + guard.sending.servernamepduids.remove(¤t_key).unwrap(); } } } + drop(guard); + futures.push( Self::handle_events( outgoing_kind.clone(), new_events, - &db, + Arc::clone(&db), ) ); } else { @@ -192,13 +213,15 @@ impl Sending { }, Some(key) = receiver.next() => { if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { + let guard = db.read().await; + if let Ok(Some(events)) = Self::select_events( &outgoing_kind, vec![(event, key)], &mut current_transaction_status, - &db + &guard ) { - futures.push(Self::handle_events(outgoing_kind, events, &db)); + futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db))); } } } @@ -357,7 +380,7 @@ impl Sending { } #[tracing::instrument(skip(self))] - pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Box<[u8]>) -> Result<()> { + pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec) -> Result<()> { let mut key = b"$".to_vec(); key.extend_from_slice(&senderkey); key.push(0xff); @@ -403,8 +426,10 @@ impl Sending { async fn handle_events( kind: OutgoingKind, events: Vec, - db: &Database, + db: Arc>, ) -> std::result::Result { + let db = db.read().await; + match &kind { OutgoingKind::Appservice(server) => { let mut pdu_jsons = Vec::new(); @@ -543,7 +568,7 @@ impl Sending { &pusher, rules_for_user, &pdu, - db, + &db, ) .await .map(|_response| kind.clone()) diff --git a/src/error.rs b/src/error.rs index 1017fb1..f62bdee 100644 --- a/src/error.rs +++ b/src/error.rs @@ -36,6 +36,12 @@ pub enum Error { #[from] source: rocksdb::Error, }, + #[cfg(feature = "sqlite")] + #[error("There was a problem with the connection to 
the sqlite database: {source}")] + SqliteError { + #[from] + source: rusqlite::Error, + }, #[error("Could not generate an image.")] ImageError { #[from] diff --git a/src/main.rs b/src/main.rs index 99d4560..e0d2e3d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,10 +30,11 @@ use rocket::{ }, routes, Request, }; +use tokio::sync::RwLock; use tracing::span; use tracing_subscriber::{prelude::*, Registry}; -fn setup_rocket(config: Figment, data: Arc) -> rocket::Rocket { +fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) .manage(data) .mount( @@ -193,13 +194,14 @@ async fn main() { ) .merge(Env::prefixed("CONDUIT_").global()); + std::env::set_var("RUST_LOG", "warn"); + let config = raw_config .extract::() .expect("It looks like your config is invalid. Please take a look at the error"); - let db = Database::load_or_create(config.clone()) - .await - .expect("config is valid"); + let mut _span: Option = None; + let mut _enter: Option> = None; if config.allow_jaeger { let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() @@ -209,18 +211,21 @@ async fn main() { let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); Registry::default().with(telemetry).try_init().unwrap(); - let root = span!(tracing::Level::INFO, "app_start", work_units = 2); - let _enter = root.enter(); - - let rocket = setup_rocket(raw_config, db); - rocket.launch().await.unwrap(); + _span = Some(span!(tracing::Level::INFO, "app_start", work_units = 2)); + _enter = Some(_span.as_ref().unwrap().enter()); } else { - std::env::set_var("RUST_LOG", config.log); + std::env::set_var("RUST_LOG", &config.log); tracing_subscriber::fmt::init(); - - let rocket = setup_rocket(raw_config, db); - rocket.launch().await.unwrap(); } + + config.warn_deprecated(); + + let db = Database::load_or_create(config) + .await + .expect("config is valid"); + + let rocket = setup_rocket(raw_config, db); + rocket.launch().await.unwrap(); } #[catch(404)] diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8c22f79..347406d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,4 +1,4 @@ -use crate::Error; +use crate::{database::DatabaseGuard, Error}; use ruma::{ api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, identifiers::{DeviceId, UserId}, @@ -9,7 +9,7 @@ use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { - crate::{server_server, Database}, + crate::server_server, log::{debug, warn}, rocket::{ data::{self, ByteUnit, Data, FromData}, @@ -17,13 +17,12 @@ use { outcome::Outcome::*, response::{self, Responder}, tokio::io::AsyncReadExt, - Request, State, + Request, }, ruma::api::{AuthScheme, IncomingRequest}, std::collections::BTreeMap, std::convert::TryFrom, std::io::Cursor, - std::sync::Arc, }; /// This struct converts rocket requests into ruma structs by converting them into http requests @@ -49,7 +48,7 @@ where async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome { let metadata = T::Incoming::METADATA; let db = request - .guard::>>() + .guard::() .await .expect("database was loaded"); diff --git a/src/server_server.rs b/src/server_server.rs index d00e3d6..25cdd99 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,12 +1,13 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, + database::DatabaseGuard, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{debug, error, info, trace, warn}; 
use regex::Regex; -use rocket::{response::content::Json, State}; +use rocket::response::content::Json; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -432,7 +433,7 @@ pub async fn request_well_known( #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] #[tracing::instrument(skip(db))] pub fn get_server_version_route( - db: State<'_, Arc>, + db: DatabaseGuard, ) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -450,7 +451,7 @@ pub fn get_server_version_route( // Response type for this endpoint is Json because we need to calculate a signature for the response #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_route(db: State<'_, Arc>) -> Json { +pub fn get_server_keys_route(db: DatabaseGuard) -> Json { if !db.globals.allow_federation() { // TODO: Use proper types return Json("Federation is disabled.".to_owned()); @@ -497,7 +498,7 @@ pub fn get_server_keys_route(db: State<'_, Arc>) -> Json { #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_deprecated_route(db: State<'_, Arc>) -> Json { +pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json { get_server_keys_route(db) } @@ -507,7 +508,7 @@ pub fn get_server_keys_deprecated_route(db: State<'_, Arc>) -> Json>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -551,7 +552,7 @@ pub async fn get_public_rooms_filtered_route( )] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -595,7 +596,7 @@ pub async fn get_public_rooms_route( )] #[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -774,6 +775,8 @@ pub async fn send_transaction_message_route( } } + db.flush().await?; + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } @@ -1673,7 +1676,7 @@ pub(crate) fn append_incoming_pdu( )] #[tracing::instrument(skip(db, body))] pub fn get_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1698,7 +1701,7 @@ pub fn get_event_route( )] #[tracing::instrument(skip(db, body))] pub fn get_missing_events_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1747,7 +1750,7 @@ pub fn get_missing_events_route( )] #[tracing::instrument(skip(db, body))] pub fn get_event_authorization_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1791,7 +1794,7 @@ pub fn get_event_authorization_route( )] #[tracing::instrument(skip(db, body))] pub fn get_room_state_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1854,7 +1857,7 @@ pub fn get_room_state_route( )] #[tracing::instrument(skip(db, body))] pub fn get_room_state_ids_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -1906,7 +1909,7 @@ pub fn get_room_state_ids_route( )] #[tracing::instrument(skip(db, body))] pub fn 
create_join_event_template_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2075,7 +2078,7 @@ pub fn create_join_event_template_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_join_event_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2160,6 +2163,8 @@ pub async fn create_join_event_route( db.sending.send_pdu(&server, &pdu_id)?; } + db.flush().await?; + Ok(create_join_event::v2::Response { room_state: RoomState { auth_chain: auth_chain_ids @@ -2183,7 +2188,7 @@ pub async fn create_join_event_route( )] #[tracing::instrument(skip(db, body))] pub async fn create_invite_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2276,6 +2281,8 @@ pub async fn create_invite_route( )?; } + db.flush().await?; + Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), } @@ -2288,7 +2295,7 @@ pub async fn create_invite_route( )] #[tracing::instrument(skip(db, body))] pub fn get_devices_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2328,7 +2335,7 @@ pub fn get_devices_route( )] #[tracing::instrument(skip(db, body))] pub fn get_room_information_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2356,7 +2363,7 @@ pub fn get_room_information_route( )] #[tracing::instrument(skip(db, body))] pub fn get_profile_information_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2389,8 +2396,8 @@ pub fn get_profile_information_route( post("/_matrix/federation/v1/user/keys/query", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn get_keys_route( - db: State<'_, Arc>, +pub async fn get_keys_route( + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { @@ -2404,6 +2411,8 @@ pub fn get_keys_route( &db, )?; + db.flush().await?; + Ok(get_keys::v1::Response { device_keys: result.device_keys, master_keys: result.master_keys, @@ -2418,7 +2427,7 @@ pub fn get_keys_route( )] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( - db: State<'_, Arc>, + db: DatabaseGuard, body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { From 15471d9ac4e6f866af454e4a2a508871f0328739 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Jul 2021 11:28:24 +0200 Subject: [PATCH 0645/1727] update rocket and lock --- Cargo.lock | 588 +++++++++++++++++++++++++------------------- Cargo.toml | 3 +- src/database.rs | 6 +- src/ruma_wrapper.rs | 29 ++- 4 files changed, 353 insertions(+), 273 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0d7a70..befd3e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,16 +12,16 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" dependencies = [ - "getrandom 0.2.2", + "getrandom 0.2.3", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" 
dependencies = [ "memchr", ] @@ -53,6 +53,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.50" @@ -161,15 +182,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytemuck" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed57e2090563b83ba8f83366628ce535a7584c9afa4c9fc0612a03925c6df58" +checksum = "9966d2ab714d0f785dbac0a0396251a35280aeb42413281617d0209ab4898435" [[package]] name = "byteorder" @@ -185,9 +206,9 @@ checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" dependencies = [ "jobserver", ] @@ -201,12 +222,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -261,7 +276,7 @@ dependencies = [ "opentelemetry-jaeger", "parking_lot", "pretty_env_logger", - "rand 0.8.3", + "rand 0.8.4", "regex", "reqwest", "ring", @@ -293,9 +308,9 @@ checksum = "44c32f031ea41b4291d695026c023b95d59db2d8a2c7640800ed56bc8f510f22" [[package]] name = "const_fn" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" +checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" [[package]] name = "constant_time_eq" @@ -316,7 +331,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffdf8865bac3d9a3bde5bde9088ca431b11f5d37c7a578b8086af77248b76627" dependencies = [ "percent-encoding", - "time 0.2.26", + "time 0.2.27", "version_check", ] @@ -338,9 +353,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpufeatures" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" dependencies = [ "libc", ] @@ -351,7 +366,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -360,7 +375,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -374,7 +389,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -384,7 +399,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -395,7 +410,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "lazy_static", "memoffset", @@ -408,7 +423,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -418,7 +433,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] @@ -462,20 +477,22 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.13" +version = "0.99.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" +checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ "convert_case", "proc-macro2", "quote", + "rustc_version 0.3.3", "syn", ] [[package]] name = "devise" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "411cf45ac38f00df3679689616649dc12607b846db171780bb790b514a042832" dependencies = [ "devise_codegen", "devise_core", @@ -484,7 +501,8 @@ dependencies = [ [[package]] name = "devise_codegen" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cf7081f06822f1787e29359354426132cf832cc977d7a8ff747848631462ad1" dependencies = [ "devise_core", "quote", @@ -493,7 +511,8 @@ dependencies = [ [[package]] name = "devise_core" version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=df00b5#df00b5162edd53e8d496e7935774e69b5f7f6bdf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80c23631758736875d7ce08f847f296b4001b72cf90878e85b47df7ac5442147" dependencies = [ "bitflags", "proc-macro2", @@ -513,18 +532,18 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +checksum = 
"e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" dependencies = [ "libc", "redox_users", @@ -578,7 +597,7 @@ version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -620,9 +639,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "figment" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca029e813a72b7526d28273d25f3e4a2f365d1b7a1018a6f93ec9053a119763" +checksum = "790b4292c72618abbab50f787a477014fe15634f96291de45672ce46afe122df" dependencies = [ "atomic", "pear", @@ -660,9 +679,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" +checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" dependencies = [ "futures-channel", "futures-core", @@ -675,9 +694,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" +checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" dependencies = [ "futures-core", "futures-sink", @@ -685,15 +704,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" +checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" [[package]] name = "futures-executor" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" +checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" dependencies = [ "futures-core", "futures-task", @@ -702,16 +721,17 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" +checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" [[package]] name = "futures-macro" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" +checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -720,22 +740,23 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" +checksum = 
"a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" [[package]] name = "futures-task" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" +checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" [[package]] name = "futures-util" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" +checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ + "autocfg", "futures-channel", "futures-core", "futures-io", @@ -761,9 +782,9 @@ dependencies = [ [[package]] name = "generator" -version = "0.6.25" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061d3be1afec479d56fa3bd182bf966c7999ec175fcfdb87ac14d417241366c6" +checksum = "c1d9279ca822891c1a4dae06d185612cf8fc6acfe5dff37781b41297811b12ee" dependencies = [ "cc", "libc", @@ -788,18 +809,18 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.2+wasi-snapshot-preview1", ] @@ -822,9 +843,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ "bytes", "fnv", @@ -839,12 +860,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" - [[package]] name = "hashbrown" version = "0.11.2" @@ -860,23 +875,23 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown 0.11.2", + "hashbrown", ] [[package]] name = "heck" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -905,9 +920,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ "bytes", "http", @@ -916,15 +931,15 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" +checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" @@ -937,9 +952,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.7" +version = "0.14.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" +checksum = "7728a72c4c7d72665fde02204bcbd93b247721025b222ef78606f14513e0fd03" dependencies = [ "bytes", "futures-channel", @@ -951,7 +966,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", + "pin-project-lite", "socket2 0.4.0", "tokio", "tower-service", @@ -1004,12 +1019,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", - "hashbrown 0.9.1", + "hashbrown", "serde", ] @@ -1030,11 +1045,11 @@ checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" [[package]] name = "instant" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1057,9 +1072,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" @@ -1102,18 +1117,18 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.50" +version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" dependencies = [ "wasm-bindgen", ] [[package]] name = "js_int" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" +checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" dependencies = [ "serde", ] @@ -1146,9 +1161,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.93" +version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "libloading" @@ -1156,7 +1171,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -1191,9 +1206,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" dependencies = [ "scopeguard", ] @@ -1204,16 +1219,16 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "loom" -version = "0.3.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +checksum = "2111607c723d7857e0d8299d5ce7a0bf4b844d3e44f8de136b13da513eaf8fc4" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "generator", "scoped-tls", "serde", @@ -1258,15 +1273,15 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memoffset" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ "autocfg", ] @@ -1288,9 +1303,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ "libc", "log", @@ -1310,8 +1325,9 @@ dependencies = [ [[package]] name = "multer" -version = "1.2.2" -source = "git+https://github.com/rousan/multer-rs.git?rev=7e4f0c5f#7e4f0c5fe14e4c531f503922bfe04f68b32ddf17" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fdd568fea4758b30d6423f013f7171e193c34aa97828d1bd9f924fb3af30a8c" dependencies = [ "bytes", "derive_more", @@ -1321,6 +1337,7 @@ dependencies = [ "httparse", "log", "mime", + "spin 0.9.2", "tokio", "tokio-util", "twoway", @@ -1410,9 +1427,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "opaque-debug" @@ -1422,9 +1439,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" 
-version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "opentelemetry" @@ -1438,7 +1455,7 @@ dependencies = [ "lazy_static", "percent-encoding", "pin-project", - "rand 0.8.3", + "rand 0.8.4", "thiserror", ] @@ -1481,10 +1498,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", - "redox_syscall 0.2.6", + "redox_syscall", "smallvec", "winapi", ] @@ -1497,9 +1514,9 @@ checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = "pear" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" +checksum = "15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" dependencies = [ "inlinable_string", "pear_codegen", @@ -1508,9 +1525,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" +checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1541,6 +1558,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + [[package]] name = "pin-project" version = "1.0.7" @@ -1563,9 +1589,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -1642,9 +1668,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] @@ -1692,14 +1718,14 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -1714,12 +1740,12 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -1733,11 +1759,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.2", + "getrandom 0.2.3", ] [[package]] @@ -1751,37 +1777,30 @@ dependencies = [ [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] name = "redox_syscall" -version = "0.1.57" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", + "getrandom 0.2.3", + "redox_syscall", ] [[package]] @@ -1806,9 +1825,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.6" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", @@ -1817,19 +1836,18 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "byteorder", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.23" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" @@ -1842,9 +1860,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" +checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" dependencies = [ "base64 0.13.0", "bytes", @@ -1895,7 +1913,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", 
"untrusted", "web-sys", "winapi", @@ -1903,9 +1921,11 @@ dependencies = [ [[package]] name = "rocket" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" +version = "0.5.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a71c18c42a0eb15bf3816831caf0dad11e7966f2a41aaf486a701979c4dd1f2" dependencies = [ + "async-stream", "async-trait", "atomic", "atty", @@ -1921,15 +1941,16 @@ dependencies = [ "num_cpus", "parking_lot", "pin-project-lite", - "rand 0.8.3", + "rand 0.8.4", "ref-cast", "rocket_codegen", "rocket_http", "serde", "state", "tempfile", - "time 0.2.26", + "time 0.2.27", "tokio", + "tokio-stream", "tokio-util", "ubyte", "version_check", @@ -1938,21 +1959,25 @@ dependencies = [ [[package]] name = "rocket_codegen" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" +version = "0.5.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66f5fa462f7eb958bba8710c17c5d774bbbd59809fa76fb1957af7e545aea8bb" dependencies = [ "devise", "glob", "indexmap", + "proc-macro2", "quote", "rocket_http", + "syn", "unicode-xid", ] [[package]] name = "rocket_http" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=801e04bd5369eb39e126c75f6d11e1e9597304d8#801e04bd5369eb39e126c75f6d11e1e9597304d8" +version = "0.5.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23c8b7d512d2fcac2316ebe590cde67573844b99e6cc9ee0f53375fa16e25ebd" dependencies = [ "cookie", "either", @@ -1971,7 +1996,7 @@ dependencies = [ "smallvec", "stable-pattern", "state", - "time 0.2.26", + "time 0.2.27", "tokio", "tokio-rustls", "uncased", @@ -2131,7 +2156,7 @@ version = "0.19.4" source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "paste", - "rand 0.8.3", + "rand 0.8.4", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2281,7 +2306,16 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver", + "semver 0.9.0", +] + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", ] [[package]] @@ -2311,9 +2345,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" +checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" [[package]] name = "ryu" @@ -2355,9 +2389,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ "bitflags", "core-foundation", @@ -2368,9 +2402,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.2.0" +version = "2.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" +checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" dependencies = [ "core-foundation-sys", "libc", @@ -2382,7 +2416,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] @@ -2391,6 +2434,15 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "serde" version = "1.0.126" @@ -2459,7 +2511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" dependencies = [ "block-buffer", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", "opaque-debug", @@ -2482,18 +2534,18 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "signature" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" +checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" [[package]] name = "simple_asn1" @@ -2541,7 +2593,7 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "winapi", ] @@ -2562,6 +2614,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" + [[package]] name = "spki" version = "0.4.0" @@ -2591,8 +2649,9 @@ dependencies = [ [[package]] name = "state" -version = "0.4.2" -source = "git+https://github.com/SergioBenitez/state.git?rev=8f94dc#8f94dce673b7d4b0e7b96c808a84f5e2a4be4a60" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cf4f5369e6d3044b5e365c9690f451516ac8f0954084622b49ea3fde2f6de5" dependencies = [ "loom", ] @@ -2604,7 +2663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" dependencies = [ "discard", - "rustc_version", + "rustc_version 0.2.3", 
"stdweb-derive", "stdweb-internal-macros", "stdweb-internal-runtime", @@ -2648,15 +2707,15 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.70" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9505f307c872bab8eb46f77ae357c8eba1fdacead58ee5a850116b1d7f82883" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2", "quote", @@ -2665,9 +2724,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ "proc-macro2", "quote", @@ -2681,10 +2740,10 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.3", - "redox_syscall 0.2.6", + "rand 0.8.4", + "redox_syscall", "remove_dir_all", "winapi", ] @@ -2700,18 +2759,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" +checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" dependencies = [ "proc-macro2", "quote", @@ -2761,9 +2820,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" dependencies = [ "const_fn", "libc", @@ -2786,9 +2845,9 @@ dependencies = [ [[package]] name = "time-macros-impl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2814,9 +2873,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.5.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" +checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985" dependencies = [ "autocfg", "bytes", @@ -2833,9 +2892,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.1.0" +version = "1.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", @@ -2866,10 +2925,21 @@ dependencies = [ ] [[package]] -name = "tokio-util" -version = "0.6.6" +name = "tokio-stream" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ "bytes", "futures-core", @@ -2900,7 +2970,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2962,9 +3032,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" +checksum = "ab69019741fca4d98be3c62d2b75254528b5432233fd8a4d2739fec20278de48" dependencies = [ "ansi_term", "chrono", @@ -2984,12 +3054,12 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952a078337565ba39007de99b151770f41039253a31846f0a3d5cd5a4ac8eedf" +checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -2999,7 +3069,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.3", + "rand 0.8.4", "smallvec", "thiserror", "tinyvec", @@ -3009,11 +3079,11 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9c97f7d103e0f94dbe384a57908833505ae5870126492f166821b7cf685589" +checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", @@ -3035,9 +3105,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "twoway" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b40075910de3a912adbd80b5d8bad6ad10a23eeb1f5bf9d4006839e899ba5bc" +checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" dependencies = [ "memchr", "unchecked-index", @@ -3058,6 +3128,12 @@ dependencies = [ "serde", ] +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" + [[package]] name = "uncased" version = "0.9.6" @@ -3085,24 +3161,24 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "unindent" @@ -3118,9 +3194,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ "form_urlencoded", "idna", @@ -3130,9 +3206,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" @@ -3164,11 +3240,11 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -3176,9 +3252,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" dependencies = [ "bumpalo", "lazy_static", @@ -3191,11 +3267,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" +checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -3203,9 +3279,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3213,9 +3289,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" dependencies = [ "proc-macro2", "quote", @@ -3226,15 +3302,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" +checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" [[package]] name = "web-sys" -version = "0.3.50" +version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" +checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 896140c..537813f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,8 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests +# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests +rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers ruma = { git = "https://github.com/ruma/ruma", rev = "174555857ef90d49e4b9a672be9e2fe0acdc2687", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/database.rs b/src/database.rs index ac17372..14ce4f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -21,9 +21,9 @@ use log::error; use lru_cache::LruCache; use rocket::{ futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, - outcome::IntoOutcome, + outcome::{try_outcome, IntoOutcome}, request::{FromRequest, Request}, - try_outcome, State, + State, }; use ruma::{DeviceId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; @@ -608,7 +608,7 @@ impl<'r> FromRequest<'r> for DatabaseGuard { type Error = (); async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { - let db = try_outcome!(req.guard::>>>().await); + let db = try_outcome!(req.guard::<&State>>>().await); Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(()) } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 347406d..4f6318a 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -45,7 +45,10 @@ where { type Error = (); - async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome { + async fn from_data( + request: &'a Request<'_>, + data: Data<'a>, + ) -> data::Outcome<'a, Self, Self::Error> { let metadata = T::Incoming::METADATA; let db = request .guard::() @@ -102,7 +105,7 @@ where if !db.users.exists(&user_id).unwrap() { // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } // TODO: Check if appservice is allowed to be that user @@ -117,7 +120,7 @@ where if let Some(token) = token { match db.users.find_from_token(&token).unwrap() { // Unknown Token - None => return Failure((Status::raw(581), ())), + None => return Failure((Status::new(581), ())), Some((user_id, 
device_id)) => ( Some(user_id), Some(Box::::from(device_id)), @@ -127,7 +130,7 @@ where } } else { // Missing Token - return Failure((Status::raw(582), ())); + return Failure((Status::new(582), ())); } } AuthScheme::ServerSignatures => { @@ -149,7 +152,7 @@ where warn!("No Authorization header"); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -159,7 +162,7 @@ where warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -172,7 +175,7 @@ where ); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -182,7 +185,7 @@ where warn!("Invalid X-Matrix header key field: {:?}", x_matrix); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -192,7 +195,7 @@ where warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -243,7 +246,7 @@ where warn!("Failed to fetch signing keys: {}", e); // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } }; @@ -260,7 +263,7 @@ where } // Forbidden - return Failure((Status::raw(580), ())); + return Failure((Status::new(580), ())); } } } @@ -317,7 +320,7 @@ where }), Err(e) => { warn!("{:?}", e); - Failure((Status::raw(583), ())) + Failure((Status::new(583), ())) } } } @@ -343,7 +346,7 @@ pub fn response(response: RumaResponse) -> response::Res let mut response = rocket::response::Response::build(); let status = http_response.status(); - response.raw_status(status.into(), ""); + response.status(Status::new(status.as_u16())); for header in http_response.headers() { response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); From e1b89c1248680efa9b508fb30b58ba28e84d6bce Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Jul 2021 12:31:38 +0200 Subject: [PATCH 0646/1727] apply lint suggestions and version bump --- rust-toolchain | 2 +- src/client_server/account.rs | 52 +++++++++++++---------------- src/client_server/device.rs | 28 +++++++--------- src/client_server/keys.rs | 14 ++++---- src/client_server/user_directory.rs | 24 +++++++------ src/database.rs | 8 ++--- src/database/abstraction/sqlite.rs | 13 ++------ src/database/globals.rs | 23 +++++++------ src/database/media.rs | 1 + src/database/rooms.rs | 12 +++---- src/utils.rs | 2 +- 11 files changed, 81 insertions(+), 98 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index ba0a719..a63cb35 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.51.0 +1.52.0 diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 7f38eb1..0fc8b28 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -156,20 +156,18 @@ pub async fn register_route( return Err(Error::Uiaa(uiaainfo)); } // Success! 
+ } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa.create( + &UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + )?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - )?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } } @@ -529,15 +527,13 @@ pub async fn change_password_route( return Err(Error::Uiaa(uiaainfo)); } // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } db.users @@ -621,15 +617,13 @@ pub async fn deactivate_route( return Err(Error::Uiaa(uiaainfo)); } // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } // Leave all joined rooms and reject all invitations diff --git a/src/client_server/device.rs b/src/client_server/device.rs index a10d788..44b9c32 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -112,15 +112,13 @@ pub async fn delete_device_route( return Err(Error::Uiaa(uiaainfo)); } // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } db.users.remove_device(&sender_user, &body.device_id)?; @@ -166,15 +164,13 @@ pub async fn delete_devices_route( return Err(Error::Uiaa(uiaainfo)); } // Success! 
+ } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } for device_id in &body.devices { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 621e5dd..8eee408 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -141,15 +141,13 @@ pub async fn upload_signing_keys_route( return Err(Error::Uiaa(uiaainfo)); } // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + db.uiaa + .create(&sender_user, &sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); } else { - if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } if let Some(master_key) = &body.master_key { diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 14b85a6..a09d527 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -25,20 +25,22 @@ pub async fn search_users_route( avatar_url: db.users.avatar_url(&user_id).ok()?, }; - if !user + let user_id_matches = user .user_id .to_string() .to_lowercase() - .contains(&body.search_term.to_lowercase()) - && user - .display_name - .as_ref() - .filter(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }) - .is_none() - { + .contains(&body.search_term.to_lowercase()); + + let user_displayname_matches = user + .display_name + .as_ref() + .filter(|name| { + name.to_lowercase() + .contains(&body.search_term.to_lowercase()) + }) + .is_some(); + + if !user_id_matches && !user_displayname_matches { return None; } diff --git a/src/database.rs b/src/database.rs index 14ce4f0..b32f539 100644 --- a/src/database.rs +++ b/src/database.rs @@ -368,7 +368,7 @@ impl Database { if db.globals.database_version()? < 3 { // Move media to filesystem for (key, content) in db.media.mediaid_file.iter() { - if content.len() == 0 { + if content.is_empty() { continue; } @@ -614,8 +614,8 @@ impl<'r> FromRequest<'r> for DatabaseGuard { } } -impl Into for OwnedRwLockReadGuard { - fn into(self) -> DatabaseGuard { - DatabaseGuard(self) +impl From> for DatabaseGuard { + fn from(val: OwnedRwLockReadGuard) -> Self { + Self(val) } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 22a5559..25d236a 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -121,7 +121,7 @@ impl Pool { let spilled = Self::prepare_conn(&self.path, None).unwrap(); - return HoldingConn::FromOwned(spilled, spill_arc); + HoldingConn::FromOwned(spilled, spill_arc) } } @@ -250,16 +250,7 @@ macro_rules! 
iter_from_thread { impl Tree for SqliteTable { fn get(&self, key: &[u8]) -> Result>> { - let guard = self.engine.pool.read_lock(); - - // let start = Instant::now(); - - let val = self.get_with_guard(&guard, key); - - // debug!("get: took {:?}", start.elapsed()); - // debug!("get key: {:?}", &key) - - val + self.get_with_guard(&self.engine.pool.read_lock(), key) } fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { diff --git a/src/database/globals.rs b/src/database/globals.rs index 4242cf5..307ec40 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -16,7 +16,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tokio::sync::{broadcast, Semaphore}; +use tokio::sync::{broadcast, watch::Receiver, Semaphore}; use trust_dns_resolver::TokioAsyncResolver; use super::abstraction::Tree; @@ -26,6 +26,11 @@ pub const COUNTER: &[u8] = b"c"; type WellKnownMap = HashMap, (String, String)>; type TlsNameMap = HashMap; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +type SyncHandle = ( + Option, // since + Receiver>>, // rx +); + pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -39,15 +44,7 @@ pub struct Globals { pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock< - BTreeMap< - (UserId, Box), - ( - Option, - tokio::sync::watch::Receiver>>, - ), // since, rx - >, - >, + pub sync_receivers: RwLock), SyncHandle>>, pub rotate: RotationHandler, } @@ -109,6 +106,12 @@ impl RotationHandler { } } +impl Default for RotationHandler { + fn default() -> Self { + Self::new() + } +} + impl Globals { pub fn load( globals: Arc, diff --git a/src/database/media.rs b/src/database/media.rs index a1fe26e..f576ca4 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -54,6 +54,7 @@ impl Media { } /// Uploads or replaces a file thumbnail. + #[allow(clippy::too_many_arguments)] pub async fn upload_thumbnail( &self, mxc: String, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7b64c46..4d66f9f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -533,17 +533,15 @@ impl Rooms { r }, |pduid| { - let r = Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { Error::bad_database("Invalid pduid in eventid_pduid.") - })?)); - r + })?)) }, )? .map(|pdu| { - let r = serde_json::from_slice(&pdu) + serde_json::from_slice(&pdu) .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new); - r + .map(Arc::new) }) .transpose()? 
{ @@ -1112,7 +1110,7 @@ impl Rooms { } }; - new_state.insert(shortstatekey, shorteventid.into()); + new_state.insert(shortstatekey, shorteventid); let new_state_hash = self.calculate_hash( &new_state diff --git a/src/utils.rs b/src/utils.rs index b8ce303..a4dfe03 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -137,7 +137,7 @@ pub fn deserialize_from_str< where E: serde::de::Error, { - v.parse().map_err(|e| serde::de::Error::custom(e)) + v.parse().map_err(serde::de::Error::custom) } } deserializer.deserialize_str(Visitor(std::marker::PhantomData)) From ac0027756ec37e7b08b7463fc902f8ca6810eafc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Jul 2021 10:22:04 +0200 Subject: [PATCH 0647/1727] improvement: more efficient state res --- src/database.rs | 2 +- src/server_server.rs | 23 ++++++----------------- 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/src/database.rs b/src/database.rs index b32f539..1fd963b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -279,7 +279,7 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, prevevent_parent: builder.open_tree("prevevent_parent")?, - pdu_cache: RwLock::new(LruCache::new(1_000_000)), + pdu_cache: RwLock::new(LruCache::new(10_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/server_server.rs b/src/server_server.rs index 25cdd99..458e32d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -46,7 +46,7 @@ use ruma::{ receipt::ReceiptType, serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, Event, RoomVersion, StateMap}, + state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -1219,18 +1219,10 @@ pub fn handle_incoming_pdu<'a>( let mut auth_events = vec![]; for map in &fork_states { - let mut state_auth = vec![]; - for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_and_handle_events(&db, origin, &[auth_id.clone()], pub_key_map) - .await - { - // This should always contain exactly one element when Ok - Ok(events) => state_auth.extend_from_slice(&events), - Err(e) => { - debug!("Event was not present: {}", e); - } - } - } + let state_auth = map + .values() + .flat_map(|pdu| pdu.auth_events.clone()) + .collect(); auth_events.push(state_auth); } @@ -1245,10 +1237,7 @@ pub fn handle_incoming_pdu<'a>( .collect::>() }) .collect::>(), - auth_events - .into_iter() - .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) - .collect(), + auth_events, &|id| { let res = db.rooms.get_pdu(id); if let Err(e) = &res { From 8c4431248535356ca811fb9302f6bd3cfa210baa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 30 Jun 2021 20:31:51 +0200 Subject: [PATCH 0648/1727] fix: e2ee verification --- src/client_server/sync.rs | 4 +++- src/client_server/to_device.rs | 3 +++ src/database/abstraction/sled.rs | 2 +- src/database/rooms.rs | 2 +- src/database/users.rs | 9 ++++----- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index c57f1da..7d3af6b 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -89,7 +89,9 @@ pub async fn sync_events_route( let we_have_to_wait = rx.borrow().is_none(); if we_have_to_wait { - let _ = rx.changed().await; + if let Err(e) = rx.changed().await { 
+ error!("Error waiting for sync: {}", e); + } } let result = match rx diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 3bb135e..9faa255 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -19,7 +19,9 @@ pub async fn send_event_to_device_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); + // TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved // Check if this is a new transaction id + /* if db .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? @@ -27,6 +29,7 @@ pub async fn send_event_to_device_route( { return Ok(send_event_to_device::Response.into()); } + */ for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 271be1e..e58184d 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -64,7 +64,7 @@ impl Tree for SledEngineTree { backwards: bool, ) -> Box, Vec)> + Send> { let iter = if backwards { - self.0.range(..from) + self.0.range(..=from) } else { self.0.range(from..) }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4d66f9f..af0761f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1495,7 +1495,7 @@ impl Rooms { prefix.push(0xff); let mut current = prefix.clone(); - current.extend_from_slice(&until.to_be_bytes()); + current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` let current: &[u8] = ¤t; diff --git a/src/database/users.rs b/src/database/users.rs index f99084f..1480d3f 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -726,10 +726,9 @@ impl Users { json.insert("sender".to_owned(), sender.to_string().into()); json.insert("content".to_owned(), content); - self.todeviceid_events.insert( - &key, - &serde_json::to_vec(&json).expect("Map::to_vec always works"), - )?; + let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); + + self.todeviceid_events.insert(&key, &value)?; Ok(()) } @@ -774,7 +773,7 @@ impl Users { for (key, _) in self .todeviceid_events - .iter_from(&last, true) + .iter_from(&last, true) // this includes last .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(key, _)| { Ok::<_, Error>(( From 1c25492a7eccf58cc8faa627c574b38ffbbab034 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 1 Jul 2021 11:06:05 +0200 Subject: [PATCH 0649/1727] fix: stuck messages Conduit did not send the event in /sync because of a race condition. There is a brief moment in time where Conduit accepted the event, but did not store it yet. So when the client /syncs it updates the since token without sending the event. I hope I fixed it by significantly shortening the race-condition period. --- src/client_server/membership.rs | 7 ------- src/database/rooms.rs | 27 +++++++++++++++++---------- src/ruma_wrapper.rs | 1 + src/server_server.rs | 9 +-------- 4 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4667f25..68a3ea6 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -619,16 +619,9 @@ async fn join_room_by_id_helper( // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - count, - &pdu_id, &[pdu.event_id.clone()], db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index af0761f..75ef334 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -666,11 +666,10 @@ impl Rooms { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - count: u64, - pdu_id: &[u8], leaves: &[EventId], db: &Database, - ) -> Result<()> { + ) -> Result> { + // returns pdu id // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily // interpret things like membership changes @@ -708,20 +707,30 @@ impl Rooms { self.replace_pdu_leaves(&pdu.room_id, leaves)?; + let count1 = db.globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; + let count2 = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count2.to_be_bytes()); + + // There's a brief moment of time here where the count is updated but the pdu does not + // exist. This could theoretically lead to dropped pdus, but it's extremely rare + self.pduid_pdu.insert( - pdu_id, + &pdu_id, &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; // This also replaces the eventid of any outliers with the correct // pduid, removing the place holder. - self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &pdu_id)?; // See if the event matches any known pushers for user in db @@ -909,7 +918,7 @@ impl Rooms { _ => {} } - Ok(()) + Ok(pdu_id) } pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -1354,11 +1363,9 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = self.append_to_state(&pdu, &db.globals)?; - self.append_pdu( + let pdu_id = self.append_pdu( &pdu, pdu_json, - count, - &pdu_id, // Since this PDU references all pdu_leaves we can update the leaves // of the room &[pdu.event_id.clone()], diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4f6318a..a4beac6 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -320,6 +320,7 @@ where }), Err(e) => { warn!("{:?}", e); + // Bad Json Failure((Status::new(583), ())) } } diff --git a/src/server_server.rs b/src/server_server.rs index 458e32d..3515d85 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1579,21 +1579,14 @@ pub(crate) fn append_incoming_pdu( new_room_leaves: HashSet, state: &StateMap>, ) -> Result> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
db.rooms .set_event_state(&pdu.event_id, state, &db.globals)?; - db.rooms.append_pdu( + let pdu_id = db.rooms.append_pdu( pdu, pdu_json, - count, - &pdu_id, &new_room_leaves.into_iter().collect::>(), &db, )?; From e15e6d4405d77e13b9a12c54f7e2a36f6005ce74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 1 Jul 2021 19:55:26 +0200 Subject: [PATCH 0650/1727] improvement: efficient /sync, mutex for federation transactions --- src/client_server/membership.rs | 44 ++-- src/client_server/sync.rs | 382 +++++++++++++++++--------------- src/database/globals.rs | 6 +- src/database/rooms.rs | 5 +- src/server_server.rs | 185 ++++++++++------ 5 files changed, 359 insertions(+), 263 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 68a3ea6..9401b76 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -902,19 +902,37 @@ pub async fn invite_helper( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = - server_server::handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) - .await - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + + let pdu_id = server_server::handle_incoming_pdu( + &origin, + &event_id, + &room_id, + value, + true, + &db, + &pub_key_map, + ) + .await + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); for server in db .rooms diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7d3af6b..092c4a9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -227,13 +227,16 @@ async fn sync_helper( // Database queries: - let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?; + let current_shortstatehash = db + .rooms + .current_shortstatehash(&room_id)? + .expect("All rooms have state"); - // These type is Option>. The outer Option is None when there is no event between - // since and the current room state, meaning there should be no updates. - // The inner Option is None when there is an event, but there is no state hash associated - // with it. This can happen for the RoomCreate event, so all updates should arrive. - let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next(); + let first_pdu_before_since = db + .rooms + .pdus_until(&sender_user, &room_id, since) + .next() + .transpose()?; let pdus_after_since = db .rooms @@ -241,11 +244,78 @@ async fn sync_helper( .next() .is_some(); - let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { - db.rooms - .pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id) - .ok()? 
- }); + let since_shortstatehash = first_pdu_before_since + .as_ref() + .map(|pdu| { + db.rooms + .pdu_shortstatehash(&pdu.1.event_id) + .transpose() + .expect("all pdus have state") + }) + .transpose()?; + + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = db.rooms.room_members(&room_id).count(); + let invited_member_count = db.rooms.room_members_invited(&room_id).count(); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in db + .rooms + .all_pdus(&sender_user, &room_id) + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .map(|(_, pdu)| { + let content = serde_json::from_value::< + ruma::events::room::member::MemberEventContent, + >(pdu.content.clone()) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (db.rooms.is_joined(&user_id, &room_id)? + || db.rooms.is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(|u| u.ok()) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + ( + Some(joined_member_count), + Some(invited_member_count), + heroes, + ) + }; let ( heroes, @@ -253,63 +323,107 @@ async fn sync_helper( invited_member_count, joined_since_last_sync, state_events, - ) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { - let current_state = db.rooms.room_state_full(&room_id)?; - let current_members = current_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .map(|(key, value)| (&key.1, value)) // Only keep state key - .collect::>(); - let encrypted_room = current_state - .get(&(EventType::RoomEncryption, "".to_owned())) - .is_some(); - let since_state = since_shortstatehash - .as_ref() - .map(|since_shortstatehash| { - since_shortstatehash - .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash)) - .transpose() - }) - .transpose()?; + ) = if since_shortstatehash.is_none() { + // Probably since = 0, we will do an initial sync + let (joined_member_count, invited_member_count, heroes) = calculate_counts(); - let since_encryption = since_state.as_ref().map(|state| { - state - .as_ref() - .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) - }); + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + let state_events = current_state_ids + .iter() + .map(|id| db.rooms.get_pdu(id)) + .filter_map(|r| r.ok().flatten()) + .collect::>(); + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else if !pdus_after_since || since_shortstatehash == Some(current_shortstatehash) { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Incremental /sync + let since_shortstatehash = 
since_shortstatehash.unwrap(); + + let since_sender_member = db + .rooms + .state_get( + since_shortstatehash, + &EventType::RoomMember, + sender_user.as_str(), + )? + .and_then(|pdu| { + serde_json::from_value::>( + pdu.content.clone(), + ) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = since_sender_member + .map_or(true, |member| member.membership != MembershipState::Join); + + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + + let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; + + let state_events = if joined_since_last_sync { + current_state_ids + .iter() + .map(|id| db.rooms.get_pdu(id)) + .filter_map(|r| r.ok().flatten()) + .collect::>() + } else { + current_state_ids + .difference(&since_state_ids) + .filter(|id| { + !timeline_pdus + .iter() + .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id) + }) + .map(|id| db.rooms.get_pdu(id)) + .filter_map(|r| r.ok().flatten()) + .collect() + }; + + let encrypted_room = db + .rooms + .state_get(current_shortstatehash, &EventType::RoomEncryption, "")? + .is_some(); + + let since_encryption = + db.rooms + .state_get(since_shortstatehash, &EventType::RoomEncryption, "")?; // Calculations: - let new_encrypted_room = - encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none()); + let new_encrypted_room = encrypted_room && since_encryption.is_none(); - let send_member_count = since_state.as_ref().map_or(true, |since_state| { - since_state.as_ref().map_or(true, |since_state| { - current_members.len() - != since_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .count() - }) - }); - - let since_sender_member = since_state.as_ref().map(|since_state| { - since_state.as_ref().and_then(|state| { - state - .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) - .and_then(|pdu| { - serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }) - }) - }); + let send_member_count = state_events + .iter() + .any(|event| event.kind == EventType::RoomMember); if encrypted_room { - for (user_id, current_member) in current_members { + for (user_id, current_member) in db + .rooms + .room_members(&room_id) + .filter_map(|r| r.ok()) + .filter_map(|user_id| { + db.rooms + .state_get( + current_shortstatehash, + &EventType::RoomMember, + user_id.as_str(), + ) + .ok() + .flatten() + .map(|current_member| (user_id, current_member)) + }) + { let current_membership = serde_json::from_value::< Raw, >(current_member.content.clone()) @@ -318,31 +432,23 @@ async fn sync_helper( .map_err(|_| Error::bad_database("Invalid PDU in database."))? .membership; - let since_membership = - since_state - .as_ref() - .map_or(MembershipState::Leave, |since_state| { - since_state - .as_ref() - .and_then(|since_state| { - since_state - .get(&(EventType::RoomMember, user_id.clone())) - .and_then(|since_member| { - serde_json::from_value::< - Raw, - >( - since_member.content.clone() - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid PDU in database.") - }) - .ok() - }) - }) - .map_or(MembershipState::Leave, |member| member.membership) - }); + let since_membership = db + .rooms + .state_get( + since_shortstatehash, + &EventType::RoomMember, + user_id.as_str(), + )? 
+ .and_then(|since_member| { + serde_json::from_value::< + Raw, + >(since_member.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }) + .map_or(MembershipState::Leave, |member| member.membership); let user_id = UserId::try_from(user_id.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; @@ -364,10 +470,6 @@ async fn sync_helper( } } - let joined_since_last_sync = since_sender_member.map_or(true, |member| { - member.map_or(true, |member| member.membership != MembershipState::Join) - }); - if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( @@ -387,100 +489,11 @@ async fn sync_helper( } let (joined_member_count, invited_member_count, heroes) = if send_member_count { - let joined_member_count = db.rooms.room_members(&room_id).count(); - let invited_member_count = db.rooms.room_members_invited(&room_id).count(); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in db - .rooms - .all_pdus(&sender_user, &room_id) - .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) - .map(|(_, pdu)| { - let content = serde_json::from_value::< - ruma::events::room::member::MemberEventContent, - >(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = - UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) 
- { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - ( - Some(joined_member_count), - Some(invited_member_count), - heroes, - ) + calculate_counts() } else { (None, None, Vec::new()) }; - let state_events = if joined_since_last_sync { - current_state - .iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect() - } else { - match since_state { - None => Vec::new(), - Some(Some(since_state)) => current_state - .iter() - .filter(|(key, value)| { - since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) - }) - .filter(|(_, value)| { - !timeline_pdus.iter().any(|(_, timeline_pdu)| { - timeline_pdu.kind == value.kind - && timeline_pdu.state_key == value.state_key - }) - }) - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - Some(None) => current_state - .iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - } - }; - ( heroes, joined_member_count, @@ -488,8 +501,6 @@ async fn sync_helper( joined_since_last_sync, state_events, ) - } else { - (Vec::new(), None, None, false, Vec::new()) }; // Look for device list updates in this room @@ -580,7 +591,10 @@ async fn sync_helper( events: room_events, }, state: sync_events::State { - events: state_events, + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), }, ephemeral: sync_events::Ephemeral { events: edus }, }; diff --git a/src/database/globals.rs b/src/database/globals.rs index 307ec40..b6fe9e1 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -5,7 +5,7 @@ use ruma::{ client::r0::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, + DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, }; use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ @@ -16,7 +16,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tokio::sync::{broadcast, watch::Receiver, Semaphore}; +use tokio::sync::{broadcast, watch::Receiver, Mutex, Semaphore}; use trust_dns_resolver::TokioAsyncResolver; use super::abstraction::Tree; @@ -45,6 +45,7 @@ pub struct Globals { pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, pub sync_receivers: RwLock), SyncHandle>>, + pub roomid_mutex: RwLock>>>, pub rotate: RotationHandler, } @@ -197,6 +198,7 @@ impl Globals { bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), + roomid_mutex: RwLock::new(BTreeMap::new()), sync_receivers: RwLock::new(BTreeMap::new()), rotate: RotationHandler::new(), }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 75ef334..060bf45 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -21,7 +21,7 @@ use ruma::{ uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, sync::{Arc, RwLock}, @@ -89,7 +89,7 @@ pub struct Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, 
this gives the full state for the given state_hash. - pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { + pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid .scan_prefix(shortstatehash.to_be_bytes().to_vec()) @@ -1215,6 +1215,7 @@ impl Rooms { state_key, redacts, } = pdu_builder; + // TODO: Make sure this isn't called twice in parallel let prev_events = self .get_pdu_leaves(&room_id)? diff --git a/src/server_server.rs b/src/server_server.rs index 3515d85..f90caad 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -625,13 +625,44 @@ pub async fn send_transaction_message_route( } }; + // 0. Check the server is in the room + let room_id = match value + .get("room_id") + .and_then(|id| RoomId::try_from(id.as_str()?).ok()) + { + Some(id) => id, + None => { + // Event is invalid + resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_string())); + continue; + } + }; + + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - handle_incoming_pdu(&body.origin, &event_id, value, true, &db, &pub_key_map) - .await - .map(|_| ()), + handle_incoming_pdu( + &body.origin, + &event_id, + &room_id, + value, + true, + &db, + &pub_key_map, + ) + .await + .map(|_| ()), ); + drop(mutex_lock); let elapsed = start_time.elapsed(); if elapsed > Duration::from_secs(1) { @@ -784,8 +815,8 @@ pub async fn send_transaction_message_route( type AsyncRecursiveResult<'a, T, E> = Pin> + 'a + Send>>; /// When receiving an event one needs to: -/// 0. Skip the PDU if we already know about it -/// 1. Check the server is in the room +/// 0. Check the server is in the room +/// 1. Skip the PDU if we already know about it /// 2. Check signatures, otherwise drop /// 3. Check content hash, redact if doesn't match /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not @@ -810,6 +841,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin( origin: &'a ServerName, event_id: &'a EventId, + room_id: &'a RoomId, value: BTreeMap, is_timeline_event: bool, db: &'a Database, @@ -817,24 +849,6 @@ pub fn handle_incoming_pdu<'a>( ) -> AsyncRecursiveResult<'a, Option>, String> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // 0. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { - return Ok(Some(pdu_id.to_vec())); - } - - // 1. Check the server is in the room - let room_id = match value - .get("room_id") - .and_then(|id| RoomId::try_from(id.as_str()?).ok()) - { - Some(id) => id, - None => { - // Event is invalid - return Err("Event needs a valid RoomId.".to_string()); - } - }; - match db.rooms.exists(&room_id) { Ok(true) => {} _ => { @@ -842,6 +856,11 @@ pub fn handle_incoming_pdu<'a>( } } + // 1. Skip the PDU if we already have it as a timeline event + if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { + return Ok(Some(pdu_id.to_vec())); + } + // We go through all the signatures we see on the value and fetch the corresponding signing // keys fetch_required_signing_keys(&value, &pub_key_map, db) @@ -901,7 +920,7 @@ pub fn handle_incoming_pdu<'a>( // 5. 
Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // EDIT: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, pub_key_map) + fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &room_id, pub_key_map) .await .map_err(|e| e.to_string())?; @@ -1002,13 +1021,13 @@ pub fn handle_incoming_pdu<'a>( if incoming_pdu.prev_events.len() == 1 { let prev_event = &incoming_pdu.prev_events[0]; - let state_vec = db + let state = db .rooms .pdu_shortstatehash(prev_event) .map_err(|_| "Failed talking to db".to_owned())? .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) .flatten(); - if let Some(mut state_vec) = state_vec { + if let Some(mut state) = state { if db .rooms .get_pdu(prev_event) @@ -1018,25 +1037,31 @@ pub fn handle_incoming_pdu<'a>( .state_key .is_some() { - state_vec.push(prev_event.clone()); + state.insert(prev_event.clone()); } state_at_incoming_event = Some( - fetch_and_handle_events(db, origin, &state_vec, pub_key_map) - .await - .map_err(|_| "Failed to fetch state events locally".to_owned())? - .into_iter() - .map(|pdu| { + fetch_and_handle_events( + db, + origin, + &state.into_iter().collect::>(), + &room_id, + pub_key_map, + ) + .await + .map_err(|_| "Failed to fetch state events locally".to_owned())? + .into_iter() + .map(|pdu| { + ( ( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("events from state_full_ids are state events"), - ), - pdu, - ) - }) - .collect(), + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("events from state_full_ids are state events"), + ), + pdu, + ) + }) + .collect(), ); } // TODO: set incoming_auth_events? @@ -1059,12 +1084,18 @@ pub fn handle_incoming_pdu<'a>( { Ok(res) => { debug!("Fetching state events at event."); - let state_vec = - match fetch_and_handle_events(&db, origin, &res.pdu_ids, pub_key_map).await - { - Ok(state) => state, - Err(_) => return Err("Failed to fetch state events.".to_owned()), - }; + let state_vec = match fetch_and_handle_events( + &db, + origin, + &res.pdu_ids, + &room_id, + pub_key_map, + ) + .await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch state events.".to_owned()), + }; let mut state = BTreeMap::new(); for pdu in state_vec { @@ -1090,8 +1121,14 @@ pub fn handle_incoming_pdu<'a>( } debug!("Fetching auth chain events at event."); - match fetch_and_handle_events(&db, origin, &res.auth_chain_ids, pub_key_map) - .await + match fetch_and_handle_events( + &db, + origin, + &res.auth_chain_ids, + &room_id, + pub_key_map, + ) + .await { Ok(state) => state, Err(_) => return Err("Failed to fetch auth chain.".to_owned()), @@ -1313,6 +1350,7 @@ pub(crate) fn fetch_and_handle_events<'a>( db: &'a Database, origin: &'a ServerName, events: &'a [EventId], + room_id: &'a RoomId, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveResult<'a, Vec>, Error> { Box::pin(async move { @@ -1366,6 +1404,7 @@ pub(crate) fn fetch_and_handle_events<'a>( match handle_incoming_pdu( origin, &event_id, + &room_id, value.clone(), false, db, @@ -1854,7 +1893,11 @@ pub fn get_room_state_ids_route( "Pdu state not found.", ))?; - let pdu_ids = db.rooms.state_full_ids(shortstatehash)?; + let pdu_ids = db + .rooms + .state_full_ids(shortstatehash)? 
+ .into_iter() + .collect(); let mut auth_chain_ids = BTreeSet::::new(); let mut todo = BTreeSet::new(); @@ -2100,18 +2143,36 @@ pub async fn create_join_event_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) - .await - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id = handle_incoming_pdu( + &origin, + &event_id, + &body.room_id, + value, + true, + &db, + &pub_key_map, + ) + .await + .map_err(|_| { + Error::BadRequest( ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); let state_ids = db.rooms.state_full_ids(shortstatehash)?; From e12b1ff8639ad7beb81b13b247f100300019387f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Jul 2021 15:44:25 +0200 Subject: [PATCH 0651/1727] improvement: locks --- src/client_server/account.rs | 35 ++++++++++++++- src/client_server/membership.rs | 78 ++++++++++++++++++++++++++++----- src/client_server/message.rs | 14 ++++++ src/client_server/profile.rs | 34 +++++++++++--- src/client_server/redact.rs | 15 +++++++ src/client_server/room.rs | 40 ++++++++++++++++- src/client_server/state.rs | 13 ++++++ src/client_server/sync.rs | 38 ++++++++++++++++ src/database/admin.rs | 71 ++++++++++++++++++------------ src/database/globals.rs | 2 + src/database/rooms.rs | 14 +++++- src/server_server.rs | 21 +++++++-- 12 files changed, 321 insertions(+), 54 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 0fc8b28..9e16d90 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, convert::TryInto}; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; @@ -238,6 +238,16 @@ pub async fn register_route( let room_id = RoomId::new(db.globals.server_name()); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; @@ -255,6 +265,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 2. Make conduit bot join @@ -276,6 +287,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 3. Power levels @@ -300,6 +312,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 4.1 Join Rules @@ -317,6 +330,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 4.2 History Visibility @@ -336,6 +350,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 4.3 Guest Access @@ -353,6 +368,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // 6. 
Events implied by name and topic @@ -372,6 +388,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; db.rooms.build_and_append_pdu( @@ -388,6 +405,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; // Room alias @@ -410,6 +428,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -433,6 +452,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -452,6 +472,7 @@ pub async fn register_route( &user_id, &room_id, &db, + &mutex_lock, )?; // Send welcome message @@ -470,6 +491,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, + &mutex_lock, )?; } @@ -641,6 +663,16 @@ pub async fn deactivate_route( third_party_invite: None, }; + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, @@ -652,6 +684,7 @@ pub async fn deactivate_route( &sender_user, &room_id, &db, + &mutex_lock, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 9401b76..a74950b 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -203,6 +203,16 @@ pub async fn kick_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, @@ -214,8 +224,11 @@ pub async fn kick_user_route( &sender_user, &body.room_id, &db, + &mutex_lock, )?; + drop(mutex_lock); + db.flush().await?; Ok(kick_user::Response::new().into()) @@ -261,6 +274,16 @@ pub async fn ban_user_route( }, )?; + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, @@ -272,8 +295,11 @@ pub async fn ban_user_route( &sender_user, &body.room_id, &db, + &mutex_lock, )?; + drop(mutex_lock); + db.flush().await?; Ok(ban_user::Response::new().into()) @@ -310,6 +336,16 @@ pub async fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, @@ -321,8 +357,11 @@ pub async fn unban_user_route( &sender_user, &body.room_id, &db, + &mutex_lock, )?; + drop(mutex_lock); + db.flush().await?; Ok(unban_user::Response::new().into()) @@ -446,6 +485,16 @@ async fn join_room_by_id_helper( ) -> ConduitResult { let sender_user = sender_user.expect("user is authenticated"); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + // Ask a remote server if we don't have this room if !db.rooms.exists(&room_id)? 
&& room_id.server_name() != db.globals.server_name() { let mut make_join_response_and_server = Err(Error::BadServerResponse( @@ -649,9 +698,12 @@ async fn join_room_by_id_helper( &sender_user, &room_id, &db, + &mutex_lock, )?; } + drop(mutex_lock); + db.flush().await?; Ok(join_room_by_id::Response::new(room_id.clone()).into()) @@ -721,13 +773,23 @@ async fn validate_and_add_event_id( Ok((event_id, value)) } -pub async fn invite_helper( +pub async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, db: &Database, is_direct: bool, ) -> Result<()> { + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + if user_id.server_name() != db.globals.server_name() { let prev_events = db .rooms @@ -863,6 +925,8 @@ pub async fn invite_helper( ) .expect("event is valid, we just created it"); + drop(mutex_lock); + let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; let response = db .sending @@ -902,16 +966,6 @@ pub async fn invite_helper( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let mutex = Arc::clone( - db.globals - .roomid_mutex - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let pdu_id = server_server::handle_incoming_pdu( &origin, &event_id, @@ -932,7 +986,6 @@ pub async fn invite_helper( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", ))?; - drop(mutex_lock); for server in db .rooms @@ -964,6 +1017,7 @@ pub async fn invite_helper( &sender_user, room_id, &db, + &mutex_lock, )?; Ok(()) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 7e898b1..3d8218c 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -10,6 +10,7 @@ use ruma::{ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, + sync::Arc, }; #[cfg(feature = "conduit_bin")] @@ -27,6 +28,16 @@ pub async fn send_message_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + // Check if this is a new transaction id if let Some(response) = db.transaction_ids @@ -64,6 +75,7 @@ pub async fn send_message_event_route( &sender_user, &body.room_id, &db, + &mutex_lock, )?; db.transaction_ids.add_txnid( @@ -73,6 +85,8 @@ pub async fn send_message_event_route( event_id.as_bytes(), )?; + drop(mutex_lock); + db.flush().await?; Ok(send_message_event::Response::new(event_id).into()) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 5281a4a..d947bbe 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -9,7 +9,7 @@ use ruma::{ events::EventType, serde::Raw, }; -use std::convert::TryInto; +use std::{convert::TryInto, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -69,9 +69,19 @@ pub async fn set_displayname_route( }) .filter_map(|r| r.ok()) { - let _ = db - .rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + + let _ = + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, 
&mutex_lock); // Presence update db.rooms.edus.update_presence( @@ -171,9 +181,19 @@ pub async fn set_avatar_url_route( }) .filter_map(|r| r.ok()) { - let _ = db - .rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + + let _ = + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock); // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 3db2771..2e4c651 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; use ruma::{ api::client::r0::redact::redact_event, @@ -18,6 +20,16 @@ pub async fn redact_event_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let event_id = db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomRedaction, @@ -32,8 +44,11 @@ pub async fn redact_event_route( &sender_user, &body.room_id, &db, + &mutex_lock, )?; + drop(mutex_lock); + db.flush().await?; Ok(redact_event::Response { event_id }.into()) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 43625fe..f48c5e9 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -15,7 +15,7 @@ use ruma::{ serde::Raw, RoomAliasId, RoomId, RoomVersionId, }; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom}; +use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -33,6 +33,16 @@ pub async fn create_room_route( let room_id = RoomId::new(db.globals.server_name()); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let alias = body .room_alias_name .as_ref() @@ -69,6 +79,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 2. Let the room creator join @@ -90,6 +101,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 3. Power levels @@ -144,6 +156,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 4. Events set by preset @@ -170,6 +183,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 4.2 History Visibility @@ -187,6 +201,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 4.3 Guest Access @@ -212,6 +227,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; // 5. Events listed in initial_state @@ -227,7 +243,7 @@ pub async fn create_room_route( } db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock)?; } // 6. Events implied by name and topic @@ -248,6 +264,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; } @@ -266,10 +283,12 @@ pub async fn create_room_route( &sender_user, &room_id, &db, + &mutex_lock, )?; } // 7. 
Events implied by invite (and TODO: invite_3pid) + drop(mutex_lock); for user_id in &body.invite { let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; } @@ -340,6 +359,16 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(db.globals.server_name()); + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions let tombstone_event_id = db.rooms.build_and_append_pdu( @@ -357,6 +386,7 @@ pub async fn upgrade_room_route( sender_user, &body.room_id, &db, + &mutex_lock, )?; // Get the old room federations status @@ -397,6 +427,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, + &mutex_lock, )?; // Join the new room @@ -418,6 +449,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, + &mutex_lock, )?; // Recommended transferable state events list from the specs @@ -451,6 +483,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, + &mutex_lock, )?; } @@ -494,8 +527,11 @@ pub async fn upgrade_room_route( sender_user, &body.room_id, &db, + &mutex_lock, )?; + drop(mutex_lock); + db.flush().await?; // Return the replacement room id diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 68246d5..e0e5d29 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{ database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, }; @@ -257,6 +259,16 @@ pub async fn send_state_event_for_key_helper( } } + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let event_id = db.rooms.build_and_append_pdu( PduBuilder { event_type, @@ -268,6 +280,7 @@ pub async fn send_state_event_for_key_helper( &sender_user, &room_id, &db, + &mutex_lock, )?; Ok(event_id) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 092c4a9..fe11304 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -189,6 +189,18 @@ async fn sync_helper( for room_id in db.rooms.rooms_joined(&sender_user) { let room_id = room_id?; + // Get and drop the lock to wait for remaining operations to finish + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + drop(mutex_lock); + let mut non_timeline_pdus = db .rooms .pdus_until(&sender_user, &room_id, u64::MAX) @@ -641,6 +653,19 @@ async fn sync_helper( let mut left_rooms = BTreeMap::new(); for result in db.rooms.rooms_left(&sender_user) { let (room_id, left_state_events) = result?; + + // Get and drop the lock to wait for remaining operations to finish + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + drop(mutex_lock); + let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; // Left before last sync @@ -667,6 +692,19 @@ async fn sync_helper( let mut invited_rooms = BTreeMap::new(); for result in db.rooms.rooms_invited(&sender_user) { let (room_id, invite_state_events) = result?; + + // Get and 
drop the lock to wait for remaining operations to finish + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + drop(mutex_lock); + let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; // Invited before last sync diff --git a/src/database/admin.rs b/src/database/admin.rs index cd5fa84..d8b7ae5 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -10,7 +10,7 @@ use ruma::{ events::{room::message, EventType}, UserId, }; -use tokio::sync::{RwLock, RwLockReadGuard}; +use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), @@ -48,38 +48,51 @@ impl Admin { ) .unwrap(); - if conduit_room.is_none() { - warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); - } + let conduit_room = match conduit_room { + None => { + warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); + return; + } + Some(r) => r, + }; drop(guard); - let send_message = - |message: message::MessageEventContent, guard: RwLockReadGuard<'_, Database>| { - if let Some(conduit_room) = &conduit_room { - guard - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: serde_json::to_value(message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &guard, - ) - .unwrap(); - } - }; + let send_message = |message: message::MessageEventContent, + guard: RwLockReadGuard<'_, Database>, + mutex_lock: &MutexGuard<'_, ()>| { + guard + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: serde_json::to_value(message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &guard, + mutex_lock, + ) + .unwrap(); + }; loop { tokio::select! 
{ Some(event) = receiver.next() => { let guard = db.read().await; + let mutex = Arc::clone( + guard.globals + .roomid_mutex + .write() + .unwrap() + .entry(conduit_room.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; match event { AdminCommand::RegisterAppservice(yaml) => { @@ -93,15 +106,17 @@ impl Admin { count, appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") ); - send_message(message::MessageEventContent::text_plain(output), guard); + send_message(message::MessageEventContent::text_plain(output), guard, &mutex_lock); } else { - send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard); + send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &mutex_lock); } } AdminCommand::SendMessage(message) => { - send_message(message, guard) + send_message(message, guard, &mutex_lock); } } + + drop(mutex_lock); } } } diff --git a/src/database/globals.rs b/src/database/globals.rs index b6fe9e1..0e72297 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -46,6 +46,7 @@ pub struct Globals { pub servername_ratelimiter: Arc, Arc>>>, pub sync_receivers: RwLock), SyncHandle>>, pub roomid_mutex: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub rotate: RotationHandler, } @@ -199,6 +200,7 @@ impl Globals { bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), roomid_mutex: RwLock::new(BTreeMap::new()), + roomid_mutex_federation: RwLock::new(BTreeMap::new()), sync_receivers: RwLock::new(BTreeMap::new()), rotate: RotationHandler::new(), }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 060bf45..1542db8 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,6 +2,7 @@ mod edus; pub use edus::RoomEdus; use member::MembershipState; +use tokio::sync::MutexGuard; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::{debug, error, warn}; @@ -1207,6 +1208,7 @@ impl Rooms { sender: &UserId, room_id: &RoomId, db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex ) -> Result { let PduBuilder { event_type, @@ -1216,7 +1218,6 @@ impl Rooms { redacts, } = pdu_builder; - // TODO: Make sure this isn't called twice in parallel let prev_events = self .get_pdu_leaves(&room_id)? .into_iter() @@ -1790,6 +1791,16 @@ impl Rooms { db, )?; } else { + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let mut event = serde_json::from_value::>( self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? 
.ok_or(Error::BadRequest( @@ -1817,6 +1828,7 @@ impl Rooms { user_id, room_id, db, + &mutex_lock, )?; } diff --git a/src/server_server.rs b/src/server_server.rs index f90caad..fb49d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -640,7 +640,7 @@ pub async fn send_transaction_message_route( let mutex = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_federation .write() .unwrap() .entry(room_id.clone()) @@ -1308,11 +1308,13 @@ pub fn handle_incoming_pdu<'a>( pdu_id = Some( append_incoming_pdu( &db, + &room_id, &incoming_pdu, val, extremities, &state_at_incoming_event, ) + .await .map_err(|_| "Failed to add pdu to db.".to_owned())?, ); debug!("Appended incoming pdu."); @@ -1611,13 +1613,24 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. #[tracing::instrument(skip(db))] -pub(crate) fn append_incoming_pdu( +async fn append_incoming_pdu( db: &Database, + room_id: &RoomId, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: HashSet, state: &StateMap>, ) -> Result> { + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. db.rooms @@ -1630,6 +1643,8 @@ pub(crate) fn append_incoming_pdu( &db, )?; + drop(mutex_lock); + for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces @@ -2145,7 +2160,7 @@ pub async fn create_join_event_route( let mutex = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_federation .write() .unwrap() .entry(body.room_id.clone()) From 952fb75795fa86096aabdd43a4ab7fc5e490aaba Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Jul 2021 14:50:07 +0200 Subject: [PATCH 0652/1727] add shutdown handler to kick sync --- src/database.rs | 15 +++++++++++++-- src/main.rs | 10 ++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/database.rs b/src/database.rs index b32f539..5a896a8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -23,7 +23,7 @@ use rocket::{ futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, outcome::{try_outcome, IntoOutcome}, request::{FromRequest, Request}, - State, + Shutdown, State, }; use ruma::{DeviceId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; @@ -199,7 +199,7 @@ impl Database { } /// Load an existing database or create a new one. 
- pub async fn load_or_create(config: Config) -> Result>> { + pub async fn load_or_create(config: &Config) -> Result>> { Self::check_sled_or_sqlite_db(&config)?; let builder = Engine::open(&config)?; @@ -425,6 +425,17 @@ impl Database { Ok(db) } + #[cfg(feature = "conduit_bin")] + pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { + tokio::spawn(async move { + shutdown.await; + + log::info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + + db.read().await.globals.rotate.fire(); + }); + } + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); diff --git a/src/main.rs b/src/main.rs index e0d2e3d..324a3ad 100644 --- a/src/main.rs +++ b/src/main.rs @@ -220,11 +220,17 @@ async fn main() { config.warn_deprecated(); - let db = Database::load_or_create(config) + let db = Database::load_or_create(&config) .await .expect("config is valid"); - let rocket = setup_rocket(raw_config, db); + let rocket = setup_rocket(raw_config, Arc::clone(&db)) + .ignite() + .await + .unwrap(); + + Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; + rocket.launch().await.unwrap(); } From 9de32ae12f906048afbd0c9c7d01caa33e42a6b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Jul 2021 15:46:30 +0200 Subject: [PATCH 0653/1727] fix toolchain --- rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain b/rust-toolchain index a63cb35..d96ae40 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.52.0 +1.52 From eaa4c776413e997de32b5e25b09b78cfb4f8d4f9 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 14 Jul 2021 20:33:19 +0000 Subject: [PATCH 0654/1727] CI: Check format before running test Testing needs compilation and is slow. Format checking is quick. 
As format checking fails more often than tests, switching them should result in faster failure and feedback --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 424dc96..92da543 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,8 +30,8 @@ test:cargo: - rustup component add clippy rustfmt script: - rustc --version && cargo --version # Print version info for debugging - - cargo test --workspace --verbose --locked - cargo fmt --all -- --check + - cargo test --workspace --verbose --locked - cargo clippy # --------------------------------------------------------------------- # From 661101c9ae751b09b97119a1f8071871f1b2a252 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 15 Jul 2021 13:29:08 +0200 Subject: [PATCH 0655/1727] add sled cache_capacity back --- Cargo.toml | 4 ++-- src/database.rs | 6 ++++++ src/database/abstraction/sled.rs | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 537813f..0f40ab7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,14 +77,14 @@ lru-cache = "0.1.2" rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } parking_lot = { version = "0.11.1", optional = true } crossbeam = { version = "0.8.1", optional = true } -num_cpus = { version = "1.13.0", optional = true } +num_cpus = "1.13.0" [features] default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_rocksdb = ["rocksdb"] backend_sqlite = ["sqlite"] -sqlite = ["rusqlite", "parking_lot", "crossbeam", "num_cpus", "tokio/signal"] +sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional [[bin]] diff --git a/src/database.rs b/src/database.rs index c39f0fb..85beee8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -45,6 +45,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_sled_cache_capacity_bytes")] + sled_cache_capacity_bytes: u64, #[serde(default = "default_sqlite_read_pool_size")] sqlite_read_pool_size: usize, #[serde(default = "true_fn")] @@ -109,6 +111,10 @@ fn default_db_cache_capacity_mb() -> f64 { 200.0 } +fn default_sled_cache_capacity_bytes() -> u64 { + 1024 * 1024 * 1024 +} + fn default_sqlite_read_pool_size() -> usize { num_cpus::get().max(1) } diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index e58184d..1aa631f 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -14,7 +14,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(Engine( sled::Config::default() .path(&config.database_path) - .cache_capacity((config.db_cache_capacity_mb * 1024 * 1024) as u64) + .cache_capacity(config.sled_cache_capacity_bytes) .use_compression(true) .open()?, ))) From d76e95e8fc9d39fcab36e43fcd6f971c556782d8 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 15 Jul 2021 13:47:21 +0200 Subject: [PATCH 0656/1727] use existing db cache size --- src/database.rs | 6 ------ src/database/abstraction/sled.rs | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/database.rs b/src/database.rs index 85beee8..c39f0fb 100644 --- a/src/database.rs +++ b/src/database.rs @@ -45,8 +45,6 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, - #[serde(default = "default_sled_cache_capacity_bytes")] - sled_cache_capacity_bytes: u64, 
#[serde(default = "default_sqlite_read_pool_size")] sqlite_read_pool_size: usize, #[serde(default = "true_fn")] @@ -111,10 +109,6 @@ fn default_db_cache_capacity_mb() -> f64 { 200.0 } -fn default_sled_cache_capacity_bytes() -> u64 { - 1024 * 1024 * 1024 -} - fn default_sqlite_read_pool_size() -> usize { num_cpus::get().max(1) } diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 1aa631f..12e0275 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -14,7 +14,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(Engine( sled::Config::default() .path(&config.database_path) - .cache_capacity(config.sled_cache_capacity_bytes) + .cache_capacity((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64) .use_compression(true) .open()?, ))) From b1993421c20085d25afa9cf04daa36aea6040540 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 15 Jul 2021 18:09:10 +0200 Subject: [PATCH 0657/1727] fix signal compiling on windows --- src/database.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/database.rs b/src/database.rs index c39f0fb..2d7886e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -543,11 +543,10 @@ impl Database { #[cfg(feature = "sqlite")] pub async fn start_wal_clean_task(lock: &Arc>, config: &Config) { - use tokio::{ - select, - signal::unix::{signal, SignalKind}, - time::{interval, timeout}, - }; + use tokio::time::{interval, timeout}; + + #[cfg(unix)] + use tokio::signal::unix::{signal, SignalKind}; use std::{ sync::Weak, @@ -562,10 +561,12 @@ impl Database { tokio::spawn(async move { let mut i = interval(timer_interval); + #[cfg(unix)] let mut s = signal(SignalKind::hangup()).unwrap(); loop { - select! { + #[cfg(unix)] + tokio::select! { _ = i.tick(), if do_timer => { log::info!(target: "wal-trunc", "Timer ticked") } @@ -573,7 +574,14 @@ impl Database { log::info!(target: "wal-trunc", "Received SIGHUP") } }; - + #[cfg(not(unix))] + if do_timer { + i.tick().await; + log::info!(target: "wal-trunc", "Timer ticked") + } else { + // timer disabled, and there's no concept of signals on windows, bailing... + return; + } if let Some(arc) = Weak::upgrade(&weak) { log::info!(target: "wal-trunc", "Rotating sync helpers..."); // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock. From 82a4ec9cbde1a42d7c41f537a024cb3b8d45eb87 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 17 Jul 2021 12:00:38 +0200 Subject: [PATCH 0658/1727] Add more detailed issue templates Shamelessly stolen from https://github.com/HedgeDoc/client Originally written by https://github.com/ErikMichelson and https://github.com/DerMolly --- .gitlab/issue_templates/Bug Report.md | 48 ++++++++++++++++++++++ .gitlab/issue_templates/Feature Request.md | 28 +++++++++++++ .gitlab/issue_templates/Issue Template.md | 15 ------- 3 files changed, 76 insertions(+), 15 deletions(-) create mode 100644 .gitlab/issue_templates/Bug Report.md create mode 100644 .gitlab/issue_templates/Feature Request.md delete mode 100644 .gitlab/issue_templates/Issue Template.md diff --git a/.gitlab/issue_templates/Bug Report.md b/.gitlab/issue_templates/Bug Report.md new file mode 100644 index 0000000..3fdc303 --- /dev/null +++ b/.gitlab/issue_templates/Bug Report.md @@ -0,0 +1,48 @@ + + +### Description + + + + +### To Reproduce + + + + +### Expected behavior + + + + +### Error/Log + +```log +Copy and paste the error log from Conduit here. 
+``` + + +### Your Setup (please complete the following information): + +- Conduit version [e.g. Git commit at download time] +- OS: [e.g. Raspbian , Ubuntu 20.04, ...] +- RAM available to Conduit: [e.g. 1 GB] +- Cores available to Conduit: [e.g. 1 Core] + + +### Additional context + + + + +/label ~conduit diff --git a/.gitlab/issue_templates/Feature Request.md b/.gitlab/issue_templates/Feature Request.md new file mode 100644 index 0000000..4100eb6 --- /dev/null +++ b/.gitlab/issue_templates/Feature Request.md @@ -0,0 +1,28 @@ + + + +### Is your feature request related to a problem? Please describe. + + + + +### Describe the solution you'd like + + + + +### Describe alternatives you've considered + + + + +### Additional context + + + + + +/label ~conduit diff --git a/.gitlab/issue_templates/Issue Template.md b/.gitlab/issue_templates/Issue Template.md deleted file mode 100644 index e1a0667..0000000 --- a/.gitlab/issue_templates/Issue Template.md +++ /dev/null @@ -1,15 +0,0 @@ -# Headline - -### Description - - - - - - - - - - - -/label ~conduit From 7054f74783effdb96733c38b65e2f4f315ec2fd6 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 17 Jul 2021 12:09:50 +0200 Subject: [PATCH 0659/1727] Add issue template for GitHub which redirects to Gitlab --- .github/ISSUE_TEMPLATE/Issue.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/Issue.md diff --git a/.github/ISSUE_TEMPLATE/Issue.md b/.github/ISSUE_TEMPLATE/Issue.md new file mode 100644 index 0000000..9022062 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/Issue.md @@ -0,0 +1,11 @@ +--- +name: "Issue with / Feature Request for Conduit" +about: "Please file issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new" +title: "CLOSE ME" +--- + + + +**⚠️ Conduit development does not happen on GitHub. Issues opened here will not be addressed** + +Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new \ No newline at end of file From 5d8ad4fd72bf9a5a06c14b1624432ce52b59cbe6 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 17 Jul 2021 12:22:11 +0200 Subject: [PATCH 0660/1727] Add some more details to the MR template --- .gitlab/merge_request_templates/MR.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .gitlab/merge_request_templates/MR.md diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md new file mode 100644 index 0000000..cf3c09e --- /dev/null +++ b/.gitlab/merge_request_templates/MR.md @@ -0,0 +1,8 @@ + + + +----------------------------------------------------------------------------- + +- [ ] I ran `cargo fmt --all`, `cargo test --workspace` and `cargo clippy` +- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license + From 092221ca3f9c10eb66672b61bbadc15e8bfea7ab Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 18 Jul 2021 20:30:57 +0000 Subject: [PATCH 0661/1727] Reduce amount of text in Bug Report template --- .gitlab/issue_templates/Bug Report.md | 41 ++++----------------------- 1 file changed, 6 insertions(+), 35 deletions(-) diff --git a/.gitlab/issue_templates/Bug Report.md b/.gitlab/issue_templates/Bug Report.md index 3fdc303..3e66d43 100644 --- a/.gitlab/issue_templates/Bug Report.md +++ b/.gitlab/issue_templates/Bug Report.md @@ -2,47 +2,18 @@ If you're requesting a new feature, that isn't part of this project yet, then please consider filling out a "Feature Request" instead! 
-If you just have trouble setting up a conduit server or other questions, feel free to ask for help in the +If you need a hand setting up your conduit server, feel free to ask for help in the Conduit Matrix Chat: https://matrix.to/#/#conduit:fachschaften.org. --> ### Description + - +### System Configuration + - -### To Reproduce - - - - -### Expected behavior - - - - -### Error/Log - -```log -Copy and paste the error log from Conduit here. -``` - - -### Your Setup (please complete the following information): - -- Conduit version [e.g. Git commit at download time] -- OS: [e.g. Raspbian , Ubuntu 20.04, ...] -- RAM available to Conduit: [e.g. 1 GB] -- Cores available to Conduit: [e.g. 1 Core] - - -### Additional context - - +Conduit Version: +Database backend (default is sqlite): sqlite /label ~conduit From cdd01262d23e92b445cbb757005278358a6097d8 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 18 Jul 2021 20:43:21 +0000 Subject: [PATCH 0662/1727] Shorten more templates --- .github/ISSUE_TEMPLATE/Issue.md | 2 +- .gitlab/issue_templates/Feature Request.md | 13 +------------ .gitlab/merge_request_templates/MR.md | 4 ++-- 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/Issue.md b/.github/ISSUE_TEMPLATE/Issue.md index 9022062..7889665 100644 --- a/.github/ISSUE_TEMPLATE/Issue.md +++ b/.github/ISSUE_TEMPLATE/Issue.md @@ -8,4 +8,4 @@ title: "CLOSE ME" **⚠️ Conduit development does not happen on GitHub. Issues opened here will not be addressed** -Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new \ No newline at end of file +Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new diff --git a/.gitlab/issue_templates/Feature Request.md b/.gitlab/issue_templates/Feature Request.md index 4100eb6..3f636e7 100644 --- a/.gitlab/issue_templates/Feature Request.md +++ b/.gitlab/issue_templates/Feature Request.md @@ -6,22 +6,11 @@ then please consider filling out a "Bug Report" instead! ### Is your feature request related to a problem? Please describe. 
- + ### Describe the solution you'd like - - - -### Describe alternatives you've considered - - - - -### Additional context - - diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md index cf3c09e..c592a3b 100644 --- a/.gitlab/merge_request_templates/MR.md +++ b/.gitlab/merge_request_templates/MR.md @@ -1,8 +1,8 @@ - + ----------------------------------------------------------------------------- -- [ ] I ran `cargo fmt --all`, `cargo test --workspace` and `cargo clippy` +- [ ] I ran `cargo fmt` and `cargo test` - [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license From 2babff1e418d9c82d187f7c016d7e33ef00a3d2a Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 08:23:04 +0000 Subject: [PATCH 0663/1727] CI: Test registration with element web --- .gitlab-ci.yml | 31 +++++- .../test-element-web-registration.js | 101 ++++++++++++++++++ tests/test-config.toml | 15 +++ 3 files changed, 144 insertions(+), 3 deletions(-) create mode 100644 tests/client-element-web/test-element-web-registration.js create mode 100644 tests/test-config.toml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 92da543..cb7385a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,6 @@ stages: - - test - build + - test - upload artifacts variables: @@ -8,8 +8,6 @@ variables: FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest - - test:cargo: stage: "test" needs: [] @@ -34,6 +32,31 @@ test:cargo: - cargo test --workspace --verbose --locked - cargo clippy + +test:register:element-web-stable: + stage: "test" + needs: + - "build:cargo:x86_64-unknown-linux-gnu" + image: "buildkite/puppeteer:latest" + tags: ["docker"] + interruptible: true + script: + - "CONDUIT_CONFIG=tests/test-config.toml ./conduit-x86_64-unknown-linux-gnu > conduit.log &" + - "cd tests/client-element-web/" + - "npm install puppeteer" + - "node test-element-web-registration.js \"https://app.element.io/\" \"http://localhost:6167\"" + - "killall --regexp \"conduit\"" + - "cd ../.." 
+ - "cat conduit.log" + artifacts: + paths: + - "tests/client-element-web/*.png" + - "*.log" + expire_in: 1 week + when: always + retry: 1 + + # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # # --------------------------------------------------------------------- # @@ -76,6 +99,8 @@ build:cargo:x86_64-unknown-linux-gnu: extends: .build-cargo-shared-settings variables: TARGET: "x86_64-unknown-linux-gnu" + rules: + - if: "$CI_COMMIT_BRANCH" build:cargo:armv7-unknown-linux-gnueabihf: extends: .build-cargo-shared-settings diff --git a/tests/client-element-web/test-element-web-registration.js b/tests/client-element-web/test-element-web-registration.js new file mode 100644 index 0000000..8f2e7f0 --- /dev/null +++ b/tests/client-element-web/test-element-web-registration.js @@ -0,0 +1,101 @@ +const puppeteer = require('puppeteer'); + +run().then(() => console.log('Done')).catch(error => { + console.error("Registration test failed."); + console.error("There might be a screenshot of the failure in the artifacts.\n"); + console.error(error); + process.exit(111); +}); + +async function run() { + + const elementUrl = process.argv[process.argv.length - 2]; + console.debug("Testing registration with ElementWeb hosted at "+ elementUrl); + + const homeserverUrl = process.argv[process.argv.length - 1]; + console.debug("Homeserver url: "+ homeserverUrl); + + const username = "testuser" + String(Math.floor(Math.random() * 100000)); + const password = "testpassword" + String(Math.floor(Math.random() * 100000)); + console.debug("Testuser for this run:\n User: " + username + "\n Password: " + password); + + const browser = await puppeteer.launch({ + headless: true, args: [ + "--no-sandbox" + ] + }); + + const page = await browser.newPage(); + await page.goto(elementUrl); + + await page.screenshot({ path: '01-element-web-opened.png' }); + + console.debug("Click [Create Account] button"); + await page.waitForSelector('a.mx_ButtonCreateAccount'); + await page.click('a.mx_ButtonCreateAccount'); + + await page.screenshot({ path: '02-clicked-create-account-button.png' }); + + // The webapp should have loaded right now, if anything takes more than 5 seconds, something probably broke + page.setDefaultTimeout(5000); + + console.debug("Click [Edit] to switch homeserver"); + await page.waitForSelector('div.mx_ServerPicker_change'); + await page.click('div.mx_ServerPicker_change'); + + await page.screenshot({ path: '03-clicked-edit-homeserver-button.png' }); + + console.debug("Type in local homeserver url"); + await page.waitForSelector('input#mx_homeserverInput'); + await page.click('input#mx_homeserverInput'); + await page.click('input#mx_homeserverInput'); + await page.keyboard.type(homeserverUrl); + + await page.screenshot({ path: '04-typed-in-homeserver.png' }); + + console.debug("[Continue] with changed homeserver"); + await page.waitForSelector("div.mx_ServerPickerDialog_continue"); + await page.click('div.mx_ServerPickerDialog_continue'); + + await page.screenshot({ path: '05-back-to-enter-user-credentials.png' }); + + console.debug("Type in username"); + await page.waitForSelector("input#mx_RegistrationForm_username"); + await page.click('input#mx_RegistrationForm_username'); + await page.keyboard.type(username); + + await page.screenshot({ path: '06-typed-in-username.png' }); + + console.debug("Type in password"); + await page.waitForSelector("input#mx_RegistrationForm_password"); + await page.click('input#mx_RegistrationForm_password'); + 
await page.keyboard.type(password); + + await page.screenshot({ path: '07-typed-in-password-once.png' }); + + console.debug("Type in password again"); + await page.waitForSelector("input#mx_RegistrationForm_passwordConfirm"); + await page.click('input#mx_RegistrationForm_passwordConfirm'); + await page.keyboard.type(password); + + await page.screenshot({ path: '08-typed-in-password-twice.png' }); + + console.debug("Click on [Register] to finish the account creation"); + await page.waitForSelector("input.mx_Login_submit"); + await page.click('input.mx_Login_submit'); + + await page.screenshot({ path: '09-clicked-on-register-button.png' }); + + // Waiting for the app to login can take some time, so be patient. + page.setDefaultTimeout(10000); + + console.debug("Wait for chat window to show up"); + await page.waitForSelector("div.mx_HomePage_default_buttons"); + console.debug("Apparently the registration worked."); + + await page.screenshot({ path: '10-logged-in-homescreen.png' }); + + + // Close the browser and exit the script + await browser.close(); +} \ No newline at end of file diff --git a/tests/test-config.toml b/tests/test-config.toml new file mode 100644 index 0000000..c466687 --- /dev/null +++ b/tests/test-config.toml @@ -0,0 +1,15 @@ +[global] + +# Server runs in same container as tests do, so localhost is fine +server_name = "localhost" + +# With a bit of luck /tmp is a RAM disk, so that the file system does not become the bottleneck while testing +database_path = "/tmp" + +# All the other settings are left at their defaults: +port = 6167 +max_request_size = 20_000_000 +allow_registration = true +trusted_servers = ["matrix.org"] +address = "127.0.0.1" +proxy = "none" \ No newline at end of file From 130b9841da144b2bdc3bfb6d494fd817afaf96b2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 08:47:31 +0000 Subject: [PATCH 0664/1727] CI: Add sytest --- .gitlab-ci.yml | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cb7385a..3bdb8ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,6 +32,37 @@ test:cargo: - cargo test --workspace --verbose --locked - cargo clippy +test:sytest: + stage: "test" + allow_failure: true + needs: + - "build:cargo:x86_64-unknown-linux-musl" + image: + name: "valkum/sytest-conduit:latest" + entrypoint: [""] + tags: ["docker"] + variables: + PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" + before_script: + - "mkdir -p /app" + - "cp ./conduit-x86_64-unknown-linux-musl /app/conduit" + - "chmod +x /app/conduit" + - "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src" + - "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/" + - "cd /" + script: + - "SYTEST_EXIT_CODE=0" + - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" + - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" + - "exit $SYTEST_EXIT_CODE" + artifacts: + when: always + paths: + - "$CI_PROJECT_DIR/sytest.xml" + - "$CI_PROJECT_DIR/results.tap" + reports: + junit: "$CI_PROJECT_DIR/sytest.xml" + test:register:element-web-stable: stage: "test" @@ -122,6 +153,20 @@ build:cargo:aarch64-unknown-linux-gnu: TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" +build:cargo:x86_64-unknown-linux-musl: + extends: .build-cargo-shared-settings + image: "rust:alpine" + rules: + - 
if: '$CI_COMMIT_BRANCH' # Always run + variables: + TARGET: "x86_64-unknown-linux-musl" + before_script: + - 'echo "Building for target $TARGET"' + - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging + - "rustup target add $TARGET" + - "apk add libc-dev" + # --------------------------------------------------------------------- # # Cargo: Compiling deb packages for different architectures # @@ -178,6 +223,7 @@ publish:package: - "build:cargo:x86_64-unknown-linux-gnu" - "build:cargo:armv7-unknown-linux-gnueabihf" - "build:cargo:aarch64-unknown-linux-gnu" + - "build:cargo:x86_64-unknown-linux-musl" - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' @@ -190,6 +236,7 @@ publish:package: - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl" ${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' From 3ce75d1f0294115d60de2ca5efe64de080806a76 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 11:55:39 +0200 Subject: [PATCH 0665/1727] apply fix --- src/main.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 324a3ad..34489d3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -185,7 +185,7 @@ async fn main() { std::env::set_var("CONDUIT_LOG_LEVEL", "off"); let raw_config = - Figment::from(rocket::Config::release_default()) + Figment::from(default_config()) .merge( Toml::file(Env::var("CONDUIT_CONFIG").expect( "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", @@ -261,3 +261,32 @@ fn missing_token_catcher() -> Result<()> { fn bad_json_catcher() -> Result<()> { Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) } + +fn default_config() -> rocket::Config { + let mut config = rocket::Config::release_default(); + + { + let mut shutdown = &mut config.shutdown; + + #[cfg(unix)] + { + use rocket::config::Sig; + + let signals = &mut shutdown.signals; + + signals.insert(Sig::Term); + signals.insert(Sig::Int); + } + + // Once shutdown is triggered, this is the amount of seconds before rocket + // will forcefully start shutting down connections, this gives enough time to /sync + // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. + shutdown.grace = 35; + + // After the grace period, rocket starts shutting down connections, and waits at least this + // many seconds before forcefully shutting all of them down. 
+ shutdown.mercy = 10; + } + + config +} From d6b37480e7a8a52994682f0cf98cc4eb1bd093cb Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 10:00:44 +0000 Subject: [PATCH 0666/1727] CI: Fix package upload --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3bdb8ee..0924ee3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -236,7 +236,7 @@ publish:package: - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl" ${BASE_URL}/conduit-x86_64-unknown-linux-musl"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' From faa283d35b7771cec7f9ecaff13ca2960b4e49d5 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 13:44:53 +0200 Subject: [PATCH 0667/1727] review feedback --- src/main.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index 34489d3..f69bc48 100644 --- a/src/main.rs +++ b/src/main.rs @@ -272,10 +272,8 @@ fn default_config() -> rocket::Config { { use rocket::config::Sig; - let signals = &mut shutdown.signals; - - signals.insert(Sig::Term); - signals.insert(Sig::Int); + shutdown.signals.insert(Sig::Term); + shutdown.signals.insert(Sig::Int); } // Once shutdown is triggered, this is the amount of seconds before rocket From 0f2dc9a239d8dd031ce84193b7a00544e87f8d34 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 15:56:20 +0200 Subject: [PATCH 0668/1727] add stuff and bits --- src/database.rs | 46 +++++++++- src/database/abstraction/sqlite.rs | 134 ++++++++++++++++++++++------- 2 files changed, 148 insertions(+), 32 deletions(-) diff --git a/src/database.rs b/src/database.rs index 2d7886e..82560db 100644 --- a/src/database.rs +++ b/src/database.rs @@ -53,6 +53,10 @@ pub struct Config { sqlite_wal_clean_second_interval: u32, #[serde(default = "default_sqlite_wal_clean_second_timeout")] sqlite_wal_clean_second_timeout: u32, + #[serde(default = "default_sqlite_spillover_reap_chunk")] + sqlite_spillover_reap_chunk: u32, + #[serde(default = "default_sqlite_spillover_reap_interval_secs")] + sqlite_spillover_reap_interval_secs: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -121,6 +125,14 @@ fn default_sqlite_wal_clean_second_timeout() -> u32 { 2 } +fn default_sqlite_spillover_reap_chunk() -> u32 { + 5 +} + +fn default_sqlite_spillover_reap_interval_secs() -> u32 { + 10 +} + fn default_max_request_size() -> u32 { 20 * 1024 * 1024 // Default to 20 MB } @@ -420,7 +432,10 @@ impl Database { drop(guard); #[cfg(feature = "sqlite")] - Self::start_wal_clean_task(&db, &config).await; + { + Self::start_wal_clean_task(&db, &config).await; + Self::start_spillover_reap_task(builder, &config).await; + } Ok(db) } @@ -541,6 +556,35 @@ impl Database { 
self._db.flush_wal() } + #[cfg(feature = "sqlite")] + pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { + let chunk_size = match config.sqlite_spillover_reap_chunk { + 0 => None, // zero means no chunking, reap everything + a @ _ => Some(a), + }; + let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; + + let weak = Arc::downgrade(&engine); + + tokio::spawn(async move { + use tokio::time::interval; + + use std::{sync::Weak, time::Duration}; + + let mut i = interval(Duration::from_secs(interval_secs)); + + loop { + i.tick().await; + + if let Some(arc) = Weak::upgrade(&weak) { + arc.reap_spillover(chunk_size); + } else { + break; + } + } + }); + } + #[cfg(feature = "sqlite")] pub async fn start_wal_clean_task(lock: &Arc>, config: &Config) { use tokio::time::{interval, timeout}; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 25d236a..ac92a45 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,3 +1,11 @@ +use super::{DatabaseEngine, Tree}; +use crate::{database::Config, Result}; +use crossbeam::channel::{ + bounded, unbounded, Receiver as ChannelReceiver, Sender as ChannelSender, TryRecvError, +}; +use log::debug; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; use std::{ collections::BTreeMap, future::Future, @@ -8,33 +16,12 @@ use std::{ thread, time::{Duration, Instant}, }; - -use crate::{database::Config, Result}; - -use super::{DatabaseEngine, Tree}; - -use log::debug; - -use crossbeam::channel::{bounded, Sender as ChannelSender}; -use parking_lot::{Mutex, MutexGuard, RwLock}; -use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; - use tokio::sync::oneshot::Sender; -// const SQL_CREATE_TABLE: &str = -// "CREATE TABLE IF NOT EXISTS {} {{ \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL }}"; -// const SQL_SELECT: &str = "SELECT value FROM {} WHERE key = ?"; -// const SQL_INSERT: &str = "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)"; -// const SQL_DELETE: &str = "DELETE FROM {} WHERE key = ?"; -// const SQL_SELECT_ITER: &str = "SELECT key, value FROM {}"; -// const SQL_SELECT_PREFIX: &str = "SELECT key, value FROM {} WHERE key LIKE ?||'%' ORDER BY key ASC"; -// const SQL_SELECT_ITER_FROM_FORWARDS: &str = "SELECT key, value FROM {} WHERE key >= ? ORDER BY ASC"; -// const SQL_SELECT_ITER_FROM_BACKWARDS: &str = -// "SELECT key, value FROM {} WHERE key <= ? 
ORDER BY DESC"; - struct Pool { writer: Mutex, readers: Vec>, + spills: ConnectionRecycler, spill_tracker: Arc<()>, path: PathBuf, } @@ -43,7 +30,7 @@ pub const MILLI: Duration = Duration::from_millis(1); enum HoldingConn<'a> { FromGuard(MutexGuard<'a, Connection>), - FromOwned(Connection, Arc<()>), + FromRecycled(RecycledConn, Arc<()>), } impl<'a> Deref for HoldingConn<'a> { @@ -52,7 +39,57 @@ impl<'a> Deref for HoldingConn<'a> { fn deref(&self) -> &Self::Target { match self { HoldingConn::FromGuard(guard) => guard.deref(), - HoldingConn::FromOwned(conn, _) => conn, + HoldingConn::FromRecycled(conn, _) => conn.deref(), + } + } +} + +struct ConnectionRecycler(ChannelSender, ChannelReceiver); + +impl ConnectionRecycler { + fn new() -> Self { + let (s, r) = unbounded(); + Self(s, r) + } + + fn recycle(&self, conn: Connection) -> RecycledConn { + let sender = self.0.clone(); + + RecycledConn(Some(conn), sender) + } + + fn try_take(&self) -> Option { + match self.1.try_recv() { + Ok(conn) => Some(conn), + Err(TryRecvError::Empty) => None, + // as this is pretty impossible, a panic is warranted if it ever occurs + Err(TryRecvError::Disconnected) => panic!("Receiving channel was disconnected. A a sender is owned by the current struct, this should never happen(!!!)") + } + } +} + +struct RecycledConn( + Option, // To allow moving out of the struct when `Drop` is called. + ChannelSender, +); + +impl Deref for RecycledConn { + type Target = Connection; + + fn deref(&self) -> &Self::Target { + self.0 + .as_ref() + .expect("RecycledConn does not have a connection in Option<>") + } +} + +impl Drop for RecycledConn { + fn drop(&mut self) { + if let Some(conn) = self.0.take() { + log::debug!("Recycled connection"); + if let Err(e) = self.1.send(conn) { + log::warn!("Recycling a connection led to the following error: {:?}", e) + } } } } @@ -76,6 +113,7 @@ impl Pool { Ok(Self { writer, readers, + spills: ConnectionRecycler::new(), spill_tracker: Arc::new(()), path: path.as_ref().to_path_buf(), }) @@ -104,24 +142,38 @@ impl Pool { } fn read_lock(&self) -> HoldingConn<'_> { + // First try to get a connection from the permanent pool for r in &self.readers { if let Some(reader) = r.try_lock() { return HoldingConn::FromGuard(reader); } } - let spill_arc = self.spill_tracker.clone(); + // We didn't get a connection from the permanent pool, so we'll dumpster-dive for recycled connections. + // Either we have a connection or we dont, if we don't, we make a new one. + let conn = match self.spills.try_take() { + Some(conn) => conn, + None => Self::prepare_conn(&self.path, None).unwrap(), + }; + + // Clone the spill Arc to mark how many spilled connections actually exist. + let spill_arc = Arc::clone(&self.spill_tracker); + + // Get a sense of how many connections exist now. let now_count = Arc::strong_count(&spill_arc) - 1 /* because one is held by the pool */; - log::warn!("read_lock: all readers locked, creating spillover reader..."); + log::debug!("read_lock: all readers locked, creating spillover reader..."); - if now_count > 1 { - log::warn!("read_lock: now {} spillover readers exist", now_count); + // If the spillover readers are more than the number of total readers, there might be a problem. + if now_count > self.readers.len() { + log::warn!( + "read_lock: possible high load; now {} spillover readers exist", + now_count + ); } - let spilled = Self::prepare_conn(&self.path, None).unwrap(); - - HoldingConn::FromOwned(spilled, spill_arc) + // Return the recyclable connection. 
+ HoldingConn::FromRecycled(self.spills.recycle(conn), spill_arc) } } @@ -188,6 +240,26 @@ impl Engine { ) .map_err(Into::into) } + + // Reaps (at most) X amount of connections if `amount` is Some. + // If none, reaps all currently idle connections. + pub fn reap_spillover(&self, amount: Option) { + let mut reaped = 0; + + if let Some(amount) = amount { + for _ in 0..amount { + if self.pool.spills.try_take().is_some() { + reaped += 1; + } + } + } else { + while let Some(_) = self.pool.spills.try_take() { + reaped += 1; + } + } + + log::debug!("Reaped {} connections", reaped); + } } pub struct SqliteTable { From 7e579f8d346d04bdcde32917cf296a9a7ef4f582 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 16:25:41 +0200 Subject: [PATCH 0669/1727] change to fraction-based approach --- src/database.rs | 19 +++++++++---------- src/database/abstraction/sqlite.rs | 18 +++++++----------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/src/database.rs b/src/database.rs index 82560db..f7c3d9d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -53,8 +53,8 @@ pub struct Config { sqlite_wal_clean_second_interval: u32, #[serde(default = "default_sqlite_wal_clean_second_timeout")] sqlite_wal_clean_second_timeout: u32, - #[serde(default = "default_sqlite_spillover_reap_chunk")] - sqlite_spillover_reap_chunk: u32, + #[serde(default = "default_sqlite_spillover_reap_fraction")] + sqlite_spillover_reap_fraction: u32, #[serde(default = "default_sqlite_spillover_reap_interval_secs")] sqlite_spillover_reap_interval_secs: u32, #[serde(default = "default_max_request_size")] @@ -125,12 +125,12 @@ fn default_sqlite_wal_clean_second_timeout() -> u32 { 2 } -fn default_sqlite_spillover_reap_chunk() -> u32 { - 5 +fn default_sqlite_spillover_reap_fraction() -> u32 { + 2 } fn default_sqlite_spillover_reap_interval_secs() -> u32 { - 10 + 60 } fn default_max_request_size() -> u32 { @@ -558,10 +558,9 @@ impl Database { #[cfg(feature = "sqlite")] pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { - let chunk_size = match config.sqlite_spillover_reap_chunk { - 0 => None, // zero means no chunking, reap everything - a @ _ => Some(a), - }; + use std::convert::TryInto; + + let fraction_factor = config.sqlite_spillover_reap_fraction.max(1).try_into().unwrap(/* We just converted it to be at least 1 */); let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; let weak = Arc::downgrade(&engine); @@ -577,7 +576,7 @@ impl Database { i.tick().await; if let Some(arc) = Weak::upgrade(&weak) { - arc.reap_spillover(chunk_size); + arc.reap_spillover_by_fraction(fraction_factor); } else { break; } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index ac92a45..e4acdbb 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -9,6 +9,7 @@ use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; use std::{ collections::BTreeMap, future::Future, + num::NonZeroU32, ops::Deref, path::{Path, PathBuf}, pin::Pin, @@ -241,19 +242,14 @@ impl Engine { .map_err(Into::into) } - // Reaps (at most) X amount of connections if `amount` is Some. - // If none, reaps all currently idle connections. - pub fn reap_spillover(&self, amount: Option) { + // Reaps (at most) (.len() / `fraction`) (rounded down, min 1) connections. 
+ pub fn reap_spillover_by_fraction(&self, fraction: NonZeroU32) { let mut reaped = 0; - if let Some(amount) = amount { - for _ in 0..amount { - if self.pool.spills.try_take().is_some() { - reaped += 1; - } - } - } else { - while let Some(_) = self.pool.spills.try_take() { + let amount = ((self.pool.spills.1.len() as u32) / fraction).max(1); + + for _ in 0..amount { + if self.pool.spills.try_take().is_some() { reaped += 1; } } From 79bf7fc597aa71419ffcf74f863225b7d12d82a6 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 16:46:59 +0200 Subject: [PATCH 0670/1727] some logging shuffling --- src/database/abstraction/sqlite.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index e4acdbb..14306e1 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -150,11 +150,16 @@ impl Pool { } } + log::debug!("read_lock: All permanent readers locked, obtaining spillover reader..."); + // We didn't get a connection from the permanent pool, so we'll dumpster-dive for recycled connections. // Either we have a connection or we dont, if we don't, we make a new one. let conn = match self.spills.try_take() { Some(conn) => conn, - None => Self::prepare_conn(&self.path, None).unwrap(), + None => { + log::debug!("read_lock: No recycled connections left, creating new one..."); + Self::prepare_conn(&self.path, None).unwrap() + } }; // Clone the spill Arc to mark how many spilled connections actually exist. @@ -163,8 +168,6 @@ impl Pool { // Get a sense of how many connections exist now. let now_count = Arc::strong_count(&spill_arc) - 1 /* because one is held by the pool */; - log::debug!("read_lock: all readers locked, creating spillover reader..."); - // If the spillover readers are more than the number of total readers, there might be a problem. if now_count > self.readers.len() { log::warn!( From e7a51c07d0612b257b631babc0967923ccc3469c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Jul 2021 17:17:10 +0200 Subject: [PATCH 0671/1727] log change feedback --- src/database/abstraction/sqlite.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 14306e1..445093a 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -171,7 +171,7 @@ impl Pool { // If the spillover readers are more than the number of total readers, there might be a problem. if now_count > self.readers.len() { log::warn!( - "read_lock: possible high load; now {} spillover readers exist", + "Database is under high load. 
Consider increasing sqlite_read_pool_size ({} spillover readers exist)", now_count ); } From 678ce0abc6932630fb97409d9ef7aeaff6b97d80 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 17:18:25 +0200 Subject: [PATCH 0672/1727] CI: Create docker image with musl binary --- .gitlab-ci.yml | 41 ++++++++++++++ docker/ci-binaries-packaging.Dockerfile | 73 +++++++++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 docker/ci-binaries-packaging.Dockerfile diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0924ee3..60b8833 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,5 +1,6 @@ stages: - build + - build docker image - test - upload artifacts @@ -212,6 +213,46 @@ build:cargo-deb:x86_64-unknown-linux-gnu: +# --------------------------------------------------------------------- # +# Create and publish docker image # +# --------------------------------------------------------------------- # + +.docker-shared-settings: + stage: "build docker image" + needs: [] + interruptible: true + image: + name: "gcr.io/kaniko-project/executor:debug" + entrypoint: [""] + tags: ["docker"] + variables: + # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache + KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" + before_script: + - "mkdir -p /kaniko/.docker" + - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json' + + +# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image +build:docker:main: + extends: .docker-shared-settings + needs: + - "build:cargo:x86_64-unknown-linux-musl" + script: + - > + /kaniko/executor + $KANIKO_CACHE_ARGS + --context $CI_PROJECT_DIR + --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) + --build-arg "GIT_REF=$CI_COMMIT_REF_NAME" + --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" + --destination "$CI_REGISTRY_IMAGE/conduit:latest" + --destination "$CI_REGISTRY_IMAGE/conduit:alpine" + --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" + rules: + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile new file mode 100644 index 0000000..0122797 --- /dev/null +++ b/docker/ci-binaries-packaging.Dockerfile @@ -0,0 +1,73 @@ +# --------------------------------------------------------------------------------------------------------- +# This Dockerfile is intended to be built as part of Conduit's CI pipeline. +# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. +# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary. +# +# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. +# Credit's for the original Dockerfile: Weasy666. 
+# --------------------------------------------------------------------------------------------------------- + +FROM alpine:3.12 + +ARG CREATED +ARG VERSION +ARG GIT_REF + +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" + +# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md +# including a custom label specifying the build command +LABEL org.opencontainers.image.created=${CREATED} \ + org.opencontainers.image.authors="Conduit Contributors" \ + org.opencontainers.image.title="Conduit" \ + org.opencontainers.image.version=${VERSION} \ + org.opencontainers.image.vendor="Conduit Contributors" \ + org.opencontainers.image.description="A Matrix homeserver written in Rust" \ + org.opencontainers.image.url="https://conduit.rs/" \ + org.opencontainers.image.revision=${GIT_REF} \ + org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.documentation="" \ + org.opencontainers.image.ref.name="" + +# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. +EXPOSE 6167 + +# create data folder for database +RUN mkdir -p /srv/conduit/.local/share/conduit + +# Add www-data user and group with UID 82, as used by alpine +# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +RUN set -x ; \ + addgroup -Sg 82 www-data 2>/dev/null ; \ + adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ + addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + +# Change ownership of Conduit files to www-data user and group +RUN chown -cR www-data:www-data /srv/conduit + +# Install packages needed to run Conduit +RUN apk add --no-cache \ + ca-certificates \ + curl \ + libgcc + +# Create a volume for the database, to persist its contents +VOLUME ["/srv/conduit/.local/share/conduit"] + +# Test if Conduit is still alive, uses the same endpoint as Element +HEALTHCHECK --start-period=5s \ + CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ + curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ + exit 1 + +# Set user to www-data +USER www-data +# Set container home directory +WORKDIR /srv/conduit +# Run Conduit +ENTRYPOINT [ "/srv/conduit/conduit" ] + + +# Copy the COnduit binary into the image at the latest possible moment to maximise caching: +COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit From 0a8dadb79799450952ba18b0d3c1d2df3ef1d95c Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 19:42:25 +0200 Subject: [PATCH 0673/1727] Add hardcoded artifacts.expose_as to show them in MRs --- .gitlab-ci.yml | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 60b8833..6f9b78b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -121,11 +121,6 @@ test:register:element-web-stable: - "export CARGO_PROFILE_RELEASE_LTO=thin" - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' - artifacts: - name: "conduit-$TARGET" - expose_as: "Binary" - paths: - - "conduit-$TARGET" build:cargo:x86_64-unknown-linux-gnu: extends: .build-cargo-shared-settings @@ -133,6 +128,11 @@ build:cargo:x86_64-unknown-linux-gnu: TARGET: "x86_64-unknown-linux-gnu" rules: - if: 
"$CI_COMMIT_BRANCH" + artifacts: + name: "conduit-x86_64-unknown-linux-gnu" + paths: + - "conduit-x86_64-unknown-linux-gnu" + expose_as: "Release binary x86_64-unknown-linux-gnu" build:cargo:armv7-unknown-linux-gnueabihf: extends: .build-cargo-shared-settings @@ -142,6 +142,11 @@ build:cargo:armv7-unknown-linux-gnueabihf: CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ + artifacts: + name: "conduit-armv7-unknown-linux-gnueabihf" + paths: + - "conduit-armv7-unknown-linux-gnueabihf" + expose_as: "Release binary armv7-unknown-linux-gnueabihf" build:cargo:aarch64-unknown-linux-gnu: extends: .build-cargo-shared-settings @@ -153,6 +158,11 @@ build:cargo:aarch64-unknown-linux-gnu: CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" + artifacts: + name: "conduit-aarch64-unknown-linux-gnu" + paths: + - "conduit-aarch64-unknown-linux-gnu" + expose_as: "Release binary aarch64-unknown-linux-gnu" build:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings @@ -167,6 +177,11 @@ build:cargo:x86_64-unknown-linux-musl: - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - "rustup target add $TARGET" - "apk add libc-dev" + artifacts: + name: "conduit-x86_64-unknown-linux-musl" + paths: + - "conduit-x86_64-unknown-linux-musl" + expose_as: "Release binary x86_64-unknown-linux-musl" # --------------------------------------------------------------------- # @@ -199,17 +214,17 @@ build:cargo:x86_64-unknown-linux-musl: script: - time cargo deb --target $TARGET - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"' - artifacts: - name: "conduit-$TARGET.deb" - expose_as: "Debian Package" - paths: - - "conduit-$TARGET.deb" build:cargo-deb:x86_64-unknown-linux-gnu: extends: .build-cargo-deb-shared-settings variables: TARGET: "x86_64-unknown-linux-gnu" NEEDED_PACKAGES: "" + artifacts: + name: "conduit-x86_64-unknown-linux-gnu.deb" + paths: + - "conduit-x86_64-unknown-linux-gnu.deb" + expose_as: "Debian Package x86_64" From 167e903a56f614e4f161bbe171a3e41a1fc70df0 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 19 Jul 2021 22:02:10 +0200 Subject: [PATCH 0674/1727] Fix typo --- docker/ci-binaries-packaging.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 0122797..43ebc98 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -69,5 +69,5 @@ WORKDIR /srv/conduit ENTRYPOINT [ "/srv/conduit/conduit" ] -# Copy the COnduit binary into the image at the latest possible moment to maximise caching: +# Copy the Conduit binary into the image at the latest possible moment to maximise caching: COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit From ec44f3d568fd0093c7cfab821d7a43eb01af4843 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 20 Jul 2021 10:47:36 +0200 Subject: [PATCH 0675/1727] change to f64 --- src/database.rs | 10 ++++------ src/database/abstraction/sqlite.rs | 8 +++++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/database.rs b/src/database.rs index f7c3d9d..34bce73 100644 --- a/src/database.rs +++ b/src/database.rs @@ -54,7 +54,7 @@ pub struct Config { #[serde(default = "default_sqlite_wal_clean_second_timeout")] 
sqlite_wal_clean_second_timeout: u32, #[serde(default = "default_sqlite_spillover_reap_fraction")] - sqlite_spillover_reap_fraction: u32, + sqlite_spillover_reap_fraction: f64, #[serde(default = "default_sqlite_spillover_reap_interval_secs")] sqlite_spillover_reap_interval_secs: u32, #[serde(default = "default_max_request_size")] @@ -125,8 +125,8 @@ fn default_sqlite_wal_clean_second_timeout() -> u32 { 2 } -fn default_sqlite_spillover_reap_fraction() -> u32 { - 2 +fn default_sqlite_spillover_reap_fraction() -> f64 { + 2.0 } fn default_sqlite_spillover_reap_interval_secs() -> u32 { @@ -558,9 +558,7 @@ impl Database { #[cfg(feature = "sqlite")] pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { - use std::convert::TryInto; - - let fraction_factor = config.sqlite_spillover_reap_fraction.max(1).try_into().unwrap(/* We just converted it to be at least 1 */); + let fraction_factor = config.sqlite_spillover_reap_fraction.max(1.0); let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; let weak = Arc::downgrade(&engine); diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 445093a..f7c178f 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -9,7 +9,6 @@ use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; use std::{ collections::BTreeMap, future::Future, - num::NonZeroU32, ops::Deref, path::{Path, PathBuf}, pin::Pin, @@ -246,10 +245,13 @@ impl Engine { } // Reaps (at most) (.len() / `fraction`) (rounded down, min 1) connections. - pub fn reap_spillover_by_fraction(&self, fraction: NonZeroU32) { + pub fn reap_spillover_by_fraction(&self, fraction: f64) { let mut reaped = 0; - let amount = ((self.pool.spills.1.len() as u32) / fraction).max(1); + let spill_amount = self.pool.spills.1.len() as f64; + let fraction = fraction.max(1.0 /* Can never be too sure */); + + let amount = (spill_amount / fraction).max(1.0) as u32; for _ in 0..amount { if self.pool.spills.try_take().is_some() { From d253f9236a87f0571c8561290cebda0437885a62 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 20 Jul 2021 11:01:35 +0200 Subject: [PATCH 0676/1727] change fraction type --- src/database.rs | 6 +++--- src/database/abstraction/sqlite.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/database.rs b/src/database.rs index 34bce73..9452e63 100644 --- a/src/database.rs +++ b/src/database.rs @@ -126,7 +126,7 @@ fn default_sqlite_wal_clean_second_timeout() -> u32 { } fn default_sqlite_spillover_reap_fraction() -> f64 { - 2.0 + 0.5 } fn default_sqlite_spillover_reap_interval_secs() -> u32 { @@ -558,7 +558,7 @@ impl Database { #[cfg(feature = "sqlite")] pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { - let fraction_factor = config.sqlite_spillover_reap_fraction.max(1.0); + let fraction = config.sqlite_spillover_reap_fraction.clamp(0.01, 1.0); let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; let weak = Arc::downgrade(&engine); @@ -574,7 +574,7 @@ impl Database { i.tick().await; if let Some(arc) = Weak::upgrade(&weak) { - arc.reap_spillover_by_fraction(fraction_factor); + arc.reap_spillover_by_fraction(fraction); } else { break; } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f7c178f..8100ed9 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -244,14 +244,14 @@ impl Engine { .map_err(Into::into) } - // Reaps (at most) (.len() / 
`fraction`) (rounded down, min 1) connections. + // Reaps (at most) (.len() * `fraction`) (rounded down, min 1) connections. pub fn reap_spillover_by_fraction(&self, fraction: f64) { let mut reaped = 0; let spill_amount = self.pool.spills.1.len() as f64; - let fraction = fraction.max(1.0 /* Can never be too sure */); + let fraction = fraction.clamp(0.01, 1.0); - let amount = (spill_amount / fraction).max(1.0) as u32; + let amount = (spill_amount * fraction).max(1.0) as u32; for _ in 0..amount { if self.pool.spills.try_take().is_some() { From 0fcefa4125d924395063672cab9a238c8e6ff589 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 15 Jul 2021 19:54:04 +0200 Subject: [PATCH 0677/1727] fix: ruma --- Cargo.lock | 23 ++--------- Cargo.toml | 8 ++-- src/client_server/account.rs | 17 ++++---- src/client_server/directory.rs | 16 ++++---- src/client_server/push.rs | 14 +++---- src/client_server/room.rs | 14 ++++--- src/database/rooms.rs | 73 +++++++++++++++++++++++++++++++++- src/server_server.rs | 16 ++------ 8 files changed, 115 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index befd3e2..6c50f88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2015,7 +2015,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "assign", "js_int", @@ -2036,7 +2035,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "bytes", "http", @@ -2052,7 +2050,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2063,7 +2060,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "ruma-api", "ruma-common", @@ -2077,7 +2073,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "assign", "bytes", @@ -2097,7 +2092,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "indexmap", "js_int", @@ -2111,8 +2105,7 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.23.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" +version = "0.23.2" dependencies = [ "indoc", "js_int", @@ -2122,12 +2115,12 @@ dependencies = [ "ruma-serde", "serde", "serde_json", + "thiserror", ] [[package]] name = "ruma-events-macros" -version = "0.23.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" +version = "0.23.2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2138,7 +2131,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = 
"git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2153,7 +2145,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "paste", "rand 0.8.4", @@ -2167,7 +2158,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2177,12 +2167,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2195,7 +2183,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "js_int", "ruma-api", @@ -2210,7 +2197,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "bytes", "form_urlencoded", @@ -2224,7 +2210,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2235,7 +2220,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2252,7 +2236,6 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=174555857ef90d49e4b9a672be9e2fe0acdc2687#174555857ef90d49e4b9a672be9e2fe0acdc2687" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 0f40ab7..2a0f697 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "174555857ef90d49e4b9a672be9e2fe0acdc2687", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "174555857ef90d49e4b9a672be9e2fe0acdc2687", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", 
"unstable-exhaustive-types"] } +ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" @@ -119,5 +119,5 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } # For flamegraphs: -#[profile.release] -#debug = true +[profile.release] +debug = true diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 9e16d90..740a2dc 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,8 @@ -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, +}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; @@ -16,7 +20,8 @@ use ruma::{ }, events::{ room::{ - canonical_alias, guest_access, history_visibility, join_rules, member, message, name, + canonical_alias, guest_access, history_visibility, join_rules, member, message, + name::{self, RoomName}, topic, }, EventType, @@ -375,11 +380,9 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value( - name::NameEventContent::new("Admin Room".to_owned()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") - })?, - ) + content: serde_json::to_value(name::NameEventContent::new(Some( + RoomName::try_from("Admin Room".to_owned()).expect("Room name is valid"), + ))) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 4a440fd..36b817f 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -87,17 +87,17 @@ pub async fn set_room_visibility_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { - room::Visibility::_Custom(_s) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room visibility type is not supported.", - )); - } room::Visibility::Public => { db.rooms.set_public(&body.room_id, true)?; info!("{} made {} public", sender_user, body.room_id); } room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room visibility type is not supported.", + )); + } } db.flush().await?; @@ -231,8 +231,8 @@ pub async fn get_public_rooms_filtered_helper( .map_err(|_| { Error::bad_database("Invalid room name event in database.") })? 
- .name() - .map(|n| n.to_owned())) + .name + .map(|n| n.to_owned().into())) })?, num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), topic: db diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 794cbce..33e62d9 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -81,7 +81,7 @@ pub async fn get_pushrule_route( .content .get(body.rule_id.as_str()) .map(|rule| rule.clone().into()), - RuleKind::_Custom(_) => None, + _ => None, }; if let Some(rule) = rule { @@ -181,7 +181,7 @@ pub async fn set_pushrule_route( .into(), ); } - RuleKind::_Custom(_) => {} + _ => {} } db.account_data.update( @@ -245,7 +245,7 @@ pub async fn get_pushrule_actions_route( .content .get(body.rule_id.as_str()) .map(|rule| rule.actions.clone()), - RuleKind::_Custom(_) => None, + _ => None, }; db.flush().await?; @@ -314,7 +314,7 @@ pub async fn set_pushrule_actions_route( global.content.replace(rule); } } - RuleKind::_Custom(_) => {} + _ => {} }; db.account_data.update( @@ -383,7 +383,7 @@ pub async fn get_pushrule_enabled_route( .iter() .find(|rule| rule.rule_id == body.rule_id) .map_or(false, |rule| rule.enabled), - RuleKind::_Custom(_) => false, + _ => false, }; db.flush().await?; @@ -454,7 +454,7 @@ pub async fn set_pushrule_enabled_route( global.content.insert(rule); } } - RuleKind::_Custom(_) => {} + _ => {} } db.account_data.update( @@ -523,7 +523,7 @@ pub async fn delete_pushrule_route( global.content.remove(&rule); } } - RuleKind::_Custom(_) => {} + _ => {} } db.account_data.update( diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f48c5e9..9f381a0 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -9,7 +9,11 @@ use ruma::{ r0::room::{self, create_room, get_room_event, upgrade_room}, }, events::{ - room::{guest_access, history_visibility, join_rules, member, name, topic}, + room::{ + guest_access, history_visibility, join_rules, member, + name::{self, RoomName}, + topic, + }, EventType, }, serde::Raw, @@ -113,7 +117,7 @@ pub async fn create_room_route( .unwrap_or_else(|| match &body.visibility { room::Visibility::Private => create_room::RoomPreset::PrivateChat, room::Visibility::Public => create_room::RoomPreset::PublicChat, - room::Visibility::_Custom(_) => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom + _ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom }); let mut users = BTreeMap::new(); @@ -251,11 +255,11 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value( - name::NameEventContent::new(name.clone()).map_err(|_| { + content: serde_json::to_value(name::NameEventContent::new(Some( + RoomName::try_from(name.clone()).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") })?, - ) + ))) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1542db8..f6f5021 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -490,6 +490,27 @@ impl Rooms { .transpose() } + /// Returns the json of a pdu. + pub fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else::, _, _>( + || Ok(None), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? 
+ .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + } + /// Returns the pdu's id. pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid @@ -903,11 +924,59 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } + "get_pdu" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::try_from(args[0]) { + let mut outlier = false; + let mut pdu_json = + db.rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = db.rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_html( + format!("{}\n```json\n{:#?}\n```", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, json), + format!("
<p>{}</p>\n<pre><code>{}\n</code></pre>
\n", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, serde_json::to_string_pretty(&json).expect("canonical json is valid json")) + ), + )); + } + None => { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + "PDU not found.", + ), + )); + } + } + } else { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + "Event ID could not be parsed.", + ), + )); + } + } else { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + "Usage: get_pdu ", + ), + )); + } + } _ => { db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( - "Command: {}, Args: {:?}", - command, args + "Unrecognized command: {}", + command )), )); } diff --git a/src/server_server.rs b/src/server_server.rs index fb49d0c..bc85407 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1254,15 +1254,6 @@ pub fn handle_incoming_pdu<'a>( // We do need to force an update to this room's state update_state = true; - let mut auth_events = vec![]; - for map in &fork_states { - let state_auth = map - .values() - .flat_map(|pdu| pdu.auth_events.clone()) - .collect(); - auth_events.push(state_auth); - } - match state_res::StateResolution::resolve( &room_id, room_version_id, @@ -1274,8 +1265,7 @@ pub fn handle_incoming_pdu<'a>( .collect::>() }) .collect::>(), - auth_events, - &|id| { + |id| { let res = db.rooms.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); @@ -2432,10 +2422,10 @@ pub fn get_profile_information_route( let mut avatar_url = None; match &body.field { - // TODO: what to do with custom - Some(ProfileField::_Custom(_s)) => {} Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, + // TODO: what to do with custom + Some(_) => {} None => { displayname = db.users.displayname(&body.user_id)?; avatar_url = db.users.avatar_url(&body.user_id)?; From f5273f7eb11c00e84e3ccd30cc4d5485d7ff5e10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 15 Jul 2021 23:17:58 +0200 Subject: [PATCH 0678/1727] improvement: upgrade ruma and implement blurhashes --- Cargo.lock | 18 ++++++++++++++++++ Cargo.toml | 4 ++-- src/client_server/account.rs | 6 +++++- src/client_server/backup.rs | 4 ++-- src/client_server/config.rs | 4 ++-- src/client_server/device.rs | 6 +++--- src/client_server/directory.rs | 2 +- src/client_server/keys.rs | 4 ++-- src/client_server/membership.rs | 7 ++++++- src/client_server/mod.rs | 2 +- src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 8 ++++++-- src/client_server/push.rs | 8 ++++---- src/client_server/read_marker.rs | 4 ++-- src/client_server/room.rs | 2 ++ src/client_server/sync.rs | 1 + src/client_server/tag.rs | 4 ++-- src/client_server/to_device.rs | 2 +- src/client_server/typing.rs | 2 +- src/database.rs | 1 + src/database/users.rs | 28 +++++++++++++++++++++++++++- src/server_server.rs | 9 ++++++++- 22 files changed, 98 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c50f88..828d7cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2015,6 +2015,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "assign", "js_int", @@ -2035,6 +2036,7 @@ dependencies = [ [[package]] name = "ruma-api" 
version = "0.17.1" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "bytes", "http", @@ -2050,6 +2052,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2060,6 +2063,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "ruma-api", "ruma-common", @@ -2073,6 +2077,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "assign", "bytes", @@ -2092,6 +2097,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "indexmap", "js_int", @@ -2106,6 +2112,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.23.2" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "indoc", "js_int", @@ -2121,6 +2128,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.23.2" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2131,6 +2139,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "js_int", "ruma-api", @@ -2145,6 +2154,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "paste", "rand 0.8.4", @@ -2158,6 +2168,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2167,10 +2178,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" [[package]] name = "ruma-identity-service-api" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "js_int", "ruma-api", @@ -2183,6 +2196,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "js_int", "ruma-api", @@ -2197,6 +2211,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" +source = 
"git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "bytes", "form_urlencoded", @@ -2210,6 +2225,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2220,6 +2236,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2236,6 +2253,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 2a0f697..fd72d0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "174555857ef90d49e4b9a672be9e2fe0acdc2687", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "c29c2b16ec114fa655e2b70bdd53c82e35859005", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 740a2dc..ebaf7b1 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -283,6 +283,7 @@ pub async fn register_route( avatar_url: None, is_direct: None, third_party_invite: None, + blurhash: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -446,6 +447,7 @@ pub async fn register_route( avatar_url: None, is_direct: None, third_party_invite: None, + blurhash: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -466,6 +468,7 @@ pub async fn register_route( avatar_url: None, is_direct: None, third_party_invite: None, + blurhash: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -578,7 +581,7 @@ pub async fn change_password_route( db.flush().await?; - Ok(change_password::Response.into()) + Ok(change_password::Response {}.into()) } /// # `GET _matrix/client/r0/account/whoami` @@ -664,6 +667,7 @@ pub async fn deactivate_route( avatar_url: None, is_direct: None, third_party_invite: None, + blurhash: None, }; let mutex = Arc::clone( diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index ccb17fa..6d540cb 100644 --- 
a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -46,7 +46,7 @@ pub async fn update_backup_route( db.flush().await?; - Ok(update_backup::Response.into()) + Ok(update_backup::Response {}.into()) } #[cfg_attr( @@ -119,7 +119,7 @@ pub async fn delete_backup_route( db.flush().await?; - Ok(delete_backup::Response.into()) + Ok(delete_backup::Response {}.into()) } /// Add the received backup keys to the database. diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 4f33689..b9826bf 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -45,7 +45,7 @@ pub async fn set_global_account_data_route( db.flush().await?; - Ok(set_global_account_data::Response.into()) + Ok(set_global_account_data::Response {}.into()) } #[cfg_attr( @@ -80,7 +80,7 @@ pub async fn set_room_account_data_route( db.flush().await?; - Ok(set_room_account_data::Response.into()) + Ok(set_room_account_data::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 44b9c32..085d034 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -73,7 +73,7 @@ pub async fn update_device_route( db.flush().await?; - Ok(update_device::Response.into()) + Ok(update_device::Response {}.into()) } #[cfg_attr( @@ -125,7 +125,7 @@ pub async fn delete_device_route( db.flush().await?; - Ok(delete_device::Response.into()) + Ok(delete_device::Response {}.into()) } #[cfg_attr( @@ -179,5 +179,5 @@ pub async fn delete_devices_route( db.flush().await?; - Ok(delete_devices::Response.into()) + Ok(delete_devices::Response {}.into()) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 36b817f..64375f0 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -102,7 +102,7 @@ pub async fn set_room_visibility_route( db.flush().await?; - Ok(set_room_visibility::Response.into()) + Ok(set_room_visibility::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 8eee408..8f1afba 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -163,7 +163,7 @@ pub async fn upload_signing_keys_route( db.flush().await?; - Ok(upload_signing_keys::Response.into()) + Ok(upload_signing_keys::Response {}.into()) } #[cfg_attr( @@ -224,7 +224,7 @@ pub async fn upload_signatures_route( db.flush().await?; - Ok(upload_signatures::Response.into()) + Ok(upload_signatures::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index a74950b..ef141f0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -165,7 +165,7 @@ pub async fn invite_user_route( if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush().await?; - Ok(invite_user::Response.into()) + Ok(invite_user::Response {}.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -261,6 +261,7 @@ pub async fn ban_user_route( avatar_url: db.users.avatar_url(&body.user_id)?, is_direct: None, third_party_invite: None, + blurhash: db.users.blurhash(&body.user_id)?, }), |event| { let mut event = serde_json::from_value::>( @@ -556,6 +557,7 @@ async fn join_room_by_id_helper( avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }) .expect("event is valid, we just created it"), 
); @@ -685,6 +687,7 @@ async fn join_room_by_id_helper( avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }; db.rooms.build_and_append_pdu( @@ -833,6 +836,7 @@ pub async fn invite_helper<'a>( is_direct: Some(is_direct), membership: MembershipState::Invite, third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }) .expect("member event is valid value"); @@ -1008,6 +1012,7 @@ pub async fn invite_helper<'a>( avatar_url: db.users.avatar_url(&user_id)?, is_direct: Some(is_direct), third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index f211a57..040015d 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -75,5 +75,5 @@ pub const SESSION_ID_LENGTH: usize = 256; #[options("/<_..>")] #[tracing::instrument] pub async fn options_route() -> ConduitResult { - Ok(send_event_to_device::Response.into()) + Ok(send_event_to_device::Response {}.into()) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index bfe638f..ca78a88 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -43,7 +43,7 @@ pub async fn set_presence_route( db.flush().await?; - Ok(set_presence::Response.into()) + Ok(set_presence::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index d947bbe..1938c87 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -108,7 +108,7 @@ pub async fn set_displayname_route( db.flush().await?; - Ok(set_display_name::Response.into()) + Ok(set_display_name::Response {}.into()) } #[cfg_attr( @@ -140,6 +140,8 @@ pub async fn set_avatar_url_route( db.users .set_avatar_url(&sender_user, body.avatar_url.clone())?; + db.users.set_blurhash(&sender_user, body.blurhash.clone())?; + // Send a new membership event and presence update into all joined rooms for (pdu_builder, room_id) in db .rooms @@ -220,7 +222,7 @@ pub async fn set_avatar_url_route( db.flush().await?; - Ok(set_avatar_url::Response.into()) + Ok(set_avatar_url::Response {}.into()) } #[cfg_attr( @@ -234,6 +236,7 @@ pub async fn get_avatar_url_route( ) -> ConduitResult { Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&body.user_id)?, + blurhash: db.users.blurhash(&body.user_id)?, } .into()) } @@ -257,6 +260,7 @@ pub async fn get_profile_route( Ok(get_profile::Response { avatar_url: db.users.avatar_url(&body.user_id)?, + blurhash: db.users.blurhash(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?, } .into()) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 33e62d9..867b452 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -194,7 +194,7 @@ pub async fn set_pushrule_route( db.flush().await?; - Ok(set_pushrule::Response.into()) + Ok(set_pushrule::Response {}.into()) } #[cfg_attr( @@ -327,7 +327,7 @@ pub async fn set_pushrule_actions_route( db.flush().await?; - Ok(set_pushrule_actions::Response.into()) + Ok(set_pushrule_actions::Response {}.into()) } #[cfg_attr( @@ -467,7 +467,7 @@ pub async fn set_pushrule_enabled_route( db.flush().await?; - Ok(set_pushrule_enabled::Response.into()) + Ok(set_pushrule_enabled::Response {}.into()) } #[cfg_attr( @@ -536,7 +536,7 @@ pub async fn delete_pushrule_route( db.flush().await?; - Ok(delete_pushrule::Response.into()) + 
Ok(delete_pushrule::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index fe49af9..f5e2924 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -77,7 +77,7 @@ pub async fn set_read_marker_route( db.flush().await?; - Ok(set_read_marker::Response.into()) + Ok(set_read_marker::Response {}.into()) } #[cfg_attr( @@ -130,5 +130,5 @@ pub async fn create_receipt_route( db.flush().await?; - Ok(create_receipt::Response.into()) + Ok(create_receipt::Response {}.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 9f381a0..c9b93c3 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -96,6 +96,7 @@ pub async fn create_room_route( avatar_url: db.users.avatar_url(&sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }) .expect("event is valid, we just created it"), unsigned: None, @@ -444,6 +445,7 @@ pub async fn upgrade_room_route( avatar_url: db.users.avatar_url(&sender_user)?, is_direct: None, third_party_invite: None, + blurhash: db.users.blurhash(&sender_user)?, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fe11304..3beddad 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -753,6 +753,7 @@ async fn sync_helper( leave: left_rooms, join: joined_rooms, invite: invited_rooms, + knock: BTreeMap::new(), // TODO }, presence: sync_events::Presence { events: presence_updates diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 17df2c2..223d122 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -42,7 +42,7 @@ pub async fn update_tag_route( db.flush().await?; - Ok(create_tag::Response.into()) + Ok(create_tag::Response {}.into()) } #[cfg_attr( @@ -76,7 +76,7 @@ pub async fn delete_tag_route( db.flush().await?; - Ok(delete_tag::Response.into()) + Ok(delete_tag::Response {}.into()) } #[cfg_attr( diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 9faa255..7896af9 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -69,5 +69,5 @@ pub async fn send_event_to_device_route( db.flush().await?; - Ok(send_event_to_device::Response.into()) + Ok(send_event_to_device::Response {}.into()) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 7a590af..50082ee 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -29,5 +29,5 @@ pub fn create_typing_event_route( .typing_remove(&sender_user, &body.room_id, &db.globals)?; } - Ok(create_typing_event::Response.into()) + Ok(create_typing_event::Response {}.into()) } diff --git a/src/database.rs b/src/database.rs index 9452e63..27b9eb6 100644 --- a/src/database.rs +++ b/src/database.rs @@ -229,6 +229,7 @@ impl Database { userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, userdeviceid_token: builder.open_tree("userdeviceid_token")?, userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, diff --git a/src/database/users.rs b/src/database/users.rs index 1480d3f..cd46c45 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -15,6 
+15,7 @@ pub struct Users { pub(super) userid_password: Arc, pub(super) userid_displayname: Arc, pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, pub(super) userdeviceid_token: Arc, pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 @@ -150,7 +151,7 @@ impl Users { Ok(()) } - /// Get a the avatar_url of a user. + /// Get the avatar_url of a user. pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? @@ -174,6 +175,31 @@ impl Users { Ok(()) } + /// Get the blurhash of a user. + pub fn blurhash(&self, user_id: &UserId) -> Result> { + self.userid_blurhash + .get(user_id.as_bytes())? + .map(|bytes| { + let s = utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; + + Ok(s) + }) + .transpose() + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + if let Some(blurhash) = blurhash { + self.userid_blurhash + .insert(user_id.as_bytes(), blurhash.as_bytes())?; + } else { + self.userid_blurhash.remove(user_id.as_bytes())?; + } + + Ok(()) + } + /// Adds a new device to a user. pub fn create_device( &self, diff --git a/src/server_server.rs b/src/server_server.rs index bc85407..bfb3e72 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2001,6 +2001,7 @@ pub fn create_join_event_template_route( let content = serde_json::to_value(MemberEventContent { avatar_url: None, + blurhash: None, displayname: None, is_direct: None, membership: MembershipState::Join, @@ -2420,19 +2421,25 @@ pub fn get_profile_information_route( let mut displayname = None; let mut avatar_url = None; + let mut blurhash = None; match &body.field { Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, + Some(ProfileField::AvatarUrl) => { + avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)? 
+ } // TODO: what to do with custom Some(_) => {} None => { displayname = db.users.displayname(&body.user_id)?; avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)?; } } Ok(get_profile_information::v1::Response { + blurhash, displayname, avatar_url, } From cfaa900e8306b35c2d9b719b96bfce29ac37f58f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Jul 2021 20:43:39 +0200 Subject: [PATCH 0679/1727] improvement: auth chain cache --- Cargo.lock | 36 ++-- Cargo.toml | 7 +- src/client_server/membership.rs | 4 +- src/client_server/sync.rs | 6 +- src/database.rs | 5 +- src/database/abstraction/rocksdb.rs | 6 +- src/database/abstraction/sqlite.rs | 6 +- src/database/globals.rs | 24 +-- src/database/rooms.rs | 23 ++- src/server_server.rs | 260 +++++++++++++++------------- 10 files changed, 201 insertions(+), 176 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 828d7cc..8bfba0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2015,7 +2015,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "assign", "js_int", @@ -2036,7 +2036,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "bytes", "http", @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2063,7 +2063,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "ruma-api", "ruma-common", @@ -2077,7 +2077,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "assign", "bytes", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "indexmap", "js_int", @@ -2112,7 +2112,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.23.2" -source = 
"git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "indoc", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.23.2" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2139,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "js_int", "ruma-api", @@ -2154,7 +2154,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "paste", "rand 0.8.4", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2178,12 +2178,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "js_int", "ruma-api", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "js_int", "ruma-api", @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "bytes", "form_urlencoded", @@ -2225,7 +2225,7 @@ 
dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2236,7 +2236,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2253,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c29c2b16ec114fa655e2b70bdd53c82e35859005#c29c2b16ec114fa655e2b70bdd53c82e35859005" +source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index fd72d0e..64d67a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "c29c2b16ec114fa655e2b70bdd53c82e35859005", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "c29c2b16ec114fa655e2b70bdd53c82e35859005", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -119,5 +120,5 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } # For flamegraphs: -[profile.release] -debug = true +#[profile.release] +#debug = true diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ef141f0..d8c2781 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -29,7 +29,7 @@ use ruma::{ uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ - collections::{btree_map::Entry, BTreeMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -607,7 +607,7 @@ async fn join_room_by_id_helper( let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; - let mut state = BTreeMap::new(); + let mut state = HashMap::new(); let pub_key_map = 
RwLock::new(BTreeMap::new()); for result in futures::future::join_all( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3beddad..65922be 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ DeviceId, RoomId, UserId, }; use std::{ - collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, sync::Arc, time::Duration, @@ -622,10 +622,10 @@ async fn sync_helper( .presence_since(&room_id, since, &db.rooms, &db.globals)? { match presence_updates.entry(user_id) { - hash_map::Entry::Vacant(v) => { + Entry::Vacant(v) => { v.insert(presence); } - hash_map::Entry::Occupied(mut o) => { + Entry::Occupied(mut o) => { let p = o.get_mut(); // Update existing presence event with more info diff --git a/src/database.rs b/src/database.rs index 27b9eb6..e359a5f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -33,7 +33,7 @@ use std::{ io::Write, ops::Deref, path::Path, - sync::{Arc, RwLock}, + sync::{Arc, Mutex, RwLock}, }; use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; @@ -292,7 +292,8 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, prevevent_parent: builder.open_tree("prevevent_parent")?, - pdu_cache: RwLock::new(LruCache::new(10_000)), + pdu_cache: Mutex::new(LruCache::new(100_000)), + auth_chain_cache: Mutex::new(LruCache::new(100_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b996130..4699b2d 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -5,14 +5,14 @@ use std::{future::Future, pin::Pin, sync::Arc}; use super::{DatabaseEngine, Tree}; -use std::{collections::BTreeMap, sync::RwLock}; +use std::{collections::HashMap, sync::RwLock}; pub struct Engine(rocksdb::DBWithThreadMode); pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, - watchers: RwLock, Vec>>>, + watchers: RwLock, Vec>>>, } impl DatabaseEngine for Engine { @@ -58,7 +58,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(RocksDbEngineTree { name, db: Arc::clone(self), - watchers: RwLock::new(BTreeMap::new()), + watchers: RwLock::new(HashMap::new()), })) } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 8100ed9..8cc6a8d 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -7,7 +7,7 @@ use log::debug; use parking_lot::{Mutex, MutexGuard, RwLock}; use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; use std::{ - collections::BTreeMap, + collections::HashMap, future::Future, ops::Deref, path::{Path, PathBuf}, @@ -206,7 +206,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(SqliteTable { engine: Arc::clone(self), name: name.to_owned(), - watchers: RwLock::new(BTreeMap::new()), + watchers: RwLock::new(HashMap::new()), })) } @@ -266,7 +266,7 @@ impl Engine { pub struct SqliteTable { engine: Arc, name: String, - watchers: RwLock, Vec>>>, + watchers: RwLock, Vec>>>, } type TupleOfBytes = (Vec, Vec); diff --git a/src/database/globals.rs b/src/database/globals.rs index 0e72297..fbd41a3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -41,12 +41,12 @@ pub struct Globals { dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, pub(super) server_signingkeys: Arc, - pub 
bad_event_ratelimiter: Arc>>, - pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub bad_event_ratelimiter: Arc>>, + pub bad_signature_ratelimiter: Arc, RateLimitState>>>, + pub servername_ratelimiter: Arc, Arc>>>, + pub sync_receivers: RwLock), SyncHandle>>, + pub roomid_mutex: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub rotate: RotationHandler, } @@ -196,12 +196,12 @@ impl Globals { tls_name_override, server_signingkeys, jwt_decoding_key, - bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), - bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), - servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), - roomid_mutex: RwLock::new(BTreeMap::new()), - roomid_mutex_federation: RwLock::new(BTreeMap::new()), - sync_receivers: RwLock::new(BTreeMap::new()), + bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + roomid_mutex: RwLock::new(HashMap::new()), + roomid_mutex_federation: RwLock::new(HashMap::new()), + sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f6f5021..fa121bd 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -25,7 +25,7 @@ use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, - sync::{Arc, RwLock}, + sync::{Arc, Mutex}, }; use super::{abstraction::Tree, admin::AdminCommand, pusher}; @@ -84,7 +84,8 @@ pub struct Rooms { /// RoomId + EventId -> Parent PDU EventId. pub(super) prevevent_parent: Arc, - pub(super) pdu_cache: RwLock>>, + pub(super) pdu_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex>>, } impl Rooms { @@ -109,7 +110,7 @@ impl Rooms { pub fn state_full( &self, shortstatehash: u64, - ) -> Result>> { + ) -> Result>> { let state = self .stateid_shorteventid .scan_prefix(shortstatehash.to_be_bytes().to_vec()) @@ -282,7 +283,7 @@ impl Rooms { pub fn force_state( &self, room_id: &RoomId, - state: BTreeMap<(EventType, String), EventId>, + state: HashMap<(EventType, String), EventId>, db: &Database, ) -> Result<()> { let state_hash = self.calculate_hash( @@ -402,11 +403,11 @@ impl Rooms { pub fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_full(current_shortstatehash) } else { - Ok(BTreeMap::new()) + Ok(HashMap::new()) } } @@ -542,7 +543,7 @@ impl Rooms { /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.write().unwrap().get_mut(&event_id) { + if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(&event_id) { return Ok(Some(Arc::clone(p))); } @@ -568,7 +569,7 @@ impl Rooms { .transpose()? 
{ self.pdu_cache - .write() + .lock() .unwrap() .insert(event_id.clone(), Arc::clone(&pdu)); Ok(Some(pdu)) @@ -2520,4 +2521,10 @@ impl Rooms { Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } + + pub fn auth_chain_cache( + &self, + ) -> std::sync::MutexGuard<'_, LruCache>> { + self.auth_chain_cache.lock().unwrap() + } } diff --git a/src/server_server.rs b/src/server_server.rs index bfb3e72..39a1847 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,6 +6,7 @@ use crate::{ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{debug, error, info, trace, warn}; +use lru_cache::LruCache; use regex::Regex; use rocket::response::content::Json; use ruma::{ @@ -52,7 +53,7 @@ use ruma::{ ServerSigningKeyId, UserId, }; use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt::Debug, future::Future, @@ -931,7 +932,7 @@ pub fn handle_incoming_pdu<'a>( ); // Build map of auth events - let mut auth_events = BTreeMap::new(); + let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { let auth_event = db .rooms @@ -1097,7 +1098,7 @@ pub fn handle_incoming_pdu<'a>( Err(_) => return Err("Failed to fetch state events.".to_owned()), }; - let mut state = BTreeMap::new(); + let mut state = HashMap::new(); for pdu in state_vec { match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { Entry::Vacant(v) => { @@ -1173,7 +1174,8 @@ pub fn handle_incoming_pdu<'a>( } } - let mut fork_states = BTreeSet::new(); + let mut extremity_statehashes = Vec::new(); + for id in &extremities { match db .rooms @@ -1181,30 +1183,19 @@ pub fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to ask db for pdu.".to_owned())? { Some(leaf_pdu) => { - let pdu_shortstatehash = db - .rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?; - - let mut leaf_state = db - .rooms - .state_full(pdu_shortstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &leaf_pdu.state_key { - // Now it's the state after - let key = (leaf_pdu.kind.clone(), state_key.clone()); - leaf_state.insert(key, leaf_pdu); - } - - fork_states.insert(leaf_state); + extremity_statehashes.push(( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + Some(leaf_pdu), + )); } _ => { error!("Missing state snapshot for {:?}", id); @@ -1218,12 +1209,36 @@ pub fn handle_incoming_pdu<'a>( // don't just trust a set of state we got from a remote). // We do this by adding the current state to the list of fork states + let current_statehash = db + .rooms + .current_shortstatehash(&room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? 
+ .expect("every room has state"); + let current_state = db .rooms - .room_state_full(&room_id) - .map_err(|_| "Failed to load room state.".to_owned())?; + .state_full(current_statehash) + .map_err(|_| "Failed to load room state.")?; - fork_states.insert(current_state.clone()); + extremity_statehashes.push((current_statehash.clone(), None)); + + let mut fork_states = Vec::new(); + for (statehash, leaf_pdu) in extremity_statehashes { + let mut leaf_state = db + .rooms + .state_full(statehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(leaf_pdu) = leaf_pdu { + if let Some(state_key) = &leaf_pdu.state_key { + // Now it's the state after + let key = (leaf_pdu.kind.clone(), state_key.clone()); + leaf_state.insert(key, leaf_pdu); + } + } + + fork_states.push(leaf_state); + } // We also add state after incoming event to the fork states extremities.insert(incoming_pdu.event_id.clone()); @@ -1234,9 +1249,7 @@ pub fn handle_incoming_pdu<'a>( incoming_pdu.clone(), ); } - fork_states.insert(state_after.clone()); - - let fork_states = fork_states.into_iter().collect::>(); + fork_states.push(state_after.clone()); let mut update_state = false; // 14. Use state resolution to find new room state @@ -1254,17 +1267,31 @@ pub fn handle_incoming_pdu<'a>( // We do need to force an update to this room's state update_state = true; - match state_res::StateResolution::resolve( + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id.clone())) + .collect::>() + }) + .collect::>(); + + let auth_chain_t = Instant::now(); + let mut auth_chain_sets = Vec::new(); + for state in fork_states { + auth_chain_sets.push( + get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) + .map_err(|_| "Failed to load auth chain.".to_owned())?, + ); + } + dbg!(auth_chain_t.elapsed()); + + let state_res_t = Instant::now(); + let state = match state_res::StateResolution::resolve( &room_id, room_version_id, - &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), + fork_states, + auth_chain_sets, |id| { let res = db.rooms.get_pdu(id); if let Err(e) = &res { @@ -1277,7 +1304,9 @@ pub fn handle_incoming_pdu<'a>( Err(_) => { return Err("State resolution failed, either an event could not be found or deserialization".into()); } - } + }; + dbg!(state_res_t.elapsed()); + state }; // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -1696,6 +1725,42 @@ async fn append_incoming_pdu( Ok(pdu_id) } +fn get_auth_chain(starting_events: Vec, db: &Database) -> Result> { + let mut auth_chain_cache = db.rooms.auth_chain_cache(); + + let mut auth_chain = HashSet::new(); + + for event in starting_events { + auth_chain.extend(get_auth_chain_recursive(&event, &mut auth_chain_cache, db)?); + } + + Ok(auth_chain) +} + +fn get_auth_chain_recursive( + event_id: &EventId, + auth_chain_cache: &mut std::sync::MutexGuard<'_, LruCache>>, + db: &Database, +) -> Result> { + if let Some(cached) = auth_chain_cache.get_mut(event_id) { + return Ok(cached.clone()); + } + + let mut auth_chain = HashSet::new(); + + if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ + for auth_event in &pdu.auth_events { + auth_chain.extend(get_auth_chain_recursive(&auth_event, auth_chain_cache, db)?); + } + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + auth_chain_cache.insert(event_id.clone(), auth_chain.clone()); + + Ok(auth_chain) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/event/<_>", data = "") @@ -1783,35 +1848,20 @@ pub fn get_event_authorization_route( return Err(Error::bad_config("Federation is disabled.")); } - let mut auth_chain = Vec::new(); - let mut auth_chain_ids = BTreeSet::::new(); - let mut todo = BTreeSet::new(); - todo.insert(body.event_id.clone()); + let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; - while let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? { - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); - - let pdu_json = PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&event_id)?.unwrap(), - ); - auth_chain.push(pdu_json); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); + Ok(get_event_authorization::v1::Response { + auth_chain: auth_chain_ids + .into_iter() + .map(|id| { + Ok::<_, Error>(PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&id)?.unwrap(), + )) + }) + .filter_map(|r| r.ok()) + .collect(), } - - Ok(get_event_authorization::v1::Response { auth_chain }.into()) + .into()) } #[cfg_attr( @@ -1846,35 +1896,21 @@ pub fn get_room_state_route( }) .collect(); - let mut auth_chain = Vec::new(); - let mut auth_chain_ids = BTreeSet::::new(); - let mut todo = BTreeSet::new(); - todo.insert(body.event_id.clone()); + let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; - while let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? { - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); - - let pdu_json = PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&event_id)?.unwrap(), - ); - auth_chain.push(pdu_json); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); + Ok(get_room_state::v1::Response { + auth_chain: auth_chain_ids + .into_iter() + .map(|id| { + Ok::<_, Error>(PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&id)?.unwrap(), + )) + }) + .filter_map(|r| r.ok()) + .collect(), + pdus, } - - Ok(get_room_state::v1::Response { auth_chain, pdus }.into()) + .into()) } #[cfg_attr( @@ -1904,27 +1940,7 @@ pub fn get_room_state_ids_route( .into_iter() .collect(); - let mut auth_chain_ids = BTreeSet::::new(); - let mut todo = BTreeSet::new(); - todo.insert(body.event_id.clone()); - - while let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); - } + let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.into_iter().collect(), @@ -2182,8 +2198,8 @@ pub async fn create_join_event_route( let state_ids = db.rooms.state_full_ids(shortstatehash)?; - let mut auth_chain_ids = BTreeSet::::new(); - let mut todo = state_ids.iter().cloned().collect::>(); + let mut auth_chain_ids = HashSet::::new(); + let mut todo = state_ids.iter().cloned().collect::>(); while let Some(event_id) = todo.iter().next().cloned() { if let Some(pdu) = db.rooms.get_pdu(&event_id)? { @@ -2191,7 +2207,7 @@ pub async fn create_join_event_route( pdu.auth_events .clone() .into_iter() - .collect::>() + .collect::>() .difference(&auth_chain_ids) .cloned(), ); From 0f8ecdc625cd5fa66e726d2b47a9fe7f419b5541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 12:41:35 +0200 Subject: [PATCH 0680/1727] fix: state res fixes --- Cargo.lock | 36 +++++++++++++++++----------------- Cargo.toml | 2 +- src/database/rooms.rs | 6 +++--- src/server_server.rs | 45 ++++++++++++++++++++++--------------------- 4 files changed, 45 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bfba0e..a6b87c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2015,7 +2015,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "assign", "js_int", @@ -2036,7 +2036,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "bytes", "http", @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2063,7 +2063,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "ruma-api", "ruma-common", @@ -2077,7 +2077,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = 
"git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "assign", "bytes", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "indexmap", "js_int", @@ -2112,7 +2112,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.23.2" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "indoc", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.23.2" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2139,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "js_int", "ruma-api", @@ -2154,7 +2154,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "paste", "rand 0.8.4", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2178,12 +2178,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "js_int", "ruma-api", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = 
"git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "js_int", "ruma-api", @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "bytes", "form_urlencoded", @@ -2225,7 +2225,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2236,7 +2236,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2253,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386#a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386" +source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 64d67a1..c62ff82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { git = "https://github.com/ruma/ruma", rev = "c29c2b16ec114fa655e2b70bdd53c82e35859005", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "a3fd405d6b331c7bc4c6f366bc1b6ec303b3a386", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "74cf83c4ca937fa5e2709fb71e9d11848e72e487", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fa121bd..aad691b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -653,9 +653,9 @@ impl Rooms { Ok(()) } - pub fn 
is_pdu_referenced(&self, pdu: &PduEvent) -> Result { - let mut key = pdu.room_id().as_bytes().to_vec(); - key.extend_from_slice(pdu.event_id().as_bytes()); + pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(event_id.as_bytes()); Ok(self.prevevent_parent.get(&key)?.is_some()) } diff --git a/src/server_server.rs b/src/server_server.rs index 39a1847..e463bba 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,7 +6,6 @@ use crate::{ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{debug, error, info, trace, warn}; -use lru_cache::LruCache; use regex::Regex; use rocket::response::content::Json; use ruma::{ @@ -1174,6 +1173,9 @@ pub fn handle_incoming_pdu<'a>( } } + // Only keep those extremities we don't have in our timeline yet + extremities.retain(|id| !matches!(db.rooms.get_non_outlier_pdu_json(id), Ok(Some(_)))); + let mut extremity_statehashes = Vec::new(); for id in &extremities { @@ -1276,7 +1278,6 @@ pub fn handle_incoming_pdu<'a>( }) .collect::>(); - let auth_chain_t = Instant::now(); let mut auth_chain_sets = Vec::new(); for state in fork_states { auth_chain_sets.push( @@ -1284,9 +1285,7 @@ pub fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to load auth chain.".to_owned())?, ); } - dbg!(auth_chain_t.elapsed()); - let state_res_t = Instant::now(); let state = match state_res::StateResolution::resolve( &room_id, room_version_id, @@ -1305,7 +1304,6 @@ pub fn handle_incoming_pdu<'a>( return Err("State resolution failed, either an event could not be found or deserialization".into()); } }; - dbg!(state_res_t.elapsed()); state }; @@ -1726,38 +1724,41 @@ async fn append_incoming_pdu( } fn get_auth_chain(starting_events: Vec, db: &Database) -> Result> { - let mut auth_chain_cache = db.rooms.auth_chain_cache(); + let mut full_auth_chain = HashSet::new(); - let mut auth_chain = HashSet::new(); + let mut cache = db.rooms.auth_chain_cache(); + for event_id in starting_events { + let auth_chain = if let Some(cached) = cache.get_mut(&event_id) { + cached.clone() + } else { + drop(cache); + let auth_chain = get_auth_chain_recursive(&event_id, db)?; - for event in starting_events { - auth_chain.extend(get_auth_chain_recursive(&event, &mut auth_chain_cache, db)?); + cache = db.rooms.auth_chain_cache(); + + cache.insert(event_id, auth_chain.clone()); + + auth_chain + }; + + full_auth_chain.extend(auth_chain); } - Ok(auth_chain) + Ok(full_auth_chain) } -fn get_auth_chain_recursive( - event_id: &EventId, - auth_chain_cache: &mut std::sync::MutexGuard<'_, LruCache>>, - db: &Database, -) -> Result> { - if let Some(cached) = auth_chain_cache.get_mut(event_id) { - return Ok(cached.clone()); - } - +fn get_auth_chain_recursive(event_id: &EventId, db: &Database) -> Result> { let mut auth_chain = HashSet::new(); if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ + auth_chain.extend(pdu.auth_events.iter().cloned()); for auth_event in &pdu.auth_events { - auth_chain.extend(get_auth_chain_recursive(&auth_event, auth_chain_cache, db)?); + auth_chain.extend(get_auth_chain_recursive(&auth_event, db)?); } } else { warn!("Could not find pdu mentioned in auth events."); } - auth_chain_cache.insert(event_id.clone(), auth_chain.clone()); - Ok(auth_chain) } From f0ac7ed933d4b256192a7d904a37333653883cc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 15:24:18 +0200 Subject: [PATCH 0681/1727] chore: update ruma again --- Cargo.lock | 36 +++++++++++++++++----------------- Cargo.toml | 4 ++-- src/client_server/account.rs | 13 ++++++------ src/client_server/directory.rs | 2 +- src/client_server/room.rs | 14 +++---------- src/database/pusher.rs | 5 ++++- 6 files changed, 35 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6b87c9..808ba4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2015,7 +2015,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "assign", "js_int", @@ -2036,7 +2036,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "bytes", "http", @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2063,7 +2063,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "ruma-api", "ruma-common", @@ -2077,7 +2077,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "assign", "bytes", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "indexmap", "js_int", @@ -2112,7 +2112,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.23.2" -source = 
"git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "indoc", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.23.2" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2139,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "js_int", "ruma-api", @@ -2154,7 +2154,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "paste", "rand 0.8.4", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2178,12 +2178,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "js_int", "ruma-api", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "js_int", "ruma-api", @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "bytes", "form_urlencoded", @@ -2225,7 +2225,7 @@ 
dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2236,7 +2236,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2253,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=74cf83c4ca937fa5e2709fb71e9d11848e72e487#74cf83c4ca937fa5e2709fb71e9d11848e72e487" +source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index c62ff82..2ce4b03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "c29c2b16ec114fa655e2b70bdd53c82e35859005", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "74cf83c4ca937fa5e2709fb71e9d11848e72e487", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "eb19b0e08a901b87d11b3be0890ec788cc760492", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "74cf83c4ca937fa5e2709fb71e9d11848e72e487", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ebaf7b1..ddb44d6 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -20,12 +20,12 @@ use ruma::{ }, events::{ room::{ - canonical_alias, guest_access, history_visibility, join_rules, member, message, - name::{self, RoomName}, + canonical_alias, guest_access, history_visibility, join_rules, member, message, name, topic, }, EventType, }, + identifiers::RoomName, push, RoomAliasId, RoomId, RoomVersionId, UserId, }; @@ -378,13 +378,14 @@ pub async fn register_route( )?; // 6. 
Events implied by name and topic + let room_name = + Box::::try_from(format!("{} Admin Room", db.globals.server_name())) + .expect("Room name is valid"); db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value(name::NameEventContent::new(Some( - RoomName::try_from("Admin Room".to_owned()).expect("Room name is valid"), - ))) - .expect("event is valid, we just created it"), + content: serde_json::to_value(name::NameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 64375f0..d738886 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -316,7 +316,7 @@ pub async fn get_public_rooms_filtered_helper( .map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { - if name.to_lowercase().contains(&query) { + if name.as_str().to_lowercase().contains(&query) { return true; } } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index c9b93c3..6e27130 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -9,11 +9,7 @@ use ruma::{ r0::room::{self, create_room, get_room_event, upgrade_room}, }, events::{ - room::{ - guest_access, history_visibility, join_rules, member, - name::{self, RoomName}, - topic, - }, + room::{guest_access, history_visibility, join_rules, member, name, topic}, EventType, }, serde::Raw, @@ -256,12 +252,8 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value(name::NameEventContent::new(Some( - RoomName::try_from(name.clone()).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.") - })?, - ))) - .expect("event is valid, we just created it"), + content: serde_json::to_value(name::NameEventContent::new(Some(name.clone()))) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 3210cb1..348f4dc 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -11,6 +11,7 @@ use ruma::{ IncomingResponse, OutgoingRequest, SendAccessToken, }, events::{room::power_levels::PowerLevelsEventContent, EventType}, + identifiers::RoomName, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; @@ -299,7 +300,9 @@ async fn send_notice( .rooms .room_state_get(&event.room_id, &EventType::RoomName, "")? 
.map(|pdu| match pdu.content.get("name") { - Some(serde_json::Value::String(s)) => Some(s.to_string()), + Some(serde_json::Value::String(s)) => { + Some(Box::::try_from(&**s).expect("room name is valid")) + } _ => None, }) .flatten(); From 56e5d5e3d2e191add0aafb5c89be62f59f33710c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 18:38:17 +0200 Subject: [PATCH 0682/1727] fix: add warning for strange bug --- src/client_server/sync.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 65922be..7ab27d9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,5 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; -use log::error; +use log::{error, warn}; use ruma::{ api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -262,9 +262,12 @@ async fn sync_helper( db.rooms .pdu_shortstatehash(&pdu.1.event_id) .transpose() - .expect("all pdus have state") + .ok_or_else(|| { + warn!("PDU without state: {}", pdu.1.event_id); + Error::bad_database("Found PDU without state") + }) }) - .transpose()?; + .transpose()?.transpose()?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { From 68cc743f2a42a4f4155298a6231c48091aa401d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 18:45:21 +0200 Subject: [PATCH 0683/1727] fix: use get_auth_chain method more often --- src/client_server/sync.rs | 3 ++- src/server_server.rs | 22 +--------------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7ab27d9..427b9ad 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -267,7 +267,8 @@ async fn sync_helper( Error::bad_database("Found PDU without state") }) }) - .transpose()?.transpose()?; + .transpose()? + .transpose()?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { diff --git a/src/server_server.rs b/src/server_server.rs index e463bba..d51b672 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2198,27 +2198,7 @@ pub async fn create_join_event_route( drop(mutex_lock); let state_ids = db.rooms.state_full_ids(shortstatehash)?; - - let mut auth_chain_ids = HashSet::::new(); - let mut todo = state_ids.iter().cloned().collect::>(); - - while let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.clone().into_iter()); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); - } + let auth_chain_ids = get_auth_chain(state_ids.iter().cloned().collect(), &db)?; for server in db .rooms From 728e176a06a372eb49430f1a24f0f9cfd65e6c5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 19:40:25 +0200 Subject: [PATCH 0684/1727] feat: /keys/query and /keys/claim over federation --- src/client_server/keys.rs | 88 +++++++++++++++++++++++++++++++++------ src/server_server.rs | 5 ++- 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 8f1afba..1ae9f80 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,19 +1,23 @@ use super::SESSION_ID_LENGTH; use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{ - keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, + api::{ + client::{ + error::ErrorKind, + r0::{ + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, + }, + uiaa::{AuthFlow, UiaaInfo}, }, - uiaa::{AuthFlow, UiaaInfo}, }, + federation, }, encryption::UnsignedDeviceInfo, DeviceId, DeviceKeyAlgorithm, UserId, }; +use serde_json::json; use std::collections::{BTreeMap, HashSet}; #[cfg(feature = "conduit_bin")] @@ -84,7 +88,8 @@ pub async fn get_keys_route( &body.device_keys, |u| u == sender_user, &db, - )?; + ) + .await?; Ok(response.into()) } @@ -98,7 +103,7 @@ pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, ) -> ConduitResult { - let response = claim_keys_helper(&body.one_time_keys, &db)?; + let response = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush().await?; @@ -278,7 +283,7 @@ pub async fn get_key_changes_route( .into()) } -pub fn get_keys_helper bool>( +pub async fn get_keys_helper bool>( sender_user: Option<&UserId>, device_keys_input: &BTreeMap>>, allowed_signatures: F, @@ -289,7 +294,16 @@ pub fn get_keys_helper bool>( let mut user_signing_keys = BTreeMap::new(); let mut device_keys = BTreeMap::new(); + let mut get_over_federation = BTreeMap::new(); + for (user_id, device_ids) in device_keys_input { + if user_id.server_name() != db.globals.server_name() { + get_over_federation + .entry(user_id.server_name()) + .or_insert_with(Vec::new) + .push((user_id, device_ids)); + } + if device_ids.is_empty() { let mut container = BTreeMap::new(); for device_id in db.users.all_device_ids(user_id) { @@ -347,21 +361,51 @@ pub fn get_keys_helper bool>( } } + let mut failures = BTreeMap::new(); + + for (server, vec) in get_over_federation { + let mut device_keys = BTreeMap::new(); + for (user_id, keys) in vec { + device_keys.insert(user_id.clone(), keys.clone()); + } + if let Err(_e) = db + .sending + .send_federation_request( + &db.globals, + server, + federation::keys::get_keys::v1::Request { device_keys }, + ) + .await + { + failures.insert(server.to_string(), json!({})); + } + } + Ok(get_keys::Response { master_keys, self_signing_keys, user_signing_keys, device_keys, - failures: BTreeMap::new(), + failures, }) } -pub fn claim_keys_helper( +pub async fn claim_keys_helper( one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, db: &Database, ) -> 
Result { let mut one_time_keys = BTreeMap::new(); + + let mut get_over_federation = BTreeMap::new(); + for (user_id, map) in one_time_keys_input { + if user_id.server_name() != db.globals.server_name() { + get_over_federation + .entry(user_id.server_name()) + .or_insert_with(Vec::new) + .push((user_id, map)); + } + let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = @@ -376,6 +420,26 @@ pub fn claim_keys_helper( one_time_keys.insert(user_id.clone(), container); } + for (server, vec) in get_over_federation { + let mut one_time_keys_input_fed = BTreeMap::new(); + for (user_id, keys) in vec { + one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + // Ignore failures + let keys = db + .sending + .send_federation_request( + &db.globals, + server, + federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }, + ) + .await?; + + one_time_keys.extend(keys.one_time_keys); + } + Ok(claim_keys::Response { failures: BTreeMap::new(), one_time_keys, diff --git a/src/server_server.rs b/src/server_server.rs index d51b672..e8c19db 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2461,7 +2461,8 @@ pub async fn get_keys_route( &body.device_keys, |u| Some(u.server_name()) == body.sender_servername.as_deref(), &db, - )?; + ) + .await?; db.flush().await?; @@ -2486,7 +2487,7 @@ pub async fn claim_keys_route( return Err(Error::bad_config("Federation is disabled.")); } - let result = claim_keys_helper(&body.one_time_keys, &db)?; + let result = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush().await?; From e20f55970e2e26182d963eae67e5c36c296dd4f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 19:49:47 +0200 Subject: [PATCH 0685/1727] feat: call /query/profile over federation when local user asks --- src/client_server/profile.rs | 71 ++++++++++++++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 1938c87..693254f 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -1,10 +1,13 @@ use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + api::{ + client::{ + error::ErrorKind, + r0::profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, }, + federation::{self, query::get_profile_information::v1::ProfileField}, }, events::EventType, serde::Raw, @@ -120,6 +123,25 @@ pub async fn get_displayname_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { + if body.user_id.server_name() != db.globals.server_name() { + let response = db + .sending + .send_federation_request( + &db.globals, + body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: &body.user_id, + field: Some(&ProfileField::DisplayName), + }, + ) + .await?; + + return Ok(get_display_name::Response { + displayname: response.displayname, + } + .into()); + } + Ok(get_display_name::Response { displayname: db.users.displayname(&body.user_id)?, } @@ -234,6 +256,26 @@ pub async fn get_avatar_url_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { + if body.user_id.server_name() != db.globals.server_name() { + let response = db + .sending + .send_federation_request( + &db.globals, + 
body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: &body.user_id, + field: Some(&ProfileField::AvatarUrl), + }, + ) + .await?; + + return Ok(get_avatar_url::Response { + avatar_url: response.avatar_url, + blurhash: response.blurhash, + } + .into()); + } + Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, @@ -250,6 +292,27 @@ pub async fn get_profile_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { + if body.user_id.server_name() != db.globals.server_name() { + let response = db + .sending + .send_federation_request( + &db.globals, + body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: &body.user_id, + field: None, + }, + ) + .await?; + + return Ok(get_profile::Response { + displayname: response.displayname, + avatar_url: response.avatar_url, + blurhash: response.blurhash, + } + .into()); + } + if !db.users.exists(&body.user_id)? { // Return 404 if this user doesn't exist return Err(Error::BadRequest( From fe3b5d32a7bcbcd1097b925a022255e54f24c140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 21:17:15 +0200 Subject: [PATCH 0686/1727] feat: send to-device events over federation --- src/client_server/keys.rs | 20 +++++++++++++++----- src/client_server/to_device.rs | 30 +++++++++++++++++++++++++++++- src/database/sending.rs | 32 +++++++++++++++++++++++++------- 3 files changed, 69 insertions(+), 13 deletions(-) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 1ae9f80..418e41a 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -302,6 +302,7 @@ pub async fn get_keys_helper bool>( .entry(user_id.server_name()) .or_insert_with(Vec::new) .push((user_id, device_ids)); + continue; } if device_ids.is_empty() { @@ -364,20 +365,29 @@ pub async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); for (server, vec) in get_over_federation { - let mut device_keys = BTreeMap::new(); + let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { - device_keys.insert(user_id.clone(), keys.clone()); + device_keys_input_fed.insert(user_id.clone(), keys.clone()); } - if let Err(_e) = db + match db .sending .send_federation_request( &db.globals, server, - federation::keys::get_keys::v1::Request { device_keys }, + federation::keys::get_keys::v1::Request { + device_keys: device_keys_input_fed, + }, ) .await { - failures.insert(server.to_string(), json!({})); + Ok(response) => { + master_keys.extend(response.master_keys); + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + } + Err(_e) => { + failures.insert(server.to_string(), json!({})); + } } } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 7896af9..e3fd780 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,6 +1,12 @@ +use std::collections::BTreeMap; + use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::to_device::send_event_to_device}, + api::{ + client::{error::ErrorKind, r0::to_device::send_event_to_device}, + federation::{self, transactions::edu::DirectDeviceContent}, + }, + events::EventType, to_device::DeviceIdOrAllDevices, }; @@ -33,6 +39,28 @@ pub async fn send_event_to_device_route( for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { + if 
target_user_id.server_name() != db.globals.server_name() { + let mut map = BTreeMap::new(); + map.insert(target_device_id_maybe.clone(), event.clone()); + let mut messages = BTreeMap::new(); + messages.insert(target_user_id.clone(), map); + + db.sending.send_reliable_edu( + target_user_id.server_name(), + &serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( + DirectDeviceContent { + sender: sender_user.clone(), + ev_type: EventType::from(&body.event_type), + message_id: body.txn_id.clone(), + messages, + }, + )) + .expect("DirectToDevice EDU can be serialized"), + )?; + + continue; + } + match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( sender_user, diff --git a/src/database/sending.rs b/src/database/sending.rs index 7c9cf64..8dfcbee 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -164,9 +164,10 @@ impl Sending { // Find events that have been added since starting the last request let new_events = guard.sending.servernamepduids .scan_prefix(prefix.clone()) - .map(|(k, _)| { - SendingEventType::Pdu(k[prefix.len()..].to_vec()) + .filter_map(|(k, _)| { + Self::parse_servercurrentevent(&k).ok() }) + .map(|(_, event)| event) .take(30) .collect::>(); @@ -290,7 +291,14 @@ impl Sending { if let OutgoingKind::Normal(server_name) = outgoing_kind { if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { - events.extend_from_slice(&select_edus); + for edu in &select_edus { + let mut full_key = vec![b'*']; + full_key.extend_from_slice(&edu); + db.sending.servercurrentevents.insert(&full_key, &[])?; + } + + events.extend(select_edus.into_iter().map(SendingEventType::Edu)); + db.sending .servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; @@ -301,7 +309,7 @@ impl Sending { Ok(Some(events)) } - pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec, u64)> { + pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu let since = db .sending @@ -366,9 +374,7 @@ impl Sending { } }; - events.push(SendingEventType::Edu( - serde_json::to_vec(&federation_event).expect("json can be serialized"), - )); + events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); if events.len() >= 20 { break 'outer; @@ -402,6 +408,18 @@ impl Sending { Ok(()) } + #[tracing::instrument(skip(self))] + pub fn send_reliable_edu(&self, server: &ServerName, serialized: &[u8]) -> Result<()> { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.push(b'*'); + key.extend_from_slice(serialized); + self.servernamepduids.insert(&key, b"")?; + self.sender.unbounded_send(key).unwrap(); + + Ok(()) + } + #[tracing::instrument(skip(self))] pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { let mut key = b"+".to_vec(); From 08a33264311f9791716ce0c2ff3d7d2feeddc852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 21:18:37 +0200 Subject: [PATCH 0687/1727] docs: Initial end-to-end encryption over federation support --- README.md | 4 ++-- src/database/sending.rs | 6 ------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index e856dd5..fde762c 100644 --- a/README.md +++ b/README.md @@ -38,8 +38,8 @@ are not aware of such a bug today, but we would like to do more testing. 
There are still a few important features missing: - Database stability (currently you might have to do manual upgrades or even wipe the db for new versions) -- End-to-end encrypted chats over federation -- Typing, presence, read receipts etc. over federation +- Edge cases for end-to-end encryption over federation +- Typing and presence over federation - Lots of testing Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). diff --git a/src/database/sending.rs b/src/database/sending.rs index 8dfcbee..44d19ed 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -291,12 +291,6 @@ impl Sending { if let OutgoingKind::Normal(server_name) = outgoing_kind { if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { - for edu in &select_edus { - let mut full_key = vec![b'*']; - full_key.extend_from_slice(&edu); - db.sending.servercurrentevents.insert(&full_key, &[])?; - } - events.extend(select_edus.into_iter().map(SendingEventType::Edu)); db.sending From 80533bfab27a95c4ce77beca41dc0d33f2543917 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 22:06:42 +0200 Subject: [PATCH 0688/1727] fix: improve code when skipping /state_ids --- src/server_server.rs | 65 ++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 35 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e8c19db..5ab8646 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1027,42 +1027,37 @@ pub fn handle_incoming_pdu<'a>( .map_err(|_| "Failed talking to db".to_owned())? .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) .flatten(); - if let Some(mut state) = state { - if db - .rooms - .get_pdu(prev_event) - .ok() - .flatten() - .ok_or_else(|| "Could not find prev event, but we know the state.".to_owned())? - .state_key - .is_some() - { - state.insert(prev_event.clone()); - } - state_at_incoming_event = Some( - fetch_and_handle_events( - db, - origin, - &state.into_iter().collect::>(), - &room_id, - pub_key_map, - ) - .await - .map_err(|_| "Failed to fetch state events locally".to_owned())? - .into_iter() - .map(|pdu| { + if let Some(state) = state { + let mut state = fetch_and_handle_events( + db, + origin, + &state.into_iter().collect::>(), + &room_id, + pub_key_map, + ) + .await + .map_err(|_| "Failed to fetch state events locally".to_owned())? + .into_iter() + .map(|pdu| { + ( ( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("events from state_full_ids are state events"), - ), - pdu, - ) - }) - .collect(), - ); + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("events from state_full_ids are state events"), + ), + pdu, + ) + }) + .collect::>(); + + let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; + + if let Some(state_key) = &prev_pdu.state_key { + state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); + } } // TODO: set incoming_auth_events? 
} From f1219788972c7b6b66199c54f499ff4057583ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 20 Jul 2021 23:36:03 +0200 Subject: [PATCH 0689/1727] fix: state bug --- src/database/sending.rs | 18 +++++------------- src/server_server.rs | 2 ++ 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 44d19ed..a07192e 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -165,9 +165,8 @@ impl Sending { let new_events = guard.sending.servernamepduids .scan_prefix(prefix.clone()) .filter_map(|(k, _)| { - Self::parse_servercurrentevent(&k).ok() + Self::parse_servercurrentevent(&k).ok().map(|ev| (ev, k)) }) - .map(|(_, event)| event) .take(30) .collect::>(); @@ -175,16 +174,9 @@ impl Sending { if !new_events.is_empty() { // Insert pdus we found - for event in &new_events { - let mut current_key = prefix.clone(); - match event { - SendingEventType::Pdu(b) | - SendingEventType::Edu(b) => { - current_key.extend_from_slice(&b); - guard.sending.servercurrentevents.insert(¤t_key, &[]).unwrap(); - guard.sending.servernamepduids.remove(¤t_key).unwrap(); - } - } + for (_, key) in &new_events { + guard.sending.servercurrentevents.insert(&key, &[]).unwrap(); + guard.sending.servernamepduids.remove(&key).unwrap(); } drop(guard); @@ -192,7 +184,7 @@ impl Sending { futures.push( Self::handle_events( outgoing_kind.clone(), - new_events, + new_events.into_iter().map(|(event, _)| event.1).collect(), Arc::clone(&db), ) ); diff --git a/src/server_server.rs b/src/server_server.rs index 5ab8646..f666188 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1058,6 +1058,8 @@ pub fn handle_incoming_pdu<'a>( if let Some(state_key) = &prev_pdu.state_key { state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); } + + state_at_incoming_event = Some(state); } // TODO: set incoming_auth_events? 
} From 1587f2cd52ea3886c7c0c2434f5b860a6978e5b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Jul 2021 09:39:15 +0200 Subject: [PATCH 0690/1727] fix: check events in timeline (not only state) for member changes --- src/client_server/membership.rs | 4 ++-- src/client_server/sync.rs | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d8c2781..e17a4d7 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -836,7 +836,7 @@ pub async fn invite_helper<'a>( is_direct: Some(is_direct), membership: MembershipState::Invite, third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: None, }) .expect("member event is valid value"); @@ -1012,7 +1012,7 @@ pub async fn invite_helper<'a>( avatar_url: db.users.avatar_url(&user_id)?, is_direct: Some(is_direct), third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: db.users.blurhash(&user_id)?, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 427b9ad..fdb8f25 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -421,7 +421,10 @@ async fn sync_helper( let send_member_count = state_events .iter() - .any(|event| event.kind == EventType::RoomMember); + .any(|event| event.kind == EventType::RoomMember) + || timeline_pdus.iter().any(|(_, event)| { + event.state_key.is_some() && event.kind == EventType::RoomMember + }); if encrypted_room { for (user_id, current_member) in db From 666e1d30a3f591a0609261eaeb66550e1a84ddd4 Mon Sep 17 00:00:00 2001 From: phesch Date: Wed, 21 Jul 2021 10:55:23 +0200 Subject: [PATCH 0691/1727] Implement From on PduBuilder The conversion between the two is now direct and thus shouldn't lose the state_keys. --- src/client_server/room.rs | 7 +++---- src/pdu.rs | 20 +++++++++++++++++--- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 6e27130..f107cc5 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -233,10 +233,9 @@ pub async fn create_room_route( // 5. 
Events listed in initial_state for event in &body.initial_state { - let pdu_builder = serde_json::from_str::( - &serde_json::to_string(&event).expect("AnyInitialStateEvent::to_string always works"), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?; + let pdu_builder = PduBuilder::from(event.deserialize().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") + })?); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { diff --git a/src/pdu.rs b/src/pdu.rs index a72f04d..2167971 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,9 +2,9 @@ use crate::Error; use log::error; use ruma::{ events::{ - pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, - AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, - StateEvent, + pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, + AnyInitialStateEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, + AnySyncStateEvent, EventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, @@ -346,3 +346,17 @@ pub struct PduBuilder { pub state_key: Option, pub redacts: Option, } + +/// Direct conversion prevents loss of the empty `state_key` that ruma requires. +impl From for PduBuilder { + fn from(event: AnyInitialStateEvent) -> Self { + Self { + event_type: EventType::from(event.event_type()), + content: serde_json::value::to_value(event.content()) + .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), + unsigned: None, + state_key: Some(event.state_key().to_owned()), + redacts: None, + } + } +} From 0d33cc4acd9d17798c74922729f941b50bfb2fa8 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sun, 11 Jul 2021 11:28:55 +0200 Subject: [PATCH 0692/1727] Implement getting room aliases Signed-off-by: Kurt Roeckx --- src/client_server/room.rs | 30 +++++++++++++++++++++++++++++- src/main.rs | 1 + 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 6e27130..1fcf2d6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -6,7 +6,7 @@ use log::info; use ruma::{ api::client::{ error::ErrorKind, - r0::room::{self, create_room, get_room_event, upgrade_room}, + r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, }, events::{ room::{guest_access, history_visibility, join_rules, member, name, topic}, @@ -334,6 +334,34 @@ pub async fn get_room_event_route( .into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/rooms/<_>/aliases", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_room_aliases_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + Ok(aliases::Response { + aliases: db + .rooms + .room_aliases(&body.room_id) + .filter_map(|a| a.ok()) + .collect(), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") diff --git a/src/main.rs b/src/main.rs index f69bc48..a5face7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -60,6 +60,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::set_pushrule_actions_route, client_server::delete_pushrule_route, client_server::get_room_event_route, + client_server::get_room_aliases_route, client_server::get_filter_route, client_server::create_filter_route, client_server::set_global_account_data_route, From 32db4c09a0852463bd047122d06ac356ec4c834c Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 21 Jul 2021 09:21:49 +0000 Subject: [PATCH 0693/1727] Docs: Remove arm download, but add x86_64 musl one --- DEPLOY.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 8e16c19..1010c0f 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -9,17 +9,16 @@ If you run into any problems while setting up Conduit, write an email to `timo@k You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -| CPU Architecture | GNU (Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | -| ------------------- | ---------------------------- | ----------------------- | -| x84_64 / amd64 | [Download][x84_64-gnu] | - | -| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | -| armv8 / aarch64 | [Download][armv8-gnu] | - | -| arm | [Download][arm] | - | +| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | +| -------------------- | ------------------------------------- | ----------------------- | +| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] | +| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | +| armv8 / aarch64 | [Download][armv8-gnu] | - | [x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:cargo:x86_64-unknown-linux-gnu +[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:cargo:x86_64-unknown-linux-musl [armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:cargo:armv7-unknown-linux-gnueabihf [armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:cargo:aarch64-unknown-linux-gnu -[arm]: https://conduit.rs/master/arm/conduit-bin ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -34,6 +33,7 @@ Note that this currently requires Rust 1.50. If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). + ## Adding a Conduit user While Conduit can run as any user it is usually better to use dedicated users for different services. 
From 918df9ca6d36c9fccb34c179459d12d06d9fb5e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Jul 2021 11:29:13 +0200 Subject: [PATCH 0694/1727] feat: support room version 5 --- src/client_server/capabilities.rs | 1 + src/client_server/membership.rs | 11 ++++++++--- src/client_server/room.rs | 5 ++++- src/database/rooms.rs | 6 +++++- src/pdu.rs | 1 + src/server_server.rs | 33 +++++++++++++++---------------- 6 files changed, 35 insertions(+), 22 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 8740928..65c8879 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -23,6 +23,7 @@ pub async fn get_capabilities_route( _body: Ruma, ) -> ConduitResult { let mut available = BTreeMap::new(); + available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); let mut capabilities = Capabilities::new(); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e17a4d7..52bb18c 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -511,7 +511,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version6], + ver: &[RoomVersionId::Version5, RoomVersionId::Version6], }, ) .await; @@ -526,7 +526,12 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + Some(room_version) + if room_version == RoomVersionId::Version5 + || room_version == RoomVersionId::Version6 => + { + room_version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -940,7 +945,7 @@ pub async fn invite_helper<'a>( create_invite::v2::Request { room_id: room_id.clone(), event_id: ruma::event_id!("$receivingservershouldsetthis"), - room_version: RoomVersionId::Version6, + room_version: room_version_id, event: PduEvent::convert_to_outgoing_federation_event(pdu_json), invite_room_state, }, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 6e27130..1d41fd8 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -346,7 +346,10 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!(body.new_version, RoomVersionId::Version6) { + if !matches!( + body.new_version, + RoomVersionId::Version5 | RoomVersionId::Version6 + ) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/database/rooms.rs b/src/database/rooms.rs index aad691b..f0ec683 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1954,7 +1954,11 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(id @ RoomVersionId::Version6) => id, + Some(version) + if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 => + { + version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; diff --git a/src/pdu.rs b/src/pdu.rs index a72f04d..c449eb8 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -328,6 +328,7 @@ pub(crate) fn 
gen_event_id_canonical_json( let event_id = EventId::try_from(&*format!( "${}", + // Anything higher than version3 behaves the same ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) .expect("ruma can calculate reference hashes") )) diff --git a/src/server_server.rs b/src/server_server.rs index f666188..f725dce 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1967,15 +1967,6 @@ pub fn create_join_event_template_route( )); } - if !body.ver.contains(&RoomVersionId::Version6) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: RoomVersionId::Version6, - }, - "Room version not supported.", - )); - } - let prev_events = db .rooms .get_pdu_leaves(&body.room_id)? @@ -2006,12 +1997,19 @@ pub fn create_join_event_template_route( }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version = RoomVersion::new( - &create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }), - ) - .expect("room version is supported"); + let room_version_id = create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } let content = serde_json::to_value(MemberEventContent { avatar_url: None, @@ -2108,7 +2106,7 @@ pub fn create_join_event_template_route( ); Ok(create_join_event_template::v1::Response { - room_version: Some(RoomVersionId::Version6), + room_version: Some(room_version_id), event: serde_json::from_value::>( serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"), ) @@ -2238,7 +2236,8 @@ pub async fn create_invite_route( return Err(Error::bad_config("Federation is disabled.")); } - if body.room_version < RoomVersionId::Version6 { + if body.room_version != RoomVersionId::Version5 && body.room_version != RoomVersionId::Version6 + { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), From 5e924227b6266a65a9717881b0dfbac6ea396667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 29 Jul 2021 08:36:01 +0200 Subject: [PATCH 0695/1727] feat: add threadpool for iterator threads, bug fixes, tracing_flame support --- Cargo.lock | 161 +++++++++++++++++++---------- Cargo.toml | 38 +++---- src/appservice_server.rs | 2 +- src/client_server/account.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/membership.rs | 2 +- src/client_server/room.rs | 2 +- src/client_server/session.rs | 2 +- src/client_server/sync.rs | 2 +- src/database.rs | 47 +++++---- src/database/abstraction/sled.rs | 2 +- src/database/abstraction/sqlite.rs | 125 +++++++++++++--------- src/database/account_data.rs | 5 +- src/database/admin.rs | 2 +- src/database/globals.rs | 5 +- src/database/pusher.rs | 10 +- src/database/rooms.rs | 51 +++++++-- src/database/rooms/edus.rs | 1 + src/database/sending.rs | 20 ++-- src/database/users.rs | 84 +++++++++++++-- src/error.rs | 4 +- src/main.rs | 71 ++++++++----- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 8 +- src/server_server.rs | 42 ++++---- src/utils.rs | 8 ++ 26 files changed, 472 insertions(+), 228 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 808ba4e..01d2ba2 100644 --- a/Cargo.lock +++ b/Cargo.lock 
@@ -137,9 +137,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.57.0" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" +checksum = "0b2ce639ee22f41a6ea0a3061e9bea9f690cf0c6ffc1ada0a3a599778f99ccba" dependencies = [ "bitflags", "cexpr", @@ -160,6 +160,18 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitvec" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "blake2b_simd" version = "0.5.11" @@ -215,9 +227,9 @@ dependencies = [ [[package]] name = "cexpr" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" dependencies = [ "nom", ] @@ -269,7 +281,6 @@ dependencies = [ "http", "image", "jsonwebtoken", - "log", "lru-cache", "num_cpus", "opentelemetry", @@ -292,8 +303,10 @@ dependencies = [ "serde_yaml", "sled", "thiserror", + "threadpool", "tokio", "tracing", + "tracing-flame", "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", @@ -326,9 +339,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "cookie" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf8865bac3d9a3bde5bde9088ca431b11f5d37c7a578b8086af77248b76627" +checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" dependencies = [ "percent-encoding", "time 0.2.27", @@ -564,9 +577,9 @@ checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "ed25519" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" +checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ "signature", ] @@ -677,6 +690,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "futures" version = "0.3.15" @@ -952,9 +971,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.10" +version = "0.14.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7728a72c4c7d72665fde02204bcbd93b247721025b222ef78606f14513e0fd03" +checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" dependencies = [ "bytes", "futures-channel", @@ -1177,9 +1196,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "6.17.3" +version = "6.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" dependencies = [ "bindgen", "cc", @@ -1346,10 +1365,12 @@ dependencies = [ [[package]] name = 
"nom" -version = "5.1.2" +version = "6.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" dependencies = [ + "bitvec", + "funty", "memchr", "version_check", ] @@ -1445,11 +1466,12 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "opentelemetry" -version = "0.12.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514d24875c140ed269eecc2d1b56d7b71b573716922a763c317fb1b1b4b58f15" +checksum = "ff27b33e30432e7b9854936693ca103d8591b0501f7ae9f633de48cda3bf2a67" dependencies = [ "async-trait", + "crossbeam-channel", "futures", "js-sys", "lazy_static", @@ -1461,9 +1483,9 @@ dependencies = [ [[package]] name = "opentelemetry-jaeger" -version = "0.11.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5677b3a361784aff6e2b1b30dbdb5f85f4ec57ff2ced41d9a481ad70a9d0b57" +checksum = "09a9fc8192722e7daa0c56e59e2336b797122fb8598383dcb11c8852733b435c" dependencies = [ "async-trait", "lazy_static", @@ -1569,18 +1591,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ "proc-macro2", "quote", @@ -1601,9 +1623,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d156817ae0125e8aa5067710b0db24f0984830614f99875a70aa5e3b74db69" +checksum = "87bb2d5c68b7505a3a89eb2f3583a4d56303863005226c2ef99319930a262be4" dependencies = [ "der", "spki", @@ -1703,6 +1725,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" + [[package]] name = "rand" version = "0.7.3" @@ -2015,7 +2043,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "assign", "js_int", @@ -2036,7 +2064,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.1" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "bytes", "http", @@ -2052,7 +2080,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.1" -source = 
"git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2063,7 +2091,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "ruma-api", "ruma-common", @@ -2077,7 +2105,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.11.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "assign", "bytes", @@ -2097,7 +2125,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.5.4" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "indexmap", "js_int", @@ -2112,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.23.2" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "indoc", "js_int", @@ -2128,7 +2156,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.23.2" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2139,7 +2167,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "js_int", "ruma-api", @@ -2154,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "paste", "rand 0.8.4", @@ -2168,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.19.4" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "quote", 
"ruma-identifiers-validation", @@ -2178,12 +2206,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" [[package]] name = "ruma-identity-service-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "js_int", "ruma-api", @@ -2196,7 +2224,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "js_int", "ruma-api", @@ -2211,7 +2239,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "bytes", "form_urlencoded", @@ -2225,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2236,7 +2264,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2253,7 +2281,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=eb19b0e08a901b87d11b3be0890ec788cc760492#eb19b0e08a901b87d11b3be0890ec788cc760492" +source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" dependencies = [ "itertools 0.10.1", "js_int", @@ -2529,9 +2557,9 @@ dependencies = [ [[package]] name = "shlex" -version = "0.1.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" [[package]] name = "signal-hook-registry" @@ -2714,9 +2742,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = 
"1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" dependencies = [ "proc-macro2", "quote", @@ -2735,6 +2763,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.2.0" @@ -2859,9 +2893,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" dependencies = [ "tinyvec_macros", ] @@ -2874,9 +2908,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985" +checksum = "c2602b8af3767c285202012822834005f596c811042315fa7e9f5b12b2a43207" dependencies = [ "autocfg", "bytes", @@ -2997,6 +3031,17 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-flame" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" +dependencies = [ + "lazy_static", + "tracing", + "tracing-subscriber", +] + [[package]] name = "tracing-log" version = "0.1.2" @@ -3010,9 +3055,9 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.11.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccdf13c28f1654fe806838f28c5b9cb23ca4c0eae71450daa489f50e523ceb1" +checksum = "c47440f2979c4cd3138922840eec122e3c0ba2148bc290f756bd7fd60fc97fff" dependencies = [ "opentelemetry", "tracing", @@ -3394,6 +3439,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "yaml-rust" version = "0.4.5" @@ -3411,9 +3462,9 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zeroize" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 2ce4b03..92134a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "eb19b0e08a901b87d11b3be0890ec788cc760492", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "74cf83c4ca937fa5e2709fb71e9d11848e72e487", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = 
"https://github.com/ruma/ruma", rev = "eb19b0e08a901b87d11b3be0890ec788cc760492", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "a2d93500e1dbc87e7032a3c74f3b2479a7f84e93", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = "1.2.0" +tokio = "1.8.2" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } @@ -31,30 +31,28 @@ rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = tru # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.0.1" -# Used for emitting log entries -log = "0.4.14" # Used for rocket<->ruma conversions -http = "0.2.3" +http = "0.2.4" # Used to find data directory for default db path -directories = "3.0.1" +directories = "3.0.2" # Used for ruma wrapper serde_json = { version = "1.0.64", features = ["raw_value"] } # Used for appservice registration files serde_yaml = "0.8.17" # Used for pdu definition -serde = "1.0.123" +serde = "1.0.126" # Used for secure identifiers -rand = "0.8.3" +rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.3", default-features = false, features = ["rustls-tls-native-roots", "socks"] } +reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] } # Custom TLS verifier -rustls = { version = "0.19", features = ["dangerous_configuration"] } +rustls = { version = "0.19.1", features = ["dangerous_configuration"] } rustls-native-certs = "0.5.0" webpki = "0.21.0" # Used for conduit::Error type -thiserror = "1.0.24" +thiserror = "1.0.26" # Used to generate thumbnails for images image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key @@ -62,23 +60,25 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.20.0" +trust-dns-resolver = "0.20.3" # Used to find matching events for appservices -regex = "1.4.3" +regex = "1.5.4" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements -tracing = "0.1.25" -opentelemetry = "0.12.0" -tracing-subscriber = "0.2.16" -tracing-opentelemetry = "0.11.0" -opentelemetry-jaeger = "0.11.0" +tracing = { version = "0.1.26", features = ["release_max_level_warn"] } +opentelemetry = "0.15.0" +tracing-subscriber = "0.2.19" +tracing-opentelemetry = "0.14.0" +tracing-flame = "0.1.0" +opentelemetry-jaeger = "0.14.0" pretty_env_logger = "0.4.0" lru-cache = "0.1.2" rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } parking_lot = { version = "0.11.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" +threadpool = "1.8.1" [features] default = ["conduit_bin", "backend_sqlite"] diff --git 
a/src/appservice_server.rs b/src/appservice_server.rs index 4291857..7868e45 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,5 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; -use log::warn; use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{ convert::{TryFrom, TryInto}, @@ -8,6 +7,7 @@ use std::{ mem, time::Duration, }; +use tracing::warn; pub async fn send_request( globals: &crate::database::globals::Globals, diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ddb44d6..c00cc87 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -6,7 +6,6 @@ use std::{ use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; -use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -28,6 +27,7 @@ use ruma::{ identifiers::RoomName, push, RoomAliasId, RoomId, RoomVersionId, UserId, }; +use tracing::info; use register::RegistrationKind; #[cfg(feature = "conduit_bin")] diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index d738886..f1ec4b8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,5 +1,4 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; -use log::info; use ruma::{ api::{ client::{ @@ -22,6 +21,7 @@ use ruma::{ serde::Raw, ServerName, UInt, }; +use tracing::info; #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 52bb18c..ea7fdab 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,6 @@ use crate::{ pdu::{PduBuilder, PduEvent}, server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{debug, error, warn}; use member::{MemberEventContent, MembershipState}; use rocket::futures; use ruma::{ @@ -34,6 +33,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; +use tracing::{debug, error, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 7aa22d0..49a6052 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -2,7 +2,6 @@ use crate::{ client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error, Ruma, }; -use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -16,6 +15,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; +use tracing::info; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 7ad792b..f8452e0 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,6 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; -use log::info; use ruma::{ api::client::{ error::ErrorKind, @@ -9,6 +8,7 @@ use ruma::{ UserId, }; use serde::Deserialize; +use tracing::info; #[derive(Debug, Deserialize)] struct Claims { diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fdb8f25..541045e 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,4 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; -use log::{error, warn}; use ruma::{ 
api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -13,6 +12,7 @@ use std::{ time::Duration, }; use tokio::sync::watch::Sender; +use tracing::{error, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, tokio}; diff --git a/src/database.rs b/src/database.rs index e359a5f..9f24345 100644 --- a/src/database.rs +++ b/src/database.rs @@ -17,7 +17,6 @@ pub mod users; use crate::{utils, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; -use log::error; use lru_cache::LruCache; use rocket::{ futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, @@ -36,6 +35,7 @@ use std::{ sync::{Arc, Mutex, RwLock}, }; use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; +use tracing::{debug, error, warn}; use self::proxy::ProxyConfig; @@ -69,6 +69,8 @@ pub struct Config { allow_federation: bool, #[serde(default = "false_fn")] pub allow_jaeger: bool, + #[serde(default = "false_fn")] + pub tracing_flame: bool, #[serde(default)] proxy: ProxyConfig, jwt_secret: Option, @@ -91,12 +93,12 @@ impl Config { .keys() .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) { - log::warn!("Config parameter {} is deprecated", key); + warn!("Config parameter {} is deprecated", key); was_deprecated = true; } if was_deprecated { - log::warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); + warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); } } } @@ -193,13 +195,13 @@ impl Database { if sled_exists { if sqlite_exists { // most likely an in-place directory, only warn - log::warn!("Both sled and sqlite databases are detected in database directory"); - log::warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") + warn!("Both sled and sqlite databases are detected in database directory"); + warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") } else { - log::error!( + error!( "Sled database detected, conduit now uses sqlite for database operations" ); - log::error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); + error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); return Err(Error::bad_config( "sled database detected, migrate to sqlite", )); @@ -291,7 +293,7 @@ impl Database { statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, - prevevent_parent: builder.open_tree("prevevent_parent")?, + referencedevents: builder.open_tree("referencedevents")?, pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)), }, @@ -444,10 +446,12 @@ impl Database { #[cfg(feature = "conduit_bin")] pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { + use tracing::info; + tokio::spawn(async move { shutdown.await; - log::info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); db.read().await.globals.rotate.fire(); }); @@ -543,22 +547,25 @@ impl Database { futures.next().await; } + #[tracing::instrument(skip(self))] pub async fn 
flush(&self) -> Result<()> { let start = std::time::Instant::now(); let res = self._db.flush(); - log::debug!("flush: took {:?}", start.elapsed()); + debug!("flush: took {:?}", start.elapsed()); res } #[cfg(feature = "sqlite")] + #[tracing::instrument(skip(self))] pub fn flush_wal(&self) -> Result<()> { self._db.flush_wal() } #[cfg(feature = "sqlite")] + #[tracing::instrument(skip(engine, config))] pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { let fraction = config.sqlite_spillover_reap_fraction.clamp(0.01, 1.0); let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; @@ -585,11 +592,13 @@ impl Database { } #[cfg(feature = "sqlite")] + #[tracing::instrument(skip(lock, config))] pub async fn start_wal_clean_task(lock: &Arc>, config: &Config) { use tokio::time::{interval, timeout}; #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; + use tracing::info; use std::{ sync::Weak, @@ -611,41 +620,41 @@ impl Database { #[cfg(unix)] tokio::select! { _ = i.tick(), if do_timer => { - log::info!(target: "wal-trunc", "Timer ticked") + info!(target: "wal-trunc", "Timer ticked") } _ = s.recv() => { - log::info!(target: "wal-trunc", "Received SIGHUP") + info!(target: "wal-trunc", "Received SIGHUP") } }; #[cfg(not(unix))] if do_timer { i.tick().await; - log::info!(target: "wal-trunc", "Timer ticked") + info!(target: "wal-trunc", "Timer ticked") } else { // timer disabled, and there's no concept of signals on windows, bailing... return; } if let Some(arc) = Weak::upgrade(&weak) { - log::info!(target: "wal-trunc", "Rotating sync helpers..."); + info!(target: "wal-trunc", "Rotating sync helpers..."); // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock. // Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out. 
arc.read().await.globals.rotate.fire(); - log::info!(target: "wal-trunc", "Locking..."); + info!(target: "wal-trunc", "Locking..."); let guard = { if let Ok(guard) = timeout(lock_timeout, arc.write()).await { guard } else { - log::info!(target: "wal-trunc", "Lock failed in timeout, canceled."); + info!(target: "wal-trunc", "Lock failed in timeout, canceled."); continue; } }; - log::info!(target: "wal-trunc", "Locked, flushing..."); + info!(target: "wal-trunc", "Locked, flushing..."); let start = Instant::now(); if let Err(e) = guard.flush_wal() { - log::error!(target: "wal-trunc", "Errored: {}", e); + error!(target: "wal-trunc", "Errored: {}", e); } else { - log::info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed()); + info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed()); } } else { break; diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 12e0275..d99ce26 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -1,7 +1,7 @@ use super::super::Config; use crate::{utils, Result}; -use log::warn; use std::{future::Future, pin::Pin, sync::Arc}; +use tracing::warn; use super::{DatabaseEngine, Tree}; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 8cc6a8d..a46d3ad 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -3,9 +3,8 @@ use crate::{database::Config, Result}; use crossbeam::channel::{ bounded, unbounded, Receiver as ChannelReceiver, Sender as ChannelSender, TryRecvError, }; -use log::debug; use parking_lot::{Mutex, MutexGuard, RwLock}; -use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension}; +use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension, Params}; use std::{ collections::HashMap, future::Future, @@ -13,10 +12,11 @@ use std::{ path::{Path, PathBuf}, pin::Pin, sync::Arc, - thread, time::{Duration, Instant}, }; +use threadpool::ThreadPool; use tokio::sync::oneshot::Sender; +use tracing::{debug, warn}; struct Pool { writer: Mutex, @@ -86,9 +86,9 @@ impl Deref for RecycledConn { impl Drop for RecycledConn { fn drop(&mut self) { if let Some(conn) = self.0.take() { - log::debug!("Recycled connection"); + debug!("Recycled connection"); if let Err(e) = self.1.send(conn) { - log::warn!("Recycling a connection led to the following error: {:?}", e) + warn!("Recycling a connection led to the following error: {:?}", e) } } } @@ -149,14 +149,14 @@ impl Pool { } } - log::debug!("read_lock: All permanent readers locked, obtaining spillover reader..."); + debug!("read_lock: All permanent readers locked, obtaining spillover reader..."); // We didn't get a connection from the permanent pool, so we'll dumpster-dive for recycled connections. // Either we have a connection or we dont, if we don't, we make a new one. let conn = match self.spills.try_take() { Some(conn) => conn, None => { - log::debug!("read_lock: No recycled connections left, creating new one..."); + debug!("read_lock: No recycled connections left, creating new one..."); Self::prepare_conn(&self.path, None).unwrap() } }; @@ -169,7 +169,7 @@ impl Pool { // If the spillover readers are more than the number of total readers, there might be a problem. if now_count > self.readers.len() { - log::warn!( + warn!( "Database is under high load. 
Consider increasing sqlite_read_pool_size ({} spillover readers exist)", now_count ); @@ -182,6 +182,7 @@ impl Pool { pub struct Engine { pool: Pool, + iter_pool: Mutex, } impl DatabaseEngine for Engine { @@ -195,7 +196,10 @@ impl DatabaseEngine for Engine { pool.write_lock() .execute("CREATE TABLE IF NOT EXISTS _noop (\"key\" INT)", params![])?; - let arc = Arc::new(Engine { pool }); + let arc = Arc::new(Engine { + pool, + iter_pool: Mutex::new(ThreadPool::new(10)), + }); Ok(arc) } @@ -259,7 +263,7 @@ impl Engine { } } - log::debug!("Reaped {} connections", reaped); + debug!("Reaped {} connections", reaped); } } @@ -272,6 +276,7 @@ pub struct SqliteTable { type TupleOfBytes = (Vec, Vec); impl SqliteTable { + #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? @@ -279,6 +284,7 @@ impl SqliteTable { .optional()?) } + #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { guard.execute( format!( @@ -291,41 +297,67 @@ impl SqliteTable { Ok(()) } - fn _iter_from_thread(&self, f: F) -> Box + Send> - where - F: (for<'a> FnOnce(&'a Connection, ChannelSender)) + Send + 'static, - { + #[tracing::instrument(skip(self, sql, param))] + fn iter_from_thread( + &self, + sql: String, + param: Option>, + ) -> Box + Send + Sync> { let (s, r) = bounded::(5); - let engine = self.engine.clone(); + let engine = Arc::clone(&self.engine); - thread::spawn(move || { - let _ = f(&engine.pool.read_lock(), s); - }); + let lock = self.engine.iter_pool.lock(); + if lock.active_count() < lock.max_count() { + lock.execute(move || { + if let Some(param) = param { + iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, [param]); + } else { + iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, []); + } + }); + } else { + std::thread::spawn(move || { + if let Some(param) = param { + iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, [param]); + } else { + iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, []); + } + }); + } Box::new(r.into_iter()) } } -macro_rules! iter_from_thread { - ($self:expr, $sql:expr, $param:expr) => { - $self._iter_from_thread(move |guard, s| { - let _ = guard - .prepare($sql) - .unwrap() - .query_map($param, |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) - .unwrap() - .map(|r| r.unwrap()) - .try_for_each(|bob| s.send(bob)); - }) - }; +fn iter_from_thread_work
<P>
( + guard: &HoldingConn<'_>, + s: &ChannelSender<(Vec, Vec)>, + sql: &str, + params: P, +) where + P: Params, +{ + for bob in guard + .prepare(sql) + .unwrap() + .query_map(params, |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()) + { + if s.send(bob).is_err() { + return; + } + } } impl Tree for SqliteTable { + #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(&self.engine.pool.read_lock(), key) } + #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let guard = self.engine.pool.write_lock(); @@ -365,6 +397,7 @@ impl Tree for SqliteTable { Ok(()) } + #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.pool.write_lock(); @@ -385,15 +418,13 @@ impl Tree for SqliteTable { Ok(()) } + #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box + Send + 'a> { let name = self.name.clone(); - iter_from_thread!( - self, - format!("SELECT key, value FROM {}", name).as_str(), - params![] - ) + self.iter_from_thread(format!("SELECT key, value FROM {}", name), None) } + #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -402,28 +433,25 @@ impl Tree for SqliteTable { let name = self.name.clone(); let from = from.to_vec(); // TODO change interface? if backwards { - iter_from_thread!( - self, + self.iter_from_thread( format!( "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", name - ) - .as_str(), - [from] + ), + Some(from), ) } else { - iter_from_thread!( - self, + self.iter_from_thread( format!( "SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC", name - ) - .as_str(), - [from] + ), + Some(from), ) } } + #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let guard = self.engine.pool.write_lock(); @@ -446,18 +474,17 @@ impl Tree for SqliteTable { Ok(new) } + #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, ) -> Box + Send + 'a> { // let name = self.name.clone(); - // iter_from_thread!( - // self, + // self.iter_from_thread( // format!( // "SELECT key, value FROM {} WHERE key BETWEEN ?1 AND ?1 || X'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' ORDER BY key ASC", // name // ) - // .as_str(), // [prefix] // ) Box::new( @@ -466,6 +493,7 @@ impl Tree for SqliteTable { ) } + #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { let (tx, rx) = tokio::sync::oneshot::channel(); @@ -481,6 +509,7 @@ impl Tree for SqliteTable { }) } + #[tracing::instrument(skip(self))] fn clear(&self) -> Result<()> { debug!("clear: running"); self.engine diff --git a/src/database/account_data.rs b/src/database/account_data.rs index b1d5b6b..8a8d2c2 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -16,6 +16,7 @@ pub struct AccountData { impl AccountData { /// Places one event in the account data of the user and removes the previous entry. + #[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))] pub fn update( &self, room_id: Option<&RoomId>, @@ -60,6 +61,7 @@ impl AccountData { } /// Searches the account data for a specific kind. + #[tracing::instrument(skip(self, room_id, user_id, kind))] pub fn get( &self, room_id: Option<&RoomId>, @@ -74,7 +76,7 @@ impl AccountData { } /// Returns all changes to the account data that happened after `since`. 
- #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, room_id, user_id, since))] pub fn changes_since( &self, room_id: Option<&RoomId>, @@ -122,6 +124,7 @@ impl AccountData { Ok(userdata) } + #[tracing::instrument(skip(self, room_id, user_id, kind))] fn find_event( &self, room_id: Option<&RoomId>, diff --git a/src/database/admin.rs b/src/database/admin.rs index d8b7ae5..e1b24d0 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -4,13 +4,13 @@ use std::{ }; use crate::{pdu::PduBuilder, Database}; -use log::warn; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ events::{room::message, EventType}, UserId, }; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; +use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), diff --git a/src/database/globals.rs b/src/database/globals.rs index fbd41a3..0edb9ca 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,5 +1,4 @@ use crate::{database::Config, utils, ConduitResult, Error, Result}; -use log::{error, info}; use ruma::{ api::{ client::r0::sync::sync_events, @@ -17,6 +16,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex, Semaphore}; +use tracing::{error, info}; use trust_dns_resolver::TokioAsyncResolver; use super::abstraction::Tree; @@ -56,6 +56,7 @@ struct MatrixServerVerifier { } impl ServerCertVerifier for MatrixServerVerifier { + #[tracing::instrument(skip(self, roots, presented_certs, dns_name, ocsp_response))] fn verify_server_cert( &self, roots: &rustls::RootCertStore, @@ -220,11 +221,13 @@ impl Globals { &self.reqwest_client } + #[tracing::instrument(skip(self))] pub fn next_count(&self) -> Result { utils::u64_from_bytes(&self.globals.increment(COUNTER)?) .map_err(|_| Error::bad_database("Count has invalid bytes.")) } + #[tracing::instrument(skip(self))] pub fn current_count(&self) -> Result { self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { utils::u64_from_bytes(&bytes) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 348f4dc..9e81dd1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -1,6 +1,5 @@ use crate::{Database, Error, PduEvent, Result}; use bytes::BytesMut; -use log::{error, info, warn}; use ruma::{ api::{ client::r0::push::{get_pushers, set_pusher, PusherKind}, @@ -15,6 +14,7 @@ use ruma::{ push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; +use tracing::{error, info, warn}; use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc}; @@ -26,6 +26,7 @@ pub struct PushData { } impl PushData { + #[tracing::instrument(skip(self, sender, pusher))] pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); @@ -48,6 +49,7 @@ impl PushData { Ok(()) } + #[tracing::instrument(skip(self, senderkey))] pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? 
@@ -58,6 +60,7 @@ impl PushData { .transpose() } + #[tracing::instrument(skip(self, sender))] pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -71,6 +74,7 @@ impl PushData { .collect() } + #[tracing::instrument(skip(self, sender))] pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, @@ -82,6 +86,7 @@ impl PushData { } } +#[tracing::instrument(skip(globals, destination, request))] pub async fn send_request( globals: &crate::database::globals::Globals, destination: &str, @@ -155,6 +160,7 @@ where } } +#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] pub async fn send_push_notice( user: &UserId, unread: UInt, @@ -194,6 +200,7 @@ pub async fn send_push_notice( Ok(()) } +#[tracing::instrument(skip(user, ruleset, pdu, db))] pub fn get_actions<'a>( user: &UserId, ruleset: &'a Ruleset, @@ -225,6 +232,7 @@ pub fn get_actions<'a>( Ok(ruleset.get_actions(&pdu.to_sync_room_event(), &ctx)) } +#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] async fn send_notice( unread: UInt, pusher: &get_pushers::Pusher, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f0ec683..756f65e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -5,7 +5,6 @@ use member::MembershipState; use tokio::sync::MutexGuard; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{debug, error, warn}; use lru_cache::LruCache; use regex::Regex; use ring::digest; @@ -27,6 +26,7 @@ use std::{ mem, sync::{Arc, Mutex}, }; +use tracing::{debug, error, warn}; use super::{abstraction::Tree, admin::AdminCommand, pusher}; @@ -82,7 +82,7 @@ pub struct Rooms { pub(super) eventid_outlierpdu: Arc, /// RoomId + EventId -> Parent PDU EventId. - pub(super) prevevent_parent: Arc, + pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, pub(super) auth_chain_cache: Mutex>>, @@ -617,6 +617,7 @@ impl Rooms { } /// Returns the leaf pdus of a room. + #[tracing::instrument(skip(self))] pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -636,6 +637,7 @@ impl Rooms { /// /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. + #[tracing::instrument(skip(self))] pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -653,13 +655,15 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self))] pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(event_id.as_bytes()); - Ok(self.prevevent_parent.get(&key)?.is_some()) + Ok(self.referencedevents.get(&key)?.is_some()) } /// Returns the pdu from the outlier tree. + #[tracing::instrument(skip(self))] pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? @@ -671,6 +675,7 @@ impl Rooms { /// Append the PDU as an outlier. /// /// Any event given to this will be processed (state-res) on another thread. + #[tracing::instrument(skip(self, pdu))] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( &event_id.as_bytes(), @@ -684,7 +689,7 @@ impl Rooms { /// /// By this point the incoming event should be fully authenticated, no auth happens /// in `append_pdu`. 
- #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] pub fn append_pdu( &self, pdu: &PduEvent, @@ -721,11 +726,10 @@ impl Rooms { } // We must keep track of all events that have been referenced. - for leaf in leaves { + for prev in &pdu.prev_events { let mut key = pdu.room_id().as_bytes().to_vec(); - key.extend_from_slice(leaf.as_bytes()); - self.prevevent_parent - .insert(&key, pdu.event_id().as_bytes())?; + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; } self.replace_pdu_leaves(&pdu.room_id, leaves)?; @@ -757,12 +761,11 @@ impl Rooms { // See if the event matches any known pushers for user in db - .users - .iter() + .rooms + .room_members(&pdu.room_id) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == db.globals.server_name()) .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false)) - .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) { // Don't notify the user of their own events if user == pdu.sender { @@ -992,6 +995,7 @@ impl Rooms { Ok(pdu_id) } + #[tracing::instrument(skip(self))] pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -1005,6 +1009,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self))] pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -1019,6 +1024,7 @@ impl Rooms { .unwrap_or(Ok(0)) } + #[tracing::instrument(skip(self))] pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -1037,6 +1043,7 @@ impl Rooms { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + #[tracing::instrument(skip(self, state, globals))] pub fn set_event_state( &self, event_id: &EventId, @@ -1121,6 +1128,7 @@ impl Rooms { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + #[tracing::instrument(skip(self, new_pdu, globals))] pub fn append_to_state( &self, new_pdu: &PduEvent, @@ -1227,6 +1235,7 @@ impl Rooms { } } + #[tracing::instrument(skip(self, invite_event))] pub fn calculate_invite_state( &self, invite_event: &PduEvent, @@ -1264,6 +1273,7 @@ impl Rooms { Ok(state) } + #[tracing::instrument(skip(self))] pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; @@ -1272,6 +1282,7 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. + #[tracing::instrument(skip(self, db, _mutex_lock))] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, @@ -1563,6 +1574,7 @@ impl Rooms { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. + #[tracing::instrument(skip(self))] pub fn pdus_until<'a>( &'a self, user_id: &UserId, @@ -1625,6 +1637,7 @@ impl Rooms { } /// Replace a PDU with the redacted form. + #[tracing::instrument(skip(self, reason))] pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { if let Some(pdu_id) = self.get_pdu_id(event_id)? 
{ let mut pdu = self @@ -1642,6 +1655,7 @@ impl Rooms { } /// Update current membership data. + #[tracing::instrument(skip(self, last_state, db))] pub fn update_membership( &self, room_id: &RoomId, @@ -2026,6 +2040,7 @@ impl Rooms { } /// Makes a user forget a room. + #[tracing::instrument(skip(self))] pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -2041,6 +2056,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self, globals))] pub fn set_alias( &self, alias: &RoomAliasId, @@ -2076,6 +2092,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self))] pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { self.alias_roomid .get(alias.alias().as_bytes())? @@ -2089,6 +2106,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] pub fn room_aliases<'a>( &'a self, room_id: &RoomId, @@ -2104,6 +2122,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { if public { self.publicroomids.insert(room_id.as_bytes(), &[])?; @@ -2114,10 +2133,12 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self))] pub fn is_public_room(&self, room_id: &RoomId) -> Result { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } + #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator> + '_ { self.publicroomids.iter().map(|(bytes, _)| { RoomId::try_from( @@ -2219,6 +2240,7 @@ impl Rooms { } /// Returns an iterator of all servers participating in this room. + #[tracing::instrument(skip(self))] pub fn room_servers<'a>( &'a self, room_id: &RoomId, @@ -2242,6 +2264,7 @@ impl Rooms { } /// Returns an iterator of all rooms a server participates in (as far as we know). + #[tracing::instrument(skip(self))] pub fn server_rooms<'a>( &'a self, server: &ServerName, @@ -2287,6 +2310,7 @@ impl Rooms { } /// Returns an iterator over all User IDs who ever joined a room. 
+ #[tracing::instrument(skip(self))] pub fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, @@ -2494,6 +2518,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -2502,6 +2527,7 @@ impl Rooms { Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) } + #[tracing::instrument(skip(self))] pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -2510,6 +2536,7 @@ impl Rooms { Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) } + #[tracing::instrument(skip(self))] pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -2518,6 +2545,7 @@ impl Rooms { Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) } + #[tracing::instrument(skip(self))] pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -2526,6 +2554,7 @@ impl Rooms { Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } + #[tracing::instrument(skip(self))] pub fn auth_chain_cache( &self, ) -> std::sync::MutexGuard<'_, LruCache>> { diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 9a5cdeb..664c171 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -116,6 +116,7 @@ impl RoomEdus { } /// Sets a private read marker at `count`. + #[tracing::instrument(skip(self, globals))] pub fn private_read_set( &self, room_id: &RoomId, diff --git a/src/database/sending.rs b/src/database/sending.rs index a07192e..f28e883 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,6 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{error, warn}; use ring::digest; use rocket::futures::{ channel::mpsc, @@ -34,6 +33,7 @@ use tokio::{ select, sync::{RwLock, Semaphore}, }; +use tracing::{error, warn}; use super::abstraction::Tree; @@ -45,6 +45,7 @@ pub enum OutgoingKind { } impl OutgoingKind { + #[tracing::instrument(skip(self))] pub fn get_prefix(&self) -> Vec { let mut prefix = match self { OutgoingKind::Appservice(server) => { @@ -223,6 +224,7 @@ impl Sending { }); } + #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status, db))] fn select_events( outgoing_kind: &OutgoingKind, new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key @@ -295,6 +297,7 @@ impl Sending { Ok(Some(events)) } + #[tracing::instrument(skip(db, server))] pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu let since = db @@ -371,7 +374,7 @@ impl Sending { Ok((events, max_edu_count)) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, pdu_id, senderkey))] pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec) -> Result<()> { let mut key = b"$".to_vec(); key.extend_from_slice(&senderkey); @@ -383,7 +386,7 @@ impl Sending { Ok(()) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, server, pdu_id))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -394,7 +397,7 @@ impl Sending { Ok(()) } 
- #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, server, serialized))] pub fn send_reliable_edu(&self, server: &ServerName, serialized: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -418,7 +421,7 @@ impl Sending { Ok(()) } - #[tracing::instrument] + #[tracing::instrument(skip(keys))] fn calculate_hash(keys: &[&[u8]]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); @@ -426,7 +429,7 @@ impl Sending { hash.as_ref().to_owned() } - #[tracing::instrument(skip(db))] + #[tracing::instrument(skip(db, events, kind))] async fn handle_events( kind: OutgoingKind, events: Vec, @@ -658,6 +661,7 @@ impl Sending { } } + #[tracing::instrument(skip(key))] fn parse_servercurrentevent(key: &[u8]) -> Result<(OutgoingKind, SendingEventType)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { @@ -723,7 +727,7 @@ impl Sending { }) } - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self, globals, destination, request))] pub async fn send_federation_request( &self, globals: &crate::database::globals::Globals, @@ -740,7 +744,7 @@ impl Sending { response } - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self, globals, registration, request))] pub async fn send_appservice_request( &self, globals: &crate::database::globals::Globals, diff --git a/src/database/users.rs b/src/database/users.rs index cd46c45..f501ec3 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,6 +8,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; +use tracing::warn; use super::abstraction::Tree; @@ -34,11 +35,13 @@ pub struct Users { impl Users { /// Check if a user has an account on this homeserver. + #[tracing::instrument(skip(self, user_id))] pub fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated + #[tracing::instrument(skip(self, user_id))] pub fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password @@ -51,17 +54,20 @@ impl Users { } /// Create a new user account on this homeserver. + #[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.set_password(user_id, password)?; Ok(()) } /// Returns the number of users registered on this server. + #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. + #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid .get(token.as_bytes())? @@ -89,6 +95,7 @@ impl Users { } /// Returns an iterator over all users on this homeserver. + #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator> + '_ { self.userid_password.iter().map(|(bytes, _)| { UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { @@ -99,6 +106,7 @@ impl Users { } /// Returns the password hash for the given user. + #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? 
@@ -110,6 +118,7 @@ impl Users { } /// Hash and set the user's password to the Argon2 hash + #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(&password) { @@ -129,6 +138,7 @@ impl Users { } /// Returns the displayname of a user on this homeserver. + #[tracing::instrument(skip(self, user_id))] pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? @@ -140,6 +150,7 @@ impl Users { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. + #[tracing::instrument(skip(self, user_id, displayname))] pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname @@ -152,6 +163,7 @@ impl Users { } /// Get the avatar_url of a user. + #[tracing::instrument(skip(self, user_id))] pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? @@ -164,6 +176,7 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. + #[tracing::instrument(skip(self, user_id, avatar_url))] pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl @@ -176,6 +189,7 @@ impl Users { } /// Get the blurhash of a user. + #[tracing::instrument(skip(self, user_id))] pub fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? @@ -189,6 +203,7 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. + #[tracing::instrument(skip(self, user_id, blurhash))] pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash @@ -201,6 +216,7 @@ impl Users { } /// Adds a new device to a user. + #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, @@ -235,6 +251,7 @@ impl Users { } /// Removes a device from a user. + #[tracing::instrument(skip(self, user_id, device_id))] pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -265,6 +282,7 @@ impl Users { } /// Returns an iterator over all device ids of this user. + #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, @@ -287,6 +305,7 @@ impl Users { } /// Replaces the access token of one device. + #[tracing::instrument(skip(self, user_id, device_id, token))] pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -310,6 +329,14 @@ impl Users { Ok(()) } + #[tracing::instrument(skip( + self, + user_id, + device_id, + one_time_key_key, + one_time_key_value, + globals + ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -346,7 +373,7 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(&user_id.as_bytes())? 
@@ -358,6 +385,7 @@ impl Users { .unwrap_or(Ok(0)) } + #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, @@ -397,7 +425,7 @@ impl Users { .transpose() } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, @@ -430,6 +458,7 @@ impl Users { Ok(counts) } + #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, @@ -452,6 +481,14 @@ impl Users { Ok(()) } + #[tracing::instrument(skip( + self, + master_key, + self_signing_key, + user_signing_key, + rooms, + globals + ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, @@ -552,6 +589,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, @@ -595,7 +633,7 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, @@ -608,9 +646,24 @@ impl Users { let mut start = prefix.clone(); start.extend_from_slice(&(from + 1).to_be_bytes()); + let to = to.unwrap_or(u64::MAX); + self.keychangeid_userid .iter_from(&start, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) + .take_while(move |(k, _)| { + k.starts_with(&prefix) + && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { + if let Ok(c) = utils::u64_from_bytes(current) { + c <= to + } else { + warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + false + } + } else { + warn!("BadDatabase: Could not parse keychangeid_userid"); + false + } + }) .map(|(_, bytes)| { UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") @@ -619,6 +672,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id, rooms, globals))] fn mark_device_key_update( &self, user_id: &UserId, @@ -650,6 +704,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, @@ -666,6 +721,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, @@ -693,6 +749,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, @@ -720,6 +777,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id))] pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { self.userid_usersigningkeyid .get(user_id.as_bytes())? 
@@ -732,6 +790,15 @@ impl Users { }) } + #[tracing::instrument(skip( + self, + sender, + target_user_id, + target_device_id, + event_type, + content, + globals + ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -759,7 +826,7 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, @@ -782,7 +849,7 @@ impl Users { Ok(events) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, @@ -817,6 +884,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, @@ -842,6 +910,7 @@ impl Users { } /// Get device metadata. + #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, @@ -860,6 +929,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id))] pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? @@ -870,6 +940,7 @@ impl Users { }) } + #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, @@ -886,6 +957,7 @@ impl Users { } /// Deactivate account + #[tracing::instrument(skip(self, user_id))] pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { // Remove all associated devices for device_id in self.all_device_ids(user_id) { diff --git a/src/error.rs b/src/error.rs index f62bdee..eda522a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,3 @@ -use log::warn; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, @@ -7,17 +6,18 @@ use ruma::{ ServerName, }; use thiserror::Error; +use tracing::warn; #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, http::StatusCode, - log::error, rocket::{ response::{self, Responder}, Request, }, ruma::api::client::r0::uiaa::UiaaResponse, + tracing::error, }; pub type Result = std::result::Result; diff --git a/src/main.rs b/src/main.rs index a5face7..9f6cced 100644 --- a/src/main.rs +++ b/src/main.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use database::Config; pub use database::Database; pub use error::{Error, Result}; +use opentelemetry::trace::Tracer; pub use pdu::PduEvent; pub use rocket::State; use ruma::api::client::error::ErrorKind; @@ -31,8 +32,7 @@ use rocket::{ routes, Request, }; use tokio::sync::RwLock; -use tracing::span; -use tracing_subscriber::{prelude::*, Registry}; +use tracing_subscriber::{prelude::*, EnvFilter}; fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) @@ -201,38 +201,57 @@ async fn main() { .extract::() .expect("It looks like your config is invalid. 
Please take a look at the error"); - let mut _span: Option = None; - let mut _enter: Option> = None; + let start = async { + config.warn_deprecated(); + + let db = Database::load_or_create(&config) + .await + .expect("config is valid"); + + let rocket = setup_rocket(raw_config, Arc::clone(&db)) + .ignite() + .await + .unwrap(); + + Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; + + rocket.launch().await.unwrap(); + }; if config.allow_jaeger { - let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() + let tracer = opentelemetry_jaeger::new_pipeline() .with_service_name("conduit") - .install() + .install_simple() .unwrap(); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - Registry::default().with(telemetry).try_init().unwrap(); - _span = Some(span!(tracing::Level::INFO, "app_start", work_units = 2)); - _enter = Some(_span.as_ref().unwrap().enter()); + let span = tracer.start("conduit"); + start.await; + drop(span); } else { std::env::set_var("RUST_LOG", &config.log); - tracing_subscriber::fmt::init(); + + let registry = tracing_subscriber::Registry::default(); + if config.tracing_flame { + let (flame_layer, _guard) = + tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); + let flame_layer = flame_layer.with_empty_samples(false); + + let filter_layer = EnvFilter::new("trace,h2=off"); + + let subscriber = registry.with(filter_layer).with(flame_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); + start.await; + } else { + let fmt_layer = tracing_subscriber::fmt::Layer::new(); + let filter_layer = EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new("info")) + .unwrap(); + + let subscriber = registry.with(filter_layer).with(fmt_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); + start.await; + } } - - config.warn_deprecated(); - - let db = Database::load_or_create(&config) - .await - .expect("config is valid"); - - let rocket = setup_rocket(raw_config, Arc::clone(&db)) - .ignite() - .await - .unwrap(); - - Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; - - rocket.launch().await.unwrap(); } #[catch(404)] diff --git a/src/pdu.rs b/src/pdu.rs index f8dddd9..00eda5b 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,5 +1,4 @@ use crate::Error; -use log::error; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, @@ -13,6 +12,7 @@ use ruma::{ use serde::{Deserialize, Serialize}; use serde_json::json; use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; +use tracing::error; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index a4beac6..2121439 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -10,7 +10,6 @@ use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { crate::server_server, - log::{debug, warn}, rocket::{ data::{self, ByteUnit, Data, FromData}, http::Status, @@ -23,6 +22,7 @@ use { std::collections::BTreeMap, std::convert::TryFrom, std::io::Cursor, + tracing::{debug, warn}, }; /// This struct converts rocket requests into ruma structs by converting them into http requests @@ -45,6 +45,7 @@ where { type Error = (); + #[tracing::instrument(skip(request, data))] async fn from_data( request: &'a Request<'_>, data: Data<'a>, @@ -256,7 +257,10 @@ where match ruma::signatures::verify_json(&pub_key_map, &request_map) { Ok(()) => (None, None, Some(origin), false), Err(e) => { - warn!("Failed to verify json request from {}: 
{}", origin, e); + warn!( + "Failed to verify json request from {}: {}\n{:?}", + origin, e, request_map + ); if request.uri().to_string().contains('@') { warn!("Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)"); diff --git a/src/server_server.rs b/src/server_server.rs index f725dce..232c5d4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use crate::{ }; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{debug, error, info, trace, warn}; use regex::Regex; use rocket::response::content::Json; use ruma::{ @@ -63,7 +62,8 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant, SystemTime}, }; -use tokio::sync::Semaphore; +use tokio::sync::{MutexGuard, Semaphore}; +use tracing::{debug, error, info, trace, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; @@ -838,6 +838,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin( origin: &'a ServerName, event_id: &'a EventId, @@ -1156,6 +1157,18 @@ pub fn handle_incoming_pdu<'a>( } debug!("Auth check succeeded."); + // We start looking at current room state now, so lets lock the room + + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) let mut extremities = db @@ -1170,8 +1183,8 @@ pub fn handle_incoming_pdu<'a>( } } - // Only keep those extremities we don't have in our timeline yet - extremities.retain(|id| !matches!(db.rooms.get_non_outlier_pdu_json(id), Ok(Some(_)))); + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); let mut extremity_statehashes = Vec::new(); @@ -1301,9 +1314,11 @@ pub fn handle_incoming_pdu<'a>( return Err("State resolution failed, either an event could not be found or deserialization".into()); } }; + state }; + debug!("starting soft fail auth check"); // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it let soft_fail = !state_res::event_auth::auth_check( &room_version, @@ -1322,11 +1337,11 @@ pub fn handle_incoming_pdu<'a>( pdu_id = Some( append_incoming_pdu( &db, - &room_id, &incoming_pdu, val, extremities, &state_at_incoming_event, + &mutex_lock, ) .await .map_err(|_| "Failed to add pdu to db.".to_owned())?, @@ -1350,6 +1365,7 @@ pub fn handle_incoming_pdu<'a>( } // Event has passed all auth/stateres checks + drop(mutex_lock); Ok(pdu_id) }) } @@ -1626,25 +1642,15 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. 
-#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state, _mutex_lock))] async fn append_incoming_pdu( db: &Database, - room_id: &RoomId, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: HashSet, state: &StateMap>, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex ) -> Result> { - let mutex = Arc::clone( - db.globals - .roomid_mutex - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. db.rooms @@ -1657,8 +1663,6 @@ async fn append_incoming_pdu( &db, )?; - drop(mutex_lock); - for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces diff --git a/src/utils.rs b/src/utils.rs index a4dfe03..60a4e0c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -9,6 +9,7 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +#[tracing::instrument] pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) @@ -48,16 +49,19 @@ pub fn generate_keypair() -> Vec { } /// Parses the bytes into an u64. +#[tracing::instrument(skip(bytes))] pub fn u64_from_bytes(bytes: &[u8]) -> Result { let array: [u8; 8] = bytes.try_into()?; Ok(u64::from_be_bytes(array)) } /// Parses the bytes into a string. +#[tracing::instrument(skip(bytes))] pub fn string_from_bytes(bytes: &[u8]) -> Result { String::from_utf8(bytes.to_vec()) } +#[tracing::instrument(skip(length))] pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) @@ -67,6 +71,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password +#[tracing::instrument(skip(password))] pub fn calculate_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, @@ -77,6 +82,7 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } +#[tracing::instrument(skip(iterators, check_order))] pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, @@ -104,6 +110,7 @@ pub fn common_elements( /// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. /// /// `value` must serialize to an `serde_json::Value::Object`. 
+#[tracing::instrument(skip(value))] pub fn to_canonical_object( value: T, ) -> Result { @@ -117,6 +124,7 @@ pub fn to_canonical_object( } } +#[tracing::instrument(skip(deserializer))] pub fn deserialize_from_str< 'de, D: serde::de::Deserializer<'de>, From c209775abdc9be52580d37a7331ea4ad9a06b2cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 29 Jul 2021 12:33:44 +0200 Subject: [PATCH 0696/1727] fix: pdu without state bug --- Cargo.lock | 82 +++++++++++++++++++++---------------------- src/database/rooms.rs | 7 ++++ 2 files changed, 48 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 01d2ba2..417e437 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ "proc-macro2", "quote", @@ -137,9 +137,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.59.0" +version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2ce639ee22f41a6ea0a3061e9bea9f690cf0c6ffc1ada0a3a599778f99ccba" +checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" dependencies = [ "bitflags", "cexpr", @@ -200,9 +200,9 @@ checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytemuck" -version = "1.7.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9966d2ab714d0f785dbac0a0396251a35280aeb42413281617d0209ab4898435" +checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" [[package]] name = "byteorder" @@ -503,9 +503,9 @@ dependencies = [ [[package]] name = "devise" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "411cf45ac38f00df3679689616649dc12607b846db171780bb790b514a042832" +checksum = "50c7580b072f1c8476148f16e0a0d5dedddab787da98d86c5082c5e9ed8ab595" dependencies = [ "devise_codegen", "devise_core", @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "devise_codegen" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cf7081f06822f1787e29359354426132cf832cc977d7a8ff747848631462ad1" +checksum = "123c73e7a6e51b05c75fe1a1b2f4e241399ea5740ed810b0e3e6cacd9db5e7b2" dependencies = [ "devise_core", "quote", @@ -523,9 +523,9 @@ dependencies = [ [[package]] name = "devise_core" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c23631758736875d7ce08f847f296b4001b72cf90878e85b47df7ac5442147" +checksum = "841ef46f4787d9097405cac4e70fb8644fc037b526e8c14054247c0263c400d0" dependencies = [ "bitflags", "proc-macro2", @@ -698,9 +698,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "1adc00f486adfc9ce99f77d717836f0c5aa84965eb0b4f051f4e83f7cab53f8b" dependencies = [ "futures-channel", "futures-core", @@ -713,9 +713,9 @@ dependencies = [ [[package]] name = 
"futures-channel" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" +checksum = "74ed2411805f6e4e3d9bc904c95d5d423b89b3b25dc0250aa74729de20629ff9" dependencies = [ "futures-core", "futures-sink", @@ -723,15 +723,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" [[package]] name = "futures-executor" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" +checksum = "4d0d535a57b87e1ae31437b892713aee90cd2d7b0ee48727cd11fc72ef54761c" dependencies = [ "futures-core", "futures-task", @@ -740,15 +740,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "0b0e06c393068f3a6ef246c75cdca793d6a46347e75286933e5e75fd2fd11582" [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "c54913bae956fb8df7f4dc6fc90362aa72e69148e3f39041fbe8742d21e0ac57" dependencies = [ "autocfg", "proc-macro-hack", @@ -759,21 +759,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "c0f30aaa67363d119812743aa5f33c201a7a66329f97d1a887022971feea4b53" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" +checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" dependencies = [ "autocfg", "futures-channel", @@ -986,7 +986,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.0", + "socket2 0.4.1", "tokio", "tower-service", "tracing", @@ -1623,9 +1623,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.2" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87bb2d5c68b7505a3a89eb2f3583a4d56303863005226c2ef99319930a262be4" +checksum = "fbee84ed13e44dd82689fa18348a49934fa79cc774a344c42fc9b301c71b140a" dependencies = [ "der", "spki", @@ -1690,9 +1690,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" dependencies = [ "unicode-xid", ] @@ -2494,9 +2494,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "28c5e91e4240b46c4c19219d6cc84784444326131a4210f496f948d5cc827a29" dependencies = [ "itoa", "ryu", @@ -2629,9 +2629,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" dependencies = [ "libc", "winapi", @@ -2908,9 +2908,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.8.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2602b8af3767c285202012822834005f596c811042315fa7e9f5b12b2a43207" +checksum = "4b7b349f11a7047e6d1276853e612d152f5e8a352c61917887cc2169e2366b4c" dependencies = [ "autocfg", "bytes", diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 756f65e..7c38542 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1435,6 +1435,13 @@ impl Rooms { CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), ); + // Generate short event id + let shorteventid = db.globals.next_count()?; + self.eventid_shorteventid + .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; + // Increment the last index and use that // This is also the next_batch/since value let count = db.globals.next_count()?; From 5c776e9ba7162e6adc2ddb8901ec5deaf6cc4e3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 29 Jul 2021 20:17:47 +0200 Subject: [PATCH 0697/1727] feat: heed db backend (LMDB) --- Cargo.lock | 163 +++++++++++++++++---- Cargo.toml | 4 +- src/client_server/to_device.rs | 2 +- src/database.rs | 39 +++-- src/database/abstraction.rs | 3 + src/database/abstraction/heed.rs | 241 +++++++++++++++++++++++++++++++ src/database/rooms.rs | 1 + src/database/sending.rs | 97 +++++++------ src/error.rs | 3 + 9 files changed, 456 insertions(+), 97 deletions(-) create mode 100644 src/database/abstraction/heed.rs diff --git a/Cargo.lock b/Cargo.lock index 417e437..347f235 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,6 +135,15 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.59.1" @@ -234,6 +243,12 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -278,6 +293,7 @@ dependencies = [ "bytes", "crossbeam", "directories", + "heed", "http", "image", 
"jsonwebtoken", @@ -379,7 +395,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -388,12 +404,12 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", + "crossbeam-queue 0.3.2", + "crossbeam-utils 0.8.5", ] [[package]] @@ -402,8 +418,8 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", ] [[package]] @@ -412,9 +428,9 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-utils 0.8.5", ] [[package]] @@ -423,21 +439,40 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", "lazy_static", "memoffset", "scopeguard", ] +[[package]] +name = "crossbeam-queue" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +dependencies = [ + "crossbeam-utils 0.6.6", +] + [[package]] name = "crossbeam-queue" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", +] + +[[package]] +name = "crossbeam-utils" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +dependencies = [ + "cfg-if 0.1.10", + "lazy_static", ] [[package]] @@ -446,7 +481,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "lazy_static", ] @@ -610,7 +645,7 @@ version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -828,7 +863,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -839,7 +874,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "wasi 0.10.2+wasi-snapshot-preview1", ] @@ -906,6 +941,42 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heed" +version = "0.10.6" 
+source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" +dependencies = [ + "bytemuck", + "byteorder", + "heed-traits", + "heed-types", + "libc", + "lmdb-rkv-sys", + "once_cell", + "page_size", + "serde", + "synchronoise", + "url", +] + +[[package]] +name = "heed-traits" +version = "0.7.0" +source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" + +[[package]] +name = "heed-types" +version = "0.7.2" +source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" +dependencies = [ + "bincode", + "bytemuck", + "byteorder", + "heed-traits", + "serde", + "serde_json", +] + [[package]] name = "hermit-abi" version = "0.1.19" @@ -1068,7 +1139,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1190,7 +1261,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "winapi", ] @@ -1223,6 +1294,17 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +[[package]] +name = "lmdb-rkv-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "lock_api" version = "0.4.4" @@ -1238,7 +1320,7 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1247,7 +1329,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2111607c723d7857e0d8299d5ce7a0bf4b844d3e44f8de136b13da513eaf8fc4" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "generator", "scoped-tls", "serde", @@ -1503,6 +1585,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "page_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "parking_lot" version = "0.11.1" @@ -1520,7 +1612,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "instant", "libc", "redox_syscall", @@ -2320,7 +2412,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.8.5", ] [[package]] @@ -2540,7 +2632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" dependencies = [ "block-buffer", - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest", "opaque-debug", @@ -2601,7 +2693,7 @@ checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" dependencies = [ "crc32fast", "crossbeam-epoch", - 
"crossbeam-utils", + "crossbeam-utils 0.8.5", "fs2", "fxhash", "libc", @@ -2622,7 +2714,7 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "winapi", ] @@ -2751,6 +2843,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "synchronoise" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d717ed0efc9d39ab3b642a096bc369a3e02a38a51c41845d7fe31bdad1d6eaeb" +dependencies = [ + "crossbeam-queue 0.1.2", +] + [[package]] name = "synstructure" version = "0.12.5" @@ -2775,7 +2876,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "rand 0.8.4", "redox_syscall", @@ -3005,7 +3106,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -3105,7 +3206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" dependencies = [ "async-trait", - "cfg-if", + "cfg-if 1.0.0", "data-encoding", "enum-as-inner", "futures-channel", @@ -3129,7 +3230,7 @@ version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "futures-util", "ipconfig", "lazy_static", @@ -3290,7 +3391,7 @@ version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -3317,7 +3418,7 @@ version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", diff --git a/Cargo.toml b/Cargo.toml index 92134a4..1d774dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,12 +79,14 @@ parking_lot = { version = "0.11.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" +heed = { git = "https://github.com/Kerollmops/heed.git", rev = "b235e9c3e9984737c967b5de1014b48f125dc28b", optional = true } [features] -default = ["conduit_bin", "backend_sqlite"] +default = ["conduit_bin", "backend_heed"] backend_sled = ["sled"] backend_rocksdb = ["rocksdb"] backend_sqlite = ["sqlite"] +backend_heed = ["heed", "crossbeam"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e3fd780..d3f7d25 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -47,7 +47,7 @@ pub async fn send_event_to_device_route( db.sending.send_reliable_edu( target_user_id.server_name(), - &serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( + serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { 
sender: sender_user.clone(), ev_type: EventType::from(&body.event_type), diff --git a/src/database.rs b/src/database.rs index 9f24345..65a60f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -156,6 +156,9 @@ pub type Engine = abstraction::rocksdb::Engine; #[cfg(feature = "sqlite")] pub type Engine = abstraction::sqlite::Engine; +#[cfg(feature = "heed")] +pub type Engine = abstraction::heed::Engine; + pub struct Database { _db: Arc, pub globals: globals::Globals, @@ -188,24 +191,20 @@ impl Database { fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { let path = Path::new(&config.database_path); - #[cfg(feature = "backend_sqlite")] - { - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - if sled_exists { - if sqlite_exists { - // most likely an in-place directory, only warn - warn!("Both sled and sqlite databases are detected in database directory"); - warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") - } else { - error!( - "Sled database detected, conduit now uses sqlite for database operations" - ); - error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); - return Err(Error::bad_config( - "sled database detected, migrate to sqlite", - )); - } + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + // TODO: heed + if sled_exists { + if sqlite_exists { + // most likely an in-place directory, only warn + warn!("Both sled and sqlite databases are detected in database directory"); + warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") + } else { + error!("Sled database detected, conduit now uses sqlite for database operations"); + error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); + return Err(Error::bad_config( + "sled database detected, migrate to sqlite", + )); } } @@ -313,8 +312,8 @@ impl Database { }, sending: sending::Sending { servername_educount: builder.open_tree("servername_educount")?, - servernamepduids: builder.open_tree("servernamepduids")?, - servercurrentevents: builder.open_tree("servercurrentevents")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), sender: sending_sender, }, diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index fb11ba0..8ccac78 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,6 +12,9 @@ pub mod sled; #[cfg(feature = "sqlite")] pub mod sqlite; +#[cfg(feature = "heed")] +pub mod heed; + pub trait DatabaseEngine: Sized { fn open(config: &Config) -> Result>; fn open_tree(self: &Arc, name: &'static str) -> Result>; diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs new file mode 100644 index 0000000..61e7927 --- /dev/null +++ b/src/database/abstraction/heed.rs @@ -0,0 +1,241 @@ +use super::super::Config; +use crossbeam::channel::{bounded, Sender as ChannelSender}; +use threadpool::ThreadPool; + +use crate::{Error, Result}; +use std::{ + collections::HashMap, + future::Future, + pin::Pin, + sync::{Arc, Mutex, RwLock}, +}; +use tokio::sync::oneshot::Sender; + +use super::{DatabaseEngine, Tree}; + +type TupleOfBytes = 
(Vec, Vec); + +pub struct Engine { + env: heed::Env, + iter_pool: Mutex, +} + +pub struct EngineTree { + engine: Arc, + tree: Arc, + watchers: RwLock, Vec>>>, +} + +fn convert_error(error: heed::Error) -> Error { + panic!(error.to_string()); + Error::HeedError { + error: error.to_string(), + } +} + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + let mut env_builder = heed::EnvOpenOptions::new(); + env_builder.map_size(1024 * 1024 * 1024 * 1024); // 1 Terabyte + env_builder.max_readers(126); + env_builder.max_dbs(128); + unsafe { + env_builder.flag(heed::flags::Flags::MdbNoSync); + env_builder.flag(heed::flags::Flags::MdbNoMetaSync); + } + + Ok(Arc::new(Engine { + env: env_builder + .open(&config.database_path) + .map_err(convert_error)?, + iter_pool: Mutex::new(ThreadPool::new(10)), + })) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + // Creates the db if it doesn't exist already + Ok(Arc::new(EngineTree { + engine: Arc::clone(self), + tree: Arc::new( + self.env + .create_database(Some(name)) + .map_err(convert_error)?, + ), + watchers: RwLock::new(HashMap::new()), + })) + } + + fn flush(self: &Arc) -> Result<()> { + self.env.force_sync().map_err(convert_error)?; + Ok(()) + } +} + +impl EngineTree { + #[tracing::instrument(skip(self, tree, from, backwards))] + fn iter_from_thread( + &self, + tree: Arc, + from: Vec, + backwards: bool, + ) -> Box + Send + Sync> { + let (s, r) = bounded::(5); + let engine = Arc::clone(&self.engine); + + let lock = self.engine.iter_pool.lock().unwrap(); + if lock.active_count() < lock.max_count() { + lock.execute(move || { + iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s); + }); + } else { + std::thread::spawn(move || { + iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s); + }); + } + + Box::new(r.into_iter()) + } +} + +#[tracing::instrument(skip(tree, txn, from, backwards))] +fn iter_from_thread_work( + tree: Arc, + txn: &heed::RoTxn<'_>, + from: Vec, + backwards: bool, + s: &ChannelSender<(Vec, Vec)>, +) { + if backwards { + for (k, v) in tree.rev_range(txn, ..=&*from).unwrap().map(|r| r.unwrap()) { + if s.send((k.to_vec(), v.to_vec())).is_err() { + return; + } + } + } else { + if from.is_empty() { + for (k, v) in tree.iter(txn).unwrap().map(|r| r.unwrap()) { + if s.send((k.to_vec(), v.to_vec())).is_err() { + return; + } + } + } else { + for (k, v) in tree.range(txn, &*from..).unwrap().map(|r| r.unwrap()) { + if s.send((k.to_vec(), v.to_vec())).is_err() { + return; + } + } + } + } +} + +impl Tree for EngineTree { + #[tracing::instrument(skip(self, key))] + fn get(&self, key: &[u8]) -> Result>> { + let txn = self.engine.env.read_txn().map_err(convert_error)?; + Ok(self + .tree + .get(&txn, &key) + .map_err(convert_error)? 
+ .map(|s| s.to_vec())) + } + + #[tracing::instrument(skip(self, key, value))] + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let mut txn = self.engine.env.write_txn().map_err(convert_error)?; + self.tree + .put(&mut txn, &key, &value) + .map_err(convert_error)?; + txn.commit().map_err(convert_error)?; + + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + }; + + Ok(()) + } + + #[tracing::instrument(skip(self, key))] + fn remove(&self, key: &[u8]) -> Result<()> { + let mut txn = self.engine.env.write_txn().map_err(convert_error)?; + self.tree.delete(&mut txn, &key).map_err(convert_error)?; + txn.commit().map_err(convert_error)?; + Ok(()) + } + + #[tracing::instrument(skip(self))] + fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { + self.iter_from(&[], false) + } + + #[tracing::instrument(skip(self, from, backwards))] + fn iter_from( + &self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + Send> { + self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards) + } + + #[tracing::instrument(skip(self, key))] + fn increment(&self, key: &[u8]) -> Result> { + let mut txn = self.engine.env.write_txn().map_err(convert_error)?; + + let old = self.tree.get(&txn, &key).map_err(convert_error)?; + let new = + crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some"); + + self.tree + .put(&mut txn, &key, &&*new) + .map_err(convert_error)?; + + txn.commit().map_err(convert_error)?; + + Ok(new) + } + + #[tracing::instrument(skip(self, prefix))] + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + Send + 'a> { + Box::new( + self.iter_from(&prefix, false) + .take_while(move |(key, _)| key.starts_with(&prefix)), + ) + } + + #[tracing::instrument(skip(self, prefix))] + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .unwrap() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7c38542..8ada87f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -863,6 +863,7 @@ impl Rooms { if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) { for word in body .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|word| word.len() <= 50) .map(str::to_lowercase) { let mut key = pdu.room_id.as_bytes().to_vec(); diff --git a/src/database/sending.rs b/src/database/sending.rs index f28e883..506bc17 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -81,10 +81,10 @@ pub enum SendingEventType { pub struct Sending { /// The state for a given state hash. 
pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernamepduids: Arc, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId - pub(super) servercurrentevents: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent + pub(super) servernameevent_data: Arc, // ServernamEvent = (+ / $)SenderKey / ServerName / UserId + PduId / * (for edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / * (for edus), Data = EDU content pub(super) maximum_requests: Arc, - pub sender: mpsc::UnboundedSender>, + pub sender: mpsc::UnboundedSender<(Vec, Vec)>, } enum TransactionStatus { @@ -97,7 +97,7 @@ impl Sending { pub fn start_handler( &self, db: Arc>, - mut receiver: mpsc::UnboundedReceiver>, + mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, ) { tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -109,16 +109,15 @@ impl Sending { let guard = db.read().await; - for (key, outgoing_kind, event) in - guard - .sending - .servercurrentevents - .iter() - .filter_map(|(key, _)| { - Self::parse_servercurrentevent(&key) - .ok() - .map(|(k, e)| (key, k, e)) - }) + for (key, outgoing_kind, event) in guard + .sending + .servercurrentevent_data + .iter() + .filter_map(|(key, v)| { + Self::parse_servercurrentevent(&key, v) + .ok() + .map(|(k, e)| (key, k, e)) + }) { let entry = initial_transactions .entry(outgoing_kind.clone()) @@ -129,7 +128,7 @@ impl Sending { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - guard.sending.servercurrentevents.remove(&key).unwrap(); + guard.sending.servercurrentevent_data.remove(&key).unwrap(); continue; } @@ -156,17 +155,17 @@ impl Sending { let guard = db.read().await; let prefix = outgoing_kind.get_prefix(); - for (key, _) in guard.sending.servercurrentevents + for (key, _) in guard.sending.servercurrentevent_data .scan_prefix(prefix.clone()) { - guard.sending.servercurrentevents.remove(&key).unwrap(); + guard.sending.servercurrentevent_data.remove(&key).unwrap(); } // Find events that have been added since starting the last request - let new_events = guard.sending.servernamepduids + let new_events = guard.sending.servernameevent_data .scan_prefix(prefix.clone()) - .filter_map(|(k, _)| { - Self::parse_servercurrentevent(&k).ok().map(|ev| (ev, k)) + .filter_map(|(k, v)| { + Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) }) .take(30) .collect::>(); @@ -175,9 +174,10 @@ impl Sending { if !new_events.is_empty() { // Insert pdus we found - for (_, key) in &new_events { - guard.sending.servercurrentevents.insert(&key, &[]).unwrap(); - guard.sending.servernamepduids.remove(&key).unwrap(); + for (e, key) in &new_events { + let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; + guard.sending.servercurrentevent_data.insert(&key, value).unwrap(); + guard.sending.servernameevent_data.remove(&key).unwrap(); } drop(guard); @@ -205,8 +205,8 @@ impl Sending { } }; }, - Some(key) = receiver.next() => { - if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { + Some((key, value)) = receiver.next() => { + if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { let guard = db.read().await; if let Ok(Some(events)) = Self::select_events( @@ -267,18 +267,25 @@ impl Sending { if retry { // We retry the previous transaction - for (key, _) in db.sending.servercurrentevents.scan_prefix(prefix) { - if let Ok((_, e)) = 
Self::parse_servercurrentevent(&key) { + for (key, value) in db.sending.servercurrentevent_data.scan_prefix(prefix) { + if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { events.push(e); } } } else { for (e, full_key) in new_events { - db.sending.servercurrentevents.insert(&full_key, &[])?; + let value = if let SendingEventType::Edu(value) = &e { + &**value + } else { + &[][..] + }; + db.sending + .servercurrentevent_data + .insert(&full_key, value)?; // If it was a PDU we have to unqueue it // TODO: don't try to unqueue EDUs - db.sending.servernamepduids.remove(&full_key)?; + db.sending.servernameevent_data.remove(&full_key)?; events.push(e); } @@ -380,8 +387,8 @@ impl Sending { key.extend_from_slice(&senderkey); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(&key, b"")?; - self.sender.unbounded_send(key).unwrap(); + self.servernameevent_data.insert(&key, &[])?; + self.sender.unbounded_send((key, vec![])).unwrap(); Ok(()) } @@ -391,20 +398,19 @@ impl Sending { let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(&key, b"")?; - self.sender.unbounded_send(key).unwrap(); + self.servernameevent_data.insert(&key, &[])?; + self.sender.unbounded_send((key, vec![])).unwrap(); Ok(()) } #[tracing::instrument(skip(self, server, serialized))] - pub fn send_reliable_edu(&self, server: &ServerName, serialized: &[u8]) -> Result<()> { + pub fn send_reliable_edu(&self, server: &ServerName, serialized: Vec) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); key.push(b'*'); - key.extend_from_slice(serialized); - self.servernamepduids.insert(&key, b"")?; - self.sender.unbounded_send(key).unwrap(); + self.servernameevent_data.insert(&key, &serialized)?; + self.sender.unbounded_send((key, serialized)).unwrap(); Ok(()) } @@ -415,8 +421,8 @@ impl Sending { key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); - self.servernamepduids.insert(&key, b"")?; - self.sender.unbounded_send(key).unwrap(); + self.servernameevent_data.insert(&key, &[])?; + self.sender.unbounded_send((key, vec![])).unwrap(); Ok(()) } @@ -451,7 +457,7 @@ impl Sending { ( kind.clone(), Error::bad_database( - "[Appservice] Event in servernamepduids not found in db.", + "[Appservice] Event in servernameevent_data not found in db.", ), ) })? 
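(Editor's note, not part of the patch: a rough sketch, inferred from the `send_pdu`/`send_reliable_edu` hunks above, of the key layout the renamed `servernameevent_data`/`servercurrentevent_data` trees use for the plain federation case. PDUs are addressed by id with an empty value; EDUs get a `*` marker and their serialized content now lives in the tree value instead of being appended to the key. The helper name `queue_key` is hypothetical.)

```rust
// Hypothetical illustration (not in the patch) of the queue-key layout:
// destination, a 0xff separator, then either the PDU id or a `*` EDU marker.
fn queue_key(server: &str, pdu_id: Option<&[u8]>) -> Vec<u8> {
    let mut key = server.as_bytes().to_vec();
    key.push(0xff);
    match pdu_id {
        // PDU: the id is the payload, the tree value stays empty.
        Some(id) => key.extend_from_slice(id),
        // EDU: only a marker in the key; the serialized EDU is the value.
        None => key.push(b'*'),
    }
    key
}

fn main() {
    let pdu = queue_key("example.org", Some(b"$event_id"));
    let edu = queue_key("example.org", None);
    assert!(pdu.starts_with(b"example.org"));
    assert_eq!(edu.last(), Some(&b'*'));
}
```

The old scheme extended the key with the serialized EDU itself, which is what the removed `key.extend_from_slice(serialized)` line did; moving the content into the value is why `parse_servercurrentevent` now takes the value as a second argument.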
@@ -508,7 +514,7 @@ impl Sending { ( kind.clone(), Error::bad_database( - "[Push] Event in servernamepduids not found in db.", + "[Push] Event in servernamevent_datas not found in db.", ), ) })?, @@ -602,7 +608,7 @@ impl Sending { ( OutgoingKind::Normal(server.clone()), Error::bad_database( - "[Normal] Event in servernamepduids not found in db.", + "[Normal] Event in servernamevent_datas not found in db.", ), ) })?, @@ -662,7 +668,10 @@ impl Sending { } #[tracing::instrument(skip(key))] - fn parse_servercurrentevent(key: &[u8]) -> Result<(OutgoingKind, SendingEventType)> { + fn parse_servercurrentevent( + key: &[u8], + value: Vec, + ) -> Result<(OutgoingKind, SendingEventType)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { let mut parts = key[1..].splitn(2, |&b| b == 0xff); @@ -680,7 +689,7 @@ impl Sending { Error::bad_database("Invalid server string in server_currenttransaction") })?), if event.starts_with(b"*") { - SendingEventType::Edu(event[1..].to_vec()) + SendingEventType::Edu(value.to_vec()) } else { SendingEventType::Pdu(event.to_vec()) }, diff --git a/src/error.rs b/src/error.rs index eda522a..24e52ec 100644 --- a/src/error.rs +++ b/src/error.rs @@ -42,6 +42,9 @@ pub enum Error { #[from] source: rusqlite::Error, }, + #[cfg(feature = "heed")] + #[error("There was a problem with the connection to the heed database: {error}")] + HeedError { error: String }, #[error("Could not generate an image.")] ImageError { #[from] From 5df6b8cd5f3fde2e5b295836ea58fd9fc2ab6b51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 30 Jul 2021 12:11:06 +0200 Subject: [PATCH 0698/1727] improvement: better account data implementation --- Cargo.lock | 10 ++-- Cargo.toml | 4 +- src/client_server/room.rs | 5 +- src/database.rs | 63 +++++++++++++++++------- src/database/abstraction/heed.rs | 7 ++- src/database/account_data.rs | 84 +++++++++++++++----------------- src/database/pusher.rs | 46 ++++++++++------- src/database/rooms.rs | 25 +++++++++- 8 files changed, 149 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 347f235..485aeab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -944,7 +944,7 @@ dependencies = [ [[package]] name = "heed" version = "0.10.6" -source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" +source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" dependencies = [ "bytemuck", "byteorder", @@ -962,12 +962,12 @@ dependencies = [ [[package]] name = "heed-traits" version = "0.7.0" -source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" +source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" [[package]] name = "heed-types" version = "0.7.2" -source = "git+https://github.com/Kerollmops/heed.git?rev=b235e9c3e9984737c967b5de1014b48f125dc28b#b235e9c3e9984737c967b5de1014b48f125dc28b" +source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" dependencies = [ "bincode", "bytemuck", @@ -2586,9 +2586,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"28c5e91e4240b46c4c19219d6cc84784444326131a4210f496f948d5cc827a29" +checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index 1d774dd..19ce6b1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,10 +79,10 @@ parking_lot = { version = "0.11.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" -heed = { git = "https://github.com/Kerollmops/heed.git", rev = "b235e9c3e9984737c967b5de1014b48f125dc28b", optional = true } +heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } [features] -default = ["conduit_bin", "backend_heed"] +default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_rocksdb = ["rocksdb"] backend_sqlite = ["sqlite"] diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 49a6052..d5188e8 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -15,7 +15,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; -use tracing::info; +use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -233,7 +233,8 @@ pub async fn create_room_route( // 5. Events listed in initial_state for event in &body.initial_state { - let pdu_builder = PduBuilder::from(event.deserialize().map_err(|_| { + let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| { + warn!("Invalid initial state event: {:?}", e); Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") })?); diff --git a/src/database.rs b/src/database.rs index 65a60f0..db0eae8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -189,22 +189,26 @@ impl Database { } fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { - let path = Path::new(&config.database_path); + #[cfg(feature = "backend_sqlite")] + { + let path = Path::new(&config.database_path); - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - // TODO: heed - if sled_exists { - if sqlite_exists { - // most likely an in-place directory, only warn - warn!("Both sled and sqlite databases are detected in database directory"); - warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") - } else { - error!("Sled database detected, conduit now uses sqlite for database operations"); - error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); - return Err(Error::bad_config( - "sled database detected, migrate to sqlite", - )); + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + if sled_exists { + if sqlite_exists { + // most likely an in-place directory, only warn + warn!("Both sled and sqlite databases are detected in database directory"); + warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") + } else { + error!( + "Sled database detected, conduit now uses sqlite for database operations" + ); + error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); + return Err(Error::bad_config( + "sled database detected, migrate to sqlite", + )); + } } } @@ -298,6 +302,7 @@ impl Database { }, account_data: account_data::AccountData { 
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, }, media: media::Media { mediaid_file: builder.open_tree("mediaid_file")?, @@ -420,6 +425,30 @@ impl Database { println!("Migration: 3 -> 4 finished"); } + + if db.globals.database_version()? < 5 { + // Upgrade user data store + for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() { + let mut parts = roomuserdataid.split(|&b| b == 0xff); + let user_id = parts.next().unwrap(); + let room_id = parts.next().unwrap(); + let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap(); + + let mut key = room_id.to_vec(); + key.push(0xff); + key.extend_from_slice(user_id); + key.push(0xff); + key.extend_from_slice(event_type); + + db.account_data + .roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + } + + db.globals.bump_database_version(5)?; + + println!("Migration: 4 -> 5 finished"); + } } let guard = db.read().await; @@ -516,7 +545,7 @@ impl Database { futures.push( self.account_data - .roomuserdataid_accountdata + .roomusertype_roomuserdataid .watch_prefix(&roomuser_prefix), ); } @@ -526,7 +555,7 @@ impl Database { futures.push( self.account_data - .roomuserdataid_accountdata + .roomusertype_roomuserdataid .watch_prefix(&globaluserdata_prefix), ); diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs index 61e7927..0421b14 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -27,7 +27,6 @@ pub struct EngineTree { } fn convert_error(error: heed::Error) -> Error { - panic!(error.to_string()); Error::HeedError { error: error.to_string(), } @@ -40,8 +39,8 @@ impl DatabaseEngine for Engine { env_builder.max_readers(126); env_builder.max_dbs(128); unsafe { - env_builder.flag(heed::flags::Flags::MdbNoSync); - env_builder.flag(heed::flags::Flags::MdbNoMetaSync); + env_builder.flag(heed::flags::Flags::MdbWriteMap); + env_builder.flag(heed::flags::Flags::MdbMapAsync); } Ok(Arc::new(Engine { @@ -79,7 +78,7 @@ impl EngineTree { from: Vec, backwards: bool, ) -> Box + Send + Sync> { - let (s, r) = bounded::(5); + let (s, r) = bounded::(100); let engine = Arc::clone(&self.engine); let lock = self.engine.iter_pool.lock().unwrap(); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 8a8d2c2..e1d4c62 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -12,6 +12,7 @@ use super::abstraction::Tree; pub struct AccountData { pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type + pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type } impl AccountData { @@ -34,15 +35,13 @@ impl AccountData { prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); - // Remove old entry - if let Some((old_key, _)) = self.find_event(room_id, user_id, &event_type)? 
{ - self.roomuserdataid_accountdata.remove(&old_key)?; - } + let mut roomuserdataid = prefix.clone(); + roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + roomuserdataid.push(0xff); + roomuserdataid.extend_from_slice(&event_type.as_bytes()); - let mut key = prefix; - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); - key.push(0xff); - key.extend_from_slice(event_type.as_ref().as_bytes()); + let mut key = prefix.clone(); + key.extend_from_slice(event_type.as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling if json.get("type").is_none() || json.get("content").is_none() { @@ -53,10 +52,20 @@ impl AccountData { } self.roomuserdataid_accountdata.insert( - &key, + &roomuserdataid, &serde_json::to_vec(&json).expect("to_vec always works on json values"), )?; + let prev = self.roomusertype_roomuserdataid.get(&key)?; + + self.roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + + // Remove old entry + if let Some(prev) = prev { + self.roomuserdataid_accountdata.remove(&prev)?; + } + Ok(()) } @@ -68,9 +77,27 @@ impl AccountData { user_id: &UserId, kind: EventType, ) -> Result> { - self.find_event(room_id, user_id, &kind)? - .map(|(_, v)| { - serde_json::from_slice(&v).map_err(|_| Error::bad_database("could not deserialize")) + let mut key = room_id + .map(|r| r.to_string()) + .unwrap_or_default() + .as_bytes() + .to_vec(); + key.push(0xff); + key.extend_from_slice(&user_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(kind.as_ref().as_bytes()); + + self.roomusertype_roomuserdataid + .get(&key)? + .and_then(|roomuserdataid| { + self.roomuserdataid_accountdata + .get(&roomuserdataid) + .transpose() + }) + .transpose()? + .map(|data| { + serde_json::from_slice(&data) + .map_err(|_| Error::bad_database("could not deserialize")) }) .transpose() } @@ -123,37 +150,4 @@ impl AccountData { Ok(userdata) } - - #[tracing::instrument(skip(self, room_id, user_id, kind))] - fn find_event( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - kind: &EventType, - ) -> Result, Vec)>> { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&user_id.as_bytes()); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - let kind = kind.clone(); - - Ok(self - .roomuserdataid_accountdata - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .find(move |(k, _)| { - k.rsplit(|&b| b == 0xff) - .next() - .map(|current_event_type| current_event_type == kind.as_ref().as_bytes()) - .unwrap_or(false) - })) - } } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 9e81dd1..3df9ed4 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,10 +9,11 @@ use ruma::{ }, IncomingResponse, OutgoingRequest, SendAccessToken, }, - events::{room::power_levels::PowerLevelsEventContent, EventType}, + events::{room::power_levels::PowerLevelsEventContent, AnySyncRoomEvent, EventType}, identifiers::RoomName, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - uint, UInt, UserId, + serde::Raw, + uint, RoomId, UInt, UserId, }; use tracing::{error, info, warn}; @@ -172,7 +173,24 @@ pub async fn send_push_notice( let mut notify = None; let mut tweaks = Vec::new(); - for action in get_actions(user, &ruleset, pdu, db)? 
{ + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_value(ev.content.clone()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + for action in get_actions( + user, + &ruleset, + &power_levels, + &pdu.to_sync_room_event(), + &pdu.room_id, + db, + )? { let n = match action { Action::DontNotify => false, // TODO: Implement proper support for coalesce @@ -204,32 +222,24 @@ pub async fn send_push_notice( pub fn get_actions<'a>( user: &UserId, ruleset: &'a Ruleset, - pdu: &PduEvent, + power_levels: &PowerLevelsEventContent, + pdu: &Raw, + room_id: &RoomId, db: &Database, ) -> Result<&'a [Action]> { - let power_levels: PowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_value(ev.content.clone()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - let ctx = PushConditionRoomCtx { - room_id: pdu.room_id.clone(), + room_id: room_id.clone(), member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users .displayname(&user)? .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users, + users_power_levels: power_levels.users.clone(), default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications, + notification_power_levels: power_levels.notifications.clone(), }; - Ok(ruleset.get_actions(&pdu.to_sync_room_event(), &ctx)) + Ok(ruleset.get_actions(pdu, &ctx)) } #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8ada87f..79bb059 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -12,7 +12,9 @@ use ruma::{ api::{client::error::ErrorKind, federation}, events::{ ignored_user_list, push_rules, - room::{create::CreateEventContent, member, message}, + room::{ + create::CreateEventContent, member, message, power_levels::PowerLevelsEventContent, + }, AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, push::{self, Action, Tweak}, @@ -760,6 +762,18 @@ impl Rooms { .insert(pdu.event_id.as_bytes(), &pdu_id)?; // See if the event matches any known pushers + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_value(ev.content.clone()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let sync_pdu = pdu.to_sync_room_event(); + for user in db .rooms .room_members(&pdu.room_id) @@ -781,7 +795,14 @@ impl Rooms { let mut highlight = false; let mut notify = false; - for action in pusher::get_actions(&user, &rules_for_user, pdu, db)? { + for action in pusher::get_actions( + &user, + &rules_for_user, + &power_levels, + &sync_pdu, + &pdu.room_id, + db, + )? 
{ match action { Action::DontNotify => notify = false, // TODO: Implement proper support for coalesce From fcd127aadcee6345184a14822b02aaaafd354c2b Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Fri, 30 Jul 2021 15:29:45 +0200 Subject: [PATCH 0699/1727] Also push docker image to docker hub --- .gitlab-ci.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f9b78b..4fa515b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -245,7 +245,7 @@ build:cargo-deb:x86_64-unknown-linux-gnu: KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" before_script: - "mkdir -p /kaniko/.docker" - - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json' + - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' # Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image @@ -265,6 +265,9 @@ build:docker:main: --destination "$CI_REGISTRY_IMAGE/conduit:latest" --destination "$CI_REGISTRY_IMAGE/conduit:alpine" --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:alpine" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' @@ -295,4 +298,4 @@ publish:package: - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - + From dc85a8fafd5069eccf66125a6c27f40954ce0ea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 30 Jul 2021 18:05:26 +0200 Subject: [PATCH 0700/1727] fix: migration and push edu bugs --- Cargo.lock | 6 +++--- src/database.rs | 2 +- src/database/sending.rs | 42 +++++++++++++++++++---------------------- 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 485aeab..4f58ef3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -944,7 +944,7 @@ dependencies = [ [[package]] name = "heed" version = "0.10.6" -source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" +source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" dependencies = [ "bytemuck", "byteorder", @@ -962,12 +962,12 @@ dependencies = [ [[package]] name = "heed-traits" version = "0.7.0" -source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" +source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" [[package]] name = "heed-types" version = "0.7.2" -source = "git+https://github.com/timokoesters/heed.git?rev=c6b149fd5621999b0d5ef0c28e199015cfc60fa1#c6b149fd5621999b0d5ef0c28e199015cfc60fa1" +source = 
"git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" dependencies = [ "bincode", "bytemuck", diff --git a/src/database.rs b/src/database.rs index db0eae8..5e9e025 100644 --- a/src/database.rs +++ b/src/database.rs @@ -430,8 +430,8 @@ impl Database { // Upgrade user data store for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() { let mut parts = roomuserdataid.split(|&b| b == 0xff); - let user_id = parts.next().unwrap(); let room_id = parts.next().unwrap(); + let user_id = parts.next().unwrap(); let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap(); let mut key = room_id.to_vec(); diff --git a/src/database/sending.rs b/src/database/sending.rs index 506bc17..7d7a44a 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -599,29 +599,25 @@ impl Sending { match event { SendingEventType::Pdu(pdu_id) => { // TODO: check room version and remove event_id if needed - pdu_jsons.push(serde_json::from_str( - PduEvent::convert_to_outgoing_federation_event( - db.rooms - .get_pdu_json_from_id(&pdu_id) - .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? - .ok_or_else(|| { - ( - OutgoingKind::Normal(server.clone()), - Error::bad_database( - "[Normal] Event in servernamevent_datas not found in db.", - ), - ) - })?, - ) - .json() - .get(), - ) - .expect("Raw<..> is always valid")); + let raw = PduEvent::convert_to_outgoing_federation_event( + db.rooms + .get_pdu_json_from_id(&pdu_id) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? + .ok_or_else(|| { + ( + OutgoingKind::Normal(server.clone()), + Error::bad_database( + "[Normal] Event in servernamevent_datas not found in db.", + ), + ) + })?, + ); + pdu_jsons.push(raw); } SendingEventType::Edu(edu) => { - edu_jsons.push( - serde_json::from_slice(edu).expect("Raw<..> is always valid"), - ); + if let Ok(raw) = serde_json::from_slice(edu) { + edu_jsons.push(raw); + } } } } @@ -689,7 +685,7 @@ impl Sending { Error::bad_database("Invalid server string in server_currenttransaction") })?), if event.starts_with(b"*") { - SendingEventType::Edu(value.to_vec()) + SendingEventType::Edu(value) } else { SendingEventType::Pdu(event.to_vec()) }, @@ -707,7 +703,7 @@ impl Sending { ( OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), if event.starts_with(b"*") { - SendingEventType::Edu(event[1..].to_vec()) + SendingEventType::Edu(value) } else { SendingEventType::Pdu(event.to_vec()) }, From 0f045890eb920060d0fdf50bdcfb898b54169139 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 30 Jul 2021 19:27:37 +0200 Subject: [PATCH 0701/1727] docs: make it clear that database_path is a folder --- conduit-example.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index d184991..7d419cf 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -12,7 +12,7 @@ #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/conduit/conduit.db" +database_path = "/var/lib/conduit/" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port @@ -44,4 +44,4 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re proxy = "none" # more examples can be found at src/database/proxy.rs:6 # The total amount of memory that the database will use. 
-#db_cache_capacity_mb = 200 \ No newline at end of file +#db_cache_capacity_mb = 200 From 2c4f966d60ad59ba7185c70ca8e8681bc26fac18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 1 Aug 2021 16:59:52 +0200 Subject: [PATCH 0702/1727] improvement: use sqlite properly --- src/database.rs | 66 +++++++----------------------- src/database/abstraction/sqlite.rs | 48 ++++------------------ 2 files changed, 24 insertions(+), 90 deletions(-) diff --git a/src/database.rs b/src/database.rs index 5e9e025..baf66b5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -47,12 +47,8 @@ pub struct Config { db_cache_capacity_mb: f64, #[serde(default = "default_sqlite_read_pool_size")] sqlite_read_pool_size: usize, - #[serde(default = "true_fn")] - sqlite_wal_clean_timer: bool, #[serde(default = "default_sqlite_wal_clean_second_interval")] sqlite_wal_clean_second_interval: u32, - #[serde(default = "default_sqlite_wal_clean_second_timeout")] - sqlite_wal_clean_second_timeout: u32, #[serde(default = "default_sqlite_spillover_reap_fraction")] sqlite_spillover_reap_fraction: f64, #[serde(default = "default_sqlite_spillover_reap_interval_secs")] @@ -120,11 +116,7 @@ fn default_sqlite_read_pool_size() -> usize { } fn default_sqlite_wal_clean_second_interval() -> u32 { - 60 * 60 -} - -fn default_sqlite_wal_clean_second_timeout() -> u32 { - 2 + 15 * 60 // every 15 minutes } fn default_sqlite_spillover_reap_fraction() -> f64 { @@ -465,7 +457,7 @@ impl Database { #[cfg(feature = "sqlite")] { - Self::start_wal_clean_task(&db, &config).await; + Self::start_wal_clean_task(Arc::clone(&db), &config).await; Self::start_spillover_reap_task(builder, &config).await; } @@ -620,24 +612,17 @@ impl Database { } #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(lock, config))] - pub async fn start_wal_clean_task(lock: &Arc>, config: &Config) { - use tokio::time::{interval, timeout}; + #[tracing::instrument(skip(db, config))] + pub async fn start_wal_clean_task(db: Arc>, config: &Config) { + use tokio::time::interval; #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; use tracing::info; - use std::{ - sync::Weak, - time::{Duration, Instant}, - }; + use std::time::{Duration, Instant}; - let weak: Weak> = Arc::downgrade(&lock); - - let lock_timeout = Duration::from_secs(config.sqlite_wal_clean_second_timeout as u64); let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64); - let do_timer = config.sqlite_wal_clean_timer; tokio::spawn(async move { let mut i = interval(timer_interval); @@ -647,45 +632,24 @@ impl Database { loop { #[cfg(unix)] tokio::select! { - _ = i.tick(), if do_timer => { - info!(target: "wal-trunc", "Timer ticked") + _ = i.tick() => { + info!("wal-trunc: Timer ticked"); } _ = s.recv() => { - info!(target: "wal-trunc", "Received SIGHUP") + info!("wal-trunc: Received SIGHUP"); } }; #[cfg(not(unix))] - if do_timer { + { i.tick().await; - info!(target: "wal-trunc", "Timer ticked") - } else { - // timer disabled, and there's no concept of signals on windows, bailing... - return; + info!("wal-trunc: Timer ticked") } - if let Some(arc) = Weak::upgrade(&weak) { - info!(target: "wal-trunc", "Rotating sync helpers..."); - // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock. - // Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out. 
- arc.read().await.globals.rotate.fire(); - info!(target: "wal-trunc", "Locking..."); - let guard = { - if let Ok(guard) = timeout(lock_timeout, arc.write()).await { - guard - } else { - info!(target: "wal-trunc", "Lock failed in timeout, canceled."); - continue; - } - }; - info!(target: "wal-trunc", "Locked, flushing..."); - let start = Instant::now(); - if let Err(e) = guard.flush_wal() { - error!(target: "wal-trunc", "Errored: {}", e); - } else { - info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed()); - } + let start = Instant::now(); + if let Err(e) = db.read().await.flush_wal() { + error!("wal-trunc: Errored: {}", e); } else { - break; + info!("wal-trunc: Flushed in {:?}", start.elapsed()); } } }); diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index a46d3ad..bbf7508 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -4,7 +4,7 @@ use crossbeam::channel::{ bounded, unbounded, Receiver as ChannelReceiver, Sender as ChannelSender, TryRecvError, }; use parking_lot::{Mutex, MutexGuard, RwLock}; -use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension, Params}; +use rusqlite::{Connection, DatabaseName::Main, OptionalExtension, Params}; use std::{ collections::HashMap, future::Future, @@ -122,16 +122,11 @@ impl Pool { fn prepare_conn>(path: P, cache_size: Option) -> Result { let conn = Connection::open(path)?; - conn.pragma_update(Some(Main), "journal_mode", &"WAL".to_owned())?; - - // conn.pragma_update(Some(Main), "wal_autocheckpoint", &250)?; - - // conn.pragma_update(Some(Main), "wal_checkpoint", &"FULL".to_owned())?; - - conn.pragma_update(Some(Main), "synchronous", &"OFF".to_owned())?; + conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; + conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; if let Some(cache_kib) = cache_size { - conn.pragma_update(Some(Main), "cache_size", &(-Into::::into(cache_kib)))?; + conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_kib)))?; } Ok(conn) @@ -193,9 +188,6 @@ impl DatabaseEngine for Engine { config.db_cache_capacity_mb, )?; - pool.write_lock() - .execute("CREATE TABLE IF NOT EXISTS _noop (\"key\" INT)", params![])?; - let arc = Arc::new(Engine { pool, iter_pool: Mutex::new(ThreadPool::new(10)), @@ -205,7 +197,7 @@ impl DatabaseEngine for Engine { } fn open_tree(self: &Arc, name: &str) -> Result> { - self.pool.write_lock().execute(format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name).as_str(), [])?; + self.pool.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), @@ -215,37 +207,15 @@ impl DatabaseEngine for Engine { } fn flush(self: &Arc) -> Result<()> { - self.pool - .write_lock() - .execute_batch( - " - PRAGMA synchronous=FULL; - BEGIN; - DELETE FROM _noop; - INSERT INTO _noop VALUES (1); - COMMIT; - PRAGMA synchronous=OFF; - ", - ) - .map_err(Into::into) + // we enabled PRAGMA synchronous=normal, so this should not be necessary + Ok(()) } } impl Engine { pub fn flush_wal(self: &Arc) -> Result<()> { - self.pool - .write_lock() - .execute_batch( - " - PRAGMA synchronous=FULL; PRAGMA wal_checkpoint=TRUNCATE; - BEGIN; - DELETE FROM _noop; - INSERT INTO _noop VALUES (1); - COMMIT; - PRAGMA wal_checkpoint=PASSIVE; PRAGMA synchronous=OFF; - ", - ) - .map_err(Into::into) + self.pool.write_lock().pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; 
+ Ok(()) } // Reaps (at most) (.len() * `fraction`) (rounded down, min 1) connections. From bd63797213cec5dbf137c047505c6606d4cd0c5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 2 Aug 2021 10:13:34 +0200 Subject: [PATCH 0703/1727] improvement: make better use of sqlite connections --- src/client_server/account.rs | 22 +- src/client_server/alias.rs | 7 +- src/client_server/backup.rs | 18 +- src/client_server/config.rs | 8 +- src/client_server/device.rs | 6 +- src/client_server/directory.rs | 2 +- src/client_server/keys.rs | 8 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 327 ++++++++++++----------- src/client_server/message.rs | 2 +- src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 18 +- src/client_server/push.rs | 14 +- src/client_server/read_marker.rs | 4 +- src/client_server/redact.rs | 2 +- src/client_server/room.rs | 4 +- src/client_server/session.rs | 6 +- src/client_server/state.rs | 4 +- src/client_server/sync.rs | 10 +- src/client_server/tag.rs | 4 +- src/client_server/to_device.rs | 2 +- src/database.rs | 48 +--- src/database/abstraction.rs | 6 +- src/database/abstraction/heed.rs | 2 +- src/database/abstraction/sqlite.rs | 400 ++++++++++------------------- src/database/appservice.rs | 23 +- src/database/globals.rs | 6 +- src/database/media.rs | 10 +- src/database/rooms.rs | 4 +- src/ruma_wrapper.rs | 4 +- src/server_server.rs | 15 +- 31 files changed, 422 insertions(+), 568 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index c00cc87..ca8b7b1 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -504,7 +504,7 @@ pub async fn register_route( info!("{} registered on this server", user_id); - db.flush().await?; + db.flush()?; Ok(register::Response { access_token: Some(token), @@ -580,7 +580,7 @@ pub async fn change_password_route( } } - db.flush().await?; + db.flush()?; Ok(change_password::Response {}.into()) } @@ -656,11 +656,17 @@ pub async fn deactivate_route( } // Leave all joined rooms and reject all invitations - for room_id in db.rooms.rooms_joined(&sender_user).chain( - db.rooms - .rooms_invited(&sender_user) - .map(|t| t.map(|(r, _)| r)), - ) { + let all_rooms = db + .rooms + .rooms_joined(&sender_user) + .chain( + db.rooms + .rooms_invited(&sender_user) + .map(|t| t.map(|(r, _)| r)), + ) + .collect::>(); + + for room_id in all_rooms { let room_id = room_id?; let event = member::MemberEventContent { membership: member::MembershipState::Leave, @@ -701,7 +707,7 @@ pub async fn deactivate_route( info!("{} deactivated their account", sender_user); - db.flush().await?; + db.flush()?; Ok(deactivate::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index f5d9f64..143e607 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -31,7 +31,7 @@ pub async fn create_alias_route( db.rooms .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - db.flush().await?; + db.flush()?; Ok(create_alias::Response::new().into()) } @@ -47,7 +47,7 @@ pub async fn delete_alias_route( ) -> ConduitResult { db.rooms.set_alias(&body.room_alias, None, &db.globals)?; - db.flush().await?; + db.flush()?; Ok(delete_alias::Response::new().into()) } @@ -85,8 +85,7 @@ pub async fn get_alias_helper( match db.rooms.id_from_alias(&room_alias)? 
{ Some(r) => room_id = Some(r), None => { - let iter = db.appservice.iter_all()?; - for (_id, registration) in iter.filter_map(|r| r.ok()) { + for (_id, registration) in db.appservice.all()? { let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 6d540cb..06f9818 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -26,7 +26,7 @@ pub async fn create_backup_route( .key_backups .create_backup(&sender_user, &body.algorithm, &db.globals)?; - db.flush().await?; + db.flush()?; Ok(create_backup::Response { version }.into()) } @@ -44,7 +44,7 @@ pub async fn update_backup_route( db.key_backups .update_backup(&sender_user, &body.version, &body.algorithm, &db.globals)?; - db.flush().await?; + db.flush()?; Ok(update_backup::Response {}.into()) } @@ -117,7 +117,7 @@ pub async fn delete_backup_route( db.key_backups.delete_backup(&sender_user, &body.version)?; - db.flush().await?; + db.flush()?; Ok(delete_backup::Response {}.into()) } @@ -147,7 +147,7 @@ pub async fn add_backup_keys_route( } } - db.flush().await?; + db.flush()?; Ok(add_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), @@ -179,7 +179,7 @@ pub async fn add_backup_key_sessions_route( )? } - db.flush().await?; + db.flush()?; Ok(add_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), @@ -209,7 +209,7 @@ pub async fn add_backup_key_session_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(add_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), @@ -288,7 +288,7 @@ pub async fn delete_backup_keys_route( db.key_backups .delete_all_keys(&sender_user, &body.version)?; - db.flush().await?; + db.flush()?; Ok(delete_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), @@ -311,7 +311,7 @@ pub async fn delete_backup_key_sessions_route( db.key_backups .delete_room_keys(&sender_user, &body.version, &body.room_id)?; - db.flush().await?; + db.flush()?; Ok(delete_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), @@ -334,7 +334,7 @@ pub async fn delete_backup_key_session_route( db.key_backups .delete_room_key(&sender_user, &body.version, &body.room_id, &body.session_id)?; - db.flush().await?; + db.flush()?; Ok(delete_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), diff --git a/src/client_server/config.rs b/src/client_server/config.rs index b9826bf..b692749 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -43,7 +43,7 @@ pub async fn set_global_account_data_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(set_global_account_data::Response {}.into()) } @@ -78,7 +78,7 @@ pub async fn set_room_account_data_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(set_room_account_data::Response {}.into()) } @@ -98,7 +98,7 @@ pub async fn get_global_account_data_route( .account_data .get::>(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush().await?; + db.flush()?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
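// The account-data reads in the routes above go through the two-tree layout this
// patch introduces. A small sketch (hypothetical helpers) of how the two keys
// relate: the stable index key (room + user + type) stores the counted key under
// which the JSON itself lives, so the latest entry is one point lookup instead of
// a backwards scan.
fn roomusertype_key(room_id: &str, user_id: &str, event_type: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(event_type.as_bytes());
    key
}

fn roomuserdataid_key(room_id: &str, user_id: &str, count: u64, event_type: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes()); // global counter keeps versions ordered
    key.push(0xff);
    key.extend_from_slice(event_type.as_bytes());
    key
}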
@@ -129,7 +129,7 @@ pub async fn get_room_account_data_route( body.event_type.clone().into(), )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush().await?; + db.flush()?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 085d034..5210467 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -71,7 +71,7 @@ pub async fn update_device_route( db.users .update_device_metadata(&sender_user, &body.device_id, &device)?; - db.flush().await?; + db.flush()?; Ok(update_device::Response {}.into()) } @@ -123,7 +123,7 @@ pub async fn delete_device_route( db.users.remove_device(&sender_user, &body.device_id)?; - db.flush().await?; + db.flush()?; Ok(delete_device::Response {}.into()) } @@ -177,7 +177,7 @@ pub async fn delete_devices_route( db.users.remove_device(&sender_user, &device_id)? } - db.flush().await?; + db.flush()?; Ok(delete_devices::Response {}.into()) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index f1ec4b8..7cab1a7 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -100,7 +100,7 @@ pub async fn set_room_visibility_route( } } - db.flush().await?; + db.flush()?; Ok(set_room_visibility::Response {}.into()) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 418e41a..8db7688 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -64,7 +64,7 @@ pub async fn upload_keys_route( } } - db.flush().await?; + db.flush()?; Ok(upload_keys::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, @@ -105,7 +105,7 @@ pub async fn claim_keys_route( ) -> ConduitResult { let response = claim_keys_helper(&body.one_time_keys, &db).await?; - db.flush().await?; + db.flush()?; Ok(response.into()) } @@ -166,7 +166,7 @@ pub async fn upload_signing_keys_route( )?; } - db.flush().await?; + db.flush()?; Ok(upload_signing_keys::Response {}.into()) } @@ -227,7 +227,7 @@ pub async fn upload_signatures_route( } } - db.flush().await?; + db.flush()?; Ok(upload_signatures::Response {}.into()) } diff --git a/src/client_server/media.rs b/src/client_server/media.rs index eaaf939..2bd189a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -52,7 +52,7 @@ pub async fn create_content_route( ) .await?; - db.flush().await?; + db.flush()?; Ok(create_content::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea7fdab..895ad27 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -74,7 +74,7 @@ pub async fn join_room_by_id_route( ) .await; - db.flush().await?; + db.flush()?; ret } @@ -125,7 +125,7 @@ pub async fn join_room_by_id_or_alias_route( ) .await?; - db.flush().await?; + db.flush()?; Ok(join_room_by_id_or_alias::Response { room_id: join_room_response.0.room_id, @@ -146,7 +146,7 @@ pub async fn leave_room_route( db.rooms.leave_room(sender_user, &body.room_id, &db).await?; - db.flush().await?; + db.flush()?; Ok(leave_room::Response::new().into()) } @@ -164,7 +164,7 @@ pub async fn invite_user_route( if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; - db.flush().await?; + db.flush()?; Ok(invite_user::Response {}.into()) } 
else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -229,7 +229,7 @@ pub async fn kick_user_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(kick_user::Response::new().into()) } @@ -301,7 +301,7 @@ pub async fn ban_user_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(ban_user::Response::new().into()) } @@ -363,7 +363,7 @@ pub async fn unban_user_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(unban_user::Response::new().into()) } @@ -381,7 +381,7 @@ pub async fn forget_room_route( db.rooms.forget(&body.room_id, &sender_user)?; - db.flush().await?; + db.flush()?; Ok(forget_room::Response::new().into()) } @@ -712,7 +712,7 @@ async fn join_room_by_id_helper( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(join_room_by_id::Response::new(room_id.clone()).into()) } @@ -788,155 +788,165 @@ pub async fn invite_helper<'a>( db: &Database, is_direct: bool, ) -> Result<()> { - let mutex = Arc::clone( - db.globals - .roomid_mutex - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - if user_id.server_name() != db.globals.server_name() { - let prev_events = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = db - .rooms - .room_state_get(room_id, &EventType::RoomCreate, "")?; - - let create_event_content = create_event - .as_ref() - .map(|create_event| { - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) - }) - .transpose()?; - - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let content = serde_json::to_value(MemberEventContent { - avatar_url: None, - displayname: None, - is_direct: Some(is_direct), - membership: MembershipState::Invite, - third_party_invite: None, - blurhash: None, - }) - .expect("member event is valid value"); - - let state_key = user_id.to_string(); - let kind = EventType::RoomMember; - - let auth_events = - db.rooms - .get_auth_events(room_id, &kind, &sender_user, Some(&state_key), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + let (room_version_id, pdu_json, invite_room_state) = { + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), ); - } + let mutex_lock = mutex.lock().await; - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender_user.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind, - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events + let prev_events = db + .rooms + .get_pdu_leaves(room_id)? + .into_iter() + .take(20) + .collect::>(); + + let create_event = db + .rooms + .room_state_get(room_id, &EventType::RoomCreate, "")?; + + let create_event_content = create_event + .as_ref() + .map(|create_event| { + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) + }) + .transpose()?; + + let create_prev_event = if prev_events.len() == 1 + && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) + { + create_event + } else { + None + }; + + // If there was no create event yet, assume we are creating a version 6 room right now + let room_version_id = create_event_content + .map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); + let room_version = + RoomVersion::new(&room_version_id).expect("room version is supported"); + + let content = serde_json::to_value(MemberEventContent { + avatar_url: None, + displayname: None, + is_direct: Some(is_direct), + membership: MembershipState::Invite, + third_party_invite: None, + blurhash: None, + }) + .expect("member event is valid value"); + + let state_key = user_id.to_string(); + let kind = EventType::RoomMember; + + let auth_events = db.rooms.get_auth_events( + room_id, + &kind, + &sender_user, + Some(&state_key), + &content, + )?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned, - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = BTreeMap::new(); + + if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? 
{ + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + ); + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender_user.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind, + content, + state_key: Some(state_key), + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts: None, + unsigned, + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }; + + let auth_check = state_res::auth_check( + &room_version, + &Arc::new(pdu.clone()), + create_prev_event, + &auth_events, + None, // TODO: third_party_invite + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut pdu_json, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; + + drop(mutex_lock); + + (room_version_id, pdu_json, invite_room_state) }; - let auth_check = state_res::auth_check( - &room_version, - &Arc::new(pdu.clone()), - create_prev_event, - &auth_events, - None, // TODO: third_party_invite - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - drop(mutex_lock); - - let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; let response = db .sending .send_federation_request( @@ -1008,6 +1018,17 @@ pub async fn invite_helper<'a>( return Ok(()); } + let mutex = Arc::clone( + db.globals + .roomid_mutex + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + + let mutex_lock = mutex.lock().await; + db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, @@ -1030,5 +1051,7 @@ pub async fn invite_helper<'a>( &mutex_lock, )?; + drop(mutex_lock); + Ok(()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3d8218c..f77ca89 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -87,7 +87,7 @@ pub async 
fn send_message_event_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(send_message_event::Response::new(event_id).into()) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index ca78a88..7312cb3 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -41,7 +41,7 @@ pub async fn set_presence_route( )?; } - db.flush().await?; + db.flush()?; Ok(set_presence::Response {}.into()) } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 693254f..648afea 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -32,9 +32,10 @@ pub async fn set_displayname_route( .set_displayname(&sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - for (pdu_builder, room_id) in db - .rooms - .rooms_joined(&sender_user) + let all_rooms_joined = db.rooms.rooms_joined(&sender_user).collect::>(); + + for (pdu_builder, room_id) in all_rooms_joined + .into_iter() .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -109,7 +110,7 @@ pub async fn set_displayname_route( )?; } - db.flush().await?; + db.flush()?; Ok(set_display_name::Response {}.into()) } @@ -165,9 +166,10 @@ pub async fn set_avatar_url_route( db.users.set_blurhash(&sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms - for (pdu_builder, room_id) in db - .rooms - .rooms_joined(&sender_user) + let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); + + for (pdu_builder, room_id) in all_joined_rooms + .into_iter() .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -242,7 +244,7 @@ pub async fn set_avatar_url_route( )?; } - db.flush().await?; + db.flush()?; Ok(set_avatar_url::Response {}.into()) } diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 867b452..9489f07 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -192,7 +192,7 @@ pub async fn set_pushrule_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(set_pushrule::Response {}.into()) } @@ -248,7 +248,7 @@ pub async fn get_pushrule_actions_route( _ => None, }; - db.flush().await?; + db.flush()?; Ok(get_pushrule_actions::Response { actions: actions.unwrap_or_default(), @@ -325,7 +325,7 @@ pub async fn set_pushrule_actions_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(set_pushrule_actions::Response {}.into()) } @@ -386,7 +386,7 @@ pub async fn get_pushrule_enabled_route( _ => false, }; - db.flush().await?; + db.flush()?; Ok(get_pushrule_enabled::Response { enabled }.into()) } @@ -465,7 +465,7 @@ pub async fn set_pushrule_enabled_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(set_pushrule_enabled::Response {}.into()) } @@ -534,7 +534,7 @@ pub async fn delete_pushrule_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(delete_pushrule::Response {}.into()) } @@ -570,7 +570,7 @@ pub async fn set_pushers_route( db.pusher.set_pusher(sender_user, pusher)?; - db.flush().await?; + db.flush()?; Ok(set_pusher::Response::default().into()) } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index f5e2924..85b0bf6 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -75,7 +75,7 @@ pub async fn set_read_marker_route( )?; } - db.flush().await?; + db.flush()?; Ok(set_read_marker::Response {}.into()) } @@ -128,7 +128,7 @@ pub async fn create_receipt_route( &db.globals, )?; - db.flush().await?; + 
db.flush()?; Ok(create_receipt::Response {}.into()) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 2e4c651..63d3d4a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -49,7 +49,7 @@ pub async fn redact_event_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index d5188e8..1b14a93 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -301,7 +301,7 @@ pub async fn create_room_route( info!("{} created a room", sender_user); - db.flush().await?; + db.flush()?; Ok(create_room::Response::new(room_id).into()) } @@ -561,7 +561,7 @@ pub async fn upgrade_room_route( drop(mutex_lock); - db.flush().await?; + db.flush()?; // Return the replacement room id Ok(upgrade_room::Response { replacement_room }.into()) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index f8452e0..d4d3c03 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -143,7 +143,7 @@ pub async fn login_route( info!("{} logged in", user_id); - db.flush().await?; + db.flush()?; Ok(login::Response { user_id, @@ -175,7 +175,7 @@ pub async fn logout_route( db.users.remove_device(&sender_user, sender_device)?; - db.flush().await?; + db.flush()?; Ok(logout::Response::new().into()) } @@ -204,7 +204,7 @@ pub async fn logout_all_route( db.users.remove_device(&sender_user, &device_id)?; } - db.flush().await?; + db.flush()?; Ok(logout_all::Response::new().into()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e0e5d29..5afac03 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -43,7 +43,7 @@ pub async fn send_state_event_for_key_route( ) .await?; - db.flush().await?; + db.flush()?; Ok(send_state_event::Response { event_id }.into()) } @@ -69,7 +69,7 @@ pub async fn send_state_event_for_empty_key_route( ) .await?; - db.flush().await?; + db.flush()?; Ok(send_state_event::Response { event_id }.into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 541045e..b09a212 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -186,7 +186,8 @@ async fn sync_helper( .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(&sender_user) { + let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); + for room_id in all_joined_rooms { let room_id = room_id?; // Get and drop the lock to wait for remaining operations to finish @@ -198,6 +199,7 @@ async fn sync_helper( .entry(room_id.clone()) .or_default(), ); + let mutex_lock = mutex.lock().await; drop(mutex_lock); @@ -658,7 +660,8 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - for result in db.rooms.rooms_left(&sender_user) { + let all_left_rooms = db.rooms.rooms_left(&sender_user).collect::>(); + for result in all_left_rooms { let (room_id, left_state_events) = result?; // Get and drop the lock to wait for remaining operations to finish @@ -697,7 +700,8 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - for result in db.rooms.rooms_invited(&sender_user) { + let all_invited_rooms = db.rooms.rooms_invited(&sender_user).collect::>(); + for result in all_invited_rooms { let (room_id, invite_state_events) = result?; // Get and drop the lock to wait for remaining operations to finish diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 223d122..5582bcd 100644 --- a/src/client_server/tag.rs 
+++ b/src/client_server/tag.rs @@ -40,7 +40,7 @@ pub async fn update_tag_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(create_tag::Response {}.into()) } @@ -74,7 +74,7 @@ pub async fn delete_tag_route( &db.globals, )?; - db.flush().await?; + db.flush()?; Ok(delete_tag::Response {}.into()) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index d3f7d25..69147c9 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -95,7 +95,7 @@ pub async fn send_event_to_device_route( db.transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; - db.flush().await?; + db.flush()?; Ok(send_event_to_device::Response {}.into()) } diff --git a/src/database.rs b/src/database.rs index baf66b5..5b47302 100644 --- a/src/database.rs +++ b/src/database.rs @@ -45,14 +45,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, - #[serde(default = "default_sqlite_read_pool_size")] - sqlite_read_pool_size: usize, #[serde(default = "default_sqlite_wal_clean_second_interval")] sqlite_wal_clean_second_interval: u32, - #[serde(default = "default_sqlite_spillover_reap_fraction")] - sqlite_spillover_reap_fraction: f64, - #[serde(default = "default_sqlite_spillover_reap_interval_secs")] - sqlite_spillover_reap_interval_secs: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -111,22 +105,10 @@ fn default_db_cache_capacity_mb() -> f64 { 200.0 } -fn default_sqlite_read_pool_size() -> usize { - num_cpus::get().max(1) -} - fn default_sqlite_wal_clean_second_interval() -> u32 { 15 * 60 // every 15 minutes } -fn default_sqlite_spillover_reap_fraction() -> f64 { - 0.5 -} - -fn default_sqlite_spillover_reap_interval_secs() -> u32 { - 60 -} - fn default_max_request_size() -> u32 { 20 * 1024 * 1024 // Default to 20 MB } @@ -458,7 +440,6 @@ impl Database { #[cfg(feature = "sqlite")] { Self::start_wal_clean_task(Arc::clone(&db), &config).await; - Self::start_spillover_reap_task(builder, &config).await; } Ok(db) @@ -568,7 +549,7 @@ impl Database { } #[tracing::instrument(skip(self))] - pub async fn flush(&self) -> Result<()> { + pub fn flush(&self) -> Result<()> { let start = std::time::Instant::now(); let res = self._db.flush(); @@ -584,33 +565,6 @@ impl Database { self._db.flush_wal() } - #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(engine, config))] - pub async fn start_spillover_reap_task(engine: Arc, config: &Config) { - let fraction = config.sqlite_spillover_reap_fraction.clamp(0.01, 1.0); - let interval_secs = config.sqlite_spillover_reap_interval_secs as u64; - - let weak = Arc::downgrade(&engine); - - tokio::spawn(async move { - use tokio::time::interval; - - use std::{sync::Weak, time::Duration}; - - let mut i = interval(Duration::from_secs(interval_secs)); - - loop { - i.tick().await; - - if let Some(arc) = Weak::upgrade(&weak) { - arc.reap_spillover_by_fraction(fraction); - } else { - break; - } - } - }); - } - #[cfg(feature = "sqlite")] #[tracing::instrument(skip(db, config))] pub async fn start_wal_clean_task(db: Arc>, config: &Config) { diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 8ccac78..d0fa780 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -28,20 +28,20 @@ pub trait Tree: Send + Sync { fn remove(&self, key: &[u8]) -> Result<()>; - fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a>; + fn iter<'a>(&'a self) -> 
Box, Vec)> + 'a>; fn iter_from<'a>( &'a self, from: &[u8], backwards: bool, - ) -> Box, Vec)> + Send + 'a>; + ) -> Box, Vec)> + 'a>; fn increment(&self, key: &[u8]) -> Result>; fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Vec)> + Send + 'a>; + ) -> Box, Vec)> + 'a>; fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs index 0421b14..e767e22 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -81,7 +81,7 @@ impl EngineTree { let (s, r) = bounded::(100); let engine = Arc::clone(&self.engine); - let lock = self.engine.iter_pool.lock().unwrap(); + let lock = self.engine.iter_pool.lock().await; if lock.active_count() < lock.max_count() { lock.execute(move || { iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s); diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index bbf7508..d2ecb3a 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,133 +1,61 @@ use super::{DatabaseEngine, Tree}; use crate::{database::Config, Result}; -use crossbeam::channel::{ - bounded, unbounded, Receiver as ChannelReceiver, Sender as ChannelSender, TryRecvError, -}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use rusqlite::{Connection, DatabaseName::Main, OptionalExtension, Params}; +use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ + cell::RefCell, collections::HashMap, future::Future, - ops::Deref, path::{Path, PathBuf}, pin::Pin, sync::Arc, time::{Duration, Instant}, }; -use threadpool::ThreadPool; use tokio::sync::oneshot::Sender; use tracing::{debug, warn}; -struct Pool { - writer: Mutex, - readers: Vec>, - spills: ConnectionRecycler, - spill_tracker: Arc<()>, - path: PathBuf, -} - pub const MILLI: Duration = Duration::from_millis(1); -enum HoldingConn<'a> { - FromGuard(MutexGuard<'a, Connection>), - FromRecycled(RecycledConn, Arc<()>), +thread_local! { + static READ_CONNECTION: RefCell> = RefCell::new(None); } -impl<'a> Deref for HoldingConn<'a> { - type Target = Connection; +struct PreparedStatementIterator<'a> { + pub iterator: Box + 'a>, + pub statement_ref: NonAliasingBox>, +} - fn deref(&self) -> &Self::Target { - match self { - HoldingConn::FromGuard(guard) => guard.deref(), - HoldingConn::FromRecycled(conn, _) => conn.deref(), - } +impl Iterator for PreparedStatementIterator<'_> { + type Item = TupleOfBytes; + + fn next(&mut self) -> Option { + self.iterator.next() } } -struct ConnectionRecycler(ChannelSender, ChannelReceiver); - -impl ConnectionRecycler { - fn new() -> Self { - let (s, r) = unbounded(); - Self(s, r) - } - - fn recycle(&self, conn: Connection) -> RecycledConn { - let sender = self.0.clone(); - - RecycledConn(Some(conn), sender) - } - - fn try_take(&self) -> Option { - match self.1.try_recv() { - Ok(conn) => Some(conn), - Err(TryRecvError::Empty) => None, - // as this is pretty impossible, a panic is warranted if it ever occurs - Err(TryRecvError::Disconnected) => panic!("Receiving channel was disconnected. A a sender is owned by the current struct, this should never happen(!!!)") - } - } -} - -struct RecycledConn( - Option, // To allow moving out of the struct when `Drop` is called. 
- ChannelSender, -); - -impl Deref for RecycledConn { - type Target = Connection; - - fn deref(&self) -> &Self::Target { - self.0 - .as_ref() - .expect("RecycledConn does not have a connection in Option<>") - } -} - -impl Drop for RecycledConn { +struct NonAliasingBox(*mut T); +impl Drop for NonAliasingBox { fn drop(&mut self) { - if let Some(conn) = self.0.take() { - debug!("Recycled connection"); - if let Err(e) = self.1.send(conn) { - warn!("Recycling a connection led to the following error: {:?}", e) - } - } + unsafe { Box::from_raw(self.0) }; } } -impl Pool { - fn new>(path: P, num_readers: usize, total_cache_size_mb: f64) -> Result { - // calculates cache-size per permanent connection - // 1. convert MB to KiB - // 2. divide by permanent connections - // 3. round down to nearest integer - let cache_size: u32 = ((total_cache_size_mb * 1024.0) / (num_readers + 1) as f64) as u32; +pub struct Engine { + writer: Mutex, - let writer = Mutex::new(Self::prepare_conn(&path, Some(cache_size))?); + path: PathBuf, + cache_size_per_thread: u32, +} - let mut readers = Vec::new(); - - for _ in 0..num_readers { - readers.push(Mutex::new(Self::prepare_conn(&path, Some(cache_size))?)) - } - - Ok(Self { - writer, - readers, - spills: ConnectionRecycler::new(), - spill_tracker: Arc::new(()), - path: path.as_ref().to_path_buf(), - }) - } - - fn prepare_conn>(path: P, cache_size: Option) -> Result { - let conn = Connection::open(path)?; +impl Engine { + fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { + let conn = Connection::open(&path)?; + conn.pragma_update(Some(Main), "page_size", &32768)?; conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; - - if let Some(cache_kib) = cache_size { - conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_kib)))?; - } + conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; Ok(conn) } @@ -136,68 +64,52 @@ impl Pool { self.writer.lock() } - fn read_lock(&self) -> HoldingConn<'_> { - // First try to get a connection from the permanent pool - for r in &self.readers { - if let Some(reader) = r.try_lock() { - return HoldingConn::FromGuard(reader); + fn read_lock(&self) -> &'static Connection { + READ_CONNECTION.with(|cell| { + let connection = &mut cell.borrow_mut(); + + if (*connection).is_none() { + let c = Box::leak(Box::new( + Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap(), + )); + **connection = Some(c); } - } - debug!("read_lock: All permanent readers locked, obtaining spillover reader..."); - - // We didn't get a connection from the permanent pool, so we'll dumpster-dive for recycled connections. - // Either we have a connection or we dont, if we don't, we make a new one. - let conn = match self.spills.try_take() { - Some(conn) => conn, - None => { - debug!("read_lock: No recycled connections left, creating new one..."); - Self::prepare_conn(&self.path, None).unwrap() - } - }; - - // Clone the spill Arc to mark how many spilled connections actually exist. - let spill_arc = Arc::clone(&self.spill_tracker); - - // Get a sense of how many connections exist now. - let now_count = Arc::strong_count(&spill_arc) - 1 /* because one is held by the pool */; - - // If the spillover readers are more than the number of total readers, there might be a problem. - if now_count > self.readers.len() { - warn!( - "Database is under high load. 
Consider increasing sqlite_read_pool_size ({} spillover readers exist)", - now_count - ); - } - - // Return the recyclable connection. - HoldingConn::FromRecycled(self.spills.recycle(conn), spill_arc) + connection.unwrap() + }) } -} -pub struct Engine { - pool: Pool, - iter_pool: Mutex, + pub fn flush_wal(self: &Arc) -> Result<()> { + self.write_lock() + .pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; + Ok(()) + } } impl DatabaseEngine for Engine { fn open(config: &Config) -> Result> { - let pool = Pool::new( - Path::new(&config.database_path).join("conduit.db"), - config.sqlite_read_pool_size, - config.db_cache_capacity_mb, - )?; + let path = Path::new(&config.database_path).join("conduit.db"); + + // calculates cache-size per permanent connection + // 1. convert MB to KiB + // 2. divide by permanent connections + // 3. round down to nearest integer + let cache_size_per_thread: u32 = + ((config.db_cache_capacity_mb * 1024.0) / (num_cpus::get().max(1) + 1) as f64) as u32; + + let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { - pool, - iter_pool: Mutex::new(ThreadPool::new(10)), + writer, + path, + cache_size_per_thread, }); Ok(arc) } fn open_tree(self: &Arc, name: &str) -> Result> { - self.pool.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; + self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), @@ -212,31 +124,6 @@ impl DatabaseEngine for Engine { } } -impl Engine { - pub fn flush_wal(self: &Arc) -> Result<()> { - self.pool.write_lock().pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; - Ok(()) - } - - // Reaps (at most) (.len() * `fraction`) (rounded down, min 1) connections. - pub fn reap_spillover_by_fraction(&self, fraction: f64) { - let mut reaped = 0; - - let spill_amount = self.pool.spills.1.len() as f64; - let fraction = fraction.clamp(0.01, 1.0); - - let amount = (spill_amount * fraction).max(1.0) as u32; - - for _ in 0..amount { - if self.pool.spills.try_take().is_some() { - reaped += 1; - } - } - - debug!("Reaped {} connections", reaped); - } -} - pub struct SqliteTable { engine: Arc, name: String, @@ -258,7 +145,7 @@ impl SqliteTable { fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { guard.execute( format!( - "INSERT INTO {} (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value", + "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", self.name ) .as_str(), @@ -266,70 +153,17 @@ impl SqliteTable { )?; Ok(()) } - - #[tracing::instrument(skip(self, sql, param))] - fn iter_from_thread( - &self, - sql: String, - param: Option>, - ) -> Box + Send + Sync> { - let (s, r) = bounded::(5); - - let engine = Arc::clone(&self.engine); - - let lock = self.engine.iter_pool.lock(); - if lock.active_count() < lock.max_count() { - lock.execute(move || { - if let Some(param) = param { - iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, [param]); - } else { - iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, []); - } - }); - } else { - std::thread::spawn(move || { - if let Some(param) = param { - iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, [param]); - } else { - iter_from_thread_work(&engine.pool.read_lock(), &s, &sql, []); - } - }); - } - - Box::new(r.into_iter()) - } -} - -fn iter_from_thread_work
( - guard: &HoldingConn<'_>, - s: &ChannelSender<(Vec, Vec)>, - sql: &str, - params: P, -) where - P: Params, -{ - for bob in guard - .prepare(sql) - .unwrap() - .query_map(params, |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) - .unwrap() - .map(|r| r.unwrap()) - { - if s.send(bob).is_err() { - return; - } - } } impl Tree for SqliteTable { #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { - self.get_with_guard(&self.engine.pool.read_lock(), key) + self.get_with_guard(&self.engine.read_lock(), key) } #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - let guard = self.engine.pool.write_lock(); + let guard = self.engine.write_lock(); let start = Instant::now(); @@ -337,7 +171,7 @@ impl Tree for SqliteTable { let elapsed = start.elapsed(); if elapsed > MILLI { - debug!("insert: took {:012?} : {}", elapsed, &self.name); + warn!("insert took {:?} : {}", elapsed, &self.name); } drop(guard); @@ -369,7 +203,7 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { - let guard = self.engine.pool.write_lock(); + let guard = self.engine.write_lock(); let start = Instant::now(); @@ -389,9 +223,28 @@ impl Tree for SqliteTable { } #[tracing::instrument(skip(self))] - fn iter<'a>(&'a self) -> Box + Send + 'a> { - let name = self.name.clone(); - self.iter_from_thread(format!("SELECT key, value FROM {}", name), None) + fn iter<'a>(&'a self) -> Box + 'a> { + let guard = self.engine.read_lock(); + + let statement = Box::leak(Box::new( + guard + .prepare(&format!("SELECT key, value FROM {}", &self.name)) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()), + ); + + Box::new(PreparedStatementIterator { + iterator, + statement_ref, + }) } #[tracing::instrument(skip(self, from, backwards))] @@ -399,31 +252,61 @@ impl Tree for SqliteTable { &'a self, from: &[u8], backwards: bool, - ) -> Box + Send + 'a> { - let name = self.name.clone(); + ) -> Box + 'a> { + let guard = self.engine.read_lock(); let from = from.to_vec(); // TODO change interface? + if backwards { - self.iter_from_thread( - format!( - "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", - name - ), - Some(from), - ) + let statement = Box::leak(Box::new( + guard + .prepare(&format!( + "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", + &self.name + )) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()), + ); + Box::new(PreparedStatementIterator { + iterator, + statement_ref, + }) } else { - self.iter_from_thread( - format!( - "SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC", - name - ), - Some(from), - ) + let statement = Box::leak(Box::new( + guard + .prepare(&format!( + "SELECT key, value FROM {} WHERE key >= ? 
ORDER BY key ASC", + &self.name + )) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()), + ); + + Box::new(PreparedStatementIterator { + iterator, + statement_ref, + }) } } #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { - let guard = self.engine.pool.write_lock(); + let guard = self.engine.write_lock(); let start = Instant::now(); @@ -445,10 +328,7 @@ impl Tree for SqliteTable { } #[tracing::instrument(skip(self, prefix))] - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box + Send + 'a> { + fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { // let name = self.name.clone(); // self.iter_from_thread( // format!( @@ -483,25 +363,9 @@ impl Tree for SqliteTable { fn clear(&self) -> Result<()> { debug!("clear: running"); self.engine - .pool .write_lock() .execute(format!("DELETE FROM {}", self.name).as_str(), [])?; debug!("clear: ran"); Ok(()) } } - -// TODO -// struct Pool { -// writer: Mutex, -// readers: [Mutex; NUM_READERS], -// } - -// // then, to pick a reader: -// for r in &pool.readers { -// if let Ok(reader) = r.try_lock() { -// // use reader -// } -// } -// // none unlocked, pick the next reader -// pool.readers[pool.counter.fetch_add(1, Relaxed) % NUM_READERS].lock() diff --git a/src/database/appservice.rs b/src/database/appservice.rs index f39520c..7cc9137 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -49,22 +49,23 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> Result> + Send + '_> { + pub fn iter_ids(&self) -> Result> + '_> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) })) } - pub fn iter_all( - &self, - ) -> Result> + '_ + Send> { - Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| { - Ok(( - id.clone(), - self.get_registration(&id)? - .expect("iter_ids only returns appservices that exist"), - )) - })) + pub fn all(&self) -> Result> { + self.iter_ids()? + .filter_map(|id| id.ok()) + .map(move |id| { + Ok(( + id.clone(), + self.get_registration(&id)? 
+ .expect("iter_ids only returns appservices that exist"), + )) + }) + .collect() } } diff --git a/src/database/globals.rs b/src/database/globals.rs index 0edb9ca..2ca8de9 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -15,7 +15,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tokio::sync::{broadcast, watch::Receiver, Mutex, Semaphore}; +use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::{error, info}; use trust_dns_resolver::TokioAsyncResolver; @@ -45,8 +45,8 @@ pub struct Globals { pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_mutex: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub rotate: RotationHandler, } diff --git a/src/database/media.rs b/src/database/media.rs index f576ca4..a9bb42b 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -101,8 +101,8 @@ impl Media { prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let mut iter = self.mediaid_file.scan_prefix(prefix); - if let Some((key, _)) = iter.next() { + let first = self.mediaid_file.scan_prefix(prefix).next(); + if let Some((key, _)) = first { let path = globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; @@ -190,7 +190,9 @@ impl Media { original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail original_prefix.push(0xff); - if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() { + let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); + let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); + if let Some((key, _)) = first_thumbnailprefix { // Using saved thumbnail let path = globals.get_media_file(&key); let mut file = Vec::new(); @@ -225,7 +227,7 @@ impl Media { content_type, file: file.to_vec(), })) - } else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() { + } else if let Some((key, _)) = first_originalprefix { // Generate a thumbnail let path = globals.get_media_file(&key); let mut file = Vec::new(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 79bb059..c3148c2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,6 @@ mod edus; pub use edus::RoomEdus; use member::MembershipState; -use tokio::sync::MutexGuard; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use lru_cache::LruCache; @@ -28,6 +27,7 @@ use std::{ mem, sync::{Arc, Mutex}, }; +use tokio::sync::MutexGuard; use tracing::{debug, error, warn}; use super::{abstraction::Tree, admin::AdminCommand, pusher}; @@ -1496,7 +1496,7 @@ impl Rooms { db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { + for appservice in db.appservice.all()? 
{ if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 2121439..5681194 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -75,9 +75,9 @@ where registration, )) = db .appservice - .iter_all() + .all() .unwrap() - .filter_map(|r| r.ok()) + .iter() .find(|(_id, registration)| { registration .get("as_token") diff --git a/src/server_server.rs b/src/server_server.rs index 232c5d4..09b6bfc 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -806,7 +806,7 @@ pub async fn send_transaction_message_route( } } - db.flush().await?; + db.flush()?; Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } @@ -1343,7 +1343,6 @@ pub fn handle_incoming_pdu<'a>( &state_at_incoming_event, &mutex_lock, ) - .await .map_err(|_| "Failed to add pdu to db.".to_owned())?, ); debug!("Appended incoming pdu."); @@ -1643,7 +1642,7 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state, _mutex_lock))] -async fn append_incoming_pdu( +fn append_incoming_pdu( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, @@ -1663,7 +1662,7 @@ async fn append_incoming_pdu( &db, )?; - for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { + for appservice in db.appservice.all()? { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") @@ -2208,7 +2207,7 @@ pub async fn create_join_event_route( db.sending.send_pdu(&server, &pdu_id)?; } - db.flush().await?; + db.flush()?; Ok(create_join_event::v2::Response { room_state: RoomState { @@ -2327,7 +2326,7 @@ pub async fn create_invite_route( )?; } - db.flush().await?; + db.flush()?; Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), @@ -2464,7 +2463,7 @@ pub async fn get_keys_route( ) .await?; - db.flush().await?; + db.flush()?; Ok(get_keys::v1::Response { device_keys: result.device_keys, @@ -2489,7 +2488,7 @@ pub async fn claim_keys_route( let result = claim_keys_helper(&body.one_time_keys, &db).await?; - db.flush().await?; + db.flush()?; Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys, From 6b06fc9707df74db225b7bbdd2c4fabc8a5039aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Aug 2021 10:23:20 +0200 Subject: [PATCH 0704/1727] fix: don't run push rules for users that don't exist --- src/database/rooms.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c3148c2..eb98152 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -779,7 +779,7 @@ impl Rooms { .room_members(&pdu.room_id) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == db.globals.server_name()) - .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false)) + .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(true)) { // Don't notify the user of their own events if user == pdu.sender { From 0eeba86b32490a6880ccb5565ba27dc55dd6b12d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Aug 2021 11:10:58 +0200 Subject: [PATCH 0705/1727] fix: improve locks --- src/client_server/account.rs | 38 ++++++++++----------- src/client_server/membership.rs | 59 ++++++++++++++++----------------- src/client_server/message.rs | 10 +++--- 
src/client_server/profile.rs | 16 ++++----- src/client_server/redact.rs | 10 +++--- src/client_server/room.rs | 59 +++++++++++++++++++-------------- src/client_server/state.rs | 8 ++--- src/client_server/sync.rs | 26 +++++++-------- src/database/admin.rs | 14 ++++---- src/database/globals.rs | 8 +++-- src/database/rooms.rs | 29 ++++++++++------ src/server_server.rs | 10 +++--- 12 files changed, 153 insertions(+), 134 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ca8b7b1..87e3731 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -243,15 +243,15 @@ pub async fn register_route( let room_id = RoomId::new(db.globals.server_name()); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); content.federate = true; @@ -270,7 +270,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 2. Make conduit bot join @@ -293,7 +293,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 3. Power levels @@ -318,7 +318,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4.1 Join Rules @@ -336,7 +336,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4.2 History Visibility @@ -356,7 +356,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4.3 Guest Access @@ -374,7 +374,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 6. 
Events implied by name and topic @@ -393,7 +393,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; db.rooms.build_and_append_pdu( @@ -410,7 +410,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // Room alias @@ -433,7 +433,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -458,7 +458,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -479,7 +479,7 @@ pub async fn register_route( &user_id, &room_id, &db, - &mutex_lock, + &state_lock, )?; // Send welcome message @@ -498,7 +498,7 @@ pub async fn register_route( &conduit_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; } @@ -677,15 +677,15 @@ pub async fn deactivate_route( blurhash: None, }; - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; db.rooms.build_and_append_pdu( PduBuilder { @@ -698,7 +698,7 @@ pub async fn deactivate_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 895ad27..716a615 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -203,15 +203,15 @@ pub async fn kick_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; // TODO: reason - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; db.rooms.build_and_append_pdu( PduBuilder { @@ -224,10 +224,10 @@ pub async fn kick_user_route( &sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; @@ -275,15 +275,15 @@ pub async fn ban_user_route( }, )?; - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; db.rooms.build_and_append_pdu( PduBuilder { @@ -296,10 +296,10 @@ pub async fn ban_user_route( &sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; @@ -337,15 +337,15 @@ pub async fn unban_user_route( event.membership = ruma::events::room::member::MembershipState::Leave; - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; db.rooms.build_and_append_pdu( PduBuilder { @@ -358,10 +358,10 @@ pub async fn unban_user_route( &sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; @@ -486,15 +486,15 @@ async fn join_room_by_id_helper( ) -> ConduitResult { let sender_user = sender_user.expect("user is authenticated"); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() 
.unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { @@ -706,11 +706,11 @@ async fn join_room_by_id_helper( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; } - drop(mutex_lock); + drop(state_lock); db.flush()?; @@ -790,15 +790,15 @@ pub async fn invite_helper<'a>( ) -> Result<()> { if user_id.server_name() != db.globals.server_name() { let (room_version_id, pdu_json, invite_room_state) = { - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let prev_events = db .rooms @@ -942,7 +942,7 @@ pub async fn invite_helper<'a>( let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; - drop(mutex_lock); + drop(state_lock); (room_version_id, pdu_json, invite_room_state) }; @@ -1018,16 +1018,15 @@ pub async fn invite_helper<'a>( return Ok(()); } - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; db.rooms.build_and_append_pdu( PduBuilder { @@ -1048,10 +1047,10 @@ pub async fn invite_helper<'a>( &sender_user, room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); Ok(()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index f77ca89..9cb6faa 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -28,15 +28,15 @@ pub async fn send_message_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; // Check if this is a new transaction id if let Some(response) = @@ -75,7 +75,7 @@ pub async fn send_message_event_route( &sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; db.transaction_ids.add_txnid( @@ -85,7 +85,7 @@ pub async fn send_message_event_route( event_id.as_bytes(), )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 648afea..de1baba 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -73,19 +73,19 @@ pub async fn set_displayname_route( }) .filter_map(|r| r.ok()) { - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let _ = db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock); + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock); // Presence update db.rooms.edus.update_presence( @@ -207,19 +207,19 @@ pub async fn set_avatar_url_route( }) .filter_map(|r| r.ok()) { - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state 
.write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let _ = db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock); + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock); // Presence update db.rooms.edus.update_presence( diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 63d3d4a..63bf103 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -20,15 +20,15 @@ pub async fn redact_event_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let event_id = db.rooms.build_and_append_pdu( PduBuilder { @@ -44,10 +44,10 @@ pub async fn redact_event_route( &sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 1b14a93..f73d544 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -33,15 +33,15 @@ pub async fn create_room_route( let room_id = RoomId::new(db.globals.server_name()); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let alias = body .room_alias_name @@ -79,7 +79,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 2. Let the room creator join @@ -102,7 +102,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 3. Power levels @@ -157,7 +157,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4. Events set by preset @@ -184,7 +184,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4.2 History Visibility @@ -202,7 +202,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 4.3 Guest Access @@ -228,7 +228,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; // 5. Events listed in initial_state @@ -244,7 +244,7 @@ pub async fn create_room_route( } db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock)?; + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock)?; } // 6. Events implied by name and topic @@ -261,7 +261,7 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; } @@ -280,12 +280,12 @@ pub async fn create_room_route( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; } // 7. 
Events implied by invite (and TODO: invite_3pid) - drop(mutex_lock); + drop(state_lock); for user_id in &body.invite { let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; } @@ -364,13 +364,12 @@ pub async fn get_room_aliases_route( #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") + post("/_matrix/client/r0/rooms/<_>/upgrade", data = "") )] #[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: DatabaseGuard, body: Ruma>, - _room_id: String, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -387,15 +386,15 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(db.globals.server_name()); - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(body.room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions @@ -414,9 +413,21 @@ pub async fn upgrade_room_route( sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; + // Change lock to replacement room + drop(state_lock); + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(replacement_room.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + // Get the old room federations status let federate = serde_json::from_value::>( db.rooms @@ -455,7 +466,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, - &mutex_lock, + &state_lock, )?; // Join the new room @@ -478,7 +489,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, - &mutex_lock, + &state_lock, )?; // Recommended transferable state events list from the specs @@ -512,7 +523,7 @@ pub async fn upgrade_room_route( sender_user, &replacement_room, &db, - &mutex_lock, + &state_lock, )?; } @@ -556,10 +567,10 @@ pub async fn upgrade_room_route( sender_user, &body.room_id, &db, - &mutex_lock, + &state_lock, )?; - drop(mutex_lock); + drop(state_lock); db.flush()?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 5afac03..aa020b5 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -259,15 +259,15 @@ pub async fn send_state_event_for_key_helper( } } - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let event_id = db.rooms.build_and_append_pdu( PduBuilder { @@ -280,7 +280,7 @@ pub async fn send_state_event_for_key_helper( &sender_user, &room_id, &db, - &mutex_lock, + &state_lock, )?; Ok(event_id) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index b09a212..937a252 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -191,17 +191,17 @@ async fn sync_helper( let room_id = room_id?; // Get and drop the lock to wait for remaining operations to finish - let mutex = Arc::clone( + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_insert .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - - let mutex_lock = 
mutex.lock().await; - drop(mutex_lock); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); let mut non_timeline_pdus = db .rooms @@ -665,16 +665,16 @@ async fn sync_helper( let (room_id, left_state_events) = result?; // Get and drop the lock to wait for remaining operations to finish - let mutex = Arc::clone( + let mutex_insert = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_insert .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; - drop(mutex_lock); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; @@ -705,16 +705,16 @@ async fn sync_helper( let (room_id, invite_state_events) = result?; // Get and drop the lock to wait for remaining operations to finish - let mutex = Arc::clone( + let mutex_insert = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_insert .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; - drop(mutex_lock); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; diff --git a/src/database/admin.rs b/src/database/admin.rs index e1b24d0..424e674 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -84,15 +84,15 @@ impl Admin { tokio::select! { Some(event) = receiver.next() => { let guard = db.read().await; - let mutex = Arc::clone( + let mutex_state = Arc::clone( guard.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(conduit_room.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; match event { AdminCommand::RegisterAppservice(yaml) => { @@ -106,17 +106,17 @@ impl Admin { count, appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") ); - send_message(message::MessageEventContent::text_plain(output), guard, &mutex_lock); + send_message(message::MessageEventContent::text_plain(output), guard, &state_lock); } else { - send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &mutex_lock); + send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); } } AdminCommand::SendMessage(message) => { - send_message(message, guard, &mutex_lock); + send_message(message, guard, &state_lock); } } - drop(mutex_lock); + drop(state_lock); } } } diff --git a/src/database/globals.rs b/src/database/globals.rs index 2ca8de9..823ce34 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -12,7 +12,7 @@ use std::{ fs, future::Future, path::PathBuf, - sync::{Arc, RwLock}, + sync::{Arc, Mutex, RwLock}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; @@ -45,7 +45,8 @@ pub struct Globals { pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex: RwLock>>>, + pub roomid_mutex_insert: RwLock>>>, + pub roomid_mutex_state: RwLock>>>, pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub rotate: RotationHandler, } @@ -200,7 +201,8 @@ impl Globals { bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - roomid_mutex: RwLock::new(HashMap::new()), + roomid_mutex_state: 
RwLock::new(HashMap::new()), + roomid_mutex_insert: RwLock::new(HashMap::new()), roomid_mutex_federation: RwLock::new(HashMap::new()), sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index eb98152..e5eb2ee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -736,6 +736,16 @@ impl Rooms { self.replace_pdu_leaves(&pdu.room_id, leaves)?; + let mutex_insert = Arc::clone( + db.globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(pdu.room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + let count1 = db.globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -750,6 +760,8 @@ impl Rooms { // There's a brief moment of time here where the count is updated but the pdu does not // exist. This could theoretically lead to dropped pdus, but it's extremely rare + // + // Update: We fixed this using insert_lock self.pduid_pdu.insert( &pdu_id, @@ -761,6 +773,8 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.as_bytes(), &pdu_id)?; + drop(insert_lock); + // See if the event matches any known pushers let power_levels: PowerLevelsEventContent = db .rooms @@ -1464,13 +1478,6 @@ impl Rooms { self.shorteventid_eventid .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; - // Increment the last index and use that - // This is also the next_batch/since value - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = self.append_to_state(&pdu, &db.globals)?; @@ -1904,15 +1911,15 @@ impl Rooms { db, )?; } else { - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; let mut event = serde_json::from_value::>( self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? @@ -1941,7 +1948,7 @@ impl Rooms { user_id, room_id, db, - &mutex_lock, + &state_lock, )?; } diff --git a/src/server_server.rs b/src/server_server.rs index 09b6bfc..a20c9ab 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1159,15 +1159,15 @@ pub fn handle_incoming_pdu<'a>( // We start looking at current room state now, so lets lock the room - let mutex = Arc::clone( + let mutex_state = Arc::clone( db.globals - .roomid_mutex + .roomid_mutex_state .write() .unwrap() .entry(room_id.clone()) .or_default(), ); - let mutex_lock = mutex.lock().await; + let state_lock = mutex_state.lock().await; // Now we calculate the set of extremities this room has after the incoming event has been // applied. 
We start with the previous extremities (aka leaves) @@ -1341,7 +1341,7 @@ pub fn handle_incoming_pdu<'a>( val, extremities, &state_at_incoming_event, - &mutex_lock, + &state_lock, ) .map_err(|_| "Failed to add pdu to db.".to_owned())?, ); @@ -1364,7 +1364,7 @@ pub fn handle_incoming_pdu<'a>( } // Event has passed all auth/stateres checks - drop(mutex_lock); + drop(state_lock); Ok(pdu_id) }) } From 49ade0cfbd74010ae11caa7c86850a63b25e9184 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Aug 2021 11:24:21 +0200 Subject: [PATCH 0706/1727] improvement: allow batch inserts --- src/database.rs | 10 ++++++++++ src/database/abstraction.rs | 1 + src/database/abstraction/sqlite.rs | 17 ++++++++++++++++- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 5b47302..54abc53 100644 --- a/src/database.rs +++ b/src/database.rs @@ -484,6 +484,16 @@ impl Database { .watch_prefix(&userid_prefix), ); futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.rooms + .userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push( + self.rooms + .userroomid_highlightcount + .watch_prefix(&userid_prefix), + ); // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index d0fa780..465bb10 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -25,6 +25,7 @@ pub trait Tree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()>; fn remove(&self, key: &[u8]) -> Result<()>; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index d2ecb3a..ce30a56 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -201,6 +201,21 @@ impl Tree for SqliteTable { Ok(()) } + #[tracing::instrument(skip(self, iter))] + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let guard = self.engine.write_lock(); + + guard.execute("BEGIN", [])?; + for (key, value) in iter { + self.insert_with_guard(&guard, &key, &value)?; + } + guard.execute("COMMIT", [])?; + + drop(guard); + + Ok(()) + } + #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); @@ -228,7 +243,7 @@ impl Tree for SqliteTable { let statement = Box::leak(Box::new( guard - .prepare(&format!("SELECT key, value FROM {}", &self.name)) + .prepare(&format!("SELECT key, value FROM {} ORDER BY key ASC", &self.name)) .unwrap(), )); From 41ec7cf5d028bb382dec1cdb7d1ecbf79b047689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Aug 2021 16:14:07 +0200 Subject: [PATCH 0707/1727] improvement: batch inserts for stateids --- src/database/abstraction/sqlite.rs | 5 +- src/database/rooms.rs | 147 +++++++++++++++++------------ src/database/rooms/edus.rs | 4 +- 3 files changed, 91 insertions(+), 65 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index ce30a56..72fb5f7 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -243,7 +243,10 @@ impl Tree for SqliteTable { let statement = Box::leak(Box::new( guard - .prepare(&format!("SELECT key, value FROM {} ORDER BY key ASC", &self.name)) + .prepare(&format!( + "SELECT key, value 
FROM {} ORDER BY key ASC", + &self.name + )) .unwrap(), )); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e5eb2ee..48a135a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -313,41 +313,50 @@ impl Rooms { let new_state = if !already_existed { let mut new_state = HashSet::new(); - for ((event_type, state_key), eventid) in state { - new_state.insert(eventid.clone()); + let batch = state + .iter() + .filter_map(|((event_type, state_key), eventid)| { + new_state.insert(eventid.clone()); - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); - let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = db.globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? { + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = db.globals.next_count().ok()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes()) + .ok()?; + shortstatekey.to_be_bytes().to_vec() + } + }; - let shorteventid = match self.eventid_shorteventid.get(eventid.as_bytes())? { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = db.globals.next_count()?; - self.eventid_shorteventid - .insert(eventid.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), eventid.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; + let shorteventid = + match self.eventid_shorteventid.get(eventid.as_bytes()).ok()? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = db.globals.next_count().ok()?; + self.eventid_shorteventid + .insert(eventid.as_bytes(), &shorteventid.to_be_bytes()) + .ok()?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), eventid.as_bytes()) + .ok()?; + shorteventid.to_be_bytes().to_vec() + } + }; - let mut state_id = shortstatehash.to_be_bytes().to_vec(); - state_id.extend_from_slice(&shortstatekey); + let mut state_id = shortstatehash.to_be_bytes().to_vec(); + state_id.extend_from_slice(&shortstatekey); - self.stateid_shorteventid - .insert(&state_id, &*shorteventid)?; - } + Some((state_id, shorteventid)) + }) + .collect::>(); + + self.stateid_shorteventid + .insert_batch(&mut batch.into_iter())?; new_state } else { @@ -1120,39 +1129,51 @@ impl Rooms { } }; - for ((event_type, state_key), pdu) in state { - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + let batch = state + .iter() + .filter_map(|((event_type, state_key), pdu)| { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); - let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? 
{ - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? { + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count().ok()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes()) + .ok()?; + shortstatekey.to_be_bytes().to_vec() + } + }; - let shorteventid = match self.eventid_shorteventid.get(pdu.event_id.as_bytes())? { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; + let shorteventid = match self + .eventid_shorteventid + .get(pdu.event_id.as_bytes()) + .ok()? + { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count().ok()?; + self.eventid_shorteventid + .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes()) + .ok()?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes()) + .ok()?; + shorteventid.to_be_bytes().to_vec() + } + }; - let mut state_id = shortstatehash.clone(); - state_id.extend_from_slice(&shortstatekey); + let mut state_id = shortstatehash.clone(); + state_id.extend_from_slice(&shortstatekey); - self.stateid_shorteventid - .insert(&*state_id, &*shorteventid)?; - } + Some((state_id, shorteventid)) + }) + .collect::>(); + + self.stateid_shorteventid + .insert_batch(&mut batch.into_iter())?; self.shorteventid_shortstatehash .insert(&shorteventid, &*shortstatehash)?; @@ -1257,11 +1278,13 @@ impl Rooms { } }; - for (shortstatekey, shorteventid) in new_state { + let mut batch = new_state.into_iter().map(|(shortstatekey, shorteventid)| { let mut state_id = shortstatehash.to_be_bytes().to_vec(); state_id.extend_from_slice(&shortstatekey); - self.stateid_shorteventid.insert(&state_id, &shorteventid)?; - } + (state_id, shorteventid) + }); + + self.stateid_shorteventid.insert_batch(&mut batch)?; Ok(shortstatehash) } else { diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 664c171..ff28436 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -422,7 +422,7 @@ impl RoomEdus { } /// Sets all users to offline who have been quiet for too long. 
- pub fn presence_maintain( + fn presence_maintain( &self, rooms: &super::Rooms, globals: &super::super::globals::Globals, @@ -497,7 +497,7 @@ impl RoomEdus { rooms: &super::Rooms, globals: &super::super::globals::Globals, ) -> Result> { - self.presence_maintain(rooms, globals)?; + //self.presence_maintain(rooms, globals)?; let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); From fce22362d401227bd41e50a26cf53b697d6fb019 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 3 Aug 2021 19:18:41 +0200 Subject: [PATCH 0708/1727] improvement: better auth chain calculation --- src/server_server.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index a20c9ab..aa70ce0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1732,7 +1732,7 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result, db: &Database) -> Result Result> { - let mut auth_chain = HashSet::new(); - +fn get_auth_chain_recursive(event_id: &EventId, mut found: HashSet, db: &Database) -> Result> { if let Some(pdu) = db.rooms.get_pdu(&event_id)? { - auth_chain.extend(pdu.auth_events.iter().cloned()); for auth_event in &pdu.auth_events { - auth_chain.extend(get_auth_chain_recursive(&auth_event, db)?); + if !found.contains(auth_event) { + found.insert(auth_event.clone()); + found = get_auth_chain_recursive(&auth_event, found, db)?; + } } } else { warn!("Could not find pdu mentioned in auth events."); } - Ok(auth_chain) + Ok(found) } #[cfg_attr( From 9bb4c3cd0160d08058d3be780ca70d99e1f4c108 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Aug 2021 12:54:26 +0200 Subject: [PATCH 0709/1727] improvement: batched inserts for tokenids --- src/database.rs | 4 ++-- src/database/rooms.rs | 20 +++++++++++--------- src/server_server.rs | 13 +++++++++++-- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/database.rs b/src/database.rs index 54abc53..4f3d332 100644 --- a/src/database.rs +++ b/src/database.rs @@ -271,8 +271,8 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new(100_000)), - auth_chain_cache: Mutex::new(LruCache::new(100_000)), + pdu_cache: Mutex::new(LruCache::new(1_000_000)), + auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 48a135a..9f7a600 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -905,18 +905,20 @@ impl Rooms { } EventType::RoomMessage => { if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) { - for word in body + let mut batch = body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|word| word.len() <= 50) .map(str::to_lowercase) - { - let mut key = pdu.room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - self.tokenids.insert(&key, &[])?; - } + .map(|word| { + let mut key = pdu.room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(&pdu_id); + (key, Vec::new()) + }); + + self.tokenids.insert_batch(&mut batch)?; if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self diff --git a/src/server_server.rs 
b/src/server_server.rs index aa70ce0..3ea1c0a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, - database::DatabaseGuard, + database::{abstraction::sqlite::MILLI, DatabaseGuard}, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; @@ -1732,7 +1732,12 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result MILLI { + println!("auth chain for {} took {:?}", &event_id, elapsed) + } cache = db.rooms.auth_chain_cache(); @@ -1747,7 +1752,11 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result, db: &Database) -> Result> { +fn get_auth_chain_recursive( + event_id: &EventId, + mut found: HashSet, + db: &Database, +) -> Result> { if let Some(pdu) = db.rooms.get_pdu(&event_id)? { for auth_event in &pdu.auth_events { if !found.contains(auth_event) { From ab7835dedb16b7ac0208ad6329f6b1252188240b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Aug 2021 15:19:50 +0200 Subject: [PATCH 0710/1727] fix: early return from state res --- src/server_server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 3ea1c0a..7b0bd58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1267,10 +1267,10 @@ pub fn handle_incoming_pdu<'a>( // 14. Use state resolution to find new room state let new_room_state = if fork_states.is_empty() { return Err("State is empty.".to_owned()); - } else if fork_states.len() == 1 { + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { // There was only one state, so it has to be the room's current state (because that is // always included) - debug!("Skipping stateres because there is no new state."); + warn!("Skipping stateres because there is no new state."); fork_states[0] .iter() .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) From b813c3464210f249ca4da42dfd8c8c58a5d4a794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Aug 2021 18:30:56 +0200 Subject: [PATCH 0711/1727] improvement: more auth chain caching --- src/database/rooms.rs | 4 ++-- src/server_server.rs | 17 +++++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 9f7a600..549aa8c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -87,7 +87,7 @@ pub struct Rooms { pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex, HashSet>>, } impl Rooms { @@ -2618,7 +2618,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn auth_chain_cache( &self, - ) -> std::sync::MutexGuard<'_, LruCache>> { + ) -> std::sync::MutexGuard<'_, LruCache, HashSet>> { self.auth_chain_cache.lock().unwrap() } } diff --git a/src/server_server.rs b/src/server_server.rs index 7b0bd58..0e595d4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1727,9 +1727,13 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result, db: &Database) -> Result Date: Wed, 4 Aug 2021 21:15:01 +0200 Subject: [PATCH 0712/1727] improvement: save member count + sled fixes --- src/client_server/directory.rs | 14 ++++++++++++-- src/database.rs | 22 +++++++++++++++++++--- src/database/abstraction/sled.rs | 15 ++++++++++++--- src/database/abstraction/sqlite.rs | 1 - src/database/rooms.rs | 21 +++++++++++++++++++++ src/server_server.rs | 11 +---------- 6 
files changed, 65 insertions(+), 19 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 7cab1a7..589aacd 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,3 +1,5 @@ +use std::convert::TryInto; + use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::{ @@ -21,7 +23,7 @@ use ruma::{ serde::Raw, ServerName, UInt, }; -use tracing::info; +use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; @@ -234,7 +236,15 @@ pub async fn get_public_rooms_filtered_helper( .name .map(|n| n.to_owned().into())) })?, - num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(), + num_joined_members: db + .rooms + .room_joined_count(&room_id)? + .unwrap_or_else(|| { + warn!("Room {} has no member count", room_id); + 0 + }) + .try_into() + .expect("user count should not be that big"), topic: db .rooms .room_state_get(&room_id, &EventType::RoomTopic, "")? diff --git a/src/database.rs b/src/database.rs index 4f3d332..2e7e60c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -24,10 +24,11 @@ use rocket::{ request::{FromRequest, Request}, Shutdown, State, }; -use ruma::{DeviceId, ServerName, UserId}; +use ruma::{DeviceId, RoomId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; use std::{ collections::{BTreeMap, HashMap}, + convert::TryFrom, fs::{self, remove_dir_all}, io::Write, ops::Deref, @@ -252,6 +253,7 @@ impl Database { serverroomids: builder.open_tree("serverroomids")?, userroomid_joined: builder.open_tree("userroomid_joined")?, roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, @@ -271,8 +273,8 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new(1_000_000)), - auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), + pdu_cache: Mutex::new(LruCache::new(0)), + auth_chain_cache: Mutex::new(LruCache::new(0)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, @@ -423,6 +425,20 @@ impl Database { println!("Migration: 4 -> 5 finished"); } + + if db.globals.database_version()? 
< 9 { // TODO update to 6 + // Set room member count + for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { + let room_id = + RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); + + db.rooms.update_joined_count(&room_id)?; + } + + db.globals.bump_database_version(6)?; + + println!("Migration: 5 -> 6 finished"); + } } let guard = db.read().await; diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index d99ce26..35ba1b2 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -39,12 +39,21 @@ impl Tree for SledEngineTree { Ok(()) } + #[tracing::instrument(skip(self, iter))] + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + for (key, value) in iter { + self.0.insert(key, value)?; + } + + Ok(()) + } + fn remove(&self, key: &[u8]) -> Result<()> { self.0.remove(key)?; Ok(()) } - fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { Box::new( self.0 .iter() @@ -62,7 +71,7 @@ impl Tree for SledEngineTree { &self, from: &[u8], backwards: bool, - ) -> Box, Vec)> + Send> { + ) -> Box, Vec)>> { let iter = if backwards { self.0.range(..=from) } else { @@ -95,7 +104,7 @@ impl Tree for SledEngineTree { fn scan_prefix<'a>( &'a self, prefix: Vec, - ) -> Box, Vec)> + Send + 'a> { + ) -> Box, Vec)> + 'a> { let iter = self .0 .scan_prefix(prefix) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 72fb5f7..0dbb261 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -55,7 +55,6 @@ impl Engine { conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; Ok(conn) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 549aa8c..10a6215 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -55,6 +55,7 @@ pub struct Rooms { pub(super) userroomid_joined: Arc, pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, pub(super) roomuseroncejoinedids: Arc, pub(super) userroomid_invitestate: Arc, // InviteState = Vec> pub(super) roomuserid_invitecount: Arc, // InviteCount = Count @@ -1906,9 +1907,18 @@ impl Rooms { _ => {} } + self.update_joined_count(room_id)?; + Ok(()) } + pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + self.roomid_joinedcount.insert( + room_id.as_bytes(), + &(self.room_members(&room_id).count() as u64).to_be_bytes(), + ) + } + pub async fn leave_room( &self, user_id: &UserId, @@ -2370,6 +2380,17 @@ impl Rooms { }) } + pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { + Ok(self + .roomid_joinedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose()?) + } + /// Returns an iterator over all User IDs who ever joined a room. 
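The joined-count change above persists the member total as eight big-endian bytes under the room ID, so later reads avoid re-counting the whole membership list. A rough standalone sketch of that encoding, using an in-memory map in place of the real `roomid_joinedcount` tree:

    use std::collections::HashMap;
    use std::convert::TryInto;

    // Write the counter as big-endian bytes keyed by the room ID.
    fn write_joined_count(tree: &mut HashMap<Vec<u8>, Vec<u8>>, room_id: &str, count: u64) {
        tree.insert(room_id.as_bytes().to_vec(), count.to_be_bytes().to_vec());
    }

    // Read it back; a missing entry means the count was never materialized.
    fn read_joined_count(tree: &HashMap<Vec<u8>, Vec<u8>>, room_id: &str) -> Option<u64> {
        tree.get(room_id.as_bytes()).map(|bytes| {
            let array: [u8; 8] = bytes.as_slice().try_into().expect("count stored as 8 bytes");
            u64::from_be_bytes(array)
        })
    }
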
#[tracing::instrument(skip(self))] pub fn room_useroncejoined<'a>( diff --git a/src/server_server.rs b/src/server_server.rs index 0e595d4..4255f12 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, - database::{abstraction::sqlite::MILLI, DatabaseGuard}, + database::{DatabaseGuard}, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; @@ -1736,20 +1736,11 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result MILLI { - println!("auth chain for {} took {:?}", &event_id, elapsed) - } - cache = db.rooms.auth_chain_cache(); - cache.insert(vec![event_id.clone()], auth_chain.clone()); - full_auth_chain.extend(auth_chain); }; - } cache.insert(starting_events, full_auth_chain.clone()); From df727688efab57fce71ad2c222065e83ec942cc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Aug 2021 21:17:40 +0200 Subject: [PATCH 0713/1727] remove rocksdb --- Cargo.lock | 144 ----------------------- Cargo.toml | 2 - src/database.rs | 6 +- src/database/abstraction.rs | 3 - src/database/abstraction/rocksdb.rs | 176 ---------------------------- src/error.rs | 6 - src/server_server.rs | 2 +- src/utils.rs | 9 -- 8 files changed, 3 insertions(+), 345 deletions(-) delete mode 100644 src/database/abstraction/rocksdb.rs diff --git a/Cargo.lock b/Cargo.lock index 4f58ef3..6ed4ee7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -144,43 +144,12 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", -] - [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitvec" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "blake2b_simd" version = "0.5.11" @@ -234,15 +203,6 @@ dependencies = [ "jobserver", ] -[[package]] -name = "cexpr" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "0.1.10" @@ -268,17 +228,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "clang-sys" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "color_quant" version = "1.1.0" @@ -308,7 +257,6 @@ dependencies = [ "reqwest", "ring", "rocket", - "rocksdb", "ruma", "rusqlite", "rust-argon2", @@ -725,12 +673,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - [[package]] name = "futures" version = "0.3.16" @@ -1243,40 +1185,12 
@@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" -[[package]] -name = "libloading" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" -dependencies = [ - "cfg-if 1.0.0", - "winapi", -] - -[[package]] -name = "librocksdb-sys" -version = "6.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" -dependencies = [ - "bindgen", - "cc", - "glob", - "libc", -] - [[package]] name = "libsqlite3-sys" version = "0.22.2" @@ -1445,18 +1359,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "nom" -version = "6.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" -dependencies = [ - "bitvec", - "funty", - "memchr", - "version_check", -] - [[package]] name = "ntapi" version = "0.3.6" @@ -1649,12 +1551,6 @@ dependencies = [ "syn", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "0.8.3" @@ -1817,12 +1713,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" - [[package]] name = "rand" version = "0.7.3" @@ -2122,16 +2012,6 @@ dependencies = [ "uncased", ] -[[package]] -name = "rocksdb" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "ruma" version = "0.2.0" @@ -2415,12 +2295,6 @@ dependencies = [ "crossbeam-utils 0.8.5", ] -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2647,12 +2521,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shlex" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -2864,12 +2732,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "tempfile" version = "3.2.0" @@ -3540,12 +3402,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 19ce6b1..3d18bfb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,6 @@ ruma = { git = "https://github.com/timokoesters/ruma", rev = "a2d93500e1dbc87e70 tokio = "1.8.2" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } -rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for the http request / response body type for Ruma endpoints used with reqwest @@ -84,7 +83,6 @@ heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c7 [features] default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] -backend_rocksdb = ["rocksdb"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] diff --git a/src/database.rs b/src/database.rs index 2e7e60c..4b7c7fe 100644 --- a/src/database.rs +++ b/src/database.rs @@ -125,9 +125,6 @@ fn default_log() -> String { #[cfg(feature = "sled")] pub type Engine = abstraction::sled::Engine; -#[cfg(feature = "rocksdb")] -pub type Engine = abstraction::rocksdb::Engine; - #[cfg(feature = "sqlite")] pub type Engine = abstraction::sqlite::Engine; @@ -426,7 +423,8 @@ impl Database { println!("Migration: 4 -> 5 finished"); } - if db.globals.database_version()? < 9 { // TODO update to 6 + if db.globals.database_version()? < 6 { + // TODO update to 6 // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { let room_id = diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 465bb10..f381ce9 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -3,9 +3,6 @@ use crate::Result; use std::{future::Future, pin::Pin, sync::Arc}; -#[cfg(feature = "rocksdb")] -pub mod rocksdb; - #[cfg(feature = "sled")] pub mod sled; diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs deleted file mode 100644 index 4699b2d..0000000 --- a/src/database/abstraction/rocksdb.rs +++ /dev/null @@ -1,176 +0,0 @@ -use super::super::Config; -use crate::{utils, Result}; - -use std::{future::Future, pin::Pin, sync::Arc}; - -use super::{DatabaseEngine, Tree}; - -use std::{collections::HashMap, sync::RwLock}; - -pub struct Engine(rocksdb::DBWithThreadMode); - -pub struct RocksDbEngineTree<'a> { - db: Arc, - name: &'a str, - watchers: RwLock, Vec>>>, -} - -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { - let mut db_opts = rocksdb::Options::default(); - db_opts.create_if_missing(true); - db_opts.set_max_open_files(16); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); - db_opts.set_target_file_size_base(256 << 20); - db_opts.set_write_buffer_size(256 << 20); - - let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(512 << 10); - db_opts.set_block_based_table_factory(&block_based_options); - - let cfs = rocksdb::DBWithThreadMode::::list_cf( - &db_opts, - &config.database_path, - ) - .unwrap_or_default(); - - let mut options = rocksdb::Options::default(); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); - - let 
db = rocksdb::DBWithThreadMode::::open_cf_descriptors( - &db_opts, - &config.database_path, - cfs.iter() - .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())), - )?; - - Ok(Arc::new(Engine(db))) - } - - fn open_tree(self: &Arc, name: &'static str) -> Result> { - let mut options = rocksdb::Options::default(); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); - - // Create if it doesn't exist - let _ = self.0.create_cf(name, &options); - - Ok(Arc::new(RocksDbEngineTree { - name, - db: Arc::clone(self), - watchers: RwLock::new(HashMap::new()), - })) - } -} - -impl RocksDbEngineTree<'_> { - fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { - self.db.0.cf_handle(self.name).unwrap() - } -} - -impl Tree for RocksDbEngineTree<'_> { - fn get(&self, key: &[u8]) -> Result>> { - Ok(self.db.0.get_cf(self.cf(), key)?) - } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - let watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - } - - Ok(self.db.0.put_cf(self.cf(), key, value)?) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - Ok(self.db.0.delete_cf(self.cf(), key)?) - } - - fn iter<'a>(&'a self) -> Box, Vec)> + Send + Sync + 'a> { - Box::new( - self.db - .0 - .iterator_cf(self.cf(), rocksdb::IteratorMode::Start), - ) - } - - fn iter_from<'a>( - &'a self, - from: &[u8], - backwards: bool, - ) -> Box, Vec)> + 'a> { - Box::new(self.db.0.iterator_cf( - self.cf(), - rocksdb::IteratorMode::From( - from, - if backwards { - rocksdb::Direction::Reverse - } else { - rocksdb::Direction::Forward - }, - ), - )) - } - - fn increment(&self, key: &[u8]) -> Result> { - let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap(); - dbg!(stats.mem_table_total); - dbg!(stats.mem_table_unflushed); - dbg!(stats.mem_table_readers_total); - dbg!(stats.cache_total); - // TODO: atomic? 
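For context, counters in these trees are stored as big-endian u64 bytes, and incrementing is a read-modify-write (hence the `TODO: atomic?` above) rather than a database-side merge. A small illustrative helper in that spirit, not the exact `utils::increment` signature:

    use std::convert::TryFrom;

    // Read the old big-endian value, add one, and re-encode; anything missing
    // or malformed restarts the counter at 1.
    fn increment_counter(old: Option<&[u8]>) -> Vec<u8> {
        let number = match old.map(|bytes| <[u8; 8]>::try_from(bytes)) {
            Some(Ok(array)) => u64::from_be_bytes(array) + 1,
            _ => 1,
        };
        number.to_be_bytes().to_vec()
    }
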
- let old = self.get(key)?; - let new = utils::increment(old.as_deref()).unwrap(); - self.insert(key, &new)?; - Ok(new) - } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Vec)> + Send + 'a> { - Box::new( - self.db - .0 - .iterator_cf( - self.cf(), - rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), - ) - .take_while(move |(k, _)| k.starts_with(&prefix)), - ) - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) - } -} diff --git a/src/error.rs b/src/error.rs index 24e52ec..1ecef3a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -30,12 +30,6 @@ pub enum Error { #[from] source: sled::Error, }, - #[cfg(feature = "rocksdb")] - #[error("There was a problem with the connection to the rocksdb database: {source}")] - RocksDbError { - #[from] - source: rocksdb::Error, - }, #[cfg(feature = "sqlite")] #[error("There was a problem with the connection to the sqlite database: {source}")] SqliteError { diff --git a/src/server_server.rs b/src/server_server.rs index 4255f12..45d9022 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, - database::{DatabaseGuard}, + database::DatabaseGuard, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; diff --git a/src/utils.rs b/src/utils.rs index 60a4e0c..d21395e 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -17,15 +17,6 @@ pub fn millis_since_unix_epoch() -> u64 { .as_millis() as u64 } -#[cfg(feature = "rocksdb")] -pub fn increment_rocksdb( - _new_key: &[u8], - old: Option<&[u8]>, - _operands: &mut rocksdb::MergeOperands, -) -> Option> { - increment(old) -} - pub fn increment(old: Option<&[u8]>) -> Option> { let number = match old.map(|bytes| bytes.try_into()) { Some(Ok(bytes)) => { From 55a2ff025fefe89118dd2b3244151fe3492f1fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Aug 2021 22:55:03 +0200 Subject: [PATCH 0714/1727] improvement: longer timeout, more descriptive errors --- src/database/abstraction/sqlite.rs | 1 + src/server_server.rs | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 0dbb261..72fb5f7 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -55,6 +55,7 @@ impl Engine { conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; Ok(conn) } diff --git a/src/server_server.rs b/src/server_server.rs index 45d9022..9ae42ad 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -228,11 +228,9 @@ where .headers_mut() .insert(HOST, HeaderValue::from_str(&host).unwrap()); - let mut reqwest_request = reqwest::Request::try_from(http_request) + let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); - *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); - let url = reqwest_request.url().clone(); let response = globals.reqwest_client().execute(reqwest_request).await; @@ -273,7 +271,10 @@ 
where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); - response.map_err(|_| Error::BadServerResponse("Server returned bad 200 response.")) + response.map_err(|e| { + warn!("Invalid 200 response: {}", e); + Error::BadServerResponse("Server returned bad 200 response.") + }) } else { Err(Error::FederationError( destination.to_owned(), From 989a20e99ce1ddbc7fb179b69ef3358d15d040b6 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sat, 7 Aug 2021 15:55:03 +0200 Subject: [PATCH 0715/1727] Support creating rooms with a version --- src/client_server/room.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f73d544..cc7dba6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,7 +65,19 @@ pub async fn create_room_route( let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone()); content.federate = body.creation_content.federate; content.predecessor = body.creation_content.predecessor.clone(); - content.room_version = RoomVersionId::Version6; + content.room_version = match body.room_version.clone() { + Some(room_version) => { + if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { + room_version + } else { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "This server does not support that room version.", + )); + } + } + None => RoomVersionId::Version6, + }; // 1. The room create event db.rooms.build_and_append_pdu( From d2f406e0e8aff98b9ee7251d02add51e396c7522 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 6 Aug 2021 20:00:08 +0200 Subject: [PATCH 0716/1727] fix: handle bad events in db better --- src/database.rs | 4 +-- src/server_server.rs | 74 +++++++++++++++++++++++--------------------- 2 files changed, 41 insertions(+), 37 deletions(-) diff --git a/src/database.rs b/src/database.rs index 4b7c7fe..bdff386 100644 --- a/src/database.rs +++ b/src/database.rs @@ -270,8 +270,8 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new(0)), - auth_chain_cache: Mutex::new(LruCache::new(0)), + pdu_cache: Mutex::new(LruCache::new(1_000_000)), + auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/server_server.rs b/src/server_server.rs index 9ae42ad..9a847c3 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -666,14 +666,12 @@ pub async fn send_transaction_message_route( drop(mutex_lock); let elapsed = start_time.elapsed(); - if elapsed > Duration::from_secs(1) { - warn!( - "Handling event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } + warn!( + "Handling event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); } for pdu in &resolved_map { @@ -1271,7 +1269,6 @@ pub fn handle_incoming_pdu<'a>( } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { // There was only one state, so it has to be the room's current state (because that is // always included) - warn!("Skipping stateres because there is no new state."); fork_states[0] .iter() .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) @@ -1411,12 +1408,12 @@ pub(crate) fn fetch_and_handle_events<'a>( // a. 
Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu checks both) - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => { + let pdu = match db.rooms.get_pdu(&id) { + Ok(Some(pdu)) => { trace!("Found {} in db", id); pdu } - None => { + Ok(None) => { // c. Ask origin server over federation debug!("Fetching {} over federation.", id); match db @@ -1431,7 +1428,11 @@ pub(crate) fn fetch_and_handle_events<'a>( Ok(res) => { debug!("Got {} over federation", id); let (event_id, mut value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu)?; + match crate::pdu::gen_event_id_canonical_json(&res.pdu) { + Ok(t) => t, + Err(_) => continue, + }; + // This will also fetch the auth chain match handle_incoming_pdu( origin, @@ -1474,6 +1475,10 @@ pub(crate) fn fetch_and_handle_events<'a>( } } } + Err(e) => { + debug!("Error loading {}: {}", id, e); + continue; + } }; pdus.push(pdu); } @@ -1728,44 +1733,47 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result, + found: &mut HashSet, db: &Database, -) -> Result> { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? { - for auth_event in &pdu.auth_events { - if !found.contains(auth_event) { - found.insert(auth_event.clone()); - found = get_auth_chain_recursive(&auth_event, found, db)?; +) -> Result<()> { + let r = db.rooms.get_pdu(&event_id); + match r { + Ok(Some(pdu)) => { + for auth_event in &pdu.auth_events { + if !found.contains(auth_event) { + found.insert(auth_event.clone()); + get_auth_chain_recursive(&auth_event, found, db)?; + } } } - } else { - warn!("Could not find pdu mentioned in auth events."); + Ok(None) => { + warn!("Could not find pdu mentioned in auth events."); + } + Err(e) => { + warn!("Could not load event in auth chain: {}", e); + } } - Ok(found) + Ok(()) } #[cfg_attr( @@ -1860,12 +1868,8 @@ pub fn get_event_authorization_route( Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids .into_iter() - .map(|id| { - Ok::<_, Error>(PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id)?.unwrap(), - )) - }) - .filter_map(|r| r.ok()) + .filter_map(|id| Some(db.rooms.get_pdu_json(&id).ok()??)) + .map(|event| PduEvent::convert_to_outgoing_federation_event(event)) .collect(), } .into()) From 260db9fcc701f536625a60f1a23b4036c46f5f53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 9 Aug 2021 19:15:14 +0200 Subject: [PATCH 0717/1727] improvement: try to load missing prev events --- src/database/rooms.rs | 18 ++++++++++ src/server_server.rs | 80 ++++++++++++++++++++++++++++--------------- 2 files changed, 71 insertions(+), 27 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 10a6215..0f42235 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -280,6 +280,24 @@ impl Rooms { .is_some()) } + /// Checks if a room exists. + pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + // Look for PDUs in that room. + self.pduid_pdu + .iter_from(&prefix, false) + .filter(|(k, _)| k.starts_with(&prefix)) + .map(|(_, pdu)| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid first PDU in db.")) + .map(Arc::new) + }) + .next() + .transpose() + } + /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` __is__ the current room state snapshot. 
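The new `first_pdu_in_room` works because PDU keys are laid out as the room ID, a 0xff separator, then the event count, so an ordered scan from that prefix yields the room's oldest event first. A standalone sketch of the same prefix-scan idea over an ordered map (types here are illustrative, not the real tree API):

    use std::collections::BTreeMap;

    // Keys are `room_id ++ 0xff ++ big-endian count`, so an ordered scan from
    // the prefix returns the room's PDUs oldest-first.
    fn first_pdu_in_room<'a>(
        pduid_pdu: &'a BTreeMap<Vec<u8>, Vec<u8>>,
        room_id: &str,
    ) -> Option<&'a Vec<u8>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        pduid_pdu
            .range(prefix.clone()..)
            .take_while(|(key, _)| key.starts_with(&prefix))
            .map(|(_, pdu)| pdu)
            .next()
    }
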
diff --git a/src/server_server.rs b/src/server_server.rs index 9a847c3..bf5e4f3 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -272,13 +272,14 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); response.map_err(|e| { - warn!("Invalid 200 response: {}", e); + warn!("Invalid 200 response from {}: {}", &destination, e); Error::BadServerResponse("Server returned bad 200 response.") }) } else { Err(Error::FederationError( destination.to_owned(), - RumaError::try_from_http_response(http_response).map_err(|_| { + RumaError::try_from_http_response(http_response).map_err(|e| { + warn!("Server returned bad error response: {}", e); Error::BadServerResponse("Server returned bad error response.") })?, )) @@ -811,7 +812,7 @@ pub async fn send_transaction_message_route( } /// An async function that can recursively call itself. -type AsyncRecursiveResult<'a, T, E> = Pin> + 'a + Send>>; +type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; /// When receiving an event one needs to: /// 0. Check the server is in the room @@ -836,7 +837,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin( origin: &'a ServerName, @@ -846,7 +847,7 @@ pub fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveResult<'a, Option>, String> { +) -> AsyncRecursiveType<'a, StdResult>, String>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json match db.rooms.exists(&room_id) { @@ -920,9 +921,15 @@ pub fn handle_incoming_pdu<'a>( // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // EDIT: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &room_id, pub_key_map) - .await - .map_err(|e| e.to_string())?; + fetch_and_handle_events( + db, + origin, + &incoming_pdu.auth_events, + &room_id, + pub_key_map, + false, + ) + .await; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events debug!( @@ -1004,10 +1011,28 @@ pub fn handle_incoming_pdu<'a>( debug!("Added pdu as outlier."); // 8. if not timeline event: stop - if !is_timeline_event { + if !is_timeline_event + || incoming_pdu.origin_server_ts + < db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists") + .origin_server_ts + { return Ok(None); } + // Load missing prev events first + fetch_and_handle_events( + db, + origin, + &incoming_pdu.prev_events, + &room_id, + pub_key_map, + true, + ) + .await; + // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities @@ -1034,9 +1059,9 @@ pub fn handle_incoming_pdu<'a>( &state.into_iter().collect::>(), &room_id, pub_key_map, + false, ) .await - .map_err(|_| "Failed to fetch state events locally".to_owned())? 
.into_iter() .map(|pdu| { ( @@ -1081,18 +1106,15 @@ pub fn handle_incoming_pdu<'a>( { Ok(res) => { debug!("Fetching state events at event."); - let state_vec = match fetch_and_handle_events( + let state_vec = fetch_and_handle_events( &db, origin, &res.pdu_ids, &room_id, pub_key_map, + false, ) - .await - { - Ok(state) => state, - Err(_) => return Err("Failed to fetch state events.".to_owned()), - }; + .await; let mut state = HashMap::new(); for pdu in state_vec { @@ -1118,18 +1140,15 @@ pub fn handle_incoming_pdu<'a>( } debug!("Fetching auth chain events at event."); - match fetch_and_handle_events( + fetch_and_handle_events( &db, origin, &res.auth_chain_ids, &room_id, pub_key_map, + false, ) - .await - { - Ok(state) => state, - Err(_) => return Err("Failed to fetch auth chain.".to_owned()), - }; + .await; state_at_incoming_event = Some(state); } @@ -1381,7 +1400,8 @@ pub(crate) fn fetch_and_handle_events<'a>( events: &'a [EventId], room_id: &'a RoomId, pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveResult<'a, Vec>, Error> { + are_timeline_events: bool, +) -> AsyncRecursiveType<'a, Vec>> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { Entry::Vacant(e) => { @@ -1408,7 +1428,12 @@ pub(crate) fn fetch_and_handle_events<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu checks both) - let pdu = match db.rooms.get_pdu(&id) { + let local_pdu = if are_timeline_events { + db.rooms.get_non_outlier_pdu(&id).map(|o| o.map(Arc::new)) + } else { + db.rooms.get_pdu(&id) + }; + let pdu = match local_pdu { Ok(Some(pdu)) => { trace!("Found {} in db", id); pdu @@ -1439,7 +1464,7 @@ pub(crate) fn fetch_and_handle_events<'a>( &event_id, &room_id, value.clone(), - false, + are_timeline_events, db, pub_key_map, ) @@ -1482,7 +1507,7 @@ pub(crate) fn fetch_and_handle_events<'a>( }; pdus.push(pdu); } - Ok(pdus) + pdus }) } @@ -2193,7 +2218,8 @@ pub async fn create_join_event_route( &pub_key_map, ) .await - .map_err(|_| { + .map_err(|e| { + warn!("Error while handling incoming send join PDU: {}", e); Error::BadRequest( ErrorKind::InvalidParam, "Error while handling incoming PDU.", From a7b6af7caf4c4e01f3c0ddbccafb8fe27f2ab4ce Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Mon, 9 Aug 2021 21:21:18 +0200 Subject: [PATCH 0718/1727] Bump version of alpine docker image to 3.14 --- Dockerfile | 12 ++---------- docker-compose.yml | 20 ++++++++++---------- docker/ci-binaries-packaging.Dockerfile | 5 +---- docker/docker-compose.traefik.yml | 10 +++++----- 4 files changed, 18 insertions(+), 29 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0eae25a..68dce3f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ########################## BUILD IMAGE ########################## # Alpine build image to build Conduit's statically compiled binary -FROM alpine:3.12 as builder +FROM alpine:3.14 as builder # Specifies if the local project is build or if Conduit gets build # from the official git repository. Defaults to the git repo. @@ -13,11 +13,6 @@ ARG LOCAL=false # Specifies which revision/commit is build. 
Defaults to HEAD ARG GIT_REF=origin/master -# Add 'edge'-repository to get Rust 1.45 -RUN sed -i \ - -e 's|v3\.12|edge|' \ - /etc/apk/repositories - # Install packages needed for building all crates RUN apk add --no-cache \ cargo \ @@ -36,7 +31,7 @@ RUN if [[ $LOCAL == "true" ]]; then \ ########################## RUNTIME IMAGE ########################## # Create new stage with a minimal image for the actual # runtime image/container -FROM alpine:3.12 +FROM alpine:3.14 ARG CREATED ARG VERSION @@ -86,9 +81,6 @@ RUN apk add --no-cache \ curl \ libgcc -# Create a volume for the database, to persist its contents -VOLUME ["/srv/conduit/.local/share/conduit"] - # Test if Conduit is still alive, uses the same endpoint as Element HEALTHCHECK --start-period=5s \ CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ diff --git a/docker-compose.yml b/docker-compose.yml index d643709..3f8f832 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,19 +3,19 @@ version: '3' services: homeserver: - ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### If you already built the Conduit image with 'docker build' or want to use a registry image, ### then you are ready to go. - #image: matrixconduit/matrix-conduit:latest + image: matrixconduit/matrix-conduit:latest ### If you want to build a fresh image from the sources, then comment the image line and uncomment the ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - build: - context: . - args: - CREATED: '2021-03-16T08:18:27Z' - VERSION: '0.1.0' - LOCAL: 'false' - GIT_REF: origin/master + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master restart: unless-stopped ports: - 8448:6167 @@ -28,7 +28,7 @@ services: CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 127.0.0.1 + # CONDUIT_ADDRESS: 0.0.0.0 # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 43ebc98..797ef0c 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,7 +7,7 @@ # Credit's for the original Dockerfile: Weasy666. 
# --------------------------------------------------------------------------------------------------------- -FROM alpine:3.12 +FROM alpine:3.14 ARG CREATED ARG VERSION @@ -52,9 +52,6 @@ RUN apk add --no-cache \ curl \ libgcc -# Create a volume for the database, to persist its contents -VOLUME ["/srv/conduit/.local/share/conduit"] - # Test if Conduit is still alive, uses the same endpoint as Element HEALTHCHECK --start-period=5s \ CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 3b36d10..58fa3ed 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -12,8 +12,8 @@ services: # build: # context: . # args: - # CREATED: - # VERSION: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' # LOCAL: 'false' # GIT_REF: origin/master restart: unless-stopped @@ -28,9 +28,9 @@ services: CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 127.0.0.1 + # CONDUIT_ADDRESS: 0.0.0.0 # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string '' + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' @@ -59,7 +59,7 @@ volumes: db: networks: - # This is the network Traefik listens to, if you network has a different + # This is the network Traefik listens to, if your network has a different # name, don't forget to change it here and in the docker-compose.override.yml proxy: external: true From 879aeafa9e1e81e3816962c50d413d62e92d5840 Mon Sep 17 00:00:00 2001 From: Ahmed Charles Date: Mon, 9 Aug 2021 12:29:35 -0700 Subject: [PATCH 0719/1727] Add support for a minimal `r0/account/3pid`. --- src/client_server/account.rs | 12 +++++++----- src/main.rs | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 87e3731..5d399b8 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -14,6 +14,7 @@ use ruma::{ change_password, deactivate, get_username_availability, register, whoami, ThirdPartyIdRemovalStatus, }, + contact::get_contacts, uiaa::{AuthFlow, UiaaInfo}, }, }, @@ -715,16 +716,17 @@ pub async fn deactivate_route( .into()) } -/*/ +/// # `GET _matrix/client/r0/account/3pid` +/// +/// Get a list of third party identifiers associated with this account. 
#[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/account/3pid", data = "") )] pub async fn third_party_route( - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(account::add_3pid::Response::default().into()) + Ok(get_contacts::Response::new(Vec::new()).into()) } -*/ diff --git a/src/main.rs b/src/main.rs index 9f6cced..5a6f8c7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -50,6 +50,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::logout_all_route, client_server::change_password_route, client_server::deactivate_route, + client_server::third_party_route, client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, From 8335f44bfe80f9be71928eb5000f84d16344dfb1 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 10 Aug 2021 05:43:44 +0000 Subject: [PATCH 0720/1727] Set nicer displayname --- src/client_server/account.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 87e3731..d11a2e0 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -192,6 +192,11 @@ pub async fn register_route( // Create user db.users.create(&user_id, password)?; + let displayname = format!("{} ⚡️", user_id.localpart()); + + db.users + .set_displayname(&user_id, Some(displayname.clone()))?; + // Initial data db.account_data.update( None, @@ -465,7 +470,7 @@ pub async fn register_route( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: None, + displayname: Some(displayname), avatar_url: None, is_direct: None, third_party_invite: None, From 4cf3c432afc24aa07aaa8deab4dd084f10508d41 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 12 Aug 2021 14:02:46 +0200 Subject: [PATCH 0721/1727] Try to set canonical room alias on room creation. The spec does not require servers to apply a room canonical alias event upon room creation (yet). Still, synapse does that, since users can set their desired alias in Elements room creation dialog. With this commit, conduit also sets that alias if it is a valid one. 
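The alias the server tries to set is simply `#<localpart>:<server_name>`, validated through ruma before any canonical-alias event is built; a minimal sketch of that check (the `ruma::RoomAliasId` conversion is assumed to match the version pinned in this tree):

    use std::convert::TryFrom;
    use ruma::RoomAliasId;

    // Build "#<localpart>:<server_name>" and let ruma validate it; if parsing
    // fails, no canonical alias event should be sent at room creation.
    fn desired_alias(localpart: &str, server_name: &str) -> Option<RoomAliasId> {
        RoomAliasId::try_from(format!("#{}:{}", localpart, server_name)).ok()
    }
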
This closes https://gitlab.com/famedly/conduit/-/issues/123 --- src/client_server/room.rs | 72 ++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 24 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index cc7dba6..89241f5 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -43,24 +43,24 @@ pub async fn create_room_route( ); let state_lock = mutex_state.lock().await; - let alias = body - .room_alias_name - .as_ref() - .map_or(Ok(None), |localpart| { - // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let alias: Option = + body.room_alias_name + .as_ref() + .map_or(Ok(None), |localpart| { + // TODO: Check for invalid characters and maximum length + let alias = + RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - if db.rooms.id_from_alias(&alias)?.is_some() { - Err(Error::BadRequest( - ErrorKind::RoomInUse, - "Room alias already exists.", - )) - } else { - Ok(Some(alias)) - } - })?; + if db.rooms.id_from_alias(&alias)?.is_some() { + Err(Error::BadRequest( + ErrorKind::RoomInUse, + "Room alias already exists.", + )) + } else { + Ok(Some(alias)) + } + })?; let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone()); content.federate = body.creation_content.federate; @@ -172,9 +172,33 @@ pub async fn create_room_route( &state_lock, )?; - // 4. Events set by preset + // 4. Canonical room alias - // 4.1 Join Rules + if let Some(room_alias_id) = &alias { + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCanonicalAlias, + content: serde_json::to_value( + ruma::events::room::canonical_alias::CanonicalAliasEventContent { + alias: Some(room_alias_id.clone()), + alt_aliases: vec![], + }, + ) + .expect("We checked that alias earlier, it must be fine"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &sender_user, + &room_id, + &db, + &state_lock, + ); + } + + // 5. Events set by preset + + // 5.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomJoinRules, @@ -199,7 +223,7 @@ pub async fn create_room_route( &state_lock, )?; - // 4.2 History Visibility + // 5.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomHistoryVisibility, @@ -217,7 +241,7 @@ pub async fn create_room_route( &state_lock, )?; - // 4.3 Guest Access + // 5.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomGuestAccess, @@ -243,7 +267,7 @@ pub async fn create_room_route( &state_lock, )?; - // 5. Events listed in initial_state + // 6. Events listed in initial_state for event in &body.initial_state { let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| { warn!("Invalid initial state event: {:?}", e); @@ -259,7 +283,7 @@ pub async fn create_room_route( .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock)?; } - // 6. Events implied by name and topic + // 7. Events implied by name and topic if let Some(name) = &body.name { db.rooms.build_and_append_pdu( PduBuilder { @@ -296,7 +320,7 @@ pub async fn create_room_route( )?; } - // 7. Events implied by invite (and TODO: invite_3pid) + // 8. 
Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; From 096e0971f1da629adeec53bbac0fd775cc46fa79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 11 Aug 2021 10:24:16 +0200 Subject: [PATCH 0722/1727] improvement: smaller cache, better prev event fetching --- src/database.rs | 4 ++-- src/server_server.rs | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/database.rs b/src/database.rs index bdff386..1bf9434 100644 --- a/src/database.rs +++ b/src/database.rs @@ -270,8 +270,8 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new(1_000_000)), - auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), + pdu_cache: Mutex::new(LruCache::new(100_000)), + auth_chain_cache: Mutex::new(LruCache::new(100_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/server_server.rs b/src/server_server.rs index bf5e4f3..68adcd0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1022,7 +1022,7 @@ pub fn handle_incoming_pdu<'a>( return Ok(None); } - // Load missing prev events first + // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fetch_and_handle_events( db, origin, @@ -1033,8 +1033,6 @@ pub fn handle_incoming_pdu<'a>( ) .await; - // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities // doing all the checks in this list starting at 1. These are not timeline events. From c2c6a8673e2a8a3fca079e20692c2da76e9622d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 11 Aug 2021 19:15:38 +0200 Subject: [PATCH 0723/1727] improvement: use u64s in auth chain cache --- src/database/rooms.rs | 230 ++++++++++++++++++------------------------ src/server_server.rs | 43 +++++--- 2 files changed, 131 insertions(+), 142 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0f42235..c53fa9e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -88,7 +88,7 @@ pub struct Rooms { pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, HashSet>>, + pub(super) auth_chain_cache: Mutex>>, } impl Rooms { @@ -315,19 +315,7 @@ impl Rooms { ); let (shortstatehash, already_existed) = - match self.statehash_shortstatehash.get(&state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = db.globals.next_count()?; - self.statehash_shortstatehash - .insert(&state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }; + self.get_or_create_shortstatehash(&state_hash, &db.globals)?; let new_state = if !already_existed { let mut new_state = HashSet::new(); @@ -352,25 +340,14 @@ impl Rooms { } }; - let shorteventid = - match self.eventid_shorteventid.get(eventid.as_bytes()).ok()? 
{ - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = db.globals.next_count().ok()?; - self.eventid_shorteventid - .insert(eventid.as_bytes(), &shorteventid.to_be_bytes()) - .ok()?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), eventid.as_bytes()) - .ok()?; - shorteventid.to_be_bytes().to_vec() - } - }; + let shorteventid = self + .get_or_create_shorteventid(&eventid, &db.globals) + .ok()?; let mut state_id = shortstatehash.to_be_bytes().to_vec(); state_id.extend_from_slice(&shortstatekey); - Some((state_id, shorteventid)) + Some((state_id, shorteventid.to_be_bytes().to_vec())) }) .collect::>(); @@ -428,6 +405,61 @@ impl Rooms { Ok(()) } + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( + &self, + state_hash: &StateHashId, + globals: &super::globals::Globals, + ) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }) + } + + /// Returns (shortstatehash, already_existed) + pub fn get_or_create_shorteventid( + &self, + event_id: &EventId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => utils::u64_from_bytes(&shorteventid) + .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid + } + }) + } + + pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { + let bytes = self + .shorteventid_eventid + .get(&shorteventid.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + + EventId::try_from( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + } + /// Returns the full room state. #[tracing::instrument(skip(self))] pub fn room_state_full( @@ -1116,17 +1148,7 @@ impl Rooms { state: &StateMap>, globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; + let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?; let state_hash = self.calculate_hash( &state @@ -1135,69 +1157,45 @@ impl Rooms { .collect::>(), ); - let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? 
{ - Some(shortstatehash) => { - // State already existed in db - self.shorteventid_shortstatehash - .insert(&shorteventid, &*shortstatehash)?; - return Ok(()); - } - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(&state_hash, &shortstatehash.to_be_bytes())?; - shortstatehash.to_be_bytes().to_vec() - } - }; + let (shortstatehash, already_existed) = + self.get_or_create_shortstatehash(&state_hash, globals)?; - let batch = state - .iter() - .filter_map(|((event_type, state_key), pdu)| { - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + if !already_existed { + let batch = state + .iter() + .filter_map(|((event_type, state_key), pdu)| { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); - let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? { - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = globals.next_count().ok()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes()) - .ok()?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? { + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count().ok()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes()) + .ok()?; + shortstatekey.to_be_bytes().to_vec() + } + }; - let shorteventid = match self - .eventid_shorteventid - .get(pdu.event_id.as_bytes()) - .ok()? - { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count().ok()?; - self.eventid_shorteventid - .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes()) - .ok()?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes()) - .ok()?; - shorteventid.to_be_bytes().to_vec() - } - }; + let shorteventid = self + .get_or_create_shorteventid(&pdu.event_id, globals) + .ok()?; - let mut state_id = shortstatehash.clone(); - state_id.extend_from_slice(&shortstatekey); + let mut state_id = shortstatehash.to_be_bytes().to_vec(); + state_id.extend_from_slice(&shortstatekey); - Some((state_id, shorteventid)) - }) - .collect::>(); + Some((state_id, shorteventid.to_be_bytes().to_vec())) + }) + .collect::>(); - self.stateid_shorteventid - .insert_batch(&mut batch.into_iter())?; + self.stateid_shorteventid + .insert_batch(&mut batch.into_iter())?; + } self.shorteventid_shortstatehash - .insert(&shorteventid, &*shortstatehash)?; + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -1212,26 +1210,16 @@ impl Rooms { new_pdu: &PduEvent, globals: &super::globals::Globals, ) -> Result { + let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; + let old_state = if let Some(old_shortstatehash) = self.roomid_shortstatehash.get(new_pdu.room_id.as_bytes())? { // Store state for event. The state does not include the event itself. // Instead it's the state before the pdu, so the room's old state. - - let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? 
{ - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; - self.shorteventid_shortstatehash - .insert(&shorteventid, &old_shortstatehash)?; + .insert(&shorteventid.to_be_bytes(), &old_shortstatehash)?; + if new_pdu.state_key.is_none() { return utils::u64_from_bytes(&old_shortstatehash).map_err(|_| { Error::bad_database("Invalid shortstatehash in roomid_shortstatehash.") @@ -1264,19 +1252,7 @@ impl Rooms { } }; - let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? { - Some(shorteventid) => shorteventid.to_vec(), - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; - shorteventid.to_be_bytes().to_vec() - } - }; - - new_state.insert(shortstatekey, shorteventid); + new_state.insert(shortstatekey, shorteventid.to_be_bytes().to_vec()); let new_state_hash = self.calculate_hash( &new_state @@ -1516,11 +1492,7 @@ impl Rooms { ); // Generate short event id - let shorteventid = db.globals.next_count()?; - self.eventid_shorteventid - .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; + let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -2655,9 +2627,7 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn auth_chain_cache( - &self, - ) -> std::sync::MutexGuard<'_, LruCache, HashSet>> { + pub fn auth_chain_cache(&self) -> std::sync::MutexGuard<'_, LruCache>> { self.auth_chain_cache.lock().unwrap() } } diff --git a/src/server_server.rs b/src/server_server.rs index 68adcd0..23c80ee 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1044,13 +1044,16 @@ pub fn handle_incoming_pdu<'a>( if incoming_pdu.prev_events.len() == 1 { let prev_event = &incoming_pdu.prev_events[0]; - let state = db + let prev_event_sstatehash = db .rooms .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())? - .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) - .flatten(); - if let Some(state) = state { + .map_err(|_| "Failed talking to db".to_owned())?; + + let state = + prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); + + if let Some(Ok(state)) = state { + warn!("Using cached state"); let mut state = fetch_and_handle_events( db, origin, @@ -1088,6 +1091,7 @@ pub fn handle_incoming_pdu<'a>( } if state_at_incoming_event.is_none() { + warn!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match db @@ -1755,35 +1759,50 @@ fn append_incoming_pdu( fn get_auth_chain(starting_events: Vec, db: &Database) -> Result> { let mut full_auth_chain = HashSet::new(); + let starting_events = starting_events + .iter() + .map(|id| { + (db.rooms + .get_or_create_shorteventid(id, &db.globals) + .map(|s| (s, id))) + }) + .collect::>>()?; + let mut cache = db.rooms.auth_chain_cache(); - for event_id in &starting_events { - if let Some(cached) = cache.get_mut(&[event_id.clone()][..]) { + for (sevent_id, event_id) in starting_events { + if let Some(cached) = cache.get_mut(&sevent_id) { full_auth_chain.extend(cached.iter().cloned()); } else { drop(cache); let mut auth_chain = HashSet::new(); get_auth_chain_recursive(&event_id, &mut auth_chain, db)?; cache = db.rooms.auth_chain_cache(); - cache.insert(vec![event_id.clone()], auth_chain.clone()); + cache.insert(sevent_id, auth_chain.clone()); full_auth_chain.extend(auth_chain); }; } - Ok(full_auth_chain) + full_auth_chain + .into_iter() + .map(|sid| db.rooms.get_eventid_from_short(sid)) + .collect() } fn get_auth_chain_recursive( event_id: &EventId, - found: &mut HashSet, + found: &mut HashSet, db: &Database, ) -> Result<()> { let r = db.rooms.get_pdu(&event_id); match r { Ok(Some(pdu)) => { for auth_event in &pdu.auth_events { - if !found.contains(auth_event) { - found.insert(auth_event.clone()); + let sauthevent = db + .rooms + .get_or_create_shorteventid(auth_event, &db.globals)?; + if !found.contains(&sauthevent) { + found.insert(sauthevent); get_auth_chain_recursive(&auth_event, found, db)?; } } From 5173d0deb59a4bdaf03026f6894610e396ddbec4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 11 Aug 2021 21:14:22 +0200 Subject: [PATCH 0724/1727] improvement: cache for short event ids --- src/database.rs | 1 + src/database/rooms.rs | 16 ++++++++++++++-- src/server_server.rs | 20 +++++++++----------- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/database.rs b/src/database.rs index 1bf9434..7996057 100644 --- a/src/database.rs +++ b/src/database.rs @@ -272,6 +272,7 @@ impl Database { referencedevents: builder.open_tree("referencedevents")?, pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)), + shorteventid_cache: Mutex::new(LruCache::new(100_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c53fa9e..246aa0b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -89,6 +89,7 @@ pub struct Rooms { pub(super) pdu_cache: Mutex>>, pub(super) auth_chain_cache: Mutex>>, + pub(super) shorteventid_cache: Mutex>, } impl Rooms { @@ -447,17 +448,28 @@ impl Rooms { } pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { + if let Some(id) = self.shorteventid_cache.lock().unwrap().get_mut(&shorteventid) { + return Ok(id.clone()); + } + let bytes = self .shorteventid_eventid .get(&shorteventid.to_be_bytes())? 
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - EventId::try_from( + let event_id = EventId::try_from( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?; + + self.shorteventid_cache + .lock() + .unwrap() + .insert(shorteventid, event_id.clone()); + + Ok(event_id) } /// Returns the full room state. diff --git a/src/server_server.rs b/src/server_server.rs index 23c80ee..0b9a7e6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1311,7 +1311,7 @@ pub fn handle_incoming_pdu<'a>( for state in fork_states { auth_chain_sets.push( get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())?, + .map_err(|_| "Failed to load auth chain.".to_owned())?.collect(), ); } @@ -1756,15 +1756,15 @@ fn append_incoming_pdu( Ok(pdu_id) } -fn get_auth_chain(starting_events: Vec, db: &Database) -> Result> { +fn get_auth_chain(starting_events: Vec, db: &Database) -> Result + '_> { let mut full_auth_chain = HashSet::new(); let starting_events = starting_events .iter() .map(|id| { - (db.rooms + db.rooms .get_or_create_shorteventid(id, &db.globals) - .map(|s| (s, id))) + .map(|s| (s, id)) }) .collect::>>()?; @@ -1783,10 +1783,11 @@ fn get_auth_chain(starting_events: Vec, db: &Database) -> Result(PduEvent::convert_to_outgoing_federation_event( db.rooms.get_pdu_json(&id)?.unwrap(), @@ -1996,7 +1995,7 @@ pub fn get_room_state_ids_route( let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.into_iter().collect(), + auth_chain_ids: auth_chain_ids.collect(), pdu_ids, } .into()) @@ -2265,7 +2264,6 @@ pub async fn create_join_event_route( Ok(create_join_event::v2::Response { room_state: RoomState { auth_chain: auth_chain_ids - .iter() .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), From 665aee11c04672a1c928d381b7b691e41785c7e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 11 Aug 2021 21:17:01 +0200 Subject: [PATCH 0725/1727] less warnings --- src/database/abstraction/sqlite.rs | 39 +----------------------------- src/server_server.rs | 3 ++- 2 files changed, 3 insertions(+), 39 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 72fb5f7..f420021 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -12,9 +12,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::oneshot::Sender; -use tracing::{debug, warn}; - -pub const MILLI: Duration = Duration::from_millis(1); +use tracing::debug; thread_local! 
{ static READ_CONNECTION: RefCell> = RefCell::new(None); @@ -164,16 +162,7 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); - - let start = Instant::now(); - self.insert_with_guard(&guard, key, value)?; - - let elapsed = start.elapsed(); - if elapsed > MILLI { - warn!("insert took {:?} : {}", elapsed, &self.name); - } - drop(guard); let watchers = self.watchers.read(); @@ -220,20 +209,11 @@ impl Tree for SqliteTable { fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); - let start = Instant::now(); - guard.execute( format!("DELETE FROM {} WHERE key = ?", self.name).as_str(), [key], )?; - let elapsed = start.elapsed(); - - if elapsed > MILLI { - debug!("remove: took {:012?} : {}", elapsed, &self.name); - } - // debug!("remove key: {:?}", &key); - Ok(()) } @@ -326,8 +306,6 @@ impl Tree for SqliteTable { fn increment(&self, key: &[u8]) -> Result> { let guard = self.engine.write_lock(); - let start = Instant::now(); - let old = self.get_with_guard(&guard, key)?; let new = @@ -335,26 +313,11 @@ impl Tree for SqliteTable { self.insert_with_guard(&guard, key, &new)?; - let elapsed = start.elapsed(); - - if elapsed > MILLI { - debug!("increment: took {:012?} : {}", elapsed, &self.name); - } - // debug!("increment key: {:?}", &key); - Ok(new) } #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { - // let name = self.name.clone(); - // self.iter_from_thread( - // format!( - // "SELECT key, value FROM {} WHERE key BETWEEN ?1 AND ?1 || X'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' ORDER BY key ASC", - // name - // ) - // [prefix] - // ) Box::new( self.iter_from(&prefix, false) .take_while(move |(key, _)| key.starts_with(&prefix)), diff --git a/src/server_server.rs b/src/server_server.rs index 0b9a7e6..a4c90a7 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -721,7 +721,8 @@ pub async fn send_transaction_message_route( &db.globals, )?; } else { - warn!("No known event ids in read receipt: {:?}", user_updates); + // TODO fetch missing events + debug!("No known event ids in read receipt: {:?}", user_updates); } } } From 9410d3ef9cc2a19096149ac69ba3422231c760b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 12 Aug 2021 17:55:16 +0200 Subject: [PATCH 0726/1727] fix: long prev event fetch times for huge rooms --- src/database.rs | 2 +- src/database/rooms.rs | 49 ++++++++++++++++++------------------------- src/server_server.rs | 36 ++++++++++++++++++++++++------- 3 files changed, 49 insertions(+), 38 deletions(-) diff --git a/src/database.rs b/src/database.rs index 7996057..7a17e53 100644 --- a/src/database.rs +++ b/src/database.rs @@ -272,7 +272,7 @@ impl Database { referencedevents: builder.open_tree("referencedevents")?, pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)), - shorteventid_cache: Mutex::new(LruCache::new(100_000)), + shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 246aa0b..88878e9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -99,15 +99,11 @@ impl Rooms { Ok(self .stateid_shorteventid .scan_prefix(shortstatehash.to_be_bytes().to_vec()) - .map(|(_, bytes)| 
self.shorteventid_eventid.get(&bytes).ok().flatten()) - .flatten() - .map(|bytes| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) + .map(|(_, bytes)| { + self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) + .ok() }) - .filter_map(|r| r.ok()) + .flatten() .collect()) } @@ -118,15 +114,11 @@ impl Rooms { let state = self .stateid_shorteventid .scan_prefix(shortstatehash.to_be_bytes().to_vec()) - .map(|(_, bytes)| self.shorteventid_eventid.get(&bytes).ok().flatten()) - .flatten() - .map(|bytes| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) + .map(|(_, bytes)| { + self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) + .ok() }) - .filter_map(|r| r.ok()) + .flatten() .map(|eventid| self.get_pdu(&eventid)) .filter_map(|r| r.ok().flatten()) .map(|pdu| { @@ -168,15 +160,10 @@ impl Rooms { Ok(self .stateid_shorteventid .get(&stateid)? - .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) - .flatten() .map(|bytes| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in stateid_shorteventid is invalid.")) + self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) + .ok() }) - .map(|r| r.ok()) .flatten()) } else { Ok(None) @@ -448,7 +435,12 @@ impl Rooms { } pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { - if let Some(id) = self.shorteventid_cache.lock().unwrap().get_mut(&shorteventid) { + if let Some(id) = self + .shorteventid_cache + .lock() + .unwrap() + .get_mut(&shorteventid) + { return Ok(id.clone()); } @@ -457,12 +449,11 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = EventId::try_from( - utils::string_from_bytes(&bytes).map_err(|_| { + let event_id = + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?; + })?) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?; self.shorteventid_cache .lock() diff --git a/src/server_server.rs b/src/server_server.rs index a4c90a7..b3f0353 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -668,7 +668,7 @@ pub async fn send_transaction_message_route( let elapsed = start_time.elapsed(); warn!( - "Handling event {} took {}m{}s", + "Handling transaction of event {} took {}m{}s", event_id, elapsed.as_secs() / 60, elapsed.as_secs() % 60 @@ -850,6 +850,8 @@ pub fn handle_incoming_pdu<'a>( pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, StdResult>, String>> { Box::pin(async move { + let start_time = Instant::now(); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json match db.rooms.exists(&room_id) { Ok(true) => {} @@ -1014,12 +1016,18 @@ pub fn handle_incoming_pdu<'a>( // 8. 
if not timeline event: stop if !is_timeline_event || incoming_pdu.origin_server_ts - < db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists") - .origin_server_ts + < (utils::millis_since_unix_epoch() - 1000 * 60 * 20) + .try_into() + .expect("time is valid") + // Not older than 20 mins { + let elapsed = start_time.elapsed(); + warn!( + "Handling outlier event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); return Ok(None); } @@ -1312,7 +1320,8 @@ pub fn handle_incoming_pdu<'a>( for state in fork_states { auth_chain_sets.push( get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())?.collect(), + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } @@ -1385,6 +1394,14 @@ pub fn handle_incoming_pdu<'a>( // Event has passed all auth/stateres checks drop(state_lock); + + let elapsed = start_time.elapsed(); + warn!( + "Handling timeline event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); Ok(pdu_id) }) } @@ -1757,7 +1774,10 @@ fn append_incoming_pdu( Ok(pdu_id) } -fn get_auth_chain(starting_events: Vec, db: &Database) -> Result + '_> { +fn get_auth_chain( + starting_events: Vec, + db: &Database, +) -> Result + '_> { let mut full_auth_chain = HashSet::new(); let starting_events = starting_events From 41dd620d74276a4619bdc878307b136a5710bbae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 1 Aug 2021 15:14:54 +0200 Subject: [PATCH 0727/1727] WIP improvement: much better state storage --- src/database.rs | 338 ++++++++++++++++++++++++++++- src/database/abstraction/sqlite.rs | 5 +- src/database/rooms.rs | 12 +- 3 files changed, 345 insertions(+), 10 deletions(-) diff --git a/src/database.rs b/src/database.rs index 7a17e53..e0f9eec 100644 --- a/src/database.rs +++ b/src/database.rs @@ -24,13 +24,14 @@ use rocket::{ request::{FromRequest, Request}, Shutdown, State, }; -use ruma::{DeviceId, RoomId, ServerName, UserId}; +use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, fs::{self, remove_dir_all}, io::Write, + mem::size_of, ops::Deref, path::Path, sync::{Arc, Mutex, RwLock}, @@ -261,7 +262,12 @@ impl Database { userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + + shortroomid_roomid: builder.open_tree("shortroomid_roomid")?, + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + stateid_shorteventid: builder.open_tree("stateid_shorteventid")?, + shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, @@ -438,6 +444,334 @@ impl Database { println!("Migration: 5 -> 6 finished"); } + + fn load_shortstatehash_info( + shortstatehash: &[u8], + db: &Database, + lru: &mut LruCache< + Vec, + Vec<( + Vec, + HashSet>, + HashSet>, + HashSet>, + )>, + >, + ) -> Result< + Vec<( + Vec, // sstatehash + HashSet>, // full state + HashSet>, // added + HashSet>, // removed + )>, + > { + if let Some(result) = lru.get_mut(shortstatehash) { + return Ok(result.clone()); + } + + let 
value = db + .rooms + .shortstatehash_statediff + .get(shortstatehash)? + .ok_or_else(|| Error::bad_database("State hash does not exist"))?; + let parent = value[0..size_of::()].to_vec(); + + let mut add_mode = true; + let mut added = HashSet::new(); + let mut removed = HashSet::new(); + + let mut i = size_of::(); + while let Some(v) = value.get(i..i + 2 * size_of::()) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i += size_of::(); + continue; + } + if add_mode { + added.insert(v.to_vec()); + } else { + removed.insert(v.to_vec()); + } + i += 2 * size_of::(); + } + + if parent != 0_u64.to_be_bytes() { + let mut response = load_shortstatehash_info(&parent, db, lru)?; + let mut state = response.last().unwrap().1.clone(); + state.extend(added.iter().cloned()); + for r in &removed { + state.remove(r); + } + + response.push((shortstatehash.to_vec(), state, added, removed)); + + lru.insert(shortstatehash.to_vec(), response.clone()); + Ok(response) + } else { + let mut response = Vec::new(); + response.push((shortstatehash.to_vec(), added.clone(), added, removed)); + lru.insert(shortstatehash.to_vec(), response.clone()); + Ok(response) + } + } + + fn update_shortstatehash_level( + current_shortstatehash: &[u8], + statediffnew: HashSet>, + statediffremoved: HashSet>, + diff_to_sibling: usize, + mut parent_states: Vec<( + Vec, // sstatehash + HashSet>, // full state + HashSet>, // added + HashSet>, // removed + )>, + db: &Database, + ) -> Result<()> { + let diffsum = statediffnew.len() + statediffremoved.len(); + + if parent_states.len() > 3 { + // Number of layers + // To many layers, we have to go deeper + let parent = parent_states.pop().unwrap(); + + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + parent_removed.insert(removed); + } + } + parent_new.extend(statediffnew); + + update_shortstatehash_level( + current_shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + db, + )?; + + return Ok(()); + } + + if parent_states.len() == 0 { + // There is no parent layer, create a new state + let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent + for new in &statediffnew { + value.extend_from_slice(&new); + } + + if !statediffremoved.is_empty() { + warn!("Tried to create new state with removals"); + } + + db.rooms + .shortstatehash_statediff + .insert(¤t_shortstatehash, &value)?; + + return Ok(()); + }; + + // Else we have two options. + // 1. We add the current diff on top of the parent layer. + // 2. 
We replace a layer above + + let parent = parent_states.pop().unwrap(); + let parent_diff = parent.2.len() + parent.3.len(); + + if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { + // Diff too big, we replace above layer(s) + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + parent_removed.insert(removed); + } + } + + parent_new.extend(statediffnew); + update_shortstatehash_level( + current_shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + db, + )?; + } else { + // Diff small enough, we add diff as layer on top of parent + let mut value = parent.0.clone(); + for new in &statediffnew { + value.extend_from_slice(&new); + } + + if !statediffremoved.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in &statediffremoved { + value.extend_from_slice(&removed); + } + } + + db.rooms + .shortstatehash_statediff + .insert(¤t_shortstatehash, &value)?; + } + + Ok(()) + } + + if db.globals.database_version()? < 7 { + // Upgrade state store + let mut lru = LruCache::new(1000); + let mut last_roomstates: HashMap> = HashMap::new(); + let mut current_sstatehash: Vec = Vec::new(); + let mut current_room = None; + let mut current_state = HashSet::new(); + let mut counter = 0; + for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { + let sstatehash = k[0..size_of::()].to_vec(); + let sstatekey = k[size_of::()..].to_vec(); + if sstatehash != current_sstatehash { + if !current_sstatehash.is_empty() { + counter += 1; + println!("counter: {}", counter); + let current_room = current_room.as_ref().unwrap(); + let last_roomsstatehash = last_roomstates.get(¤t_room); + + let states_parents = last_roomsstatehash.map_or_else( + || Ok(Vec::new()), + |last_roomsstatehash| { + load_shortstatehash_info(&last_roomsstatehash, &db, &mut lru) + }, + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew = current_state + .difference(&parent_stateinfo.1) + .cloned() + .collect::>(); + + let statediffremoved = parent_stateinfo + .1 + .difference(¤t_state) + .cloned() + .collect::>(); + + (statediffnew, statediffremoved) + } else { + (current_state, HashSet::new()) + }; + + update_shortstatehash_level( + ¤t_sstatehash, + statediffnew, + statediffremoved, + 2, // every state change is 2 event changes on average + states_parents, + &db, + )?; + + /* + let mut tmp = load_shortstatehash_info(¤t_sstatehash, &db)?; + let state = tmp.pop().unwrap(); + println!( + "{}\t{}{:?}: {:?} + {:?} - {:?}", + current_room, + " ".repeat(tmp.len()), + utils::u64_from_bytes(¤t_sstatehash).unwrap(), + tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), + state + .2 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>(), + state + .3 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>() + ); + */ + + last_roomstates.insert(current_room.clone(), current_sstatehash); + } + current_state = HashSet::new(); + current_sstatehash = sstatehash; + + let event_id = db + .rooms + .shorteventid_eventid + .get(&seventid) + .unwrap() + .unwrap(); + let event_id = + EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) + .unwrap(); + let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); + + if Some(&pdu.room_id) != current_room.as_ref() { + current_room = Some(pdu.room_id.clone()); + } + } + + let mut val = sstatekey; + 
val.extend_from_slice(&seventid); + current_state.insert(val); + } + + db.globals.bump_database_version(7)?; + + println!("Migration: 6 -> 7 finished"); + } + + if db.globals.database_version()? < 8 { + // Generate short room ids for all rooms + for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { + let shortroomid = db.globals.next_count()?.to_be_bytes(); + db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; + db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?; + } + // Update pduids db layout + for (key, v) in db.rooms.pduid_pdu.iter() { + let mut parts = key.splitn(2, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db.rooms.roomid_shortroomid.get(&room_id)?.unwrap(); + + let mut new_key = short_room_id; + new_key.extend_from_slice(count); + + println!("{:?}", new_key); + } + + // Update tokenids db layout + for (key, _) in db.rooms.tokenids.iter() { + let mut parts = key.splitn(4, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let word = parts.next().unwrap(); + let _pdu_id_room = parts.next().unwrap(); + let pdu_id_count = parts.next().unwrap(); + + let short_room_id = db.rooms.roomid_shortroomid.get(&room_id)?.unwrap(); + let mut new_key = short_room_id; + new_key.extend_from_slice(word); + new_key.push(0xff); + new_key.extend_from_slice(pdu_id_count); + println!("{:?}", new_key); + } + + db.globals.bump_database_version(8)?; + + println!("Migration: 7 -> 8 finished"); + } + + panic!(); } let guard = db.read().await; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f420021..f37dd9e 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -223,10 +223,7 @@ impl Tree for SqliteTable { let statement = Box::leak(Box::new( guard - .prepare(&format!( - "SELECT key, value FROM {} ORDER BY key ASC", - &self.name - )) + .prepare(&format!("SELECT key, value FROM {} ORDER BY key ASC", &self.name)) .unwrap(), )); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88878e9..a3a1c41 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -47,7 +47,7 @@ pub struct Rooms { pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count pub(super) publicroomids: Arc, - pub(super) tokenids: Arc, // TokenId = RoomId + Token + PduId + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount /// Participating servers in a room. pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName @@ -71,14 +71,18 @@ pub struct Rooms { pub(super) shorteventid_shortstatehash: Arc, /// StateKey = EventType + StateKey, ShortStateKey = Count pub(super) statekey_shortstatekey: Arc, + + pub(super) shortroomid_roomid: Arc, + pub(super) roomid_shortroomid: Arc, + pub(super) shorteventid_eventid: Arc, - /// ShortEventId = Count pub(super) eventid_shorteventid: Arc, - /// ShortEventId = Count + pub(super) statehash_shortstatehash: Arc, /// ShortStateHash = Count - /// StateId = ShortStateHash + ShortStateKey + /// StateId = ShortStateHash pub(super) stateid_shorteventid: Arc, + pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
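
The StateDiff value format introduced in the patch above stores, for each shortstatehash, a parent shortstatehash (0 meaning no parent), then the added shortstatekey+shorteventid pairs, a 0_u64 separator, then the removed pairs. A full state snapshot is therefore recovered by walking the parent chain and replaying each layer's additions and removals, which is what load_shortstatehash_info does against the database. Below is a minimal, self-contained sketch of that resolution step only; StateDiffLayer and resolve_full_state are illustrative names for this sketch and are not part of Conduit's API.

// Sketch of resolving a full state snapshot from the layered StateDiff format.
// StateDiffLayer and resolve_full_state are hypothetical names, not Conduit code.
use std::collections::{HashMap, HashSet};

/// shortstatekey (u64, big-endian) followed by shorteventid (u64, big-endian).
type CompressedStateEvent = [u8; 16];

/// One decoded shortstatehash_statediff value.
struct StateDiffLayer {
    parent: u64, // 0 means "no parent layer"
    added: HashSet<CompressedStateEvent>,
    removed: HashSet<CompressedStateEvent>,
}

/// Walk the parent chain and replay each layer's additions/removals,
/// mirroring what load_shortstatehash_info does when it recurses on `parent`.
fn resolve_full_state(
    shortstatehash: u64,
    layers: &HashMap<u64, StateDiffLayer>,
) -> HashSet<CompressedStateEvent> {
    let layer = layers.get(&shortstatehash).expect("layer exists");
    let mut state = if layer.parent != 0 {
        resolve_full_state(layer.parent, layers)
    } else {
        HashSet::new()
    };
    state.extend(layer.added.iter().copied());
    for gone in &layer.removed {
        state.remove(gone);
    }
    state
}

fn main() {
    // Layer 1 is a root snapshot; layer 2 adds one event and removes one of layer 1's.
    let mut layers = HashMap::new();
    layers.insert(
        1,
        StateDiffLayer {
            parent: 0,
            added: [[1u8; 16], [2u8; 16]].into(),
            removed: HashSet::new(),
        },
    );
    layers.insert(
        2,
        StateDiffLayer {
            parent: 1,
            added: [[3u8; 16]].into(),
            removed: [[1u8; 16]].into(),
        },
    );
    let expected: HashSet<CompressedStateEvent> = [[2u8; 16], [3u8; 16]].into();
    assert_eq!(resolve_full_state(2, &layers), expected);
}

Because update_shortstatehash_level in the same patch collapses a layer into its parent when it has too many ancestors or when the diff grows large relative to its sibling and parent diffs, the recursion above stays shallow in practice while snapshots that share most of their state still share storage.
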
From 31f60ad6fd7feee087f5d340ac5858f70d675e48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 2 Aug 2021 22:32:28 +0200 Subject: [PATCH 0728/1727] improvement: migrations, batch inserts --- src/database.rs | 57 +++++++++++++++++++++++++----- src/database/abstraction/sqlite.rs | 5 ++- 2 files changed, 53 insertions(+), 9 deletions(-) diff --git a/src/database.rs b/src/database.rs index e0f9eec..2e471d8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -735,40 +735,81 @@ impl Database { let shortroomid = db.globals.next_count()?.to_be_bytes(); db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?; + println!("Migration: 8"); } // Update pduids db layout - for (key, v) in db.rooms.pduid_pdu.iter() { + let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { + if !key.starts_with(b"!") { + return None; + } let mut parts = key.splitn(2, |&b| b == 0xff); let room_id = parts.next().unwrap(); let count = parts.next().unwrap(); - let short_room_id = db.rooms.roomid_shortroomid.get(&room_id)?.unwrap(); + let short_room_id = db + .rooms + .roomid_shortroomid + .get(&room_id) + .unwrap() + .expect("shortroomid should exist"); let mut new_key = short_room_id; new_key.extend_from_slice(count); - println!("{:?}", new_key); + Some((new_key, v)) + }); + + db.rooms.pduid_pdu.insert_batch(&mut batch)?; + + for (key, _) in db.rooms.pduid_pdu.iter() { + if key.starts_with(b"!") { + db.rooms.pduid_pdu.remove(&key); + } } + db.globals.bump_database_version(8)?; + + println!("Migration: 7 -> 8 finished"); + } + + if db.globals.database_version()? < 9 { // Update tokenids db layout - for (key, _) in db.rooms.tokenids.iter() { + let mut batch = db.rooms.tokenids.iter().filter_map(|(key, _)| { + if !key.starts_with(b"!") { + return None; + } let mut parts = key.splitn(4, |&b| b == 0xff); let room_id = parts.next().unwrap(); let word = parts.next().unwrap(); let _pdu_id_room = parts.next().unwrap(); let pdu_id_count = parts.next().unwrap(); - let short_room_id = db.rooms.roomid_shortroomid.get(&room_id)?.unwrap(); + let short_room_id = db + .rooms + .roomid_shortroomid + .get(&room_id) + .unwrap() + .expect("shortroomid should exist"); let mut new_key = short_room_id; new_key.extend_from_slice(word); new_key.push(0xff); new_key.extend_from_slice(pdu_id_count); - println!("{:?}", new_key); + println!("old {:?}", key); + println!("new {:?}", new_key); + Some((new_key, Vec::new())) + }); + + db.rooms.tokenids.insert_batch(&mut batch)?; + + for (key, _) in db.rooms.tokenids.iter() { + if key.starts_with(b"!") { + db.rooms.pduid_pdu.remove(&key)?; + } } - db.globals.bump_database_version(8)?; + db.globals.bump_database_version(9)?; - println!("Migration: 7 -> 8 finished"); + println!("Migration: 8 -> 9 finished"); } panic!(); diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f37dd9e..f420021 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -223,7 +223,10 @@ impl Tree for SqliteTable { let statement = Box::leak(Box::new( guard - .prepare(&format!("SELECT key, value FROM {} ORDER BY key ASC", &self.name)) + .prepare(&format!( + "SELECT key, value FROM {} ORDER BY key ASC", + &self.name + )) .unwrap(), )); From 3eabaa2a95645378b130134220f264a23e0fd7ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 12 Aug 2021 23:04:00 +0200 Subject: [PATCH 0729/1727] finish implementing better state store --- 
src/client_server/account.rs | 2 + src/client_server/context.rs | 4 +- src/client_server/membership.rs | 2 + src/client_server/message.rs | 4 +- src/client_server/room.rs | 5 +- src/client_server/sync.rs | 16 +- src/database.rs | 373 +++++----------- src/database/abstraction/sqlite.rs | 72 +-- src/database/rooms.rs | 691 +++++++++++++++++++---------- src/server_server.rs | 2 +- 10 files changed, 645 insertions(+), 526 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 48159c9..d4f103c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -249,6 +249,8 @@ pub async fn register_route( let room_id = RoomId::new(db.globals.server_name()); + db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + let mutex_state = Arc::clone( db.globals .roomid_mutex_state diff --git a/src/client_server/context.rs b/src/client_server/context.rs index dbc121e..701e584 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -44,7 +44,7 @@ pub async fn get_context_route( let events_before = db .rooms - .pdus_until(&sender_user, &body.room_id, base_token) + .pdus_until(&sender_user, &body.room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -66,7 +66,7 @@ pub async fn get_context_route( let events_after = db .rooms - .pdus_after(&sender_user, &body.room_id, base_token) + .pdus_after(&sender_user, &body.room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 716a615..de6fa5a 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -609,6 +609,8 @@ async fn join_room_by_id_helper( ) .await?; + db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 9cb6faa..70cc00f 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -128,7 +128,7 @@ pub async fn get_message_events_route( get_message_events::Direction::Forward => { let events_after = db .rooms - .pdus_after(&sender_user, &body.room_id, from) + .pdus_after(&sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { @@ -158,7 +158,7 @@ pub async fn get_message_events_route( get_message_events::Direction::Backward => { let events_before = db .rooms - .pdus_until(&sender_user, &body.room_id, from) + .pdus_until(&sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 89241f5..c323be4 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -33,6 +33,8 @@ pub async fn create_room_route( let room_id = RoomId::new(db.globals.server_name()); + db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + let mutex_state = Arc::clone( db.globals .roomid_mutex_state @@ -173,7 +175,6 @@ pub async fn create_room_route( )?; // 4. Canonical room alias - if let Some(room_alias_id) = &alias { db.rooms.build_and_append_pdu( PduBuilder { @@ -193,7 +194,7 @@ pub async fn create_room_route( &room_id, &db, &state_lock, - ); + )?; } // 5. 
Events set by preset diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 937a252..c196b2a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -205,7 +205,7 @@ async fn sync_helper( let mut non_timeline_pdus = db .rooms - .pdus_until(&sender_user, &room_id, u64::MAX) + .pdus_until(&sender_user, &room_id, u64::MAX)? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -248,13 +248,13 @@ async fn sync_helper( let first_pdu_before_since = db .rooms - .pdus_until(&sender_user, &room_id, since) + .pdus_until(&sender_user, &room_id, since)? .next() .transpose()?; let pdus_after_since = db .rooms - .pdus_after(&sender_user, &room_id, since) + .pdus_after(&sender_user, &room_id, since)? .next() .is_some(); @@ -286,7 +286,7 @@ async fn sync_helper( for hero in db .rooms - .all_pdus(&sender_user, &room_id) + .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { @@ -328,11 +328,11 @@ async fn sync_helper( } } - ( + Ok::<_, Error>(( Some(joined_member_count), Some(invited_member_count), heroes, - ) + )) }; let ( @@ -343,7 +343,7 @@ async fn sync_helper( state_events, ) = if since_shortstatehash.is_none() { // Probably since = 0, we will do an initial sync - let (joined_member_count, invited_member_count, heroes) = calculate_counts(); + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let state_events = current_state_ids @@ -510,7 +510,7 @@ async fn sync_helper( } let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts() + calculate_counts()? } else { (None, None, Vec::new()) }; diff --git a/src/database.rs b/src/database.rs index 2e471d8..4e34019 100644 --- a/src/database.rs +++ b/src/database.rs @@ -28,7 +28,7 @@ use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fs::{self, remove_dir_all}, io::Write, mem::size_of, @@ -266,7 +266,6 @@ impl Database { shortroomid_roomid: builder.open_tree("shortroomid_roomid")?, roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, - stateid_shorteventid: builder.open_tree("stateid_shorteventid")?, shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, @@ -431,7 +430,6 @@ impl Database { } if db.globals.database_version()? < 6 { - // TODO update to 6 // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { let room_id = @@ -445,263 +443,98 @@ impl Database { println!("Migration: 5 -> 6 finished"); } - fn load_shortstatehash_info( - shortstatehash: &[u8], - db: &Database, - lru: &mut LruCache< - Vec, - Vec<( - Vec, - HashSet>, - HashSet>, - HashSet>, - )>, - >, - ) -> Result< - Vec<( - Vec, // sstatehash - HashSet>, // full state - HashSet>, // added - HashSet>, // removed - )>, - > { - if let Some(result) = lru.get_mut(shortstatehash) { - return Ok(result.clone()); - } - - let value = db - .rooms - .shortstatehash_statediff - .get(shortstatehash)? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = value[0..size_of::()].to_vec(); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.to_vec()); - } else { - removed.insert(v.to_vec()); - } - i += 2 * size_of::(); - } - - if parent != 0_u64.to_be_bytes() { - let mut response = load_shortstatehash_info(&parent, db, lru)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().cloned()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash.to_vec(), state, added, removed)); - - lru.insert(shortstatehash.to_vec(), response.clone()); - Ok(response) - } else { - let mut response = Vec::new(); - response.push((shortstatehash.to_vec(), added.clone(), added, removed)); - lru.insert(shortstatehash.to_vec(), response.clone()); - Ok(response) - } - } - - fn update_shortstatehash_level( - current_shortstatehash: &[u8], - statediffnew: HashSet>, - statediffremoved: HashSet>, - diff_to_sibling: usize, - mut parent_states: Vec<( - Vec, // sstatehash - HashSet>, // full state - HashSet>, // added - HashSet>, // removed - )>, - db: &Database, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - parent_removed.insert(removed); - } - } - parent_new.extend(statediffnew); - - update_shortstatehash_level( - current_shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - db, - )?; - - return Ok(()); - } - - if parent_states.len() == 0 { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - db.rooms - .shortstatehash_statediff - .insert(¤t_shortstatehash, &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - parent_removed.insert(removed); - } - } - - parent_new.extend(statediffnew); - update_shortstatehash_level( - current_shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - db, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.clone(); - for new in &statediffnew { - value.extend_from_slice(&new); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed); - } - } - - db.rooms - .shortstatehash_statediff - .insert(¤t_shortstatehash, &value)?; - } - - Ok(()) - } - if db.globals.database_version()? < 7 { // Upgrade state store - let mut lru = LruCache::new(1000); - let mut last_roomstates: HashMap> = HashMap::new(); - let mut current_sstatehash: Vec = Vec::new(); + let mut last_roomstates: HashMap = HashMap::new(); + let mut current_sstatehash: Option = None; let mut current_room = None; let mut current_state = HashSet::new(); let mut counter = 0; + + let mut handle_state = + |current_sstatehash: u64, + current_room: &RoomId, + current_state: HashSet<_>, + last_roomstates: &mut HashMap<_, _>| { + counter += 1; + println!("counter: {}", counter); + let last_roomsstatehash = last_roomstates.get(current_room); + + let states_parents = last_roomsstatehash.map_or_else( + || Ok(Vec::new()), + |&last_roomsstatehash| { + db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash)) + }, + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew = current_state + .difference(&parent_stateinfo.1) + .cloned() + .collect::>(); + + let statediffremoved = parent_stateinfo + .1 + .difference(¤t_state) + .cloned() + .collect::>(); + + (statediffnew, statediffremoved) + } else { + (current_state, HashSet::new()) + }; + + db.rooms.save_state_from_diff( + dbg!(current_sstatehash), + statediffnew, + statediffremoved, + 2, // every state change is 2 event changes on average + states_parents, + )?; + + /* + let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?; + let state = tmp.pop().unwrap(); + println!( + "{}\t{}{:?}: {:?} + {:?} - {:?}", + current_room, + " ".repeat(tmp.len()), + utils::u64_from_bytes(¤t_sstatehash).unwrap(), + tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), + state + .2 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>(), + state + .3 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>() + ); + */ + + Ok::<_, Error>(()) + }; + for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { - let sstatehash = k[0..size_of::()].to_vec(); + let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]) + .expect("number of bytes is correct"); let sstatekey = k[size_of::()..].to_vec(); - if sstatehash != current_sstatehash { - if !current_sstatehash.is_empty() { - counter += 1; - println!("counter: {}", counter); - let current_room = current_room.as_ref().unwrap(); - let last_roomsstatehash = last_roomstates.get(¤t_room); - - let states_parents = last_roomsstatehash.map_or_else( - 
|| Ok(Vec::new()), - |last_roomsstatehash| { - load_shortstatehash_info(&last_roomsstatehash, &db, &mut lru) - }, + if Some(sstatehash) != current_sstatehash { + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_ref().unwrap(), + current_state, + &mut last_roomstates, )?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = current_state - .difference(&parent_stateinfo.1) - .cloned() - .collect::>(); - - let statediffremoved = parent_stateinfo - .1 - .difference(¤t_state) - .cloned() - .collect::>(); - - (statediffnew, statediffremoved) - } else { - (current_state, HashSet::new()) - }; - - update_shortstatehash_level( - ¤t_sstatehash, - statediffnew, - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - &db, - )?; - - /* - let mut tmp = load_shortstatehash_info(¤t_sstatehash, &db)?; - let state = tmp.pop().unwrap(); - println!( - "{}\t{}{:?}: {:?} + {:?} - {:?}", - current_room, - " ".repeat(tmp.len()), - utils::u64_from_bytes(¤t_sstatehash).unwrap(), - tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), - state - .2 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>(), - state - .3 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>() - ); - */ - - last_roomstates.insert(current_room.clone(), current_sstatehash); + last_roomstates + .insert(current_room.clone().unwrap(), current_sstatehash); } current_state = HashSet::new(); - current_sstatehash = sstatehash; + current_sstatehash = Some(sstatehash); let event_id = db .rooms @@ -721,7 +554,16 @@ impl Database { let mut val = sstatekey; val.extend_from_slice(&seventid); - current_state.insert(val); + current_state.insert(val.try_into().expect("size is correct")); + } + + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_ref().unwrap(), + current_state, + &mut last_roomstates, + )?; } db.globals.bump_database_version(7)?; @@ -761,11 +603,28 @@ impl Database { db.rooms.pduid_pdu.insert_batch(&mut batch)?; - for (key, _) in db.rooms.pduid_pdu.iter() { - if key.starts_with(b"!") { - db.rooms.pduid_pdu.remove(&key); + let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| { + if !value.starts_with(b"!") { + return None; } - } + let mut parts = value.splitn(2, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db + .rooms + .roomid_shortroomid + .get(&room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_value = short_room_id; + new_value.extend_from_slice(count); + + Some((k, new_value)) + }); + + db.rooms.eventid_pduid.insert_batch(&mut batch2)?; db.globals.bump_database_version(8)?; @@ -803,7 +662,7 @@ impl Database { for (key, _) in db.rooms.tokenids.iter() { if key.starts_with(b"!") { - db.rooms.pduid_pdu.remove(&key)?; + db.rooms.tokenids.remove(&key)?; } } @@ -811,8 +670,6 @@ impl Database { println!("Migration: 8 -> 9 finished"); } - - panic!(); } let guard = db.read().await; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f420021..3c4ae9c 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -9,13 +9,13 @@ use std::{ path::{Path, PathBuf}, pin::Pin, sync::Arc, - time::{Duration, Instant}, }; use tokio::sync::oneshot::Sender; use tracing::debug; 
thread_local! { static READ_CONNECTION: RefCell> = RefCell::new(None); + static READ_CONNECTION_ITERATOR: RefCell> = RefCell::new(None); } struct PreparedStatementIterator<'a> { @@ -77,6 +77,21 @@ impl Engine { }) } + fn read_lock_iterator(&self) -> &'static Connection { + READ_CONNECTION_ITERATOR.with(|cell| { + let connection = &mut cell.borrow_mut(); + + if (*connection).is_none() { + let c = Box::leak(Box::new( + Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap(), + )); + **connection = Some(c); + } + + connection.unwrap() + }) + } + pub fn flush_wal(self: &Arc) -> Result<()> { self.write_lock() .pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; @@ -151,6 +166,34 @@ impl SqliteTable { )?; Ok(()) } + + pub fn iter_with_guard<'a>( + &'a self, + guard: &'a Connection, + ) -> Box + 'a> { + let statement = Box::leak(Box::new( + guard + .prepare(&format!( + "SELECT key, value FROM {} ORDER BY key ASC", + &self.name + )) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(|r| r.unwrap()), + ); + + Box::new(PreparedStatementIterator { + iterator, + statement_ref, + }) + } } impl Tree for SqliteTable { @@ -219,30 +262,9 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box + 'a> { - let guard = self.engine.read_lock(); + let guard = self.engine.read_lock_iterator(); - let statement = Box::leak(Box::new( - guard - .prepare(&format!( - "SELECT key, value FROM {} ORDER BY key ASC", - &self.name - )) - .unwrap(), - )); - - let statement_ref = NonAliasingBox(statement); - - let iterator = Box::new( - statement - .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) - .unwrap() - .map(|r| r.unwrap()), - ); - - Box::new(PreparedStatementIterator { - iterator, - statement_ref, - }) + self.iter_with_guard(&guard) } #[tracing::instrument(skip(self, from, backwards))] @@ -251,7 +273,7 @@ impl Tree for SqliteTable { from: &[u8], backwards: bool, ) -> Box + 'a> { - let guard = self.engine.read_lock(); + let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? if backwards { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3a1c41..fc01e8a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -24,7 +24,7 @@ use ruma::{ use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, - mem, + mem::size_of, sync::{Arc, Mutex}, }; use tokio::sync::MutexGuard; @@ -37,10 +37,11 @@ use super::{abstraction::Tree, admin::AdminCommand, pusher}; /// This is created when a state group is added to the database by /// hashing the entire state. pub type StateHashId = Vec; +pub type CompressedStateEvent = [u8; 2 * size_of::()]; pub struct Rooms { pub edus: edus::RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = RoomId + Count + pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count pub(super) eventid_pduid: Arc, pub(super) roomid_pduleaves: Arc, pub(super) alias_roomid: Arc, @@ -79,9 +80,6 @@ pub struct Rooms { pub(super) eventid_shorteventid: Arc, pub(super) statehash_shortstatehash: Arc, - /// ShortStateHash = Count - /// StateId = ShortStateHash - pub(super) stateid_shorteventid: Arc, pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) /// RoomId + EventId -> outlier PDU. 
@@ -100,29 +98,30 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { - Ok(self - .stateid_shorteventid - .scan_prefix(shortstatehash.to_be_bytes().to_vec()) - .map(|(_, bytes)| { - self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) - .ok() - }) - .flatten() - .collect()) + let full_state = self + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + full_state + .into_iter() + .map(|compressed| self.parse_compressed_state_event(compressed)) + .collect() } pub fn state_full( &self, shortstatehash: u64, ) -> Result>> { - let state = self - .stateid_shorteventid - .scan_prefix(shortstatehash.to_be_bytes().to_vec()) - .map(|(_, bytes)| { - self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) - .ok() - }) - .flatten() + let full_state = self + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + Ok(full_state + .into_iter() + .map(|compressed| self.parse_compressed_state_event(compressed)) + .filter_map(|r| r.ok()) .map(|eventid| self.get_pdu(&eventid)) .filter_map(|r| r.ok().flatten()) .map(|pdu| { @@ -138,9 +137,7 @@ impl Rooms { )) }) .filter_map(|r| r.ok()) - .collect(); - - Ok(state) + .collect()) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -151,27 +148,19 @@ impl Rooms { event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = event_type.as_ref().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&state_key.as_bytes()); - - let shortstatekey = self.statekey_shortstatekey.get(&key)?; - - if let Some(shortstatekey) = shortstatekey { - let mut stateid = shortstatehash.to_be_bytes().to_vec(); - stateid.extend_from_slice(&shortstatekey); - - Ok(self - .stateid_shorteventid - .get(&stateid)? - .map(|bytes| { - self.get_eventid_from_short(utils::u64_from_bytes(&bytes).unwrap()) - .ok() - }) - .flatten()) - } else { - Ok(None) - } + let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { + Some(s) => s, + None => return Ok(None), + }; + let full_state = self + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + Ok(full_state + .into_iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .and_then(|compressed| self.parse_compressed_state_event(compressed).ok())) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -260,8 +249,7 @@ impl Rooms { /// Checks if a room exists. pub fn exists(&self, room_id: &RoomId) -> Result { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); // Look for PDUs in that room. Ok(self @@ -274,8 +262,7 @@ impl Rooms { /// Checks if a room exists. pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); // Look for PDUs in that room. self.pduid_pdu @@ -292,74 +279,78 @@ impl Rooms { /// Force the creation of a new StateHash and insert it into the db. /// - /// Whatever `state` is supplied to `force_state` __is__ the current room state snapshot. + /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
pub fn force_state( &self, room_id: &RoomId, - state: HashMap<(EventType, String), EventId>, + new_state: HashMap<(EventType, String), EventId>, db: &Database, ) -> Result<()> { + let previous_shortstatehash = self.current_shortstatehash(&room_id)?; + + let new_state_ids_compressed = new_state + .iter() + .filter_map(|((event_type, state_key), event_id)| { + let shortstatekey = self + .get_or_create_shortstatekey(event_type, state_key, &db.globals) + .ok()?; + Some( + self.compress_state_event(shortstatekey, event_id, &db.globals) + .ok()?, + ) + }) + .collect::>(); + let state_hash = self.calculate_hash( - &state + &new_state .values() .map(|event_id| event_id.as_bytes()) .collect::>(), ); - let (shortstatehash, already_existed) = + let (new_shortstatehash, already_existed) = self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - let new_state = if !already_existed { - let mut new_state = HashSet::new(); + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok(()); + } - let batch = state - .iter() - .filter_map(|((event_type, state_key), eventid)| { - new_state.insert(eventid.clone()); + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() + { + let statediffnew = new_state_ids_compressed + .difference(&parent_stateinfo.1) + .cloned() + .collect::>(); - let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? { - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = db.globals.next_count().ok()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes()) - .ok()?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let statediffremoved = parent_stateinfo + .1 + .difference(&new_state_ids_compressed) + .cloned() + .collect::>(); - let shorteventid = self - .get_or_create_shorteventid(&eventid, &db.globals) - .ok()?; - - let mut state_id = shortstatehash.to_be_bytes().to_vec(); - state_id.extend_from_slice(&shortstatekey); - - Some((state_id, shorteventid.to_be_bytes().to_vec())) - }) - .collect::>(); - - self.stateid_shorteventid - .insert_batch(&mut batch.into_iter())?; - - new_state + (statediffnew, statediffremoved) } else { - self.state_full_ids(shortstatehash)?.into_iter().collect() + (new_state_ids_compressed, HashSet::new()) }; - let old_state = self - .current_shortstatehash(&room_id)? - .map(|s| self.state_full_ids(s)) - .transpose()? - .map(|vec| vec.into_iter().collect::>()) - .unwrap_or_default(); + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + statediffremoved.clone(), + 2, // every state change is 2 event changes on average + states_parents, + )?; + }; - for event_id in new_state.difference(&old_state) { - if let Some(pdu) = self.get_pdu_json(event_id)? { + for event_id in statediffnew + .into_iter() + .filter_map(|new| self.parse_compressed_state_event(new).ok()) + { + if let Some(pdu) = self.get_pdu_json(&event_id)? 
{ if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") { if let Ok(pdu) = serde_json::from_value::( serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"), @@ -392,7 +383,206 @@ impl Rooms { } self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + + Ok(()) + } + + /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. + pub fn load_shortstatehash_info( + &self, + shortstatehash: u64, + ) -> Result< + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + > { + let value = self + .shortstatehash_statediff + .get(&shortstatehash.to_be_bytes())? + .ok_or_else(|| Error::bad_database("State hash does not exist"))?; + let parent = + utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); + + let mut add_mode = true; + let mut added = HashSet::new(); + let mut removed = HashSet::new(); + + let mut i = size_of::(); + while let Some(v) = value.get(i..i + 2 * size_of::()) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i += size_of::(); + continue; + } + if add_mode { + added.insert(v.try_into().expect("we checked the size above")); + } else { + removed.insert(v.try_into().expect("we checked the size above")); + } + i += 2 * size_of::(); + } + + if parent != 0_u64 { + let mut response = self.load_shortstatehash_info(parent)?; + let mut state = response.last().unwrap().1.clone(); + state.extend(added.iter().cloned()); + for r in &removed { + state.remove(r); + } + + response.push((shortstatehash, state, added, removed)); + + Ok(response) + } else { + let mut response = Vec::new(); + response.push((shortstatehash, added.clone(), added, removed)); + Ok(response) + } + } + + pub fn compress_state_event( + &self, + shortstatekey: u64, + event_id: &EventId, + globals: &super::globals::Globals, + ) -> Result { + let mut v = shortstatekey.to_be_bytes().to_vec(); + v.extend_from_slice( + &self + .get_or_create_shorteventid(event_id, globals)? + .to_be_bytes(), + ); + Ok(v.try_into().expect("we checked the size above")) + } + + pub fn parse_compressed_state_event( + &self, + compressed_event: CompressedStateEvent, + ) -> Result { + self.get_eventid_from_short( + utils::u64_from_bytes(&compressed_event[size_of::()..]) + .expect("bytes have right length"), + ) + } + + /// Creates a new shortstatehash that often is just a diff to an already existing + /// shortstatehash and therefore very efficient. + /// + /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer + /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 + /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's + /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. + /// + /// * `shortstatehash` - Shortstatehash of this state + /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid + /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid + /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer + /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer + pub fn save_state_from_diff( + &self, + shortstatehash: u64, + statediffnew: HashSet, + statediffremoved: HashSet, + diff_to_sibling: usize, + mut parent_states: Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + ) -> Result<()> { + let diffsum = statediffnew.len() + statediffremoved.len(); + + if parent_states.len() > 3 { + // Number of layers + // To many layers, we have to go deeper + let parent = parent_states.pop().unwrap(); + + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + parent_removed.insert(removed); + } + } + parent_new.extend(statediffnew); + + self.save_state_from_diff( + shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + )?; + + return Ok(()); + } + + if parent_states.len() == 0 { + // There is no parent layer, create a new state + let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent + for new in &statediffnew { + value.extend_from_slice(&new[..]); + } + + if !statediffremoved.is_empty() { + warn!("Tried to create new state with removals"); + } + + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value)?; + + return Ok(()); + }; + + // Else we have two options. + // 1. We add the current diff on top of the parent layer. + // 2. We replace a layer above + + let parent = parent_states.pop().unwrap(); + let parent_diff = parent.2.len() + parent.3.len(); + + if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { + // Diff too big, we replace above layer(s) + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + parent_removed.insert(removed); + } + } + + parent_new.extend(statediffnew); + self.save_state_from_diff( + shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + )?; + } else { + // Diff small enough, we add diff as layer on top of parent + let mut value = parent.0.to_be_bytes().to_vec(); + for new in &statediffnew { + value.extend_from_slice(&new[..]); + } + + if !statediffremoved.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in &statediffremoved { + value.extend_from_slice(&removed[..]); + } + } + + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value)?; + } Ok(()) } @@ -418,7 +608,6 @@ impl Rooms { }) } - /// Returns (shortstatehash, already_existed) pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -438,6 +627,71 @@ impl Rooms { }) } + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result { + let bytes = self + .roomid_shortroomid + .get(&room_id.as_bytes())? + .expect("every room has a shortroomid"); + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + } + + pub fn get_shortstatekey( + &self, + event_type: &EventType, + state_key: &str, + ) -> Result> { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + self.statekey_shortstatekey + .get(&statekey)? 
+ .map(|shortstatekey| { + utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + }) + .transpose() + } + + pub fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.roomid_shortroomid.get(&room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = globals.next_count()?; + self.roomid_shortroomid + .insert(&room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } + + pub fn get_or_create_shortstatekey( + &self, + event_type: &EventType, + state_key: &str, + globals: &super::globals::Globals, + ) -> Result { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + Ok(match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, + None => { + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey + } + }) + } + pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { if let Some(id) = self .shorteventid_cache @@ -514,7 +768,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { Ok( - utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?, ) } @@ -527,8 +781,7 @@ impl Rooms { } pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -758,6 +1011,8 @@ impl Rooms { /// /// By this point the incoming event should be fully authenticated, no auth happens /// in `append_pdu`. + /// + /// Returns pdu id #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] pub fn append_pdu( &self, @@ -766,7 +1021,8 @@ impl Rooms { leaves: &[EventId], db: &Database, ) -> Result> { - // returns pdu id + let shortroomid = self.get_shortroomid(&pdu.room_id)?; + // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily // interpret things like membership changes @@ -821,8 +1077,7 @@ impl Rooms { self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); + let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); // There's a brief moment of time here where the count is updated but the pdu does not @@ -968,8 +1223,7 @@ impl Rooms { .filter(|word| word.len() <= 50) .map(str::to_lowercase) .map(|word| { - let mut key = pdu.room_id.as_bytes().to_vec(); - key.push(0xff); + let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xff); key.extend_from_slice(&pdu_id); @@ -1152,11 +1406,27 @@ impl Rooms { pub fn set_event_state( &self, event_id: &EventId, + room_id: &RoomId, state: &StateMap>, globals: &super::globals::Globals, ) -> Result<()> { let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?; + let previous_shortstatehash = self.current_shortstatehash(&room_id)?; + + let state_ids_compressed = state + .iter() + .filter_map(|((event_type, state_key), pdu)| { + let shortstatekey = self + .get_or_create_shortstatekey(event_type, state_key, globals) + .ok()?; + Some( + self.compress_state_event(shortstatekey, &pdu.event_id, globals) + .ok()?, + ) + }) + .collect::>(); + let state_hash = self.calculate_hash( &state .values() @@ -1168,37 +1438,33 @@ impl Rooms { self.get_or_create_shortstatehash(&state_hash, globals)?; if !already_existed { - let batch = state - .iter() - .filter_map(|((event_type, state_key), pdu)| { - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - let shortstatekey = match self.statekey_shortstatekey.get(&statekey).ok()? 
{ - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = globals.next_count().ok()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes()) - .ok()?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew = state_ids_compressed + .difference(&parent_stateinfo.1) + .cloned() + .collect::>(); - let shorteventid = self - .get_or_create_shorteventid(&pdu.event_id, globals) - .ok()?; + let statediffremoved = parent_stateinfo + .1 + .difference(&state_ids_compressed) + .cloned() + .collect::>(); - let mut state_id = shortstatehash.to_be_bytes().to_vec(); - state_id.extend_from_slice(&shortstatekey); - - Some((state_id, shorteventid.to_be_bytes().to_vec())) - }) - .collect::>(); - - self.stateid_shorteventid - .insert_batch(&mut batch.into_iter())?; + (statediffnew, statediffremoved) + } else { + (state_ids_compressed, HashSet::new()) + }; + self.save_state_from_diff( + shortstatehash, + statediffnew.clone(), + statediffremoved.clone(), + 1_000_000, // high number because no state will be based on this one + states_parents, + )?; } self.shorteventid_shortstatehash @@ -1219,82 +1485,52 @@ impl Rooms { ) -> Result { let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - let old_state = if let Some(old_shortstatehash) = - self.roomid_shortstatehash.get(new_pdu.room_id.as_bytes())? - { - // Store state for event. The state does not include the event itself. - // Instead it's the state before the pdu, so the room's old state. + let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; + + if let Some(p) = previous_shortstatehash { self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &old_shortstatehash)?; - - if new_pdu.state_key.is_none() { - return utils::u64_from_bytes(&old_shortstatehash).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash.") - }); - } - - self.stateid_shorteventid - .scan_prefix(old_shortstatehash.clone()) - // Chop the old_shortstatehash out leaving behind the short state key - .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) - .collect::, Vec>>() - } else { - HashMap::new() - }; + .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; + } if let Some(state_key) = &new_pdu.state_key { - let mut new_state: HashMap, Vec> = old_state; + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); - new_state_key.push(0xff); - new_state_key.extend_from_slice(state_key.as_bytes()); + let shortstatekey = + self.get_or_create_shortstatekey(&new_pdu.kind, &state_key, globals)?; - let shortstatekey = match self.statekey_shortstatekey.get(&new_state_key)? 
{ - Some(shortstatekey) => shortstatekey.to_vec(), - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&new_state_key, &shortstatekey.to_be_bytes())?; - shortstatekey.to_be_bytes().to_vec() - } - }; + let replaces = states_parents + .last() + .map(|info| { + info.1 + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - new_state.insert(shortstatekey, shorteventid.to_be_bytes().to_vec()); + // TODO: statehash with deterministic inputs + let shortstatehash = globals.next_count()?; - let new_state_hash = self.calculate_hash( - &new_state - .values() - .map(|event_id| &**event_id) - .collect::>(), - ); + let mut statediffnew = HashSet::new(); + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + statediffnew.insert(new); - let shortstatehash = match self.statehash_shortstatehash.get(&new_state_hash)? { - Some(shortstatehash) => { - warn!("state hash already existed?!"); - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))? - } - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(&new_state_hash, &shortstatehash.to_be_bytes())?; - shortstatehash - } - }; + let mut statediffremoved = HashSet::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(replaces.clone()); + } - let mut batch = new_state.into_iter().map(|(shortstatekey, shorteventid)| { - let mut state_id = shortstatehash.to_be_bytes().to_vec(); - state_id.extend_from_slice(&shortstatekey); - (state_id, shorteventid) - }); - - self.stateid_shorteventid.insert_batch(&mut batch)?; + self.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 2, + states_parents, + )?; Ok(shortstatehash) } else { - Err(Error::bad_database( - "Tried to insert non-state event into room without a state.", - )) + Ok(previous_shortstatehash.expect("first event in room must be a state event")) } } @@ -1597,7 +1833,7 @@ impl Rooms { &'a self, user_id: &UserId, room_id: &RoomId, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> Result, PduEvent)>> + 'a> { self.pdus_since(user_id, room_id, 0) } @@ -1609,16 +1845,17 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> impl Iterator, PduEvent)>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + ) -> Result, PduEvent)>> + 'a> { + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); // Skip the first pdu if it's exactly at since, because we sent that last time let mut first_pdu_id = prefix.clone(); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); let user_id = user_id.clone(); - self.pduid_pdu + + Ok(self + .pduid_pdu .iter_from(&first_pdu_id, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { @@ -1628,7 +1865,7 @@ impl Rooms { pdu.unsigned.remove("transaction_id"); } Ok((pdu_id, pdu)) - }) + })) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -1639,10 +1876,9 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want 
event at `until` @@ -1650,7 +1886,9 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); - self.pduid_pdu + + Ok(self + .pduid_pdu .iter_from(current, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { @@ -1660,7 +1898,7 @@ impl Rooms { pdu.unsigned.remove("transaction_id"); } Ok((pdu_id, pdu)) - }) + })) } /// Returns an iterator over all events and their token in a room that happened after the event @@ -1671,10 +1909,9 @@ impl Rooms { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> impl Iterator, PduEvent)>> + 'a { + ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event @@ -1682,7 +1919,9 @@ impl Rooms { let current: &[u8] = ¤t; let user_id = user_id.clone(); - self.pduid_pdu + + Ok(self + .pduid_pdu .iter_from(current, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { @@ -1692,7 +1931,7 @@ impl Rooms { pdu.unsigned.remove("transaction_id"); } Ok((pdu_id, pdu)) - }) + })) } /// Replace a PDU with the redacted form. @@ -2223,8 +2462,8 @@ impl Rooms { room_id: &RoomId, search_string: &str, ) -> Result<(impl Iterator> + 'a, Vec)> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix_clone = prefix.clone(); let words = search_string .split_terminator(|c: char| !c.is_alphanumeric()) @@ -2243,16 +2482,7 @@ impl Rooms { .iter_from(&last_possible_id, true) // Newest pdus first .take_while(move |(k, _)| k.starts_with(&prefix2)) .map(|(key, _)| { - let pduid_index = key - .iter() - .enumerate() - .filter(|(_, &b)| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid tokenid in db."))? - .0 - + 1; // +1 because the pdu id starts AFTER the separator - - let pdu_id = key[pduid_index..].to_vec(); + let pdu_id = key[key.len() - size_of::()..].to_vec(); Ok::<_, Error>(pdu_id) }) @@ -2264,7 +2494,12 @@ impl Rooms { // We compare b with a because we reversed the iterator earlier b.cmp(a) }) - .unwrap(), + .unwrap() + .map(move |id| { + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }), words, )) } diff --git a/src/server_server.rs b/src/server_server.rs index b3f0353..0c226ac 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1704,7 +1704,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. db.rooms - .set_event_state(&pdu.event_id, state, &db.globals)?; + .set_event_state(&pdu.event_id, &pdu.room_id, state, &db.globals)?; let pdu_id = db.rooms.append_pdu( pdu, From 3cf0145bc5b564e1230417bf98b9aeffde1f1085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 08:26:45 +0200 Subject: [PATCH 0730/1727] fix: room exists panic --- src/database/rooms.rs | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fc01e8a..400ce38 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -249,7 +249,10 @@ impl Rooms { /// Checks if a room exists. 
pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = match self.get_shortroomid(room_id)? { + Some(b) => b.to_be_bytes().to_vec(), + None => return Ok(false), + }; // Look for PDUs in that room. Ok(self @@ -262,7 +265,7 @@ impl Rooms { /// Checks if a room exists. pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); // Look for PDUs in that room. self.pduid_pdu @@ -627,12 +630,12 @@ impl Rooms { }) } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result { - let bytes = self + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self .roomid_shortroomid .get(&room_id.as_bytes())? - .expect("every room has a shortroomid"); - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + .map(|bytes| + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db."))).transpose() } pub fn get_shortstatekey( @@ -781,7 +784,7 @@ impl Rooms { } pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -1021,7 +1024,7 @@ impl Rooms { leaves: &[EventId], db: &Database, ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?; + let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -1846,7 +1849,7 @@ impl Rooms { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>> + 'a> { - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); // Skip the first pdu if it's exactly at since, because we sent that last time let mut first_pdu_id = prefix.clone(); @@ -1878,7 +1881,7 @@ impl Rooms { until: u64, ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` @@ -1911,7 +1914,7 @@ impl Rooms { from: u64, ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event @@ -2462,7 +2465,7 @@ impl Rooms { room_id: &RoomId, search_string: &str, ) -> Result<(impl Iterator> + 'a, Vec)> { - let prefix = self.get_shortroomid(room_id)?.to_be_bytes().to_vec(); + let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); let prefix_clone = prefix.clone(); let words = search_string From 38effda799570e1a0837673d671f364d9c7bd4b1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 19:07:50 +0200 Subject: [PATCH 0731/1727] fix: delta calculation --- src/database.rs | 2 +- src/database/rooms.rs | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/database.rs b/src/database.rs index 4e34019..0bf2a44 100644 --- a/src/database.rs +++ b/src/database.rs @@ -108,7 +108,7 @@ fn default_db_cache_capacity_mb() -> f64 { } fn default_sqlite_wal_clean_second_interval() -> u32 { - 15 * 60 // every 15 minutes + 1 * 60 // every minute } fn default_max_request_size() -> u32 { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 400ce38..fc2bd05 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -510,10 +510,19 @@ impl Rooms { for removed in statediffremoved { if !parent_new.remove(&removed) { + // It was not added in the parent and we removed it parent_removed.insert(removed); } + // Else it was added in the parent and we removed it again. We can forget this change + } + + for new in statediffnew { + if !parent_removed.remove(&new) { + // It was not touched in the parent and we added it + parent_new.insert(new); + } + // Else it was removed in the parent and we added it again. We can forget this change } - parent_new.extend(statediffnew); self.save_state_from_diff( shortstatehash, @@ -557,11 +566,20 @@ impl Rooms { for removed in statediffremoved { if !parent_new.remove(&removed) { + // It was not added in the parent and we removed it parent_removed.insert(removed); } + // Else it was added in the parent and we removed it again. We can forget this change + } + + for new in statediffnew { + if !parent_removed.remove(&new) { + // It was not touched in the parent and we added it + parent_new.insert(new); + } + // Else it was removed in the parent and we added it again. 
We can forget this change } - parent_new.extend(statediffnew); self.save_state_from_diff( shortstatehash, parent_new, From 9c3a8edcaedf9d4f5e3cf25200defa9a09b72a0b Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 13 Aug 2021 17:20:40 +0200 Subject: [PATCH 0732/1727] Use full optimizations for master and faster config else Signed-off-by: Jonas Zohren --- .gitlab-ci.yml | 243 ++++++++++++++++++++++++++++--------------------- Cargo.toml | 21 ++++- DEPLOY.md | 15 +-- 3 files changed, 168 insertions(+), 111 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4fa515b..b7ea88e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,85 +9,6 @@ variables: FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest -test:cargo: - stage: "test" - needs: [] - image: "rust:latest" - tags: ["docker"] - variables: - CARGO_HOME: "cargohome" - cache: - paths: - - target - - cargohome - key: test_cache - interruptible: true - before_script: - - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - - rustup component add clippy rustfmt - script: - - rustc --version && cargo --version # Print version info for debugging - - cargo fmt --all -- --check - - cargo test --workspace --verbose --locked - - cargo clippy - -test:sytest: - stage: "test" - allow_failure: true - needs: - - "build:cargo:x86_64-unknown-linux-musl" - image: - name: "valkum/sytest-conduit:latest" - entrypoint: [""] - tags: ["docker"] - variables: - PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" - before_script: - - "mkdir -p /app" - - "cp ./conduit-x86_64-unknown-linux-musl /app/conduit" - - "chmod +x /app/conduit" - - "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src" - - "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/" - - "cd /" - script: - - "SYTEST_EXIT_CODE=0" - - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" - - "exit $SYTEST_EXIT_CODE" - artifacts: - when: always - paths: - - "$CI_PROJECT_DIR/sytest.xml" - - "$CI_PROJECT_DIR/results.tap" - reports: - junit: "$CI_PROJECT_DIR/sytest.xml" - - -test:register:element-web-stable: - stage: "test" - needs: - - "build:cargo:x86_64-unknown-linux-gnu" - image: "buildkite/puppeteer:latest" - tags: ["docker"] - interruptible: true - script: - - "CONDUIT_CONFIG=tests/test-config.toml ./conduit-x86_64-unknown-linux-gnu > conduit.log &" - - "cd tests/client-element-web/" - - "npm install puppeteer" - - "node test-element-web-registration.js \"https://app.element.io/\" \"http://localhost:6167\"" - - "killall --regexp \"conduit\"" - - "cd ../.." 
- - "cat conduit.log" - artifacts: - paths: - - "tests/client-element-web/*.png" - - "*.log" - expire_in: 1 week - when: always - retry: 1 - # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # @@ -105,7 +26,7 @@ test:register:element-web-stable: paths: - cargohome - target/ - key: "build_cache-$TARGET" + key: "build_cache-$TARGET-release" before_script: - 'echo "Building for target $TARGET"' - 'mkdir -p cargohome && CARGOHOME="cargohome"' @@ -115,26 +36,21 @@ test:register:element-web-stable: - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - "rustup target add $TARGET" script: - # Set some cargo tuning here, because targets overwrite the 'variables' - - "export CARGO_INCREMENTAL=true" - - "export CARGO_PROFILE_RELEASE_CODEGEN_UNITS=16" - - "export CARGO_PROFILE_RELEASE_LTO=thin" - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' -build:cargo:x86_64-unknown-linux-gnu: + +build:release:cargo:x86_64-unknown-linux-gnu: extends: .build-cargo-shared-settings variables: TARGET: "x86_64-unknown-linux-gnu" - rules: - - if: "$CI_COMMIT_BRANCH" artifacts: name: "conduit-x86_64-unknown-linux-gnu" paths: - "conduit-x86_64-unknown-linux-gnu" - expose_as: "Release binary x86_64-unknown-linux-gnu" + expose_as: "Conduit for x86_64-unknown-linux-gnu" -build:cargo:armv7-unknown-linux-gnueabihf: +build:release:cargo:armv7-unknown-linux-gnueabihf: extends: .build-cargo-shared-settings variables: TARGET: "armv7-unknown-linux-gnueabihf" @@ -146,9 +62,9 @@ build:cargo:armv7-unknown-linux-gnueabihf: name: "conduit-armv7-unknown-linux-gnueabihf" paths: - "conduit-armv7-unknown-linux-gnueabihf" - expose_as: "Release binary armv7-unknown-linux-gnueabihf" + expose_as: "Conduit for armv7-unknown-linux-gnueabihf" -build:cargo:aarch64-unknown-linux-gnu: +build:release:cargo:aarch64-unknown-linux-gnu: extends: .build-cargo-shared-settings variables: TARGET: "aarch64-unknown-linux-gnu" @@ -162,13 +78,11 @@ build:cargo:aarch64-unknown-linux-gnu: name: "conduit-aarch64-unknown-linux-gnu" paths: - "conduit-aarch64-unknown-linux-gnu" - expose_as: "Release binary aarch64-unknown-linux-gnu" + expose_as: "Conduit for aarch64-unknown-linux-gnu" -build:cargo:x86_64-unknown-linux-musl: +build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings image: "rust:alpine" - rules: - - if: '$CI_COMMIT_BRANCH' # Always run variables: TARGET: "x86_64-unknown-linux-musl" before_script: @@ -181,7 +95,47 @@ build:cargo:x86_64-unknown-linux-musl: name: "conduit-x86_64-unknown-linux-musl" paths: - "conduit-x86_64-unknown-linux-musl" - expose_as: "Release binary x86_64-unknown-linux-musl" + expose_as: "Conduit for x86_64-unknown-linux-musl" + + + +.cargo-debug-shared-settings: + extends: ".build-cargo-shared-settings" + rules: + - if: '$CI_COMMIT_BRANCH' + cache: + key: "build_cache-$TARGET-debug" + script: + - "time cargo build --target $TARGET" + - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' + +build:debug:cargo:x86_64-unknown-linux-gnu: + extends: ".cargo-debug-shared-settings" + variables: + TARGET: "x86_64-unknown-linux-gnu" + artifacts: + name: "conduit-debug-x86_64-unknown-linux-gnu" + paths: + - "conduit-debug-x86_64-unknown-linux-gnu" + expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu" + +build:debug:cargo:x86_64-unknown-linux-musl: + extends: ".cargo-debug-shared-settings" + image: "rust:alpine" + variables: + TARGET: "x86_64-unknown-linux-musl" + 
before_script: + - 'echo "Building for target $TARGET"' + - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging + - "rustup target add $TARGET" + - "apk add libc-dev" + artifacts: + name: "conduit-debug-x86_64-unknown-linux-musl" + paths: + - "conduit-debug-x86_64-unknown-linux-musl" + expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" + # --------------------------------------------------------------------- # @@ -191,7 +145,7 @@ build:cargo:x86_64-unknown-linux-musl: .build-cargo-deb-shared-settings: stage: "build" - needs: [] + needs: [ ] rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' interruptible: true @@ -252,7 +206,7 @@ build:cargo-deb:x86_64-unknown-linux-gnu: build:docker:main: extends: .docker-shared-settings needs: - - "build:cargo:x86_64-unknown-linux-musl" + - "build:release:cargo:x86_64-unknown-linux-musl" script: - > /kaniko/executor @@ -272,6 +226,91 @@ build:docker:main: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + +# --------------------------------------------------------------------- # +# Run tests # +# --------------------------------------------------------------------- # + +test:cargo: + stage: "test" + needs: [ ] + image: "rust:latest" + tags: [ "docker" ] + variables: + CARGO_HOME: "cargohome" + cache: + paths: + - target + - cargohome + key: test_cache + interruptible: true + before_script: + - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" + - apt-get update -yqq + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - rustup component add clippy rustfmt + script: + - rustc --version && cargo --version # Print version info for debugging + - cargo fmt --all -- --check + - cargo test --workspace --verbose --locked + - cargo clippy + +test:sytest: + stage: "test" + allow_failure: true + needs: + - "build:debug:cargo:x86_64-unknown-linux-musl" + image: + name: "valkum/sytest-conduit:latest" + entrypoint: [ "" ] + tags: [ "docker" ] + variables: + PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" + before_script: + - "mkdir -p /app" + - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit" + - "chmod +x /app/conduit" + - "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src" + - "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/" + - "cd /" + script: + - "SYTEST_EXIT_CODE=0" + - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" + - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" + - "exit $SYTEST_EXIT_CODE" + artifacts: + when: always + paths: + - "$CI_PROJECT_DIR/sytest.xml" + - "$CI_PROJECT_DIR/results.tap" + reports: + junit: "$CI_PROJECT_DIR/sytest.xml" + + +test:register:element-web-stable: + stage: "test" + needs: + - "build:debug:cargo:x86_64-unknown-linux-gnu" + image: "buildkite/puppeteer:latest" + tags: [ "docker" ] + interruptible: true + script: + - "CONDUIT_CONFIG=tests/test-config.toml ./conduit-debug-x86_64-unknown-linux-gnu > conduit.log &" + - "cd tests/client-element-web/" + - "npm install puppeteer" + - "node test-element-web-registration.js \"https://app.element.io/\" \"http://localhost:6167\"" + - "killall --regexp \"conduit\"" + - "cd ../.." 
+ - "cat conduit.log" + artifacts: + paths: + - "tests/client-element-web/*.png" + - "*.log" + expire_in: 1 week + when: always + retry: 1 + + # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # @@ -279,10 +318,10 @@ build:docker:main: publish:package: stage: "upload artifacts" needs: - - "build:cargo:x86_64-unknown-linux-gnu" - - "build:cargo:armv7-unknown-linux-gnueabihf" - - "build:cargo:aarch64-unknown-linux-gnu" - - "build:cargo:x86_64-unknown-linux-musl" + - "build:release:cargo:x86_64-unknown-linux-gnu" + - "build:release:cargo:armv7-unknown-linux-gnueabihf" + - "build:release:cargo:aarch64-unknown-linux-gnu" + - "build:release:cargo:x86_64-unknown-linux-musl" - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' diff --git a/Cargo.toml b/Cargo.toml index 3d18bfb..47bbd2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,21 @@ conf-files = [ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } -# For flamegraphs: -#[profile.release] -#debug = true +[profile.dev] +lto = 'thin' +incremental = true + +[profile.release] +lto = true +incremental = true +codegen-units = 1 +# If you want to make flamegraphs, enable debug info: +# debug = true + +# For releases also try to max optimizations for dependencies: +[profile.release.build-override] +opt-level = 3 +codegen-units = 1 +[profile.release.package."*"] +opt-level = 3 +codegen-units = 1 diff --git a/DEPLOY.md b/DEPLOY.md index 1010c0f..85f3f07 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -6,8 +6,7 @@ If you run into any problems while setting up Conduit, write an email to `timo@k ## Installing Conduit -You may simply download the binary that fits your machine. Run `uname -m` to see -what you need. Now copy the right url: +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: | CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | | -------------------- | ------------------------------------- | ----------------------- | @@ -15,10 +14,13 @@ what you need. 
Now copy the right url: | armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | | armv8 / aarch64 | [Download][armv8-gnu] | - | -[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:cargo:x86_64-unknown-linux-gnu -[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:cargo:x86_64-unknown-linux-musl -[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:cargo:armv7-unknown-linux-gnueabihf -[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:cargo:aarch64-unknown-linux-gnu +[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu + +[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl + +[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf + +[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -26,6 +28,7 @@ $ sudo chmod +x /usr/local/bin/matrix-conduit ``` Alternatively, you may compile the binary yourself using + ```bash $ cargo build --release ``` From 0cb22996be02f38dff2e049cfeae351f607aeb38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 19:47:16 +0200 Subject: [PATCH 0733/1727] remove prev event fetch limit --- src/server_server.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 0c226ac..cbbb850 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1016,10 +1016,11 @@ pub fn handle_incoming_pdu<'a>( // 8. if not timeline event: stop if !is_timeline_event || incoming_pdu.origin_server_ts - < (utils::millis_since_unix_epoch() - 1000 * 60 * 20) - .try_into() - .expect("time is valid") - // Not older than 20 mins + < db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists") + .origin_server_ts { let elapsed = start_time.elapsed(); warn!( @@ -1031,6 +1032,7 @@ pub fn handle_incoming_pdu<'a>( return Ok(None); } + // TODO: make not recursive // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fetch_and_handle_events( db, From 1e3a8ca35dff2287963da7cd4fae30539790fa76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 19:47:49 +0200 Subject: [PATCH 0734/1727] fmt --- src/database/rooms.rs | 46 +++++++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fc2bd05..99f0c83 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -265,7 +265,11 @@ impl Rooms { /// Checks if a room exists. pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); // Look for PDUs in that room. self.pduid_pdu @@ -649,11 +653,13 @@ impl Rooms { } pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self - .roomid_shortroomid + self.roomid_shortroomid .get(&room_id.as_bytes())? - .map(|bytes| - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db."))).transpose() + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() } pub fn get_shortstatekey( @@ -802,7 +808,11 @@ impl Rooms { } pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -1867,7 +1877,11 @@ impl Rooms { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>> + 'a> { - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); // Skip the first pdu if it's exactly at since, because we sent that last time let mut first_pdu_id = prefix.clone(); @@ -1899,7 +1913,11 @@ impl Rooms { until: u64, ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` @@ -1932,7 +1950,11 @@ impl Rooms { from: u64, ) -> Result, PduEvent)>> + 'a> { // Create the first part of the full pdu id - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event @@ -2483,7 +2505,11 @@ impl Rooms { room_id: &RoomId, search_string: &str, ) -> Result<(impl Iterator> + 'a, Vec)> { - let prefix = self.get_shortroomid(room_id)?.expect("room exists").to_be_bytes().to_vec(); + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); let prefix_clone = prefix.clone(); let words = search_string From 1d465699296837fe147f855d95fcfef57775a3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 21:30:14 +0200 Subject: [PATCH 0735/1727] fix: don't use recursion for prev events --- src/server_server.rs | 228 ++++++++++++++++++++++++------------------- 1 file changed, 129 insertions(+), 99 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index cbbb850..5b2acd0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -840,7 +840,7 @@ type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; /// 14. 
Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub fn handle_incoming_pdu<'a>( +pub async fn handle_incoming_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, @@ -848,22 +848,76 @@ pub fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, StdResult>, String>> { +) -> StdResult>, String> { + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + return Err("Room is unknown to this server.".to_string()); + } + } + + // 1. Skip the PDU if we already have it as a timeline event + if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { + return Ok(Some(pdu_id.to_vec())); + } + + let create_event = db + .rooms + .room_state_get(&room_id, &EventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.".to_owned())? + .ok_or_else(|| "Failed to find create event in db.".to_owned())?; + + let (incoming_pdu, val) = handle_outlier_pdu(origin, &create_event, event_id, room_id, value, db, pub_key_map).await?; + + // 8. if not timeline event: stop + if !is_timeline_event + || incoming_pdu.origin_server_ts + < db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists") + .origin_server_ts + { + return Ok(None); + } + + // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); + let mut todo_timeline_stack = Vec::new(); + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, Some(json))) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id], + &create_event, + &room_id, + pub_key_map, + ) + .await.pop() { + todo_timeline_stack.push((pdu, json)); + } + } + + while let Some(prev) = todo_timeline_stack.pop() { + upgrade_outlier_to_timeline_pdu(prev.0, prev.1, &create_event, origin, db, room_id, pub_key_map).await?; + } + + upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, db, room_id, pub_key_map).await +} + +fn handle_outlier_pdu<'a>( + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + db: &'a Database, + pub_key_map: &'a RwLock>>, +) -> AsyncRecursiveType<'a, StdResult<(Arc, BTreeMap), String>> { Box::pin(async move { let start_time = Instant::now(); // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - match db.rooms.exists(&room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_string()); - } - } - - // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { - return Ok(Some(pdu_id.to_vec())); - } // We go through all the signatures we see on the value and fetch the corresponding signing // keys @@ -873,17 +927,12 @@ pub fn handle_incoming_pdu<'a>( // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let create_event = db - .rooms - .room_state_get(&room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? 
- .ok_or_else(|| "Failed to find create event in db.".to_owned())?; - let create_event_content = - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -924,13 +973,13 @@ pub fn handle_incoming_pdu<'a>( // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // EDIT: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_events( + fetch_and_handle_outliers( db, origin, &incoming_pdu.auth_events, + &create_event, &room_id, pub_key_map, - false, ) .await; @@ -1013,37 +1062,20 @@ pub fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; debug!("Added pdu as outlier."); - // 8. if not timeline event: stop - if !is_timeline_event - || incoming_pdu.origin_server_ts - < db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists") - .origin_server_ts - { - let elapsed = start_time.elapsed(); - warn!( - "Handling outlier event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - return Ok(None); - } + Ok((incoming_pdu,val)) + }) - // TODO: make not recursive - // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - fetch_and_handle_events( - db, - origin, - &incoming_pdu.prev_events, - &room_id, - pub_key_map, - true, - ) - .await; +} +async fn upgrade_outlier_to_timeline_pdu( + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + db: &Database, + room_id: &RoomId, + pub_key_map: &RwLock>>, +) -> StdResult>, String> { // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities // doing all the checks in this list starting at 1. These are not timeline events. 
@@ -1065,17 +1097,17 @@ pub fn handle_incoming_pdu<'a>( if let Some(Ok(state)) = state { warn!("Using cached state"); - let mut state = fetch_and_handle_events( + let mut state = fetch_and_handle_outliers( db, origin, &state.into_iter().collect::>(), + &create_event, &room_id, pub_key_map, - false, ) .await .into_iter() - .map(|pdu| { + .map(|(pdu,_)| { ( ( pdu.kind.clone(), @@ -1119,18 +1151,18 @@ pub fn handle_incoming_pdu<'a>( { Ok(res) => { debug!("Fetching state events at event."); - let state_vec = fetch_and_handle_events( + let state_vec = fetch_and_handle_outliers( &db, origin, &res.pdu_ids, + &create_event, &room_id, pub_key_map, - false, ) .await; let mut state = HashMap::new(); - for pdu in state_vec { + for (pdu, _) in state_vec { match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { Entry::Vacant(v) => { v.insert(pdu); @@ -1153,13 +1185,13 @@ pub fn handle_incoming_pdu<'a>( } debug!("Fetching auth chain events at event."); - fetch_and_handle_events( + fetch_and_handle_outliers( &db, origin, &res.auth_chain_ids, + &create_event, &room_id, pub_key_map, - false, ) .await; @@ -1175,6 +1207,28 @@ pub fn handle_incoming_pdu<'a>( state_at_incoming_event.expect("we always set this to some above"); // 11. Check the auth of the event passes based on the state of the event + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + // If the previous event was the create event special rules apply + let previous_create = if incoming_pdu.auth_events.len() == 1 + && incoming_pdu.prev_events == incoming_pdu.auth_events + { + db.rooms + .get_pdu(&incoming_pdu.auth_events[0]) + .map_err(|e| e.to_string())? + .filter(|maybe_create| **maybe_create == *create_event) + } else { + None + }; + + if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1396,34 +1450,27 @@ pub fn handle_incoming_pdu<'a>( // Event has passed all auth/stateres checks drop(state_lock); - - let elapsed = start_time.elapsed(); - warn!( - "Handling timeline event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); Ok(pdu_id) - }) } /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// +/// Returns pdu and if we fetched it over federation the raw json. +/// /// a. Look in the main timeline (pduid_pdu tree) /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? //#[tracing::instrument(skip(db, key_map, auth_cache))] -pub(crate) fn fetch_and_handle_events<'a>( +pub(crate) fn fetch_and_handle_outliers<'a>( db: &'a Database, origin: &'a ServerName, events: &'a [EventId], + create_event: &'a PduEvent, room_id: &'a RoomId, pub_key_map: &'a RwLock>>, - are_timeline_events: bool, -) -> AsyncRecursiveType<'a, Vec>> { +) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { Entry::Vacant(e) => { @@ -1449,16 +1496,12 @@ pub(crate) fn fetch_and_handle_events<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree - // (get_pdu checks both) - let local_pdu = if are_timeline_events { - db.rooms.get_non_outlier_pdu(&id).map(|o| o.map(Arc::new)) - } else { - db.rooms.get_pdu(&id) - }; + // (get_pdu_json checks both) + let local_pdu = db.rooms.get_pdu(&id); let pdu = match local_pdu { Ok(Some(pdu)) => { trace!("Found {} in db", id); - pdu + (pdu, None) } Ok(None) => { // c. Ask origin server over federation @@ -1474,39 +1517,26 @@ pub(crate) fn fetch_and_handle_events<'a>( { Ok(res) => { debug!("Got {} over federation", id); - let (event_id, mut value) = + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => continue, }; // This will also fetch the auth chain - match handle_incoming_pdu( + match handle_outlier_pdu( origin, + create_event, &event_id, &room_id, value.clone(), - are_timeline_events, db, pub_key_map, ) .await { - Ok(_) => { - value.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), - ); - - Arc::new( - serde_json::from_value( - serde_json::to_value(value) - .expect("canonicaljsonobject is valid value"), - ) - .expect( - "This is possible because handle_incoming_pdu worked", - ), - ) + Ok((pdu, json)) => { + (pdu, Some(json)) } Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); From ecd1e45a449036e8d3166e1c59b51a161bf31f12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 21:56:15 +0200 Subject: [PATCH 0736/1727] fix: fetch more than one prev event --- src/server_server.rs | 745 ++++++++++++++++++++++--------------------- 1 file changed, 389 insertions(+), 356 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 5b2acd0..e722126 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -867,17 +867,19 @@ pub async fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to ask database for event.".to_owned())? .ok_or_else(|| "Failed to find create event in db.".to_owned())?; - let (incoming_pdu, val) = handle_outlier_pdu(origin, &create_event, event_id, room_id, value, db, pub_key_map).await?; + let (incoming_pdu, val) = handle_outlier_pdu( + origin, + &create_event, + event_id, + room_id, + value, + db, + pub_key_map, + ) + .await?; // 8. if not timeline event: stop - if !is_timeline_event - || incoming_pdu.origin_server_ts - < db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists") - .origin_server_ts - { + if !is_timeline_event { return Ok(None); } @@ -893,16 +895,45 @@ pub async fn handle_incoming_pdu<'a>( &room_id, pub_key_map, ) - .await.pop() { - todo_timeline_stack.push((pdu, json)); + .await + .pop() + { + if incoming_pdu.origin_server_ts + > db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? 
+ .expect("Room exists") + .origin_server_ts + { + todo_outlier_stack.extend(pdu.prev_events.iter().cloned()); + todo_timeline_stack.push((pdu, json)); + } } } while let Some(prev) = todo_timeline_stack.pop() { - upgrade_outlier_to_timeline_pdu(prev.0, prev.1, &create_event, origin, db, room_id, pub_key_map).await?; + upgrade_outlier_to_timeline_pdu( + prev.0, + prev.1, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await?; } - upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, db, room_id, pub_key_map).await + upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await } fn handle_outlier_pdu<'a>( @@ -913,7 +944,8 @@ fn handle_outlier_pdu<'a>( value: BTreeMap, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, StdResult<(Arc, BTreeMap), String>> { +) -> AsyncRecursiveType<'a, StdResult<(Arc, BTreeMap), String>> +{ Box::pin(async move { let start_time = Instant::now(); @@ -928,11 +960,11 @@ fn handle_outlier_pdu<'a>( // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let create_event_content = - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1062,9 +1094,8 @@ fn handle_outlier_pdu<'a>( .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; debug!("Added pdu as outlier."); - Ok((incoming_pdu,val)) + Ok((incoming_pdu, val)) }) - } async fn upgrade_outlier_to_timeline_pdu( @@ -1076,381 +1107,385 @@ async fn upgrade_outlier_to_timeline_pdu( room_id: &RoomId, pub_key_map: &RwLock>>, ) -> StdResult>, String> { - // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. 
- // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event."); - let mut state_at_incoming_event = None; + debug!("Requesting state at event."); + let mut state_at_incoming_event = None; - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db - .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &incoming_pdu.prev_events[0]; + let prev_event_sstatehash = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())?; - let state = - prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); + let state = + prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); - if let Some(Ok(state)) = state { - warn!("Using cached state"); - let mut state = fetch_and_handle_outliers( - db, + if let Some(Ok(state)) = state { + warn!("Using cached state"); + let mut state = fetch_and_handle_outliers( + db, + origin, + &state.into_iter().collect::>(), + &create_event, + &room_id, + pub_key_map, + ) + .await + .into_iter() + .map(|(pdu, _)| { + ( + ( + pdu.kind.clone(), + pdu.state_key + .clone() + .expect("events from state_full_ids are state events"), + ), + pdu, + ) + }) + .collect::>(); + + let prev_pdu = + db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; + + if let Some(state_key) = &prev_pdu.state_key { + state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); + } + + state_at_incoming_event = Some(state); + } + // TODO: set incoming_auth_events? + } + + if state_at_incoming_event.is_none() { + warn!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: &room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + debug!("Fetching state events at event."); + let state_vec = fetch_and_handle_outliers( + &db, origin, - &state.into_iter().collect::>(), + &res.pdu_ids, &create_event, &room_id, pub_key_map, ) - .await - .into_iter() - .map(|(pdu,_)| { - ( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("events from state_full_ids are state events"), + .await; + + let mut state = HashMap::new(); + for (pdu, _) in state_vec { + match state.entry(( + pdu.kind.clone(), + pdu.state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?, + )) { + Entry::Vacant(v) => { + v.insert(pdu); + } + Entry::Occupied(_) => return Err( + "State event's type and state_key combination exists multiple times." 
+ .to_owned(), ), - pdu, - ) - }) - .collect::>(); - - let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; - - if let Some(state_key) = &prev_pdu.state_key { - state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); + } } + // The original create event must still be in the state + if state + .get(&(EventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(&create_event) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + debug!("Fetching auth chain events at event."); + fetch_and_handle_outliers( + &db, + origin, + &res.auth_chain_ids, + &create_event, + &room_id, + pub_key_map, + ) + .await; + state_at_incoming_event = Some(state); } - // TODO: set incoming_auth_events? - } + Err(_) => { + return Err("Fetching state for event failed".into()); + } + }; + } - if state_at_incoming_event.is_none() { - warn!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: &room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - debug!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - &db, - origin, - &res.pdu_ids, - &create_event, - &room_id, - pub_key_map, - ) - .await; + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); - let mut state = HashMap::new(); - for (pdu, _) in state_vec { - match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { - Entry::Vacant(v) => { - v.insert(pdu); - } - Entry::Occupied(_) => { - return Err( - "State event's type and state_key combination exists multiple times.".to_owned(), - ) - } - } - } - - // The original create event must still be in the state - if state - .get(&(EventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(&create_event) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - debug!("Fetching auth chain events at event."); - fetch_and_handle_outliers( - &db, - origin, - &res.auth_chain_ids, - &create_event, - &room_id, - pub_key_map, - ) - .await; - - state_at_incoming_event = Some(state); - } - Err(_) => { - return Err("Fetching state for event failed".into()); - } - }; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - - // 11. Check the auth of the event passes based on the state of the event + // 11. 
Check the auth of the event passes based on the state of the event let create_event_content = serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? - .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; + // If the previous event was the create event special rules apply + let previous_create = if incoming_pdu.auth_events.len() == 1 + && incoming_pdu.prev_events == incoming_pdu.auth_events + { + db.rooms + .get_pdu(&incoming_pdu.auth_events[0]) + .map_err(|e| e.to_string())? + .filter(|maybe_create| **maybe_create == *create_event) + } else { + None + }; + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + previous_create.clone(), + &state_at_incoming_event, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed.".to_owned())? + { + return Err("Event has failed auth check with state at the event.".into()); + } + debug!("Auth check succeeded."); - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create.clone(), - &state_at_incoming_event, - None, // TODO: third party invite - ) - .map_err(|_e| "Auth check failed.".to_owned())? - { - return Err("Event has failed auth check with state at the event.".into()); + // We start looking at current room state now, so lets lock the room + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + let mut extremities = db + .rooms + .get_pdu_leaves(&room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); } - debug!("Auth check succeeded."); + } - // We start looking at current room state now, so lets lock the room + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), + let mut extremity_statehashes = Vec::new(); + + for id in &extremities { + match db + .rooms + .get_pdu(&id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_statehashes.push(( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? 
+ .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + Some(leaf_pdu), + )); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + let current_statehash = db + .rooms + .current_shortstatehash(&room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? + .expect("every room has state"); + + let current_state = db + .rooms + .state_full(current_statehash) + .map_err(|_| "Failed to load room state.")?; + + extremity_statehashes.push((current_statehash.clone(), None)); + + let mut fork_states = Vec::new(); + for (statehash, leaf_pdu) in extremity_statehashes { + let mut leaf_state = db + .rooms + .state_full(statehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(leaf_pdu) = leaf_pdu { + if let Some(state_key) = &leaf_pdu.state_key { + // Now it's the state after + let key = (leaf_pdu.kind.clone(), state_key.clone()); + leaf_state.insert(key, leaf_pdu); + } + } + + fork_states.push(leaf_state); + } + + // We also add state after incoming event to the fork states + extremities.insert(incoming_pdu.event_id.clone()); + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + state_after.insert( + (incoming_pdu.kind.clone(), state_key.clone()), + incoming_pdu.clone(), ); - let state_lock = mutex_state.lock().await; + } + fork_states.push(state_after.clone()); - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) - let mut extremities = db - .rooms - .get_pdu_leaves(&room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) + .collect() + } else { + // We do need to force an update to this room's state + update_state = true; - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); - } - } + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id.clone())) + .collect::>() + }) + .collect::>(); - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); - - let mut extremity_statehashes = Vec::new(); - - for id in &extremities { - match db - .rooms - .get_pdu(&id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? 
- { - Some(leaf_pdu) => { - extremity_statehashes.push(( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - Some(leaf_pdu), - )); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - let current_statehash = db - .rooms - .current_shortstatehash(&room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? - .expect("every room has state"); - - let current_state = db - .rooms - .state_full(current_statehash) - .map_err(|_| "Failed to load room state.")?; - - extremity_statehashes.push((current_statehash.clone(), None)); - - let mut fork_states = Vec::new(); - for (statehash, leaf_pdu) in extremity_statehashes { - let mut leaf_state = db - .rooms - .state_full(statehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(leaf_pdu) = leaf_pdu { - if let Some(state_key) = &leaf_pdu.state_key { - // Now it's the state after - let key = (leaf_pdu.kind.clone(), state_key.clone()); - leaf_state.insert(key, leaf_pdu); - } - } - - fork_states.push(leaf_state); - } - - // We also add state after incoming event to the fork states - extremities.insert(incoming_pdu.event_id.clone()); - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - state_after.insert( - (incoming_pdu.kind.clone(), state_key.clone()), - incoming_pdu.clone(), + let mut auth_chain_sets = Vec::new(); + for state in fork_states { + auth_chain_sets.push( + get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } - fork_states.push(state_after.clone()); - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) - .collect() - } else { - // We do need to force an update to this room's state - update_state = true; - - let fork_states = &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(); - - let mut auth_chain_sets = Vec::new(); - for state in fork_states { - auth_chain_sets.push( - get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); - } - - let state = match state_res::StateResolution::resolve( - &room_id, - room_version_id, - fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); + let state = match state_res::StateResolution::resolve( + &room_id, + room_version_id, + fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); } - }; - - state + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } }; - debug!("starting soft fail auth check"); - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create, - ¤t_state, - None, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; + state + }; - let mut pdu_id = None; - if !soft_fail { - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - pdu_id = Some( - append_incoming_pdu( - &db, - &incoming_pdu, - val, - extremities, - &state_at_incoming_event, - &state_lock, - ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?, - ); - debug!("Appended incoming pdu."); - } else { - warn!("Event was soft failed: {:?}", incoming_pdu); - } + debug!("starting soft fail auth check"); + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + previous_create, + ¤t_state, + None, + ) + .map_err(|_e| "Auth check failed.".to_owned())?; - // Set the new room state to the resolved state - if update_state { - db.rooms - .force_state(&room_id, new_room_state, &db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - debug!("Updated resolved state"); + let mut pdu_id = None; + if !soft_fail { + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. 
+ pdu_id = Some( + append_incoming_pdu( + &db, + &incoming_pdu, + val, + extremities, + &state_at_incoming_event, + &state_lock, + ) + .map_err(|_| "Failed to add pdu to db.".to_owned())?, + ); + debug!("Appended incoming pdu."); + } else { + warn!("Event was soft failed: {:?}", incoming_pdu); + } - if soft_fail { - // Soft fail, we leave the event as an outlier but don't add it to the timeline - return Err("Event has been soft failed".into()); - } + // Set the new room state to the resolved state + if update_state { + db.rooms + .force_state(&room_id, new_room_state, &db) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + debug!("Updated resolved state"); - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) + if soft_fail { + // Soft fail, we leave the event as an outlier but don't add it to the timeline + return Err("Event has been soft failed".into()); + } + + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) } /// Find the event and auth it. Once the event is validated (steps 1 - 8) @@ -1535,9 +1570,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( ) .await { - Ok((pdu, json)) => { - (pdu, Some(json)) - } + Ok((pdu, json)) => (pdu, Some(json)), Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); back_off(id.clone()); From f9a2edc0dda2b44fd04d20d946ea2b5431cf5d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 22:50:45 +0200 Subject: [PATCH 0737/1727] fix: also fetch prev events that are outliers already --- src/database/rooms.rs | 13 +++++++++++++ src/server_server.rs | 24 +++++++++++++----------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 99f0c83..4a3ab71 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -844,6 +844,19 @@ impl Rooms { .transpose() } + /// Returns the json of a pdu. + pub fn get_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + } + /// Returns the json of a pdu. pub fn get_non_outlier_pdu_json( &self, diff --git a/src/server_server.rs b/src/server_server.rs index e722126..a293d1b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -887,10 +887,10 @@ pub async fn handle_incoming_pdu<'a>( let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); let mut todo_timeline_stack = Vec::new(); while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, Some(json))) = fetch_and_handle_outliers( + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( db, origin, - &[prev_event_id], + &[prev_event_id.clone()], &create_event, &room_id, pub_key_map, @@ -898,15 +898,17 @@ pub async fn handle_incoming_pdu<'a>( .await .pop() { - if incoming_pdu.origin_server_ts - > db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists") - .origin_server_ts - { - todo_outlier_stack.extend(pdu.prev_events.iter().cloned()); - todo_timeline_stack.push((pdu, json)); + if let Some(json) = json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { + if incoming_pdu.origin_server_ts + > db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? 
+ .expect("Room exists") + .origin_server_ts + { + todo_outlier_stack.extend(pdu.prev_events.iter().cloned()); + todo_timeline_stack.push((pdu, json)); + } } } } From 5bd5b41c70791e9664d55425066cfe0be8282e35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Aug 2021 23:29:25 +0200 Subject: [PATCH 0738/1727] fix: fetch event multiple times --- src/database/rooms.rs | 8 ++------ src/server_server.rs | 15 ++++++++++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4a3ab71..2832cc2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -845,10 +845,7 @@ impl Rooms { } /// Returns the json of a pdu. - pub fn get_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? .map(|pdu| { @@ -1134,10 +1131,9 @@ impl Rooms { &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; - // This also replaces the eventid of any outliers with the correct - // pduid, removing the place holder. self.eventid_pduid .insert(pdu.event_id.as_bytes(), &pdu_id)?; + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; drop(insert_lock); diff --git a/src/server_server.rs b/src/server_server.rs index a293d1b..a4bfda5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -884,9 +884,15 @@ pub async fn handle_incoming_pdu<'a>( } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + let mut visited = HashSet::new(); let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); let mut todo_timeline_stack = Vec::new(); while let Some(prev_event_id) = todo_outlier_stack.pop() { + if visited.contains(&prev_event_id) { + continue; + } + visited.insert(prev_event_id.clone()); + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( db, origin, @@ -898,7 +904,9 @@ pub async fn handle_incoming_pdu<'a>( .await .pop() { - if let Some(json) = json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { if incoming_pdu.origin_server_ts > db.rooms .first_pdu_in_room(&room_id) @@ -949,8 +957,6 @@ fn handle_outlier_pdu<'a>( ) -> AsyncRecursiveType<'a, StdResult<(Arc, BTreeMap), String>> { Box::pin(async move { - let start_time = Instant::now(); - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // We go through all the signatures we see on the value and fetch the corresponding signing @@ -1109,6 +1115,9 @@ async fn upgrade_outlier_to_timeline_pdu( room_id: &RoomId, pub_key_map: &RwLock>>, ) -> StdResult>, String> { + if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities // doing all the checks in this list starting at 1. These are not timeline events. 
From a4310f840ef4c6041b6098c87e4bcce697217209 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 15 Aug 2021 06:46:00 +0200 Subject: [PATCH 0739/1727] improvement: state info cache --- src/database.rs | 1 + src/database/rooms.rs | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/src/database.rs b/src/database.rs index 0bf2a44..e66ff04 100644 --- a/src/database.rs +++ b/src/database.rs @@ -278,6 +278,7 @@ impl Database { pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)), shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), + stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2832cc2..5baadf9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -92,6 +92,13 @@ pub struct Rooms { pub(super) pdu_cache: Mutex>>, pub(super) auth_chain_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>, + pub(super) stateinfo_cache: Mutex, // full state + HashSet, // added + HashSet, // removed + )>>>, } impl Rooms { @@ -407,6 +414,14 @@ impl Rooms { HashSet, // removed )>, > { + if let Some(r) = self.stateinfo_cache + .lock() + .unwrap() + .get_mut(&shortstatehash) + { + return Ok(r.clone()); + } + let value = self .shortstatehash_statediff .get(&shortstatehash.to_be_bytes())? @@ -443,10 +458,18 @@ impl Rooms { response.push((shortstatehash, state, added, removed)); + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); Ok(response) } else { let mut response = Vec::new(); response.push((shortstatehash, added.clone(), added, removed)); + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); Ok(response) } } From 2c3bee34a0a9da43374d48527c2208104ea1c95c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 15 Aug 2021 13:17:42 +0200 Subject: [PATCH 0740/1727] improvement: better sqlite --- src/database/abstraction.rs | 1 + src/database/abstraction/sqlite.rs | 27 ++++++++++++++++++---- src/database/rooms.rs | 37 +++++++++++++++++++----------- 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index f381ce9..5b941fb 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -35,6 +35,7 @@ pub trait Tree: Send + Sync { ) -> Box, Vec)> + 'a>; fn increment(&self, key: &[u8]) -> Result>; + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()>; fn scan_prefix<'a>( &'a self, diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 3c4ae9c..1e55418 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -49,11 +49,11 @@ impl Engine { fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { let conn = Connection::open(&path)?; - conn.pragma_update(Some(Main), "page_size", &32768)?; + conn.pragma_update(Some(Main), "page_size", &1024)?; conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", &8000)?; Ok(conn) } @@ -93,8 +93,9 @@ impl Engine { } pub fn flush_wal(self: &Arc) -> Result<()> { - self.write_lock() - 
.pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; + // We use autocheckpoints + //self.write_lock() + //.pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; Ok(()) } } @@ -248,6 +249,24 @@ impl Tree for SqliteTable { Ok(()) } + #[tracing::instrument(skip(self, iter))] + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let guard = self.engine.write_lock(); + + guard.execute("BEGIN", [])?; + for key in iter { + let old = self.get_with_guard(&guard, &key)?; + let new = crate::utils::increment(old.as_deref()) + .expect("utils::increment always returns Some"); + self.insert_with_guard(&guard, &key, &new)?; + } + guard.execute("COMMIT", [])?; + + drop(guard); + + Ok(()) + } + #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5baadf9..d648b7d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -92,13 +92,17 @@ pub struct Rooms { pub(super) pdu_cache: Mutex>>, pub(super) auth_chain_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>, - pub(super) stateinfo_cache: Mutex, // full state - HashSet, // added - HashSet, // removed - )>>>, + pub(super) stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, } impl Rooms { @@ -414,7 +418,8 @@ impl Rooms { HashSet, // removed )>, > { - if let Some(r) = self.stateinfo_cache + if let Some(r) = self + .stateinfo_cache .lock() .unwrap() .get_mut(&shortstatehash) @@ -458,10 +463,6 @@ impl Rooms { response.push((shortstatehash, state, added, removed)); - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); Ok(response) } else { let mut response = Vec::new(); @@ -1173,6 +1174,9 @@ impl Rooms { let sync_pdu = pdu.to_sync_room_event(); + let mut notifies = Vec::new(); + let mut highlights = Vec::new(); + for user in db .rooms .room_members(&pdu.room_id) @@ -1218,11 +1222,11 @@ impl Rooms { userroom_id.extend_from_slice(pdu.room_id.as_bytes()); if notify { - self.userroomid_notificationcount.increment(&userroom_id)?; + notifies.push(userroom_id.clone()); } if highlight { - self.userroomid_highlightcount.increment(&userroom_id)?; + highlights.push(userroom_id); } for senderkey in db.pusher.get_pusher_senderkeys(&user) { @@ -1230,6 +1234,11 @@ impl Rooms { } } + self.userroomid_notificationcount + .increment_batch(&mut notifies.into_iter())?; + self.userroomid_highlightcount + .increment_batch(&mut highlights.into_iter())?; + match pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { From 0823506d05b63efe7ef3a3c6611f066f24ec5bc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 16 Aug 2021 23:24:52 +0200 Subject: [PATCH 0741/1727] fix: don't load endless prev events and fix room join bug --- src/database/rooms.rs | 7 ++++++- src/server_server.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d648b7d..75251aa 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1571,6 +1571,8 @@ impl Rooms { let shortstatekey = self.get_or_create_shortstatekey(&new_pdu.kind, &state_key, globals)?; + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + let replaces = states_parents .last() .map(|info| { @@ -1580,11 +1582,14 @@ impl Rooms { }) .unwrap_or_default(); + if Some(&new) == replaces { + 
return Ok(previous_shortstatehash.expect("must exist")); + } + // TODO: statehash with deterministic inputs let shortstatehash = globals.next_count()?; let mut statediffnew = HashSet::new(); - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; statediffnew.insert(new); let mut statediffremoved = HashSet::new(); diff --git a/src/server_server.rs b/src/server_server.rs index a4bfda5..7a28e5d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -907,7 +907,7 @@ pub async fn handle_incoming_pdu<'a>( if let Some(json) = json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { - if incoming_pdu.origin_server_ts + if pdu.origin_server_ts > db.rooms .first_pdu_in_room(&room_id) .map_err(|_| "Error loading first room event.".to_owned())? From 75ba8bb5657231ebc8ea1b54b49cb667e11ad3a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 17 Aug 2021 00:22:52 +0200 Subject: [PATCH 0742/1727] fix: faster room joins --- src/database/abstraction/sqlite.rs | 4 ++-- src/database/rooms.rs | 10 +++++++++- src/server_server.rs | 1 + 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1e55418..5b895c7 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -49,11 +49,11 @@ impl Engine { fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { let conn = Connection::open(&path)?; - conn.pragma_update(Some(Main), "page_size", &1024)?; + conn.pragma_update(Some(Main), "page_size", &2048)?; conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &8000)?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", &2000)?; Ok(conn) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 75251aa..e2415a4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -392,6 +392,7 @@ impl Rooms { &pdu.sender, None, db, + false, )?; } } @@ -400,6 +401,8 @@ impl Rooms { } } + self.update_joined_count(room_id)?; + self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; @@ -1285,6 +1288,7 @@ impl Rooms { &pdu.sender, invite_state, db, + true, )?; } } @@ -2051,6 +2055,7 @@ impl Rooms { sender: &UserId, last_state: Option>>, db: &Database, + update_joined_count: bool, ) -> Result<()> { // Keep track what remote users exist by adding them as "deactivated" users if user_id.server_name() != db.globals.server_name() { @@ -2232,7 +2237,9 @@ impl Rooms { _ => {} } - self.update_joined_count(room_id)?; + if update_joined_count { + self.update_joined_count(room_id)?; + } Ok(()) } @@ -2269,6 +2276,7 @@ impl Rooms { user_id, last_state, db, + true, )?; } else { let mutex_state = Arc::clone( diff --git a/src/server_server.rs b/src/server_server.rs index 7a28e5d..de3eef5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2471,6 +2471,7 @@ pub async fn create_invite_route( &sender, Some(invite_state), &db, + true, )?; } From bf7e019a686c4263163a3b278431c9fbc184d74b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 17 Aug 2021 16:06:09 +0200 Subject: [PATCH 0743/1727] improvement: better prev event fetching, perf improvements --- src/client_server/account.rs | 2 +- src/database.rs | 2 + src/database/rooms.rs | 140 +++++++++++++++++++++++++++++------ 
src/server_server.rs | 127 +++++++++++++++++++------------ 4 files changed, 202 insertions(+), 69 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index d4f103c..b00882a 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -733,7 +733,7 @@ pub async fn deactivate_route( pub async fn third_party_route( body: Ruma, ) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_contacts::Response::new(Vec::new()).into()) } diff --git a/src/database.rs b/src/database.rs index e66ff04..5ad2add 100644 --- a/src/database.rs +++ b/src/database.rs @@ -278,6 +278,8 @@ impl Database { pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)), shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), + eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), + statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e2415a4..600566c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -92,6 +92,8 @@ pub struct Rooms { pub(super) pdu_cache: Mutex>>, pub(super) auth_chain_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>, + pub(super) eventidshort_cache: Mutex>, + pub(super) statekeyshort_cache: Mutex>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -665,7 +667,11 @@ impl Rooms { event_id: &EventId, globals: &super::globals::Globals, ) -> Result { - Ok(match self.eventid_shorteventid.get(event_id.as_bytes())? { + if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(&event_id) { + return Ok(*short); + } + + let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { Some(shorteventid) => utils::u64_from_bytes(&shorteventid) .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, None => { @@ -676,7 +682,14 @@ impl Rooms { .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; shorteventid } - }) + }; + + self.eventidshort_cache + .lock() + .unwrap() + .insert(event_id.clone(), short); + + Ok(short) } pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { @@ -694,17 +707,36 @@ impl Rooms { event_type: &EventType, state_key: &str, ) -> Result> { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(Some(*short)); + } + let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); - self.statekey_shortstatekey + let short = self + .statekey_shortstatekey .get(&statekey)? 
.map(|shortstatekey| { utils::u64_from_bytes(&shortstatekey) .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) }) - .transpose() + .transpose()?; + + if let Some(s) = short { + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), s); + } + + Ok(short) } pub fn get_or_create_shortroomid( @@ -730,11 +762,20 @@ impl Rooms { state_key: &str, globals: &super::globals::Globals, ) -> Result { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(*short); + } + let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); - Ok(match self.statekey_shortstatekey.get(&statekey)? { + let short = match self.statekey_shortstatekey.get(&statekey)? { Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, None => { @@ -743,7 +784,14 @@ impl Rooms { .insert(&statekey, &shortstatekey.to_be_bytes())?; shortstatekey } - }) + }; + + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + + Ok(short) } pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { @@ -2173,8 +2221,10 @@ impl Rooms { } } - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; + if update_joined_count { + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -2199,8 +2249,10 @@ impl Rooms { return Ok(()); } - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; + if update_joined_count { + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } self.userroomid_invitestate.insert( &userroom_id, &serde_json::to_vec(&last_state.unwrap_or_default()) @@ -2214,14 +2266,16 @@ impl Rooms { self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Leave | member::MembershipState::Ban => { - if self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; + if update_joined_count { + if self + .room_members(room_id) + .chain(self.room_members_invited(room_id)) + .filter_map(|r| r.ok()) + .all(|u| u.server_name() != user_id.server_name()) + { + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } } self.userroomid_leftstate.insert( &userroom_id, @@ -2245,10 +2299,52 @@ impl Rooms { } pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { - self.roomid_joinedcount.insert( - room_id.as_bytes(), - &(self.room_members(&room_id).count() as u64).to_be_bytes(), - ) + let mut joinedcount = 0_u64; + let mut joined_servers = HashSet::new(); + + for joined in self.room_members(&room_id).filter_map(|r| r.ok()) { + joined_servers.insert(joined.server_name().to_owned()); + joinedcount += 1; + } + + for invited in self.room_members_invited(&room_id).filter_map(|r| r.ok()) { + joined_servers.insert(invited.server_name().to_owned()); + } + + self.roomid_joinedcount + .insert(room_id.as_bytes(), 
&joinedcount.to_be_bytes())?; + + for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { + if !joined_servers.remove(&old_joined_server) { + // Server not in room anymore + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } + } + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } + + Ok(()) } pub async fn leave_room( diff --git a/src/server_server.rs b/src/server_server.rs index de3eef5..49f225f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -272,14 +272,20 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); response.map_err(|e| { - warn!("Invalid 200 response from {}: {}", &destination, e); + warn!( + "Invalid 200 response from {} on: {} {}", + &destination, url, e + ); Error::BadServerResponse("Server returned bad 200 response.") }) } else { Err(Error::FederationError( destination.to_owned(), RumaError::try_from_http_response(http_response).map_err(|e| { - warn!("Server returned bad error response: {}", e); + warn!( + "Invalid {} response from {} on: {} {}", + status, &destination, url, e + ); Error::BadServerResponse("Server returned bad error response.") })?, )) @@ -884,15 +890,10 @@ pub async fn handle_incoming_pdu<'a>( } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let mut visited = HashSet::new(); + let mut graph = HashMap::new(); + let mut eventid_info = HashMap::new(); let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); - let mut todo_timeline_stack = Vec::new(); while let Some(prev_event_id) = todo_outlier_stack.pop() { - if visited.contains(&prev_event_id) { - continue; - } - visited.insert(prev_event_id.clone()); - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( db, origin, @@ -914,24 +915,58 @@ pub async fn handle_incoming_pdu<'a>( .expect("Room exists") .origin_server_ts { - todo_outlier_stack.extend(pdu.prev_events.iter().cloned()); - todo_timeline_stack.push((pdu, json)); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + graph.insert(prev_event_id.clone(), HashSet::new()); + eventid_info.insert(prev_event_id.clone(), (pdu, json)); } + } else { + graph.insert(prev_event_id.clone(), HashSet::new()); } } } - while let Some(prev) = todo_timeline_stack.pop() { - upgrade_outlier_to_timeline_pdu( - prev.0, - prev.1, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await?; + let sorted = + state_res::StateResolution::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + 0, + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts.clone()), + ), + ruma::event_id!("$notimportant"), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + for prev_id in dbg!(sorted) { + if let Some((pdu, json)) = eventid_info.remove(&prev_id) { + upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await?; + } } upgrade_outlier_to_timeline_pdu( @@ -1872,8 +1907,7 @@ fn get_auth_chain( full_auth_chain.extend(cached.iter().cloned()); } else { drop(cache); - let mut auth_chain = HashSet::new(); - get_auth_chain_recursive(&event_id, &mut auth_chain, db)?; + let auth_chain = get_auth_chain_inner(&event_id, db)?; cache = db.rooms.auth_chain_cache(); cache.insert(sevent_id, auth_chain.clone()); full_auth_chain.extend(auth_chain); @@ -1887,33 +1921,34 @@ fn get_auth_chain( .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) } -fn get_auth_chain_recursive( - event_id: &EventId, - found: &mut HashSet, - db: &Database, -) -> Result<()> { - let r = db.rooms.get_pdu(&event_id); - match r { - Ok(Some(pdu)) => { - for auth_event in &pdu.auth_events { - let sauthevent = db - .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; - if !found.contains(&sauthevent) { - found.insert(sauthevent); - get_auth_chain_recursive(&auth_event, found, db)?; +fn get_auth_chain_inner(event_id: &EventId, db: &Database) -> Result> { + let mut todo = vec![event_id.clone()]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match db.rooms.get_pdu(&event_id) { + Ok(Some(pdu)) => { + for auth_event in &pdu.auth_events { + let sauthevent = db + .rooms + .get_or_create_shorteventid(auth_event, &db.globals)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } } } - } - Ok(None) 
=> { - warn!("Could not find pdu mentioned in auth events."); - } - Err(e) => { - warn!("Could not load event in auth chain: {}", e); + Ok(None) => { + warn!("Could not find pdu mentioned in auth events: {}", event_id); + } + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); + } } } - Ok(()) + Ok(found) } #[cfg_attr( From 46d8a46e1f79420ab14fd7c84c431671216039bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 19 Aug 2021 11:01:18 +0200 Subject: [PATCH 0744/1727] improvement: faster incoming transaction handling --- Cargo.lock | 114 ++++++++----- Cargo.toml | 8 +- src/client_server/account.rs | 4 + src/client_server/membership.rs | 5 + src/client_server/room.rs | 2 + src/client_server/session.rs | 7 +- src/database.rs | 2 +- src/database/rooms.rs | 39 +++++ src/database/uiaa.rs | 166 ++++++++----------- src/main.rs | 11 +- src/pdu.rs | 4 +- src/server_server.rs | 283 ++++++++++++++++++-------------- 12 files changed, 365 insertions(+), 280 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ed4ee7..83e21a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -248,7 +248,7 @@ dependencies = [ "jsonwebtoken", "lru-cache", "num_cpus", - "opentelemetry", + "opentelemetry 0.16.0", "opentelemetry-jaeger", "parking_lot", "pretty_env_logger", @@ -1466,16 +1466,46 @@ dependencies = [ ] [[package]] -name = "opentelemetry-jaeger" -version = "0.14.0" +name = "opentelemetry" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a9fc8192722e7daa0c56e59e2336b797122fb8598383dcb11c8852733b435c" +checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand 0.8.4", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" dependencies = [ "async-trait", "lazy_static", - "opentelemetry", + "opentelemetry 0.16.0", + "opentelemetry-semantic-conventions", "thiserror", "thrift", + "tokio", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" +dependencies = [ + "opentelemetry 0.16.0", ] [[package]] @@ -2014,8 +2044,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "assign", "js_int", @@ -2035,8 +2065,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.1" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.18.3" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "bytes", "http", @@ -2051,8 +2081,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.1" -source = 
"git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.18.3" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2062,8 +2092,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "ruma-api", "ruma-common", @@ -2076,8 +2106,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.11.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.12.2" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "assign", "bytes", @@ -2096,8 +2126,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.5.4" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "indexmap", "js_int", @@ -2111,8 +2141,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.23.2" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.24.4" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "indoc", "js_int", @@ -2127,8 +2157,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.23.2" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.24.4" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2138,8 +2168,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "js_int", "ruma-api", @@ -2153,8 +2183,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.19.4" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "paste", "rand 0.8.4", @@ -2167,8 +2197,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.19.4" -source = 
"git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2177,13 +2207,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" [[package]] name = "ruma-identity-service-api" -version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "js_int", "ruma-api", @@ -2195,8 +2225,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "js_int", "ruma-api", @@ -2210,8 +2240,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "bytes", "form_urlencoded", @@ -2224,8 +2254,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.4.1" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2235,8 +2265,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.8.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2252,8 +2282,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.2.0" -source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" dependencies = [ "itertools 0.10.1", "js_int", @@ -3022,7 +3052,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c47440f2979c4cd3138922840eec122e3c0ba2148bc290f756bd7fd60fc97fff" dependencies = [ - "opentelemetry", + "opentelemetry 0.15.0", "tracing", 
"tracing-core", "tracing-log", diff --git a/Cargo.toml b/Cargo.toml index 3d18bfb..69b54c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "eb19b0e08a901b87d11b3be0890ec788cc760492", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "a2d93500e1dbc87e7032a3c74f3b2479a7f84e93", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "995ccea20f5f6d4a8fb22041749ed4de22fa1b6a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -66,11 +66,11 @@ regex = "1.5.4" jsonwebtoken = "7.2.0" # Performance measurements tracing = { version = "0.1.26", features = ["release_max_level_warn"] } -opentelemetry = "0.15.0" tracing-subscriber = "0.2.19" tracing-opentelemetry = "0.14.0" tracing-flame = "0.1.0" -opentelemetry-jaeger = "0.14.0" +opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } +opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } pretty_env_logger = "0.4.0" lru-cache = "0.1.2" rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index b00882a..e68c957 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -292,6 +292,7 @@ pub async fn register_route( is_direct: None, third_party_invite: None, blurhash: None, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -457,6 +458,7 @@ pub async fn register_route( is_direct: None, third_party_invite: None, blurhash: None, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -478,6 +480,7 @@ pub async fn register_route( is_direct: None, third_party_invite: None, blurhash: None, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -683,6 +686,7 @@ pub async fn deactivate_route( is_direct: None, third_party_invite: None, blurhash: None, + reason: None, }; let mutex_state = Arc::clone( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index de6fa5a..222d204 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -262,6 +262,7 @@ pub async fn ban_user_route( is_direct: None, third_party_invite: None, blurhash: db.users.blurhash(&body.user_id)?, + reason: None, }), |event| { let mut event = serde_json::from_value::>( @@ -563,6 
+564,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: db.users.blurhash(&sender_user)?, + reason: None, }) .expect("event is valid, we just created it"), ); @@ -695,6 +697,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: db.users.blurhash(&sender_user)?, + reason: None, }; db.rooms.build_and_append_pdu( @@ -846,6 +849,7 @@ pub async fn invite_helper<'a>( membership: MembershipState::Invite, third_party_invite: None, blurhash: None, + reason: None, }) .expect("member event is valid value"); @@ -1040,6 +1044,7 @@ pub async fn invite_helper<'a>( is_direct: Some(is_direct), third_party_invite: None, blurhash: db.users.blurhash(&user_id)?, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index c323be4..2541278 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -107,6 +107,7 @@ pub async fn create_room_route( is_direct: Some(body.is_direct), third_party_invite: None, blurhash: db.users.blurhash(&sender_user)?, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -517,6 +518,7 @@ pub async fn upgrade_room_route( is_direct: None, third_party_invite: None, blurhash: db.users.blurhash(&sender_user)?, + reason: None, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index d4d3c03..dada2d5 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -3,7 +3,10 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::session::{get_login_types, login, logout, logout_all}, + r0::{ + session::{get_login_types, login, logout, logout_all}, + uiaa::IncomingUserIdentifier, + }, }, UserId, }; @@ -60,7 +63,7 @@ pub async fn login_route( identifier, password, } => { - let username = if let login::IncomingUserIdentifier::MatrixId(matrix_id) = identifier { + let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { matrix_id } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); diff --git a/src/database.rs b/src/database.rs index 5ad2add..bfc33f2 100644 --- a/src/database.rs +++ b/src/database.rs @@ -280,7 +280,7 @@ impl Database { shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), - stateinfo_cache: Mutex::new(LruCache::new(1000)), + stateinfo_cache: Mutex::new(LruCache::new(50)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 600566c..adb748d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -110,6 +110,7 @@ pub struct Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + #[tracing::instrument(skip(self))] pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { let full_state = self .load_shortstatehash_info(shortstatehash)? @@ -122,6 +123,7 @@ impl Rooms { .collect() } + #[tracing::instrument(skip(self))] pub fn state_full( &self, shortstatehash: u64, @@ -220,6 +222,7 @@ impl Rooms { } /// This fetches auth events from the current state. 
+ #[tracing::instrument(skip(self))] pub fn get_auth_events( &self, room_id: &RoomId, @@ -261,6 +264,7 @@ impl Rooms { } /// Checks if a room exists. + #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), @@ -277,6 +281,7 @@ impl Rooms { } /// Checks if a room exists. + #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { let prefix = self .get_shortroomid(room_id)? @@ -300,6 +305,7 @@ impl Rooms { /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. + #[tracing::instrument(skip(self, new_state, db))] pub fn force_state( &self, room_id: &RoomId, @@ -412,6 +418,7 @@ impl Rooms { } /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. + #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( &self, shortstatehash: u64, @@ -480,6 +487,7 @@ impl Rooms { } } + #[tracing::instrument(skip(self, globals))] pub fn compress_state_event( &self, shortstatekey: u64, @@ -495,6 +503,7 @@ impl Rooms { Ok(v.try_into().expect("we checked the size above")) } + #[tracing::instrument(skip(self, compressed_event))] pub fn parse_compressed_state_event( &self, compressed_event: CompressedStateEvent, @@ -518,6 +527,13 @@ impl Rooms { /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer + #[tracing::instrument(skip( + self, + statediffnew, + statediffremoved, + diff_to_sibling, + parent_states + ))] pub fn save_state_from_diff( &self, shortstatehash: u64, @@ -642,6 +658,7 @@ impl Rooms { } /// Returns (shortstatehash, already_existed) + #[tracing::instrument(skip(self, globals))] fn get_or_create_shortstatehash( &self, state_hash: &StateHashId, @@ -662,6 +679,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -692,6 +710,7 @@ impl Rooms { Ok(short) } + #[tracing::instrument(skip(self))] pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(&room_id.as_bytes())? @@ -702,6 +721,7 @@ impl Rooms { .transpose() } + #[tracing::instrument(skip(self))] pub fn get_shortstatekey( &self, event_type: &EventType, @@ -739,6 +759,7 @@ impl Rooms { Ok(short) } + #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortroomid( &self, room_id: &RoomId, @@ -756,6 +777,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortstatekey( &self, event_type: &EventType, @@ -794,6 +816,7 @@ impl Rooms { Ok(short) } + #[tracing::instrument(skip(self))] pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { if let Some(id) = self .shorteventid_cache @@ -876,12 +899,14 @@ impl Rooms { } /// Returns the `count` of this pdu's id. + #[tracing::instrument(skip(self))] pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? 
.map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) } + #[tracing::instrument(skip(self))] pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { let prefix = self .get_shortroomid(room_id)? @@ -902,6 +927,7 @@ impl Rooms { } /// Returns the json of a pdu. + #[tracing::instrument(skip(self))] pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -920,6 +946,7 @@ impl Rooms { } /// Returns the json of a pdu. + #[tracing::instrument(skip(self))] pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? @@ -930,6 +957,7 @@ impl Rooms { } /// Returns the json of a pdu. + #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu_json( &self, event_id: &EventId, @@ -951,6 +979,7 @@ impl Rooms { } /// Returns the pdu's id. + #[tracing::instrument(skip(self))] pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid .get(event_id.as_bytes())? @@ -960,6 +989,7 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -980,6 +1010,7 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + #[tracing::instrument(skip(self))] pub fn get_pdu(&self, event_id: &EventId) -> Result>> { if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(&event_id) { return Ok(Some(Arc::clone(p))); @@ -1019,6 +1050,7 @@ impl Rooms { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. + #[tracing::instrument(skip(self))] pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1029,6 +1061,7 @@ impl Rooms { } /// Returns the pdu as a `BTreeMap`. + #[tracing::instrument(skip(self))] pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1039,6 +1072,7 @@ impl Rooms { } /// Removes a pdu and creates a new one with the same id. 
+ #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(&pdu_id)?.is_some() { self.pduid_pdu.insert( @@ -2298,6 +2332,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self))] pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { let mut joinedcount = 0_u64; let mut joined_servers = HashSet::new(); @@ -2347,6 +2382,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self, db))] pub async fn leave_room( &self, user_id: &UserId, @@ -2419,6 +2455,7 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self, db))] async fn remote_leave_room( &self, user_id: &UserId, @@ -2650,6 +2687,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, room_id: &RoomId, @@ -2809,6 +2847,7 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { Ok(self .roomid_joinedcount diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 1372fef..8a3fe4f 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -4,11 +4,14 @@ use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::uiaa::{IncomingAuthData, UiaaInfo}, + r0::uiaa::{ + IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, + }, }, signatures::CanonicalJsonValue, DeviceId, UserId, }; +use tracing::error; use super::abstraction::Tree; @@ -49,126 +52,91 @@ impl Uiaa { users: &super::users::Users, globals: &super::globals::Globals, ) -> Result<(bool, UiaaInfo)> { - if let IncomingAuthData::DirectRequest { - kind, - session, - auth_parameters, - } = &auth - { - let mut uiaainfo = session - .as_ref() - .map(|session| self.get_uiaa_session(&user_id, &device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; + let mut uiaainfo = auth + .session() + .map(|session| self.get_uiaa_session(&user_id, &device_id, session)) + .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } + if uiaainfo.session.is_none() { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + } + match auth { // Find out what the user completed - match &**kind { - "m.login.password" => { - let identifier = auth_parameters.get("identifier").ok_or(Error::BadRequest( - ErrorKind::MissingParam, - "m.login.password needs identifier.", - ))?; - - let identifier_type = identifier.get("type").ok_or(Error::BadRequest( - ErrorKind::MissingParam, - "Identifier needs a type.", - ))?; - - if identifier_type != "m.id.user" { + IncomingAuthData::Password(IncomingPassword { + identifier, + password, + .. + }) => { + let username = match identifier { + MatrixId(username) => username, + _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, "Identifier type not recognized.", - )); + )) } + }; - let username = identifier - .get("user") - .ok_or(Error::BadRequest( - ErrorKind::MissingParam, - "Identifier needs user field.", - ))? 
- .as_str() - .ok_or(Error::BadRequest( - ErrorKind::BadJson, - "User is not a string.", - ))?; - - let user_id = UserId::parse_with_server_name(username, globals.server_name()) + let user_id = + UserId::parse_with_server_name(username.clone(), globals.server_name()) .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; + Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") + })?; - let password = auth_parameters - .get("password") - .ok_or(Error::BadRequest( - ErrorKind::MissingParam, - "Password is missing.", - ))? - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::BadJson, - "Password is not a string.", - ))?; + // Check if password is correct + if let Some(hash) = users.password_hash(&user_id)? { + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! Let's add it to `completed` - uiaainfo.completed.push("m.login.password".to_owned()); - } - "m.login.dummy" => { - uiaainfo.completed.push("m.login.dummy".to_owned()); - } - k => panic!("type not supported: {}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; + if !hash_matches { + uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { + kind: ErrorKind::Forbidden, + message: "Invalid username or password.".to_owned(), + }); + return Ok((false, uiaainfo)); } } - // We didn't break, so this flow succeeded! - completed = true; - } - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); + // Password was correct! Let's add it to `completed` + uiaainfo.completed.push("m.login.password".to_owned()); } + IncomingAuthData::Dummy(_) => { + uiaainfo.completed.push("m.login.dummy".to_owned()); + } + k => error!("type not supported: {:?}", k), + } - // UIAA was successful! Remove this session and return true + // Check if a flow now succeeds + let mut completed = false; + 'flows: for flow in &mut uiaainfo.flows { + for stage in &flow.stages { + if !uiaainfo.completed.contains(stage) { + continue 'flows; + } + } + // We didn't break, so this flow succeeded! + completed = true; + } + + if !completed { self.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), - None, + Some(&uiaainfo), )?; - Ok((true, uiaainfo)) - } else { - panic!("FallbackAcknowledgement is not supported yet"); + return Ok((false, uiaainfo)); } + + // UIAA was successful! 
Remove this session and return true + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + None, + )?; + Ok((true, uiaainfo)) } fn set_uiaa_request( diff --git a/src/main.rs b/src/main.rs index 5a6f8c7..72f753f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use database::Config; pub use database::Database; pub use error::{Error, Result}; -use opentelemetry::trace::Tracer; +use opentelemetry::trace::{FutureExt, Tracer}; pub use pdu::PduEvent; pub use rocket::State; use ruma::api::client::error::ErrorKind; @@ -220,14 +220,17 @@ async fn main() { }; if config.allow_jaeger { + opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_pipeline() - .with_service_name("conduit") - .install_simple() + .install_batch(opentelemetry::runtime::Tokio) .unwrap(); let span = tracer.start("conduit"); - start.await; + start.with_current_context().await; drop(span); + + println!("exporting"); + opentelemetry::global::shutdown_tracer_provider(); } else { std::env::set_var("RUST_LOG", &config.log); diff --git a/src/pdu.rs b/src/pdu.rs index 00eda5b..1016fe6 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -12,7 +12,7 @@ use ruma::{ use serde::{Deserialize, Serialize}; use serde_json::json; use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; -use tracing::error; +use tracing::warn; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -322,7 +322,7 @@ pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> crate::Result<(EventId, CanonicalJsonObject)> { let value = serde_json::from_str(pdu.json().get()).map_err(|e| { - error!("{:?}: {:?}", pdu, e); + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; diff --git a/src/server_server.rs b/src/server_server.rs index 49f225f..56b28f2 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -111,7 +111,7 @@ impl FedDest { } } -#[tracing::instrument(skip(globals))] +#[tracing::instrument(skip(globals, request))] pub async fn send_request( globals: &crate::database::globals::Globals, destination: &ServerName, @@ -501,7 +501,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { ) .unwrap(); - Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical")) + Json(serde_json::to_string(&response).expect("JSON is canonical")) } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] @@ -927,12 +927,17 @@ pub async fn handle_incoming_pdu<'a>( ); eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { + // Time based check failed graph.insert(prev_event_id.clone(), HashSet::new()); eventid_info.insert(prev_event_id.clone(), (pdu, json)); } } else { + // Get json failed graph.insert(prev_event_id.clone(), HashSet::new()); } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); } } @@ -956,7 +961,9 @@ pub async fn handle_incoming_pdu<'a>( for prev_id in dbg!(sorted) { if let Some((pdu, json)) = eventid_info.remove(&prev_id) { - upgrade_outlier_to_timeline_pdu( + let start_time = Instant::now(); + let event_id = pdu.event_id.clone(); + if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, json, &create_event, @@ -965,7 +972,17 @@ pub async fn handle_incoming_pdu<'a>( room_id, pub_key_map, ) - .await?; + .await + { + warn!("Prev event {} failed: {}", event_id, e); + } + let elapsed = start_time.elapsed(); + warn!( + 
"Handling prev event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); } } @@ -981,6 +998,7 @@ pub async fn handle_incoming_pdu<'a>( .await } +#[tracing::instrument(skip(origin, create_event, event_id, room_id, value, db, pub_key_map))] fn handle_outlier_pdu<'a>( origin: &'a ServerName, create_event: &'a PduEvent, @@ -1141,6 +1159,7 @@ fn handle_outlier_pdu<'a>( }) } +#[tracing::instrument(skip(incoming_pdu, val, create_event, origin, db, room_id, pub_key_map))] async fn upgrade_outlier_to_timeline_pdu( incoming_pdu: Arc, val: BTreeMap, @@ -1352,41 +1371,6 @@ async fn upgrade_outlier_to_timeline_pdu( // Only keep those extremities were not referenced yet extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); - let mut extremity_statehashes = Vec::new(); - - for id in &extremities { - match db - .rooms - .get_pdu(&id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? - { - Some(leaf_pdu) => { - extremity_statehashes.push(( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - Some(leaf_pdu), - )); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states let current_statehash = db .rooms .current_shortstatehash(&room_id) @@ -1398,91 +1382,138 @@ async fn upgrade_outlier_to_timeline_pdu( .state_full(current_statehash) .map_err(|_| "Failed to load room state.")?; - extremity_statehashes.push((current_statehash.clone(), None)); + if incoming_pdu.state_key.is_some() { + let mut extremity_statehashes = Vec::new(); - let mut fork_states = Vec::new(); - for (statehash, leaf_pdu) in extremity_statehashes { - let mut leaf_state = db - .rooms - .state_full(statehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(leaf_pdu) = leaf_pdu { - if let Some(state_key) = &leaf_pdu.state_key { - // Now it's the state after - let key = (leaf_pdu.kind.clone(), state_key.clone()); - leaf_state.insert(key, leaf_pdu); + for id in &extremities { + match db + .rooms + .get_pdu(&id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_statehashes.push(( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + Some(leaf_pdu), + )); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } } } - fork_states.push(leaf_state); - } + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). 
- // We also add state after incoming event to the fork states - extremities.insert(incoming_pdu.event_id.clone()); - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - state_after.insert( - (incoming_pdu.kind.clone(), state_key.clone()), - incoming_pdu.clone(), - ); - } - fork_states.push(state_after.clone()); + // We do this by adding the current state to the list of fork states - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) - .collect() - } else { - // We do need to force an update to this room's state - update_state = true; + extremity_statehashes.push((current_statehash.clone(), None)); - let fork_states = &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(); + let mut fork_states = Vec::new(); + for (statehash, leaf_pdu) in extremity_statehashes { + let mut leaf_state = db + .rooms + .state_full(statehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; - let mut auth_chain_sets = Vec::new(); - for state in fork_states { - auth_chain_sets.push( - get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), + if let Some(leaf_pdu) = leaf_pdu { + if let Some(state_key) = &leaf_pdu.state_key { + // Now it's the state after + let key = (leaf_pdu.kind.clone(), state_key.clone()); + leaf_state.insert(key, leaf_pdu); + } + } + + fork_states.push(leaf_state); + } + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + state_after.insert( + (incoming_pdu.kind.clone(), state_key.clone()), + incoming_pdu.clone(), ); } + fork_states.push(state_after.clone()); - let state = match state_res::StateResolution::resolve( - &room_id, - room_version_id, - fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); + let mut update_state = false; + // 14. 
Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) + .collect() + } else { + // We do need to force an update to this room's state + update_state = true; + + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id.clone())) + .collect::>() + }) + .collect::>(); + + let mut auth_chain_sets = Vec::new(); + for state in fork_states { + auth_chain_sets.push( + get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), + ); } + + let state = match state_res::StateResolution::resolve( + &room_id, + room_version_id, + fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } + }; + + state }; - state - }; + // Set the new room state to the resolved state + if update_state { + db.rooms + .force_state(&room_id, new_room_state, &db) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + debug!("Updated resolved state"); + } + + extremities.insert(incoming_pdu.event_id.clone()); debug!("starting soft fail auth check"); // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -1516,14 +1547,6 @@ async fn upgrade_outlier_to_timeline_pdu( warn!("Event was soft failed: {:?}", incoming_pdu); } - // Set the new room state to the resolved state - if update_state { - db.rooms - .force_state(&room_id, new_room_state, &db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - debug!("Updated resolved state"); - if soft_fail { // Soft fail, we leave the event as an outlier but don't add it to the timeline return Err("Event has been soft failed".into()); @@ -1543,7 +1566,7 @@ async fn upgrade_outlier_to_timeline_pdu( /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? -//#[tracing::instrument(skip(db, key_map, auth_cache))] +#[tracing::instrument(skip(db, origin, events, create_event, room_id, pub_key_map))] pub(crate) fn fetch_and_handle_outliers<'a>( db: &'a Database, origin: &'a ServerName, @@ -1562,15 +1585,16 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut pdus = vec![]; for id in events { + info!("loading {}", id); if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", id); + info!("Backing off from {}", id); continue; } } @@ -1586,7 +1610,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } Ok(None) => { // c. 
Ask origin server over federation - debug!("Fetching {} over federation.", id); + info!("Fetching {} over federation.", id); match db .sending .send_federation_request( @@ -1597,11 +1621,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok(res) => { - debug!("Got {} over federation", id); + info!("Got {} over federation", id); let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, - Err(_) => continue, + Err(_) => { + back_off(id.clone()); + continue; + } }; // This will also fetch the auth chain @@ -1632,7 +1659,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } } Err(e) => { - debug!("Error loading {}: {}", id, e); + warn!("Error loading {}: {}", id, e); continue; } }; @@ -1644,7 +1671,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, origin, signature_ids))] pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, @@ -1885,6 +1912,7 @@ fn append_incoming_pdu( Ok(pdu_id) } +#[tracing::instrument(skip(starting_events, db))] fn get_auth_chain( starting_events: Vec, db: &Database, @@ -1921,6 +1949,7 @@ fn get_auth_chain( .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) } +#[tracing::instrument(skip(event_id, db))] fn get_auth_chain_inner(event_id: &EventId, db: &Database) -> Result> { let mut todo = vec![event_id.clone()]; let mut found = HashSet::new(); @@ -2204,6 +2233,7 @@ pub fn create_join_event_template_route( is_direct: None, membership: MembershipState::Join, third_party_invite: None, + reason: None, }) .expect("member event is valid value"); @@ -2680,6 +2710,7 @@ pub async fn claim_keys_route( .into()) } +#[tracing::instrument(skip(event, pub_key_map, db))] pub async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, From b09499c2df67c4fc101fe8cdfb0e2baec91d5b2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 19 Aug 2021 14:05:23 +0200 Subject: [PATCH 0745/1727] fix: don't save empty tokens --- src/database/rooms.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index adb748d..d3600f1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1378,6 +1378,7 @@ impl Rooms { if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) { let mut batch = body .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) .filter(|word| word.len() <= 50) .map(str::to_lowercase) .map(|word| { @@ -2702,6 +2703,7 @@ impl Rooms { let words = search_string .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) .map(str::to_lowercase) .collect::>(); From 4956fb9fbac289df09c92197ff5119913c896788 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 21 Aug 2021 14:22:21 +0200 Subject: [PATCH 0746/1727] improvement: limit prev event fetching --- src/database.rs | 25 ++++++++++++++++++++----- src/server_server.rs | 13 ++++++++++++- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/database.rs b/src/database.rs index bfc33f2..23d3bdf 100644 --- a/src/database.rs +++ b/src/database.rs @@ -636,7 +636,7 @@ impl Database { if db.globals.database_version()? 
< 9 { // Update tokenids db layout - let mut batch = db.rooms.tokenids.iter().filter_map(|(key, _)| { + let batch = db.rooms.tokenids.iter().filter_map(|(key, _)| { if !key.starts_with(b"!") { return None; } @@ -659,14 +659,29 @@ impl Database { println!("old {:?}", key); println!("new {:?}", new_key); Some((new_key, Vec::new())) - }); + }).collect::>(); - db.rooms.tokenids.insert_batch(&mut batch)?; + let mut iter = batch.into_iter().peekable(); - for (key, _) in db.rooms.tokenids.iter() { + while iter.peek().is_some() { + db.rooms.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; + println!("smaller batch done"); + } + + println!("Deleting starts"); + + let batch2 = db.rooms.tokenids.iter().filter_map(|(key, _)| { if key.starts_with(b"!") { - db.rooms.tokenids.remove(&key)?; + println!("del {:?}", key); + Some(key) + } else { + None } + }).collect::>(); + + for key in batch2 { + println!("del"); + db.rooms.tokenids.remove(&key)?; } db.globals.bump_database_version(9)?; diff --git a/src/server_server.rs b/src/server_server.rs index 56b28f2..5b09872 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -254,7 +254,7 @@ where }); // TODO: handle timeout if status != 200 { - info!( + warn!( "{} {}: {}", url, status, @@ -893,6 +893,9 @@ pub async fn handle_incoming_pdu<'a>( let mut graph = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); + + let mut amount = 0; + while let Some(prev_event_id) = todo_outlier_stack.pop() { if let Some((pdu, json_opt)) = fetch_and_handle_outliers( db, @@ -905,6 +908,13 @@ pub async fn handle_incoming_pdu<'a>( .await .pop() { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue + } + if let Some(json) = json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { @@ -915,6 +925,7 @@ pub async fn handle_incoming_pdu<'a>( .expect("Room exists") .origin_server_ts { + amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { todo_outlier_stack.push(dbg!(prev_prev.clone())); From 3b78e43a18b17729973d0620c344853eb2c2d610 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 21 Aug 2021 14:24:10 +0200 Subject: [PATCH 0747/1727] fmt --- src/database.rs | 76 +++++++++++++++++++++++++------------------- src/server_server.rs | 2 +- 2 files changed, 45 insertions(+), 33 deletions(-) diff --git a/src/database.rs b/src/database.rs index 23d3bdf..3d1324e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -636,48 +636,60 @@ impl Database { if db.globals.database_version()? 
< 9 { // Update tokenids db layout - let batch = db.rooms.tokenids.iter().filter_map(|(key, _)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(4, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let word = parts.next().unwrap(); - let _pdu_id_room = parts.next().unwrap(); - let pdu_id_count = parts.next().unwrap(); + let batch = db + .rooms + .tokenids + .iter() + .filter_map(|(key, _)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(4, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let word = parts.next().unwrap(); + let _pdu_id_room = parts.next().unwrap(); + let pdu_id_count = parts.next().unwrap(); - let short_room_id = db - .rooms - .roomid_shortroomid - .get(&room_id) - .unwrap() - .expect("shortroomid should exist"); - let mut new_key = short_room_id; - new_key.extend_from_slice(word); - new_key.push(0xff); - new_key.extend_from_slice(pdu_id_count); - println!("old {:?}", key); - println!("new {:?}", new_key); - Some((new_key, Vec::new())) - }).collect::>(); + let short_room_id = db + .rooms + .roomid_shortroomid + .get(&room_id) + .unwrap() + .expect("shortroomid should exist"); + let mut new_key = short_room_id; + new_key.extend_from_slice(word); + new_key.push(0xff); + new_key.extend_from_slice(pdu_id_count); + println!("old {:?}", key); + println!("new {:?}", new_key); + Some((new_key, Vec::new())) + }) + .collect::>(); let mut iter = batch.into_iter().peekable(); while iter.peek().is_some() { - db.rooms.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; + db.rooms + .tokenids + .insert_batch(&mut iter.by_ref().take(1000))?; println!("smaller batch done"); } println!("Deleting starts"); - let batch2 = db.rooms.tokenids.iter().filter_map(|(key, _)| { - if key.starts_with(b"!") { - println!("del {:?}", key); - Some(key) - } else { - None - } - }).collect::>(); + let batch2 = db + .rooms + .tokenids + .iter() + .filter_map(|(key, _)| { + if key.starts_with(b"!") { + println!("del {:?}", key); + Some(key) + } else { + None + } + }) + .collect::>(); for key in batch2 { println!("del"); diff --git a/src/server_server.rs b/src/server_server.rs index 5b09872..5299e1f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -912,7 +912,7 @@ pub async fn handle_incoming_pdu<'a>( // Max limit reached warn!("Max prev event limit reached!"); graph.insert(prev_event_id.clone(), HashSet::new()); - continue + continue; } if let Some(json) = From 2ef23b213a6e669a26aa541adfe59e5f563904a9 Mon Sep 17 00:00:00 2001 From: Tom Smeding Date: Sat, 21 Aug 2021 14:38:00 +0200 Subject: [PATCH 0748/1727] Consistent and escaped response in get_pdu 1. The fallback text of the get_pdu admin room command response message now contains the same text as the formatted_body content (namely, the json instead of Debug-formatting of a serde type). 2. The formatted_body content of the get_pdu response is now html-escaped. 
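(Not part of the patch: a minimal standalone sketch of the escaping step this change relies on, using Rocket's RawStr::html_escape exactly as the diff below does; the sample JSON string is made up for illustration.)

use rocket::http::RawStr;

fn main() {
    // Pretty-printed JSON can contain `<`, `>` and `&`, which must not be
    // interpreted as markup when embedded in the HTML formatted_body.
    let json_text = "{\n  \"body\": \"<b>hello</b> & goodbye\"\n}";

    // html_escape() rewrites `<` to `&lt;`, `>` to `&gt;` and `&` to `&amp;`,
    // so the JSON renders as literal text inside the surrounding HTML.
    let escaped = RawStr::new(json_text).html_escape();
    println!("{}", escaped);
}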
--- src/database/rooms.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0f42235..68837f6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -7,6 +7,7 @@ use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use lru_cache::LruCache; use regex::Regex; use ring::digest; +use rocket::http::RawStr; use ruma::{ api::{client::error::ErrorKind, federation}, events::{ @@ -1006,16 +1007,19 @@ impl Rooms { } match pdu_json { Some(json) => { + let json_text = + serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_html( - format!("{}\n```json\n{:#?}\n```", + format!("{}\n```json\n{}\n```", if outlier { "PDU is outlier" - } else { "PDU was accepted"}, json), + } else { "PDU was accepted"}, json_text), format!("
<p>{}</p>\n<pre><code>{}\n</code></pre>
\n", if outlier { "PDU is outlier" - } else { "PDU was accepted"}, serde_json::to_string_pretty(&json).expect("canonical json is valid json")) + } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) ), )); } From 48494c946496c36b2d1f85da1e8877ac95e2d664 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sun, 25 Jul 2021 19:28:54 +0200 Subject: [PATCH 0749/1727] Implement federation/v1/send_join --- src/main.rs | 3 +- src/server_server.rs | 119 +++++++++++++++++++++++++------------------ 2 files changed, 71 insertions(+), 51 deletions(-) diff --git a/src/main.rs b/src/main.rs index 72f753f..2ca49e2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -160,7 +160,8 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< server_server::get_room_state_route, server_server::get_room_state_ids_route, server_server::create_join_event_template_route, - server_server::create_join_event_route, + server_server::create_join_event_v1_route, + server_server::create_join_event_v2_route, server_server::create_invite_route, server_server::get_devices_route, server_server::get_room_information_route, diff --git a/src/server_server.rs b/src/server_server.rs index 5299e1f..4cfe0a8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2342,33 +2342,29 @@ pub fn create_join_event_template_route( .into()) } -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_join_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { +async fn create_join_event( + db: &DatabaseGuard, + room_id: &RoomId, + pdu: &Raw, +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = - db.rooms - .current_shortstatehash(&body.room_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; + let shortstatehash = db + .rooms + .current_shortstatehash(&room_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; let pub_key_map = RwLock::new(BTreeMap::new()); // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&body.pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -2393,31 +2389,23 @@ pub async fn create_join_event_route( .roomid_mutex_federation .write() .unwrap() - .entry(body.room_id.clone()) + .entry(room_id.clone()) .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu( - &origin, - &event_id, - &body.room_id, - value, - true, - &db, - &pub_key_map, - ) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( + let pdu_id = handle_incoming_pdu(&origin, &event_id, &room_id, value, true, &db, &pub_key_map) + .await + .map_err(|e| { + warn!("Error while handling incoming send join PDU: {}", e); + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + "Could not accept incoming PDU as timeline event.", + ))?; drop(mutex_lock); let state_ids = db.rooms.state_full_ids(shortstatehash)?; @@ -2425,7 +2413,7 @@ pub async fn create_join_event_route( for server in db .rooms - .room_servers(&body.room_id) + .room_servers(&room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != db.globals.server_name()) { @@ -2434,18 +2422,49 @@ pub async fn create_join_event_route( db.flush()?; + Ok(RoomState { + auth_chain: auth_chain_ids + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + state: state_ids + .iter() + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v1/send_join/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn create_join_event_v1_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v1::Response { + room_state: room_state, + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn create_join_event_v2_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + Ok(create_join_event::v2::Response { - room_state: RoomState { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - state: state_ids - .iter() - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }, + room_state: room_state, } .into()) } From 667ffb4239f303808d218ccbb7992054568de9b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 22 Aug 2021 13:00:36 +0200 Subject: [PATCH 0750/1727] fix: correct create event warnings --- src/client_server/membership.rs | 5 ++++- src/database/rooms.rs | 5 ++++- src/server_server.rs | 15 ++++++++++++--- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 222d204..46f4b9f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -822,7 +822,10 @@ pub async fn invite_helper<'a>( serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) + .map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d3600f1..e83c1ab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1776,7 +1776,10 @@ impl Rooms { serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) + .map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; diff --git a/src/server_server.rs b/src/server_server.rs index 
5299e1f..ea2edce 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1036,7 +1036,10 @@ fn handle_outlier_pdu<'a>( serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + .map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1323,7 +1326,10 @@ async fn upgrade_outlier_to_timeline_pdu( serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + .map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -2210,7 +2216,10 @@ pub fn create_join_event_template_route( serde_json::from_value::>(create_event.content.clone()) .expect("Raw::from_value always works.") .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db.")) + .map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; From 6d83954c40f4768d4bd2a03fed34345ba3f8b00f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 22 Aug 2021 13:05:56 +0200 Subject: [PATCH 0751/1727] fix: room upgrades --- src/client_server/room.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 2541278..6981afc 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -423,6 +423,8 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(db.globals.server_name()); + db.rooms + .get_or_create_shortroomid(&replacement_room, &db.globals)?; let mutex_state = Arc::clone( db.globals From b6e755f67ec189a7c36272d7939cbf153536a83c Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 22 Aug 2021 21:05:32 +0000 Subject: [PATCH 0752/1727] Only apply max. optimizations in CI builds. The average German man has a life expectancy of 78.7 years, or 689884.2 hours. Assuming that Timo is 20 years old, he has roughly 514564.2 hours left on planet earth. Also assuming that cross release builds took him 25 minutes before, but 2-2.5x of that with the current release compilation config, he wasted roughly an hour waiting for each one to complete. If he continued to work on Conduit for 20 more years (or 175320 hours) and made a release compilation about once per day, that would mean 7305 hours or 304 days wasted waiting for the Rust compiler. By cutting that back down to the original settings, he gets 182 days of his life back. That's about 0.63% of his remaining life. 182 joyful days he can spend with family and loved ones.
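(A throwaway sanity check of the arithmetic above, not part of the patch; the hours-per-year figure of 8766, i.e. 365.25 days, is an assumption inferred from the quoted totals.)

fn main() {
    let hours_per_year = 8766.0_f64; // 365.25 days * 24 h
    println!("life expectancy: {} h", 78.7 * hours_per_year); // ~689884.2
    println!("remaining at age 20: {} h", (78.7 - 20.0) * hours_per_year); // ~514564.2
    println!("next 20 years: {} h", 20.0 * hours_per_year); // 175320
    let wasted_hours = 20.0 * hours_per_year / 24.0; // one ~1 h release build per day -> 7305
    println!("wasted: {} h = {} days", wasted_hours, wasted_hours / 24.0); // ~304 days
}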
--- .gitlab-ci.yml | 3 +++ Cargo.toml | 7 +++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b7ea88e..6f6f56f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -27,6 +27,9 @@ variables: - cargohome - target/ key: "build_cache-$TARGET-release" + variables: + CARGO_PROFILE_RELEASE_LTO=true + CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1 before_script: - 'echo "Building for target $TARGET"' - 'mkdir -p cargohome && CARGOHOME="cargohome"' diff --git a/Cargo.toml b/Cargo.toml index d28e0b7..f80763e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,16 +124,15 @@ lto = 'thin' incremental = true [profile.release] -lto = true +lto = 'thin' incremental = true -codegen-units = 1 + +codegen-units=32 # If you want to make flamegraphs, enable debug info: # debug = true # For releases also try to max optimizations for dependencies: [profile.release.build-override] opt-level = 3 -codegen-units = 1 [profile.release.package."*"] opt-level = 3 -codegen-units = 1 From 81e056417cd64510dd3111bda9ec7c680167d786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 24 Aug 2021 19:10:31 +0200 Subject: [PATCH 0753/1727] improvement: better e2ee over fed, faster incoming event handling --- Cargo.lock | 39 ++-- Cargo.toml | 4 +- src/client_server/membership.rs | 39 ++-- src/client_server/sync.rs | 11 +- src/database.rs | 19 +- src/database/rooms.rs | 202 +++++++++++++-------- src/database/sending.rs | 36 +++- src/database/users.rs | 2 +- src/server_server.rs | 311 ++++++++++++++++++-------------- 9 files changed, 407 insertions(+), 256 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83e21a3..2a7791c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2045,7 +2045,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "assign", "js_int", @@ -2066,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "bytes", "http", @@ -2082,7 +2082,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2093,7 +2093,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "ruma-api", "ruma-common", @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = 
"git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "assign", "bytes", @@ -2127,7 +2127,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "indexmap", "js_int", @@ -2142,7 +2142,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.4" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "indoc", "js_int", @@ -2158,7 +2158,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.4" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2169,7 +2169,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2184,7 +2184,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "paste", "rand 0.8.4", @@ -2198,7 +2198,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2208,12 +2208,15 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +dependencies = [ + "thiserror", +] [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2226,7 +2229,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = 
"git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2241,7 +2244,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "bytes", "form_urlencoded", @@ -2255,7 +2258,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2266,7 +2269,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2283,7 +2286,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" +source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index d28e0b7..2593b4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "995ccea20f5f6d4a8fb22041749ed4de22fa1b6a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/DevinR528/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the 
same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 46f4b9f..29926e3 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -640,23 +640,40 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); + let shortstatekey = + db.rooms + .get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?; + state.insert(shortstatekey, pdu.event_id.clone()); } } - state.insert( - ( - pdu.kind.clone(), - pdu.state_key.clone().expect("join event has state key"), - ), - pdu.event_id.clone(), - ); + let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( + &pdu.kind, + pdu.state_key + .as_ref() + .expect("Pdu is a membership state event"), + &db.globals, + )?; - if state.get(&(EventType::RoomCreate, "".to_owned())).is_none() { + state.insert(incoming_shortstatekey, pdu.event_id.clone()); + + let create_shortstatekey = db + .rooms + .get_shortstatekey(&EventType::RoomCreate, "")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).is_none() { return Err(Error::BadServerResponse("State contained no create event.")); } - db.rooms.force_state(room_id, state, &db)?; + db.rooms.force_state( + room_id, + state + .into_iter() + .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) + .collect::>>()?, + &db, + )?; for result in futures::future::join_all( send_join_response @@ -913,8 +930,8 @@ pub async fn invite_helper<'a>( &room_version, &Arc::new(pdu.clone()), create_prev_event, - &auth_events, None, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), ) .map_err(|e| { error!("{:?}", e); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index c196b2a..8126047 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -348,7 +348,7 @@ async fn sync_helper( let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let state_events = current_state_ids .iter() - .map(|id| db.rooms.get_pdu(id)) + .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect::>(); @@ -393,18 +393,19 @@ async fn sync_helper( let state_events = if joined_since_last_sync { current_state_ids .iter() - .map(|id| db.rooms.get_pdu(id)) + .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect::>() } else { current_state_ids - .difference(&since_state_ids) - .filter(|id| { + .iter() + .filter(|(key, id)| since_state_ids.get(key) != Some(id)) + .filter(|(_, id)| { !timeline_pdus .iter() .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id) }) - .map(|id| db.rooms.get_pdu(id)) + .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect() }; diff --git a/src/database.rs b/src/database.rs index 3d1324e..a6ac67f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -262,8 +262,8 @@ impl Database { userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, - shortroomid_roomid: builder.open_tree("shortroomid_roomid")?, roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, @@ -279,8 +279,9 @@ impl Database { auth_chain_cache: Mutex::new(LruCache::new(100_000)), shorteventid_cache: 
Mutex::new(LruCache::new(1_000_000)), eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), + shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), - stateinfo_cache: Mutex::new(LruCache::new(50)), + stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, @@ -579,7 +580,6 @@ impl Database { for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { let shortroomid = db.globals.next_count()?.to_be_bytes(); db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; - db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?; println!("Migration: 8"); } // Update pduids db layout @@ -700,6 +700,19 @@ impl Database { println!("Migration: 8 -> 9 finished"); } + + if db.globals.database_version()? < 10 { + // Add other direction for shortstatekeys + for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() { + db.rooms + .shortstatekey_statekey + .insert(&shortstatekey, &statekey)?; + } + + db.globals.bump_database_version(10)?; + + println!("Migration: 9 -> 10 finished"); + } } let guard = db.read().await; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 9e57c40..8bb32fe 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -23,13 +23,13 @@ use ruma::{ uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem::size_of, sync::{Arc, Mutex}, }; use tokio::sync::MutexGuard; -use tracing::{debug, error, warn}; +use tracing::{error, warn}; use super::{abstraction::Tree, admin::AdminCommand, pusher}; @@ -73,8 +73,8 @@ pub struct Rooms { pub(super) shorteventid_shortstatehash: Arc, /// StateKey = EventType + StateKey, ShortStateKey = Count pub(super) statekey_shortstatekey: Arc, + pub(super) shortstatekey_statekey: Arc, - pub(super) shortroomid_roomid: Arc, pub(super) roomid_shortroomid: Arc, pub(super) shorteventid_eventid: Arc, @@ -95,6 +95,7 @@ pub struct Rooms { pub(super) shorteventid_cache: Mutex>, pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -112,7 +113,7 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { + pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -138,7 +139,7 @@ impl Rooms { .into_iter() .map(|compressed| self.parse_compressed_state_event(compressed)) .filter_map(|r| r.ok()) - .map(|eventid| self.get_pdu(&eventid)) + .map(|(_, eventid)| self.get_pdu(&eventid)) .filter_map(|r| r.ok().flatten()) .map(|pdu| { Ok::<_, Error>(( @@ -176,7 +177,11 @@ impl Rooms { Ok(full_state .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| self.parse_compressed_state_event(compressed).ok())) + .and_then(|compressed| { + self.parse_compressed_state_event(compressed) + .ok() + .map(|(_, id)| id) + })) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
@@ -232,6 +237,13 @@ impl Rooms { state_key: Option<&str>, content: &serde_json::Value, ) -> Result>> { + let shortstatehash = + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; + let auth_events = state_res::auth_types_for_event( kind, sender, @@ -239,19 +251,30 @@ impl Rooms { content.clone(), ); - let mut events = StateMap::new(); - for (event_type, state_key) in auth_events { - if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { - events.insert((event_type, state_key), pdu); - } else { - // This is okay because when creating a new room some events were not created yet - debug!( - "{:?}: Could not find {} {:?} in state", - content, event_type, state_key - ); - } - } - Ok(events) + let mut sauthevents = auth_events + .into_iter() + .filter_map(|(event_type, state_key)| { + self.get_shortstatekey(&event_type, &state_key) + .ok() + .flatten() + .map(|s| (s, (event_type, state_key))) + }) + .collect::>(); + + let full_state = self + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + + Ok(full_state + .into_iter() + .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) + .filter_map(|(shortstatekey, event_id)| { + sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) + }) + .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) + .collect()) } /// Generate a new StateHash. @@ -306,32 +329,19 @@ impl Rooms { /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state, db))] + #[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, - new_state: HashMap<(EventType, String), EventId>, + new_state_ids_compressed: HashSet, db: &Database, ) -> Result<()> { let previous_shortstatehash = self.current_shortstatehash(&room_id)?; - let new_state_ids_compressed = new_state - .iter() - .filter_map(|((event_type, state_key), event_id)| { - let shortstatekey = self - .get_or_create_shortstatekey(event_type, state_key, &db.globals) - .ok()?; - Some( - self.compress_state_event(shortstatekey, event_id, &db.globals) - .ok()?, - ) - }) - .collect::>(); - let state_hash = self.calculate_hash( - &new_state - .values() - .map(|event_id| event_id.as_bytes()) + &new_state_ids_compressed + .iter() + .map(|bytes| &bytes[..]) .collect::>(), ); @@ -373,10 +383,11 @@ impl Rooms { )?; }; - for event_id in statediffnew - .into_iter() - .filter_map(|new| self.parse_compressed_state_event(new).ok()) - { + for event_id in statediffnew.into_iter().filter_map(|new| { + self.parse_compressed_state_event(new) + .ok() + .map(|(_, id)| id) + }) { if let Some(pdu) = self.get_pdu_json(&event_id)? 
{ if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") { if let Ok(pdu) = serde_json::from_value::( @@ -504,15 +515,20 @@ impl Rooms { Ok(v.try_into().expect("we checked the size above")) } + /// Returns shortstatekey, event id #[tracing::instrument(skip(self, compressed_event))] pub fn parse_compressed_state_event( &self, compressed_event: CompressedStateEvent, - ) -> Result { - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) + ) -> Result<(u64, EventId)> { + Ok(( + utils::u64_from_bytes(&compressed_event[0..size_of::()]) .expect("bytes have right length"), - ) + self.get_eventid_from_short( + utils::u64_from_bytes(&compressed_event[size_of::()..]) + .expect("bytes have right length"), + )?, + )) } /// Creates a new shortstatehash that often is just a diff to an already existing @@ -805,6 +821,8 @@ impl Rooms { let shortstatekey = globals.next_count()?; self.statekey_shortstatekey .insert(&statekey, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &statekey)?; shortstatekey } }; @@ -833,11 +851,10 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?; + let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; self.shorteventid_cache .lock() @@ -847,6 +864,48 @@ impl Rooms { Ok(event_id) } + #[tracing::instrument(skip(self))] + pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> { + if let Some(id) = self + .shortstatekey_cache + .lock() + .unwrap() + .get_mut(&shortstatekey) + { + return Ok(id.clone()); + } + + let bytes = self + .shortstatekey_statekey + .get(&shortstatekey.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + + let mut parts = bytes.splitn(2, |&b| b == 0xff); + let eventtype_bytes = parts.next().expect("split always returns one entry"); + let statekey_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; + + let event_type = + EventType::try_from(utils::string_from_bytes(&eventtype_bytes).map_err(|_| { + Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; + + let state_key = utils::string_from_bytes(&statekey_bytes).map_err(|_| { + Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") + })?; + + let result = (event_type, state_key); + + self.shortstatekey_cache + .lock() + .unwrap() + .insert(shortstatekey, result.clone()); + + Ok(result) + } + /// Returns the full room state. 
#[tracing::instrument(skip(self))] pub fn room_state_full( @@ -1106,6 +1165,17 @@ impl Rooms { .collect() } + #[tracing::instrument(skip(self, room_id, event_ids))] + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + for prev in event_ids { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; + } + + Ok(()) + } + /// Replace the leaves of a room. /// /// The provided `event_ids` become the new leaves, this allows a room to have multiple @@ -1202,12 +1272,7 @@ impl Rooms { } // We must keep track of all events that have been referenced. - for prev in &pdu.prev_events { - let mut key = pdu.room_id().as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - + self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?; let mutex_insert = Arc::clone( @@ -1565,35 +1630,22 @@ impl Rooms { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state, globals))] + #[tracing::instrument(skip(self, state_ids_compressed, globals))] pub fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, - state: &StateMap>, + state_ids_compressed: HashSet, globals: &super::globals::Globals, ) -> Result<()> { let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?; let previous_shortstatehash = self.current_shortstatehash(&room_id)?; - let state_ids_compressed = state - .iter() - .filter_map(|((event_type, state_key), pdu)| { - let shortstatekey = self - .get_or_create_shortstatekey(event_type, state_key, globals) - .ok()?; - Some( - self.compress_state_event(shortstatekey, &pdu.event_id, globals) - .ok()?, - ) - }) - .collect::>(); - let state_hash = self.calculate_hash( - &state - .values() - .map(|pdu| pdu.event_id.as_bytes()) + &state_ids_compressed + .iter() + .map(|s| &s[..]) .collect::>(), ); @@ -1857,8 +1909,8 @@ impl Rooms { &room_version, &Arc::new(pdu.clone()), create_prev_event, - &auth_events, None, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), ) .map_err(|e| { error!("{:?}", e); diff --git a/src/database/sending.rs b/src/database/sending.rs index 7d7a44a..31a1f67 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, @@ -20,14 +20,17 @@ use ruma::{ appservice, federation::{ self, - transactions::edu::{Edu, ReceiptContent, ReceiptData, ReceiptMap}, + transactions::edu::{ + DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, + }, }, OutgoingRequest, }, + device_id, events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, push, receipt::ReceiptType, - MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, + uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, }; use tokio::{ select, @@ -317,8 +320,19 @@ impl Sending { })?; let mut events = Vec::new(); let mut max_edu_count = since; + let mut device_list_changes = HashSet::new(); + 'outer: for room_id in db.rooms.server_rooms(server) { let room_id = room_id?; + // Look for device list updates in this room + device_list_changes.extend( + db.users + .keys_changed(&room_id.to_string(), since, None) + .filter_map(|r| r.ok()) + 
.filter(|user_id| user_id.server_name() == db.globals.server_name()), + ); + + // Look for read receipts in this room for r in db.rooms.edus.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; @@ -378,6 +392,22 @@ impl Sending { } } + for user_id in device_list_changes { + // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767 + // Because synapse resyncs, we can just insert dummy data + let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + device_id: device_id!("dummy"), + device_display_name: "Dummy".to_owned(), + stream_id: uint!(1), + prev_id: Vec::new(), + deleted: None, + keys: None, + }); + + events.push(serde_json::to_vec(&edu).expect("json can be serialized")); + } + Ok((events, max_edu_count)) } diff --git a/src/database/users.rs b/src/database/users.rs index f501ec3..88d66be 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -673,7 +673,7 @@ impl Users { } #[tracing::instrument(skip(self, user_id, rooms, globals))] - fn mark_device_key_update( + pub fn mark_device_key_update( &self, user_id: &UserId, rooms: &super::rooms::Rooms, diff --git a/src/server_server.rs b/src/server_server.rs index 6b6ed28..e8ea486 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, - database::DatabaseGuard, + database::{rooms::CompressedStateEvent, DatabaseGuard}, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; @@ -27,7 +27,7 @@ use ruma::{ }, query::{get_profile_information, get_room_information}, transactions::{ - edu::{DirectDeviceContent, Edu}, + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu}, send_transaction_message, }, }, @@ -51,7 +51,7 @@ use ruma::{ ServerSigningKeyId, UserId, }; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt::Debug, future::Future, @@ -747,8 +747,9 @@ pub async fn send_transaction_message_route( .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; } } - Edu::DeviceListUpdate(_) => { - // TODO: Instead of worrying about stream ids we can just fetch all devices again + Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { + db.users + .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -1079,7 +1080,7 @@ fn handle_outlier_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // EDIT: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events for {}", incoming_pdu.event_id); + warn!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_outliers( db, origin, @@ -1114,10 +1115,10 @@ fn handle_outlier_pdu<'a>( .clone() .expect("all auth events have state keys"), )) { - Entry::Vacant(v) => { + hash_map::Entry::Vacant(v) => { v.insert(auth_event.clone()); } - Entry::Occupied(_) => { + hash_map::Entry::Occupied(_) => { return Err( "Auth event's type and state_key combination exists multiple times." 
.to_owned(), @@ -1153,8 +1154,8 @@ fn handle_outlier_pdu<'a>( &room_version, &incoming_pdu, previous_create.clone(), - &auth_events, None, // TODO: third party invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), ) .map_err(|_e| "Auth check failed".to_string())? { @@ -1205,38 +1206,21 @@ async fn upgrade_outlier_to_timeline_pdu( let state = prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); - if let Some(Ok(state)) = state { + if let Some(Ok(mut state)) = state { warn!("Using cached state"); - let mut state = fetch_and_handle_outliers( - db, - origin, - &state.into_iter().collect::>(), - &create_event, - &room_id, - pub_key_map, - ) - .await - .into_iter() - .map(|(pdu, _)| { - ( - ( - pdu.kind.clone(), - pdu.state_key - .clone() - .expect("events from state_full_ids are state events"), - ), - pdu, - ) - }) - .collect::>(); - let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { "Could not find prev event, but we know the state.".to_owned() })?; if let Some(state_key) = &prev_pdu.state_key { - state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state.insert(shortstatekey, prev_event.clone()); + // Now it's the state after the pdu } state_at_incoming_event = Some(state); @@ -1261,7 +1245,7 @@ async fn upgrade_outlier_to_timeline_pdu( .await { Ok(res) => { - debug!("Fetching state events at event."); + warn!("Fetching state events at event."); let state_vec = fetch_and_handle_outliers( &db, origin, @@ -1272,18 +1256,23 @@ async fn upgrade_outlier_to_timeline_pdu( ) .await; - let mut state = HashMap::new(); + let mut state = BTreeMap::new(); for (pdu, _) in state_vec { - match state.entry(( - pdu.kind.clone(), - pdu.state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?, - )) { - Entry::Vacant(v) => { - v.insert(pdu); + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(pdu.event_id.clone()); } - Entry::Occupied(_) => return Err( + btree_map::Entry::Occupied(_) => return Err( "State event's type and state_key combination exists multiple times." .to_owned(), ), @@ -1291,28 +1280,20 @@ async fn upgrade_outlier_to_timeline_pdu( } // The original create event must still be in the state - if state - .get(&(EventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(&create_event) - { + let create_shortstatekey = db + .rooms + .get_shortstatekey(&EventType::RoomCreate, "") + .map_err(|_| "Failed to talk to db.")? 
+ .expect("Room exists"); + + if state.get(&create_shortstatekey) != Some(&create_event.event_id) { return Err("Incoming event refers to wrong create event.".to_owned()); } - debug!("Fetching auth chain events at event."); - fetch_and_handle_outliers( - &db, - origin, - &res.auth_chain_ids, - &create_event, - &room_id, - pub_key_map, - ) - .await; - state_at_incoming_event = Some(state); } - Err(_) => { + Err(e) => { + warn!("Fetching state for event failed: {}", e); return Err("Fetching state for event failed".into()); } }; @@ -1350,8 +1331,15 @@ async fn upgrade_outlier_to_timeline_pdu( &room_version, &incoming_pdu, previous_create.clone(), - &state_at_incoming_event, None, // TODO: third party invite + |k, s| { + db.rooms + .get_shortstatekey(&k, &s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| db.rooms.get_pdu(&event_id).ok().flatten()) + }, ) .map_err(|_e| "Auth check failed.".to_owned())? { @@ -1388,28 +1376,28 @@ async fn upgrade_outlier_to_timeline_pdu( // Only keep those extremities were not referenced yet extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); - let current_statehash = db + let current_sstatehash = db .rooms .current_shortstatehash(&room_id) .map_err(|_| "Failed to load current state hash.".to_owned())? .expect("every room has state"); - let current_state = db + let current_state_ids = db .rooms - .state_full(current_statehash) + .state_full_ids(current_sstatehash) .map_err(|_| "Failed to load room state.")?; if incoming_pdu.state_key.is_some() { - let mut extremity_statehashes = Vec::new(); + let mut extremity_sstatehashes = HashMap::new(); - for id in &extremities { + for id in dbg!(&extremities) { match db .rooms .get_pdu(&id) .map_err(|_| "Failed to ask db for pdu.".to_owned())? { Some(leaf_pdu) => { - extremity_statehashes.push(( + extremity_sstatehashes.insert( db.rooms .pdu_shortstatehash(&leaf_pdu.event_id) .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? @@ -1420,8 +1408,8 @@ async fn upgrade_outlier_to_timeline_pdu( ); "Found pdu with no statehash in db.".to_owned() })?, - Some(leaf_pdu), - )); + leaf_pdu, + ); } _ => { error!("Missing state snapshot for {:?}", id); @@ -1430,27 +1418,30 @@ async fn upgrade_outlier_to_timeline_pdu( } } + let mut fork_states = Vec::new(); + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated // by doing state res where one of the inputs was a previously trusted set of state, // don't just trust a set of state we got from a remote). 
// We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + dbg!(&extremity_sstatehashes); - extremity_statehashes.push((current_statehash.clone(), None)); - - let mut fork_states = Vec::new(); - for (statehash, leaf_pdu) in extremity_statehashes { + for (sstatehash, leaf_pdu) in extremity_sstatehashes { let mut leaf_state = db .rooms - .state_full(statehash) + .state_full_ids(sstatehash) .map_err(|_| "Failed to ask db for room state.".to_owned())?; - if let Some(leaf_pdu) = leaf_pdu { - if let Some(state_key) = &leaf_pdu.state_key { - // Now it's the state after - let key = (leaf_pdu.kind.clone(), state_key.clone()); - leaf_state.insert(key, leaf_pdu); - } + if let Some(state_key) = &leaf_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, leaf_pdu.event_id.clone()); + // Now it's the state after the pdu } fork_states.push(leaf_state); @@ -1459,10 +1450,12 @@ async fn upgrade_outlier_to_timeline_pdu( // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - state_after.insert( - (incoming_pdu.kind.clone(), state_key.clone()), - incoming_pdu.clone(), - ); + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state_after.insert(shortstatekey, incoming_pdu.event_id.clone()); } fork_states.push(state_after.clone()); @@ -1475,8 +1468,12 @@ async fn upgrade_outlier_to_timeline_pdu( // always included) fork_states[0] .iter() - .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) - .collect() + .map(|(k, id)| { + db.rooms + .compress_state_event(*k, &id, &db.globals) + .map_err(|_| "Failed to compress_state_event.".to_owned()) + }) + .collect::>()? } else { // We do need to force an update to this room's state update_state = true; @@ -1485,10 +1482,11 @@ async fn upgrade_outlier_to_timeline_pdu( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() + .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id)))) + .collect::>>() }) - .collect::>(); + .collect::>>() + .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; let mut auth_chain_sets = Vec::new(); for state in fork_states { @@ -1519,6 +1517,17 @@ async fn upgrade_outlier_to_timeline_pdu( }; state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + db.rooms + .compress_state_event(shortstatekey, &event_id, &db.globals) + .map_err(|_| "Failed to compress state event".to_owned()) + }) + .collect::>()? }; // Set the new room state to the resolved state @@ -1534,38 +1543,55 @@ async fn upgrade_outlier_to_timeline_pdu( debug!("starting soft fail auth check"); // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let auth_events = db + .rooms + .get_auth_events( + &room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, previous_create, - ¤t_state, None, + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), ) .map_err(|_e| "Auth check failed.".to_owned())?; - let mut pdu_id = None; - if !soft_fail { - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - pdu_id = Some( - append_incoming_pdu( - &db, - &incoming_pdu, - val, - extremities, - &state_at_incoming_event, - &state_lock, - ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?, - ); - debug!("Appended incoming pdu."); - } else { - warn!("Event was soft failed: {:?}", incoming_pdu); - } + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, &id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; + + let pdu_id = append_incoming_pdu( + &db, + &incoming_pdu, + val, + extremities, + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|_| "Failed to add pdu to db.".to_owned())?; + + debug!("Appended incoming pdu."); if soft_fail { - // Soft fail, we leave the event as an outlier but don't add it to the timeline + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); return Err("Event has been soft failed".into()); } @@ -1594,15 +1620,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>( ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - Entry::Vacant(e) => { + hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } - Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; let mut pdus = vec![]; for id in events { - info!("loading {}", id); if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); @@ -1627,7 +1652,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } Ok(None) => { // c. 
Ask origin server over federation - info!("Fetching {} over federation.", id); + warn!("Fetching {} over federation.", id); match db .sending .send_federation_request( @@ -1638,7 +1663,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok(res) => { - info!("Got {} over federation", id); + warn!("Got {} over federation", id); let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, @@ -1727,10 +1752,10 @@ pub(crate) async fn fetch_signing_keys( .unwrap() .entry(id) { - Entry::Vacant(e) => { + hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } - Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; if let Some((time, tries)) = db @@ -1847,19 +1872,34 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. -#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state, _mutex_lock))] +#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] fn append_incoming_pdu( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: HashSet, - state: &StateMap>, + state_ids_compressed: HashSet, + soft_fail: bool, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result> { +) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms - .set_event_state(&pdu.event_id, &pdu.room_id, state, &db.globals)?; + db.rooms.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + &db.globals, + )?; + + if soft_fail { + db.rooms + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + db.rooms.replace_pdu_leaves( + &pdu.room_id, + &new_room_leaves.into_iter().collect::>(), + )?; + return Ok(None); + } let pdu_id = db.rooms.append_pdu( pdu, @@ -1926,7 +1966,7 @@ fn append_incoming_pdu( } } - Ok(pdu_id) + Ok(Some(pdu_id)) } #[tracing::instrument(skip(starting_events, db))] @@ -2120,7 +2160,7 @@ pub fn get_room_state_route( .rooms .state_full_ids(shortstatehash)? .into_iter() - .map(|id| { + .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( db.rooms.get_pdu_json(&id).unwrap().unwrap(), ) @@ -2168,6 +2208,7 @@ pub fn get_room_state_ids_route( .rooms .state_full_ids(shortstatehash)? 
.into_iter() + .map(|(_, id)| id) .collect(); let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; @@ -2314,8 +2355,8 @@ pub fn create_join_event_template_route( &room_version, &Arc::new(pdu.clone()), create_prev_event, - &auth_events, None, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), ) .map_err(|e| { error!("{:?}", e); @@ -2418,7 +2459,7 @@ async fn create_join_event( drop(mutex_lock); let state_ids = db.rooms.state_full_ids(shortstatehash)?; - let auth_chain_ids = get_auth_chain(state_ids.iter().cloned().collect(), &db)?; + let auth_chain_ids = get_auth_chain(state_ids.iter().map(|(_, id)| id.clone()).collect(), &db)?; for server in db .rooms @@ -2438,7 +2479,7 @@ async fn create_join_event( .collect(), state: state_ids .iter() - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|(_, id)| db.rooms.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -2455,10 +2496,7 @@ pub async fn create_join_event_v1_route( ) -> ConduitResult { let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v1::Response { - room_state: room_state, - } - .into()) + Ok(create_join_event::v1::Response { room_state }.into()) } #[cfg_attr( @@ -2472,10 +2510,7 @@ pub async fn create_join_event_v2_route( ) -> ConduitResult { let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v2::Response { - room_state: room_state, - } - .into()) + Ok(create_join_event::v2::Response { room_state }.into()) } #[cfg_attr( From dd8706654604c618e3534b740ca871f9e03dd1a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 24 Aug 2021 21:10:01 +0200 Subject: [PATCH 0754/1727] improvement: more efficient auth chain cache --- Cargo.lock | 36 ++++++++++++++++----------------- Cargo.toml | 2 +- src/database/rooms.rs | 6 +++--- src/server_server.rs | 46 +++++++++++++++++++++++++++---------------- 4 files changed, 51 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a7791c..cf60ae4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2045,7 +2045,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "assign", "js_int", @@ -2066,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "bytes", "http", @@ -2082,7 +2082,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2093,7 +2093,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = 
"git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "ruma-api", "ruma-common", @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "assign", "bytes", @@ -2127,7 +2127,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "indexmap", "js_int", @@ -2142,7 +2142,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.4" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "indoc", "js_int", @@ -2158,7 +2158,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.4" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2169,7 +2169,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2184,7 +2184,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "paste", "rand 0.8.4", @@ -2198,7 +2198,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2208,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = 
"git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "thiserror", ] @@ -2216,7 +2216,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2229,7 +2229,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "js_int", "ruma-api", @@ -2244,7 +2244,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "bytes", "form_urlencoded", @@ -2258,7 +2258,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2269,7 +2269,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2286,7 +2286,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 291f51e..0b0dda9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/DevinR528/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = 
"2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8bb32fe..0d99c52 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -19,7 +19,7 @@ use ruma::{ }, push::{self, Action, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, Event, RoomVersion, StateMap}, + state_res::{self, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use std::{ @@ -91,7 +91,7 @@ pub struct Rooms { pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex, HashSet>>, pub(super) shorteventid_cache: Mutex>, pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, @@ -3166,7 +3166,7 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn auth_chain_cache(&self) -> std::sync::MutexGuard<'_, LruCache>> { + pub fn auth_chain_cache(&self) -> std::sync::MutexGuard<'_, LruCache, HashSet>> { self.auth_chain_cache.lock().unwrap() } } diff --git a/src/server_server.rs b/src/server_server.rs index e8ea486..cb89e40 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1976,27 +1976,39 @@ fn get_auth_chain( ) -> Result + '_> { let mut full_auth_chain = HashSet::new(); - let starting_events = starting_events - .iter() - .map(|id| { - db.rooms - .get_or_create_shorteventid(id, &db.globals) - .map(|s| (s, id)) - }) - .collect::>>()?; + const NUM_BUCKETS: usize = 100; + + let mut buckets = vec![HashSet::new(); NUM_BUCKETS]; + + for id in starting_events { + let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id)); + } let mut cache = db.rooms.auth_chain_cache(); - for (sevent_id, event_id) in starting_events { - if let Some(cached) = cache.get_mut(&sevent_id) { + for chunk in buckets { + let chunk_key = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = cache.get_mut(&chunk_key) { full_auth_chain.extend(cached.iter().cloned()); - } else { - drop(cache); - let auth_chain = get_auth_chain_inner(&event_id, db)?; - cache = db.rooms.auth_chain_cache(); - cache.insert(sevent_id, auth_chain.clone()); - full_auth_chain.extend(auth_chain); - }; + continue; + } + + let mut chunk_cache = HashSet::new(); + for (sevent_id, event_id) in chunk { + if let Some(cached) = cache.get_mut(&[sevent_id][..]) { + chunk_cache.extend(cached.iter().cloned()); + } else { + drop(cache); + let auth_chain = get_auth_chain_inner(&event_id, db)?; + cache = db.rooms.auth_chain_cache(); + cache.insert(vec![sevent_id], auth_chain.clone()); + chunk_cache.extend(auth_chain); + }; + } + cache.insert(chunk_key, chunk_cache.clone()); + full_auth_chain.extend(chunk_cache); } drop(cache); From 5800e9b79738b2d211512d189a871f6bf0c122fc Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 17 Aug 2021 14:44:53 +0200 Subject: [PATCH 0755/1727] Add Traefik setup, incl. step-by-step, to docker README. 
--- DEPLOY.md | 10 ++-- README.md | 2 +- docker/README.md | 68 ++++++++++++++++++++-- docker/docker-compose.override.traefik.yml | 23 ++++++++ docker/docker-compose.traefik.yml | 10 ++++ 5 files changed, 102 insertions(+), 11 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 85f3f07..b058418 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -44,7 +44,7 @@ This also allows you to make sure that the file permissions are correctly set up In Debian you can use this command to create a Conduit user: -``` +```bash sudo adduser --system conduit --no-create-home ``` @@ -131,13 +131,13 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on Debian: -``` +```bash sudo chown -R conduit:nogroup /etc/matrix-conduit ``` If you use the default database path you also need to run this: -``` +```bash sudo mkdir -p /var/lib/matrix-conduit/conduit_db sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db ``` @@ -151,7 +151,7 @@ This depends on whether you use Apache, Nginx or another web server. Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: -``` +```apache Listen 8448 @@ -180,7 +180,7 @@ $ sudo systemctl reload apache2 If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` -``` +```nginx server { listen 443 ssl http2; listen [::]:443 ssl http2; diff --git a/README.md b/README.md index fde762c..836f9c6 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Pull and run the docker image with ``` bash docker pull matrixconduit/matrix-conduit:latest -docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` > Note: You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml). diff --git a/docker/README.md b/docker/README.md index 499d1ad..0e83482 100644 --- a/docker/README.md +++ b/docker/README.md @@ -40,18 +40,27 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ``` bash -docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` -For detached mode, you also need to use the `-d` flag. You also need to supply a `conduit.toml` config file, you can find an example [here](../conduit-example.toml). 
+or you can skip the build step and pull the image from one of the following registries: + +| Registry | Image | Size | +| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | + +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need -too pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. +to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. + If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. ## Docker-compose -If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) including [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. +If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying +Conduit can be found [here](../DEPLOY.md). ### Build @@ -67,8 +76,57 @@ This will also start the container right afterwards, so if want it to run in det ### Run -If you already have built the image, you can just start the container and everything else in the compose file in detached mode with: +If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: ``` bash docker-compose up -d ``` + +> **Note:** Don't forget to modify and adjust the compose file to your needs. + +### Use Traefik as Proxy + +As a container user, you probably know about Traefik. It is an easy-to-use reverse proxy for making containerized apps and services available through the web. With the +two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is +equally easy to deploy and use Conduit, with a little caveat.
If you already took a look at the files, then you should have seen the `well-known` service, and that is +the little caveat. Traefik is simply a proxy and load balancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports +`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. + +With the service `well-known` we use a single `nginx` container that will serve those two files. + +So...step by step: + +1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames. +2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. +3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. +4. Uncomment the `element-web` service if you want to host your own Element Web Client and create an `element_config.json`. +5. Create the files needed by the `well-known` service. + - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then you also need to change the volume mapping) + ```nginx + server { + server_name .; + listen 80 default_server; + + location /.well-known/matrix/ { + root /var/www; + default_type application/json; + add_header Access-Control-Allow-Origin *; + } + } + ``` + - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then you also need to change the volume mapping) + ```json + { + "m.homeserver": { + "base_url": "https://." + } + } + ``` + - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then you also need to change the volume mapping) + ```json + { + "m.server": ".:443" + } + ``` +6. Run `docker-compose up -d` +7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.traefik.yml index 2096d79..5633348 100644 --- a/docker/docker-compose.override.traefik.yml +++ b/docker/docker-compose.override.traefik.yml @@ -10,6 +10,29 @@ services: - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted - "traefik.http.routers.to-conduit.tls=true" - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker" + + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOrigin=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" + + # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose file.
+ well-known: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" + + - "traefik.http.routers.to-matrix-wellknown.rule=Host(`.`) && PathPrefix(`/.well-known/matrix`)" + - "traefik.http.routers.to-matrix-wellknown.tls=true" + - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" + + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOrigin=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" + ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml # element-web: diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 58fa3ed..c2c024a 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -41,6 +41,16 @@ services: # CONDUIT_WORKERS: 10 # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. + well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files + ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit From 9f8c45c7635f839d35e4b07d98d38e9892a50705 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 25 Aug 2021 14:42:46 +0200 Subject: [PATCH 0756/1727] fix: e2ee over federation to device events were not being sent --- src/client_server/to_device.rs | 1 + src/database/sending.rs | 31 ++++++++++++++++++------------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 69147c9..cd770bd 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -56,6 +56,7 @@ pub async fn send_event_to_device_route( }, )) .expect("DirectToDevice EDU can be serialized"), + db.globals.next_count()?, )?; continue; diff --git a/src/database/sending.rs b/src/database/sending.rs index 31a1f67..1050c07 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -84,8 +84,8 @@ pub enum SendingEventType { pub struct Sending { /// The state for a given state hash. 
pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernamEvent = (+ / $)SenderKey / ServerName / UserId + PduId / * (for edus), Data = EDU content - pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / * (for edus), Data = EDU content + pub(super) servernameevent_data: Arc, // ServernamEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(Vec, Vec)>, } @@ -435,10 +435,15 @@ impl Sending { } #[tracing::instrument(skip(self, server, serialized))] - pub fn send_reliable_edu(&self, server: &ServerName, serialized: Vec) -> Result<()> { + pub fn send_reliable_edu( + &self, + server: &ServerName, + serialized: Vec, + id: u64, + ) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); - key.push(b'*'); + key.extend_from_slice(&id.to_be_bytes()); self.servernameevent_data.insert(&key, &serialized)?; self.sender.unbounded_send((key, serialized)).unwrap(); @@ -714,10 +719,10 @@ impl Sending { OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), - if event.starts_with(b"*") { - SendingEventType::Edu(value) - } else { + if value.is_empty() { SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) }, ) } else if key.starts_with(b"$") { @@ -732,10 +737,10 @@ impl Sending { .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; ( OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), - if event.starts_with(b"*") { - SendingEventType::Edu(value) - } else { + if value.is_empty() { SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) }, ) } else { @@ -753,10 +758,10 @@ impl Sending { OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), - if event.starts_with(b"*") { - SendingEventType::Edu(event[1..].to_vec()) - } else { + if value.is_empty() { SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) }, ) }) From 9152b877a7f9e2439d5ac1211505e75b58605ffe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 25 Aug 2021 17:36:10 +0200 Subject: [PATCH 0757/1727] fix: wrong soft fail check, too many events in /sync state response --- src/client_server/sync.rs | 30 +++++------------------------- src/database.rs | 1 + src/database/rooms.rs | 33 +++++++++++++++++++++++++++++++++ src/server_server.rs | 23 ++++++++++++----------- 4 files changed, 51 insertions(+), 36 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 8126047..270a5f0 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -246,31 +246,13 @@ async fn sync_helper( .current_shortstatehash(&room_id)? .expect("All rooms have state"); - let first_pdu_before_since = db - .rooms - .pdus_until(&sender_user, &room_id, since)? - .next() - .transpose()?; - let pdus_after_since = db .rooms .pdus_after(&sender_user, &room_id, since)? 
.next() .is_some(); - let since_shortstatehash = first_pdu_before_since - .as_ref() - .map(|pdu| { - db.rooms - .pdu_shortstatehash(&pdu.1.event_id) - .transpose() - .ok_or_else(|| { - warn!("PDU without state: {}", pdu.1.event_id); - Error::bad_database("Found PDU without state") - }) - }) - .transpose()? - .transpose()?; + let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { @@ -359,7 +341,7 @@ async fn sync_helper( true, state_events, ) - } else if !pdus_after_since || since_shortstatehash == Some(current_shortstatehash) { + } else if !pdus_after_since && since_shortstatehash == Some(current_shortstatehash) { // No state changes (Vec::new(), None, None, false, Vec::new()) } else { @@ -400,11 +382,6 @@ async fn sync_helper( current_state_ids .iter() .filter(|(key, id)| since_state_ids.get(key) != Some(id)) - .filter(|(_, id)| { - !timeline_pdus - .iter() - .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id) - }) .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect() @@ -585,6 +562,9 @@ async fn sync_helper( ); } + // Save the state after this sync so we can send the correct state diff next sync + db.rooms.associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; + let joined_room = sync_events::JoinedRoom { account_data: sync_events::RoomAccountData { events: db diff --git a/src/database.rs b/src/database.rs index a6ac67f..193fcf2 100644 --- a/src/database.rs +++ b/src/database.rs @@ -271,6 +271,7 @@ impl Database { shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0d99c52..bb27e01 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -69,6 +69,7 @@ pub struct Rooms { /// Remember the current state hash of a room. pub(super) roomid_shortstatehash: Arc, + pub(super) roomsynctoken_shortstatehash: Arc, /// Remember the state hash at events in the past. pub(super) shorteventid_shortstatehash: Arc, /// StateKey = EventType + StateKey, ShortStateKey = Count @@ -1800,6 +1801,38 @@ impl Rooms { Ok(()) } + pub fn associate_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + shortstatehash: u64, + ) -> Result<()> { + let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + self.roomsynctoken_shortstatehash + .insert(&key, &shortstatehash.to_be_bytes()) + } + + pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + Ok(self + .roomsynctoken_shortstatehash + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") + }) + }) + .transpose()?) + } + /// Creates a new persisted data unit and adds it to a room. 
#[tracing::instrument(skip(self, db, _mutex_lock))] pub fn build_and_append_pdu( diff --git a/src/server_server.rs b/src/server_server.rs index cb89e40..526ed51 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1387,6 +1387,17 @@ async fn upgrade_outlier_to_timeline_pdu( .state_full_ids(current_sstatehash) .map_err(|_| "Failed to load room state.")?; + let auth_events = db + .rooms + .get_auth_events( + &room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + if incoming_pdu.state_key.is_some() { let mut extremity_sstatehashes = HashMap::new(); @@ -1541,18 +1552,8 @@ async fn upgrade_outlier_to_timeline_pdu( extremities.insert(incoming_pdu.event_id.clone()); - debug!("starting soft fail auth check"); // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - let auth_events = db - .rooms - .get_auth_events( - &room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; + debug!("starting soft fail auth check"); let soft_fail = !state_res::event_auth::auth_check( &room_version, From bef4fe50ce61d3533eb9f8ef77905473f71f820f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 25 Aug 2021 17:40:10 +0200 Subject: [PATCH 0758/1727] fix: better migration, force e2ee device key updates --- src/client_server/sync.rs | 3 ++- src/database.rs | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 270a5f0..7ce3b5b 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -563,7 +563,8 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - db.rooms.associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; + db.rooms + .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; let joined_room = sync_events::JoinedRoom { account_data: sync_events::RoomAccountData { diff --git a/src/database.rs b/src/database.rs index 193fcf2..a183f24 100644 --- a/src/database.rs +++ b/src/database.rs @@ -710,6 +710,12 @@ impl Database { .insert(&shortstatekey, &statekey)?; } + // Force E2EE device list updates so we can send them over federation + for user_id in db.users.iter().filter_map(|r| r.ok()) { + db.users + .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + } + db.globals.bump_database_version(10)?; println!("Migration: 9 -> 10 finished"); From 41d07be97b9cec1948b1e2b2a78dc9e0d28f0ac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 14:18:19 +0200 Subject: [PATCH 0759/1727] improvement: persist cached auth chains in db --- src/client_server/sync.rs | 2 +- src/database.rs | 4 +- src/database/rooms.rs | 65 ++++++++++++++++++++++++++++-- src/server_server.rs | 83 +++++++++++++++++++++++++++++---------- 4 files changed, 128 insertions(+), 26 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7ce3b5b..21a9ef2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -12,7 +12,7 @@ use std::{ time::Duration, }; use tokio::sync::watch::Sender; -use tracing::{error, warn}; +use tracing::error; #[cfg(feature = "conduit_bin")] use rocket::{get, tokio}; diff --git a/src/database.rs b/src/database.rs index 
a183f24..8fd745b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -264,6 +264,8 @@ impl Database { statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, + shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, @@ -277,7 +279,7 @@ impl Database { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, referencedevents: builder.open_tree("referencedevents")?, pdu_cache: Mutex::new(LruCache::new(100_000)), - auth_chain_cache: Mutex::new(LruCache::new(100_000)), + auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index bb27e01..e0ffded 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -84,6 +84,8 @@ pub struct Rooms { pub(super) statehash_shortstatehash: Arc, pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) + pub(super) shorteventid_authchain: Arc, + /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. pub(super) eventid_outlierpdu: Arc, @@ -92,7 +94,7 @@ pub struct Rooms { pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, HashSet>>, + pub(super) auth_chain_cache: Mutex, Arc>>>, pub(super) shorteventid_cache: Mutex>, pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, @@ -3199,7 +3201,64 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn auth_chain_cache(&self) -> std::sync::MutexGuard<'_, LruCache, HashSet>> { - self.auth_chain_cache.lock().unwrap() + pub fn get_auth_chain_from_cache<'a>( + &'a self, + key: &[u64], + ) -> Result>>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); + } + + // Check DB cache + if key.len() == 1 { + if let Some(chain) = + self.shorteventid_authchain + .get(&key[0].to_be_bytes())? 
+ .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") + }) + .collect() + }) + { + let chain = Arc::new(chain); + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); + + return Ok(Some(chain)); + } + } + + Ok(None) + } + + #[tracing::instrument(skip(self))] + pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { + // Persist in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &chain + .iter() + .map(|s| s.to_be_bytes().to_vec()) + .flatten() + .collect::>(), + )?; + } + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(key.clone(), chain); + + Ok(()) } } diff --git a/src/server_server.rs b/src/server_server.rs index 526ed51..65fd4a8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -51,7 +51,7 @@ use ruma::{ ServerSigningKeyId, UserId, }; use std::{ - collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, + collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt::Debug, future::Future, @@ -1975,44 +1975,85 @@ fn get_auth_chain( starting_events: Vec, db: &Database, ) -> Result + '_> { - let mut full_auth_chain = HashSet::new(); + const NUM_BUCKETS: usize = 50; - const NUM_BUCKETS: usize = 100; - - let mut buckets = vec![HashSet::new(); NUM_BUCKETS]; + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id)); + if let Some(pdu) = db.rooms.get_pdu(&id)? { + for auth_event in &pdu.auth_events { + let short = db + .rooms + .get_or_create_shorteventid(&auth_event, &db.globals)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, auth_event.clone())); + } + } } - let mut cache = db.rooms.auth_chain_cache(); + let mut full_auth_chain = HashSet::new(); + let mut hits = 0; + let mut misses = 0; for chunk in buckets { - let chunk_key = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = cache.get_mut(&chunk_key) { - full_auth_chain.extend(cached.iter().cloned()); + if chunk.is_empty() { continue; } + // The code below will only get the auth chains, not the events in the chunk. So let's add + // them first + full_auth_chain.extend(chunk.iter().map(|(id, _)| id)); + + let chunk_key = chunk + .iter() + .map(|(short, _)| short) + .copied() + .collect::>(); + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { + hits += 1; + full_auth_chain.extend(cached.iter().cloned()); + continue; + } + misses += 1; + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = cache.get_mut(&[sevent_id][..]) { + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ + hits2 += 1; chunk_cache.extend(cached.iter().cloned()); } else { - drop(cache); - let auth_chain = get_auth_chain_inner(&event_id, db)?; - cache = db.rooms.auth_chain_cache(); - cache.insert(vec![sevent_id], auth_chain.clone()); - chunk_cache.extend(auth_chain); + misses2 += 1; + let auth_chain = Arc::new(get_auth_chain_inner(&event_id, db)?); + db.rooms + .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() + ); + chunk_cache.extend(auth_chain.iter()); }; } - cache.insert(chunk_key, chunk_cache.clone()); - full_auth_chain.extend(chunk_cache); + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 + ); + let chunk_cache = Arc::new(chunk_cache); + db.rooms + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); } - drop(cache); + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses + ); Ok(full_auth_chain .into_iter() From d485eb5a241e5f50d4f87cdbe7b724d7f1336148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 14:38:25 +0200 Subject: [PATCH 0760/1727] chore: bump dependencies --- Cargo.lock | 333 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 3 +- 2 files changed, 167 insertions(+), 169 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf60ae4..0006b1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,9 +146,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2b_simd" @@ -190,9 +190,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" @@ -295,12 +295,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "cookie" version = "0.15.1" @@ -372,9 +366,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -435,9 +429,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest", @@ -464,26 +458,13 @@ dependencies = [ [[package]] name = 
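(A note on the auth-chain persistence introduced just above in PATCH 0759: a chain cached for a single short event ID is written to the `shorteventid_authchain` tree as its `u64` IDs concatenated in big-endian form, and read back by splitting the value into fixed-size chunks. The sketch below shows only that encoding round-trip; the sample values and the `main` function are illustrative and not the database code itself.)

```rust
use std::convert::TryInto;
use std::mem::size_of;

/// Encode an auth chain of short event IDs as concatenated big-endian u64s,
/// the way the shorteventid_authchain tree above stores a cached chain.
fn encode_chain(chain: &[u64]) -> Vec<u8> {
    chain.iter().flat_map(|s| s.to_be_bytes()).collect()
}

/// Decode the stored bytes back into short event IDs.
fn decode_chain(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}

fn main() {
    let chain = vec![42u64, 7, 1_000_000];
    let bytes = encode_chain(&chain);
    assert_eq!(decode_chain(&bytes), chain);
    println!("round-tripped {} ids in {} bytes", chain.len(), bytes.len());
}
```

Fixed-width big-endian encoding keeps the stored value compact and lets the reader split it with `chunks_exact` without any framing bytes.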
"der" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f215f706081a44cb702c71c39a52c05da637822e9c1645a50b7202689e982d" +checksum = "31e21d2d0f22cde6e88694108429775c0219760a07779bf96503b434a03d7412" dependencies = [ "const-oid", ] -[[package]] -name = "derive_more" -version = "0.99.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" -dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version 0.3.3", - "syn", -] - [[package]] name = "devise" version = "0.3.1" @@ -839,9 +820,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" +checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" dependencies = [ "bytes", "fnv", @@ -952,9 +933,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" dependencies = [ "bytes", "http", @@ -963,9 +944,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" @@ -984,9 +965,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.11" +version = "0.14.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" +checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" dependencies = [ "bytes", "futures-channel", @@ -1044,7 +1025,7 @@ dependencies = [ "gif", "jpeg-decoder", "num-iter", - "num-rational", + "num-rational 0.3.2", "num-traits", "png", ] @@ -1128,15 +1109,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "jobserver" -version = "0.1.22" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ "libc", ] @@ -1149,9 +1130,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.51" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" dependencies = [ "wasm-bindgen", ] @@ -1187,9 +1168,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.98" +version = "0.2.101" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" +checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" [[package]] name = "libsqlite3-sys" @@ -1282,15 +1263,15 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" @@ -1340,12 +1321,11 @@ dependencies = [ [[package]] name = "multer" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdd568fea4758b30d6423f013f7171e193c34aa97828d1bd9f924fb3af30a8c" +checksum = "408327e2999b839cd1af003fc01b2019a6c10a1361769542203f6fedc5179680" dependencies = [ "bytes", - "derive_more", "encoding_rs", "futures-util", "http", @@ -1368,6 +1348,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint 0.4.0", + "num-complex", + "num-integer", + "num-iter", + "num-rational 0.4.0", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1379,6 +1373,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e0d047c1062aa51e256408c560894e5251f08925980e53cf1aa5bd00eec6512" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +dependencies = [ + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1411,6 +1425,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +dependencies = [ + "autocfg", + "num-bigint 0.4.0", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.14" @@ -1598,15 +1624,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -[[package]] -name = "pest" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = [ - "ucd-trie", -] - [[package]] name = "pin-project" version = "1.0.8" @@ -1826,9 +1843,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" 
dependencies = [ "bitflags", ] @@ -2044,8 +2061,9 @@ dependencies = [ [[package]] name = "ruma" -version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668031e3108d6a2cfbe6eca271d8698f4593440e71a44afdadcf67ce3cb93c1f" dependencies = [ "assign", "js_int", @@ -2066,7 +2084,8 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5f1843792b6749ec1ece62595cf99ad30bf9589c96bb237515235e71da396ea" dependencies = [ "bytes", "http", @@ -2082,7 +2101,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18abda5cca94178d08b622bca042e1cbb5eb7d4ebf3a2a81590a3bb3c57008" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2093,7 +2113,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49369332a5f299e832e19661f92d49e08c345c3c6c4ab16e09cb31c5ff6da878" dependencies = [ "ruma-api", "ruma-common", @@ -2107,7 +2128,8 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9568a222c12cf6220e751484ab78feec28071f85965113a5bb802936a2920ff0" dependencies = [ "assign", "bytes", @@ -2127,7 +2149,8 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d5b7605f58dc0d9cf1848cc7f1af2bae4e4bcd1d2b7a87bbb9864c8a785b91" dependencies = [ "indexmap", "js_int", @@ -2141,8 +2164,9 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.4" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +version = "0.24.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87801e1207cfebdee02e7997ebf181a1c9837260b78c1b8ce96b896a2bcb3763" dependencies = [ "indoc", "js_int", @@ -2157,8 +2181,9 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.4" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +version = "0.24.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da4498845347de88adf1b7da4578e2ca7355ad4ce47b0976f6594bacf958660" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2169,7 +2194,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.0" -source = 
"git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61c9adbe1a29c301ae627604406d60102c89fc833b110cd35bbf29ae205ea6c" dependencies = [ "js_int", "ruma-api", @@ -2184,7 +2210,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb417d091e8dd5a633e4e5998231a156049d7fcc221045cfdc0642eb72067732" dependencies = [ "paste", "rand 0.8.4", @@ -2198,7 +2225,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c708edad7f605638f26c951cbad7501fbf28ab01009e5ca65ea5a2db74a882b1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2208,15 +2236,14 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" -dependencies = [ - "thiserror", -] +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42285e7fb5d5f2d5268e45bb683e36d5c6fd9fc1e11a4559ba3c3521f3bbb2cb" [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e76e66e24f2d5a31511fbf6c79e79f67a7a6a98ebf48d72381b7d5bb6c09f035" dependencies = [ "js_int", "ruma-api", @@ -2229,7 +2256,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef5b29da7065efc5b1e1a8f61add7543c9ab4ecce5ee0dd1c1c5ecec83fbeec" dependencies = [ "js_int", "ruma-api", @@ -2244,7 +2272,8 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2b22aae842e7ecda695e42b7b39d4558959d9d9a27acc2a16acf4f4f7f00c3" dependencies = [ "bytes", "form_urlencoded", @@ -2258,7 +2287,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243e9bef188b08f94c79bc2f8fd1eb307a9e636b2b8e4571acf8c7be16381d28" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2269,7 +2299,8 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a4f64027165b59500162d10d435b1253898bf3ad4f5002cb0d56913fe7f76d7" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2285,8 +2316,9 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "796427aaa2d266238c5c1b1a6ca4640a4d282ec2cb2e844c69a8f3a262d3db15" dependencies = [ "itertools 0.10.1", "js_int", @@ -2334,16 +2366,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver 0.9.0", -] - -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", + "semver", ] [[package]] @@ -2417,22 +2440,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "5b9bd29cdffb8875b04f71c51058f940cf4e390bbfd2ce669c4f22cd70b492a5" dependencies = [ "bitflags", "core-foundation", "core-foundation-sys", "libc", + "num", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" +checksum = "19133a286e494cc3311c165c4676ccb1fd47bed45b55f9d71fbd784ad4cea6f8" dependencies = [ "core-foundation-sys", "libc", @@ -2444,16 +2468,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", + "semver-parser", ] [[package]] @@ -2462,29 +2477,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" -version = "1.0.126" +version = "1.0.129" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "d1f72836d2aa753853178eda473a3b9d8e4eefdaf20523b919677e6de489f8f1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.129" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "e57ae87ad533d9a56427558b516d0adac283614e347abf85b0dc0cbbf0a249f3" dependencies = [ "proc-macro2", "quote", @@ -2516,12 +2522,12 @@ 
dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" +checksum = "6375dbd828ed6964c3748e4ef6d18e7a175d408ffe184bca01698d0c73f915a9" dependencies = [ "dtoa", - "linked-hash-map", + "indexmap", "serde", "yaml-rust", ] @@ -2547,9 +2553,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" dependencies = [ "lazy_static", ] @@ -2576,15 +2582,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" dependencies = [ "chrono", - "num-bigint", + "num-bigint 0.2.6", "num-traits", ] [[package]] name = "slab" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" [[package]] name = "sled" @@ -2685,7 +2691,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" dependencies = [ "discard", - "rustc_version 0.2.3", + "rustc_version", "stdweb-derive", "stdweb-internal-macros", "stdweb-internal-runtime", @@ -2735,9 +2741,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" +checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7" dependencies = [ "proc-macro2", "quote", @@ -2904,9 +2910,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.9.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7b349f11a7047e6d1276853e612d152f5e8a352c61917887cc2169e2366b4c" +checksum = "92036be488bb6594459f2e03b60e42df6f937fe6ca5c5ffdcb539c6b84dc40f5" dependencies = [ "autocfg", "bytes", @@ -3020,9 +3026,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" dependencies = [ "lazy_static", ] @@ -3074,9 +3080,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab69019741fca4d98be3c62d2b75254528b5432233fd8a4d2739fec20278de48" +checksum = "b9cbe87a2fa7e35900ce5de20220a582a9483a7063811defce79d7cbd59d4cfe" dependencies = [ "ansi_term", "chrono", @@ -3170,12 +3176,6 @@ dependencies = [ "serde", ] -[[package]] -name = "ucd-trie" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" - [[package]] name = "uncased" version = "0.9.6" @@ -3194,12 +3194,9 
@@ checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] +checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" [[package]] name = "unicode-normalization" @@ -3282,9 +3279,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" dependencies = [ "cfg-if 1.0.0", "serde", @@ -3294,9 +3291,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" dependencies = [ "bumpalo", "lazy_static", @@ -3309,9 +3306,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +checksum = "95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3321,9 +3318,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3331,9 +3328,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" dependencies = [ "proc-macro2", "quote", @@ -3344,15 +3341,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 0b0dda9..0290957 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,9 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers +ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", 
"unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio From 33172a70e6683248feae7a79398c1391d58ef2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 18:59:10 +0200 Subject: [PATCH 0761/1727] fix: improve key fetching --- src/client_server/keys.rs | 59 +++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 8db7688..f9895f9 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,5 +1,6 @@ use super::SESSION_ID_LENGTH; use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; +use rocket::futures::{prelude::*, stream::FuturesUnordered}; use ruma::{ api::{ client::{ @@ -18,7 +19,10 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, UserId, }; use serde_json::json; -use std::collections::{BTreeMap, HashSet}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + time::{Duration, Instant}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -294,7 +298,7 @@ pub async fn get_keys_helper bool>( let mut user_signing_keys = BTreeMap::new(); let mut device_keys = BTreeMap::new(); - let mut get_over_federation = BTreeMap::new(); + let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { if user_id.server_name() != db.globals.server_name() { @@ -364,22 +368,30 @@ pub async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); - for (server, vec) in get_over_federation { - let mut device_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - device_keys_input_fed.insert(user_id.clone(), keys.clone()); - } - match db - .sending - .send_federation_request( - &db.globals, + let mut futures = get_over_federation + .into_iter() + .map(|(server, vec)| async move { + let mut device_keys_input_fed = BTreeMap::new(); + for (user_id, keys) in vec { + device_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + ( server, - federation::keys::get_keys::v1::Request { - device_keys: device_keys_input_fed, - }, + db.sending + .send_federation_request( + &db.globals, + server, + federation::keys::get_keys::v1::Request { + device_keys: device_keys_input_fed, + }, + ) + .await, ) - .await - { + }) + .collect::>(); + + while let Some((server, response)) = futures.next().await { + match response { Ok(response) => { master_keys.extend(response.master_keys); 
self_signing_keys.extend(response.self_signing_keys); @@ -430,13 +442,15 @@ pub async fn claim_keys_helper( one_time_keys.insert(user_id.clone(), container); } + let mut failures = BTreeMap::new(); + for (server, vec) in get_over_federation { let mut one_time_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); } // Ignore failures - let keys = db + if let Ok(keys) = db .sending .send_federation_request( &db.globals, @@ -445,13 +459,16 @@ pub async fn claim_keys_helper( one_time_keys: one_time_keys_input_fed, }, ) - .await?; - - one_time_keys.extend(keys.one_time_keys); + .await + { + one_time_keys.extend(keys.one_time_keys); + } else { + failures.insert(server.to_string(), json!({})); + } } Ok(claim_keys::Response { - failures: BTreeMap::new(), + failures, one_time_keys, }) } From a1e8a99db57a1db11fdec59cad9d77cbdeed04b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 18:59:38 +0200 Subject: [PATCH 0762/1727] improvement: less IO for auth chains --- src/server_server.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 65fd4a8..7794124 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1980,15 +1980,9 @@ fn get_auth_chain( let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; for id in starting_events { - if let Some(pdu) = db.rooms.get_pdu(&id)? { - for auth_event in &pdu.auth_events { - let short = db - .rooms - .get_or_create_shorteventid(&auth_event, &db.globals)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, auth_event.clone())); - } - } + let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); } let mut full_auth_chain = HashSet::new(); @@ -2000,10 +1994,6 @@ fn get_auth_chain( continue; } - // The code below will only get the auth chains, not the events in the chunk. 
So let's add - // them first - full_auth_chain.extend(chunk.iter().map(|(id, _)| id)); - let chunk_key = chunk .iter() .map(|(short, _)| short) From 0330d3e2700151e59e3f9b2d2ed97fa8235a595a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 19:00:08 +0200 Subject: [PATCH 0763/1727] fix: server resolution with well-known files --- src/server_server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 7794124..b58a0d1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -343,9 +343,9 @@ async fn find_actual_destination( match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { - if let Some(pos) = destination_str.find(':') { + if let Some(pos) = delegated_hostname.find(':') { // 3.2: Hostname with port in .well-known file - let (host, port) = destination_str.split_at(pos); + let (host, port) = delegated_hostname.split_at(pos); FedDest::Named(host.to_string(), port.to_string()) } else { match query_srv_record(globals, &delegated_hostname).await { From 19b89ab91f4305521bdeffa42dee24f843d6c7fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 26 Aug 2021 23:11:13 +0200 Subject: [PATCH 0764/1727] fix: server resolution --- src/appservice_server.rs | 6 +- src/client_server/keys.rs | 5 +- src/database/globals.rs | 76 ++++---------------- src/database/pusher.rs | 6 +- src/database/rooms.rs | 26 +++++-- src/server_server.rs | 148 ++++++++++++++++++++++++++++---------- 6 files changed, 156 insertions(+), 111 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 7868e45..9fc7dce 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -46,7 +46,11 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals.reqwest_client().execute(reqwest_request).await?; + let mut response = globals + .reqwest_client()? + .build()? 
+ .execute(reqwest_request) + .await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index f9895f9..0815737 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -19,10 +19,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, UserId, }; use serde_json::json; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - time::{Duration, Instant}, -}; +use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; diff --git a/src/database/globals.rs b/src/database/globals.rs index 823ce34..6d11f49 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,4 +1,4 @@ -use crate::{database::Config, utils, ConduitResult, Error, Result}; +use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result}; use ruma::{ api::{ client::r0::sync::sync_events, @@ -6,25 +6,25 @@ use ruma::{ }, DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, }; -use rustls::{ServerCertVerifier, WebPKIVerifier}; use std::{ collections::{BTreeMap, HashMap}, fs, future::Future, + net::IpAddr, path::PathBuf, sync::{Arc, Mutex, RwLock}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::{error, info}; +use tracing::error; use trust_dns_resolver::TokioAsyncResolver; use super::abstraction::Tree; pub const COUNTER: &[u8] = b"c"; -type WellKnownMap = HashMap, (String, String)>; -type TlsNameMap = HashMap; +type WellKnownMap = HashMap, (FedDest, String)>; +type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( Option, // since @@ -37,7 +37,6 @@ pub struct Globals { pub(super) globals: Arc, config: Config, keypair: Arc, - reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, pub(super) server_signingkeys: Arc, @@ -51,40 +50,6 @@ pub struct Globals { pub rotate: RotationHandler, } -struct MatrixServerVerifier { - inner: WebPKIVerifier, - tls_name_override: Arc>, -} - -impl ServerCertVerifier for MatrixServerVerifier { - #[tracing::instrument(skip(self, roots, presented_certs, dns_name, ocsp_response))] - fn verify_server_cert( - &self, - roots: &rustls::RootCertStore, - presented_certs: &[rustls::Certificate], - dns_name: webpki::DNSNameRef<'_>, - ocsp_response: &[u8], - ) -> std::result::Result { - if let Some(override_name) = self.tls_name_override.read().unwrap().get(dns_name.into()) { - let result = self.inner.verify_server_cert( - roots, - presented_certs, - override_name.as_ref(), - ocsp_response, - ); - if result.is_ok() { - return result; - } - info!( - "Server {:?} is non-compliant, retrying TLS verification with original name", - dns_name - ); - } - self.inner - .verify_server_cert(roots, presented_certs, dns_name, ocsp_response) - } -} - /// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. /// /// This is utilized to have sync workers return early and release read locks on the database. 
@@ -162,24 +127,6 @@ impl Globals { }; let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); - let verifier = Arc::new(MatrixServerVerifier { - inner: WebPKIVerifier::new(), - tls_name_override: tls_name_override.clone(), - }); - let mut tlsconfig = rustls::ClientConfig::new(); - tlsconfig.dangerous().set_certificate_verifier(verifier); - tlsconfig.root_store = - rustls_native_certs::load_native_certs().expect("Error loading system certificates"); - - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)) - .pool_max_idle_per_host(1) - .use_preconfigured_tls(tlsconfig); - if let Some(proxy) = config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } - let reqwest_client = reqwest_client_builder.build().unwrap(); let jwt_decoding_key = config .jwt_secret @@ -190,7 +137,6 @@ impl Globals { globals, config, keypair: Arc::new(keypair), - reqwest_client, dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, @@ -219,8 +165,16 @@ impl Globals { } /// Returns a reqwest client which can be used to send requests. - pub fn reqwest_client(&self) -> &reqwest::Client { - &self.reqwest_client + pub fn reqwest_client(&self) -> Result { + let mut reqwest_client_builder = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)) + .pool_max_idle_per_host(1); + if let Some(proxy) = self.config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + + Ok(reqwest_client_builder) } #[tracing::instrument(skip(self))] diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 3df9ed4..da4a6e7 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -113,7 +113,11 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals.reqwest_client().execute(reqwest_request).await; + let response = globals + .reqwest_client()? + .build()? + .execute(reqwest_request) + .await; match response { Ok(mut response) => { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e0ffded..3fb4337 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use member::MembershipState; -use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; +use crate::{Database, Error, PduEvent, Result, pdu::PduBuilder, server_server, utils}; use lru_cache::LruCache; use regex::Regex; use ring::digest; @@ -22,12 +22,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - mem::size_of, - sync::{Arc, Mutex}, -}; +use std::{collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem::size_of, sync::{Arc, Mutex}, time::Instant}; use tokio::sync::MutexGuard; use tracing::{error, warn}; @@ -1515,6 +1510,23 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } + "get_auth_chain" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::try_from(args[0]) { + let start = Instant::now(); + let count = + server_server::get_auth_chain(vec![event_id], db)? 
+ .count(); + let elapsed = start.elapsed(); + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), + )); + } + } + } "get_pdu" => { if args.len() == 1 { if let Ok(event_id) = EventId::try_from(args[0]) { diff --git a/src/server_server.rs b/src/server_server.rs index b58a0d1..37cabe8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,7 +4,7 @@ use crate::{ utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION, HOST}; +use http::header::{HeaderValue, AUTHORIZATION}; use regex::Regex; use rocket::response::content::Json; use ruma::{ @@ -83,7 +83,7 @@ use rocket::{get, post, put}; /// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); /// ``` #[derive(Clone, Debug, PartialEq)] -enum FedDest { +pub enum FedDest { Literal(SocketAddr), Named(String, String), } @@ -109,6 +109,13 @@ impl FedDest { Self::Named(host, _) => host.clone(), } } + + fn port(&self) -> Option { + match &self { + Self::Literal(addr) => Some(addr.port()), + Self::Named(_, port) => port[1..].parse().ok(), + } + } } #[tracing::instrument(skip(globals, request))] @@ -124,41 +131,34 @@ where return Err(Error::bad_config("Federation is disabled.")); } - let maybe_result = globals + let mut write_destination_to_cache = false; + + let cached_result = globals .actual_destination_cache .read() .unwrap() .get(destination) .cloned(); - let (actual_destination, host) = if let Some(result) = maybe_result { + let (actual_destination, host) = if let Some(result) = cached_result { result } else { + write_destination_to_cache = true; + let result = find_actual_destination(globals, &destination).await; - let (actual_destination, host) = result.clone(); - let result_string = (result.0.into_https_string(), result.1.into_uri_string()); - globals - .actual_destination_cache - .write() - .unwrap() - .insert(Box::::from(destination), result_string.clone()); - let dest_hostname = actual_destination.hostname(); - let host_hostname = host.hostname(); - if dest_hostname != host_hostname { - globals.tls_name_override.write().unwrap().insert( - dest_hostname, - webpki::DNSNameRef::try_from_ascii_str(&host_hostname) - .unwrap() - .to_owned(), - ); - } - result_string + + (result.0, result.1.clone().into_uri_string()) }; + let actual_destination_str = actual_destination.clone().into_https_string(); + let mut http_request = request - .try_into_http_request::>(&actual_destination, SendAccessToken::IfRequired("")) + .try_into_http_request::>(&actual_destination_str, SendAccessToken::IfRequired("")) .map_err(|e| { - warn!("Failed to find destination {}: {}", actual_destination, e); + warn!( + "Failed to find destination {}: {}", + actual_destination_str, e + ); Error::BadServerResponse("Invalid destination") })?; @@ -224,15 +224,26 @@ where } } - http_request - .headers_mut() - .insert(HOST, HeaderValue::from_str(&host).unwrap()); - let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); let url = reqwest_request.url().clone(); - let response = globals.reqwest_client().execute(reqwest_request).await; + + let mut client = globals.reqwest_client()?; + if let Some((override_name, port)) = globals + .tls_name_override + .read() + .unwrap() + .get(&actual_destination.hostname()) + { + client = client.resolve( + &actual_destination.hostname(), + 
SocketAddr::new(override_name[0], *port), + ); + // port will be ignored + } + + let response = client.build()?.execute(reqwest_request).await; match response { Ok(mut response) => { @@ -271,6 +282,13 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + globals.actual_destination_cache.write().unwrap().insert( + Box::::from(destination), + (actual_destination, host), + ); + } + response.map_err(|e| { warn!( "Invalid 200 response from {} on: {} {}", @@ -339,7 +357,7 @@ async fn find_actual_destination( match request_well_known(globals, &destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { - hostname = delegated_hostname.clone(); + hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { @@ -348,11 +366,40 @@ async fn find_actual_destination( let (host, port) = delegated_hostname.split_at(pos); FedDest::Named(host.to_string(), port.to_string()) } else { - match query_srv_record(globals, &delegated_hostname).await { + // Delegated hostname has no port in this branch + if let Some(hostname_override) = + query_srv_record(globals, &delegated_hostname).await + { // 3.3: SRV lookup successful - Some(hostname) => hostname, + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named( + delegated_hostname, + format!(":{}", port.to_string()), + ) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { // 3.4: No SRV records, just use the hostname from .well-known - None => add_port_to_hostname(&delegated_hostname), + add_port_to_hostname(&delegated_hostname) } } } @@ -362,7 +409,31 @@ async fn find_actual_destination( None => { match query_srv_record(globals, &destination_str).await { // 4: SRV record found - Some(hostname) => hostname, + Some(hostname_override) => { + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + hostname.clone(), + (override_ip.iter().collect(), force_port.unwrap_or(8448)), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named( + hostname.clone(), + format!(":{}", port.to_string()), + ) + } else { + add_port_to_hostname(&hostname) + } + } // 5: No SRV record found None => add_port_to_hostname(&destination_str), } @@ -377,12 +448,12 @@ async fn find_actual_destination( let hostname = if let Ok(addr) = hostname.parse::() { FedDest::Literal(addr) } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), "".to_string()) + FedDest::Named(addr.to_string(), ":8448".to_string()) } else if let Some(pos) = hostname.find(':') { let (host, port) = hostname.split_at(pos); FedDest::Named(host.to_string(), port.to_string()) } else { - FedDest::Named(hostname, "".to_string()) + FedDest::Named(hostname, ":8448".to_string()) }; (actual_destination, hostname) } 
@@ -423,6 +494,9 @@ pub async fn request_well_known( let body: serde_json::Value = serde_json::from_str( &globals .reqwest_client() + .ok()? + .build() + .ok()? .get(&format!( "https://{}/.well-known/matrix/server", destination @@ -1971,7 +2045,7 @@ fn append_incoming_pdu( } #[tracing::instrument(skip(starting_events, db))] -fn get_auth_chain( +pub fn get_auth_chain( starting_events: Vec, db: &Database, ) -> Result + '_> { From 9bff276fa9e3c45b851bcef88514da690346609f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 26 Aug 2021 17:58:32 -0400 Subject: [PATCH 0765/1727] Use Arc in place of most EventIds --- Cargo.lock | 77 +++++++++++++++++-------------------------- Cargo.toml | 4 +-- src/database/rooms.rs | 46 ++++++++++++++++---------- src/server_server.rs | 70 ++++++++++++++++++++++++--------------- 4 files changed, 104 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0006b1c..880829a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -324,9 +324,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpufeatures" -version = "0.1.5" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" dependencies = [ "libc", ] @@ -2061,9 +2061,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668031e3108d6a2cfbe6eca271d8698f4593440e71a44afdadcf67ce3cb93c1f" +version = "0.3.0" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "assign", "js_int", @@ -2084,8 +2083,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f1843792b6749ec1ece62595cf99ad30bf9589c96bb237515235e71da396ea" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "bytes", "http", @@ -2101,8 +2099,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b18abda5cca94178d08b622bca042e1cbb5eb7d4ebf3a2a81590a3bb3c57008" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2113,8 +2110,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49369332a5f299e832e19661f92d49e08c345c3c6c4ab16e09cb31c5ff6da878" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "ruma-api", "ruma-common", @@ -2128,8 +2124,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9568a222c12cf6220e751484ab78feec28071f85965113a5bb802936a2920ff0" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "assign", "bytes", @@ -2149,8 +2144,7 @@ dependencies = [ [[package]] name = "ruma-common" 
version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d5b7605f58dc0d9cf1848cc7f1af2bae4e4bcd1d2b7a87bbb9864c8a785b91" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "indexmap", "js_int", @@ -2164,9 +2158,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87801e1207cfebdee02e7997ebf181a1c9837260b78c1b8ce96b896a2bcb3763" +version = "0.24.4" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "indoc", "js_int", @@ -2181,9 +2174,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da4498845347de88adf1b7da4578e2ca7355ad4ce47b0976f6594bacf958660" +version = "0.24.4" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2194,8 +2186,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61c9adbe1a29c301ae627604406d60102c89fc833b110cd35bbf29ae205ea6c" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "js_int", "ruma-api", @@ -2210,8 +2201,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb417d091e8dd5a633e4e5998231a156049d7fcc221045cfdc0642eb72067732" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "paste", "rand 0.8.4", @@ -2225,8 +2215,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c708edad7f605638f26c951cbad7501fbf28ab01009e5ca65ea5a2db74a882b1" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2236,14 +2225,15 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42285e7fb5d5f2d5268e45bb683e36d5c6fd9fc1e11a4559ba3c3521f3bbb2cb" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +dependencies = [ + "thiserror", +] [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e76e66e24f2d5a31511fbf6c79e79f67a7a6a98ebf48d72381b7d5bb6c09f035" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "js_int", "ruma-api", @@ -2256,8 +2246,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef5b29da7065efc5b1e1a8f61add7543c9ab4ecce5ee0dd1c1c5ecec83fbeec" 
+source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "js_int", "ruma-api", @@ -2272,8 +2261,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2b22aae842e7ecda695e42b7b39d4558959d9d9a27acc2a16acf4f4f7f00c3" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "bytes", "form_urlencoded", @@ -2287,8 +2275,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243e9bef188b08f94c79bc2f8fd1eb307a9e636b2b8e4571acf8c7be16381d28" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2299,8 +2286,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a4f64027165b59500162d10d435b1253898bf3ad4f5002cb0d56913fe7f76d7" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2316,9 +2302,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "796427aaa2d266238c5c1b1a6ca4640a4d282ec2cb2e844c69a8f3a262d3db15" +version = "0.3.0" +source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" dependencies = [ "itertools 0.10.1", "js_int", @@ -2522,9 +2507,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6375dbd828ed6964c3748e4ef6d18e7a175d408ffe184bca01698d0c73f915a9" +checksum = "ad104641f3c958dab30eb3010e834c2622d1f3f4c530fef1dee20ad9485f3c09" dependencies = [ "dtoa", "indexmap", @@ -2540,9 +2525,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" +checksum = "9204c41a1597a8c5af23c82d1c921cb01ec0a4c59e07a9c7306062829a3903f3" dependencies = [ "block-buffer", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 0290957..034f94b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,9 +18,9 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", 
"push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/DevinR528/ruma", rev = "c7860fcb89dbde636e2c83d0636934fb9924f40c", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3fb4337..b829a1b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use member::MembershipState; -use crate::{Database, Error, PduEvent, Result, pdu::PduBuilder, server_server, utils}; +use crate::{pdu::PduBuilder, server_server, utils, Database, Error, PduEvent, Result}; use lru_cache::LruCache; use regex::Regex; use ring::digest; @@ -22,7 +22,13 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem::size_of, sync::{Arc, Mutex}, time::Instant}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + convert::{TryFrom, TryInto}, + mem::size_of, + sync::{Arc, Mutex}, + time::Instant, +}; use tokio::sync::MutexGuard; use tracing::{error, warn}; @@ -89,8 +95,8 @@ pub struct Rooms { pub(super) referencedevents: Arc, pub(super) pdu_cache: Mutex>>, + pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) shorteventid_cache: Mutex>, pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, @@ -111,7 +117,7 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { + pub fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -162,7 +168,7 @@ impl Rooms { shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result>> { let shortstatekey = match self.get_shortstatekey(event_type, state_key)? 
{ Some(s) => s, None => return Ok(None), @@ -518,7 +524,7 @@ impl Rooms { pub fn parse_compressed_state_event( &self, compressed_event: CompressedStateEvent, - ) -> Result<(u64, EventId)> { + ) -> Result<(u64, Arc)> { Ok(( utils::u64_from_bytes(&compressed_event[0..size_of::()]) .expect("bytes have right length"), @@ -834,14 +840,14 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result { + pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { if let Some(id) = self .shorteventid_cache .lock() .unwrap() .get_mut(&shorteventid) { - return Ok(id.clone()); + return Ok(Arc::clone(id)); } let bytes = self @@ -849,15 +855,17 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; + let event_id = Arc::new( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, + ); self.shorteventid_cache .lock() .unwrap() - .insert(shorteventid, event_id.clone()); + .insert(shorteventid, Arc::clone(&event_id)); Ok(event_id) } @@ -924,7 +932,7 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_get_id(current_shortstatehash, event_type, state_key) } else { @@ -1514,9 +1522,11 @@ impl Rooms { if args.len() == 1 { if let Ok(event_id) = EventId::try_from(args[0]) { let start = Instant::now(); - let count = - server_server::get_auth_chain(vec![event_id], db)? - .count(); + let count = server_server::get_auth_chain( + vec![Arc::new(event_id)], + db, + )? + .count(); let elapsed = start.elapsed(); db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( @@ -1548,7 +1558,7 @@ impl Rooms { if outlier { "PDU is outlier" } else { "PDU was accepted"}, json_text), - format!("

{}\n{}\n\n", + format!("{}\n{}\n
.into_iter() - .map(|(_, id)| id) + .map(|(_, id)| (*id).clone()) .collect(); - let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; + let auth_chain_ids = get_auth_chain(vec![Arc::new(body.event_id.clone())], &db)?; Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.collect(), + auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), pdu_ids, } .into()) From afca61fe7ca09156e1625cf150ffb233a15c6c3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 28 Aug 2021 11:39:33 +0200 Subject: [PATCH 0766/1727] fix: don't retry soft failed events --- src/client_server/sync.rs | 86 +++++++++++++----------------------- src/database.rs | 6 ++- src/database/rooms.rs | 91 ++++++++++++++++++++++++++++++++------- src/server_server.rs | 12 ++++++ 4 files changed, 124 insertions(+), 71 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 21a9ef2..d6e32ea 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -256,8 +256,8 @@ async fn sync_helper( // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = db.rooms.room_members(&room_id).count(); - let invited_member_count = db.rooms.room_members_invited(&room_id).count(); + let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0); + let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -407,64 +407,40 @@ async fn sync_helper( }); if encrypted_room { - for (user_id, current_member) in db - .rooms - .room_members(&room_id) - .filter_map(|r| r.ok()) - .filter_map(|user_id| { - db.rooms - .state_get( - current_shortstatehash, - &EventType::RoomMember, - user_id.as_str(), - ) - .ok() - .flatten() - .map(|current_member| (user_id, current_member)) - }) - { - let current_membership = serde_json::from_value::< - Raw, - >(current_member.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; + for state_event in &state_events { + if state_event.kind != EventType::RoomMember { + continue; + } - let since_membership = db - .rooms - .state_get( - since_shortstatehash, - &EventType::RoomMember, - user_id.as_str(), - )? - .and_then(|since_member| { - serde_json::from_value::< - Raw, - >(since_member.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }) - .map_or(MembershipState::Leave, |member| member.membership); + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::try_from(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - let user_id = UserId::try_from(user_id.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + if user_id == sender_user { + continue; + } - match (since_membership, current_membership) { - (MembershipState::Leave, MembershipState::Join) => { - // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { - device_list_updates.insert(user_id); + let new_membership = serde_json::from_value::< + Raw, + >(state_event.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
+ .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { + device_list_updates.insert(user_id); + } } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} } - // TODO: Remove, this should never happen here, right? - (MembershipState::Join, MembershipState::Leave) => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} } } } diff --git a/src/database.rs b/src/database.rs index 8fd745b..ca3d2f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -252,6 +252,7 @@ impl Database { userroomid_joined: builder.open_tree("userroomid_joined")?, roomuserid_joined: builder.open_tree("roomuserid_joined")?, roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, @@ -277,6 +278,8 @@ impl Database { statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, + referencedevents: builder.open_tree("referencedevents")?, pdu_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), @@ -285,6 +288,7 @@ impl Database { shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), stateinfo_cache: Mutex::new(LruCache::new(1000)), + our_real_users_cache: RwLock::new(HashMap::new()), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, @@ -442,7 +446,7 @@ impl Database { let room_id = RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); - db.rooms.update_joined_count(&room_id)?; + db.rooms.update_joined_count(&room_id, &db)?; } db.globals.bump_database_version(6)?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b829a1b..729c8f3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -26,7 +26,7 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem::size_of, - sync::{Arc, Mutex}, + sync::{Arc, Mutex, RwLock}, time::Instant, }; use tokio::sync::MutexGuard; @@ -59,6 +59,7 @@ pub struct Rooms { pub(super) userroomid_joined: Arc, pub(super) roomuserid_joined: Arc, pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, pub(super) roomuseroncejoinedids: Arc, pub(super) userroomid_invitestate: Arc, // InviteState = Vec> pub(super) roomuserid_invitecount: Arc, // InviteCount = Count @@ -90,6 +91,7 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, /// RoomId + EventId -> Parent PDU EventId. 
pub(super) referencedevents: Arc, @@ -100,6 +102,7 @@ pub struct Rooms { pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, + pub(super) our_real_users_cache: RwLock>>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -425,7 +428,7 @@ impl Rooms { } } - self.update_joined_count(room_id)?; + self.update_joined_count(room_id, &db)?; self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; @@ -1229,9 +1232,19 @@ impl Rooms { self.eventid_outlierpdu.insert( &event_id.as_bytes(), &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - )?; + ) + } - Ok(()) + #[tracing::instrument(skip(self))] + pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.softfailedeventids.insert(&event_id.as_bytes(), &[]) + } + + #[tracing::instrument(skip(self))] + pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.softfailedeventids + .get(&event_id.as_bytes()) + .map(|o| o.is_some()) } /// Creates a new persisted data unit and adds it to a room. @@ -1334,15 +1347,9 @@ impl Rooms { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in db - .rooms - .room_members(&pdu.room_id) - .filter_map(|r| r.ok()) - .filter(|user_id| user_id.server_name() == db.globals.server_name()) - .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(true)) - { + for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { // Don't notify the user of their own events - if user == pdu.sender { + if user == &pdu.sender { continue; } @@ -2443,29 +2450,45 @@ impl Rooms { } if update_joined_count { - self.update_joined_count(room_id)?; + self.update_joined_count(room_id, db)?; } Ok(()) } - #[tracing::instrument(skip(self))] - pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + #[tracing::instrument(skip(self, room_id, db))] + pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; let mut joined_servers = HashSet::new(); + let mut real_users = HashSet::new(); for joined in self.room_members(&room_id).filter_map(|r| r.ok()) { joined_servers.insert(joined.server_name().to_owned()); + if joined.server_name() == db.globals.server_name() + && !db.users.is_deactivated(&joined).unwrap_or(true) + { + real_users.insert(joined); + } joinedcount += 1; } for invited in self.room_members_invited(&room_id).filter_map(|r| r.ok()) { joined_servers.insert(invited.server_name().to_owned()); + invitedcount += 1; } self.roomid_joinedcount .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; + self.roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.clone(), Arc::new(real_users)); + for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { if !joined_servers.remove(&old_joined_server) { // Server not in room anymore @@ -2499,6 +2522,32 @@ impl Rooms { Ok(()) } + #[tracing::instrument(skip(self, room_id, db))] + pub fn get_our_real_users( + &self, + room_id: &RoomId, + db: &Database, + ) -> Result>> { + let maybe = self + .our_real_users_cache + .read() + .unwrap() + .get(room_id) + .cloned(); + if let Some(users) = maybe { + Ok(users) + } else { + self.update_joined_count(room_id, &db)?; + Ok(Arc::clone( + self.our_real_users_cache + .read() + .unwrap() + .get(room_id) + .unwrap(), + )) + } + } + #[tracing::instrument(skip(self, db))] pub 
async fn leave_room( &self, @@ -2977,6 +3026,18 @@ impl Rooms { .transpose()?) } + #[tracing::instrument(skip(self))] + pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { + Ok(self + .roomid_invitedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose()?) + } + /// Returns an iterator over all User IDs who ever joined a room. #[tracing::instrument(skip(self))] pub fn room_useroncejoined<'a>( diff --git a/src/server_server.rs b/src/server_server.rs index 85a32f8..331e956 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1271,6 +1271,15 @@ async fn upgrade_outlier_to_timeline_pdu( if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } + + if db + .rooms + .is_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to ask db for soft fail".to_owned())? + { + return Err("Event has been soft failed".into()); + } + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities // doing all the checks in this list starting at 1. These are not timeline events. @@ -1683,6 +1692,9 @@ async fn upgrade_outlier_to_timeline_pdu( if soft_fail { // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; return Err("Event has been soft failed".into()); } From 16010276054b1d3c9aacd98a8570a2a31d0e86c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 28 Aug 2021 19:35:15 +0200 Subject: [PATCH 0767/1727] add warning if calculated event id != requested event id --- src/database/abstraction/sqlite.rs | 5 +-- src/database/rooms.rs | 60 ++++++++++++++++++++++++++++++ src/server_server.rs | 23 +++++++----- 3 files changed, 76 insertions(+), 12 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 5b895c7..3a4623f 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -93,9 +93,8 @@ impl Engine { } pub fn flush_wal(self: &Arc) -> Result<()> { - // We use autocheckpoints - //self.write_lock() - //.pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; + self.write_lock() + .pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; Ok(()) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 729c8f3..8eb9b30 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1544,6 +1544,66 @@ impl Rooms { } } } + "parse_pdu" => { + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" + { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let event_id = EventId::try_from(&*format!( + "${}", + // Anything higher than version3 behaves the same + ruma::signatures::reference_hash( + &value, + &RoomVersionId::Version6 + ) + .expect("ruma can calculate reference hashes") + )) + .expect( + "ruma's reference hashes are valid event ids", + ); + + match serde_json::from_value::( + serde_json::to_value(value) + .expect("value is json"), + ) { + Ok(pdu) => { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + format!("EventId: {:?}\n{:#?}", event_id, pdu), + ), + )); + } + Err(e) => { + db.admin.send(AdminCommand::SendMessage( + 
message::MessageEventContent::text_plain( + format!("EventId: {:?}\nCould not parse event: {}", event_id, e), + ), + )); + } + } + } + Err(e) => { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + format!( + "Invalid json in command body: {}", + e + ), + ), + )); + } + } + } else { + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + "Expected code block in command body.", + ), + )); + } + } "get_pdu" => { if args.len() == 1 { if let Ok(event_id) = EventId::try_from(args[0]) { diff --git a/src/server_server.rs b/src/server_server.rs index 331e956..bb83ac0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1184,13 +1184,13 @@ fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = db - .rooms - .get_pdu(id) - .map_err(|e| e.to_string())? - .ok_or_else(|| { - "Auth event not found, event failed recursive auth checks.".to_string() - })?; + let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; + } + }; match auth_events.entry(( auth_event.kind.clone(), @@ -1767,7 +1767,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( { Ok(res) => { warn!("Got {} over federation", id); - let (event_id, value) = + let (calculated_event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { @@ -1776,11 +1776,16 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } }; + if calculated_event_id != **id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", + id, calculated_event_id, &res.pdu); + } + // This will also fetch the auth chain match handle_outlier_pdu( origin, create_event, - &event_id, + &id, &room_id, value.clone(), db, From 632a1343eb900f05714621ed8b0d752523f54f38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 29 Aug 2021 20:00:02 +0200 Subject: [PATCH 0768/1727] fix: make appservices more efficient --- src/client_server/sync.rs | 13 +---- src/database.rs | 3 +- src/database/abstraction/sqlite.rs | 21 ++++++-- src/database/rooms.rs | 87 ++++++++++++++++++++++++------ src/server_server.rs | 51 ++++++++---------- 5 files changed, 116 insertions(+), 59 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d6e32ea..f7f2454 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -246,12 +246,6 @@ async fn sync_helper( .current_shortstatehash(&room_id)? .expect("All rooms have state"); - let pdus_after_since = db - .rooms - .pdus_after(&sender_user, &room_id, since)? 
- .next() - .is_some(); - let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes @@ -341,7 +335,7 @@ async fn sync_helper( true, state_events, ) - } else if !pdus_after_since && since_shortstatehash == Some(current_shortstatehash) { + } else if timeline_pdus.len() == 0 && since_shortstatehash == Some(current_shortstatehash) { // No state changes (Vec::new(), None, None, false, Vec::new()) } else { @@ -401,10 +395,7 @@ async fn sync_helper( let send_member_count = state_events .iter() - .any(|event| event.kind == EventType::RoomMember) - || timeline_pdus.iter().any(|(_, event)| { - event.state_key.is_some() && event.kind == EventType::RoomMember - }); + .any(|event| event.kind == EventType::RoomMember); if encrypted_room { for state_event in &state_events { diff --git a/src/database.rs b/src/database.rs index ca3d2f0..ca0ed88 100644 --- a/src/database.rs +++ b/src/database.rs @@ -287,8 +287,9 @@ impl Database { eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), - stateinfo_cache: Mutex::new(LruCache::new(1000)), our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 3a4623f..f17eac9 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -148,6 +148,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { + //dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? .query_row([key], |row| row.get(0)) @@ -156,6 +157,7 @@ impl SqliteTable { #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { + //dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -182,11 +184,16 @@ impl SqliteTable { let statement_ref = NonAliasingBox(statement); + //let name = self.name.clone(); + let iterator = Box::new( statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(|r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { @@ -294,6 +301,8 @@ impl Tree for SqliteTable { let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? 
+ //let name = self.name.clone(); + if backwards { let statement = Box::leak(Box::new( guard @@ -310,7 +319,10 @@ impl Tree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(|r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { iterator, @@ -332,7 +344,10 @@ impl Tree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(|r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 8eb9b30..59ed950 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -103,6 +103,7 @@ pub struct Rooms { pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock>>>, + pub(super) appservice_in_room_cache: RwLock>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -2110,6 +2111,11 @@ impl Rooms { } for appservice in db.appservice.all()? { + if self.appservice_in_room(room_id, &appservice, db)? { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") @@ -2133,17 +2139,6 @@ impl Rooms { .get("rooms") .and_then(|rooms| rooms.as_sequence()); - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let user_is_joined = - |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); - let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember @@ -2151,9 +2146,6 @@ impl Rooms { .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - || self.room_members(&room_id).any(|userid| { - userid.map_or(false, |userid| users.is_match(userid.as_str())) - }) }; let matching_aliases = |aliases: &Regex| { self.room_aliases(&room_id) @@ -2161,8 +2153,7 @@ impl Rooms { .any(|room_alias| aliases.is_match(room_alias.as_str())) }; - if bridge_user_id.map_or(false, user_is_joined) - || aliases.iter().any(matching_aliases) + if aliases.iter().any(matching_aliases) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) || users.iter().any(matching_users) { @@ -2579,6 +2570,11 @@ impl Rooms { self.serverroomids.insert(&serverroom_id, &[])?; } + self.appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + Ok(()) } @@ -2608,6 +2604,65 @@ impl Rooms { } } + #[tracing::instrument(skip(self, room_id, appservice, db))] + pub fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + db: &Database, + ) -> Result { + let maybe = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.0)) + .copied(); + + if let Some(b) = maybe { + Ok(b) + } else { + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, 
db.globals.server_name()).ok() + }); + + let in_room = bridge_user_id + .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(&room_id).any(|userid| { + userid.map_or(false, |userid| { + users.iter().any(|r| r.is_match(userid.as_str())) + }) + }); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.clone()) + .or_default() + .insert(appservice.0.clone(), in_room); + + Ok(in_room) + } else { + Ok(false) + } + } + } + #[tracing::instrument(skip(self, db))] pub async fn leave_room( &self, diff --git a/src/server_server.rs b/src/server_server.rs index bb83ac0..f5210db 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -48,7 +48,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UserId, + ServerSigningKeyId, }; use std::{ collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -2017,6 +2017,11 @@ fn append_incoming_pdu( )?; for appservice in db.appservice.all()? { + if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") @@ -2029,45 +2034,35 @@ fn append_incoming_pdu( }); let aliases = namespaces .get("aliases") - .and_then(|users| users.get("regex")) - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()); + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) + .collect::>() + }); let rooms = namespaces .get("rooms") .and_then(|rooms| rooms.as_sequence()); - let room_aliases = db.rooms.room_aliases(&pdu.room_id); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - #[allow(clippy::blocks_in_if_conditions)] - if bridge_user_id.map_or(false, |bridge_user_id| { - db.rooms - .is_joined(&bridge_user_id, &pdu.room_id) - .unwrap_or(false) - }) || users.iter().any(|users| { + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { - room_aliases + }; + let matching_aliases = |aliases: &Regex| { + db.rooms + .room_aliases(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || db - .rooms - .room_members(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + }; + + if aliases.iter().any(matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) + || users.iter().any(matching_users) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } From a469ca04610de779fe938629e139b0e3de8260a8 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Sun, 29 Aug 2021 20:01:38 +0200 Subject: [PATCH 0769/1727] Move docker healthcheck into dedicated script. 
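
The inline curl chain in the Dockerfile HEALTHCHECK is replaced by a standalone
docker/healthcheck.sh that is copied to /srv/conduit/ in the runtime image. The
script resolves the port from the CONDUIT_PORT environment variable, falling back
to parsing conduit.toml, and then probes /_matrix/client/versions over HTTP and,
if that fails, over HTTPS.

As a rough illustration only (editor's sketch, not part of this patch; the container
name "conduit" is an assumption), the same script Docker runs for its health probe
can also be invoked by hand:

    # Run the packaged healthcheck inside a running container.
    # CONDUIT_PORT may be set in the container env; otherwise the script
    # falls back to reading the port from conduit.toml.
    docker exec conduit /srv/conduit/healthcheck.sh && echo healthy || echo unhealthy
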
--- Dockerfile | 24 +++++++++++++----------- docker/healthcheck.sh | 13 +++++++++++++ 2 files changed, 26 insertions(+), 11 deletions(-) create mode 100644 docker/healthcheck.sh diff --git a/Dockerfile b/Dockerfile index 68dce3f..f4b176f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,25 +7,29 @@ # Alpine build image to build Conduit's statically compiled binary FROM alpine:3.14 as builder +# Install packages needed for building all crates +RUN apk add --no-cache \ + cargo \ + openssl-dev + # Specifies if the local project is build or if Conduit gets build # from the official git repository. Defaults to the git repo. ARG LOCAL=false # Specifies which revision/commit is build. Defaults to HEAD ARG GIT_REF=origin/master -# Install packages needed for building all crates -RUN apk add --no-cache \ - cargo \ - openssl-dev - - # Copy project files from current folder COPY . . # Build it from the copied local files or from the official git repository RUN if [[ $LOCAL == "true" ]]; then \ + mv ./docker/healthcheck.sh . ; \ + echo "Building from local source..." ; \ cargo install --path . ; \ else \ - cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF}; \ + echo "Building revision '${GIT_REF}' from online source..." ; \ + cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \ + echo "Loadings healthcheck script from online source..." ; \ + wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \ fi ########################## RUNTIME IMAGE ########################## @@ -64,6 +68,7 @@ EXPOSE 6167 # /srv/conduit and create data folder for database RUN mkdir -p /srv/conduit/.local/share/conduit COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ +COPY --from=builder ./healthcheck.sh /srv/conduit/ # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install @@ -82,10 +87,7 @@ RUN apk add --no-cache \ libgcc # Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s \ - CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ - curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ - exit 1 +HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh # Set user to www-data USER www-data diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh new file mode 100644 index 0000000..568838e --- /dev/null +++ b/docker/healthcheck.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +# If the port is not specified as env var, take it from the config file +if [ -z ${CONDUIT_PORT} ]; then + CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') +fi + +# The actual health check. +# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. +# TODO: Change this to a single curl call. Do we have a config value that we can check for that? 
+curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + exit 1 From 9ded40e98329a02c1d504ae328c7ce4b8ca951da Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Mon, 30 Aug 2021 09:14:08 +0200 Subject: [PATCH 0770/1727] Change healthcheck in ci dockerfile --- docker/ci-binaries-packaging.Dockerfile | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 797ef0c..1fe85bf 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -53,10 +53,7 @@ RUN apk add --no-cache \ libgcc # Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s \ - CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ - curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \ - exit 1 +HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh # Set user to www-data USER www-data @@ -68,3 +65,4 @@ ENTRYPOINT [ "/srv/conduit/conduit" ] # Copy the Conduit binary into the image at the latest possible moment to maximise caching: COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit +COPY ./docker/healthcheck.sh /srv/conduit/ From d57c19802d8c3502c1aa739f0615b0f3aa448266 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 30 Aug 2021 10:46:36 +0200 Subject: [PATCH 0771/1727] improvement: don't do state updates if the event was soft failed --- src/server_server.rs | 71 +++++++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f5210db..42d8185 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1497,6 +1497,47 @@ async fn upgrade_outlier_to_timeline_pdu( ) .map_err(|_| "Failed to get_auth_events.".to_owned())?; + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, &id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + debug!("starting soft fail auth check"); + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + previous_create, + None, + |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + ) + .map_err(|_e| "Auth check failed.".to_owned())?; + + if soft_fail { + append_incoming_pdu( + &db, + &incoming_pdu, + val, + extremities, + state_ids_compressed, + soft_fail, + &state_lock + ) + .map_err(|_| "Failed to add pdu to db.".to_owned())?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; + return Err("Event has been soft failed".into()); + } + if incoming_pdu.state_key.is_some() { let mut extremity_sstatehashes = HashMap::new(); @@ -1651,31 +1692,10 @@ async fn upgrade_outlier_to_timeline_pdu( extremities.insert(incoming_pdu.event_id.clone()); - // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - debug!("starting soft fail auth check"); - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create, - None, - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, &id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; - let pdu_id = append_incoming_pdu( &db, &incoming_pdu, @@ -1689,15 +1709,6 @@ async fn upgrade_outlier_to_timeline_pdu( debug!("Appended incoming pdu."); - if soft_fail { - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); - } - // Event has passed all auth/stateres checks drop(state_lock); Ok(pdu_id) From 1b25e78e3ae6d60e73cea1c50af802cdf80045c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 30 Aug 2021 10:56:41 +0200 Subject: [PATCH 0772/1727] fix: inviting dendrite users --- src/client_server/membership.rs | 16 ++++++++++++++-- src/server_server.rs | 2 +- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29926e3..0a7ca81 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -973,6 +973,14 @@ pub async fn invite_helper<'a>( (room_version_id, pdu_json, invite_room_state) }; + // Generate event id + let expected_event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&pdu_json, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + let response = db .sending .send_federation_request( @@ -980,9 +988,9 @@ pub async fn invite_helper<'a>( user_id.server_name(), create_invite::v2::Request { room_id: room_id.clone(), - event_id: ruma::event_id!("$receivingservershouldsetthis"), + event_id: expected_event_id.clone(), room_version: room_version_id, - event: PduEvent::convert_to_outgoing_federation_event(pdu_json), + event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state, }, ) @@ -1002,6 +1010,10 @@ pub async fn invite_helper<'a>( } }; + if expected_event_id != event_id { + warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); + } + let origin = serde_json::from_value::>( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, diff --git a/src/server_server.rs b/src/server_server.rs index 42d8185..3682a49 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1526,7 +1526,7 @@ async fn upgrade_outlier_to_timeline_pdu( extremities, state_ids_compressed, soft_fail, - &state_lock + &state_lock, ) .map_err(|_| "Failed to add pdu to db.".to_owned())?; From bd4ea14a29b46d92d7ed3dd415121579d9e84dde Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 30 Aug 2021 16:02:55 +0200 Subject: [PATCH 0773/1727] improvement: call /state_ids less often by using state res --- src/server_server.rs | 129 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 13 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 3682a49..b965fcf 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -964,6 +964,16 @@ pub async fn handle_incoming_pdu<'a>( return Ok(None); } + if incoming_pdu.origin_server_ts + < db.rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists") + .origin_server_ts + { + return Ok(None); + } + // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events let mut graph = HashMap::new(); let mut eventid_info = HashMap::new(); @@ -1280,6 +1290,18 @@ async fn upgrade_outlier_to_timeline_pdu( return Err("Event has been soft failed".into()); } + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities // doing all the checks in this list starting at 1. These are not timeline events. @@ -1318,7 +1340,100 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event = Some(state); } - // TODO: set incoming_auth_events? + } else { + warn!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; + + let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + let mut fork_states = Vec::new(); + + if okay { + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state = db + .rooms + .state_full_ids(sstatehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); + // Now it's the state after the pdu + } + + fork_states.push(leaf_state); + } + + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id)))) + .collect::>>() + }) + .collect::>>() + .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + + let mut auth_chain_sets = Vec::new(); + for state in fork_states { + auth_chain_sets.push( + get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + } + + state_at_incoming_event = match state_res::StateResolution::resolve( + &room_id, + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } + }; + } } if state_at_incoming_event.is_none() { @@ -1402,18 +1517,6 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event.expect("we always set this to some above"); // 11. Check the auth of the event passes based on the state of the event - let create_event_content = - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - // If the previous event was the create event special rules apply let previous_create = if incoming_pdu.auth_events.len() == 1 && incoming_pdu.prev_events == incoming_pdu.auth_events From 69df9a0145ed5743b37ce44ece87bbbb69eb6a1c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 30 Aug 2021 20:18:48 +0200 Subject: [PATCH 0774/1727] add tls --- Cargo.lock | 1 + Cargo.toml | 1 + src/database/abstraction/sqlite.rs | 44 ++++++++++-------------------- 3 files changed, 17 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 880829a..03b1731 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -267,6 +267,7 @@ dependencies = [ "serde_yaml", "sled", "thiserror", + "thread_local", "threadpool", "tokio", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 034f94b..bb44d08 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,6 +80,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +thread_local = "1.1.3" [features] default = ["conduit_bin", "backend_sqlite"] diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f17eac9..99deeba 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -10,6 +10,7 @@ use std::{ pin::Pin, sync::Arc, }; +use thread_local::ThreadLocal; use tokio::sync::oneshot::Sender; use tracing::debug; @@ -40,6 +41,8 @@ impl Drop for NonAliasingBox { pub struct Engine { writer: Mutex, + read_conn_tls: ThreadLocal, + read_iterator_conn_tls: ThreadLocal, path: PathBuf, cache_size_per_thread: u32, @@ -62,34 +65,14 @@ impl Engine { self.writer.lock() } - fn read_lock(&self) -> &'static Connection { - READ_CONNECTION.with(|cell| { - let connection = &mut cell.borrow_mut(); - - if (*connection).is_none() { - let c = Box::leak(Box::new( - Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap(), - )); - **connection = Some(c); - } - - connection.unwrap() - }) + fn read_lock<'a>(&'a self) -> &'a 
Connection { + self.read_conn_tls + .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) } - fn read_lock_iterator(&self) -> &'static Connection { - READ_CONNECTION_ITERATOR.with(|cell| { - let connection = &mut cell.borrow_mut(); - - if (*connection).is_none() { - let c = Box::leak(Box::new( - Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap(), - )); - **connection = Some(c); - } - - connection.unwrap() - }) + fn read_lock_iterator<'a>(&'a self) -> &'a Connection { + self.read_iterator_conn_tls + .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) } pub fn flush_wal(self: &Arc) -> Result<()> { @@ -105,15 +88,18 @@ impl DatabaseEngine for Engine { // calculates cache-size per permanent connection // 1. convert MB to KiB - // 2. divide by permanent connections + // 2. divide by permanent connections + permanent iter connections + write connection // 3. round down to nearest integer - let cache_size_per_thread: u32 = - ((config.db_cache_capacity_mb * 1024.0) / (num_cpus::get().max(1) + 1) as f64) as u32; + let cache_size_per_thread: u32 = ((config.db_cache_capacity_mb * 1024.0) + / ((num_cpus::get().max(1) * 2) + 1) as f64) + as u32; let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { writer, + read_conn_tls: ThreadLocal::new(), + read_iterator_conn_tls: ThreadLocal::new(), path, cache_size_per_thread, }); From a08ea1569599fa12e31f1ac45bc24374cf9dacaf Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 31 Aug 2021 18:03:44 +0200 Subject: [PATCH 0775/1727] Use `$CI_COMMIT_SHORT_SHA` for `GIT_REF` Using `$CI_COMMIT_REF_NAME` means we get `master` for every image build, which is not very useful/informative. Using `$CI_COMMIT_SHORT_SHA`, on the other hand, makes it possible to see exactly from which commit an image was built. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f6f56f..75bdfd6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -217,7 +217,7 @@ build:docker:main: --context $CI_PROJECT_DIR --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_REF_NAME" + --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" --destination "$CI_REGISTRY_IMAGE/conduit:latest" --destination "$CI_REGISTRY_IMAGE/conduit:alpine" From 9ec8b7f2b324d2f4d344baecf845e30c8036f5b6 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 31 Aug 2021 18:25:35 +0200 Subject: [PATCH 0776/1727] registration default true --- DEPLOY.md | 4 ++-- conduit-example.toml | 4 ++-- debian/postinst | 4 ++-- docker-compose.yml | 2 +- docker/docker-compose.traefik.yml | 2 +- src/database.rs | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 85f3f07..7f92d1d 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -107,8 +107,8 @@ port = 6167 # Max size for uploads max_request_size = 20_000_000 # in bytes -# Disabling registration means no new users will be able to register on this server -allow_registration = false +# Enables registration. If set to false, no users can register on this server. 
+allow_registration = true # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work diff --git a/conduit-example.toml b/conduit-example.toml index 7d419cf..8008256 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -22,8 +22,8 @@ port = 6167 # Max size for uploads max_request_size = 20_000_000 # in bytes -# Disable registration. No new users will be able to register on this server -#allow_registration = false +# Enables registration. If set to false, no users can register on this server. +allow_registration = true # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work diff --git a/debian/postinst b/debian/postinst index 824fd64..6bd1a3a 100644 --- a/debian/postinst +++ b/debian/postinst @@ -62,8 +62,8 @@ port = ${CONDUIT_PORT} # Max size for uploads max_request_size = 20_000_000 # in bytes -# Disable registration. No new users will be able to register on this server. -#allow_registration = false +# Enables registration. If set to false, no users can register on this server. +allow_registration = true # Disable encryption, so no new encrypted rooms can be created. # Note: Existing rooms will continue to work. diff --git a/docker-compose.yml b/docker-compose.yml index 3f8f832..530fc19 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -27,6 +27,7 @@ services: environment: CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_ALLOW_REGISTRATION: 'true' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 0.0.0.0 # CONDUIT_PORT: 6167 @@ -34,7 +35,6 @@ services: # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_REGISTRATION : 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 58fa3ed..29d0ee3 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -27,6 +27,7 @@ services: environment: CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_ALLOW_REGISTRATION : 'true' ### Uncomment and change values as desired # CONDUIT_ADDRESS: 0.0.0.0 # CONDUIT_PORT: 6167 @@ -34,7 +35,6 @@ services: # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_REGISTRATION : 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit diff --git a/src/database.rs b/src/database.rs index ca0ed88..79571f6 100644 --- a/src/database.rs +++ b/src/database.rs @@ -53,7 +53,7 @@ pub struct Config { max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] max_concurrent_requests: u16, - #[serde(default = "true_fn")] + #[serde(default = "false_fn")] allow_registration: bool, #[serde(default = "true_fn")] allow_encryption: bool, From b9eb39a9c69ef3be69d517553adb8646cd4ea4a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 31 Aug 2021 19:14:37 +0200 Subject: [PATCH 
0777/1727] docs: documentation for every endpoint --- src/appservice_server.rs | 2 +- src/client_server/account.rs | 59 ++++--- src/client_server/alias.rs | 32 +++- src/client_server/backup.rs | 89 ++++++++++ src/client_server/capabilities.rs | 2 +- src/client_server/config.rs | 14 +- src/client_server/context.rs | 6 + src/client_server/device.rs | 29 ++++ src/client_server/directory.rs | 20 ++- src/client_server/filter.rs | 6 + src/client_server/keys.rs | 34 +++- src/client_server/media.rs | 19 +++ src/client_server/membership.rs | 60 ++++++- src/client_server/message.rs | 13 ++ src/client_server/mod.rs | 3 + src/client_server/presence.rs | 9 ++ src/client_server/profile.rs | 25 +++ src/client_server/push.rs | 32 ++++ src/client_server/read_marker.rs | 9 ++ src/client_server/redact.rs | 5 + src/client_server/room.rs | 36 +++++ src/client_server/search.rs | 5 + src/client_server/session.rs | 19 ++- src/client_server/state.rs | 33 +++- src/client_server/sync.rs | 35 +++- src/client_server/tag.rs | 15 ++ src/client_server/thirdparty.rs | 3 + src/client_server/to_device.rs | 3 + src/client_server/typing.rs | 3 + src/client_server/unversioned.rs | 2 +- src/client_server/user_directory.rs | 5 + src/client_server/voip.rs | 3 + src/database/key_backups.rs | 21 +++ src/database/rooms.rs | 47 ++++-- src/server_server.rs | 242 ++++++++++++++++++++++++---- 35 files changed, 847 insertions(+), 93 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 9fc7dce..8be524c 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -9,7 +9,7 @@ use std::{ }; use tracing::warn; -pub async fn send_request( +pub(crate) async fn send_request( globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, diff --git a/src/client_server/account.rs b/src/client_server/account.rs index e68c957..4b610a3 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -40,8 +40,12 @@ const GUEST_NAME_LENGTH: usize = 10; /// /// Checks if a username is valid and available on this server. /// -/// - Returns true if no user or appservice on this server claimed this username -/// - This will not reserve the username, so the username might become invalid when trying to register +/// Conditions for returning true: +/// - The user id is not historical +/// - The server name of the user id matches this server +/// - No user or appservice on this server already claimed this username +/// +/// Note: This will not reserve the username, so the username might become invalid when trying to register #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/register/available", data = "") @@ -80,11 +84,15 @@ pub async fn get_register_available_route( /// /// Register an account on this homeserver. /// -/// - Returns the device id and access_token unless `inhibit_login` is true -/// - When registering a guest account, all parameters except initial_device_display_name will be -/// ignored -/// - Creates a new account and a device for it -/// - The account will be populated with default account data +/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html) +/// to check if the user id is valid and available. 
+/// +/// - Only works if registration is enabled +/// - If type is guest: ignores all parameters except initial_device_display_name +/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) +/// - If type is not guest and no username is given: Always fails after UIAA check +/// - Creates a new account and populates it with default account data +/// - If `inhibit_login` is false: Creates a device and returns device id and access_token #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/register", data = "") @@ -129,7 +137,7 @@ pub async fn register_route( ))?; // Check if username is creative enough - if !missing_username && db.users.exists(&user_id)? { + if db.users.exists(&user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -193,12 +201,12 @@ pub async fn register_route( // Create user db.users.create(&user_id, password)?; + // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - db.users .set_displayname(&user_id, Some(displayname.clone()))?; - // Initial data + // Initial account data db.account_data.update( None, &user_id, @@ -211,6 +219,7 @@ pub async fn register_route( &db.globals, )?; + // Inhibit login does not work for guests if !is_guest && body.inhibit_login { return Ok(register::Response { access_token: None, @@ -231,7 +240,7 @@ pub async fn register_route( // Generate new token for the device let token = utils::random_string(TOKEN_LENGTH); - // Add device + // Create device for this account db.users.create_device( &user_id, &device_id, @@ -239,7 +248,7 @@ pub async fn register_route( body.initial_device_display_name.clone(), )?; - // If this is the first user on this server, create the admins room + // If this is the first user on this server, create the admin room if db.users.count()? == 1 { // Create a user for the server let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) @@ -529,9 +538,16 @@ pub async fn register_route( /// /// Changes the password of this account. /// -/// - Invalidates all other access tokens if logout_devices is true -/// - Deletes all other devices and most of their data (to-device events, last seen, etc.) if -/// logout_devices is true +/// - Requires UIAA to verify user password +/// - Changes the password of the sender user +/// - The password hash is calculated using argon2 with 32 character salt, the plain password is +/// not saved +/// +/// If logout_devices is true it does the following for each device except the sender device: +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/account/password", data = "") @@ -598,9 +614,9 @@ pub async fn change_password_route( /// # `GET _matrix/client/r0/account/whoami` /// -/// Get user_id of this account. +/// Get user_id of the sender user. 
/// -/// - Also works for Application Services +/// Note: Also works for Application Services #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/account/whoami", data = "") @@ -616,11 +632,13 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult", data = "") @@ -24,6 +27,13 @@ pub async fn create_alias_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { + if body.room_alias.server_name() != db.globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Alias is from another server.", + )); + } + if db.rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } @@ -36,6 +46,12 @@ pub async fn create_alias_route( Ok(create_alias::Response::new().into()) } +/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` +/// +/// Deletes a room alias from this server. +/// +/// - TODO: additional access control checks +/// - TODO: Update canonical alias event #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/r0/directory/room/<_>", data = "") @@ -45,13 +61,27 @@ pub async fn delete_alias_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { + if body.room_alias.server_name() != db.globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Alias is from another server.", + )); + } + db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + // TODO: update alt_aliases? + db.flush()?; Ok(delete_alias::Response::new().into()) } +/// # `GET /_matrix/client/r0/directory/room/{roomAlias}` +/// +/// Resolve an alias locally or over federation. +/// +/// - TODO: Suggest more servers to join via #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/directory/room/<_>", data = "") @@ -64,7 +94,7 @@ pub async fn get_alias_route( get_alias_helper(&db, &body.room_alias).await } -pub async fn get_alias_helper( +pub(crate) async fn get_alias_helper( db: &Database, room_alias: &RoomAliasId, ) -> ConduitResult { diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 06f9818..259f1a9 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -12,6 +12,9 @@ use ruma::api::client::{ #[cfg(feature = "conduit_bin")] use rocket::{delete, get, post, put}; +/// # `POST /_matrix/client/r0/room_keys/version` +/// +/// Creates a new backup. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/unstable/room_keys/version", data = "") @@ -31,6 +34,9 @@ pub async fn create_backup_route( Ok(create_backup::Response { version }.into()) } +/// # `PUT /_matrix/client/r0/room_keys/version/{version}` +/// +/// Update information about an existing backup. Only `auth_data` can be modified. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/version/<_>", data = "") @@ -49,6 +55,9 @@ pub async fn update_backup_route( Ok(update_backup::Response {}.into()) } +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about the latest backup version. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version", data = "") @@ -77,6 +86,9 @@ pub async fn get_latest_backup_route( .into()) } +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about an existing backup. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version/<_>", data = "") @@ -104,6 +116,11 @@ pub async fn get_backup_route( .into()) } +/// # `DELETE /_matrix/client/r0/room_keys/version/{version}` +/// +/// Delete an existing key backup. 
+/// +/// - Deletes both information about the backup, as well as all key data related to the backup #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") @@ -122,7 +139,13 @@ pub async fn delete_backup_route( Ok(delete_backup::Response {}.into()) } +/// # `PUT /_matrix/client/r0/room_keys/keys` +/// /// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys", data = "") @@ -134,6 +157,18 @@ pub async fn add_backup_keys_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if Some(&body.version) + != db + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { db.key_backups.add_key( @@ -156,7 +191,13 @@ pub async fn add_backup_keys_route( .into()) } +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` +/// /// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") @@ -168,6 +209,18 @@ pub async fn add_backup_key_sessions_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if Some(&body.version) + != db + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + for (session_id, key_data) in &body.sessions { db.key_backups.add_key( &sender_user, @@ -188,7 +241,13 @@ pub async fn add_backup_key_sessions_route( .into()) } +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// /// Add the received backup key to the database. +/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") @@ -200,6 +259,18 @@ pub async fn add_backup_key_session_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if Some(&body.version) + != db + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + db.key_backups.add_key( &sender_user, &body.version, @@ -218,6 +289,9 @@ pub async fn add_backup_key_session_route( .into()) } +/// # `GET /_matrix/client/r0/room_keys/keys` +/// +/// Retrieves all keys from the backup. 
#[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys", data = "") @@ -234,6 +308,9 @@ pub async fn get_backup_keys_route( Ok(get_backup_keys::Response { rooms }.into()) } +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Retrieves all keys from the backup for a given room. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") @@ -252,6 +329,9 @@ pub async fn get_backup_key_sessions_route( Ok(get_backup_key_sessions::Response { sessions }.into()) } +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Retrieves a key from the backup. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") @@ -274,6 +354,9 @@ pub async fn get_backup_key_session_route( Ok(get_backup_key_session::Response { key_data }.into()) } +/// # `DELETE /_matrix/client/r0/room_keys/keys` +/// +/// Delete the keys from the backup. #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys", data = "") @@ -297,6 +380,9 @@ pub async fn delete_backup_keys_route( .into()) } +/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Delete the keys from the backup for a given room. #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") @@ -320,6 +406,9 @@ pub async fn delete_backup_key_sessions_route( .into()) } +/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Delete a key from the backup. #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 65c8879..2eacd8f 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -13,7 +13,7 @@ use rocket::get; /// # `GET /_matrix/client/r0/capabilities` /// -/// Get information on this server's supported feature set and other relevent capabilities. +/// Get information on the supported feature set and other relevent capabilities of this server. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/capabilities", data = "<_body>") diff --git a/src/client_server/config.rs b/src/client_server/config.rs index b692749..bd897ba 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,6 +16,9 @@ use serde_json::{json, value::RawValue as RawJsonValue}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; +/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` +/// +/// Sets some account data for the sender user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") @@ -48,6 +51,9 @@ pub async fn set_global_account_data_route( Ok(set_global_account_data::Response {}.into()) } +/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` +/// +/// Sets some room account data for the sender user. #[cfg_attr( feature = "conduit_bin", put( @@ -83,6 +89,9 @@ pub async fn set_room_account_data_route( Ok(set_room_account_data::Response {}.into()) } +/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` +/// +/// Gets some account data for the sender user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") @@ -98,7 +107,6 @@ pub async fn get_global_account_data_route( .account_data .get::>(None, sender_user, body.event_type.clone().into())? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush()?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? @@ -107,6 +115,9 @@ pub async fn get_global_account_data_route( Ok(get_global_account_data::Response { account_data }.into()) } +/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` +/// +/// Gets some room account data for the sender user. #[cfg_attr( feature = "conduit_bin", get( @@ -129,7 +140,6 @@ pub async fn get_room_account_data_route( body.event_type.clone().into(), )? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - db.flush()?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 701e584..aaae8d6 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -5,6 +5,12 @@ use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] use rocket::get; +/// # `GET /_matrix/client/r0/rooms/{roomId}/context` +/// +/// Allows loading room history around an event. +/// +/// - Only works if the user is joined (TODO: always allow, but only show events if the user was +/// joined, depending on history_visibility) #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 5210467..4aa3047 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -11,6 +11,9 @@ use super::SESSION_ID_LENGTH; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, post, put}; +/// # `GET /_matrix/client/r0/devices` +/// +/// Get metadata on all devices of the sender user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/devices", data = "") @@ -31,6 +34,9 @@ pub async fn get_devices_route( Ok(get_devices::Response { devices }.into()) } +/// # `GET /_matrix/client/r0/devices/{deviceId}` +/// +/// Get metadata on a single device of the sender user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/devices/<_>", data = "") @@ -50,6 +56,9 @@ pub async fn get_device_route( Ok(get_device::Response { device }.into()) } +/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// +/// Updates the metadata on a given device of the sender user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/devices/<_>", data = "") @@ -76,6 +85,15 @@ pub async fn update_device_route( Ok(update_device::Response {}.into()) } +/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// +/// Deletes the given device. +/// +/// - Requires UIAA to verify user password +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/r0/devices/<_>", data = "") @@ -128,6 +146,17 @@ pub async fn delete_device_route( Ok(delete_device::Response {}.into()) } +/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// +/// Deletes the given device. 
+/// +/// - Requires UIAA to verify user password +/// +/// For each device: +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/delete_devices", data = "") diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 589aacd..5c93e22 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -28,6 +28,11 @@ use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post, put}; +/// # `POST /_matrix/client/r0/publicRooms` +/// +/// Lists the public rooms on this server. +/// +/// - Rooms are ordered by the number of joined members #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/publicRooms", data = "") @@ -48,6 +53,11 @@ pub async fn get_public_rooms_filtered_route( .await } +/// # `GET /_matrix/client/r0/publicRooms` +/// +/// Lists the public rooms on this server. +/// +/// - Rooms are ordered by the number of joined members #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/publicRooms", data = "") @@ -77,6 +87,11 @@ pub async fn get_public_rooms_route( .into()) } +/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` +/// +/// Sets the visibility of a given room in the room directory. +/// +/// - TODO: Access control checks #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/directory/list/room/<_>", data = "") @@ -107,6 +122,9 @@ pub async fn set_room_visibility_route( Ok(set_room_visibility::Response {}.into()) } +/// # `GET /_matrix/client/r0/directory/list/room/{roomId}` +/// +/// Gets the visibility of a given room in the room directory. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/directory/list/room/<_>", data = "") @@ -126,7 +144,7 @@ pub async fn get_room_visibility_route( .into()) } -pub async fn get_public_rooms_filtered_helper( +pub(crate) async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, limit: Option, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index a08eb34..dfb5377 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -4,6 +4,9 @@ use ruma::api::client::r0::filter::{self, create_filter, get_filter}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; +/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` +/// +/// TODO: Loads a filter that was previously created. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] #[tracing::instrument] pub async fn get_filter_route() -> ConduitResult { @@ -18,6 +21,9 @@ pub async fn get_filter_route() -> ConduitResult { .into()) } +/// # `PUT /_matrix/client/r0/user/{userId}/filter` +/// +/// TODO: Creates a new filter to be used by other endpoints. #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] #[tracing::instrument] pub async fn create_filter_route() -> ConduitResult { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 0815737..3295e16 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -24,6 +24,12 @@ use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; +/// # `POST /_matrix/client/r0/keys/upload` +/// +/// Publish end-to-end encryption keys for the sender device. 
+/// +/// - Adds one time keys +/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/keys/upload", data = "") @@ -49,6 +55,7 @@ pub async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { + // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept if db .users @@ -73,6 +80,13 @@ pub async fn upload_keys_route( .into()) } +/// # `POST /_matrix/client/r0/keys/query` +/// +/// Get end-to-end encryption keys for the given users. +/// +/// - Always fetches users from other servers over federation +/// - Gets master keys, self-signing keys, user signing keys and device keys. +/// - The master and self-signing keys contain signatures that the user is allowed to see #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/keys/query", data = "") @@ -95,6 +109,9 @@ pub async fn get_keys_route( Ok(response.into()) } +/// # `POST /_matrix/client/r0/keys/claim` +/// +/// Claims one-time keys #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/keys/claim", data = "") @@ -111,6 +128,11 @@ pub async fn claim_keys_route( Ok(response.into()) } +/// # `POST /_matrix/client/r0/keys/device_signing/upload` +/// +/// Uploads end-to-end key information for the sender user. +/// +/// - Requires UIAA to verify password #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/unstable/keys/device_signing/upload", data = "") @@ -172,6 +194,9 @@ pub async fn upload_signing_keys_route( Ok(upload_signing_keys::Response {}.into()) } +/// # `POST /_matrix/client/r0/keys/signatures/upload` +/// +/// Uploads end-to-end key signatures from the sender user. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/unstable/keys/signatures/upload", data = "") @@ -233,6 +258,11 @@ pub async fn upload_signatures_route( Ok(upload_signatures::Response {}.into()) } +/// # `POST /_matrix/client/r0/keys/changes` +/// +/// Gets a list of users who have updated their device identity keys since the previous sync token. +/// +/// - TODO: left users #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/keys/changes", data = "") @@ -284,7 +314,7 @@ pub async fn get_key_changes_route( .into()) } -pub async fn get_keys_helper bool>( +pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, device_keys_input: &BTreeMap>>, allowed_signatures: F, @@ -409,7 +439,7 @@ pub async fn get_keys_helper bool>( }) } -pub async fn claim_keys_helper( +pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, db: &Database, ) -> Result { diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 2bd189a..4cec0af 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -12,6 +12,9 @@ use rocket::{get, post}; const MXC_LENGTH: usize = 32; +/// # `GET /_matrix/media/r0/config` +/// +/// Returns max upload size. #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] #[tracing::instrument(skip(db))] pub async fn get_media_config_route( @@ -23,6 +26,12 @@ pub async fn get_media_config_route( .into()) } +/// # `POST /_matrix/media/r0/upload` +/// +/// Permanently save media in the server. 
+/// +/// - Some metadata will be saved in the database +/// - Media will be saved in the media/ directory #[cfg_attr( feature = "conduit_bin", post("/_matrix/media/r0/upload", data = "") @@ -61,6 +70,11 @@ pub async fn create_content_route( .into()) } +/// # `POST /_matrix/media/r0/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. +/// +/// - Only allows federation if `allow_remote` is true #[cfg_attr( feature = "conduit_bin", get("/_matrix/media/r0/download/<_>/<_>", data = "") @@ -114,6 +128,11 @@ pub async fn get_content_route( } } +/// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. +/// +/// - Only allows federation if `allow_remote` is true #[cfg_attr( feature = "conduit_bin", get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0a7ca81..c88e0a8 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -38,6 +38,12 @@ use tracing::{debug, error, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; +/// # `POST /_matrix/client/r0/rooms/{roomId}/join` +/// +/// Tries to join the sender user into a room. +/// +/// - If the server knowns about this room: creates the join event and does auth rules locally +/// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/join", data = "") @@ -79,6 +85,12 @@ pub async fn join_room_by_id_route( ret } +/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` +/// +/// Tries to join the sender user into a room. +/// +/// - If the server knowns about this room: creates the join event and does auth rules locally +/// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/join/<_>", data = "") @@ -133,6 +145,11 @@ pub async fn join_room_by_id_or_alias_route( .into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/leave` +/// +/// Tries to leave the sender user from a room. +/// +/// - This should always work if the user is currently joined. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/leave", data = "") @@ -151,6 +168,9 @@ pub async fn leave_room_route( Ok(leave_room::Response::new().into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` +/// +/// Tries to send an invite event into the room. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/invite", data = "") @@ -171,6 +191,9 @@ pub async fn invite_user_route( } } +/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` +/// +/// Tries to send a kick event into the room. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/kick", data = "") @@ -234,6 +257,9 @@ pub async fn kick_user_route( Ok(kick_user::Response::new().into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` +/// +/// Tries to send a ban event into the room. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/ban", data = "") @@ -307,6 +333,9 @@ pub async fn ban_user_route( Ok(ban_user::Response::new().into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` +/// +/// Tries to send an unban event into the room. 
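// A minimal, std-only sketch of the membership transitions the kick/ban/unban
// endpoints above and below perform: each route just sends a new m.room.member
// state event for the target user, and the room's auth rules decide whether the
// transition is allowed. The names (`Membership`, `Action`, `apply`) are
// illustrative, not Conduit's types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Membership {
    Leave,
    Ban,
}

#[derive(Clone, Copy, Debug)]
enum Action {
    Kick,
    Ban,
    Unban,
}

fn apply(action: Action) -> Membership {
    match action {
        Action::Kick => Membership::Leave,  // kicked users end up in `leave`
        Action::Ban => Membership::Ban,     // banned users stay in `ban` until unbanned
        Action::Unban => Membership::Leave, // unban moves them back to `leave`, not `join`
    }
}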
#[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/unban", data = "") @@ -369,6 +398,14 @@ pub async fn unban_user_route( Ok(unban_user::Response::new().into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/forget` +/// +/// Forgets about a room. +/// +/// - If the sender user currently left the room: Stops sender user from receiving information about the room +/// +/// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to +/// be called from every device #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/forget", data = "") @@ -387,6 +424,9 @@ pub async fn forget_room_route( Ok(forget_room::Response::new().into()) } +/// # `POST /_matrix/client/r0/joined_rooms` +/// +/// Lists all rooms the user has joined. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/joined_rooms", data = "") @@ -408,6 +448,11 @@ pub async fn joined_rooms_route( .into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/members` +/// +/// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). +/// +/// - Only works if the user is currently joined #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/members", data = "") @@ -419,6 +464,7 @@ pub async fn get_member_events_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // TODO: check history visibility? if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -438,6 +484,12 @@ pub async fn get_member_events_route( .into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` +/// +/// Lists all members of a room. +/// +/// - The sender user must be in the room +/// - TODO: An appservice just needs a puppet joined #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") @@ -449,11 +501,7 @@ pub async fn joined_members_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db - .rooms - .is_joined(&sender_user, &body.room_id) - .unwrap_or(false) - { + if !db.rooms.is_joined(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -803,7 +851,7 @@ async fn validate_and_add_event_id( Ok((event_id, value)) } -pub async fn invite_helper<'a>( +pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 70cc00f..78008a5 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -16,6 +16,13 @@ use std::{ #[cfg(feature = "conduit_bin")] use rocket::{get, put}; +/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` +/// +/// Send a message event into the room. +/// +/// - Is a NOOP if the txn id was already used before and returns the same event id again +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is allowed #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") @@ -92,6 +99,12 @@ pub async fn send_message_event_route( Ok(send_message_event::Response::new(event_id).into()) } +/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` +/// +/// Allows paginating through room history. 
+/// +/// - Only works if the user is joined (TODO: always allow, but only show events where the user was +/// joined, depending on history_visibility) #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/messages", data = "") diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 040015d..e0c340f 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -71,6 +71,9 @@ pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; +/// # `OPTIONS` +/// +/// Web clients use this to get CORS headers. #[cfg(feature = "conduit_bin")] #[options("/<_..>")] #[tracing::instrument] diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 7312cb3..54eb210 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -5,6 +5,9 @@ use std::{convert::TryInto, time::Duration}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; +/// # `PUT /_matrix/client/r0/presence/{userId}/status` +/// +/// Sets the presence state of the sender user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/presence/<_>/status", data = "") @@ -46,6 +49,11 @@ pub async fn set_presence_route( Ok(set_presence::Response {}.into()) } +/// # `GET /_matrix/client/r0/presence/{userId}/status` +/// +/// Gets the presence state of the given user. +/// +/// - Only works if you share a room with the user #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/presence/<_>/status", data = "") @@ -71,6 +79,7 @@ pub async fn get_presence_route( .get_last_presence_event(&sender_user, &room_id)? { presence_event = Some(presence); + break; } } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index de1baba..e2a2d6c 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -17,6 +17,11 @@ use std::{convert::TryInto, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; +/// # `PUT /_matrix/client/r0/profile/{userId}/displayname` +/// +/// Updates the displayname. +/// +/// - Also makes sure other users receive the update using presence EDUs #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/displayname", data = "") @@ -115,6 +120,11 @@ pub async fn set_displayname_route( Ok(set_display_name::Response {}.into()) } +/// # `GET /_matrix/client/r0/profile/{userId}/displayname` +/// +/// Returns the displayname of the user. +/// +/// - If user is on another server: Fetches displayname over federation #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/displayname", data = "") @@ -149,6 +159,11 @@ pub async fn get_displayname_route( .into()) } +/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url` +/// +/// Updates the avatar_url and blurhash. +/// +/// - Also makes sure other users receive the update using presence EDUs #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") @@ -249,6 +264,11 @@ pub async fn set_avatar_url_route( Ok(set_avatar_url::Response {}.into()) } +/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` +/// +/// Returns the avatar_url and blurhash of the user. 
+/// +/// - If user is on another server: Fetches avatar_url and blurhash over federation #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") @@ -285,6 +305,11 @@ pub async fn get_avatar_url_route( .into()) } +/// # `GET /_matrix/client/r0/profile/{userId}` +/// +/// Returns the displayname, avatar_url and blurhash of the user. +/// +/// - If user is on another server: Fetches profile over federation #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>", data = "") diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 9489f07..4e4611b 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -15,6 +15,9 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::{delete, get, post, put}; +/// # `GET /_matrix/client/r0/pushrules` +/// +/// Retrieves the push rules event for this user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/pushrules", data = "") @@ -40,6 +43,9 @@ pub async fn get_pushrules_all_route( .into()) } +/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Retrieves a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") @@ -94,6 +100,9 @@ pub async fn get_pushrule_route( } } +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Creates a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") @@ -197,6 +206,9 @@ pub async fn set_pushrule_route( Ok(set_pushrule::Response {}.into()) } +/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// +/// Gets the actions of a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") @@ -256,6 +268,9 @@ pub async fn get_pushrule_actions_route( .into()) } +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// +/// Sets the actions of a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") @@ -330,6 +345,9 @@ pub async fn set_pushrule_actions_route( Ok(set_pushrule_actions::Response {}.into()) } +/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// +/// Gets the enabled status of a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") @@ -391,6 +409,9 @@ pub async fn get_pushrule_enabled_route( Ok(get_pushrule_enabled::Response { enabled }.into()) } +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// +/// Sets the enabled status of a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") @@ -470,6 +491,9 @@ pub async fn set_pushrule_enabled_route( Ok(set_pushrule_enabled::Response {}.into()) } +/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Deletes a single specified push rule for this user. #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") @@ -539,6 +563,9 @@ pub async fn delete_pushrule_route( Ok(delete_pushrule::Response {}.into()) } +/// # `GET /_matrix/client/r0/pushers` +/// +/// Gets all currently active pushers for the sender user. 
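// Sketch of how the push-rule routes above address a single rule: every rule
// lives under `{scope}/{kind}/{ruleId}`, where `scope` is normally "global"
// and `kind` is one of override/content/room/sender/underride. This std-only
// model only illustrates that addressing scheme; the names (`PushRules`,
// `set_enabled`) are not Conduit's types.
use std::collections::BTreeMap;

#[derive(Default)]
struct RuleSet {
    // rule id -> enabled flag (real rules also carry conditions and actions)
    rules: BTreeMap<String, bool>,
}

#[derive(Default)]
struct PushRules {
    // kind ("override", "content", "room", "sender", "underride") -> rule set
    kinds: BTreeMap<String, RuleSet>,
}

fn set_enabled(
    scopes: &mut BTreeMap<String, PushRules>,
    scope: &str,
    kind: &str,
    rule_id: &str,
    enabled: bool,
) {
    scopes
        .entry(scope.to_owned())
        .or_default()
        .kinds
        .entry(kind.to_owned())
        .or_default()
        .rules
        .insert(rule_id.to_owned(), enabled);
}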
#[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/pushers", data = "") @@ -556,6 +583,11 @@ pub async fn get_pushers_route( .into()) } +/// # `POST /_matrix/client/r0/pushers/set` +/// +/// Adds a pusher for the sender user. +/// +/// - TODO: Handle `append` #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/pushers/set", data = "") diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 85b0bf6..10298b9 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -13,6 +13,12 @@ use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::post; +/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` +/// +/// Sets different types of read markers. +/// +/// - Updates fully-read account data event to `fully_read` +/// - If `read_receipt` is set: Update private marker and public read receipt EDU #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") @@ -80,6 +86,9 @@ pub async fn set_read_marker_route( Ok(set_read_marker::Response {}.into()) } +/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` +/// +/// Sets private read marker and public read receipt EDU. #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 63bf103..6d3e33c 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -9,6 +9,11 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::put; +/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` +/// +/// Tries to send a redaction event into the room. +/// +/// - TODO: Handle txn id #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 6981afc..4ae8a3f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -20,6 +20,22 @@ use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; +/// # `POST /_matrix/client/r0/createRoom` +/// +/// Creates a new room. +/// +/// - Room ID is randomly generated +/// - Create alias if room_alias_name is set +/// - Send create event +/// - Join sender user +/// - Send power levels event +/// - Send canonical room alias +/// - Send join rules +/// - Send history visibility +/// - Send guest access +/// - Send events listed in initial state +/// - Send events implied by `name` and `topic` +/// - Send invite events #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/createRoom", data = "") @@ -344,6 +360,11 @@ pub async fn create_room_route( Ok(create_room::Response::new(room_id).into()) } +/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` +/// +/// Gets a single event. +/// +/// - You have to currently be joined to the room (TODO: Respect history visibility) #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") @@ -372,6 +393,11 @@ pub async fn get_room_event_route( .into()) } +/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` +/// +/// Lists all aliases of the room. 
+/// +/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/aliases", data = "") @@ -400,6 +426,16 @@ pub async fn get_room_aliases_route( .into()) } +/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade` +/// +/// Upgrades the room. +/// +/// - Creates a replacement room +/// - Sends a tombstone event into the current room +/// - Sender user joins the room +/// - Transfers some state events +/// - Moves local aliases +/// - Modifies old room power levels to prevent users from speaking #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/upgrade", data = "") diff --git a/src/client_server/search.rs b/src/client_server/search.rs index ec23dd4..cbd4ed7 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -6,6 +6,11 @@ use rocket::post; use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; +/// # `POST /_matrix/client/r0/search` +/// +/// Searches rooms for messages. +/// +/// - Only works if the user is currently joined to the room (TODO: Respect history visibility) #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/search", data = "") diff --git a/src/client_server/session.rs b/src/client_server/session.rs index dada2d5..9472627 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -24,7 +24,7 @@ use rocket::{get, post}; /// # `GET /_matrix/client/r0/login` /// -/// Get the homeserver's supported login types. One of these should be used as the `type` field +/// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] #[tracing::instrument] @@ -41,9 +41,10 @@ pub async fn get_login_types_route() -> ConduitResult /// /// Authenticates the user and returns an access token it can use in subsequent requests. /// -/// - The returned access token is associated with the user and device -/// - Old access tokens of that device should be invalidated -/// - If `device_id` is unknown, a new device will be created +/// - The user needs to authenticate using their password (or if enabled using a json web token) +/// - If `device_id` is known: invalidates old access token of that device +/// - If `device_id` is unknown: creates a new device +/// - Returns access token that is associated with the user and device /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. @@ -162,8 +163,10 @@ pub async fn login_route( /// /// Log out the current device. /// -/// - Invalidates the access token -/// - Deletes the device and most of it's data (to-device events, last seen, etc.) +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") @@ -188,7 +191,9 @@ pub async fn logout_route( /// Log out all devices of this user. /// /// - Invalidates all access tokens -/// - Deletes devices and most of their data (to-device events, last seen, etc.) 
+/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets all to-device events +/// - Triggers device list updates /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. diff --git a/src/client_server/state.rs b/src/client_server/state.rs index aa020b5..3555353 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -22,6 +22,13 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::{get, put}; +/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` +/// +/// Sends a state event into the room. +/// +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is allowed +/// - If event is new canonical_alias: Rejects if alias is incorrect #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") @@ -48,6 +55,13 @@ pub async fn send_state_event_for_key_route( Ok(send_state_event::Response { event_id }.into()) } +/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` +/// +/// Sends a state event into the room. +/// +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is allowed +/// - If event is new canonical_alias: Rejects if alias is incorrect #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") @@ -74,6 +88,11 @@ pub async fn send_state_event_for_empty_key_route( Ok(send_state_event::Response { event_id }.into()) } +/// # `GET /_matrix/client/r0/rooms/{roomid}/state` +/// +/// Get all state events for a room. +/// +/// - If not joined: Only works if current room history visibility is world readable #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state", data = "") @@ -121,6 +140,11 @@ pub async fn get_state_events_route( .into()) } +/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}` +/// +/// Get single state event of a room. +/// +/// - If not joined: Only works if current room history visibility is world readable #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") @@ -172,6 +196,11 @@ pub async fn get_state_events_for_key_route( .into()) } +/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}` +/// +/// Get single state event of a room. +/// +/// - If not joined: Only works if current room history visibility is world readable #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") @@ -223,7 +252,7 @@ pub async fn get_state_events_for_empty_key_route( .into()) } -pub async fn send_state_event_for_key_helper( +async fn send_state_event_for_key_helper( db: &Database, sender: &UserId, room_id: &RoomId, @@ -233,6 +262,8 @@ pub async fn send_state_event_for_key_helper( ) -> Result { let sender_user = sender; + // TODO: Review this check, error if event is unparsable, use event type, allow alias if it + // previously existed if let Ok(canonical_alias) = serde_json::from_str::(json.json().get()) { diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index f7f2454..6612e2f 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -22,12 +22,33 @@ use rocket::{get, tokio}; /// Synchronize the client's state with the latest state on the server. 
/// /// - This endpoint takes a `since` parameter which should be the `next_batch` value from a -/// previous request. -/// - Calling this endpoint without a `since` parameter will return all recent events, the state -/// of all rooms and more data. This should only be called on the initial login of the device. -/// - To get incremental updates, you can call this endpoint with a `since` parameter. This will -/// return all recent events, state updates and more data that happened since the last /sync -/// request. +/// previous request for incremental syncs. +/// +/// Calling this endpoint without a `since` parameter returns: +/// - Some of the most recent events of each timeline +/// - Notification counts for each room +/// - Joined and invited member counts, heroes +/// - All state events +/// +/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: +/// For joined rooms: +/// - Some of the most recent events of each timeline that happened after since +/// - If user joined the room after since: All state events and device list updates in that room +/// - If the user was already in the room: A list of all events that are in the state now, but were +/// not in the state at `since` +/// - If the state we send contains a member event: Joined and invited member counts, heroes +/// - Device list updates that happened after `since` +/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts +/// - EDUs that are active now (read receipts, typing updates, presence) +/// +/// For invited rooms: +/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite +/// +/// For left rooms: +/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave) +/// +/// - Sync is handled in an async task, multiple requests from the same device with the same +/// `since` will be cached #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/sync", data = "") @@ -106,7 +127,7 @@ pub async fn sync_events_route( result } -pub async fn sync_helper_wrapper( +async fn sync_helper_wrapper( db: Arc, sender_user: UserId, sender_device: Box, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 5582bcd..1eb508c 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -8,6 +8,11 @@ use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] use rocket::{delete, get, put}; +/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Adds a tag to the room. +/// +/// - Inserts the tag into the tag event of the room account data. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") @@ -45,6 +50,11 @@ pub async fn update_tag_route( Ok(create_tag::Response {}.into()) } +/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Deletes a tag from the room. +/// +/// - Removes the tag from the tag event of the room account data. #[cfg_attr( feature = "conduit_bin", delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") @@ -79,6 +89,11 @@ pub async fn delete_tag_route( Ok(delete_tag::Response {}.into()) } +/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` +/// +/// Returns tags on the room. +/// +/// - Gets the tag event of the room account data. 
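// Sketch of the `m.tag` room account-data content the tag routes above edit:
// a map from tag name ("m.favourite", "u.work", ...) to an optional `order`
// used for sorting. std-only and illustrative of the data shape only; the
// names (`TagEventContent`, `update_tag`, `delete_tag`) are not Conduit's types.
use std::collections::BTreeMap;

#[derive(Debug, Default)]
struct TagEventContent {
    // tag name -> optional order, usually a float between 0.0 and 1.0
    tags: BTreeMap<String, Option<f64>>,
}

fn update_tag(content: &mut TagEventContent, tag: &str, order: Option<f64>) {
    // PUT .../tags/{tag}: insert or overwrite the tag
    content.tags.insert(tag.to_owned(), order);
}

fn delete_tag(content: &mut TagEventContent, tag: &str) {
    // DELETE .../tags/{tag}: remove the tag if present
    content.tags.remove(tag);
}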
#[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 5d3c540..4305902 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -5,6 +5,9 @@ use ruma::api::client::r0::thirdparty::get_protocols; use rocket::get; use std::collections::BTreeMap; +/// # `GET /_matrix/client/r0/thirdparty/protocols` +/// +/// TODO: Fetches all metadata about protocols supported by the homeserver. #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/thirdparty/protocols") diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index cd770bd..bf2caef 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -13,6 +13,9 @@ use ruma::{ #[cfg(feature = "conduit_bin")] use rocket::put; +/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` +/// +/// Send a to-device event to a set of client devices. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 50082ee..4cf4bb1 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -5,6 +5,9 @@ use ruma::api::client::r0::typing::create_typing_event; #[cfg(feature = "conduit_bin")] use rocket::put; +/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` +/// +/// Sets the typing state of the sender user. #[cfg_attr( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index d25dce6..f2624bb 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -10,7 +10,7 @@ use rocket::get; /// /// - Versions take the form MAJOR.MINOR.PATCH /// - Only the latest PATCH release will be reported for each MAJOR.MINOR value -/// - Unstable features should be namespaced and may include version information in their name +/// - Unstable features are namespaced and may include version information in their name /// /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index a09d527..cfcb9bb 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -4,6 +4,11 @@ use ruma::api::client::r0::user_directory::search_users; #[cfg(feature = "conduit_bin")] use rocket::post; +/// # `POST /_matrix/client/r0/user_directory/search` +/// +/// Searches all known users for a match. +/// +/// - TODO: Hide users that are not in any public rooms? #[cfg_attr( feature = "conduit_bin", post("/_matrix/client/r0/user_directory/search", data = "") diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 7924a7f..2a7f28e 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -5,6 +5,9 @@ use std::time::Duration; #[cfg(feature = "conduit_bin")] use rocket::get; +/// # `GET /_matrix/client/r0/voip/turnServer` +/// +/// TODO: Returns information about the recommended turn server. 
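// Sketch of the response shape `/voip/turnServer` would produce once the TODO
// above is implemented: TURN uris plus short-lived credentials. The struct is
// illustrative only, not the real ruma response type; the field comments
// describe the common coturn-style credential scheme, which is an assumption here.
struct TurnServerInfo {
    username: String,  // e.g. a time-limited "<expiry>:<user id>" username
    password: String,  // e.g. an HMAC over the username with a shared secret
    uris: Vec<String>, // e.g. "turn:turn.example.org?transport=udp"
    ttl_seconds: u64,  // how long the credentials remain valid
}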
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] #[tracing::instrument] pub async fn turn_server_route() -> ConduitResult { diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 2bb3b6d..3315be3 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -84,6 +84,27 @@ impl KeyBackups { Ok(version.to_string()) } + pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.backupid_algorithm + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map_or(Ok(None), |(key, _)| { + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) + .map(Some) + }) + } + pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 59ed950..4b47454 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1529,19 +1529,35 @@ impl Rooms { "get_auth_chain" => { if args.len() == 1 { if let Ok(event_id) = EventId::try_from(args[0]) { - let start = Instant::now(); - let count = server_server::get_auth_chain( - vec![Arc::new(event_id)], - db, - )? - .count(); - let elapsed = start.elapsed(); - db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain(format!( + if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| { + Error::bad_database( + "Invalid event in database", + ) + })?; + + let room_id = RoomId::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let start = Instant::now(); + let count = server_server::get_auth_chain( + &room_id, + vec![Arc::new(event_id)], + db, + )? + .count(); + let elapsed = start.elapsed(); + db.admin.send(AdminCommand::SendMessage( + message::MessageEventContent::text_plain( + format!( "Loaded auth chain with length {} in {:?}", count, elapsed - )), - )); + ), + ), + )); + } } } } @@ -3083,6 +3099,15 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] + pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.serverroomids.get(&key).map(|o| o.is_some()) + } + /// Returns an iterator of all rooms a server participates in (as far as we know). 
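// Sketch of the composite-key convention behind `server_in_room` above and
// most other lookups in the database layer: key components are joined with a
// 0xff separator byte, so a membership check is a single point lookup in the
// `serverroomids` tree and "all rooms of a server" is a prefix scan. std-only
// illustration of the key layout.
fn server_room_key(server_name: &str, room_id: &str) -> Vec<u8> {
    let mut key = server_name.as_bytes().to_vec();
    key.push(0xff); // separator; server names and room ids never contain 0xff
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn server_prefix(server_name: &str) -> Vec<u8> {
    // used as the scan prefix when iterating all rooms of one server
    let mut prefix = server_name.as_bytes().to_vec();
    prefix.push(0xff);
    prefix
}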
#[tracing::instrument(skip(self))] pub fn server_rooms<'a>( diff --git a/src/server_server.rs b/src/server_server.rs index b965fcf..dee92e8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -119,7 +119,7 @@ impl FedDest { } #[tracing::instrument(skip(globals, request))] -pub async fn send_request( +pub(crate) async fn send_request( globals: &crate::database::globals::Globals, destination: &ServerName, request: T, @@ -487,7 +487,7 @@ async fn query_srv_record( } #[tracing::instrument(skip(globals))] -pub async fn request_well_known( +async fn request_well_known( globals: &crate::database::globals::Globals, destination: &str, ) -> Option { @@ -512,6 +512,9 @@ pub async fn request_well_known( Some(body.get("m.server")?.as_str()?.to_owned()) } +/// # `GET /_matrix/federation/v1/version` +/// +/// Get version information on this server. #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] #[tracing::instrument(skip(db))] pub fn get_server_version_route( @@ -530,6 +533,12 @@ pub fn get_server_version_route( .into()) } +/// # `GET /_matrix/key/v2/server` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. // Response type for this endpoint is Json because we need to calculate a signature for the response #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] @@ -578,12 +587,21 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { Json(serde_json::to_string(&response).expect("JSON is canonical")) } +/// # `GET /_matrix/key/v2/server/{keyId}` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[tracing::instrument(skip(db))] pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json { get_server_keys_route(db) } +/// # `POST /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/publicRooms", data = "") @@ -628,6 +646,9 @@ pub async fn get_public_rooms_filtered_route( .into()) } +/// # `GET /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/publicRooms", data = "") @@ -672,6 +693,9 @@ pub async fn get_public_rooms_route( .into()) } +/// # `PUT /_matrix/federation/v1/send/{txnId}` +/// +/// Push EDUs and PDUs to this server. #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -921,7 +945,7 @@ type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub async fn handle_incoming_pdu<'a>( +pub(crate) async fn handle_incoming_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, @@ -1397,9 +1421,13 @@ async fn upgrade_outlier_to_timeline_pdu( let mut auth_chain_sets = Vec::new(); for state in fork_states { auth_chain_sets.push( - get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), + get_auth_chain( + &room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } @@ -1745,9 +1773,13 @@ async fn upgrade_outlier_to_timeline_pdu( let mut auth_chain_sets = Vec::new(); for state in fork_states { auth_chain_sets.push( - get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), + get_auth_chain( + &room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } @@ -2187,10 +2219,11 @@ fn append_incoming_pdu( } #[tracing::instrument(skip(starting_events, db))] -pub fn get_auth_chain( +pub(crate) fn get_auth_chain<'a>( + room_id: &RoomId, starting_events: Vec>, - db: &Database, -) -> Result> + '_> { + db: &'a Database, +) -> Result> + 'a> { const NUM_BUCKETS: usize = 50; let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; @@ -2231,7 +2264,7 @@ pub fn get_auth_chain( chunk_cache.extend(cached.iter().cloned()); } else { misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(&event_id, db)?); + let auth_chain = Arc::new(get_auth_chain_inner(&room_id, &event_id, db)?); db.rooms .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( @@ -2267,13 +2300,20 @@ pub fn get_auth_chain( } #[tracing::instrument(skip(event_id, db))] -fn get_auth_chain_inner(event_id: &EventId, db: &Database) -> Result> { +fn get_auth_chain_inner( + room_id: &RoomId, + event_id: &EventId, + db: &Database, +) -> Result> { let mut todo = vec![event_id.clone()]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { match db.rooms.get_pdu(&event_id) { Ok(Some(pdu)) => { + if &pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + } for auth_event in &pdu.auth_events { let sauthevent = db .rooms @@ -2297,6 +2337,11 @@ fn get_auth_chain_inner(event_id: &EventId, db: &Database) -> Result", data = "") @@ -2310,18 +2355,39 @@ pub fn get_event_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = RoomId::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !db.rooms.server_in_room(sender_servername, &room_id)? { + return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); + } + Ok(get_event::v1::Response { origin: db.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: PduEvent::convert_to_outgoing_federation_event( - db.rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, - ), + pdu: PduEvent::convert_to_outgoing_federation_event(event), } .into()) } +/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` +/// +/// Retrieves events that the sender is missing. 
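// Sketch of the walk `get_missing_events` performs: starting from
// `latest_events`, follow `prev_events` backwards, skip anything listed in
// `earliest_events`, and stop once `limit` events have been collected.
// std-only and simplified: no database access and none of the room-id
// validation the real route below adds.
use std::collections::{HashMap, HashSet};

fn missing_events(
    prev_events: &HashMap<String, Vec<String>>, // event id -> its prev_events
    latest_events: &[String],
    earliest_events: &HashSet<String>,
    limit: usize,
) -> Vec<String> {
    let mut queued = latest_events.to_vec();
    let mut events = Vec::new();
    let mut i = 0;
    while i < queued.len() && events.len() < limit {
        let event_id = queued[i].clone();
        i += 1;
        if earliest_events.contains(&event_id) {
            continue;
        }
        if let Some(parents) = prev_events.get(&event_id) {
            // only events we actually know about are returned and expanded
            queued.extend(parents.iter().cloned());
            events.push(event_id);
        }
    }
    events
}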
#[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") @@ -2335,22 +2401,44 @@ pub fn get_missing_events_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - let event_id = - serde_json::from_value( - serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no event_id field.") - })?) - .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?; + let room_id_str = pdu + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - if body.earliest_events.contains(&event_id) { + let event_room_id = RoomId::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if event_room_id != body.room_id { + warn!( + "Evil event detected: Event {} found while searching in room {}", + queued_events[i], body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Evil event detected", + )); + } + + if body.earliest_events.contains(&queued_events[i]) { i += 1; continue; } @@ -2371,6 +2459,11 @@ pub fn get_missing_events_route( Ok(get_missing_events::v1::Response { events }.into()) } +/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` +/// +/// Retrieves the auth chain for a given event. +/// +/// - This does not include the event itself #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/event_auth/<_>/<_>", data = "") @@ -2384,7 +2477,29 @@ pub fn get_event_authorization_route( return Err(Error::bad_config("Federation is disabled.")); } - let auth_chain_ids = get_auth_chain(vec![Arc::new(body.event_id.clone())], &db)?; + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = RoomId::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !db.rooms.server_in_room(sender_servername, &room_id)? { + return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); + } + + let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2395,6 +2510,9 @@ pub fn get_event_authorization_route( .into()) } +/// # `GET /_matrix/federation/v1/state/{roomId}` +/// +/// Retrieves the current state of the room. 
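// Sketch of the auth-chain collection that the event_auth and state responses
// rely on: starting from an event, repeatedly follow `auth_events` and collect
// everything reached (the starting event itself is not part of the result).
// std-only and simplified; the real `get_auth_chain` additionally works on
// short event ids, splits the starting set into buckets and caches the chain
// per bucket.
use std::collections::{HashMap, HashSet};

fn auth_chain(auth_events: &HashMap<String, Vec<String>>, start: &str) -> HashSet<String> {
    let mut todo = vec![start.to_owned()];
    let mut found = HashSet::new();
    while let Some(event_id) = todo.pop() {
        for auth_event in auth_events.get(&event_id).into_iter().flatten() {
            if found.insert(auth_event.clone()) {
                // only recurse into events we have not seen yet
                todo.push(auth_event.clone());
            }
        }
    }
    found
}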
#[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/state/<_>", data = "") @@ -2408,6 +2526,18 @@ pub fn get_room_state_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2427,7 +2557,7 @@ pub fn get_room_state_route( }) .collect(); - let auth_chain_ids = get_auth_chain(vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -2443,6 +2573,9 @@ pub fn get_room_state_route( .into()) } +/// # `GET /_matrix/federation/v1/state_ids/{roomId}` +/// +/// Retrieves the current state of the room. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/state_ids/<_>", data = "") @@ -2456,6 +2589,18 @@ pub fn get_room_state_ids_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2471,7 +2616,7 @@ pub fn get_room_state_ids_route( .map(|(_, id)| (*id).clone()) .collect(); - let auth_chain_ids = get_auth_chain(vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), @@ -2480,6 +2625,9 @@ pub fn get_room_state_ids_route( .into()) } +/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` +/// +/// Creates a join template. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/make_join/<_>/<_>", data = "") @@ -2719,7 +2867,11 @@ async fn create_join_event( drop(mutex_lock); let state_ids = db.rooms.state_full_ids(shortstatehash)?; - let auth_chain_ids = get_auth_chain(state_ids.iter().map(|(_, id)| id.clone()).collect(), &db)?; + let auth_chain_ids = get_auth_chain( + &room_id, + state_ids.iter().map(|(_, id)| id.clone()).collect(), + &db, + )?; for server in db .rooms @@ -2745,6 +2897,9 @@ async fn create_join_event( }) } +/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send_join/<_>/<_>", data = "") @@ -2759,6 +2914,9 @@ pub async fn create_join_event_v1_route( Ok(create_join_event::v1::Response { room_state }.into()) } +/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") @@ -2773,6 +2931,9 @@ pub async fn create_join_event_v2_route( Ok(create_join_event::v2::Response { room_state }.into()) } +/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` +/// +/// Invites a remote user to a room. 
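// Sketch of what the v2 invite exchange below does with signatures: the
// inviting server sends a partially signed invite event, and the invited
// user's server adds its own signature before returning the event so it can
// be sent into the room. std-only illustration of the signature map being
// extended; the names are not Conduit's types.
use std::collections::BTreeMap;

// server name -> (key id -> base64 signature)
type Signatures = BTreeMap<String, BTreeMap<String, String>>;

fn add_server_signature(signatures: &mut Signatures, server: &str, key_id: &str, signature: &str) {
    signatures
        .entry(server.to_owned())
        .or_default()
        .insert(key_id.to_owned(), signature.to_owned());
}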
#[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v2/invite/<_>/<_>", data = "") @@ -2882,6 +3043,9 @@ pub async fn create_invite_route( .into()) } +/// # `GET /_matrix/federation/v1/user/devices/{userId}` +/// +/// Gets information on all devices of the user. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/user/devices/<_>", data = "") @@ -2922,6 +3086,9 @@ pub fn get_devices_route( .into()) } +/// # `GET /_matrix/federation/v1/query/directory` +/// +/// Resolve a room alias to a room id. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/directory", data = "") @@ -2950,6 +3117,9 @@ pub fn get_room_information_route( .into()) } +/// # `GET /_matrix/federation/v1/query/profile` +/// +/// Gets information on a profile. #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") @@ -2990,6 +3160,9 @@ pub fn get_profile_information_route( .into()) } +/// # `POST /_matrix/federation/v1/user/keys/query` +/// +/// Gets devices and identity keys for the given users. #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/user/keys/query", data = "") @@ -3021,6 +3194,9 @@ pub async fn get_keys_route( .into()) } +/// # `POST /_matrix/federation/v1/user/keys/claim` +/// +/// Claims one-time keys. #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/user/keys/claim", data = "") @@ -3045,7 +3221,7 @@ pub async fn claim_keys_route( } #[tracing::instrument(skip(event, pub_key_map, db))] -pub async fn fetch_required_signing_keys( +pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, db: &Database, From a6bb9bbe68dd023e2299b37eba13ad0df256a671 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 31 Aug 2021 21:20:03 +0200 Subject: [PATCH 0778/1727] Fix a bunch of clippy lints --- src/client_server/directory.rs | 3 +- src/client_server/profile.rs | 26 +++---- src/client_server/sync.rs | 2 +- src/database.rs | 6 +- src/database/abstraction/sqlite.rs | 4 +- src/database/account_data.rs | 2 +- src/database/rooms.rs | 110 +++++++++++++---------------- src/server_server.rs | 9 +-- 8 files changed, 77 insertions(+), 85 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 589aacd..e913da0 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -233,8 +233,7 @@ pub async fn get_public_rooms_filtered_helper( .map_err(|_| { Error::bad_database("Invalid room name event in database.") })? - .name - .map(|n| n.to_owned().into())) + .name) })?, num_joined_members: db .rooms diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index de1baba..1ffb81c 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -32,10 +32,9 @@ pub async fn set_displayname_route( .set_displayname(&sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - let all_rooms_joined = db.rooms.rooms_joined(&sender_user).collect::>(); - - for (pdu_builder, room_id) in all_rooms_joined - .into_iter() + let all_rooms_joined: Vec<_> = db + .rooms + .rooms_joined(&sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -53,7 +52,7 @@ pub async fn set_displayname_route( .ok_or_else(|| { Error::bad_database( "Tried to send displayname update for user not in the \ - room.", + room.", ) })? 
.content @@ -72,7 +71,9 @@ pub async fn set_displayname_route( )) }) .filter_map(|r| r.ok()) - { + .collect(); + + for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( db.globals .roomid_mutex_state @@ -166,10 +167,9 @@ pub async fn set_avatar_url_route( db.users.set_blurhash(&sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms - let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); - - for (pdu_builder, room_id) in all_joined_rooms - .into_iter() + let all_joined_rooms: Vec<_> = db + .rooms + .rooms_joined(&sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -187,7 +187,7 @@ pub async fn set_avatar_url_route( .ok_or_else(|| { Error::bad_database( "Tried to send displayname update for user not in the \ - room.", + room.", ) })? .content @@ -206,7 +206,9 @@ pub async fn set_avatar_url_route( )) }) .filter_map(|r| r.ok()) - { + .collect(); + + for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( db.globals .roomid_mutex_state diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index f7f2454..d3470d9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -335,7 +335,7 @@ async fn sync_helper( true, state_events, ) - } else if timeline_pdus.len() == 0 && since_shortstatehash == Some(current_shortstatehash) { + } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { // No state changes (Vec::new(), None, None, false, Vec::new()) } else { diff --git a/src/database.rs b/src/database.rs index 79571f6..7abddbb 100644 --- a/src/database.rs +++ b/src/database.rs @@ -644,7 +644,7 @@ impl Database { if db.globals.database_version()? < 9 { // Update tokenids db layout - let batch = db + let mut iter = db .rooms .tokenids .iter() @@ -672,9 +672,7 @@ impl Database { println!("new {:?}", new_key); Some((new_key, Vec::new())) }) - .collect::>(); - - let mut iter = batch.into_iter().peekable(); + .peekable(); while iter.peek().is_some() { db.rooms diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 99deeba..06e371e 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -65,12 +65,12 @@ impl Engine { self.writer.lock() } - fn read_lock<'a>(&'a self) -> &'a Connection { + fn read_lock(&self) -> &Connection { self.read_conn_tls .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) } - fn read_lock_iterator<'a>(&'a self) -> &'a Connection { + fn read_lock_iterator(&self) -> &Connection { self.read_iterator_conn_tls .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) } diff --git a/src/database/account_data.rs b/src/database/account_data.rs index e1d4c62..1a8ad76 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -40,7 +40,7 @@ impl AccountData { roomuserdataid.push(0xff); roomuserdataid.extend_from_slice(&event_type.as_bytes()); - let mut key = prefix.clone(); + let mut key = prefix; key.extend_from_slice(event_type.as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 59ed950..4d47694 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -385,7 +385,7 @@ impl Rooms { self.save_state_from_diff( new_shortstatehash, statediffnew.clone(), - statediffremoved.clone(), + statediffremoved, 2, // 
every state change is 2 event changes on average states_parents, )?; @@ -497,8 +497,7 @@ impl Rooms { Ok(response) } else { - let mut response = Vec::new(); - response.push((shortstatehash, added.clone(), added, removed)); + let response = vec![(shortstatehash, added.clone(), added, removed)]; self.stateinfo_cache .lock() .unwrap() @@ -609,7 +608,7 @@ impl Rooms { return Ok(()); } - if parent_states.len() == 0 { + if parent_states.is_empty() { // There is no parent layer, create a new state let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent for new in &statediffnew { @@ -689,7 +688,7 @@ impl Rooms { state_hash: &StateHashId, globals: &super::globals::Globals, ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(&state_hash)? { + Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( utils::u64_from_bytes(&shortstatehash) .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, @@ -698,7 +697,7 @@ impl Rooms { None => { let shortstatehash = globals.next_count()?; self.statehash_shortstatehash - .insert(&state_hash, &shortstatehash.to_be_bytes())?; + .insert(state_hash, &shortstatehash.to_be_bytes())?; (shortstatehash, false) } }) @@ -1768,8 +1767,8 @@ impl Rooms { }; self.save_state_from_diff( shortstatehash, - statediffnew.clone(), - statediffremoved.clone(), + statediffnew, + statediffremoved, 1_000_000, // high number because no state will be based on this one states_parents, )?; @@ -1914,15 +1913,14 @@ impl Rooms { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); - Ok(self - .roomsynctoken_shortstatehash + self.roomsynctoken_shortstatehash .get(&key)? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") }) }) - .transpose()?) + .transpose() } /// Creates a new persisted data unit and adds it to a room. 
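// Sketch of the layered state storage that `save_state_from_diff` above
// maintains: every room state snapshot is stored as a diff (added / removed
// entries) on top of a parent layer, and the full state is recovered by
// applying the diffs from the oldest layer to the newest. std-only and
// simplified; the real entries are compressed short ids rather than strings.
use std::collections::HashSet;

struct StateDiffLayer {
    added: HashSet<String>,
    removed: HashSet<String>,
}

fn resolve_full_state(layers: &[StateDiffLayer]) -> HashSet<String> {
    let mut state = HashSet::new();
    // `layers` is ordered root first, newest diff last
    for layer in layers {
        for removed in &layer.removed {
            state.remove(removed);
        }
        state.extend(layer.added.iter().cloned());
    }
    state
}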
@@ -2475,16 +2473,15 @@ impl Rooms { self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Leave | member::MembershipState::Ban => { - if update_joined_count { - if self + if update_joined_count + && self .room_members(room_id) .chain(self.room_members_invited(room_id)) .filter_map(|r| r.ok()) .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } + { + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; } self.userroomid_leftstate.insert( &userroom_id, @@ -2621,45 +2618,43 @@ impl Rooms { if let Some(b) = maybe { Ok(b) + } else if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }); + + let in_room = bridge_user_id + .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(&room_id).any(|userid| { + userid.map_or(false, |userid| { + users.iter().any(|r| r.is_match(userid.as_str())) + }) + }); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.clone()) + .or_default() + .insert(appservice.0.clone(), in_room); + + Ok(in_room) } else { - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(&room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.clone()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } + Ok(false) } } @@ -3452,10 +3447,7 @@ impl Rooms { } // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(key.clone(), chain); + self.auth_chain_cache.lock().unwrap().insert(key, chain); Ok(()) } diff --git a/src/server_server.rs b/src/server_server.rs index b965fcf..24e8e4b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -147,7 +147,7 @@ where let result = find_actual_destination(globals, &destination).await; - (result.0, result.1.clone().into_uri_string()) + (result.0, result.1.into_uri_string()) }; let actual_destination_str = actual_destination.clone().into_https_string(); @@ -1529,7 +1529,7 @@ async fn upgrade_outlier_to_timeline_pdu( None }; - if !state_res::event_auth::auth_check( + let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, previous_create.clone(), @@ -1543,8 +1543,9 @@ async fn upgrade_outlier_to_timeline_pdu( .and_then(|event_id| db.rooms.get_pdu(&event_id).ok().flatten()) }, ) - .map_err(|_e| "Auth check failed.".to_owned())? 
- { + .map_err(|_e| "Auth check failed.".to_owned())?; + + if !check_result { return Err("Event has failed auth check with state at the event.".into()); } debug!("Auth check succeeded."); From 33738dbbc2a01011ec9e2ad2dc02d5f5974b817f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 31 Aug 2021 22:04:47 +0200 Subject: [PATCH 0779/1727] improvement: stop prev event fetching if too many events fail --- src/server_server.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index dee92e8..e2b1344 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1084,7 +1084,11 @@ pub(crate) async fn handle_incoming_pdu<'a>( }) .map_err(|_| "Error sorting prev events".to_owned())?; + let mut errors = 0; for prev_id in dbg!(sorted) { + if errors >= 5 { + break; + } if let Some((pdu, json)) = eventid_info.remove(&prev_id) { let start_time = Instant::now(); let event_id = pdu.event_id.clone(); @@ -1099,6 +1103,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( ) .await { + errors += 1; warn!("Prev event {} failed: {}", event_id, e); } let elapsed = start_time.elapsed(); From 8c584887c93735e1035d59461c6c1d4b96c0d7d6 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Tue, 31 Aug 2021 23:17:32 +0200 Subject: [PATCH 0780/1727] Fix healthcheck.sh permissions and rearange ci dockerfile --- docker/ci-binaries-packaging.Dockerfile | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 1fe85bf..fb67439 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -9,6 +9,12 @@ FROM alpine:3.14 +# Install packages needed to run Conduit +RUN apk add --no-cache \ + ca-certificates \ + curl \ + libgcc + ARG CREATED ARG VERSION ARG GIT_REF @@ -36,6 +42,10 @@ EXPOSE 6167 # create data folder for database RUN mkdir -p /srv/conduit/.local/share/conduit +# Copy the Conduit binary into the image at the latest possible moment to maximise caching: +COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit +COPY ./docker/healthcheck.sh /srv/conduit/ + # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install RUN set -x ; \ @@ -45,12 +55,8 @@ RUN set -x ; \ # Change ownership of Conduit files to www-data user and group RUN chown -cR www-data:www-data /srv/conduit +RUN chmod +x /srv/conduit/healthcheck.sh -# Install packages needed to run Conduit -RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc # Test if Conduit is still alive, uses the same endpoint as Element HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh @@ -61,8 +67,3 @@ USER www-data WORKDIR /srv/conduit # Run Conduit ENTRYPOINT [ "/srv/conduit/conduit" ] - - -# Copy the Conduit binary into the image at the latest possible moment to maximise caching: -COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit -COPY ./docker/healthcheck.sh /srv/conduit/ From 73d876643cf6d94f97dbe2658279c67f89dc130f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 1 Sep 2021 11:03:12 +0200 Subject: [PATCH 0781/1727] improvement: make pdu cache capacity configurable --- src/database.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 79571f6..e9dd661 100644 --- a/src/database.rs +++ b/src/database.rs @@ -47,6 +47,8 @@ pub struct Config { 
database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_pdu_cache_capacity")] + pdu_cache_capacity: u32, #[serde(default = "default_sqlite_wal_clean_second_interval")] sqlite_wal_clean_second_interval: u32, #[serde(default = "default_max_request_size")] @@ -107,6 +109,10 @@ fn default_db_cache_capacity_mb() -> f64 { 200.0 } +fn default_pdu_cache_capacity() -> u32 { + 100_000 +} + fn default_sqlite_wal_clean_second_interval() -> u32 { 1 * 60 // every minute } @@ -281,7 +287,12 @@ impl Database { softfailedeventids: builder.open_tree("softfailedeventids")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new(100_000)), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), From a87519fb71875182bce79d3fea9d3f02150cab3f Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Wed, 25 Aug 2021 16:02:01 +0200 Subject: [PATCH 0782/1727] Get required keys in batch when joining a room We now ask the trusted server for all keys in 1 request, instead of asking each server individual for it's own keys. --- src/client_server/membership.rs | 36 +++--- src/server_server.rs | 206 +++++++++++++++++++++++++++++++- 2 files changed, 221 insertions(+), 21 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c88e0a8..52e074c 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -5,7 +5,6 @@ use crate::{ server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use member::{MemberEventContent, MembershipState}; -use rocket::futures; use ruma::{ api::{ client::{ @@ -667,14 +666,19 @@ async fn join_room_by_id_helper( let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - for result in futures::future::join_all( - send_join_response - .room_state - .state - .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), + server_server::fetch_join_signing_keys( + &send_join_response, + &room_version, + &pub_key_map, + &db, ) - .await + .await?; + + for result in send_join_response + .room_state + .state + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)) { let (event_id, value) = match result { Ok(t) => t, @@ -723,14 +727,11 @@ async fn join_room_by_id_helper( &db, )?; - for result in futures::future::join_all( - send_join_response - .room_state - .auth_chain - .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), - ) - .await + for result in send_join_response + .room_state + .auth_chain + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)) { let (event_id, value) = match result { Ok(t) => t, @@ -787,7 +788,7 @@ async fn join_room_by_id_helper( Ok(join_room_by_id::Response::new(room_id.clone()).into()) } -async fn validate_and_add_event_id( +fn validate_and_add_event_id( pdu: &Raw, room_version: &RoomVersionId, pub_key_map: &RwLock>>, @@ -830,7 +831,6 @@ async fn validate_and_add_event_id( } } - server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; if let Err(e) = ruma::signatures::verify_event( &*pub_key_map .read() diff --git a/src/server_server.rs b/src/server_server.rs index 
b81610e..b83eaa4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,7 +6,7 @@ use crate::{ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; use regex::Regex; -use rocket::response::content::Json; +use rocket::{futures, response::content::Json}; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -15,8 +15,9 @@ use ruma::{ device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_remote_server_keys, get_server_keys, get_server_version, ServerSigningKeys, - VerifyKey, + get_remote_server_keys, get_remote_server_keys_batch, + get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, + get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, @@ -35,6 +36,7 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ + pdu::Pdu, receipt::{ReceiptEvent, ReceiptEventContent}, room::{ create::CreateEventContent, @@ -3277,6 +3279,204 @@ pub(crate) async fn fetch_required_signing_keys( Ok(()) } +pub fn get_missing_signing_keys_for_pdus( + pdus: &Vec>, + servers: &mut BTreeMap, BTreeMap>, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + for pdu in pdus { + let value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = |keys: &BTreeMap| { + signature_ids.iter().all(|id| keys.contains_key(id)) + }; + + let origin = &Box::::try_from(&**signature_server).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + trace!("Loading signing keys for {}", origin); + + let result = db + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert( + origin.clone(), + BTreeMap::::new(), + ); + } + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? 
+ .insert(origin.to_string(), result); + } + } + + Ok(()) +} + +pub async fn fetch_join_signing_keys( + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + let mut servers = + BTreeMap::, BTreeMap>::new(); + + get_missing_signing_keys_for_pdus( + &event.room_state.state, + &mut servers, + &room_version, + &pub_key_map, + &db, + )?; + get_missing_signing_keys_for_pdus( + &event.room_state.auth_chain, + &mut servers, + &room_version, + &pub_key_map, + &db, + )?; + + if servers.is_empty() { + return Ok(()); + } + + for server in db.globals.trusted_servers() { + if db.globals.signing_keys_for(server)?.is_empty() { + servers.insert( + server.clone(), + BTreeMap::::new(), + ); + } + } + + for server in db.globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(60), + ) + .expect("time is valid"), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + for k in keys.server_keys { + // TODO: Check signature + servers.remove(&k.server_name); + + db.globals.add_signing_key(&k.server_name, k.clone())?; + + let result = db + .globals + .signing_keys_for(&k.server_name)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(k.server_name.to_string(), result); + } + } + if servers.is_empty() { + return Ok(()); + } + } + + for result in futures::future::join_all(servers.iter().map(|(server, _)| { + db.sending + .send_federation_request(&db.globals, server, get_server_keys::v2::Request::new()) + })) + .await + { + if let Ok(get_keys_response) = result { + // TODO: We should probably not trust the server_name in the response. + let server = &get_keys_response.server_key.server_name; + db.globals + .add_signing_key(server, get_keys_response.server_key.clone())?; + + let result = db + .globals + .signing_keys_for(server)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(server.to_string(), result); + } + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; From b546a5bf152d2f5cc65eee14e019588609bea9d2 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Wed, 25 Aug 2021 16:06:35 +0200 Subject: [PATCH 0783/1727] Let our server signing be valid for 1 week It was valid for 2 minutes, which can result in the server being asked too much for something that never changes. 
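For reference, 86 400 seconds per day times 7 days is 604 800 seconds, which is the window the diff below advertises. A minimal sketch of the same computation using only std types (the conversion to ruma's MilliSecondsSinceUnixEpoch that the actual code performs is omitted here):

```rust
use std::time::{Duration, SystemTime};

fn main() {
    // One week, written the same way as in the patch: 86 400 s/day * 7 days.
    let one_week = Duration::from_secs(86_400 * 7);
    assert_eq!(one_week.as_secs(), 604_800);

    // The advertised key expiry is simply "now + one week".
    let valid_until = SystemTime::now() + one_week;
    println!("{:?}", valid_until);
}
```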
--- src/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index b83eaa4..129f595 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -568,7 +568,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(60 * 2), + SystemTime::now() + Duration::from_secs(86400 * 7), ) .expect("time is valid"), }, From 984ad5ecd65b6b26f519294f7820b637451c11ef Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sun, 29 Aug 2021 13:25:20 +0200 Subject: [PATCH 0784/1727] fixup! Get required keys in batch when joining a room --- src/database/globals.rs | 15 +++++++++++++-- src/server_server.rs | 42 ++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 24 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index 6d11f49..048b9b8 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -227,7 +227,11 @@ impl Globals { /// Remove the outdated keys and insert the new ones. /// /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key(&self, origin: &ServerName, new_keys: ServerSigningKeys) -> Result<()> { + pub fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result> { // Not atomic, but this is not critical let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; @@ -252,7 +256,14 @@ impl Globals { &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), )?; - Ok(()) + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + + Ok(tree) } /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. diff --git a/src/server_server.rs b/src/server_server.rs index 129f595..2a3665f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3279,13 +3279,18 @@ pub(crate) async fn fetch_required_signing_keys( Ok(()) } -pub fn get_missing_signing_keys_for_pdus( +// Gets a list of servers for which we don't have the signing key yet. We go over +// the PDUs and either cache the key or add it to the list that needs to be retrieved. +fn get_missing_servers_for_pdus( pdus: &Vec>, servers: &mut BTreeMap, BTreeMap>, room_version: &RoomVersionId, pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for pdu in pdus { let value = serde_json::from_str::(pdu.json().get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -3342,6 +3347,10 @@ pub fn get_missing_signing_keys_for_pdus( Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?; + if servers.contains_key(origin) { + continue; + } + trace!("Loading signing keys for {}", origin); let result = db @@ -3359,10 +3368,7 @@ pub fn get_missing_signing_keys_for_pdus( ); } - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? 
- .insert(origin.to_string(), result); + pkm.insert(origin.to_string(), result); } } @@ -3378,14 +3384,14 @@ pub async fn fetch_join_signing_keys( let mut servers = BTreeMap::, BTreeMap>::new(); - get_missing_signing_keys_for_pdus( + get_missing_servers_for_pdus( &event.room_state.state, &mut servers, &room_version, &pub_key_map, &db, )?; - get_missing_signing_keys_for_pdus( + get_missing_servers_for_pdus( &event.room_state.auth_chain, &mut servers, &room_version, @@ -3424,23 +3430,19 @@ pub async fn fetch_join_signing_keys( .await { trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for k in keys.server_keys { // TODO: Check signature servers.remove(&k.server_name); - db.globals.add_signing_key(&k.server_name, k.clone())?; - - let result = db - .globals - .signing_keys_for(&k.server_name)? + let result = db.globals.add_signing_key(&k.server_name, k.clone())? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect::>(); - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(k.server_name.to_string(), result); + pkm.insert(k.server_name.to_string(), result); } } if servers.is_empty() { @@ -3457,12 +3459,8 @@ pub async fn fetch_join_signing_keys( if let Ok(get_keys_response) = result { // TODO: We should probably not trust the server_name in the response. let server = &get_keys_response.server_key.server_name; - db.globals - .add_signing_key(server, get_keys_response.server_key.clone())?; - - let result = db - .globals - .signing_keys_for(server)? + let result = db.globals + .add_signing_key(server, get_keys_response.server_key.clone())? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect::>(); From c53d79e28795e3c1f1438cbea85b9df244dc4937 Mon Sep 17 00:00:00 2001 From: Kurt Roeckx Date: Sun, 29 Aug 2021 14:39:38 +0200 Subject: [PATCH 0785/1727] fixup! Get required keys in batch when joining a room --- src/server_server.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 2a3665f..f34aed1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3437,7 +3437,9 @@ pub async fn fetch_join_signing_keys( // TODO: Check signature servers.remove(&k.server_name); - let result = db.globals.add_signing_key(&k.server_name, k.clone())? + let result = db + .globals + .add_signing_key(&k.server_name, k.clone())? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect::>(); @@ -3459,7 +3461,8 @@ pub async fn fetch_join_signing_keys( if let Ok(get_keys_response) = result { // TODO: We should probably not trust the server_name in the response. let server = &get_keys_response.server_key.server_name; - let result = db.globals + let result = db + .globals .add_signing_key(server, get_keys_response.server_key.clone())? 
.into_iter() .map(|(k, v)| (k.to_string(), v.key)) From 4b39d7cb64cd9c41622293ce1b9284ba557393f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 1 Sep 2021 15:21:02 +0200 Subject: [PATCH 0786/1727] fix: batch key fetching --- Cargo.lock | 47 ++++---- Cargo.toml | 2 +- src/database/rooms/edus.rs | 8 +- src/server_server.rs | 236 +++++++++++++++++++------------------ 4 files changed, 147 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03b1731..8037aa5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2062,8 +2062,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +version = "0.4.0" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "assign", "js_int", @@ -2084,7 +2084,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "bytes", "http", @@ -2100,7 +2100,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2111,7 +2111,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "ruma-api", "ruma-common", @@ -2125,7 +2125,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "assign", "bytes", @@ -2145,7 +2145,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "indexmap", "js_int", @@ -2159,8 +2159,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.4" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +version = "0.24.5" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "indoc", "js_int", @@ -2175,8 +2175,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.4" -source = 
"git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +version = "0.24.5" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2186,8 +2186,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +version = "0.3.1" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "js_int", "ruma-api", @@ -2202,7 +2202,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "paste", "rand 0.8.4", @@ -2216,7 +2216,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2226,7 +2226,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "thiserror", ] @@ -2234,7 +2234,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "js_int", "ruma-api", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "js_int", "ruma-api", @@ -2262,7 +2262,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "bytes", "form_urlencoded", @@ -2276,7 +2276,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = 
"git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2287,7 +2287,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2303,12 +2303,11 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.3.0" -source = "git+https://github.com/DevinR528/ruma?rev=c7860fcb89dbde636e2c83d0636934fb9924f40c#c7860fcb89dbde636e2c83d0636934fb9924f40c" +version = "0.4.0" +source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" dependencies = [ "itertools 0.10.1", "js_int", - "maplit", "ruma-common", "ruma-events", "ruma-identifiers", diff --git a/Cargo.toml b/Cargo.toml index bb44d08..ca802e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/DevinR528/ruma", rev = "c7860fcb89dbde636e2c83d0636934fb9924f40c", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index ff28436..14146fb 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -422,7 +422,7 @@ impl RoomEdus { } /// Sets all users to offline who have been quiet for too long. - fn presence_maintain( + fn _presence_maintain( &self, rooms: &super::Rooms, globals: &super::super::globals::Globals, @@ -489,13 +489,13 @@ impl RoomEdus { } /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
- #[tracing::instrument(skip(self, globals, rooms))] + #[tracing::instrument(skip(self, since, _rooms, _globals))] pub fn presence_since( &self, room_id: &RoomId, since: u64, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, + _rooms: &super::Rooms, + _globals: &super::super::globals::Globals, ) -> Result> { //self.presence_maintain(rooms, globals)?; diff --git a/src/server_server.rs b/src/server_server.rs index f34aed1..60998a7 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,7 +6,10 @@ use crate::{ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; use regex::Regex; -use rocket::{futures, response::content::Json}; +use rocket::{ + futures::{prelude::*, stream::FuturesUnordered}, + response::content::Json, +}; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -61,7 +64,7 @@ use std::{ net::{IpAddr, SocketAddr}, pin::Pin, result::Result as StdResult, - sync::{Arc, RwLock}, + sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, }; use tokio::sync::{MutexGuard, Semaphore}; @@ -3281,101 +3284,96 @@ pub(crate) async fn fetch_required_signing_keys( // Gets a list of servers for which we don't have the signing key yet. We go over // the PDUs and either cache the key or add it to the list that needs to be retrieved. -fn get_missing_servers_for_pdus( - pdus: &Vec>, +fn get_server_keys_from_cache( + pdu: &Raw, servers: &mut BTreeMap, BTreeMap>, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, ) -> Result<()> { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for pdu in pdus { - let value = serde_json::from_str::(pdu.json().get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") + let value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? 
+ .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = &Box::::try_from(&**signature_server).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?; - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, &room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - if let Some((time, tries)) = db + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result = db .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert( + origin.clone(), + BTreeMap::::new(), + ); } - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = |keys: &BTreeMap| { - signature_ids.iter().all(|id| keys.contains_key(id)) - }; - - let origin = &Box::::try_from(&**signature_server).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result = db - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert( - origin.clone(), - BTreeMap::::new(), - ); - } - - pkm.insert(origin.to_string(), result); - } + pub_key_map.insert(origin.to_string(), result); } Ok(()) } -pub async fn fetch_join_signing_keys( +pub(crate) async fn fetch_join_signing_keys( event: &create_join_event::v2::Response, room_version: &RoomVersionId, pub_key_map: &RwLock>>, @@ -3384,32 +3382,26 @@ pub async fn fetch_join_signing_keys( let mut servers = BTreeMap::, BTreeMap>::new(); - get_missing_servers_for_pdus( - &event.room_state.state, - &mut servers, - &room_version, - &pub_key_map, - &db, - )?; - get_missing_servers_for_pdus( - &event.room_state.auth_chain, - &mut servers, - &room_version, - &pub_key_map, - &db, - )?; + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - if servers.is_empty() { - return Ok(()); + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = get_server_keys_from_cache(pdu, &mut servers, &room_version, &mut pkm, &db); + } + for pdu in &event.room_state.auth_chain { + let _ = get_server_keys_from_cache(pdu, &mut servers, &room_version, &mut pkm, &db); + } + + drop(pkm); } - for server in db.globals.trusted_servers() { - if db.globals.signing_keys_for(server)?.is_empty() { - servers.insert( - server.clone(), - BTreeMap::::new(), - ); - } + if servers.is_empty() { + // We had all keys locally + return Ok(()); } for server in db.globals.trusted_servers() { @@ -3434,7 +3426,7 @@ pub async fn fetch_join_signing_keys( .write() .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for k in keys.server_keys { - // TODO: Check signature + // TODO: Check signature from trusted server? servers.remove(&k.server_name); let result = db @@ -3447,23 +3439,33 @@ pub async fn fetch_join_signing_keys( pkm.insert(k.server_name.to_string(), result); } } + if servers.is_empty() { return Ok(()); } } - for result in futures::future::join_all(servers.iter().map(|(server, _)| { - db.sending - .send_federation_request(&db.globals, server, get_server_keys::v2::Request::new()) - })) - .await - { - if let Ok(get_keys_response) = result { - // TODO: We should probably not trust the server_name in the response. - let server = &get_keys_response.server_key.server_name; + let mut futures = servers + .into_iter() + .map(|(server, _)| async move { + ( + db.sending + .send_federation_request( + &db.globals, + &server, + get_server_keys::v2::Request::new(), + ) + .await, + server, + ) + }) + .collect::>(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { let result = db .globals - .add_signing_key(server, get_keys_response.server_key.clone())? + .add_signing_key(&origin, get_keys_response.server_key.clone())? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect::>(); @@ -3471,7 +3473,7 @@ pub async fn fetch_join_signing_keys( pub_key_map .write() .map_err(|_| Error::bad_database("RwLock is poisoned."))? 
- .insert(server.to_string(), result); + .insert(origin.to_string(), result); } } From 487601a24995e6ec33ec38ecc3328902148c5b29 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 1 Sep 2021 21:46:05 +0200 Subject: [PATCH 0787/1727] Remove unneeded intermediary container collection --- src/server_server.rs | 47 ++++++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 60998a7..273203a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1397,39 +1397,34 @@ async fn upgrade_outlier_to_timeline_pdu( extremity_sstatehashes.insert(sstatehash, prev_event); } - let mut fork_states = Vec::new(); - if okay { - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state = db - .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); - // Now it's the state after the pdu - } - - fork_states.push(leaf_state); - } - - let fork_states = &fork_states + let fork_states: Vec<_> = extremity_sstatehashes .into_iter() - .map(|map| { - map.into_iter() + .map(|(sstatehash, prev_event)| { + let mut leaf_state = db + .rooms + .state_full_ids(sstatehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); + // Now it's the state after the pdu + } + + leaf_state + .into_iter() .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id)))) .collect::>>() + .map_err(|_| "Failed to get_statekey_from_short.".to_owned()) }) - .collect::>>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + .collect::>()?; let mut auth_chain_sets = Vec::new(); - for state in fork_states { + for state in &fork_states { auth_chain_sets.push( get_auth_chain( &room_id, From 875b345a5f770251b640035c41148d2481beb3d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 1 Sep 2021 13:09:24 +0200 Subject: [PATCH 0788/1727] docs: update readme and version --- Cargo.lock | 2 +- Cargo.toml | 2 +- DEPLOY.md | 11 ++++++ README.md | 72 +++++++----------------------------- src/client_server/account.rs | 4 +- 5 files changed, 28 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8037aa5..13d027e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,7 +236,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.1.0" +version = "0.2.0" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index ca802e9..4a90a4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.1.0" +version = "0.2.0" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/DEPLOY.md b/DEPLOY.md index 8218b45..3a81eb0 100644 --- a/DEPLOY.md +++ 
b/DEPLOY.md @@ -231,4 +231,15 @@ Set it to start automatically when your system boots with: $ sudo systemctl enable conduit ``` +## How do I know it works? + +You can open , enter your homeserver and try to register. + +You can also use these commands as a quick health check. + +```bash +$ curl https://your.server.name/_matrix/client/versions +$ curl https://your.server.name:8448/_matrix/client/versions +``` + If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/README.md b/README.md index 836f9c6..34344c9 100644 --- a/README.md +++ b/README.md @@ -3,37 +3,26 @@ #### What is the goal? -A fast Matrix homeserver that's easy to set up and just works. You can install +An efficient Matrix homeserver that's easy to set up and just works. You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. #### Can I try it out? -Yes! Just open a Matrix client ( or Element Android for -example) and register on the `https://conduit.koesters.xyz` homeserver. +Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for +example) and registering on the `conduit.rs` homeserver. - -#### What is it built on? - -- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and - responses that can be (de)serialized -- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with - good performance -- [Rocket](https://rocket.rs): A flexible web framework +It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which +was used in the Samsung Galaxy S5. It joined many big rooms including Matrix +HQ. #### What is the current status? -Conduit can already be used chat with other users on Conduit, chat with users -from other Matrix servers and even to chat with users on other platforms using -appservices. When chatting with users on the same Conduit server, everything -should work assuming you use a compatible client. - -**You should not join Matrix rooms without asking the admins first.** We do not -know whether Conduit is safe for general use yet, so you should assume there is -some chance that it breaks rooms permanently for all participating users. We -are not aware of such a bug today, but we would like to do more testing. +As of 2021-09-01 Conduit is Beta, meaning you can join and participate in most +Matrix rooms, but not all features are supported and you might run into bugs +from time to time. There are still a few important features missing: @@ -47,46 +36,11 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit #### How can I deploy my own? -##### Deploy +Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)\ +Debian package: [debian/README.Debian](debian/README.Debian)\ +Docker: [docker/README.md](docker/README.md) -Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read -more](DEPLOY.md) - -If you want to connect an Appservice to Conduit, take a look at the [Appservice Guide](APPSERVICES.md). 
- -##### Deploy using a Debian package - -You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info): - -```shell -$ cargo install cargo-deb -``` - -Then, you can create and install a Debian package at a whim: - -```shell -$ cargo deb -$ dpkg -i target/debian/matrix-conduit_0.1.0_amd64.deb -``` - -This will build, package, install, configure and start Conduit. [Read more](debian/README.Debian). - -Note that `cargo deb` supports [cross-compilation](https://github.com/mmstick/cargo-deb/#cross-compilation) too! -Official Debian packages will follow once Conduit starts to have stable releases. - -##### Deploy using Docker - -Pull and run the docker image with - -``` bash -docker pull matrixconduit/matrix-conduit:latest -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest -``` - -> Note: You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml). -> Or you can pass in `-e CONDUIT_CONFIG=""` and configure Conduit purely with env vars. - -Or build and run it with docker or docker-compose. [Read more](docker/README.md) +If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). #### How can I contribute? diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4b610a3..e9300b5 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -507,8 +507,8 @@ pub async fn register_route( PduBuilder { event_type: EventType::RoomMessage, content: serde_json::to_value(message::MessageEventContent::text_html( - "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "
<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>\n".to_owned(),
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-util" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ "autocfg", "futures-channel", @@ -1026,7 +1026,7 @@ dependencies = [ "gif", "jpeg-decoder", "num-iter", - "num-rational 0.3.2", + "num-rational", "num-traits", "png", ] @@ -1203,9 +1203,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ "scopeguard", ] @@ -1349,20 +1349,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" -dependencies = [ - "num-bigint 0.4.0", - "num-complex", - "num-integer", - "num-iter", - "num-rational 0.4.0", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1374,26 +1360,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-bigint" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d047c1062aa51e256408c560894e5251f08925980e53cf1aa5bd00eec6512" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" -dependencies = [ - "num-traits", -] - [[package]] name = "num-integer" version = "0.1.44" @@ -1426,18 +1392,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" -dependencies = [ - "autocfg", - "num-bigint 0.4.0", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.14" @@ -1556,9 +1510,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", @@ -1567,9 +1521,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1726,9 +1680,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" +checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" dependencies = [ "unicode-xid", ] @@ -2063,7 +2017,8 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668031e3108d6a2cfbe6eca271d8698f4593440e71a44afdadcf67ce3cb93c1f" dependencies = [ "assign", "js_int", @@ -2084,7 +2039,8 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5f1843792b6749ec1ece62595cf99ad30bf9589c96bb237515235e71da396ea" dependencies = [ "bytes", "http", @@ -2100,7 +2056,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18abda5cca94178d08b622bca042e1cbb5eb7d4ebf3a2a81590a3bb3c57008" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2111,7 +2068,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49369332a5f299e832e19661f92d49e08c345c3c6c4ab16e09cb31c5ff6da878" dependencies = [ "ruma-api", "ruma-common", @@ -2125,7 +2083,8 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9568a222c12cf6220e751484ab78feec28071f85965113a5bb802936a2920ff0" dependencies = [ "assign", "bytes", @@ -2145,7 +2104,8 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d5b7605f58dc0d9cf1848cc7f1af2bae4e4bcd1d2b7a87bbb9864c8a785b91" dependencies = [ "indexmap", "js_int", @@ -2160,7 +2120,8 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.5" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87801e1207cfebdee02e7997ebf181a1c9837260b78c1b8ce96b896a2bcb3763" dependencies = [ "indoc", "js_int", @@ -2176,7 +2137,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.5" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da4498845347de88adf1b7da4578e2ca7355ad4ce47b0976f6594bacf958660" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2187,7 +2149,8 @@ dependencies 
= [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa3d1db1a064ab26484df6ef5d96c384fc053022004f34d96c3b4939e13dc204" dependencies = [ "js_int", "ruma-api", @@ -2202,7 +2165,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb417d091e8dd5a633e4e5998231a156049d7fcc221045cfdc0642eb72067732" dependencies = [ "paste", "rand 0.8.4", @@ -2216,7 +2180,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c708edad7f605638f26c951cbad7501fbf28ab01009e5ca65ea5a2db74a882b1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2226,15 +2191,14 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" -dependencies = [ - "thiserror", -] +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42285e7fb5d5f2d5268e45bb683e36d5c6fd9fc1e11a4559ba3c3521f3bbb2cb" [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e76e66e24f2d5a31511fbf6c79e79f67a7a6a98ebf48d72381b7d5bb6c09f035" dependencies = [ "js_int", "ruma-api", @@ -2247,7 +2211,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef5b29da7065efc5b1e1a8f61add7543c9ab4ecce5ee0dd1c1c5ecec83fbeec" dependencies = [ "js_int", "ruma-api", @@ -2262,7 +2227,8 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2b22aae842e7ecda695e42b7b39d4558959d9d9a27acc2a16acf4f4f7f00c3" dependencies = [ "bytes", "form_urlencoded", @@ -2276,7 +2242,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243e9bef188b08f94c79bc2f8fd1eb307a9e636b2b8e4571acf8c7be16381d28" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2287,7 +2254,8 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = 
"git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a4f64027165b59500162d10d435b1253898bf3ad4f5002cb0d56913fe7f76d7" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2303,8 +2271,9 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.4.0" -source = "git+https://github.com/timokoesters/ruma?rev=50c1db7e0a3a21fc794b0cce3b64285a4c750c71#50c1db7e0a3a21fc794b0cce3b64285a4c750c71" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "518c1afbddfcc5ffac8818a5cf0902709e6eca11aca8f24f6479df6f0601f1ba" dependencies = [ "itertools 0.10.1", "js_int", @@ -2425,23 +2394,22 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9bd29cdffb8875b04f71c51058f940cf4e390bbfd2ce669c4f22cd70b492a5" +checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" dependencies = [ "bitflags", "core-foundation", "core-foundation-sys", "libc", - "num", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19133a286e494cc3311c165c4676ccb1fd47bed45b55f9d71fbd784ad4cea6f8" +checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" dependencies = [ "core-foundation-sys", "libc", @@ -2464,18 +2432,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.129" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1f72836d2aa753853178eda473a3b9d8e4eefdaf20523b919677e6de489f8f1" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.129" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57ae87ad533d9a56427558b516d0adac283614e347abf85b0dc0cbbf0a249f3" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -2484,9 +2452,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" +checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" dependencies = [ "itoa", "ryu", @@ -2567,7 +2535,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" dependencies = [ "chrono", - "num-bigint 0.2.6", + "num-bigint", "num-traits", ] @@ -2781,18 +2749,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "283d5230e63df9608ac7d9691adc1dfb6e701225436eb64d0b9a7f0a5a04f6ec" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" 
+checksum = "fa3884228611f5cd3608e2d409bf7dce832e4eb3135e3f11addbd7e41bd68e71" dependencies = [ "proc-macro2", "quote", @@ -2895,9 +2863,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92036be488bb6594459f2e03b60e42df6f937fe6ca5c5ffdcb539c6b84dc40f5" +checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" dependencies = [ "autocfg", "bytes", @@ -3148,9 +3116,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" [[package]] name = "ubyte" diff --git a/Cargo.toml b/Cargo.toml index 4a90a4c..aadedce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,9 +18,9 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/server_server.rs b/src/server_server.rs index 273203a..122545a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1030,7 +1030,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); + graph.insert((*prev_event_id).clone(), HashSet::new()); continue; } @@ -1052,22 +1052,22 @@ pub(crate) async fn handle_incoming_pdu<'a>( } graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().map(Arc::new).collect(), + (*prev_event_id).clone(), + pdu.prev_events.iter().cloned().collect(), ); eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); + graph.insert((*prev_event_id).clone(), HashSet::new()); 
eventid_info.insert(prev_event_id.clone(), (pdu, json)); } } else { // Get json failed - graph.insert(prev_event_id.clone(), HashSet::new()); + graph.insert((*prev_event_id).clone(), HashSet::new()); } } else { // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); + graph.insert((*prev_event_id).clone(), HashSet::new()); } } @@ -1084,7 +1084,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .get(event_id) .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), - Arc::new(ruma::event_id!("$notimportant")), + ruma::event_id!("$notimportant"), )) }) .map_err(|_| "Error sorting prev events".to_owned())?; @@ -1432,10 +1432,22 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? + .map(|event_id| (*event_id).clone()) .collect(), ); } + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, id)| { + (k, (*id).clone()) + }) + .collect() + }) + .collect::>(); + state_at_incoming_event = match state_res::StateResolution::resolve( &room_id, room_version_id, @@ -1457,7 +1469,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, event_id)) + Ok((shortstatekey, Arc::new(event_id))) }) .collect::>()?, ), @@ -1766,18 +1778,8 @@ async fn upgrade_outlier_to_timeline_pdu( // We do need to force an update to this room's state update_state = true; - let fork_states = &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id)))) - .collect::>>() - }) - .collect::>>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - let mut auth_chain_sets = Vec::new(); - for state in fork_states { + for state in &fork_states { auth_chain_sets.push( get_auth_chain( &room_id, @@ -1785,10 +1787,25 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .map(|event_id| (*event_id).clone()) .collect(), ); } + let fork_states = &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + .map(|k| (k, (*id).clone())) + }) + .collect::>>() + }) + .collect::>>() + .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + let state = match state_res::StateResolution::resolve( &room_id, room_version_id, From 7cd7f1923f562775dbfc2e416f80042c446905eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 1 Sep 2021 22:31:25 +0200 Subject: [PATCH 0790/1727] chore: update dependencies --- Cargo.lock | 47 ++++++++++++++++++++--------------------------- Cargo.toml | 20 ++++++++++---------- 2 files changed, 30 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02ba835..a60ca29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -248,7 +248,7 @@ dependencies = [ "jsonwebtoken", "lru-cache", "num_cpus", - "opentelemetry 0.16.0", + "opentelemetry", "opentelemetry-jaeger", "parking_lot", "pretty_env_logger", @@ -275,7 +275,7 @@ dependencies = [ "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", - "webpki", + "webpki 0.22.0", ] [[package]] @@ -1000,7 +1000,7 @@ dependencies = [ "rustls", "tokio", "tokio-rustls", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -1429,23 +1429,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" -[[package]] -name = "opentelemetry" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff27b33e30432e7b9854936693ca103d8591b0501f7ae9f633de48cda3bf2a67" -dependencies = [ - "async-trait", - "crossbeam-channel", - "futures", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.4", - "thiserror", -] - [[package]] name = "opentelemetry" version = "0.16.0" @@ -1473,7 +1456,7 @@ checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" dependencies = [ "async-trait", "lazy_static", - "opentelemetry 0.16.0", + "opentelemetry", "opentelemetry-semantic-conventions", "thiserror", "thrift", @@ -1486,7 +1469,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" dependencies = [ - "opentelemetry 0.16.0", + "opentelemetry", ] [[package]] @@ -2333,7 +2316,7 @@ dependencies = [ "log", "ring", "sct", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -2899,7 +2882,7 @@ checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", "tokio", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -3010,11 +2993,11 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c47440f2979c4cd3138922840eec122e3c0ba2148bc290f756bd7fd60fc97fff" +checksum = "599f388ecb26b28d9c1b2e4437ae019a7b336018b45ed911458cd9ebf91129f6" dependencies = [ - "opentelemetry 0.15.0", + "opentelemetry", "tracing", "tracing-core", "tracing-log", @@ -3318,6 +3301,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "weezl" version = "0.1.5" diff --git 
a/Cargo.toml b/Cargo.toml index aadedce..a78307a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,23 +24,23 @@ ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "c #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = "1.8.2" +tokio = "1.11.0" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for the http request / response body type for Ruma endpoints used with reqwest -bytes = "1.0.1" +bytes = "1.1.0" # Used for rocket<->ruma conversions http = "0.2.4" # Used to find data directory for default db path directories = "3.0.2" # Used for ruma wrapper -serde_json = { version = "1.0.64", features = ["raw_value"] } +serde_json = { version = "1.0.67", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.17" +serde_yaml = "0.8.20" # Used for pdu definition -serde = "1.0.126" +serde = "1.0.130" # Used for secure identifiers rand = "0.8.4" # Used to hash passwords @@ -50,9 +50,9 @@ reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tl # Custom TLS verifier rustls = { version = "0.19.1", features = ["dangerous_configuration"] } rustls-native-certs = "0.5.0" -webpki = "0.21.0" +webpki = "0.22.0" # Used for conduit::Error type -thiserror = "1.0.26" +thiserror = "1.0.28" # Used to generate thumbnails for images image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key @@ -67,15 +67,15 @@ regex = "1.5.4" jsonwebtoken = "7.2.0" # Performance measurements tracing = { version = "0.1.26", features = ["release_max_level_warn"] } -tracing-subscriber = "0.2.19" -tracing-opentelemetry = "0.14.0" +tracing-subscriber = "0.2.20" +tracing-opentelemetry = "0.15.0" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } pretty_env_logger = "0.4.0" lru-cache = "0.1.2" rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } -parking_lot = { version = "0.11.1", optional = true } +parking_lot = { version = "0.11.2", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" From 2770ce2cf61f71c835f5952558b7fe463d61217d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 1 Sep 2021 22:49:16 +0200 Subject: [PATCH 0791/1727] fix: avoid panic --- README.md | 8 +++----- src/ruma_wrapper.rs | 6 +++++- src/server_server.rs | 8 +------- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 34344c9..9d8de2b 100644 --- a/README.md +++ b/README.md @@ -20,16 +20,14 @@ HQ. #### What is the current status? -As of 2021-09-01 Conduit is Beta, meaning you can join and participate in most +As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time. 
There are still a few important features missing: -- Database stability (currently you might have to do manual upgrades or even wipe the db for new versions) -- Edge cases for end-to-end encryption over federation -- Typing and presence over federation -- Lots of testing +- E2EE verification over federation +- Outgoing read receipts, typing, presence over federation Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 5681194..fa28379 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -66,7 +66,11 @@ where let limit = db.globals.max_request_size(); let mut handle = data.open(ByteUnit::Byte(limit.into())); let mut body = Vec::new(); - handle.read_to_end(&mut body).await.unwrap(); + if let Err(_) = handle.read_to_end(&mut body).await { + // Client disconnected + // Missing Token + return Failure((Status::new(582), ())); + } let mut json_body = serde_json::from_slice::(&body).ok(); diff --git a/src/server_server.rs b/src/server_server.rs index 122545a..bac7203 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1439,13 +1439,7 @@ async fn upgrade_outlier_to_timeline_pdu( let fork_states = &fork_states .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, id)| { - (k, (*id).clone()) - }) - .collect() - }) + .map(|map| map.into_iter().map(|(k, id)| (k, (*id).clone())).collect()) .collect::>(); state_at_incoming_event = match state_res::StateResolution::resolve( From 3357bbec1edf0036cd7d25a13119cf8f5e88ec1e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 2 Sep 2021 09:17:25 +0200 Subject: [PATCH 0792/1727] chore: Also run CI on git tags, not only new commits. Signed-off-by: Jonas Zohren --- .gitlab-ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 75bdfd6..640c3ec 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,6 +19,7 @@ variables: needs: [] rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_TAG' interruptible: true image: "rust:latest" tags: ["docker"] @@ -106,6 +107,7 @@ build:release:cargo:x86_64-unknown-linux-musl: extends: ".build-cargo-shared-settings" rules: - if: '$CI_COMMIT_BRANCH' + - if: '$CI_COMMIT_TAG' cache: key: "build_cache-$TARGET-debug" script: @@ -151,6 +153,7 @@ build:debug:cargo:x86_64-unknown-linux-musl: needs: [ ] rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_TAG' interruptible: true image: "rust:latest" tags: ["docker"] @@ -227,6 +230,7 @@ build:docker:main: --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_TAG' @@ -328,6 +332,7 @@ publish:package: - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_TAG' image: curlimages/curl:latest tags: ["docker"] variables: From d0baca44f8440d6a1f04860798612a7dc23b1646 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 2 Sep 2021 13:05:19 +0200 Subject: [PATCH 0793/1727] docs: add thanks to --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 9d8de2b..e667d18 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,17 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md] 3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :) 4. 
Submit a MR + +#### Thanks to + +Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. + +Thanks to the contributors to Conduit and all libraries we use, for example: + +- Ruma: A clean library for the Matrix Spec in Rust +- Rocket: A flexible web framework + + #### Donate Liberapay: \ From a1f51440e2af9971b08f03de2e4032e517be0b19 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 2 Sep 2021 17:17:51 +0200 Subject: [PATCH 0794/1727] chore(CI): Adjust CI for master and next branch development model - Build release builds for branches "master" and "next" - Push docker images under different tags, depending on why the pipeline started - branch master: push to `latest` - branch next: push to `next` - tag: push to `$TAG_NAME` Signed-off-by: Jonas Zohren --- .gitlab-ci.yml | 69 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 11 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 640c3ec..018e5a1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,7 +18,8 @@ variables: stage: "build" needs: [] rules: - - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_BRANCH == "master"' + - if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_TAG' interruptible: true image: "rust:latest" @@ -42,6 +43,8 @@ variables: script: - time cargo build --target $TARGET --release - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + artifacts: + expire_in: never build:release:cargo:x86_64-unknown-linux-gnu: @@ -113,6 +116,8 @@ build:release:cargo:x86_64-unknown-linux-musl: script: - "time cargo build --target $TARGET" - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' + artifacts: + expire_in: 4 weeks build:debug:cargo:x86_64-unknown-linux-gnu: extends: ".cargo-debug-shared-settings" @@ -152,7 +157,8 @@ build:debug:cargo:x86_64-unknown-linux-musl: stage: "build" needs: [ ] rules: - - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_BRANCH == "master"' + - if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_TAG' interruptible: true image: "rust:latest" @@ -187,11 +193,11 @@ build:cargo-deb:x86_64-unknown-linux-gnu: expose_as: "Debian Package x86_64" - # --------------------------------------------------------------------- # # Create and publish docker image # # --------------------------------------------------------------------- # +# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image .docker-shared-settings: stage: "build docker image" needs: [] @@ -208,8 +214,30 @@ build:cargo-deb:x86_64-unknown-linux-gnu: - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' -# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image -build:docker:main: +build:docker:next: + extends: .docker-shared-settings + needs: + - "build:release:cargo:x86_64-unknown-linux-musl" + script: + - > + /kaniko/executor + $KANIKO_CACHE_ARGS + --context $CI_PROJECT_DIR + --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) + --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" + --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" + --destination "$CI_REGISTRY_IMAGE/conduit:next" + --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" + 
--destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" + rules: + - if: '$CI_COMMIT_BRANCH == "next"' + + +build:docker:master: extends: .docker-shared-settings needs: - "build:release:cargo:x86_64-unknown-linux-musl" @@ -223,13 +251,31 @@ build:docker:main: --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" --destination "$CI_REGISTRY_IMAGE/conduit:latest" - --destination "$CI_REGISTRY_IMAGE/conduit:alpine" - --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" + --destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine" --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine" + rules: + - if: '$CI_COMMIT_BRANCH == "master"' + + +build:docker:tags: + extends: .docker-shared-settings + needs: + - "build:release:cargo:x86_64-unknown-linux-musl" + script: + - > + /kaniko/executor + $KANIKO_CACHE_ARGS + --context $CI_PROJECT_DIR + --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) + --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" + --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" + --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG" + --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" + --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine" rules: - - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' - if: '$CI_COMMIT_TAG' @@ -331,7 +377,8 @@ publish:package: - "build:release:cargo:x86_64-unknown-linux-musl" - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_COMMIT_BRANCH == "master"' + - if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_TAG' image: curlimages/curl:latest tags: ["docker"] From 71341ea05ad6bbb8dad01d023024b287f9debd2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 3 Sep 2021 11:26:15 +0200 Subject: [PATCH 0795/1727] fix: make sure old events don't sneek into the timeline --- src/server_server.rs | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index bac7203..a6557cc 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -977,6 +977,12 @@ pub(crate) async fn handle_incoming_pdu<'a>( .map_err(|_| "Failed to ask database for event.".to_owned())? .ok_or_else(|| "Failed to find create event in db.".to_owned())?; + let first_pdu_in_room = db + .rooms + .first_pdu_in_room(&room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists"); + let (incoming_pdu, val) = handle_outlier_pdu( origin, &create_event, @@ -993,13 +999,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( return Ok(None); } - if incoming_pdu.origin_server_ts - < db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? 
- .expect("Room exists") - .origin_server_ts - { + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { return Ok(None); } @@ -1037,13 +1037,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if let Some(json) = json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { - if pdu.origin_server_ts - > db.rooms - .first_pdu_in_room(&room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists") - .origin_server_ts - { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { @@ -1095,6 +1089,10 @@ pub(crate) async fn handle_incoming_pdu<'a>( break; } if let Some((pdu, json)) = eventid_info.remove(&prev_id) { + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } + let start_time = Instant::now(); let event_id = pdu.event_id.clone(); if let Err(e) = upgrade_outlier_to_timeline_pdu( From bbe36810ec0be944769a34f7d3927eb6dd07a8f7 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Fri, 3 Sep 2021 21:51:17 +0200 Subject: [PATCH 0796/1727] Fix deprecated/removed Traefik label --- docker/docker-compose.override.traefik.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.traefik.yml index 5633348..9525078 100644 --- a/docker/docker-compose.override.traefik.yml +++ b/docker/docker-compose.override.traefik.yml @@ -12,7 +12,7 @@ services: - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOrigin=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" @@ -29,7 +29,7 @@ services: - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOrigin=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" From 487046571c5561c554558db9fdb17f27b0ca1215 Mon Sep 17 00:00:00 2001 From: charludo Date: Mon, 6 Sep 2021 18:35:35 +0000 Subject: [PATCH 0797/1727] These lines get *generated* by certbot. Having them in the file before running certbot results in an apache2 error, and putting them in afterwards is not necessary, since certbot places them there on its own. 
--- DEPLOY.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 3a81eb0..84dd2be 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -162,9 +162,6 @@ AllowEncodedSlashes NoDecode ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ -Include /etc/letsencrypt/options-ssl-apache.conf -SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS -SSLCertificateKeyFile /etc/letsencrypt/live/your.server.name/privkey.pem # EDIT THIS
``` From 5821b8e705ea3b0fcb23ebac6d4d61f6400f760e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 7 Sep 2021 15:41:01 +0200 Subject: [PATCH 0798/1727] Remove unused dependencies --- Cargo.lock | 67 ++---------------------------------------------------- Cargo.toml | 2 -- 2 files changed, 2 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a60ca29..86ef48c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "adler32" version = "1.2.0" @@ -251,7 +253,6 @@ dependencies = [ "opentelemetry", "opentelemetry-jaeger", "parking_lot", - "pretty_env_logger", "rand 0.8.4", "regex", "reqwest", @@ -272,7 +273,6 @@ dependencies = [ "tokio", "tracing", "tracing-flame", - "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", "webpki 0.22.0", @@ -590,19 +590,6 @@ dependencies = [ "syn", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -955,15 +942,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "hyper" version = "0.14.12" @@ -1629,16 +1607,6 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" -[[package]] -name = "pretty_env_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -dependencies = [ - "env_logger", - "log", -] - [[package]] name = "proc-macro-crate" version = "1.0.0" @@ -2721,15 +2689,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - [[package]] name = "thiserror" version = "1.0.28" @@ -2991,19 +2950,6 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-opentelemetry" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599f388ecb26b28d9c1b2e4437ae019a7b336018b45ed911458cd9ebf91129f6" -dependencies = [ - "opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - [[package]] name = "tracing-serde" version = "0.1.2" @@ -3345,15 +3291,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml 
index a78307a..04887d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,11 +68,9 @@ jsonwebtoken = "7.2.0" # Performance measurements tracing = { version = "0.1.26", features = ["release_max_level_warn"] } tracing-subscriber = "0.2.20" -tracing-opentelemetry = "0.15.0" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } -pretty_env_logger = "0.4.0" lru-cache = "0.1.2" rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } parking_lot = { version = "0.11.2", optional = true } From 51245d34f172f9025699e8d9653be62625872a49 Mon Sep 17 00:00:00 2001 From: Rasmus Thomsen Date: Tue, 7 Sep 2021 19:41:14 +0100 Subject: [PATCH 0799/1727] fix(database): handle errors in config parsin or database creation Showing the user a backtrace can be pretty confusing, so just printing a nice error message makes errors easier to understand for end users. fixes #121 --- src/database.rs | 5 +++++ src/main.rs | 23 +++++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/src/database.rs b/src/database.rs index 6ea0abd..5fb6de4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -198,6 +198,11 @@ impl Database { pub async fn load_or_create(config: &Config) -> Result>> { Self::check_sled_or_sqlite_db(&config)?; + if !Path::new(&config.database_path).exists() { + std::fs::create_dir_all(&config.database_path) + .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; + } + let builder = Engine::open(&config)?; if config.max_request_size < 1024 { diff --git a/src/main.rs b/src/main.rs index 2ca49e2..06409ee 100644 --- a/src/main.rs +++ b/src/main.rs @@ -199,16 +199,27 @@ async fn main() { std::env::set_var("RUST_LOG", "warn"); - let config = raw_config - .extract::() - .expect("It looks like your config is invalid. Please take a look at the error"); + let config = match raw_config.extract::() { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + std::process::exit(1); + } + }; let start = async { config.warn_deprecated(); - let db = Database::load_or_create(&config) - .await - .expect("config is valid"); + let db = match Database::load_or_create(&config).await { + Ok(db) => db, + Err(e) => { + eprintln!( + "The database couldn't be loaded or created. 
The following error occured: {}", + e + ); + std::process::exit(1); + } + }; let rocket = setup_rocket(raw_config, Arc::clone(&db)) .ignite() From 23c5ec8099829a6d8115d09ee9a9cf6a0380f848 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 8 Sep 2021 14:50:44 +0200 Subject: [PATCH 0800/1727] fix sync not firing on new events in room --- src/database.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 5fb6de4..dcba2ab 100644 --- a/src/database.rs +++ b/src/database.rs @@ -815,12 +815,21 @@ impl Database { // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + let short_roomid = self + .rooms + .get_shortroomid(&room_id) + .ok() + .flatten() + .expect("room exists") + .to_be_bytes() + .to_vec(); + let roomid_bytes = room_id.as_bytes().to_vec(); let mut roomid_prefix = roomid_bytes.clone(); roomid_prefix.push(0xff); // PDUs - futures.push(self.rooms.pduid_pdu.watch_prefix(&roomid_prefix)); + futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid)); // EDUs futures.push( From 00927a7ce367df88f1230e2ba60492fda0901750 Mon Sep 17 00:00:00 2001 From: Daniel Wiesenberg Date: Thu, 9 Sep 2021 10:02:11 +0200 Subject: [PATCH 0801/1727] Add mautrix-signal appservice instructions --- APPSERVICES.md | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index a84f1d2..ba9ae89 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -7,9 +7,32 @@ If you run into any problems while setting up an Appservice, write an email to ` ## Tested appservices Here are some appservices we tested and that work with Conduit: -- matrix-appservice-discord -- mautrix-hangouts -- mautrix-telegram +- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) +- [mautrix-hangouts](https://github.com/mautrix/hangouts/) +- [mautrix-telegram](https://github.com/mautrix/telegram/) +- [mautrix-signal](https://github.com/mautrix/signal) + - There are a few things you need to do, in order for the bridge (at least up to version `0.2.0`) to work. Before following the bridge installation guide, you need to map apply a patch to bridges `portal.py`. Go to [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) at [mautrix-signal](https://github.com/mautrix/signal) (don't forget to change to the correct commit/version of the file) and copy its content, create a `portal.py` on your host system and paste it in. Now you need to change two lines: + [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) + + ```diff + --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 + +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 + ``` + + and add a new line between [Lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) + + ```diff + "type": str(EventType.ROOM_POWER_LEVELS), + +++ "state_key": "", + "content": power_levels.serialize(), + ``` + + Now you just need to map the patched `portal.py` into the `mautrix-signal` container + ```yml + volumes: + - ./////portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py + ``` + and then read below and start following the bridge [installation instructions](https://docs.mau.fi/bridges/index.html). 
## Set up the appservice From 2c8412fe58efa91a467fad34cfa93dca57204b92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 13 Sep 2021 20:11:50 +0200 Subject: [PATCH 0802/1727] improvement: more efficient sqlite --- src/database/abstraction/sqlite.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 06e371e..feac690 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -56,7 +56,7 @@ impl Engine { conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &2000)?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; Ok(conn) } @@ -77,7 +77,7 @@ impl Engine { pub fn flush_wal(self: &Arc) -> Result<()> { self.write_lock() - .pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; + .pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; Ok(()) } } From 910ad7fed1dad053f81b721d2d021d320d793bd5 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 5 Sep 2021 00:05:59 +0200 Subject: [PATCH 0803/1727] Get rid of more unnecessary intermediate collections --- src/server_server.rs | 69 +++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index a6557cc..c27ea22 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1396,49 +1396,46 @@ async fn upgrade_outlier_to_timeline_pdu( } if okay { - let fork_states: Vec<_> = extremity_sstatehashes - .into_iter() - .map(|(sstatehash, prev_event)| { - let mut leaf_state = db + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = db + .rooms + .state_full_ids(sstatehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; + .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); + // Now it's the state after the pdu + } - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); - // Now it's the state after the pdu - } + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); - leaf_state - .into_iter() - .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id)))) - .collect::>>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned()) - }) - .collect::>()?; + for (k, id) in leaf_state { + let k = db + .rooms + .get_statekey_from_short(k) + .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + + state.insert(k, (*id).clone()); + starting_events.push(id); + } - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { 
auth_chain_sets.push( - get_auth_chain( - &room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).clone()) - .collect(), + get_auth_chain(&room_id, starting_events, db) + .map_err(|_| "Failed to load auth chain.".to_owned())? + .map(|event_id| (*event_id).clone()) + .collect(), ); - } - let fork_states = &fork_states - .into_iter() - .map(|map| map.into_iter().map(|(k, id)| (k, (*id).clone())).collect()) - .collect::>(); + fork_states.push(state); + } state_at_incoming_event = match state_res::StateResolution::resolve( &room_id, From 979ec6b4fa5cd5b415f56ebc8c59ba742f835eb6 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 1 Sep 2021 15:28:02 +0200 Subject: [PATCH 0804/1727] Upgrade ruma --- Cargo.lock | 54 ++++++++++------------------- Cargo.toml | 4 +-- src/client_server/membership.rs | 6 ++-- src/database/rooms.rs | 13 +++---- src/pdu.rs | 30 ++++++++++------ src/server_server.rs | 61 ++++++++++++++++----------------- 6 files changed, 76 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86ef48c..70d7f4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1968,8 +1968,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668031e3108d6a2cfbe6eca271d8698f4593440e71a44afdadcf67ce3cb93c1f" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "assign", "js_int", @@ -1990,8 +1989,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f1843792b6749ec1ece62595cf99ad30bf9589c96bb237515235e71da396ea" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "bytes", "http", @@ -2007,8 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b18abda5cca94178d08b622bca042e1cbb5eb7d4ebf3a2a81590a3bb3c57008" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2019,8 +2016,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49369332a5f299e832e19661f92d49e08c345c3c6c4ab16e09cb31c5ff6da878" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "ruma-api", "ruma-common", @@ -2034,8 +2030,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9568a222c12cf6220e751484ab78feec28071f85965113a5bb802936a2920ff0" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "assign", "bytes", @@ -2055,8 +2050,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d5b7605f58dc0d9cf1848cc7f1af2bae4e4bcd1d2b7a87bbb9864c8a785b91" +source = 
"git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "indexmap", "js_int", @@ -2071,8 +2065,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87801e1207cfebdee02e7997ebf181a1c9837260b78c1b8ce96b896a2bcb3763" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "indoc", "js_int", @@ -2088,8 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da4498845347de88adf1b7da4578e2ca7355ad4ce47b0976f6594bacf958660" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2100,8 +2092,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa3d1db1a064ab26484df6ef5d96c384fc053022004f34d96c3b4939e13dc204" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "js_int", "ruma-api", @@ -2116,8 +2107,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb417d091e8dd5a633e4e5998231a156049d7fcc221045cfdc0642eb72067732" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "paste", "rand 0.8.4", @@ -2131,8 +2121,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c708edad7f605638f26c951cbad7501fbf28ab01009e5ca65ea5a2db74a882b1" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2142,14 +2131,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42285e7fb5d5f2d5268e45bb683e36d5c6fd9fc1e11a4559ba3c3521f3bbb2cb" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e76e66e24f2d5a31511fbf6c79e79f67a7a6a98ebf48d72381b7d5bb6c09f035" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "js_int", "ruma-api", @@ -2162,8 +2149,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef5b29da7065efc5b1e1a8f61add7543c9ab4ecce5ee0dd1c1c5ecec83fbeec" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "js_int", "ruma-api", @@ -2178,8 +2164,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2b22aae842e7ecda695e42b7b39d4558959d9d9a27acc2a16acf4f4f7f00c3" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "bytes", "form_urlencoded", @@ -2193,8 +2178,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243e9bef188b08f94c79bc2f8fd1eb307a9e636b2b8e4571acf8c7be16381d28" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2205,8 +2189,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a4f64027165b59500162d10d435b1253898bf3ad4f5002cb0d56913fe7f76d7" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2223,8 +2206,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518c1afbddfcc5ffac8818a5cf0902709e6eca11aca8f24f6479df6f0601f1ba" +source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 04887d0..593a1fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,8 +18,8 @@ edition = "2018" rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests # Used for matrix spec type definitions and helpers -ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "a6a1224652912a957b09f136ec5da2686be6e0e2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 52e074c..01c19c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -976,10 +976,10 @@ pub(crate) async fn invite_helper<'a>( let auth_check = state_res::auth_check( 
&room_version, - &Arc::new(pdu.clone()), + &pdu, create_prev_event, - None, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|e| { error!("{:?}", e); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4245483..51023ba 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -252,12 +252,7 @@ impl Rooms { return Ok(HashMap::new()); }; - let auth_events = state_res::auth_types_for_event( - kind, - sender, - state_key.map(|s| s.to_string()), - content.clone(), - ); + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, &content); let mut sauthevents = auth_events .into_iter() @@ -2046,10 +2041,10 @@ impl Rooms { let auth_check = state_res::auth_check( &room_version, - &Arc::new(pdu.clone()), + &pdu, create_prev_event, - None, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|e| { error!("{:?}", e); diff --git a/src/pdu.rs b/src/pdu.rs index 1016fe6..8623b1a 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -260,37 +260,47 @@ impl state_res::Event for PduEvent { fn sender(&self) -> &UserId { &self.sender } - fn kind(&self) -> EventType { - self.kind.clone() + + fn event_type(&self) -> &EventType { + &self.kind } - fn content(&self) -> serde_json::Value { - self.content.clone() + fn content(&self) -> &serde_json::Value { + &self.content } + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch(self.origin_server_ts) } - fn state_key(&self) -> Option { - self.state_key.clone() + + fn state_key(&self) -> Option<&str> { + self.state_key.as_deref() } - fn prev_events(&self) -> Vec { - self.prev_events.to_vec() + + fn prev_events(&self) -> Box + '_> { + Box::new(self.prev_events.iter()) } + fn depth(&self) -> &UInt { &self.depth } - fn auth_events(&self) -> Vec { - self.auth_events.to_vec() + + fn auth_events(&self) -> Box + '_> { + Box::new(self.auth_events.iter()) } + fn redacts(&self) -> Option<&EventId> { self.redacts.as_ref() } + fn hashes(&self) -> &EventHash { &self.hashes } + fn signatures(&self) -> BTreeMap, BTreeMap> { self.signatures.clone() } + fn unsigned(&self) -> &BTreeMap { &self.unsigned } diff --git a/src/server_server.rs b/src/server_server.rs index c27ea22..89b9c5c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1065,23 +1065,22 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } - let sorted = - state_res::StateResolution::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - println!("{}", event_id); - Ok(( - 0, - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - ruma::event_id!("$notimportant"), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ println!("{}", event_id); + Ok(( + 0, + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + ruma::event_id!("$notimportant"), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; let mut errors = 0; for prev_id in dbg!(sorted) { @@ -1280,8 +1279,8 @@ fn handle_outlier_pdu<'a>( &room_version, &incoming_pdu, previous_create, - None, // TODO: third party invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|_e| "Auth check failed".to_string())? { @@ -1437,8 +1436,7 @@ async fn upgrade_outlier_to_timeline_pdu( fork_states.push(state); } - state_at_incoming_event = match state_res::StateResolution::resolve( - &room_id, + state_at_incoming_event = match state_res::resolve( room_version_id, &fork_states, auth_chain_sets, @@ -1566,8 +1564,8 @@ async fn upgrade_outlier_to_timeline_pdu( let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.clone(), - None, // TODO: third party invite + previous_create.as_deref(), + None::, // TODO: third party invite |k, s| { db.rooms .get_shortstatekey(&k, &s) @@ -1650,9 +1648,9 @@ async fn upgrade_outlier_to_timeline_pdu( let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create, - None, - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + previous_create.as_deref(), + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|_e| "Auth check failed.".to_owned())?; @@ -1795,8 +1793,7 @@ async fn upgrade_outlier_to_timeline_pdu( .collect::>>() .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - let state = match state_res::StateResolution::resolve( - &room_id, + let state = match state_res::resolve( room_version_id, fork_states, auth_chain_sets, @@ -2773,10 +2770,10 @@ pub fn create_join_event_template_route( let auth_check = state_res::auth_check( &room_version, - &Arc::new(pdu.clone()), - create_prev_event, - None, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), + &pdu, + create_prev_event.as_deref(), + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|e| { error!("{:?}", e); From d68c93b5fa0a7fcbf749fc15cbddd82da29b285d Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 13 Sep 2021 19:45:56 +0200 Subject: [PATCH 0805/1727] Clean up (mostly automated with cargo clippy --fix) --- src/appservice_server.rs | 2 +- src/client_server/account.rs | 24 ++--- src/client_server/alias.rs | 4 +- src/client_server/backup.rs | 39 ++++---- src/client_server/capabilities.rs | 3 +- src/client_server/context.rs | 4 +- src/client_server/device.rs | 22 ++--- src/client_server/keys.rs | 18 ++-- src/client_server/media.rs | 3 +- src/client_server/membership.rs | 66 +++++++------- src/client_server/message.rs | 6 +- src/client_server/presence.rs | 10 +-- src/client_server/profile.rs | 34 +++---- src/client_server/push.rs | 52 ++++------- src/client_server/read_marker.rs | 14 +-- src/client_server/redact.rs | 2 +- src/client_server/room.rs | 32 +++---- src/client_server/search.rs | 4 +- src/client_server/session.rs | 8 +- src/client_server/state.rs | 6 +- src/client_server/to_device.rs | 8 +- src/client_server/typing.rs | 4 +- src/database.rs | 14 +-- src/database/abstraction/sqlite.rs | 4 +- src/database/account_data.rs | 8 +- 
src/database/globals.rs | 2 +- src/database/key_backups.rs | 28 +++--- src/database/media.rs | 5 +- src/database/pusher.rs | 6 +- src/database/rooms.rs | 137 ++++++++++++++--------------- src/database/rooms/edus.rs | 26 +++--- src/database/sending.rs | 20 ++--- src/database/uiaa.rs | 2 +- src/database/users.rs | 22 ++--- src/ruma_wrapper.rs | 4 +- src/server_server.rs | 114 ++++++++++++------------ 36 files changed, 364 insertions(+), 393 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 8be524c..ed886d6 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -21,7 +21,7 @@ where let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); let mut http_request = request - .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) + .try_into_http_request::(destination, SendAccessToken::IfRequired("")) .unwrap() .map(|body| body.freeze()); diff --git a/src/client_server/account.rs b/src/client_server/account.rs index e9300b5..fb33842 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -572,7 +572,7 @@ pub async fn change_password_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_user, + sender_user, sender_device, auth, &uiaainfo, @@ -586,24 +586,24 @@ pub async fn change_password_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } db.users - .set_password(&sender_user, Some(&body.new_password))?; + .set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { // Logout all devices except the current one for id in db .users - .all_device_ids(&sender_user) + .all_device_ids(sender_user) .filter_map(|id| id.ok()) .filter(|id| id != sender_device) { - db.users.remove_device(&sender_user, &id)?; + db.users.remove_device(sender_user, &id)?; } } @@ -664,8 +664,8 @@ pub async fn deactivate_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_user, - &sender_device, + sender_user, + sender_device, auth, &uiaainfo, &db.users, @@ -678,7 +678,7 @@ pub async fn deactivate_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); @@ -688,10 +688,10 @@ pub async fn deactivate_route( // TODO: work over federation invites let all_rooms = db .rooms - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .chain( db.rooms - .rooms_invited(&sender_user) + .rooms_invited(sender_user) .map(|t| t.map(|(r, _)| r)), ) .collect::>(); @@ -726,7 +726,7 @@ pub async fn deactivate_route( state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -734,7 +734,7 @@ pub async fn deactivate_route( } // Remove devices and mark account as deactivated - db.users.deactivate_account(&sender_user)?; + db.users.deactivate_account(sender_user)?; info!("{} deactivated their account", sender_user); diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index c806a9c..129ac16 
100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -112,7 +112,7 @@ pub(crate) async fn get_alias_helper( } let mut room_id = None; - match db.rooms.id_from_alias(&room_alias)? { + match db.rooms.id_from_alias(room_alias)? { Some(r) => room_id = Some(r), None => { for (_id, registration) in db.appservice.all()? { @@ -140,7 +140,7 @@ pub(crate) async fn get_alias_helper( .await .is_ok() { - room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| { + room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") })?); break; diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 259f1a9..bbb8672 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -27,7 +27,7 @@ pub async fn create_backup_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups - .create_backup(&sender_user, &body.algorithm, &db.globals)?; + .create_backup(sender_user, &body.algorithm, &db.globals)?; db.flush()?; @@ -48,7 +48,7 @@ pub async fn update_backup_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .update_backup(&sender_user, &body.version, &body.algorithm, &db.globals)?; + .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; @@ -71,7 +71,7 @@ pub async fn get_latest_backup_route( let (version, algorithm) = db.key_backups - .get_latest_backup(&sender_user)? + .get_latest_backup(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -101,7 +101,7 @@ pub async fn get_backup_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups - .get_backup(&sender_user, &body.version)? + .get_backup(sender_user, &body.version)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Key backup does not exist.", @@ -132,7 +132,7 @@ pub async fn delete_backup_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_backup(&sender_user, &body.version)?; + db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; @@ -172,11 +172,11 @@ pub async fn add_backup_keys_route( for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { db.key_backups.add_key( - &sender_user, + sender_user, &body.version, - &room_id, - &session_id, - &key_data, + room_id, + session_id, + key_data, &db.globals, )? } @@ -223,11 +223,11 @@ pub async fn add_backup_key_sessions_route( for (session_id, key_data) in &body.sessions { db.key_backups.add_key( - &sender_user, + sender_user, &body.version, &body.room_id, - &session_id, - &key_data, + session_id, + key_data, &db.globals, )? 
} @@ -272,7 +272,7 @@ pub async fn add_backup_key_session_route( } db.key_backups.add_key( - &sender_user, + sender_user, &body.version, &body.room_id, &body.session_id, @@ -303,7 +303,7 @@ pub async fn get_backup_keys_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = db.key_backups.get_all(&sender_user, &body.version)?; + let rooms = db.key_backups.get_all(sender_user, &body.version)?; Ok(get_backup_keys::Response { rooms }.into()) } @@ -324,7 +324,7 @@ pub async fn get_backup_key_sessions_route( let sessions = db .key_backups - .get_room(&sender_user, &body.version, &body.room_id)?; + .get_room(sender_user, &body.version, &body.room_id)?; Ok(get_backup_key_sessions::Response { sessions }.into()) } @@ -345,7 +345,7 @@ pub async fn get_backup_key_session_route( let key_data = db .key_backups - .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? + .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Backup key not found for this user's session.", @@ -368,8 +368,7 @@ pub async fn delete_backup_keys_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups - .delete_all_keys(&sender_user, &body.version)?; + db.key_backups.delete_all_keys(sender_user, &body.version)?; db.flush()?; @@ -395,7 +394,7 @@ pub async fn delete_backup_key_sessions_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .delete_room_keys(&sender_user, &body.version, &body.room_id)?; + .delete_room_keys(sender_user, &body.version, &body.room_id)?; db.flush()?; @@ -421,7 +420,7 @@ pub async fn delete_backup_key_session_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups - .delete_room_key(&sender_user, &body.version, &body.room_id, &body.session_id)?; + .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; db.flush()?; diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 2eacd8f..f86b23b 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,5 +1,4 @@ -use crate::ConduitResult; -use crate::Ruma; +use crate::{ConduitResult, Ruma}; use ruma::{ api::client::r0::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index aaae8d6..b2346f5 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -50,7 +50,7 @@ pub async fn get_context_route( let events_before = db .rooms - .pdus_until(&sender_user, &body.room_id, base_token)? + .pdus_until(sender_user, &body.room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -72,7 +72,7 @@ pub async fn get_context_route( let events_after = db .rooms - .pdus_after(&sender_user, &body.room_id, base_token)? + .pdus_after(sender_user, &body.room_id, base_token)? 
.take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 4aa3047..100b591 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -50,7 +50,7 @@ pub async fn get_device_route( let device = db .users - .get_device_metadata(&sender_user, &body.body.device_id)? + .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; Ok(get_device::Response { device }.into()) @@ -72,13 +72,13 @@ pub async fn update_device_route( let mut device = db .users - .get_device_metadata(&sender_user, &body.device_id)? + .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); db.users - .update_device_metadata(&sender_user, &body.device_id, &device)?; + .update_device_metadata(sender_user, &body.device_id, &device)?; db.flush()?; @@ -119,8 +119,8 @@ pub async fn delete_device_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_user, - &sender_device, + sender_user, + sender_device, auth, &uiaainfo, &db.users, @@ -133,13 +133,13 @@ pub async fn delete_device_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users.remove_device(&sender_user, &body.device_id)?; + db.users.remove_device(sender_user, &body.device_id)?; db.flush()?; @@ -182,8 +182,8 @@ pub async fn delete_devices_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_user, - &sender_device, + sender_user, + sender_device, auth, &uiaainfo, &db.users, @@ -196,14 +196,14 @@ pub async fn delete_devices_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } for device_id in &body.devices { - db.users.remove_device(&sender_user, &device_id)? + db.users.remove_device(sender_user, device_id)? 
} db.flush()?; diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 3295e16..a74c409 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -158,8 +158,8 @@ pub async fn upload_signing_keys_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = db.uiaa.try_auth( - &sender_user, - &sender_device, + sender_user, + sender_device, auth, &uiaainfo, &db.users, @@ -172,7 +172,7 @@ pub async fn upload_signing_keys_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); db.uiaa - .create(&sender_user, &sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); @@ -181,7 +181,7 @@ pub async fn upload_signing_keys_route( if let Some(master_key) = &body.master_key { db.users.add_cross_signing_keys( sender_user, - &master_key, + master_key, &body.self_signing_key, &body.user_signing_key, &db.rooms, @@ -242,10 +242,10 @@ pub async fn upload_signatures_route( .to_owned(), ); db.users.sign_key( - &user_id, - &key_id, + user_id, + key_id, signature, - &sender_user, + sender_user, &db.rooms, &db.globals, )?; @@ -359,8 +359,8 @@ pub(crate) async fn get_keys_helper bool>( } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? { - let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or( + if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { + let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 4cec0af..0a7f4bb 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,5 +1,6 @@ use crate::{ - database::media::FileMeta, database::DatabaseGuard, utils, ConduitResult, Error, Ruma, + database::{media::FileMeta, DatabaseGuard}, + utils, ConduitResult, Error, Ruma, }; use ruma::api::client::{ error::ErrorKind, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 01c19c2..10c052e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -56,7 +56,7 @@ pub async fn join_room_by_id_route( let mut servers = db .rooms - .invite_state(&sender_user, &body.room_id)? + .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() .filter_map(|event| { @@ -105,7 +105,7 @@ pub async fn join_room_by_id_or_alias_route( Ok(room_id) => { let mut servers = db .rooms - .invite_state(&sender_user, &room_id)? + .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() .filter_map(|event| { @@ -243,7 +243,7 @@ pub async fn kick_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_user, + sender_user, &body.room_id, &db, &state_lock, @@ -319,7 +319,7 @@ pub async fn ban_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_user, + sender_user, &body.room_id, &db, &state_lock, @@ -384,7 +384,7 @@ pub async fn unban_user_route( state_key: Some(body.user_id.to_string()), redacts: None, }, - &sender_user, + sender_user, &body.room_id, &db, &state_lock, @@ -416,7 +416,7 @@ pub async fn forget_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, &sender_user)?; + db.rooms.forget(&body.room_id, sender_user)?; db.flush()?; @@ -440,7 +440,7 @@ pub async fn joined_rooms_route( Ok(joined_rooms::Response { joined_rooms: db .rooms - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), } @@ -500,7 +500,7 @@ pub async fn joined_members_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(&sender_user, &body.room_id)? { + if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -545,7 +545,7 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { + if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); @@ -606,11 +606,11 @@ async fn join_room_by_id_helper( "content".to_owned(), to_canonical_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_user)?, - avatar_url: db.users.avatar_url(&sender_user)?, + displayname: db.users.displayname(sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: db.users.blurhash(sender_user)?, reason: None, }) .expect("event is valid, we just created it"), @@ -658,7 +658,7 @@ async fn join_room_by_id_helper( ) .await?; - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -670,7 +670,7 @@ async fn join_room_by_id_helper( &send_join_response, &room_version, &pub_key_map, - &db, + db, ) .await?; @@ -678,7 +678,7 @@ async fn join_room_by_id_helper( .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) { let (event_id, value) = match result { Ok(t) => t, @@ -724,14 +724,14 @@ async fn join_room_by_id_helper( .into_iter() .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) .collect::>>()?, - &db, + db, )?; for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) { let (event_id, value) = 
match result { Ok(t) => t, @@ -754,15 +754,15 @@ async fn join_room_by_id_helper( // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - db.rooms.set_room_state(&room_id, statehashid)?; + db.rooms.set_room_state(room_id, statehashid)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_user)?, - avatar_url: db.users.avatar_url(&sender_user)?, + displayname: db.users.displayname(sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: db.users.blurhash(sender_user)?, reason: None, }; @@ -774,9 +774,9 @@ async fn join_room_by_id_helper( state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_user, - &room_id, - &db, + sender_user, + room_id, + db, &state_lock, )?; } @@ -800,7 +800,7 @@ fn validate_and_add_event_id( })?; let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&value, &room_version) + ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -927,7 +927,7 @@ pub(crate) async fn invite_helper<'a>( let auth_events = db.rooms.get_auth_events( room_id, &kind, - &sender_user, + sender_user, Some(&state_key), &content, )?; @@ -1074,10 +1074,10 @@ pub(crate) async fn invite_helper<'a>( let pdu_id = server_server::handle_incoming_pdu( &origin, &event_id, - &room_id, + room_id, value, true, - &db, + db, &pub_key_map, ) .await @@ -1119,11 +1119,11 @@ pub(crate) async fn invite_helper<'a>( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Invite, - displayname: db.users.displayname(&user_id)?, - avatar_url: db.users.avatar_url(&user_id)?, + displayname: db.users.displayname(user_id)?, + avatar_url: db.users.avatar_url(user_id)?, is_direct: Some(is_direct), third_party_invite: None, - blurhash: db.users.blurhash(&user_id)?, + blurhash: db.users.blurhash(user_id)?, reason: None, }) .expect("event is valid, we just created it"), @@ -1131,9 +1131,9 @@ pub(crate) async fn invite_helper<'a>( state_key: Some(user_id.to_string()), redacts: None, }, - &sender_user, + sender_user, room_id, - &db, + db, &state_lock, )?; diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 78008a5..93ead2c 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -79,7 +79,7 @@ pub async fn send_message_event_route( state_key: None, redacts: None, }, - &sender_user, + sender_user, &body.room_id, &db, &state_lock, @@ -141,7 +141,7 @@ pub async fn get_message_events_route( get_message_events::Direction::Forward => { let events_after = db .rooms - .pdus_after(&sender_user, &body.room_id, from)? + .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { @@ -171,7 +171,7 @@ pub async fn get_message_events_route( get_message_events::Direction::Backward => { let events_before = db .rooms - .pdus_until(&sender_user, &body.room_id, from)? + .pdus_until(sender_user, &body.room_id, from)? 
.take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 54eb210..aaa78a9 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -19,17 +19,17 @@ pub async fn set_presence_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in db.rooms.rooms_joined(&sender_user) { + for room_id in db.rooms.rooms_joined(sender_user) { let room_id = room_id?; db.rooms.edus.update_presence( - &sender_user, + sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_user)?, + displayname: db.users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -76,7 +76,7 @@ pub async fn get_presence_route( if let Some(presence) = db .rooms .edus - .get_last_presence_event(&sender_user, &room_id)? + .get_last_presence_event(sender_user, &room_id)? { presence_event = Some(presence); break; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 5a8c7d2..ab7fb02 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -34,12 +34,12 @@ pub async fn set_displayname_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users - .set_displayname(&sender_user, body.displayname.clone())?; + .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms let all_rooms_joined: Vec<_> = db .rooms - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -89,19 +89,19 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = - db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock); + let _ = db + .rooms + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); // Presence update db.rooms.edus.update_presence( - &sender_user, + sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_user)?, + displayname: db.users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -177,14 +177,14 @@ pub async fn set_avatar_url_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users - .set_avatar_url(&sender_user, body.avatar_url.clone())?; + .set_avatar_url(sender_user, body.avatar_url.clone())?; - db.users.set_blurhash(&sender_user, body.blurhash.clone())?; + db.users.set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = db .rooms - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( @@ -234,19 +234,19 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = - db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock); + let _ = db + .rooms + 
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); // Presence update db.rooms.edus.update_presence( - &sender_user, + sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(&sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(&sender_user)?, + displayname: db.users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 4e4611b..98555d0 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -31,7 +31,7 @@ pub async fn get_pushrules_all_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -59,7 +59,7 @@ pub async fn get_pushrule_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -124,7 +124,7 @@ pub async fn set_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -193,13 +193,8 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data.update( - None, - &sender_user, - EventType::PushRules, - &event, - &db.globals, - )?; + db.account_data + .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; db.flush()?; @@ -229,7 +224,7 @@ pub async fn get_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -291,7 +286,7 @@ pub async fn set_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -332,13 +327,8 @@ pub async fn set_pushrule_actions_route( _ => {} }; - db.account_data.update( - None, - &sender_user, - EventType::PushRules, - &event, - &db.globals, - )?; + db.account_data + .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; db.flush()?; @@ -368,7 +358,7 @@ pub async fn get_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -432,7 +422,7 @@ pub async fn set_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -478,13 +468,8 @@ pub async fn set_pushrule_enabled_route( _ => {} } - db.account_data.update( - None, - &sender_user, - EventType::PushRules, - &event, - &db.globals, - )?; + db.account_data + .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; db.flush()?; @@ -514,7 +499,7 @@ pub async fn delete_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -550,13 +535,8 @@ pub async fn delete_pushrule_route( _ => {} } - db.account_data.update( - None, - &sender_user, - EventType::PushRules, - &event, - &db.globals, - )?; + db.account_data + .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; db.flush()?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 10298b9..60aa4ce 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -37,7 +37,7 @@ pub async fn set_read_marker_route( }; db.account_data.update( Some(&body.room_id), - &sender_user, + sender_user, EventType::FullyRead, &fully_read_event, &db.globals, @@ -46,7 +46,7 @@ pub async fn set_read_marker_route( if let Some(event) = &body.read_receipt { db.rooms.edus.private_read_set( &body.room_id, - &sender_user, + sender_user, db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", @@ -54,7 +54,7 @@ pub async fn set_read_marker_route( &db.globals, )?; db.rooms - .reset_notification_counts(&sender_user, &body.room_id)?; + .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -71,7 +71,7 @@ pub async fn set_read_marker_route( receipt_content.insert(event.to_owned(), receipts); db.rooms.edus.readreceipt_update( - &sender_user, + sender_user, &body.room_id, AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), @@ -102,7 +102,7 @@ pub async fn create_receipt_route( db.rooms.edus.private_read_set( &body.room_id, - &sender_user, + sender_user, db.rooms .get_pdu_count(&body.event_id)? 
.ok_or(Error::BadRequest( @@ -112,7 +112,7 @@ pub async fn create_receipt_route( &db.globals, )?; db.rooms - .reset_notification_counts(&sender_user, &body.room_id)?; + .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -128,7 +128,7 @@ pub async fn create_receipt_route( receipt_content.insert(body.event_id.to_owned(), receipts); db.rooms.edus.readreceipt_update( - &sender_user, + sender_user, &body.room_id, AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 6d3e33c..4b5219b 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -46,7 +46,7 @@ pub async fn redact_event_route( state_key: None, redacts: Some(body.event_id.clone()), }, - &sender_user, + sender_user, &body.room_id, &db, &state_lock, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 4ae8a3f..5a02699 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -106,7 +106,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -118,11 +118,11 @@ pub async fn create_room_route( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_user)?, - avatar_url: db.users.avatar_url(&sender_user)?, + displayname: db.users.displayname(sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: db.users.blurhash(sender_user)?, reason: None, }) .expect("event is valid, we just created it"), @@ -130,7 +130,7 @@ pub async fn create_room_route( state_key: Some(sender_user.to_string()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -185,7 +185,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -207,7 +207,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -235,7 +235,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -253,7 +253,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -279,7 +279,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -298,7 +298,7 @@ pub async fn create_room_route( } db.rooms - .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &state_lock)?; + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?; } // 7. 
Events implied by name and topic @@ -312,7 +312,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -331,7 +331,7 @@ pub async fn create_room_route( state_key: Some("".to_owned()), redacts: None, }, - &sender_user, + sender_user, &room_id, &db, &state_lock, @@ -551,11 +551,11 @@ pub async fn upgrade_room_route( event_type: EventType::RoomMember, content: serde_json::to_value(member::MemberEventContent { membership: member::MembershipState::Join, - displayname: db.users.displayname(&sender_user)?, - avatar_url: db.users.avatar_url(&sender_user)?, + displayname: db.users.displayname(sender_user)?, + avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&sender_user)?, + blurhash: db.users.blurhash(sender_user)?, reason: None, }) .expect("event is valid, we just created it"), diff --git a/src/client_server/search.rs b/src/client_server/search.rs index cbd4ed7..9ff1a1b 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -27,7 +27,7 @@ pub async fn search_events_route( let room_ids = filter.rooms.clone().unwrap_or_else(|| { db.rooms - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() }); @@ -88,7 +88,7 @@ pub async fn search_events_route( rank: None, result: db .rooms - .get_pdu_from_id(&result)? + .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), }) }) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 9472627..b42689d 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -100,8 +100,8 @@ pub async fn login_route( login::IncomingLoginInfo::Token { token } => { if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( - &token, - &jwt_decoding_key, + token, + jwt_decoding_key, &jsonwebtoken::Validation::default(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; @@ -179,7 +179,7 @@ pub async fn logout_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - db.users.remove_device(&sender_user, sender_device)?; + db.users.remove_device(sender_user, sender_device)?; db.flush()?; @@ -209,7 +209,7 @@ pub async fn logout_all_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for device_id in db.users.all_device_ids(sender_user).flatten() { - db.users.remove_device(&sender_user, &device_id)?; + db.users.remove_device(sender_user, &device_id)?; } db.flush()?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 3555353..24cc2a1 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -308,9 +308,9 @@ async fn send_state_event_for_key_helper( state_key: Some(state_key), redacts: None, }, - &sender_user, - &room_id, - &db, + sender_user, + room_id, + db, &state_lock, )?; diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index bf2caef..177b123 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -68,8 +68,8 @@ pub async fn send_event_to_device_route( match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( sender_user, - &target_user_id, - &target_device_id, + target_user_id, + target_device_id, &body.event_type, event.deserialize_as().map_err(|_| 
{ Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") @@ -78,10 +78,10 @@ pub async fn send_event_to_device_route( )?, DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(&target_user_id) { + for target_device_id in db.users.all_device_ids(target_user_id) { db.users.add_to_device_event( sender_user, - &target_user_id, + target_user_id, &target_device_id?, &body.event_type, event.deserialize_as().map_err(|_| { diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 4cf4bb1..15e74b3 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -21,7 +21,7 @@ pub fn create_typing_event_route( if let Typing::Yes(duration) = body.state { db.rooms.edus.typing_add( - &sender_user, + sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), &db.globals, @@ -29,7 +29,7 @@ pub fn create_typing_event_route( } else { db.rooms .edus - .typing_remove(&sender_user, &body.room_id, &db.globals)?; + .typing_remove(sender_user, &body.room_id, &db.globals)?; } Ok(create_typing_event::Response {}.into()) diff --git a/src/database.rs b/src/database.rs index dcba2ab..110d4d0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -196,14 +196,14 @@ impl Database { /// Load an existing database or create a new one. pub async fn load_or_create(config: &Config) -> Result>> { - Self::check_sled_or_sqlite_db(&config)?; + Self::check_sled_or_sqlite_db(config)?; if !Path::new(&config.database_path).exists() { std::fs::create_dir_all(&config.database_path) .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } - let builder = Engine::open(&config)?; + let builder = Engine::open(config)?; if config.max_request_size < 1024 { eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); @@ -618,7 +618,7 @@ impl Database { let short_room_id = db .rooms .roomid_shortroomid - .get(&room_id) + .get(room_id) .unwrap() .expect("shortroomid should exist"); @@ -641,7 +641,7 @@ impl Database { let short_room_id = db .rooms .roomid_shortroomid - .get(&room_id) + .get(room_id) .unwrap() .expect("shortroomid should exist"); @@ -677,7 +677,7 @@ impl Database { let short_room_id = db .rooms .roomid_shortroomid - .get(&room_id) + .get(room_id) .unwrap() .expect("shortroomid should exist"); let mut new_key = short_room_id; @@ -757,7 +757,7 @@ impl Database { #[cfg(feature = "sqlite")] { - Self::start_wal_clean_task(Arc::clone(&db), &config).await; + Self::start_wal_clean_task(Arc::clone(&db), config).await; } Ok(db) @@ -964,7 +964,7 @@ impl<'r> FromRequest<'r> for DatabaseGuard { async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { let db = try_outcome!(req.guard::<&State>>>().await); - Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(()) + Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(()) } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 06e371e..d1b7b5d 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -192,7 +192,7 @@ impl SqliteTable { impl Tree for SqliteTable { #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { - self.get_with_guard(&self.engine.read_lock(), key) + self.get_with_guard(self.engine.read_lock(), key) } #[tracing::instrument(skip(self, key, value))] @@ -275,7 +275,7 @@ impl Tree for SqliteTable { fn iter<'a>(&'a self) -> Box + 'a> { let guard = self.engine.read_lock_iterator(); - self.iter_with_guard(&guard) + self.iter_with_guard(guard) } #[tracing::instrument(skip(self, from, backwards))] diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 1a8ad76..456283b 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -32,13 +32,13 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.as_bytes()); + prefix.extend_from_slice(user_id.as_bytes()); prefix.push(0xff); let mut roomuserdataid = prefix.clone(); roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(&event_type.as_bytes()); + roomuserdataid.extend_from_slice(event_type.as_bytes()); let mut key = prefix; key.extend_from_slice(event_type.as_bytes()); @@ -83,7 +83,7 @@ impl AccountData { .as_bytes() .to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.as_bytes()); + key.extend_from_slice(user_id.as_bytes()); key.push(0xff); key.extend_from_slice(kind.as_ref().as_bytes()); @@ -118,7 +118,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.as_bytes()); + prefix.extend_from_slice(user_id.as_bytes()); prefix.push(0xff); // Skip the data that's exactly at since, because we sent that last time diff --git a/src/database/globals.rs b/src/database/globals.rs index 048b9b8..2f1b45a 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -113,7 +113,7 @@ impl Globals { .map(|key| (version, key)) }) .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(&key, version) + ruma::signatures::Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 
3315be3..27d8030 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -27,7 +27,7 @@ impl KeyBackups { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); self.backupid_algorithm.insert( &key, @@ -41,7 +41,7 @@ impl KeyBackups { pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); self.backupid_algorithm.remove(&key)?; self.backupid_etag.remove(&key)?; @@ -64,7 +64,7 @@ impl KeyBackups { ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); if self.backupid_algorithm.get(&key)?.is_none() { return Err(Error::BadRequest( @@ -75,7 +75,7 @@ impl KeyBackups { self.backupid_algorithm.insert( &key, - &serde_json::to_string(backup_metadata) + serde_json::to_string(backup_metadata) .expect("BackupAlgorithm::to_string always works") .as_bytes(), )?; @@ -192,7 +192,7 @@ impl KeyBackups { pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); Ok(utils::u64_from_bytes( &self @@ -223,7 +223,7 @@ impl KeyBackups { let mut parts = key.rsplit(|&b| b == 0xff); let session_id = - utils::string_from_bytes(&parts.next().ok_or_else(|| { + utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) .map_err(|_| { @@ -231,7 +231,7 @@ impl KeyBackups { })?; let room_id = RoomId::try_from( - utils::string_from_bytes(&parts.next().ok_or_else(|| { + utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, @@ -280,7 +280,7 @@ impl KeyBackups { let mut parts = key.rsplit(|&b| b == 0xff); let session_id = - utils::string_from_bytes(&parts.next().ok_or_else(|| { + utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) 
.map_err(|_| { @@ -325,7 +325,7 @@ impl KeyBackups { pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); key.push(0xff); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { @@ -343,9 +343,9 @@ impl KeyBackups { ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { @@ -364,11 +364,11 @@ impl KeyBackups { ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&version.as_bytes()); + key.extend_from_slice(version.as_bytes()); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&session_id.as_bytes()); + key.extend_from_slice(session_id.as_bytes()); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { self.backupkeyid_backup.remove(&outdated_key)?; diff --git a/src/database/media.rs b/src/database/media.rs index a9bb42b..4663013 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -4,7 +4,10 @@ use image::{imageops::FilterType, GenericImageView}; use super::abstraction::Tree; use crate::{utils, Error, Result}; use std::{mem, sync::Arc}; -use tokio::{fs::File, io::AsyncReadExt, io::AsyncWriteExt}; +use tokio::{ + fs::File, + io::{AsyncReadExt, AsyncWriteExt}, +}; pub struct FileMeta { pub content_disposition: Option, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index da4a6e7..b19f339 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -236,7 +236,7 @@ pub fn get_actions<'a>( member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users - .displayname(&user)? + .displayname(user)? 
.unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users.clone(), default_power_level: power_levels.users_default, @@ -302,7 +302,7 @@ async fn send_notice( if event_id_only { send_request( &db.globals, - &url, + url, send_event_notification::v1::Request::new(notifi), ) .await?; @@ -332,7 +332,7 @@ async fn send_notice( send_request( &db.globals, - &url, + url, send_event_notification::v1::Request::new(notifi), ) .await?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 51023ba..b272a5c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -252,7 +252,7 @@ impl Rooms { return Ok(HashMap::new()); }; - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, &content); + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content); let mut sauthevents = auth_events .into_iter() @@ -339,7 +339,7 @@ impl Rooms { new_state_ids_compressed: HashSet, db: &Database, ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(&room_id)?; + let previous_shortstatehash = self.current_shortstatehash(room_id)?; let state_hash = self.calculate_hash( &new_state_ids_compressed @@ -424,7 +424,7 @@ impl Rooms { } } - self.update_joined_count(room_id, &db)?; + self.update_joined_count(room_id, db)?; self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; @@ -704,7 +704,7 @@ impl Rooms { event_id: &EventId, globals: &super::globals::Globals, ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(&event_id) { + if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { return Ok(*short); } @@ -732,7 +732,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid - .get(&room_id.as_bytes())? + .get(room_id.as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) @@ -757,7 +757,7 @@ impl Rooms { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + statekey.extend_from_slice(state_key.as_bytes()); let short = self .statekey_shortstatekey @@ -784,13 +784,13 @@ impl Rooms { room_id: &RoomId, globals: &super::globals::Globals, ) -> Result { - Ok(match self.roomid_shortroomid.get(&room_id.as_bytes())? { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { Some(short) => utils::u64_from_bytes(&short) .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, None => { let short = globals.next_count()?; self.roomid_shortroomid - .insert(&room_id.as_bytes(), &short.to_be_bytes())?; + .insert(room_id.as_bytes(), &short.to_be_bytes())?; short } }) @@ -814,7 +814,7 @@ impl Rooms { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); - statekey.extend_from_slice(&state_key.as_bytes()); + statekey.extend_from_slice(state_key.as_bytes()); let short = match self.statekey_shortstatekey.get(&statekey)? { Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) @@ -891,12 +891,12 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; let event_type = - EventType::try_from(utils::string_from_bytes(&eventtype_bytes).map_err(|_| { + EventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") })?) 
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - let state_key = utils::string_from_bytes(&statekey_bytes).map_err(|_| { + let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") })?; @@ -956,10 +956,8 @@ impl Rooms { /// Returns the `count` of this pdu's id. #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - Ok( - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?, - ) + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } /// Returns the `count` of this pdu's id. @@ -1076,7 +1074,7 @@ impl Rooms { /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. #[tracing::instrument(skip(self))] pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(&event_id) { + if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { return Ok(Some(Arc::clone(p))); } @@ -1138,9 +1136,9 @@ impl Rooms { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(&pdu_id)?.is_some() { + if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( - &pdu_id, + pdu_id, &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), )?; Ok(()) @@ -1225,20 +1223,20 @@ impl Rooms { #[tracing::instrument(skip(self, pdu))] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( - &event_id.as_bytes(), + event_id.as_bytes(), &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), ) } #[tracing::instrument(skip(self))] pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(&event_id.as_bytes(), &[]) + self.softfailedeventids.insert(event_id.as_bytes(), &[]) } #[tracing::instrument(skip(self))] pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { self.softfailedeventids - .get(&event_id.as_bytes()) + .get(event_id.as_bytes()) .map(|o| o.is_some()) } @@ -1268,7 +1266,7 @@ impl Rooms { { if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind, &state_key) + .state_get(shortstatehash, &pdu.kind, state_key) .unwrap() { unsigned.insert( @@ -1350,15 +1348,15 @@ impl Rooms { let rules_for_user = db .account_data - .get::(None, &user, EventType::PushRules)? + .get::(None, user, EventType::PushRules)? 
.map(|ev| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(&user)); + .unwrap_or_else(|| push::Ruleset::server_default(user)); let mut highlight = false; let mut notify = false; for action in pusher::get_actions( - &user, + user, &rules_for_user, &power_levels, &sync_pdu, @@ -1388,7 +1386,7 @@ impl Rooms { highlights.push(userroom_id); } - for senderkey in db.pusher.get_pusher_senderkeys(&user) { + for senderkey in db.pusher.get_pusher_senderkeys(user) { db.sending.send_push_pdu(&*pdu_id, senderkey)?; } } @@ -1401,7 +1399,7 @@ impl Rooms { match pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(&redact_id, &pdu)?; + self.redact_pdu(redact_id, pdu)?; } } EventType::RoomMember => { @@ -1741,9 +1739,9 @@ impl Rooms { state_ids_compressed: HashSet, globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?; + let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - let previous_shortstatehash = self.current_shortstatehash(&room_id)?; + let previous_shortstatehash = self.current_shortstatehash(room_id)?; let state_hash = self.calculate_hash( &state_ids_compressed @@ -1815,7 +1813,7 @@ impl Rooms { .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; let shortstatekey = - self.get_or_create_shortstatekey(&new_pdu.kind, &state_key, globals)?; + self.get_or_create_shortstatekey(&new_pdu.kind, state_key, globals)?; let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; @@ -1840,7 +1838,7 @@ impl Rooms { let mut statediffremoved = HashSet::new(); if let Some(replaces) = replaces { - statediffremoved.insert(replaces.clone()); + statediffremoved.insert(*replaces); } self.save_state_from_diff( @@ -1953,12 +1951,12 @@ impl Rooms { } = pdu_builder; let prev_events = self - .get_pdu_leaves(&room_id)? + .get_pdu_leaves(room_id)? .into_iter() .take(20) .collect::>(); - let create_event = self.room_state_get(&room_id, &EventType::RoomCreate, "")?; + let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?; let create_event_content = create_event .as_ref() @@ -1988,13 +1986,8 @@ impl Rooms { }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - let auth_events = self.get_auth_events( - &room_id, - &event_type, - &sender, - state_key.as_deref(), - &content, - )?; + let auth_events = + self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events @@ -2006,7 +1999,7 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { + if let Some(prev_pdu) = self.room_state_get(room_id, &event_type, state_key)? 
{ unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), @@ -2109,7 +2102,7 @@ impl Rooms { // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - self.set_room_state(&room_id, statehashid)?; + self.set_room_state(room_id, statehashid)?; for server in self .room_servers(room_id) @@ -2154,10 +2147,10 @@ impl Rooms { && pdu .state_key .as_ref() - .map_or(false, |state_key| users.is_match(&state_key)) + .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - self.room_aliases(&room_id) + self.room_aliases(room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -2300,7 +2293,7 @@ impl Rooms { let mut pdu = self .get_pdu_from_id(&pdu_id)? .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(&reason)?; + pdu.redact(reason)?; self.replace_pdu(&pdu_id, &pdu)?; Ok(()) } else { @@ -2348,13 +2341,13 @@ impl Rooms { match &membership { member::MembershipState::Join => { // Check if the user never joined this room - if !self.once_joined(&user_id, &room_id)? { + if !self.once_joined(user_id, room_id)? { // Add the user ID to the join list then self.roomuseroncejoinedids.insert(&userroom_id, &[])?; // Check if the room has a predecessor if let Some(predecessor) = self - .room_state_get(&room_id, &EventType::RoomCreate, "")? + .room_state_get(room_id, &EventType::RoomCreate, "")? .and_then(|create| { serde_json::from_value::< Raw, @@ -2455,12 +2448,12 @@ impl Rooms { let is_ignored = db .account_data .get::( - None, // Ignored users are in global account data - &user_id, // Receiver + None, // Ignored users are in global account data + user_id, // Receiver EventType::IgnoredUserList, )? 
.map_or(false, |ignored| { - ignored.content.ignored_users.contains(&sender) + ignored.content.ignored_users.contains(sender) }); if is_ignored { @@ -2522,7 +2515,7 @@ impl Rooms { let mut joined_servers = HashSet::new(); let mut real_users = HashSet::new(); - for joined in self.room_members(&room_id).filter_map(|r| r.ok()) { + for joined in self.room_members(room_id).filter_map(|r| r.ok()) { joined_servers.insert(joined.server_name().to_owned()); if joined.server_name() == db.globals.server_name() && !db.users.is_deactivated(&joined).unwrap_or(true) @@ -2532,7 +2525,7 @@ impl Rooms { joinedcount += 1; } - for invited in self.room_members_invited(&room_id).filter_map(|r| r.ok()) { + for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { joined_servers.insert(invited.server_name().to_owned()); invitedcount += 1; } @@ -2601,7 +2594,7 @@ impl Rooms { if let Some(users) = maybe { Ok(users) } else { - self.update_joined_count(room_id, &db)?; + self.update_joined_count(room_id, db)?; Ok(Arc::clone( self.our_real_users_cache .read() @@ -2650,7 +2643,7 @@ impl Rooms { let in_room = bridge_user_id .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(&room_id).any(|userid| { + || self.room_members(room_id).any(|userid| { userid.map_or(false, |userid| { users.iter().any(|r| r.is_match(userid.as_str())) }) @@ -2890,21 +2883,21 @@ impl Rooms { if let Some(room_id) = room_id { // New alias self.alias_roomid - .insert(&alias.alias().as_bytes(), room_id.as_bytes())?; + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; } else { // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(&alias.alias().as_bytes())? { + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { let mut prefix = room_id.to_vec(); prefix.push(0xff); for (key, _) in self.aliasid_alias.scan_prefix(prefix) { self.aliasid_alias.remove(&key)?; } - self.alias_roomid.remove(&alias.alias().as_bytes())?; + self.alias_roomid.remove(alias.alias().as_bytes())?; } else { return Err(Error::BadRequest( ErrorKind::NotFound, @@ -3077,7 +3070,7 @@ impl Rooms { self.roomserverids.scan_prefix(prefix).map(|(key, _)| { Box::::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3110,7 +3103,7 @@ impl Rooms { self.serverroomids.scan_prefix(prefix).map(|(key, _)| { RoomId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3132,7 +3125,7 @@ impl Rooms { self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { UserId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3146,26 +3139,24 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - Ok(self - .roomid_joinedcount + self.roomid_joinedcount .get(room_id.as_bytes())? .map(|b| { utils::u64_from_bytes(&b) .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) }) - .transpose()?) 
+ .transpose() } #[tracing::instrument(skip(self))] pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - Ok(self - .roomid_invitedcount + self.roomid_invitedcount .get(room_id.as_bytes())? .map(|b| { utils::u64_from_bytes(&b) .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) }) - .transpose()?) + .transpose() } /// Returns an iterator over all User IDs who ever joined a room. @@ -3182,7 +3173,7 @@ impl Rooms { .map(|(key, _)| { UserId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3208,7 +3199,7 @@ impl Rooms { .map(|(key, _)| { UserId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3261,7 +3252,7 @@ impl Rooms { .map(|(key, _)| { RoomId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3287,7 +3278,7 @@ impl Rooms { .map(|(key, state)| { let room_id = RoomId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) @@ -3312,7 +3303,7 @@ impl Rooms { ) -> Result>>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); self.userroomid_invitestate .get(&key)? @@ -3333,7 +3324,7 @@ impl Rooms { ) -> Result>>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); self.userroomid_leftstate .get(&key)? @@ -3360,7 +3351,7 @@ impl Rooms { .map(|(key, state)| { let room_id = RoomId::try_from( utils::string_from_bytes( - &key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 14146fb..e0639ff 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -60,7 +60,7 @@ impl RoomEdus { let mut room_latest_id = prefix; room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); room_latest_id.push(0xff); - room_latest_id.extend_from_slice(&user_id.as_bytes()); + room_latest_id.extend_from_slice(user_id.as_bytes()); self.readreceiptid_readreceipt.insert( &room_latest_id, @@ -126,7 +126,7 @@ impl RoomEdus { ) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.as_bytes()); + key.extend_from_slice(user_id.as_bytes()); self.roomuserid_privateread .insert(&key, &count.to_be_bytes())?; @@ -142,7 +142,7 @@ impl RoomEdus { pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.as_bytes()); + key.extend_from_slice(user_id.as_bytes()); self.roomuserid_privateread .get(&key)? 
@@ -157,7 +157,7 @@ impl RoomEdus { pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.as_bytes()); + key.extend_from_slice(user_id.as_bytes()); Ok(self .roomuserid_lastprivatereadupdate @@ -193,7 +193,7 @@ impl RoomEdus { .insert(&room_typing_id, &*user_id.as_bytes())?; self.roomid_lasttypingupdate - .insert(&room_id.as_bytes(), &count)?; + .insert(room_id.as_bytes(), &count)?; Ok(()) } @@ -224,7 +224,7 @@ impl RoomEdus { if found_outdated { self.roomid_lasttypingupdate - .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -268,7 +268,7 @@ impl RoomEdus { if found_outdated { self.roomid_lasttypingupdate - .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -285,7 +285,7 @@ impl RoomEdus { Ok(self .roomid_lasttypingupdate - .get(&room_id.as_bytes())? + .get(room_id.as_bytes())? .map_or(Ok::<_, Error>(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -342,7 +342,7 @@ impl RoomEdus { presence_id.push(0xff); presence_id.extend_from_slice(&count); presence_id.push(0xff); - presence_id.extend_from_slice(&presence.sender.as_bytes()); + presence_id.extend_from_slice(presence.sender.as_bytes()); self.presenceid_presence.insert( &presence_id, @@ -361,7 +361,7 @@ impl RoomEdus { #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( - &user_id.as_bytes(), + user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -371,7 +371,7 @@ impl RoomEdus { /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. pub fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_lastpresenceupdate - .get(&user_id.as_bytes())? + .get(user_id.as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") @@ -394,7 +394,7 @@ impl RoomEdus { presence_id.push(0xff); presence_id.extend_from_slice(&last_update.to_be_bytes()); presence_id.push(0xff); - presence_id.extend_from_slice(&user_id.as_bytes()); + presence_id.extend_from_slice(user_id.as_bytes()); self.presenceid_presence .get(&presence_id)? 
@@ -480,7 +480,7 @@ impl RoomEdus { } self.userid_lastpresenceupdate.insert( - &user_id.as_bytes(), + user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; } diff --git a/src/database/sending.rs b/src/database/sending.rs index 1050c07..c14f581 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -58,9 +58,9 @@ impl OutgoingKind { } OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&user); + p.extend_from_slice(user); p.push(0xff); - p.extend_from_slice(&pushkey); + p.extend_from_slice(pushkey); p } OutgoingKind::Normal(server) => { @@ -179,8 +179,8 @@ impl Sending { // Insert pdus we found for (e, key) in &new_events { let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - guard.sending.servercurrentevent_data.insert(&key, value).unwrap(); - guard.sending.servernameevent_data.remove(&key).unwrap(); + guard.sending.servercurrentevent_data.insert(key, value).unwrap(); + guard.sending.servernameevent_data.remove(key).unwrap(); } drop(guard); @@ -345,7 +345,7 @@ impl Sending { } let event = - serde_json::from_str::(&read_receipt.json().get()) + serde_json::from_str::(read_receipt.json().get()) .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; let federation_event = match event { AnySyncEphemeralRoomEvent::Receipt(r) => { @@ -486,7 +486,7 @@ impl Sending { match event { SendingEventType::Pdu(pdu_id) => { pdu_jsons.push(db.rooms - .get_pdu_from_id(&pdu_id) + .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -543,7 +543,7 @@ impl Sending { SendingEventType::Pdu(pdu_id) => { pdus.push( db.rooms - .get_pdu_from_id(&pdu_id) + .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -636,7 +636,7 @@ impl Sending { // TODO: check room version and remove event_id if needed let raw = PduEvent::convert_to_outgoing_federation_event( db.rooms - .get_pdu_json_from_id(&pdu_id) + .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
.ok_or_else(|| { ( @@ -711,7 +711,7 @@ impl Sending { let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(&server).map_err(|_| { + let server = utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; @@ -750,7 +750,7 @@ impl Sending { let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(&server).map_err(|_| { + let server = utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 8a3fe4f..60b9bd3 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -54,7 +54,7 @@ impl Uiaa { ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth .session() - .map(|session| self.get_uiaa_session(&user_id, &device_id, session)) + .map(|session| self.get_uiaa_session(user_id, device_id, session)) .unwrap_or_else(|| Ok(uiaainfo.clone()))?; if uiaainfo.session.is_none() { diff --git a/src/database/users.rs b/src/database/users.rs index 88d66be..63ed071 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -81,13 +81,13 @@ impl Users { })?; Ok(Some(( - UserId::try_from(utils::string_from_bytes(&user_bytes).map_err(|_| { + UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid unicode.") })?) .map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid.") })?, - utils::string_from_bytes(&device_bytes).map_err(|_| { + utils::string_from_bytes(device_bytes).map_err(|_| { Error::bad_database("Device ID in token_userdeviceid is invalid.") })?, ))) @@ -121,7 +121,7 @@ impl Users { #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(&password) { + if let Ok(hash) = utils::calculate_hash(password) { self.userid_password .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) @@ -245,7 +245,7 @@ impl Users { .expect("Device::to_string never fails."), )?; - self.set_token(user_id, &device_id, token)?; + self.set_token(user_id, device_id, token)?; Ok(()) } @@ -294,7 +294,7 @@ impl Users { .scan_prefix(prefix) .map(|(bytes, _)| { Ok(utils::string_from_bytes( - &bytes + bytes .rsplit(|&b| b == 0xff) .next() .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, @@ -357,7 +357,7 @@ impl Users { // TODO: Use DeviceKeyId::to_string when it's available (and update everything, // because there are no wrapping quotation marks anymore) key.extend_from_slice( - &serde_json::to_string(one_time_key_key) + serde_json::to_string(one_time_key_key) .expect("DeviceKeyId::to_string always works") .as_bytes(), ); @@ -368,7 +368,7 @@ impl Users { )?; self.userid_lastonetimekeyupdate - .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; Ok(()) } @@ -376,7 +376,7 @@ impl Users { #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate - .get(&user_id.as_bytes())? + .get(user_id.as_bytes())? 
.map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -402,7 +402,7 @@ impl Users { prefix.push(b':'); self.userid_lastonetimekeyupdate - .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -680,7 +680,7 @@ impl Users { globals: &super::globals::Globals, ) -> Result<()> { let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms if rooms .room_state_get(&room_id, &EventType::RoomEncryption, "")? @@ -961,7 +961,7 @@ impl Users { pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { // Remove all associated devices for device_id in self.all_device_ids(user_id) { - self.remove_device(&user_id, &device_id?)?; + self.remove_device(user_id, &device_id?)?; } // Set the password to "" to indicate a deactivated account. Hashes will never result in an diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index fa28379..4629de9 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -66,7 +66,7 @@ where let limit = db.globals.max_request_size(); let mut handle = data.open(ByteUnit::Byte(limit.into())); let mut body = Vec::new(); - if let Err(_) = handle.read_to_end(&mut body).await { + if handle.read_to_end(&mut body).await.is_err() { // Client disconnected // Missing Token return Failure((Status::new(582), ())); @@ -123,7 +123,7 @@ where match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { if let Some(token) = token { - match db.users.find_from_token(&token).unwrap() { + match db.users.find_from_token(token).unwrap() { // Unknown Token None => return Failure((Status::new(581), ())), Some((user_id, device_id)) => ( diff --git a/src/server_server.rs b/src/server_server.rs index 89b9c5c..1d9ba61 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -150,7 +150,7 @@ where } else { write_destination_to_cache = true; - let result = find_actual_destination(globals, &destination).await; + let result = find_actual_destination(globals, destination).await; (result.0, result.1.into_uri_string()) }; @@ -359,7 +359,7 @@ async fn find_actual_destination( let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_string(), port.to_string()) } else { - match request_well_known(globals, &destination.as_str()).await { + match request_well_known(globals, destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); @@ -806,7 +806,7 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - db.rooms.get_pdu_count(&id).ok().flatten().map(|r| (id, r)) + db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -875,8 +875,8 @@ pub async fn send_transaction_message_route( DeviceIdOrAllDevices::DeviceId(target_device_id) => { db.users.add_to_device_event( &sender, - &target_user_id, - &target_device_id, + target_user_id, + target_device_id, &ev_type.to_string(), event.deserialize_as().map_err(|_| { Error::BadRequest( @@ -889,10 +889,10 @@ pub async fn send_transaction_message_route( } DeviceIdOrAllDevices::AllDevices => { - for target_device_id in 
db.users.all_device_ids(&target_user_id) { + for target_device_id in db.users.all_device_ids(target_user_id) { db.users.add_to_device_event( &sender, - &target_user_id, + target_user_id, &target_device_id?, &ev_type.to_string(), event.deserialize_as().map_err(|_| { @@ -959,7 +959,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( db: &'a Database, pub_key_map: &'a RwLock>>, ) -> StdResult>, String> { - match db.rooms.exists(&room_id) { + match db.rooms.exists(room_id) { Ok(true) => {} _ => { return Err("Room is unknown to this server.".to_string()); @@ -967,19 +967,19 @@ pub(crate) async fn handle_incoming_pdu<'a>( } // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) { + if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { return Ok(Some(pdu_id.to_vec())); } let create_event = db .rooms - .room_state_get(&room_id, &EventType::RoomCreate, "") + .room_state_get(room_id, &EventType::RoomCreate, "") .map_err(|_| "Failed to ask database for event.".to_owned())? .ok_or_else(|| "Failed to find create event in db.".to_owned())?; let first_pdu_in_room = db .rooms - .first_pdu_in_room(&room_id) + .first_pdu_in_room(room_id) .map_err(|_| "Error loading first room event.".to_owned())? .expect("Room exists"); @@ -1021,7 +1021,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( origin, &[prev_event_id.clone()], &create_event, - &room_id, + room_id, pub_key_map, ) .await @@ -1049,12 +1049,12 @@ pub(crate) async fn handle_incoming_pdu<'a>( (*prev_event_id).clone(), pdu.prev_events.iter().cloned().collect(), ); - eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Time based check failed graph.insert((*prev_event_id).clone(), HashSet::new()); - eventid_info.insert(prev_event_id.clone(), (pdu, json)); } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Get json failed graph.insert((*prev_event_id).clone(), HashSet::new()); @@ -1146,7 +1146,7 @@ fn handle_outlier_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys - fetch_required_signing_keys(&value, &pub_key_map, db) + fetch_required_signing_keys(&value, pub_key_map, db) .await .map_err(|e| e.to_string())?; @@ -1210,8 +1210,8 @@ fn handle_outlier_pdu<'a>( .cloned() .map(Arc::new) .collect::>(), - &create_event, - &room_id, + create_event, + room_id, pub_key_map, ) .await; @@ -1256,7 +1256,7 @@ fn handle_outlier_pdu<'a>( if auth_events .get(&(EventType::RoomCreate, "".to_owned())) .map(|a| a.as_ref()) - != Some(&create_event) + != Some(create_event) { return Err("Incoming event refers to wrong create event.".to_owned()); } @@ -1273,8 +1273,6 @@ fn handle_outlier_pdu<'a>( None }; - let incoming_pdu = Arc::new(incoming_pdu.clone()); - if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1295,7 +1293,7 @@ fn handle_outlier_pdu<'a>( .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; debug!("Added pdu as outlier."); - Ok((incoming_pdu, val)) + Ok((Arc::new(incoming_pdu), val)) }) } @@ -1427,7 +1425,7 @@ async fn upgrade_outlier_to_timeline_pdu( } auth_chain_sets.push( - get_auth_chain(&room_id, starting_events, db) + get_auth_chain(room_id, starting_events, db) .map_err(|_| "Failed to load auth chain.".to_owned())? 
.map(|event_id| (*event_id).clone()) .collect(), @@ -1478,7 +1476,7 @@ async fn upgrade_outlier_to_timeline_pdu( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: &room_id, + room_id, event_id: &incoming_pdu.event_id, }, ) @@ -1487,15 +1485,15 @@ async fn upgrade_outlier_to_timeline_pdu( Ok(res) => { warn!("Fetching state events at event."); let state_vec = fetch_and_handle_outliers( - &db, + db, origin, &res.pdu_ids .iter() .cloned() .map(Arc::new) .collect::>(), - &create_event, - &room_id, + create_event, + room_id, pub_key_map, ) .await; @@ -1568,11 +1566,11 @@ async fn upgrade_outlier_to_timeline_pdu( None::, // TODO: third party invite |k, s| { db.rooms - .get_shortstatekey(&k, &s) + .get_shortstatekey(k, s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(&event_id).ok().flatten()) + .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) }, ) .map_err(|_e| "Auth check failed.".to_owned())?; @@ -1598,7 +1596,7 @@ async fn upgrade_outlier_to_timeline_pdu( // applied. We start with the previous extremities (aka leaves) let mut extremities = db .rooms - .get_pdu_leaves(&room_id) + .get_pdu_leaves(room_id) .map_err(|_| "Failed to load room leaves".to_owned())?; // Remove any forward extremities that are referenced by this incoming event's prev_events @@ -1609,11 +1607,11 @@ async fn upgrade_outlier_to_timeline_pdu( } // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); let current_sstatehash = db .rooms - .current_shortstatehash(&room_id) + .current_shortstatehash(room_id) .map_err(|_| "Failed to load current state hash.".to_owned())? .expect("every room has state"); @@ -1625,7 +1623,7 @@ async fn upgrade_outlier_to_timeline_pdu( let auth_events = db .rooms .get_auth_events( - &room_id, + room_id, &incoming_pdu.kind, &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), @@ -1637,7 +1635,7 @@ async fn upgrade_outlier_to_timeline_pdu( .iter() .map(|(shortstatekey, id)| { db.rooms - .compress_state_event(*shortstatekey, &id, &db.globals) + .compress_state_event(*shortstatekey, id, &db.globals) .map_err(|_| "Failed to compress_state_event".to_owned()) }) .collect::>()?; @@ -1656,7 +1654,7 @@ async fn upgrade_outlier_to_timeline_pdu( if soft_fail { append_incoming_pdu( - &db, + db, &incoming_pdu, val, extremities, @@ -1680,7 +1678,7 @@ async fn upgrade_outlier_to_timeline_pdu( for id in dbg!(&extremities) { match db .rooms - .get_pdu(&id) + .get_pdu(id) .map_err(|_| "Failed to ask db for pdu.".to_owned())? { Some(leaf_pdu) => { @@ -1757,7 +1755,7 @@ async fn upgrade_outlier_to_timeline_pdu( .iter() .map(|(k, id)| { db.rooms - .compress_state_event(*k, &id, &db.globals) + .compress_state_event(*k, id, &db.globals) .map_err(|_| "Failed to compress_state_event.".to_owned()) }) .collect::>()? 
@@ -1769,7 +1767,7 @@ async fn upgrade_outlier_to_timeline_pdu( for state in &fork_states { auth_chain_sets.push( get_auth_chain( - &room_id, + room_id, state.iter().map(|(_, id)| id.clone()).collect(), db, ) @@ -1828,7 +1826,7 @@ async fn upgrade_outlier_to_timeline_pdu( // Set the new room state to the resolved state if update_state { db.rooms - .force_state(&room_id, new_room_state, &db) + .force_state(room_id, new_room_state, db) .map_err(|_| "Failed to set new room state.".to_owned())?; } debug!("Updated resolved state"); @@ -1841,7 +1839,7 @@ async fn upgrade_outlier_to_timeline_pdu( // represent the state for this event. let pdu_id = append_incoming_pdu( - &db, + db, &incoming_pdu, val, extremities, @@ -1886,7 +1884,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { @@ -1902,7 +1900,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - let local_pdu = db.rooms.get_pdu(&id); + let local_pdu = db.rooms.get_pdu(id); let pdu = match local_pdu { Ok(Some(pdu)) => { trace!("Found {} in db", id); @@ -1916,7 +1914,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .send_federation_request( &db.globals, origin, - get_event::v1::Request { event_id: &id }, + get_event::v1::Request { event_id: id }, ) .await { @@ -1940,8 +1938,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match handle_outlier_pdu( origin, create_event, - &id, - &room_id, + id, + room_id, value.clone(), db, pub_key_map, @@ -2089,7 +2087,7 @@ pub(crate) async fn fetch_signing_keys( .sending .send_federation_request( &db.globals, - &server, + server, get_remote_server_keys::v2::Request::new( origin, MilliSecondsSinceUnixEpoch::from_system_time( @@ -2168,7 +2166,7 @@ fn append_incoming_pdu( pdu, pdu_json, &new_room_leaves.into_iter().collect::>(), - &db, + db, )?; for appservice in db.appservice.all()? { @@ -2206,7 +2204,7 @@ fn append_incoming_pdu( && pdu .state_key .as_ref() - .map_or(false, |state_key| users.is_match(&state_key)) + .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { db.rooms @@ -2273,7 +2271,7 @@ pub(crate) fn get_auth_chain<'a>( chunk_cache.extend(cached.iter().cloned()); } else { misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(&room_id, &event_id, db)?); + let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); db.rooms .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( @@ -2821,7 +2819,7 @@ async fn create_join_event( // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db .rooms - .current_shortstatehash(&room_id)? + .current_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "Pdu state not found.", @@ -2831,7 +2829,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -2860,7 +2858,7 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, &room_id, value, true, &db, &pub_key_map) + let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) .await .map_err(|e| { warn!("Error while handling incoming send join PDU: {}", e); @@ -2877,14 +2875,14 @@ async fn create_join_event( let state_ids = db.rooms.state_full_ids(shortstatehash)?; let auth_chain_ids = get_auth_chain( - &room_id, + room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), - &db, + db, )?; for server in db .rooms - .room_servers(&room_id) + .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != db.globals.server_name()) { @@ -2900,7 +2898,7 @@ async fn create_join_event( .collect(), state: state_ids .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -3296,7 +3294,7 @@ fn get_server_keys_from_cache( let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&value, &room_version) + ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -3388,10 +3386,10 @@ pub(crate) async fn fetch_join_signing_keys( // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, &room_version, &mut pkm, &db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); } for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, &room_version, &mut pkm, &db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); } drop(pkm); From cbee7fe111cac04eff44776a66181bc32a747aef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 13 Sep 2021 23:19:00 +0200 Subject: [PATCH 0806/1727] improvement: deduplicate watchers --- src/database/abstraction/sqlite.rs | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 06e371e..d5924b1 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -4,14 +4,14 @@ use parking_lot::{Mutex, MutexGuard, RwLock}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ cell::RefCell, - collections::HashMap, + collections::{hash_map, HashMap}, future::Future, path::{Path, PathBuf}, pin::Pin, sync::Arc, }; use thread_local::ThreadLocal; -use tokio::sync::oneshot::Sender; +use tokio::sync::watch; use tracing::debug; thread_local! 
{ @@ -126,7 +126,7 @@ impl DatabaseEngine for Engine { pub struct SqliteTable { engine: Arc, name: String, - watchers: RwLock, Vec>>>, + watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, } type TupleOfBytes = (Vec, Vec); @@ -215,10 +215,8 @@ impl Tree for SqliteTable { if !triggered.is_empty() { let mut watchers = self.watchers.write(); for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } + if let Some(tx) = watchers.remove(prefix) { + let _ = tx.0.send(()); } } }; @@ -367,17 +365,18 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .entry(prefix.to_vec()) - .or_default() - .push(tx); + let mut rx = match self.watchers.write().entry(prefix.to_vec()) { + hash_map::Entry::Occupied(o) => o.get().1.clone(), + hash_map::Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(()); + v.insert((tx, rx.clone())); + rx + } + }; Box::pin(async move { // Tx is never destroyed - rx.await.unwrap(); + rx.changed().await.unwrap(); }) } From 422bd09e32f9f10f7705bfa1afeb291c97d579e7 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Sep 2021 09:44:15 +0000 Subject: [PATCH 0807/1727] Remove the "register an account with element" test Broke due to a timeout and Timo does not like broken tests. Less testing means less failing tests. Also, hopefully sytest is less broken now. --- .gitlab-ci.yml | 24 ----- .../test-element-web-registration.js | 101 ------------------ 2 files changed, 125 deletions(-) delete mode 100644 tests/client-element-web/test-element-web-registration.js diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 018e5a1..dfe7198 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -340,30 +340,6 @@ test:sytest: junit: "$CI_PROJECT_DIR/sytest.xml" -test:register:element-web-stable: - stage: "test" - needs: - - "build:debug:cargo:x86_64-unknown-linux-gnu" - image: "buildkite/puppeteer:latest" - tags: [ "docker" ] - interruptible: true - script: - - "CONDUIT_CONFIG=tests/test-config.toml ./conduit-debug-x86_64-unknown-linux-gnu > conduit.log &" - - "cd tests/client-element-web/" - - "npm install puppeteer" - - "node test-element-web-registration.js \"https://app.element.io/\" \"http://localhost:6167\"" - - "killall --regexp \"conduit\"" - - "cd ../.." 
- - "cat conduit.log" - artifacts: - paths: - - "tests/client-element-web/*.png" - - "*.log" - expire_in: 1 week - when: always - retry: 1 - - # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # diff --git a/tests/client-element-web/test-element-web-registration.js b/tests/client-element-web/test-element-web-registration.js deleted file mode 100644 index 8f2e7f0..0000000 --- a/tests/client-element-web/test-element-web-registration.js +++ /dev/null @@ -1,101 +0,0 @@ -const puppeteer = require('puppeteer'); - -run().then(() => console.log('Done')).catch(error => { - console.error("Registration test failed."); - console.error("There might be a screenshot of the failure in the artifacts.\n"); - console.error(error); - process.exit(111); -}); - -async function run() { - - const elementUrl = process.argv[process.argv.length - 2]; - console.debug("Testing registration with ElementWeb hosted at "+ elementUrl); - - const homeserverUrl = process.argv[process.argv.length - 1]; - console.debug("Homeserver url: "+ homeserverUrl); - - const username = "testuser" + String(Math.floor(Math.random() * 100000)); - const password = "testpassword" + String(Math.floor(Math.random() * 100000)); - console.debug("Testuser for this run:\n User: " + username + "\n Password: " + password); - - const browser = await puppeteer.launch({ - headless: true, args: [ - "--no-sandbox" - ] - }); - - const page = await browser.newPage(); - await page.goto(elementUrl); - - await page.screenshot({ path: '01-element-web-opened.png' }); - - console.debug("Click [Create Account] button"); - await page.waitForSelector('a.mx_ButtonCreateAccount'); - await page.click('a.mx_ButtonCreateAccount'); - - await page.screenshot({ path: '02-clicked-create-account-button.png' }); - - // The webapp should have loaded right now, if anything takes more than 5 seconds, something probably broke - page.setDefaultTimeout(5000); - - console.debug("Click [Edit] to switch homeserver"); - await page.waitForSelector('div.mx_ServerPicker_change'); - await page.click('div.mx_ServerPicker_change'); - - await page.screenshot({ path: '03-clicked-edit-homeserver-button.png' }); - - console.debug("Type in local homeserver url"); - await page.waitForSelector('input#mx_homeserverInput'); - await page.click('input#mx_homeserverInput'); - await page.click('input#mx_homeserverInput'); - await page.keyboard.type(homeserverUrl); - - await page.screenshot({ path: '04-typed-in-homeserver.png' }); - - console.debug("[Continue] with changed homeserver"); - await page.waitForSelector("div.mx_ServerPickerDialog_continue"); - await page.click('div.mx_ServerPickerDialog_continue'); - - await page.screenshot({ path: '05-back-to-enter-user-credentials.png' }); - - console.debug("Type in username"); - await page.waitForSelector("input#mx_RegistrationForm_username"); - await page.click('input#mx_RegistrationForm_username'); - await page.keyboard.type(username); - - await page.screenshot({ path: '06-typed-in-username.png' }); - - console.debug("Type in password"); - await page.waitForSelector("input#mx_RegistrationForm_password"); - await page.click('input#mx_RegistrationForm_password'); - await page.keyboard.type(password); - - await page.screenshot({ path: '07-typed-in-password-once.png' }); - - console.debug("Type in password again"); - await page.waitForSelector("input#mx_RegistrationForm_passwordConfirm"); - await 
page.click('input#mx_RegistrationForm_passwordConfirm'); - await page.keyboard.type(password); - - await page.screenshot({ path: '08-typed-in-password-twice.png' }); - - console.debug("Click on [Register] to finish the account creation"); - await page.waitForSelector("input.mx_Login_submit"); - await page.click('input.mx_Login_submit'); - - await page.screenshot({ path: '09-clicked-on-register-button.png' }); - - // Waiting for the app to login can take some time, so be patient. - page.setDefaultTimeout(10000); - - console.debug("Wait for chat window to show up"); - await page.waitForSelector("div.mx_HomePage_default_buttons"); - console.debug("Apparently the registration worked."); - - await page.screenshot({ path: '10-logged-in-homescreen.png' }); - - - // Close the browser and exit the script - await browser.close(); -} \ No newline at end of file From ab472e9b7c58d27059d738e3d24d03d6dfc1cce6 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Sep 2021 12:25:35 +0200 Subject: [PATCH 0808/1727] fix(ci): Fix aarch64 build gcc-8-aarch64-linux-gnu is not available in debian 11 (which the rust image uses), so update to gcc-10 Signed-off-by: Jonas Zohren --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index dfe7198..1925272 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -75,12 +75,12 @@ build:release:cargo:aarch64-unknown-linux-gnu: extends: .build-cargo-shared-settings variables: TARGET: "aarch64-unknown-linux-gnu" - NEEDED_PACKAGES: "build-essential gcc-8-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" + NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ - TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8" - TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8" + TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10" + TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10" artifacts: name: "conduit-aarch64-unknown-linux-gnu" paths: From 5c02dc783066e98b445206c18cebe86972620de2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 14 Sep 2021 14:23:43 +0200 Subject: [PATCH 0809/1727] improvement: batch inserts for inserting pdus --- src/client_server/membership.rs | 9 ++++----- src/database/rooms.rs | 9 ++++----- src/database/sending.rs | 26 ++++++++++++++++++-------- src/server_server.rs | 9 ++++----- 4 files changed, 30 insertions(+), 23 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 10c052e..146af79 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -1092,14 +1092,13 @@ pub(crate) async fn invite_helper<'a>( "Could not accept incoming PDU as timeline event.", ))?; - for server in db + let servers = db .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()) - { - db.sending.send_pdu(&server, &pdu_id)?; - } + .filter(|server| &**server != db.globals.server_name()); + + db.sending.send_pdu(servers, &pdu_id)?; return Ok(()); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b272a5c..ec03e3a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2104,13 +2104,12 @@ impl Rooms { // where events in the current room state do not exist self.set_room_state(room_id, statehashid)?; - for server in self + let 
servers = self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()) - { - db.sending.send_pdu(&server, &pdu_id)?; - } + .filter(|server| &**server != db.globals.server_name()); + + db.sending.send_pdu(servers, &pdu_id)?; for appservice in db.appservice.all()? { if self.appservice_in_room(room_id, &appservice, db)? { diff --git a/src/database/sending.rs b/src/database/sending.rs index c14f581..70ff1b6 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -84,7 +84,7 @@ pub enum SendingEventType { pub struct Sending { /// The state for a given state hash. pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernamEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content + pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(Vec, Vec)>, @@ -423,13 +423,23 @@ impl Sending { Ok(()) } - #[tracing::instrument(skip(self, server, pdu_id))] - pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); + #[tracing::instrument(skip(self, servers, pdu_id))] + pub fn send_pdu>>( + &self, + servers: I, + pdu_id: &[u8], + ) -> Result<()> { + let mut batch = servers.map(|server| { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu_id); + + self.sender.unbounded_send((key.clone(), vec![])).unwrap(); + + (key, Vec::new()) + }); + + self.servernameevent_data.insert_batch(&mut batch)?; Ok(()) } diff --git a/src/server_server.rs b/src/server_server.rs index 1d9ba61..2b8b06c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2880,14 +2880,13 @@ async fn create_join_event( db, )?; - for server in db + let servers = db .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()) - { - db.sending.send_pdu(&server, &pdu_id)?; - } + .filter(|server| &**server != db.globals.server_name()); + + db.sending.send_pdu(servers, &pdu_id)?; db.flush()?; From e8d998cedfda8650c13410e165acc199ea40433e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Sep 2021 20:44:11 +0000 Subject: [PATCH 0810/1727] fix(ci): Convince kaniko that it is indeed running in a container by --force-ing it. 
--- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1925272..386986f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -222,6 +222,7 @@ build:docker:next: - > /kaniko/executor $KANIKO_CACHE_ARGS + --force --context $CI_PROJECT_DIR --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) From fa616342b610dfeea0f6e12dc633fe89258c9e1e Mon Sep 17 00:00:00 2001 From: Greg Sutcliffe Date: Mon, 13 Sep 2021 16:22:52 +0000 Subject: [PATCH 0811/1727] Add two flavours of change for the mautrix-signal patch --- APPSERVICES.md | 47 +++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index ba9ae89..8e57bc2 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -11,28 +11,35 @@ Here are some appservices we tested and that work with Conduit: - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - [mautrix-signal](https://github.com/mautrix/signal) - - There are a few things you need to do, in order for the bridge (at least up to version `0.2.0`) to work. Before following the bridge installation guide, you need to map apply a patch to bridges `portal.py`. Go to [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) at [mautrix-signal](https://github.com/mautrix/signal) (don't forget to change to the correct commit/version of the file) and copy its content, create a `portal.py` on your host system and paste it in. Now you need to change two lines: - [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) - ```diff - --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 - +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 - ``` +There are a few things you need to do, in order for the bridge (at least up to version `0.2.0`) to work. Before following the bridge installation guide, you need to map apply a patch to bridges `portal.py`. How you do this depends upon whether you are running the bridge in `Docker` or `virtualenv`. + - Find / create the changed file: + - For Docker: + - Go to [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) at [mautrix-signal](https://github.com/mautrix/signal) (don't forget to change to the correct commit/version of the file) and copy its content, create a `portal.py` on your host system and paste it in + - For virtualenv + - Find `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). + - Once you have `portal.py` you now need to change two lines. 
Lines numbers given here are approximate, you may need to look nearby: + - [Edit Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) + ```diff + --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 + +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 + ``` + - Add a new line between [Lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) - and add a new line between [Lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) - - ```diff - "type": str(EventType.ROOM_POWER_LEVELS), - +++ "state_key": "", - "content": power_levels.serialize(), - ``` - - Now you just need to map the patched `portal.py` into the `mautrix-signal` container - ```yml - volumes: - - ./////portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py - ``` - and then read below and start following the bridge [installation instructions](https://docs.mau.fi/bridges/index.html). + ```diff + "type": str(EventType.ROOM_POWER_LEVELS), + +++ "state_key": "", + "content": power_levels.serialize(), + ``` + - Deploy the change + - Docker: + - Now you just need to map the patched `portal.py` into the `mautrix-signal` container + ```yml + volumes: + - ./////portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py + ``` + - For virtualenv, that's all you need to do - it uses the edited file directly + - Now continue with the bridge [installation instructions](https://docs.mau.fi/bridges/index.html) and the notes below. ## Set up the appservice From d38f9b5f0186b2e36d72296e6b842e1b36fbe75a Mon Sep 17 00:00:00 2001 From: Greg Sutcliffe Date: Wed, 15 Sep 2021 20:16:59 +0000 Subject: [PATCH 0812/1727] Move Generic instructions for APPSERVICES above notes for specific bridges and tidy up. --- APPSERVICES.md | 84 +++++++++++++++++++++++++++----------------------- 1 file changed, 46 insertions(+), 38 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 8e57bc2..26c34cc 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -4,44 +4,7 @@ If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). -## Tested appservices - -Here are some appservices we tested and that work with Conduit: -- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) -- [mautrix-hangouts](https://github.com/mautrix/hangouts/) -- [mautrix-telegram](https://github.com/mautrix/telegram/) -- [mautrix-signal](https://github.com/mautrix/signal) - -There are a few things you need to do, in order for the bridge (at least up to version `0.2.0`) to work. Before following the bridge installation guide, you need to map apply a patch to bridges `portal.py`. How you do this depends upon whether you are running the bridge in `Docker` or `virtualenv`. 
- - Find / create the changed file: - - For Docker: - - Go to [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) at [mautrix-signal](https://github.com/mautrix/signal) (don't forget to change to the correct commit/version of the file) and copy its content, create a `portal.py` on your host system and paste it in - - For virtualenv - - Find `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). - - Once you have `portal.py` you now need to change two lines. Lines numbers given here are approximate, you may need to look nearby: - - [Edit Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) - ```diff - --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 - +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 - ``` - - Add a new line between [Lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) - - ```diff - "type": str(EventType.ROOM_POWER_LEVELS), - +++ "state_key": "", - "content": power_levels.serialize(), - ``` - - Deploy the change - - Docker: - - Now you just need to map the patched `portal.py` into the `mautrix-signal` container - ```yml - volumes: - - ./////portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py - ``` - - For virtualenv, that's all you need to do - it uses the edited file directly - - Now continue with the bridge [installation instructions](https://docs.mau.fi/bridges/index.html) and the notes below. - -## Set up the appservice +## Set up the appservice - general instructions Follow whatever instructions are given by the appservice. This usually includes downloading, changing its config (setting domain, homeserver url, port etc.) @@ -76,3 +39,48 @@ Then you are done. Conduit will send messages to the appservices and the appservice can send requests to the homeserver. You don't need to restart Conduit, but if it doesn't work, restarting while the appservice is running could help. + +## Appservice-specific instructions + +### Tested appservices + +These appservices have been tested and work with Conduit without any extra steps: + +- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) +- [mautrix-hangouts](https://github.com/mautrix/hangouts/) +- [mautrix-telegram](https://github.com/mautrix/telegram/) + +### [mautrix-signal](https://github.com/mautrix/signal) + +There are a few things you need to do, in order for the Signal bridge (at least +up to version `0.2.0`) to work. How you do this depends on whether you use +Docker or `virtualenv` to run it. In either case you need to modify +[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py). +Do this **before** following the bridge installation guide. + +1. **Create a copy of `portal.py`**. Go to + [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) +at [mautrix-signal](https://github.com/mautrix/signal) (make sure you change to +the correct commit/version of mautrix-signal you're using) and copy its +content. Create a new `portal.py` on your system and paste the content in. +2. **Patch the copy**. 
Exact line numbers may be slightly different, look nearby if they don't match: + - [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) + ```diff + --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 + +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 + ``` + - [Between lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) add a new line: + ```diff + "type": str(EventType.ROOM_POWER_LEVELS), + +++ "state_key": "", + "content": power_levels.serialize(), + ``` +3. **Deploy the patch**. This is different depending on how you have `mautrix-signal` deployed: + - [*If using virtualenv*] Copy your patched `portal.py` to `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). + - [*If using Docker*] Map the patched `portal.py` into the `mautrix-signal` container: + + ```yaml + volumes: + - ./your/path/on/host/portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py + ``` +4. Now continue with the [bridge installation instructions ](https://docs.mau.fi/bridges/index.html) and the general bridge notes above. From 5b23d3d06e0d907301d857797b329c22ddc7dd14 Mon Sep 17 00:00:00 2001 From: Luc-pascal Ceccaldi Date: Thu, 23 Sep 2021 07:49:52 +0000 Subject: [PATCH 0813/1727] Change listen address when running inside a Container to prevent Bad Gateway error --- conduit-example.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/conduit-example.toml b/conduit-example.toml index 8008256..4275f52 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -40,6 +40,7 @@ trusted_servers = ["matrix.org"] #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. proxy = "none" # more examples can be found at src/database/proxy.rs:6 From 6bc8fb2ae7a895628e52c88f2b347dd1389b8858 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 24 Sep 2021 07:16:34 +0000 Subject: [PATCH 0814/1727] Implement admin check and add config option for allowing room creation --- src/client_server/room.rs | 10 ++++++++++ src/database.rs | 2 ++ src/database/globals.rs | 4 ++++ src/database/users.rs | 19 ++++++++++++++++++- 4 files changed, 34 insertions(+), 1 deletion(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 5a02699..f6c3a50 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -61,6 +61,16 @@ pub async fn create_room_route( ); let state_lock = mutex_state.lock().await; + if !db.globals.allow_room_creation() + && !body.from_appservice + && !db.users.is_admin(sender_user, &db.rooms, &db.globals)? 
+ { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Room creation has been disabled.", + )); + } + let alias: Option = body.room_alias_name .as_ref() diff --git a/src/database.rs b/src/database.rs index 110d4d0..8a58929 100644 --- a/src/database.rs +++ b/src/database.rs @@ -61,6 +61,8 @@ pub struct Config { allow_encryption: bool, #[serde(default = "false_fn")] allow_federation: bool, + #[serde(default = "true_fn")] + allow_room_creation: bool, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] diff --git a/src/database/globals.rs b/src/database/globals.rs index 2f1b45a..c2ef1c3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -211,6 +211,10 @@ impl Globals { self.config.allow_federation } + pub fn allow_room_creation(&self) -> bool { + self.config.allow_room_creation + } + pub fn trusted_servers(&self) -> &[Box] { &self.config.trusted_servers } diff --git a/src/database/users.rs b/src/database/users.rs index 63ed071..ee06490 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -5,7 +5,8 @@ use ruma::{ events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, + UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; use tracing::warn; @@ -53,6 +54,22 @@ impl Users { .is_empty()) } + /// Check if a user is an admin + #[tracing::instrument(skip(self, user_id, rooms, globals))] + pub fn is_admin( + &self, + user_id: &UserId, + rooms: &super::rooms::Rooms, + globals: &super::globals::Globals, + ) -> Result { + let admin_room_alias_id = + RoomAliasId::try_from(format!("#admins:{}", globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); + + Ok(rooms.is_joined(user_id, &admin_room_id)?) + } + /// Create a new user account on this homeserver. 
#[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { From 636db8cfaaedf60ff535acdfc545ffcfa26bb364 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 24 Sep 2021 22:44:26 +0000 Subject: [PATCH 0815/1727] Make allow_encryption work again, fixing #115 --- src/client_server/message.rs | 8 ++++++++ src/client_server/state.rs | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 93ead2c..25964cc 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -45,6 +45,14 @@ pub async fn send_message_event_route( ); let state_lock = mutex_state.lock().await; + // Forbid m.room.encrypted if encryption is disabled + if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption has been disabled", + )); + } + // Check if this is a new transaction id if let Some(response) = db.transaction_ids diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 24cc2a1..7618dcc 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -73,6 +73,14 @@ pub async fn send_state_event_for_empty_key_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // Forbid m.room.encryption if encryption is disabled + if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption has been disabled", + )); + } + let event_id = send_state_event_for_key_helper( &db, sender_user, From 09895a20c8c3ffd5e4459db2fb5df0fc0dfc3338 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 13 Oct 2021 10:16:45 +0200 Subject: [PATCH 0816/1727] Upgrade Ruma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Timo Kösters --- Cargo.lock | 50 +++--- Cargo.toml | 2 +- rust-toolchain | 2 +- src/client_server/account.rs | 81 +++++----- src/client_server/device.rs | 6 +- src/client_server/directory.rs | 274 ++++++++++++++++---------------- src/client_server/keys.rs | 4 +- src/client_server/membership.rs | 103 ++++++------ src/client_server/profile.rs | 20 +-- src/client_server/redact.rs | 5 +- src/client_server/room.rs | 125 +++++++-------- src/client_server/session.rs | 12 +- src/client_server/state.rs | 16 +- src/client_server/sync.rs | 34 ++-- src/database/admin.rs | 13 +- src/database/pusher.rs | 34 ++-- src/database/rooms.rs | 256 ++++++++++++++--------------- src/database/sending.rs | 12 +- src/database/uiaa.rs | 7 +- src/pdu.rs | 95 +++++------ src/server_server.rs | 100 ++++++------ 21 files changed, 628 insertions(+), 623 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70d7f4b..293bcff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1968,7 +1968,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "assign", "js_int", @@ -1988,8 +1988,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.18.3" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +version = "0.18.5" +source = 
"git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "bytes", "http", @@ -2004,8 +2004,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.18.3" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +version = "0.18.5" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2016,7 +2016,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "ruma-api", "ruma-common", @@ -2029,8 +2029,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.12.2" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +version = "0.12.3" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "assign", "bytes", @@ -2050,7 +2050,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "indexmap", "js_int", @@ -2064,8 +2064,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.5" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +version = "0.24.6" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "indoc", "js_int", @@ -2080,8 +2080,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.5" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +version = "0.24.6" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2092,7 +2092,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "js_int", "ruma-api", @@ -2107,9 +2107,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "paste", + "percent-encoding", "rand 0.8.4", "ruma-identifiers-macros", "ruma-identifiers-validation", @@ -2121,7 +2122,7 @@ dependencies = [ 
[[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2131,12 +2132,15 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +dependencies = [ + "thiserror", +] [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "js_int", "ruma-api", @@ -2149,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "js_int", "ruma-api", @@ -2164,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "bytes", "form_urlencoded", @@ -2178,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2189,7 +2193,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2206,7 +2210,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" +source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 593a1fd..0f24673 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", 
"push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "a6a1224652912a957b09f136ec5da2686be6e0e2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "44cfd0adbc83303c19aef590ad0d71647e19f197", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/rust-toolchain b/rust-toolchain index d96ae40..74df8b1 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.52 +1.53 diff --git a/src/client_server/account.rs b/src/client_server/account.rs index fb33842..4b3ad0d 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -15,19 +15,28 @@ use ruma::{ ThirdPartyIdRemovalStatus, }, contact::get_contacts, - uiaa::{AuthFlow, UiaaInfo}, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, }, events::{ room::{ - canonical_alias, guest_access, history_visibility, join_rules, member, message, name, - topic, + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, }, EventType, }, identifiers::RoomName, push, RoomAliasId, RoomId, RoomVersionId, UserId, }; +use serde_json::value::to_raw_value; use tracing::info; use register::RegistrationKind; @@ -147,7 +156,7 @@ pub async fn register_route( // UIAA let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.dummy".to_owned()], + stages: vec![AuthType::Dummy], }], completed: Vec::new(), params: Default::default(), @@ -270,7 +279,7 @@ pub async fn register_route( ); let state_lock = mutex_state.lock().await; - let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); + let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; content.room_version = RoomVersionId::Version6; @@ -279,7 +288,7 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomCreate, - content: serde_json::to_value(content).expect("event is valid, we just created it"), + content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -294,8 +303,8 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, + 
content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, displayname: None, avatar_url: None, is_direct: None, @@ -322,12 +331,10 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, - content: serde_json::to_value( - ruma::events::room::power_levels::PowerLevelsEventContent { - users, - ..Default::default() - }, - ) + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -343,10 +350,8 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomJoinRules, - content: serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -361,11 +366,9 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomHistoryVisibility, - content: serde_json::to_value( - history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, - ), - ) + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -381,10 +384,8 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomGuestAccess, - content: serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::Forbidden, - )) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -402,7 +403,7 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value(name::NameEventContent::new(Some(room_name))) + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -417,7 +418,7 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomTopic, - content: serde_json::to_value(topic::TopicEventContent { + content: to_raw_value(&RoomTopicEventContent { topic: format!("Manage {}", db.globals.server_name()), }) .expect("event is valid, we just created it"), @@ -439,7 +440,7 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomCanonicalAlias, - content: serde_json::to_value(canonical_alias::CanonicalAliasEventContent { + content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), }) @@ -460,8 +461,8 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, displayname: None, avatar_url: None, is_direct: None, @@ -482,8 +483,8 @@ pub async fn register_route( db.rooms.build_and_append_pdu( 
PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, displayname: Some(displayname), avatar_url: None, is_direct: None, @@ -506,7 +507,7 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMessage, - content: serde_json::to_value(message::MessageEventContent::text_html( + content: to_raw_value(&RoomMessageEventContent::text_html( "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
\n".to_owned(), )) @@ -562,7 +563,7 @@ pub async fn change_password_route( let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], + stages: vec![AuthType::Password], }], completed: Vec::new(), params: Default::default(), @@ -654,7 +655,7 @@ pub async fn deactivate_route( let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], + stages: vec![AuthType::Password], }], completed: Vec::new(), params: Default::default(), @@ -698,8 +699,8 @@ pub async fn deactivate_route( for room_id in all_rooms { let room_id = room_id?; - let event = member::MemberEventContent { - membership: member::MembershipState::Leave, + let event = RoomMemberEventContent { + membership: MembershipState::Leave, displayname: None, avatar_url: None, is_direct: None, @@ -721,7 +722,7 @@ pub async fn deactivate_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), redacts: None, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 100b591..b6fee37 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -3,7 +3,7 @@ use ruma::api::client::{ error::ErrorKind, r0::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - uiaa::{AuthFlow, UiaaInfo}, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, }; @@ -109,7 +109,7 @@ pub async fn delete_device_route( // UIAA let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], + stages: vec![AuthType::Password], }], completed: Vec::new(), params: Default::default(), @@ -172,7 +172,7 @@ pub async fn delete_devices_route( // UIAA let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], + stages: vec![AuthType::Password], }], completed: Vec::new(), params: Default::default(), diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 0065e51..835504c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -17,10 +17,16 @@ use ruma::{ }, directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, events::{ - room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, + room::{ + avatar::RoomAvatarEventContent, + canonical_alias::RoomCanonicalAliasEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + name::RoomNameEventContent, + topic::RoomTopicEventContent, + }, EventType, }, - serde::Raw, ServerName, UInt, }; use tracing::{info, warn}; @@ -217,157 +223,143 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms = - db.rooms - .public_rooms() - .map(|room_id| { - let room_id = room_id?; + let mut all_rooms = db + .rooms + .public_rooms() + .map(|room_id| { + let room_id = room_id?; - let chunk = PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: db - .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") - })? 
- .alias) - })?, - name: db - .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - })? - .name) - })?, - num_joined_members: db - .rooms - .room_joined_count(&room_id)? - .unwrap_or_else(|| { - warn!("Room {} has no member count", room_id); - 0 - }) - .try_into() - .expect("user count should not be that big"), - topic: db - .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(Some( - serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() + let chunk = PublicRoomsChunk { + aliases: Vec::new(), + canonical_alias: db + .rooms + .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok( + serde_json::from_str::(s.content.get()) + .map_err(|_| { + Error::bad_database( + "Invalid canonical alias event in database.", + ) + })? + .alias, + ) + })?, + name: db + .rooms + .room_state_get(&room_id, &EventType::RoomName, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok( + serde_json::from_str::(s.content.get()) + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? + .name, + ) + })?, + num_joined_members: db + .rooms + .room_joined_count(&room_id)? + .unwrap_or_else(|| { + warn!("Room {} has no member count", room_id); + 0 + }) + .try_into() + .expect("user count should not be that big"), + topic: db + .rooms + .room_state_get(&room_id, &EventType::RoomTopic, "")? + .map_or(Ok::<_, Error>(None), |s| { + Ok(Some( + serde_json::from_str::(s.content.get()) .map_err(|_| { Error::bad_database("Invalid room topic event in database.") })? .topic, - )) - })?, - world_readable: db - .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_value::< - Raw, - >(s.content.clone()) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? - .history_visibility - == history_visibility::HistoryVisibility::WorldReadable) - })?, - guest_can_join: db - .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok( - serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - })? - .guest_access - == guest_access::GuestAccess::CanJoin, + )) + })?, + world_readable: db + .rooms + .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? + .map_or(Ok::<_, Error>(false), |s| { + Ok(serde_json::from_str::( + s.content.get(), ) - })?, - avatar_url: db - .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? - .map(|s| { - Ok::<_, Error>( - serde_json::from_value::>( - s.content.clone(), - ) - .expect("from_value::> can never fail") - .deserialize() + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + })? + .history_visibility + == HistoryVisibility::WorldReadable) + })?, + guest_can_join: db + .rooms + .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? 
+ .map_or(Ok::<_, Error>(false), |s| { + Ok( + serde_json::from_str::(s.content.get()) + .map_err(|_| { + Error::bad_database( + "Invalid room guest access event in database.", + ) + })? + .guest_access + == GuestAccess::CanJoin, + ) + })?, + avatar_url: db + .rooms + .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .map(|s| { + Ok::<_, Error>( + serde_json::from_str::(s.content.get()) .map_err(|_| { Error::bad_database("Invalid room avatar event in database.") })? .url, - ) - }) - .transpose()? - // url is now an Option so we must flatten - .flatten(), - room_id, - }; - Ok(chunk) - }) - .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms - .filter(|chunk| { - if let Some(query) = filter - .generic_search_term - .as_ref() - .map(|q| q.to_lowercase()) - { - if let Some(name) = &chunk.name { - if name.as_str().to_lowercase().contains(&query) { - return true; - } + ) + }) + .transpose()? + // url is now an Option so we must flatten + .flatten(), + room_id, + }; + Ok(chunk) + }) + .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + .filter(|chunk| { + if let Some(query) = filter + .generic_search_term + .as_ref() + .map(|q| q.to_lowercase()) + { + if let Some(name) = &chunk.name { + if name.as_str().to_lowercase().contains(&query) { + return true; } - - if let Some(topic) = &chunk.topic { - if topic.to_lowercase().contains(&query) { - return true; - } - } - - if let Some(canonical_alias) = &chunk.canonical_alias { - if canonical_alias.as_str().to_lowercase().contains(&query) { - return true; - } - } - - false - } else { - // No search term - true } - }) - // We need to collect all, so we can sort by member count - .collect::>(); + + if let Some(topic) = &chunk.topic { + if topic.to_lowercase().contains(&query) { + return true; + } + } + + if let Some(canonical_alias) = &chunk.canonical_alias { + if canonical_alias.as_str().to_lowercase().contains(&query) { + return true; + } + } + + false + } else { + // No search term + true + } + }) + // We need to collect all, so we can sort by member count + .collect::>(); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index a74c409..980acf0 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -10,7 +10,7 @@ use ruma::{ claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, upload_signing_keys, }, - uiaa::{AuthFlow, UiaaInfo}, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, }, federation, @@ -148,7 +148,7 @@ pub async fn upload_signing_keys_route( // UIAA let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { - stages: vec!["m.login.password".to_owned()], + stages: vec![AuthType::Password], }], completed: Vec::new(), params: Default::default(), diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 146af79..e37fe6c 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -1,10 +1,9 @@ use crate::{ client_server, database::DatabaseGuard, - pdu::{PduBuilder, PduEvent}, + pdu::{EventHash, PduBuilder, PduEvent}, server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; -use member::{MemberEventContent, MembershipState}; use ruma::{ api::{ client::{ @@ -18,14 +17,17 @@ use ruma::{ federation::{self, membership::create_invite}, }, events::{ - pdu::Pdu, - room::{create::CreateEventContent, member}, + room::{ + create::RoomCreateEventContent, + member::{MembershipState, RoomMemberEventContent}, + }, EventType, }, - serde::{to_canonical_value, 
CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, @@ -204,7 +206,7 @@ pub async fn kick_user_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_value::>( + let mut event: RoomMemberEventContent = serde_json::from_str( db.rooms .room_state_get( &body.room_id, @@ -216,13 +218,11 @@ pub async fn kick_user_route( "Cannot kick member that's not in the room.", ))? .content - .clone(), + .get(), ) - .expect("Raw::from_value always works") - .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = ruma::events::room::member::MembershipState::Leave; + event.membership = MembershipState::Leave; // TODO: reason let mutex_state = Arc::clone( @@ -238,7 +238,7 @@ pub async fn kick_user_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, @@ -280,8 +280,8 @@ pub async fn ban_user_route( &body.user_id.to_string(), )? .map_or( - Ok::<_, Error>(member::MemberEventContent { - membership: member::MembershipState::Ban, + Ok::<_, Error>(RoomMemberEventContent { + membership: MembershipState::Ban, displayname: db.users.displayname(&body.user_id)?, avatar_url: db.users.avatar_url(&body.user_id)?, is_direct: None, @@ -290,13 +290,9 @@ pub async fn ban_user_route( reason: None, }), |event| { - let mut event = serde_json::from_value::>( - event.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = ruma::events::room::member::MembershipState::Ban; + let mut event = serde_json::from_str::(event.content.get()) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + event.membership = MembershipState::Ban; Ok(event) }, )?; @@ -314,7 +310,7 @@ pub async fn ban_user_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, @@ -346,7 +342,7 @@ pub async fn unban_user_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_value::>( + let mut event = serde_json::from_str::( db.rooms .room_state_get( &body.room_id, @@ -358,13 +354,11 @@ pub async fn unban_user_route( "Cannot unban a user who is not banned.", ))? 
.content - .clone(), + .get(), ) - .expect("from_value::> can never fail") - .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = ruma::events::room::member::MembershipState::Leave; + event.membership = MembershipState::Leave; let mutex_state = Arc::clone( db.globals @@ -379,7 +373,7 @@ pub async fn unban_user_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, @@ -584,10 +578,9 @@ async fn join_room_by_id_helper( }; let mut join_event_stub = - serde_json::from_str::(make_join_response.event.json().get()) - .map_err(|_| { - Error::BadServerResponse("Invalid make_join event json received from server.") - })?; + serde_json::from_str::(make_join_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_join event json received from server."), + )?; // TODO: Is origin needed? join_event_stub.insert( @@ -604,8 +597,8 @@ async fn join_room_by_id_helper( ); join_event_stub.insert( "content".to_owned(), - to_canonical_value(member::MemberEventContent { - membership: member::MembershipState::Join, + to_canonical_value(RoomMemberEventContent { + membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, @@ -653,7 +646,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; @@ -756,8 +749,8 @@ async fn join_room_by_id_helper( // where events in the current room state do not exist db.rooms.set_room_state(room_id, statehashid)?; } else { - let event = member::MemberEventContent { - membership: member::MembershipState::Join, + let event = RoomMemberEventContent { + membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, @@ -769,7 +762,7 @@ async fn join_room_by_id_helper( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), redacts: None, @@ -789,12 +782,12 @@ async fn join_room_by_id_helper( } fn validate_and_add_event_id( - pdu: &Raw, + pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, db: &Database, ) -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + let mut value = serde_json::from_str::(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; @@ -884,9 +877,7 @@ pub(crate) async fn invite_helper<'a>( let create_event_content = create_event .as_ref() .map(|create_event| { - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() + serde_json::from_str::(create_event.content.get()) .map_err(|e| { warn!("Invalid create event: {}", e); Error::bad_database("Invalid create 
event in db.") @@ -910,7 +901,7 @@ pub(crate) async fn invite_helper<'a>( let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - let content = serde_json::to_value(MemberEventContent { + let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, displayname: None, is_direct: Some(is_direct), @@ -946,7 +937,7 @@ pub(crate) async fn invite_helper<'a>( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), ); } @@ -967,11 +958,15 @@ pub(crate) async fn invite_helper<'a>( .map(|(_, pdu)| pdu.event_id.clone()) .collect(), redacts: None, - unsigned, - hashes: ruma::events::pdu::EventHash { + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { sha256: "aaa".to_owned(), }, - signatures: BTreeMap::new(), + signatures: None, }; let auth_check = state_res::auth_check( @@ -1035,11 +1030,11 @@ pub(crate) async fn invite_helper<'a>( &db.globals, user_id.server_name(), create_invite::v2::Request { - room_id: room_id.clone(), - event_id: expected_event_id.clone(), - room_version: room_version_id, - event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), - invite_room_state, + room_id, + event_id: &expected_event_id, + room_version: &room_version_id, + event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), + invite_room_state: &invite_room_state, }, ) .await?; @@ -1116,8 +1111,8 @@ pub(crate) async fn invite_helper<'a>( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Invite, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, displayname: db.users.displayname(user_id)?, avatar_url: db.users.avatar_url(user_id)?, is_direct: Some(is_direct), diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index ab7fb02..29b1ae8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -9,9 +9,9 @@ use ruma::{ }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::EventType, - serde::Raw, + events::{room::member::RoomMemberEventContent, EventType}, }; +use serde_json::value::to_raw_value; use std::{convert::TryInto, sync::Arc}; #[cfg(feature = "conduit_bin")] @@ -45,9 +45,9 @@ pub async fn set_displayname_route( Ok::<_, Error>(( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), - ..serde_json::from_value::>( + ..serde_json::from_str( db.rooms .room_state_get( &room_id, @@ -61,10 +61,8 @@ pub async fn set_displayname_route( ) })? .content - .clone(), + .get(), ) - .expect("from_value::> can never fail") - .deserialize() .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
}) .expect("event is valid, we just created it"), @@ -190,9 +188,9 @@ pub async fn set_avatar_url_route( Ok::<_, Error>(( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(ruma::events::room::member::MemberEventContent { + content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), - ..serde_json::from_value::>( + ..serde_json::from_str( db.rooms .room_state_get( &room_id, @@ -206,10 +204,8 @@ pub async fn set_avatar_url_route( ) })? .content - .clone(), + .get(), ) - .expect("from_value::> can never fail") - .deserialize() .map_err(|_| Error::bad_database("Database contains invalid PDU."))? }) .expect("event is valid, we just created it"), diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 4b5219b..7435c5c 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -3,11 +3,12 @@ use std::sync::Arc; use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; use ruma::{ api::client::r0::redact::redact_event, - events::{room::redaction, EventType}, + events::{room::redaction::RoomRedactionEventContent, EventType}, }; #[cfg(feature = "conduit_bin")] use rocket::put; +use serde_json::value::to_raw_value; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// @@ -38,7 +39,7 @@ pub async fn redact_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomRedaction, - content: serde_json::to_value(redaction::RedactionEventContent { + content: to_raw_value(&RoomRedactionEventContent { reason: body.reason.clone(), }) .expect("event is valid, we just created it"), diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 5a02699..d1c79df 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -8,12 +8,23 @@ use ruma::{ r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, }, events::{ - room::{guest_access, history_visibility, join_rules, member, name, topic}, + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + tombstone::RoomTombstoneEventContent, + topic::RoomTopicEventContent, + }, EventType, }, - serde::Raw, RoomAliasId, RoomId, RoomVersionId, }; +use serde_json::value::to_raw_value; use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; use tracing::{info, warn}; @@ -80,7 +91,7 @@ pub async fn create_room_route( } })?; - let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone()); + let mut content = RoomCreateEventContent::new(sender_user.clone()); content.federate = body.creation_content.federate; content.predecessor = body.creation_content.predecessor.clone(); content.room_version = match body.room_version.clone() { @@ -101,7 +112,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomCreate, - content: serde_json::to_value(content).expect("event is valid, we just created it"), + content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -116,8 +127,8 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { 
event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?, is_direct: Some(body.is_direct), @@ -157,12 +168,11 @@ pub async fn create_room_route( } } - let mut power_levels_content = - serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"); + let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"); if let Some(power_level_content_override) = &body.power_level_content_override { let json = serde_json::from_str::>( @@ -180,7 +190,8 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, - content: power_levels_content, + content: to_raw_value(&power_levels_content) + .expect("to_raw_value always works on serde_json::Value"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -196,12 +207,10 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomCanonicalAlias, - content: serde_json::to_value( - ruma::events::room::canonical_alias::CanonicalAliasEventContent { - alias: Some(room_alias_id.clone()), - alt_aliases: vec![], - }, - ) + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(room_alias_id.clone()), + alt_aliases: vec![], + }) .expect("We checked that alias earlier, it must be fine"), unsigned: None, state_key: Some("".to_owned()), @@ -220,17 +229,12 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomJoinRules, - content: match preset { - create_room::RoomPreset::PublicChat => serde_json::to_value( - join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public), - ) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { + create_room::RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default - _ => serde_json::to_value(join_rules::JoinRulesEventContent::new( - join_rules::JoinRule::Invite, - )) - .expect("event is valid, we just created it"), - }, + _ => JoinRule::Invite, + })) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -245,8 +249,8 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomHistoryVisibility, - content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( - history_visibility::HistoryVisibility::Shared, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, )) .expect("event is valid, we just created it"), unsigned: None, @@ -263,18 +267,11 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomGuestAccess, - content: match preset { - create_room::RoomPreset::PublicChat => { - serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::Forbidden, - )) - .expect("event is valid, we just created it") - } - _ => serde_json::to_value(guest_access::GuestAccessEventContent::new( - guest_access::GuestAccess::CanJoin, - )) - 
.expect("event is valid, we just created it"), - }, + content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { + create_room::RoomPreset::PublicChat => GuestAccess::Forbidden, + _ => GuestAccess::CanJoin, + })) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -306,7 +303,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, - content: serde_json::to_value(name::NameEventContent::new(Some(name.clone()))) + content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -323,7 +320,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomTopic, - content: serde_json::to_value(topic::TopicEventContent { + content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) .expect("event is valid, we just created it"), @@ -477,7 +474,7 @@ pub async fn upgrade_room_route( let tombstone_event_id = db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomTombstone, - content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent { + content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_string(), replacement_room: replacement_room.clone(), }) @@ -505,15 +502,13 @@ pub async fn upgrade_room_route( let state_lock = mutex_state.lock().await; // Get the old room federations status - let federate = serde_json::from_value::>( + let federate = serde_json::from_str::( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content - .clone(), + .get(), ) - .expect("Raw::from_value always works") - .deserialize() .map_err(|_| Error::bad_database("Invalid room event in database."))? .federate; @@ -524,8 +519,7 @@ pub async fn upgrade_room_route( )); // Send a m.room.create event containing a predecessor field and the applicable room_version - let mut create_event_content = - ruma::events::room::create::CreateEventContent::new(sender_user.clone()); + let mut create_event_content = RoomCreateEventContent::new(sender_user.clone()); create_event_content.federate = federate; create_event_content.room_version = body.new_version.clone(); create_event_content.predecessor = predecessor; @@ -533,7 +527,7 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomCreate, - content: serde_json::to_value(create_event_content) + content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -549,8 +543,8 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(member::MemberEventContent { - membership: member::MembershipState::Join, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?, is_direct: None, @@ -611,17 +605,14 @@ pub async fn upgrade_room_route( } // Get the old room power levels - let mut power_levels_event_content = - serde_json::from_value::>( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? - .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
- .content - .clone(), - ) - .expect("database contains invalid PDU") - .deserialize() - .map_err(|_| Error::bad_database("Invalid room event in database."))?; + let mut power_levels_event_content = serde_json::from_str::( + db.rooms + .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? + .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Setting events_default and invite to the greater of 50 and users_default + 1 let new_level = max( @@ -635,7 +626,7 @@ pub async fn upgrade_room_route( let _ = db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomPowerLevels, - content: serde_json::to_value(power_levels_event_content) + content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), diff --git a/src/client_server/session.rs b/src/client_server/session.rs index b42689d..61e5519 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -60,10 +60,10 @@ pub async fn login_route( // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password { + login::IncomingLoginInfo::Password(login::IncomingPassword { identifier, password, - } => { + }) => { let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { matrix_id } else { @@ -97,7 +97,7 @@ pub async fn login_route( user_id } - login::IncomingLoginInfo::Token { token } => { + login::IncomingLoginInfo::Token(login::IncomingToken { token }) => { if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, @@ -116,6 +116,12 @@ pub async fn login_route( )); } } + _ => { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Unsupported login type.", + )); + } }; // Generate new device id if the user didn't specify one diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 24cc2a1..3515733 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -10,8 +10,8 @@ use ruma::{ }, events::{ room::{ - canonical_alias::CanonicalAliasEventContent, - history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, + canonical_alias::RoomCanonicalAliasEventContent, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, AnyStateEventContent, EventType, }, @@ -112,7 +112,7 @@ pub async fn get_state_events_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_value::(event.content.clone()) + serde_json::from_str::(event.content.get()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -164,7 +164,7 @@ pub async fn get_state_events_for_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? 
.map(|event| { - serde_json::from_value::(event.content.clone()) + serde_json::from_str::(event.content.get()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -190,7 +190,7 @@ pub async fn get_state_events_for_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::from_value(event.content.clone()) + content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -220,7 +220,7 @@ pub async fn get_state_events_for_empty_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_value::(event.content.clone()) + serde_json::from_str::(event.content.get()) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", @@ -246,7 +246,7 @@ pub async fn get_state_events_for_empty_key_route( ))?; Ok(get_state_events_for_key::Response { - content: serde_json::from_value(event.content.clone()) + content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) @@ -265,7 +265,7 @@ async fn send_state_event_for_key_helper( // TODO: Review this check, error if event is unparsable, use event type, allow alias if it // previously existed if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) + serde_json::from_str::(json.json().get()) { let mut aliases = canonical_alias.alt_aliases.clone(); diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2d5ad27..5b0dbaf 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,7 +1,10 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, - events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + AnySyncEphemeralRoomEvent, EventType, + }, serde::Raw, DeviceId, RoomId, UserId, }; @@ -287,10 +290,11 @@ async fn sync_helper( .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { - let content = serde_json::from_value::< - ruma::events::room::member::MemberEventContent, - >(pdu.content.clone()) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let content = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; if let Some(state_key) = &pdu.state_key { let user_id = UserId::try_from(state_key.clone()).map_err(|_| { @@ -371,13 +375,9 @@ async fn sync_helper( sender_user.as_str(), )? .and_then(|pdu| { - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() + serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() }); let joined_since_last_sync = since_sender_member @@ -432,11 +432,9 @@ async fn sync_helper( continue; } - let new_membership = serde_json::from_value::< - Raw, - >(state_event.content.clone()) - .expect("Raw::from_value always works") - .deserialize() + let new_membership = serde_json::from_str::( + state_event.content.get(), + ) .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
.membership; @@ -739,7 +737,7 @@ async fn sync_helper( presence: sync_events::Presence { events: presence_updates .into_iter() - .map(|(_, v)| Raw::from(v)) + .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully")) .collect(), }, account_data: sync_events::GlobalAccountData { diff --git a/src/database/admin.rs b/src/database/admin.rs index 424e674..8d8559a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -6,16 +6,17 @@ use std::{ use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ - events::{room::message, EventType}, + events::{room::message::RoomMessageEventContent, EventType}, UserId, }; +use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, - SendMessage(message::MessageEventContent), + SendMessage(RoomMessageEventContent), } #[derive(Clone)] @@ -58,7 +59,7 @@ impl Admin { drop(guard); - let send_message = |message: message::MessageEventContent, + let send_message = |message: RoomMessageEventContent, guard: RwLockReadGuard<'_, Database>, mutex_lock: &MutexGuard<'_, ()>| { guard @@ -66,7 +67,7 @@ impl Admin { .build_and_append_pdu( PduBuilder { event_type: EventType::RoomMessage, - content: serde_json::to_value(message) + content: to_raw_value(&message) .expect("event is valid, we just created it"), unsigned: None, state_key: None, @@ -106,9 +107,9 @@ impl Admin { count, appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") ); - send_message(message::MessageEventContent::text_plain(output), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock); } else { - send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); } } AdminCommand::SendMessage(message) => { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index b19f339..f53f137 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,8 +9,10 @@ use ruma::{ }, IncomingResponse, OutgoingRequest, SendAccessToken, }, - events::{room::power_levels::PowerLevelsEventContent, AnySyncRoomEvent, EventType}, - identifiers::RoomName, + events::{ + room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, + AnySyncRoomEvent, EventType, + }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, uint, RoomId, UInt, UserId, @@ -177,11 +179,11 @@ pub async fn send_push_notice( let mut notify = None; let mut tweaks = Vec::new(); - let power_levels: PowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = db .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? .map(|ev| { - serde_json::from_value(ev.content.clone()) + serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) }) .transpose()? 
@@ -226,7 +228,7 @@ pub async fn send_push_notice( pub fn get_actions<'a>( user: &UserId, ruleset: &'a Ruleset, - power_levels: &PowerLevelsEventContent, + power_levels: &RoomPowerLevelsEventContent, pdu: &Raw, room_id: &RoomId, db: &Database, @@ -318,16 +320,18 @@ async fn send_notice( let user_name = db.users.displayname(&event.sender)?; notifi.sender_display_name = user_name.as_deref(); - let room_name = db - .rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? - .map(|pdu| match pdu.content.get("name") { - Some(serde_json::Value::String(s)) => { - Some(Box::::try_from(&**s).expect("room name is valid")) - } - _ => None, - }) - .flatten(); + + let room_name = if let Some(room_name_pdu) = + db.rooms + .room_state_get(&event.room_id, &EventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .name + } else { + None + }; + notifi.room_name = room_name.as_deref(); send_request( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ec03e3a..3096150 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1,9 +1,11 @@ mod edus; pub use edus::RoomEdus; -use member::MembershipState; -use crate::{pdu::PduBuilder, server_server, utils, Database, Error, PduEvent, Result}; +use crate::{ + pdu::{EventHash, PduBuilder}, + server_server, utils, Database, Error, PduEvent, Result, +}; use lru_cache::LruCache; use regex::Regex; use ring::digest; @@ -13,16 +15,22 @@ use ruma::{ events::{ ignored_user_list, push_rules, room::{ - create::CreateEventContent, member, message, power_levels::PowerLevelsEventContent, + create::RoomCreateEventContent, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + power_levels::RoomPowerLevelsEventContent, }, AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, - push::{self, Action, Tweak}, + push::{Action, Ruleset, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res::{self, RoomVersion, StateMap}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; +use serde::Deserialize; +use serde_json::value::to_raw_value; use std::{ + borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem::size_of, @@ -243,7 +251,7 @@ impl Rooms { kind: &EventType, sender: &UserId, state_key: Option<&str>, - content: &serde_json::Value, + content: &serde_json::value::RawValue, ) -> Result>> { let shortstatehash = if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { @@ -252,7 +260,8 @@ impl Rooms { return Ok(HashMap::new()); }; - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content); + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) + .expect("content is a valid JSON object"); let mut sauthevents = auth_events .into_iter() @@ -391,37 +400,43 @@ impl Rooms { .ok() .map(|(_, id)| id) }) { - if let Some(pdu) = self.get_pdu_json(&event_id)? 
{ - if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") { - if let Ok(pdu) = serde_json::from_value::( - serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - if let Some(membership) = - pdu.content.get("membership").and_then(|membership| { - serde_json::from_value::( - membership.clone(), - ) - .ok() - }) - { - if let Some(state_key) = pdu - .state_key - .and_then(|state_key| UserId::try_from(state_key).ok()) - { - self.update_membership( - room_id, - &state_key, - membership, - &pdu.sender, - None, - db, - false, - )?; - } - } - } - } + let pdu = match self.get_pdu_json(&event_id)? { + Some(pdu) => pdu, + None => continue, + }; + + if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { + continue; } + + let pdu = match serde_json::from_str::( + &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + ) { + Ok(pdu) => pdu, + Err(_) => continue, + }; + + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let membership = match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::try_from(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; } self.update_joined_count(room_id, db)?; @@ -1325,11 +1340,11 @@ impl Rooms { drop(insert_lock); // See if the event matches any known pushers - let power_levels: PowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = db .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? .map(|ev| { - serde_json::from_value(ev.content.clone()) + serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) }) .transpose()? @@ -1350,7 +1365,7 @@ impl Rooms { .account_data .get::(None, user, EventType::PushRules)? .map(|ev| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(user)); + .unwrap_or_else(|| Ruleset::server_default(user)); let mut highlight = false; let mut notify = false; @@ -1404,30 +1419,21 @@ impl Rooms { } EventType::RoomMember => { if let Some(state_key) = &pdu.state_key { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + // if the state_key fails let target_user_id = UserId::try_from(state_key.clone()) .expect("This state_key was previously validated"); - let membership = serde_json::from_value::( - pdu.content - .get("membership") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content", - ))? 
- .clone(), - ) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid membership state content.", - ) - })?; + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - let invite_state = match membership { - member::MembershipState::Invite => { + let invite_state = match content.membership { + MembershipState::Invite => { let state = self.calculate_invite_state(pdu)?; - Some(state) } _ => None, @@ -1438,7 +1444,7 @@ impl Rooms { self.update_membership( &pdu.room_id, &target_user_id, - membership, + content.membership, &pdu.sender, invite_state, db, @@ -1447,7 +1453,16 @@ impl Rooms { } } EventType::RoomMessage => { - if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) { + #[derive(Deserialize)] + struct ExtractBody<'a> { + #[serde(borrow)] + body: Option>, + } + + let content = serde_json::from_str::>(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { let mut batch = body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -1498,18 +1513,16 @@ impl Rooms { } Err(e) => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( - format!( - "Could not parse appservice config: {}", - e - ), - ), + RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {}", + e + )), )); } } } else { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( "Expected code block in command body.", ), )); @@ -1542,12 +1555,10 @@ impl Rooms { .count(); let elapsed = start.elapsed(); db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( - format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - ), - ), + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), )); } } @@ -1580,14 +1591,17 @@ impl Rooms { ) { Ok(pdu) => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( - format!("EventId: {:?}\n{:#?}", event_id, pdu), + RoomMessageEventContent::text_plain( + format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + ), ), )); } Err(e) => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( format!("EventId: {:?}\nCould not parse event: {}", event_id, e), ), )); @@ -1596,18 +1610,16 @@ impl Rooms { } Err(e) => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( - format!( - "Invalid json in command body: {}", - e - ), - ), + RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e + )), )); } } } else { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( "Expected code block in command body.", ), )); @@ -1629,7 +1641,7 @@ impl Rooms { serde_json::to_string_pretty(&json) .expect("canonical json is valid json"); db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_html( + RoomMessageEventContent::text_html( format!("{}\n```json\n{}\n```", if outlier { "PDU is outlier" @@ -1643,7 +1655,7 @@ impl Rooms { } None => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( "PDU not found.", ), )); @@ -1651,14 +1663,14 @@ impl Rooms { } } else { 
db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( "Event ID could not be parsed.", ), )); } } else { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain( + RoomMessageEventContent::text_plain( "Usage: get_pdu ", ), )); @@ -1666,7 +1678,7 @@ impl Rooms { } _ => { db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::text_plain(format!( + RoomMessageEventContent::text_plain(format!( "Unrecognized command: {}", command )), @@ -1958,16 +1970,13 @@ impl Rooms { let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?; - let create_event_content = create_event + let create_event_content: Option = create_event .as_ref() .map(|create_event| { - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; @@ -2000,7 +2009,10 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { if let Some(prev_pdu) = self.room_state_get(room_id, &event_type, state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert( + "prev_content".to_owned(), + serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), + ); unsigned.insert( "prev_sender".to_owned(), serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), @@ -2025,11 +2037,15 @@ impl Rooms { .map(|(_, pdu)| pdu.event_id.clone()) .collect(), redacts, - unsigned, - hashes: ruma::events::pdu::EventHash { + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { sha256: "aaa".to_owned(), }, - signatures: BTreeMap::new(), + signatures: None, }; let auth_check = state_res::auth_check( @@ -2205,7 +2221,7 @@ impl Rooms { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { - pdu.unsigned.remove("transaction_id"); + pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) })) @@ -2242,7 +2258,7 @@ impl Rooms { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { - pdu.unsigned.remove("transaction_id"); + pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) })) @@ -2279,7 +2295,7 @@ impl Rooms { let mut pdu = serde_json::from_slice::(&v) .map_err(|_| Error::bad_database("PDU in db is invalid."))?; if pdu.sender != user_id { - pdu.unsigned.remove("transaction_id"); + pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) })) @@ -2309,7 +2325,7 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - membership: member::MembershipState, + membership: MembershipState, sender: &UserId, last_state: Option>>, db: &Database, @@ -2338,7 +2354,7 @@ impl Rooms { roomuser_id.extend_from_slice(user_id.as_bytes()); match &membership { - member::MembershipState::Join => { + MembershipState::Join => { // Check if the user never joined this room if !self.once_joined(user_id, room_id)? 
{ // Add the user ID to the join list then @@ -2348,12 +2364,8 @@ impl Rooms { if let Some(predecessor) = self .room_state_get(room_id, &EventType::RoomCreate, "")? .and_then(|create| { - serde_json::from_value::< - Raw, - >(create.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .ok() + serde_json::from_str::(create.content.get()) + .ok() }) .and_then(|content| content.predecessor) { @@ -2442,7 +2454,7 @@ impl Rooms { self.userroomid_leftstate.remove(&userroom_id)?; self.roomuserid_leftcount.remove(&roomuser_id)?; } - member::MembershipState::Invite => { + MembershipState::Invite => { // We want to know if the sender is ignored by the receiver let is_ignored = db .account_data @@ -2475,7 +2487,7 @@ impl Rooms { self.userroomid_leftstate.remove(&userroom_id)?; self.roomuserid_leftcount.remove(&roomuser_id)?; } - member::MembershipState::Leave | member::MembershipState::Ban => { + MembershipState::Leave | MembershipState::Ban => { if update_joined_count && self .room_members(room_id) @@ -2700,26 +2712,23 @@ impl Rooms { ); let state_lock = mutex_state.lock().await; - let mut event = serde_json::from_value::>( + let mut event = serde_json::from_str::( self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? .content - .clone(), + .get(), ) - .expect("from_value::> can never fail") - .deserialize() .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = member::MembershipState::Leave; + event.membership = MembershipState::Leave; self.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMember, - content: serde_json::to_value(event) - .expect("event is valid, we just created it"), + content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(user_id.to_string()), redacts: None, @@ -2793,10 +2802,9 @@ impl Rooms { }; let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.json().get()) - .map_err(|_| { - Error::BadServerResponse("Invalid make_leave event json received from server.") - })?; + serde_json::from_str::(make_leave_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_leave event json received from server."), + )?; // TODO: Is origin needed? 
leave_event_stub.insert( @@ -2847,7 +2855,7 @@ impl Rooms { federation::membership::create_leave_event::v2::Request { room_id, event_id: &event_id, - pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), }, ) .await?; diff --git a/src/database/sending.rs b/src/database/sending.rs index 70ff1b6..c1abcde 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -398,7 +398,7 @@ impl Sending { let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, device_id: device_id!("dummy"), - device_display_name: "Dummy".to_owned(), + device_display_name: Some("Dummy".to_owned()), stream_id: uint!(1), prev_id: Vec::new(), deleted: None, @@ -573,8 +573,14 @@ impl Sending { for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) - if pdu.unsigned.get("redacted_because").is_some() { - continue; + if let Some(unsigned) = &pdu.unsigned { + if let Ok(unsigned) = + serde_json::from_str::(unsigned.get()) + { + if unsigned.get("redacted_because").is_some() { + continue; + } + } } let userid = diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 60b9bd3..4679646 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -5,7 +5,8 @@ use ruma::{ api::client::{ error::ErrorKind, r0::uiaa::{ - IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, + AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, + UiaaInfo, }, }, signatures::CanonicalJsonValue, @@ -99,10 +100,10 @@ impl Uiaa { } // Password was correct! Let's add it to `completed` - uiaainfo.completed.push("m.login.password".to_owned()); + uiaainfo.completed.push(AuthType::Password); } IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push("m.login.dummy".to_owned()); + uiaainfo.completed.push(AuthType::Dummy); } k => error!("type not supported: {:?}", k), } diff --git a/src/pdu.rs b/src/pdu.rs index 8623b1a..b74d079 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,19 +1,28 @@ use crate::Error; use ruma::{ events::{ - pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, - AnyInitialStateEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, - AnySyncStateEvent, EventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent, + AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, + EventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UInt, UserId, + state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, }; use serde::{Deserialize, Serialize}; -use serde_json::json; +use serde_json::{ + json, + value::{to_raw_value, RawValue as RawJsonValue}, +}; use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; use tracing::warn; +/// Content hashes of a PDU. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct EventHash { + /// The SHA-256 hash. 
+ pub sha256: String, +} + #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, @@ -22,7 +31,7 @@ pub struct PduEvent { pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: EventType, - pub content: serde_json::Value, + pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, pub prev_events: Vec, @@ -30,16 +39,17 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] - pub unsigned: BTreeMap, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub unsigned: Option>, pub hashes: EventHash, - pub signatures: BTreeMap, BTreeMap>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub signatures: Option>, // BTreeMap, BTreeMap> } impl PduEvent { #[tracing::instrument(skip(self))] pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { - self.unsigned.clear(); + self.unsigned = None; let allowed: &[&str] = match self.kind { EventType::RoomMember => &["membership"], @@ -59,10 +69,9 @@ impl PduEvent { _ => &[], }; - let old_content = self - .content - .as_object_mut() - .ok_or_else(|| Error::bad_database("PDU in db has invalid content."))?; + let mut old_content = + serde_json::from_str::>(self.content.get()) + .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; let mut new_content = serde_json::Map::new(); @@ -72,12 +81,23 @@ impl PduEvent { } } - self.unsigned.insert( - "redacted_because".to_owned(), - serde_json::to_value(reason).expect("to_value(PduEvent) always works"), - ); + self.unsigned = Some(to_raw_value(&json!({ + "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") + })).expect("to string always works")); - self.content = new_content.into(); + self.content = to_raw_value(&new_content).expect("to string always works"); + + Ok(()) + } + + pub fn remove_transaction_id(&mut self) -> crate::Result<()> { + if let Some(unsigned) = &self.unsigned { + let mut unsigned = + serde_json::from_str::>>(unsigned.get()) + .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + unsigned.remove("transaction_id"); + self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + } Ok(()) } @@ -192,7 +212,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_member_event(&self) -> Raw> { + pub fn to_member_event(&self) -> Raw> { let json = json!({ "content": self.content, "type": self.kind, @@ -212,7 +232,7 @@ impl PduEvent { #[tracing::instrument] pub fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, - ) -> Raw { + ) -> Box { if let Some(unsigned) = pdu_json .get_mut("unsigned") .and_then(|val| val.as_object_mut()) @@ -229,10 +249,7 @@ impl PduEvent { // ) // .expect("Raw::from_value always works") - serde_json::from_value::>( - serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"), - ) - .expect("Raw::from_value always works") + to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value") } pub fn from_id_val( @@ -265,7 +282,7 @@ impl state_res::Event for PduEvent { &self.kind } - fn content(&self) -> &serde_json::Value { + fn content(&self) -> &RawJsonValue { &self.content } @@ -281,10 +298,6 @@ impl state_res::Event for PduEvent { Box::new(self.prev_events.iter()) } - fn depth(&self) -> &UInt { - &self.depth - } - fn auth_events(&self) -> Box + '_> { Box::new(self.auth_events.iter()) } @@ 
-292,18 +305,6 @@ impl state_res::Event for PduEvent { fn redacts(&self) -> Option<&EventId> { self.redacts.as_ref() } - - fn hashes(&self) -> &EventHash { - &self.hashes - } - - fn signatures(&self) -> BTreeMap, BTreeMap> { - self.signatures.clone() - } - - fn unsigned(&self) -> &BTreeMap { - &self.unsigned - } } // These impl's allow us to dedup state snapshots when resolving state @@ -329,9 +330,9 @@ impl Ord for PduEvent { /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( - pdu: &Raw, + pdu: &RawJsonValue, ) -> crate::Result<(EventId, CanonicalJsonObject)> { - let value = serde_json::from_str(pdu.json().get()).map_err(|e| { + let value = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; @@ -352,7 +353,7 @@ pub(crate) fn gen_event_id_canonical_json( pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, - pub content: serde_json::Value, + pub content: Box, pub unsigned: Option>, pub state_key: Option, pub redacts: Option, @@ -363,7 +364,7 @@ impl From for PduBuilder { fn from(event: AnyInitialStateEvent) -> Self { Self { event_type: EventType::from(event.event_type()), - content: serde_json::value::to_value(event.content()) + content: to_raw_value(&event.content()) .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), unsigned: None, state_key: Some(event.state_key().to_owned()), diff --git a/src/server_server.rs b/src/server_server.rs index 2b8b06c..805ae3a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,7 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, database::{rooms::CompressedStateEvent, DatabaseGuard}, + pdu::EventHash, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; use get_profile_information::v1::ProfileField; @@ -39,22 +40,22 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ - pdu::Pdu, receipt::{ReceiptEvent, ReceiptEventContent}, room::{ - create::CreateEventContent, - member::{MemberEventContent, MembershipState}, + create::RoomCreateEventContent, + member::{MembershipState, RoomMemberEventContent}, }, AnyEphemeralRoomEvent, EventType, }, + int, receipt::ReceiptType, - serde::Raw, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, }; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, @@ -1071,7 +1072,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( // and lexically by event_id. println!("{}", event_id); Ok(( - 0, + int!(0), MilliSecondsSinceUnixEpoch( eventid_info .get(event_id) @@ -1153,14 +1154,13 @@ fn handle_outlier_pdu<'a>( // 2. Check signatures, otherwise drop // 3. 
check content hash, redact if doesn't match - let create_event_content = - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; + let create_event_content = serde_json::from_str::( + create_event.content.get(), + ) + .map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1241,7 +1241,7 @@ fn handle_outlier_pdu<'a>( .expect("all auth events have state keys"), )) { hash_map::Entry::Vacant(v) => { - v.insert(auth_event.clone()); + v.insert(auth_event); } hash_map::Entry::Occupied(_) => { return Err( @@ -1276,7 +1276,7 @@ fn handle_outlier_pdu<'a>( if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create, + previous_create.as_ref(), None::, // TODO: third party invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -1319,14 +1319,13 @@ async fn upgrade_outlier_to_timeline_pdu( return Err("Event has been soft failed".into()); } - let create_event_content = - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; + let create_event_content = serde_json::from_str::( + create_event.content.get(), + ) + .map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1562,7 +1561,7 @@ async fn upgrade_outlier_to_timeline_pdu( let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_deref(), + previous_create.as_ref(), None::, // TODO: third party invite |k, s| { db.rooms @@ -1646,7 +1645,7 @@ async fn upgrade_outlier_to_timeline_pdu( let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_deref(), + previous_create.as_ref(), None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2669,13 +2668,12 @@ pub fn create_join_event_template_route( let create_event_content = create_event .as_ref() .map(|create_event| { - serde_json::from_value::>(create_event.content.clone()) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|e| { + serde_json::from_str::(create_event.content.get()).map_err( + |e| { warn!("Invalid create event: {}", e); Error::bad_database("Invalid create event in db.") - }) + }, + ) }) .transpose()?; @@ -2702,7 +2700,7 @@ pub fn create_join_event_template_route( )); } - let content = serde_json::to_value(MemberEventContent { + let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, blurhash: None, displayname: None, @@ -2738,7 +2736,7 @@ pub fn create_join_event_template_route( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), ); } @@ -2759,17 +2757,21 @@ pub fn create_join_event_template_route( .map(|(_, pdu)| pdu.event_id.clone()) .collect(), redacts: None, - unsigned, 
- hashes: ruma::events::pdu::EventHash { + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { sha256: "aaa".to_owned(), }, - signatures: BTreeMap::new(), + signatures: None, }; let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event.as_deref(), + create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2799,10 +2801,7 @@ pub fn create_join_event_template_route( Ok(create_join_event_template::v1::Response { room_version: Some(room_version_id), - event: serde_json::from_value::>( - serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"), - ) - .expect("Raw::from_value always works"), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), } .into()) } @@ -2810,7 +2809,7 @@ pub fn create_join_event_template_route( async fn create_join_event( db: &DatabaseGuard, room_id: &RoomId, - pdu: &Raw, + pdu: &RawJsonValue, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2947,7 +2946,7 @@ pub async fn create_join_event_v2_route( #[tracing::instrument(skip(db, body))] pub async fn create_invite_route( db: DatabaseGuard, - body: Ruma, + body: Ruma>, ) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3014,10 +3013,11 @@ pub async fn create_invite_route( let mut invite_state = body.invite_room_state.clone(); - let mut event = serde_json::from_str::>( - &body.event.json().to_string(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + let mut event = + serde_json::from_str::>(body.event.get()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes.") + })?; event.insert("event_id".to_owned(), "$dummy".into()); @@ -3280,13 +3280,13 @@ pub(crate) async fn fetch_required_signing_keys( // Gets a list of servers for which we don't have the signing key yet. We go over // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
fn get_server_keys_from_cache( - pdu: &Raw, + pdu: &RawJsonValue, servers: &mut BTreeMap, BTreeMap>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, ) -> Result<()> { - let value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + let value = serde_json::from_str::(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; @@ -3385,10 +3385,10 @@ pub(crate) async fn fetch_join_signing_keys( // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(&pdu, &mut servers, room_version, &mut pkm, db); } for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(&pdu, &mut servers, room_version, &mut pkm, db); } drop(pkm); From 1c4d9af586611781ad6d5337778dbe2bea695ee4 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 13 Oct 2021 10:24:39 +0200 Subject: [PATCH 0817/1727] Enable more lints and apply their suggestions --- src/client_server/room.rs | 2 +- src/database.rs | 4 ++-- src/database/abstraction.rs | 4 ++-- src/database/key_backups.rs | 2 +- src/database/proxy.rs | 2 +- src/database/rooms.rs | 12 +++++------ src/database/rooms/edus.rs | 2 +- src/lib.rs | 6 ++++++ src/main.rs | 7 ++++++- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 2 +- src/server_server.rs | 40 +++++++++++++++++-------------------- src/utils.rs | 2 +- 13 files changed, 47 insertions(+), 40 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index d1c79df..eb68135 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -475,7 +475,7 @@ pub async fn upgrade_room_route( PduBuilder { event_type: EventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { - body: "This room has been replaced".to_string(), + body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), }) .expect("event is valid, we just created it"), diff --git a/src/database.rs b/src/database.rs index 110d4d0..63c4ebc 100644 --- a/src/database.rs +++ b/src/database.rs @@ -499,13 +499,13 @@ impl Database { if let Some(parent_stateinfo) = states_parents.last() { let statediffnew = current_state .difference(&parent_stateinfo.1) - .cloned() + .copied() .collect::>(); let statediffremoved = parent_stateinfo .1 .difference(¤t_state) - .cloned() + .copied() .collect::>(); (statediffnew, statediffremoved) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 5b941fb..11bbc3b 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -22,7 +22,7 @@ pub trait Tree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; - fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()>; + fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()>; fn remove(&self, key: &[u8]) -> Result<()>; @@ -35,7 +35,7 @@ pub trait Tree: Send + Sync { ) -> Box, Vec)> + 'a>; fn increment(&self, key: &[u8]) -> Result>; - fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()>; + fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()>; fn scan_prefix<'a>( &'a self, diff --git a/src/database/key_backups.rs 
b/src/database/key_backups.rs index 27d8030..a960c72 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -81,7 +81,7 @@ impl KeyBackups { )?; self.backupid_etag .insert(&key, &globals.next_count()?.to_be_bytes())?; - Ok(version.to_string()) + Ok(version.to_owned()) } pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { diff --git a/src/database/proxy.rs b/src/database/proxy.rs index 78e9d2b..33f7f3d 100644 --- a/src/database/proxy.rs +++ b/src/database/proxy.rs @@ -136,7 +136,7 @@ impl std::str::FromStr for WildCardedDomain { }) } } -impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { +impl<'de> Deserialize<'de> for WildCardedDomain { fn deserialize(deserializer: D) -> std::result::Result where D: serde::de::Deserializer<'de>, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3096150..1912e0c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -50,7 +50,7 @@ pub type StateHashId = Vec; pub type CompressedStateEvent = [u8; 2 * size_of::()]; pub struct Rooms { - pub edus: edus::RoomEdus, + pub edus: RoomEdus, pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count pub(super) eventid_pduid: Arc, pub(super) roomid_pduleaves: Arc, @@ -371,13 +371,13 @@ impl Rooms { { let statediffnew = new_state_ids_compressed .difference(&parent_stateinfo.1) - .cloned() + .copied() .collect::>(); let statediffremoved = parent_stateinfo .1 .difference(&new_state_ids_compressed) - .cloned() + .copied() .collect::>(); (statediffnew, statediffremoved) @@ -498,7 +498,7 @@ impl Rooms { if parent != 0_u64 { let mut response = self.load_shortstatehash_info(parent)?; let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().cloned()); + state.extend(added.iter().copied()); for r in &removed { state.remove(r); } @@ -1773,13 +1773,13 @@ impl Rooms { if let Some(parent_stateinfo) = states_parents.last() { let statediffnew = state_ids_compressed .difference(&parent_stateinfo.1) - .cloned() + .copied() .collect::>(); let statediffremoved = parent_stateinfo .1 .difference(&state_ids_compressed) - .cloned() + .copied() .collect::>(); (statediffnew, statediffremoved) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index e0639ff..26f22bf 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -331,7 +331,7 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - presence: ruma::events::presence::PresenceEvent, + presence: PresenceEvent, globals: &super::super::globals::Globals, ) -> Result<()> { // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
diff --git a/src/lib.rs b/src/lib.rs index fbffb7e..82b8f34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,9 @@ +#![warn( + rust_2018_idioms, + unused_qualifications, + clippy::cloned_instead_of_copied, + clippy::str_to_string +)] #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] diff --git a/src/main.rs b/src/main.rs index 06409ee..84dfb1f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,9 @@ -#![warn(rust_2018_idioms)] +#![warn( + rust_2018_idioms, + unused_qualifications, + clippy::cloned_instead_of_copied, + clippy::str_to_string +)] #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] diff --git a/src/pdu.rs b/src/pdu.rs index b74d079..0a765e1 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -257,7 +257,7 @@ impl PduEvent { mut json: CanonicalJsonObject, ) -> Result { json.insert( - "event_id".to_string(), + "event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()), ); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4629de9..03c115c 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -344,7 +344,7 @@ impl Deref for Ruma { } /// This struct converts ruma responses into rocket http responses. -pub type ConduitResult = std::result::Result, Error>; +pub type ConduitResult = Result, Error>; pub fn response(response: RumaResponse) -> response::Result<'static> { let http_response = response diff --git a/src/server_server.rs b/src/server_server.rs index 805ae3a..e9a9485 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -336,7 +336,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { None => (destination_str, ":8448"), Some(pos) => destination_str.split_at(pos), }; - FedDest::Named(host.to_string(), port.to_string()) + FedDest::Named(host.to_owned(), port.to_owned()) } /// Returns: actual_destination, host header @@ -358,7 +358,7 @@ async fn find_actual_destination( if let Some(pos) = destination_str.find(':') { // 2: Hostname with included port let (host, port) = destination_str.split_at(pos); - FedDest::Named(host.to_string(), port.to_string()) + FedDest::Named(host.to_owned(), port.to_owned()) } else { match request_well_known(globals, destination.as_str()).await { // 3: A .well-known file is available @@ -370,7 +370,7 @@ async fn find_actual_destination( if let Some(pos) = delegated_hostname.find(':') { // 3.2: Hostname with port in .well-known file let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_string(), port.to_string()) + FedDest::Named(host.to_owned(), port.to_owned()) } else { // Delegated hostname has no port in this branch if let Some(hostname_override) = @@ -454,12 +454,12 @@ async fn find_actual_destination( let hostname = if let Ok(addr) = hostname.parse::() { FedDest::Literal(addr) } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_string()) + FedDest::Named(addr.to_string(), ":8448".to_owned()) } else if let Some(pos) = hostname.find(':') { let (host, port) = hostname.split_at(pos); - FedDest::Named(host.to_string(), port.to_string()) + FedDest::Named(host.to_owned(), port.to_owned()) } else { - FedDest::Named(hostname, ":8448".to_string()) + FedDest::Named(hostname, ":8448".to_owned()) }; (actual_destination, hostname) } @@ -476,11 +476,7 @@ async fn query_srv_record( .map(|srv| { srv.iter().next().map(|result| { FedDest::Named( - result - .target() - .to_string() - .trim_end_matches('.') - .to_string(), + result.target().to_string().trim_end_matches('.').to_owned(), format!(":{}", 
result.port()), ) }) @@ -745,7 +741,7 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_string())); + resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); continue; } }; @@ -963,7 +959,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( match db.rooms.exists(room_id) { Ok(true) => {} _ => { - return Err("Room is unknown to this server.".to_string()); + return Err("Room is unknown to this server.".to_owned()); } } @@ -1173,14 +1169,14 @@ fn handle_outlier_pdu<'a>( Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_string()); + return Err("Signature verification failed".to_owned()); } Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), + Err(_) => return Err("Redaction failed".to_owned()), } } Ok(ruma::signatures::Verified::All) => value, @@ -1195,7 +1191,7 @@ fn handle_outlier_pdu<'a>( let incoming_pdu = serde_json::from_value::( serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| "Event is not a valid PDU.".to_string())?; + .map_err(|_| "Event is not a valid PDU.".to_owned())?; // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" @@ -1280,9 +1276,9 @@ fn handle_outlier_pdu<'a>( None::, // TODO: third party invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .map_err(|_e| "Auth check failed".to_string())? + .map_err(|_e| "Auth check failed".to_owned())? { - return Err("Event has failed auth check with auth events.".to_string()); + return Err("Event has failed auth check with auth events.".to_owned()); } debug!("Validation successful."); @@ -2256,7 +2252,7 @@ pub(crate) fn get_auth_chain<'a>( .collect::>(); if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { hits += 1; - full_auth_chain.extend(cached.iter().cloned()); + full_auth_chain.extend(cached.iter().copied()); continue; } misses += 1; @@ -2267,7 +2263,7 @@ pub(crate) fn get_auth_chain<'a>( for (sevent_id, event_id) in chunk { if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ hits2 += 1; - chunk_cache.extend(cached.iter().cloned()); + chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); @@ -3385,10 +3381,10 @@ pub(crate) async fn fetch_join_signing_keys( // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(&pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); } for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(&pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); } drop(pkm); diff --git a/src/utils.rs b/src/utils.rs index d21395e..26d71a8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -123,7 +123,7 @@ pub fn deserialize_from_str< E: std::fmt::Display, >( deserializer: D, -) -> std::result::Result { +) -> Result { struct Visitor, E>(std::marker::PhantomData); impl<'de, T: FromStr, Err: std::fmt::Display> serde::de::Visitor<'de> for Visitor From f2ef5677e0f016399dce4da66d45137e2e592e8c Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 13 Oct 2021 11:51:30 +0200 Subject: [PATCH 0818/1727] Reduce turbofish usage Should make the code a little bit easier to read. --- src/client_server/config.rs | 12 +-- src/client_server/context.rs | 16 ++-- src/client_server/device.rs | 4 +- src/client_server/directory.rs | 104 +++++++++----------- src/client_server/keys.rs | 4 +- src/client_server/membership.rs | 63 ++++++------ src/client_server/message.rs | 21 ++-- src/client_server/push.rs | 34 +++---- src/client_server/room.rs | 13 ++- src/client_server/search.rs | 6 +- src/client_server/state.rs | 12 +-- src/client_server/sync.rs | 37 ++++--- src/client_server/tag.rs | 23 +++-- src/database.rs | 4 +- src/database/globals.rs | 7 +- src/database/key_backups.rs | 11 ++- src/database/rooms.rs | 165 ++++++++++++++------------------ src/database/rooms/edus.rs | 22 +++-- src/database/sending.rs | 14 +-- src/database/uiaa.rs | 22 ++--- src/database/users.rs | 9 +- src/pdu.rs | 8 +- src/server_server.rs | 107 +++++++++------------ 23 files changed, 331 insertions(+), 387 deletions(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index bd897ba..0c668ff 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -30,7 +30,7 @@ pub async fn set_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = serde_json::from_str::(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -68,7 +68,7 @@ pub async fn set_room_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data = serde_json::from_str::(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -103,9 +103,9 @@ pub async fn get_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = db + let event: Box = db .account_data - .get::>(None, sender_user, 
body.event_type.clone().into())? + .get(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; let account_data = serde_json::from_str::(event.get()) @@ -132,9 +132,9 @@ pub async fn get_room_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = db + let event: Box = db .account_data - .get::>( + .get( Some(&body.room_id), sender_user, body.event_type.clone().into(), diff --git a/src/client_server/context.rs b/src/client_server/context.rs index b2346f5..97fc4fd 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -48,7 +48,7 @@ pub async fn get_context_route( ))? .to_room_event(); - let events_before = db + let events_before: Vec<_> = db .rooms .pdus_until(sender_user, &body.room_id, base_token)? .take( @@ -58,19 +58,19 @@ pub async fn get_context_route( / 2, ) .filter_map(|r| r.ok()) // Remove buggy events - .collect::>(); + .collect(); let start_token = events_before .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); - let events_before = events_before + let events_before: Vec<_> = events_before .into_iter() .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); + .collect(); - let events_after = db + let events_after: Vec<_> = db .rooms .pdus_after(sender_user, &body.room_id, base_token)? .take( @@ -80,17 +80,17 @@ pub async fn get_context_route( / 2, ) .filter_map(|r| r.ok()) // Remove buggy events - .collect::>(); + .collect(); let end_token = events_after .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); - let events_after = events_after + let events_after: Vec<_> = events_after .into_iter() .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); + .collect(); let mut resp = get_context::Response::new(); resp.start = start_token; diff --git a/src/client_server/device.rs b/src/client_server/device.rs index b6fee37..03a3004 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -25,11 +25,11 @@ pub async fn get_devices_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let devices = db + let devices: Vec = db .users .all_devices_metadata(sender_user) .filter_map(|r| r.ok()) // Filter out buggy devices - .collect::>(); + .collect(); Ok(get_devices::Response { devices }.into()) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 835504c..490f752 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -223,7 +223,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms = db + let mut all_rooms: Vec<_> = db .rooms .public_rooms() .map(|room_id| { @@ -234,28 +234,22 @@ pub(crate) async fn get_public_rooms_filtered_helper( canonical_alias: db .rooms .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok( - serde_json::from_str::(s.content.get()) - .map_err(|_| { - Error::bad_database( - "Invalid canonical alias event in database.", - ) - })? - .alias, - ) + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomCanonicalAliasEventContent| c.alias) + .map_err(|_| { + Error::bad_database("Invalid canonical alias event in database.") + }) })?, name: db .rooms .room_state_get(&room_id, &EventType::RoomName, "")? 
- .map_or(Ok::<_, Error>(None), |s| { - Ok( - serde_json::from_str::(s.content.get()) - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - })? - .name, - ) + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomNameEventContent| c.name) + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + }) })?, num_joined_members: db .rooms @@ -269,56 +263,48 @@ pub(crate) async fn get_public_rooms_filtered_helper( topic: db .rooms .room_state_get(&room_id, &EventType::RoomTopic, "")? - .map_or(Ok::<_, Error>(None), |s| { - Ok(Some( - serde_json::from_str::(s.content.get()) - .map_err(|_| { - Error::bad_database("Invalid room topic event in database.") - })? - .topic, - )) + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomTopicEventContent| Some(c.topic)) + .map_err(|_| { + Error::bad_database("Invalid room topic event in database.") + }) })?, world_readable: db .rooms .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok(serde_json::from_str::( - s.content.get(), - ) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - })? - .history_visibility - == HistoryVisibility::WorldReadable) + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) })?, guest_can_join: db .rooms .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? - .map_or(Ok::<_, Error>(false), |s| { - Ok( - serde_json::from_str::(s.content.get()) - .map_err(|_| { - Error::bad_database( - "Invalid room guest access event in database.", - ) - })? - .guest_access - == GuestAccess::CanJoin, - ) + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| { + c.guest_access == GuestAccess::CanJoin + }) + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + }) })?, avatar_url: db .rooms .room_state_get(&room_id, &EventType::RoomAvatar, "")? .map(|s| { - Ok::<_, Error>( - serde_json::from_str::(s.content.get()) - .map_err(|_| { - Error::bad_database("Invalid room avatar event in database.") - })? - .url, - ) + serde_json::from_str(s.content.get()) + .map(|c: RoomAvatarEventContent| c.url) + .map_err(|_| { + Error::bad_database("Invalid room avatar event in database.") + }) }) .transpose()? 
// url is now an Option so we must flatten @@ -359,17 +345,17 @@ pub(crate) async fn get_public_rooms_filtered_helper( } }) // We need to collect all, so we can sort by member count - .collect::>(); + .collect(); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); let total_room_count_estimate = (all_rooms.len() as u32).into(); - let chunk = all_rooms + let chunk: Vec<_> = all_rooms .into_iter() .skip(num_since as usize) .take(limit as usize) - .collect::>(); + .collect(); let prev_batch = if num_since == 0 { None diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 980acf0..a44f5e9 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -395,7 +395,7 @@ pub(crate) async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); - let mut futures = get_over_federation + let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { let mut device_keys_input_fed = BTreeMap::new(); @@ -415,7 +415,7 @@ pub(crate) async fn get_keys_helper bool>( .await, ) }) - .collect::>(); + .collect(); while let Some((server, response)) = futures.next().await { match response { diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e37fe6c..732f616 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -56,19 +56,17 @@ pub async fn join_room_by_id_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut servers = db + let mut servers: HashSet<_> = db .rooms .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() - .filter_map(|event| { - serde_json::from_str::(&event.json().to_string()).ok() - }) - .filter_map(|event| event.get("sender").cloned()) + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| UserId::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) - .collect::>(); + .collect(); servers.insert(body.room_id.server_name().to_owned()); @@ -105,19 +103,17 @@ pub async fn join_room_by_id_or_alias_route( let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => { - let mut servers = db + let mut servers: HashSet<_> = db .rooms .invite_state(sender_user, &room_id)? .unwrap_or_default() .iter() - .filter_map(|event| { - serde_json::from_str::(&event.json().to_string()).ok() - }) - .filter_map(|event| event.get("sender").cloned()) + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| UserId::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) - .collect::>(); + .collect(); servers.insert(room_id.server_name().to_owned()); (servers, room_id) @@ -280,7 +276,7 @@ pub async fn ban_user_route( &body.user_id.to_string(), )? 
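The servers collection above chains filter_map steps so that any malformed invite-state event is silently skipped rather than failing the join. A self-contained sketch of the same shape, using plain string splitting where the real code parses a ruma UserId:

use std::collections::HashSet;

use serde_json::Value;

fn server_names(raw_events: &[&str]) -> HashSet<String> {
    raw_events
        .iter()
        .filter_map(|raw| serde_json::from_str::<Value>(raw).ok())
        .filter_map(|event| event.get("sender").cloned())
        .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
        // Hypothetical stand-in for UserId parsing: take the server part after ':'.
        .filter_map(|sender| sender.split(':').nth(1).map(|s| s.to_owned()))
        .collect()
}

fn main() {
    let events = [r#"{"sender":"@alice:example.org"}"#, "not even json"];
    assert_eq!(
        server_names(&events),
        HashSet::from(["example.org".to_owned()])
    );
}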
.map_or( - Ok::<_, Error>(RoomMemberEventContent { + Ok(RoomMemberEventContent { membership: MembershipState::Ban, displayname: db.users.displayname(&body.user_id)?, avatar_url: db.users.avatar_url(&body.user_id)?, @@ -290,10 +286,12 @@ pub async fn ban_user_route( reason: None, }), |event| { - let mut event = serde_json::from_str::(event.content.get()) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = MembershipState::Ban; - Ok(event) + serde_json::from_str(event.content.get()) + .map(|event: RoomMemberEventContent| RoomMemberEventContent { + membership: MembershipState::Ban, + ..event + }) + .map_err(|_| Error::bad_database("Invalid member event in database.")) }, )?; @@ -342,7 +340,7 @@ pub async fn unban_user_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_str::( + let mut event: RoomMemberEventContent = serde_json::from_str( db.rooms .room_state_get( &body.room_id, @@ -577,10 +575,10 @@ async fn join_room_by_id_helper( _ => return Err(Error::BadServerResponse("Room version is not supported")), }; - let mut join_event_stub = - serde_json::from_str::(make_join_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_join event json received from server."), - )?; + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|_| { + Error::BadServerResponse("Invalid make_join event json received from server.") + })?; // TODO: Is origin needed? join_event_stub.insert( @@ -716,7 +714,7 @@ async fn join_room_by_id_helper( state .into_iter() .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) - .collect::>>()?, + .collect::>()?, db, )?; @@ -787,7 +785,7 @@ fn validate_and_add_event_id( pub_key_map: &RwLock>>, db: &Database, ) -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str::(pdu.get()).map_err(|e| { + let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; @@ -863,25 +861,24 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - let prev_events = db + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(room_id)? 
.into_iter() .take(20) - .collect::>(); + .collect(); let create_event = db .rooms .room_state_get(room_id, &EventType::RoomCreate, "")?; - let create_event_content = create_event + let create_event_content: Option = create_event .as_ref() .map(|create_event| { - serde_json::from_str::(create_event.content.get()) - .map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; @@ -1057,7 +1054,7 @@ pub(crate) async fn invite_helper<'a>( warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } - let origin = serde_json::from_value::>( + let origin: Box = serde_json::from_value( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 93ead2c..d778d6f 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -132,14 +132,11 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); // Use limit or else 10 - let limit = body - .limit - .try_into() - .map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?; + let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); match body.dir { get_message_events::Direction::Forward => { - let events_after = db + let events_after: Vec<_> = db .rooms .pdus_after(sender_user, &body.room_id, from)? .take(limit) @@ -151,14 +148,14 @@ pub async fn get_message_events_route( .ok() }) .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect::>(); + .collect(); let end_token = events_after.last().map(|(count, _)| count.to_string()); - let events_after = events_after + let events_after: Vec<_> = events_after .into_iter() .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); + .collect(); let mut resp = get_message_events::Response::new(); resp.start = Some(body.from.to_owned()); @@ -169,7 +166,7 @@ pub async fn get_message_events_route( Ok(resp.into()) } get_message_events::Direction::Backward => { - let events_before = db + let events_before: Vec<_> = db .rooms .pdus_until(sender_user, &body.room_id, from)? 
.take(limit) @@ -181,14 +178,14 @@ pub async fn get_message_events_route( .ok() }) .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect::>(); + .collect(); let start_token = events_before.last().map(|(count, _)| count.to_string()); - let events_before = events_before + let events_before: Vec<_> = events_before .into_iter() .map(|(_, pdu)| pdu.to_room_event()) - .collect::>(); + .collect(); let mut resp = get_message_events::Response::new(); resp.start = Some(body.from.to_owned()); diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 98555d0..64f27f1 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -8,7 +8,7 @@ use ruma::{ set_pushrule_enabled, RuleKind, }, }, - events::{push_rules, EventType}, + events::{push_rules::PushRulesEvent, EventType}, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; @@ -29,9 +29,9 @@ pub async fn get_pushrules_all_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = db + let event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -57,9 +57,9 @@ pub async fn get_pushrule_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = db + let event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -122,9 +122,9 @@ pub async fn set_pushrule_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -222,9 +222,9 @@ pub async fn get_pushrule_actions_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -284,9 +284,9 @@ pub async fn set_pushrule_actions_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -356,9 +356,9 @@ pub async fn get_pushrule_enabled_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -420,9 +420,9 @@ pub async fn set_pushrule_enabled_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -497,9 +497,9 @@ pub async fn delete_pushrule_route( )); } - let mut event = db + let mut event: PushRulesEvent = db .account_data - .get::(None, sender_user, EventType::PushRules)? + .get(None, sender_user, EventType::PushRules)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", diff --git a/src/client_server/room.rs b/src/client_server/room.rs index eb68135..47ffb0d 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,6 +22,7 @@ use ruma::{ }, EventType, }, + serde::JsonObject, RoomAliasId, RoomId, RoomVersionId, }; use serde_json::value::to_raw_value; @@ -175,12 +176,10 @@ pub async fn create_room_route( .expect("event is valid, we just created it"); if let Some(power_level_content_override) = &body.power_level_content_override { - let json = serde_json::from_str::>( - power_level_content_override.json().get(), - ) - .map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })?; + let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) + .map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") + })?; for (key, value) in json { power_levels_content[key] = value; @@ -605,7 +604,7 @@ pub async fn upgrade_room_route( } // Get the old room power levels - let mut power_levels_event_content = serde_json::from_str::( + let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 9ff1a1b..59c9480 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -74,7 +74,7 @@ pub async fn search_events_route( } } - let results = results + let results: Vec<_> = results .iter() .map(|result| { Ok::<_, Error>(SearchResult { @@ -95,7 +95,7 @@ pub async fn search_events_route( .filter_map(|r| r.ok()) .skip(skip) .take(limit) - .collect::>(); + .collect(); let next_batch = if results.len() < limit as usize { None @@ -114,7 +114,7 @@ pub async fn search_events_route( .search_term .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) - .collect::>(), + .collect(), }, }) .into()) diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 3515733..8581591 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -112,13 +112,13 @@ pub async fn get_state_events_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_str::(event.content.get()) + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", ) }) - .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) ) @@ -164,13 +164,13 @@ pub async fn get_state_events_for_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .map(|event| { - serde_json::from_str::(event.content.get()) + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", ) }) - .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) ) @@ -220,13 +220,13 @@ pub async fn get_state_events_for_empty_key_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? 
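The power_level_content_override handling above deserializes the client-supplied object into a JSON map and writes each key over the generated defaults. A standalone sketch of that merge using serde_json only (the two field names are just examples):

use serde_json::{json, Map, Value};

fn main() {
    let mut power_levels = json!({ "users_default": 0, "state_default": 50 });

    let overrides: Map<String, Value> =
        serde_json::from_str(r#"{ "state_default": 100 }"#).expect("valid JSON");

    // Every override replaces (or adds) the matching entry in the defaults.
    for (key, value) in overrides {
        power_levels[key] = value;
    }

    assert_eq!(power_levels["state_default"], 100);
    assert_eq!(power_levels["users_default"], 0);
}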
.map(|event| { - serde_json::from_str::(event.content.get()) + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) .map_err(|_| { Error::bad_database( "Invalid room history visibility event in database.", ) }) - .map(|e| e.history_visibility) }), Some(Ok(HistoryVisibility::WorldReadable)) ) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 5b0dbaf..284aeb0 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -244,13 +244,13 @@ async fn sync_helper( }); // Take the last 10 events for the timeline - let timeline_pdus = non_timeline_pdus + let timeline_pdus: Vec<_> = non_timeline_pdus .by_ref() .take(10) .collect::>() .into_iter() .rev() - .collect::>(); + .collect(); let send_notification_counts = !timeline_pdus.is_empty() || db @@ -290,11 +290,10 @@ async fn sync_helper( .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .map(|(_, pdu)| { - let content = - serde_json::from_str::(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; + let content: RoomMemberEventContent = + serde_json::from_str(pdu.content.get()).map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; if let Some(state_key) = &pdu.state_key { let user_id = UserId::try_from(state_key.clone()).map_err(|_| { @@ -347,11 +346,11 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let state_events = current_state_ids + let state_events: Vec<_> = current_state_ids .iter() .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) - .collect::>(); + .collect(); ( heroes, @@ -367,7 +366,7 @@ async fn sync_helper( // Incremental /sync let since_shortstatehash = since_shortstatehash.unwrap(); - let since_sender_member = db + let since_sender_member: Option = db .rooms .state_get( since_shortstatehash, @@ -375,7 +374,7 @@ async fn sync_helper( sender_user.as_str(), )? .and_then(|pdu| { - serde_json::from_str::(pdu.content.get()) + serde_json::from_str(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid PDU in database.")) .ok() }); @@ -523,18 +522,18 @@ async fn sync_helper( Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) })?; - let room_events = timeline_pdus + let room_events: Vec<_> = timeline_pdus .iter() .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect::>(); + .collect(); - let mut edus = db + let mut edus: Vec<_> = db .rooms .edus .readreceipts_since(&room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) - .collect::>(); + .collect(); if db.rooms.edus.last_typing_update(&room_id, &db.globals)? 
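The timeline slice in sync_helper above takes the ten most recent PDUs from an iterator that walks backwards in time and then reverses them into chronological order for the response. A tiny sketch of that take-then-reverse step, with integers standing in for PDU counts and a newest-first iterator assumed:

fn main() {
    // Pretend counts 0..25 exist and the source yields them newest first.
    let newest_first = (0u32..25).rev();

    let timeline: Vec<u32> = newest_first
        .take(10)
        .collect::<Vec<_>>()
        .into_iter()
        .rev()
        .collect();

    // The last ten events, oldest to newest.
    assert_eq!(timeline, (15..25).collect::<Vec<u32>>());
}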
> since { edus.push( @@ -563,7 +562,7 @@ async fn sync_helper( .map_err(|_| Error::bad_database("Invalid account event in database.")) .ok() }) - .collect::>(), + .collect(), }, summary: sync_events::RoomSummary { heroes, @@ -628,7 +627,7 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms = db.rooms.rooms_left(&sender_user).collect::>(); + let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; @@ -668,7 +667,7 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms = db.rooms.rooms_invited(&sender_user).collect::>(); + let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; @@ -750,7 +749,7 @@ async fn sync_helper( .map_err(|_| Error::bad_database("Invalid account event in database.")) .ok() }) - .collect::>(), + .collect(), }, device_lists: sync_events::DeviceLists { changed: device_list_updates.into_iter().collect(), diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 1eb508c..42bad4c 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,7 +1,10 @@ use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use ruma::{ api::client::r0::tag::{create_tag, delete_tag, get_tags}, - events::EventType, + events::{ + tag::{TagEvent, TagEventContent}, + EventType, + }, }; use std::collections::BTreeMap; @@ -26,9 +29,9 @@ pub async fn update_tag_route( let mut tags_event = db .account_data - .get::(Some(&body.room_id), sender_user, EventType::Tag)? - .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { + .get(Some(&body.room_id), sender_user, EventType::Tag)? + .unwrap_or_else(|| TagEvent { + content: TagEventContent { tags: BTreeMap::new(), }, }); @@ -68,9 +71,9 @@ pub async fn delete_tag_route( let mut tags_event = db .account_data - .get::(Some(&body.room_id), sender_user, EventType::Tag)? - .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { + .get(Some(&body.room_id), sender_user, EventType::Tag)? + .unwrap_or_else(|| TagEvent { + content: TagEventContent { tags: BTreeMap::new(), }, }); @@ -108,9 +111,9 @@ pub async fn get_tags_route( Ok(get_tags::Response { tags: db .account_data - .get::(Some(&body.room_id), sender_user, EventType::Tag)? - .unwrap_or_else(|| ruma::events::tag::TagEvent { - content: ruma::events::tag::TagEventContent { + .get(Some(&body.room_id), sender_user, EventType::Tag)? 
+ .unwrap_or_else(|| TagEvent { + content: TagEventContent { tags: BTreeMap::new(), }, }) diff --git a/src/database.rs b/src/database.rs index 63c4ebc..87190aa 100644 --- a/src/database.rs +++ b/src/database.rs @@ -699,7 +699,7 @@ impl Database { println!("Deleting starts"); - let batch2 = db + let batch2: Vec<_> = db .rooms .tokenids .iter() @@ -711,7 +711,7 @@ impl Database { None } }) - .collect::>(); + .collect(); for key in batch2 { println!("del"); diff --git a/src/database/globals.rs b/src/database/globals.rs index 2f1b45a..46eab63 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -57,8 +57,7 @@ pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); impl RotationHandler { pub fn new() -> Self { - let (s, r) = broadcast::channel::<()>(1); - + let (s, r) = broadcast::channel(1); Self(s, r) } @@ -274,8 +273,8 @@ impl Globals { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? - .and_then(|bytes| serde_json::from_slice::(&bytes).ok()) - .map(|keys| { + .and_then(|bytes| serde_json::from_slice(&bytes).ok()) + .map(|keys: ServerSigningKeys| { let mut tree = keys.verify_keys; tree.extend( keys.old_verify_keys diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index a960c72..98ea011 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -94,15 +94,15 @@ impl KeyBackups { .iter_from(&last_possible_key, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .next() - .map_or(Ok(None), |(key, _)| { + .map(|(key, _)| { utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - .map(Some) }) + .transpose() } pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { @@ -115,7 +115,7 @@ impl KeyBackups { .iter_from(&last_possible_key, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .next() - .map_or(Ok(None), |(key, value)| { + .map(|(key, value)| { let version = utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -123,13 +123,14 @@ impl KeyBackups { ) .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - Ok(Some(( + Ok(( version, serde_json::from_slice(&value).map_err(|_| { Error::bad_database("Algorithm in backupid_algorithm is invalid.") })?, - ))) + )) }) + .transpose() } pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1912e0c..c5b795b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -13,13 +13,16 @@ use rocket::http::RawStr; use ruma::{ api::{client::error::ErrorKind, federation}, events::{ - ignored_user_list, push_rules, + direct::DirectEvent, + ignored_user_list::IgnoredUserListEvent, + push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, + tag::TagEvent, AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, push::{Action, Ruleset, Tweak}, @@ -218,16 +221,16 @@ impl Rooms { self.eventid_shorteventid .get(event_id.as_bytes())? .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash.get(&shorteventid)?.map_or( - Ok::<_, Error>(None), - |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + self.shorteventid_shortstatehash + .get(&shorteventid)? 
+ .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database( "Invalid shortstatehash bytes in shorteventid_shortstatehash", ) - })?)) - }, - ) + }) + }) + .transpose() }) } @@ -369,16 +372,16 @@ impl Rooms { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = new_state_ids_compressed + let statediffnew: HashSet<_> = new_state_ids_compressed .difference(&parent_stateinfo.1) .copied() - .collect::>(); + .collect(); - let statediffremoved = parent_stateinfo + let statediffremoved: HashSet<_> = parent_stateinfo .1 .difference(&new_state_ids_compressed) .copied() - .collect::>(); + .collect(); (statediffnew, statediffremoved) } else { @@ -409,7 +412,7 @@ impl Rooms { continue; } - let pdu = match serde_json::from_str::( + let pdu: PduEvent = match serde_json::from_str( &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), ) { Ok(pdu) => pdu, @@ -980,7 +983,8 @@ impl Rooms { pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) + .map(|pdu_id| self.pdu_count(&pdu_id)) + .transpose() } #[tracing::instrument(skip(self))] @@ -1008,7 +1012,7 @@ impl Rooms { pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or_else::, _, _>( + .map_or_else( || self.eventid_outlierpdu.get(event_id.as_bytes()), |pduid| { Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { @@ -1041,14 +1045,12 @@ impl Rooms { ) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or_else::, _, _>( - || Ok(None), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -1058,9 +1060,7 @@ impl Rooms { /// Returns the pdu's id. #[tracing::instrument(skip(self))] pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) + self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. @@ -1070,14 +1070,12 @@ impl Rooms { pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or_else::, _, _>( - || Ok(None), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -1096,11 +1094,8 @@ impl Rooms { if let Some(pdu) = self .eventid_pduid .get(event_id.as_bytes())? - .map_or_else::, _, _>( - || { - let r = self.eventid_outlierpdu.get(event_id.as_bytes()); - r - }, + .map_or_else( + || self.eventid_outlierpdu.get(event_id.as_bytes()), |pduid| { Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { Error::bad_database("Invalid pduid in eventid_pduid.") @@ -1363,8 +1358,8 @@ impl Rooms { let rules_for_user = db .account_data - .get::(None, user, EventType::PushRules)? 
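Several conversions above replace map_or(Ok(None), ..) with .map(..).transpose(), turning an Option<Result<T, E>> inside out so the question-mark operator can apply to the whole lookup. A sketch of the idiom against an optional byte value standing in for a tree lookup (the error text is illustrative):

use std::convert::TryInto;

fn parse_count(bytes: Option<&[u8]>) -> Result<Option<u64>, String> {
    bytes
        .map(|b| {
            // Fallible conversion of the raw bytes into a u64.
            b.try_into()
                .map(u64::from_be_bytes)
                .map_err(|_| "Invalid count in db.".to_owned())
        })
        // Option<Result<u64, String>> -> Result<Option<u64>, String>.
        .transpose()
}

fn main() {
    assert_eq!(parse_count(None), Ok(None));

    let seven = 7u64.to_be_bytes();
    assert_eq!(parse_count(Some(&seven[..])), Ok(Some(7)));

    let bad = [1u8, 2, 3];
    assert!(parse_count(Some(&bad[..])).is_err());
}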
- .map(|ev| ev.content.global) + .get(None, user, EventType::PushRules)? + .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| Ruleset::server_default(user)); let mut highlight = false; @@ -1490,11 +1485,11 @@ impl Rooms { { let mut lines = body.lines(); let command_line = lines.next().expect("each string has at least one line"); - let body = lines.collect::>(); + let body: Vec<_> = lines.collect(); let mut parts = command_line.split_whitespace().skip(1); if let Some(command) = parts.next() { - let args = parts.collect::>(); + let args: Vec<_> = parts.collect(); match command { "register_appservice" => { @@ -1771,16 +1766,16 @@ impl Rooms { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = state_ids_compressed + let statediffnew: HashSet<_> = state_ids_compressed .difference(&parent_stateinfo.1) .copied() - .collect::>(); + .collect(); - let statediffremoved = parent_stateinfo + let statediffremoved: HashSet<_> = parent_stateinfo .1 .difference(&state_ids_compressed) .copied() - .collect::>(); + .collect(); (statediffnew, statediffremoved) } else { @@ -2363,19 +2358,16 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self .room_state_get(room_id, &EventType::RoomCreate, "")? - .and_then(|create| { - serde_json::from_str::(create.content.get()) - .ok() - }) - .and_then(|content| content.predecessor) + .and_then(|create| serde_json::from_str(create.content.get()).ok()) + .and_then(|content: RoomCreateEventContent| content.predecessor) { // Copy user settings from predecessor to the current room: // - Push rules // // TODO: finish this once push rules are implemented. // - // let mut push_rules_event_content = account_data - // .get::( + // let mut push_rules_event_content: PushRulesEvent = account_data + // .get( // None, // user_id, // EventType::PushRules, @@ -2395,13 +2387,11 @@ impl Rooms { // .ok(); // Copy old tags to new room - if let Some(tag_event) = - db.account_data.get::( - Some(&predecessor.room_id), - user_id, - EventType::Tag, - )? - { + if let Some(tag_event) = db.account_data.get::( + Some(&predecessor.room_id), + user_id, + EventType::Tag, + )? { db.account_data .update( Some(room_id), @@ -2415,11 +2405,8 @@ impl Rooms { // Copy direct chat flag if let Some(mut direct_event) = - db.account_data.get::( - None, - user_id, - EventType::Direct, - )? + db.account_data + .get::(None, user_id, EventType::Direct)? { let mut room_ids_updated = false; @@ -2458,7 +2445,7 @@ impl Rooms { // We want to know if the sender is ignored by the receiver let is_ignored = db .account_data - .get::( + .get::( None, // Ignored users are in global account data user_id, // Receiver EventType::IgnoredUserList, @@ -2712,7 +2699,7 @@ impl Rooms { ); let state_lock = mutex_state.lock().await; - let mut event = serde_json::from_str::( + let mut event: RoomMemberEventContent = serde_json::from_str( self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, @@ -2762,16 +2749,14 @@ impl Rooms { "User is not invited.", ))?; - let servers = invite_state + let servers: HashSet<_> = invite_state .iter() - .filter_map(|event| { - serde_json::from_str::(&event.json().to_string()).ok() - }) - .filter_map(|event| event.get("sender").cloned()) + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| UserId::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) - .collect::>(); + .collect(); for remote_server in servers { let make_leave_response = db @@ -2920,14 +2905,13 @@ impl Rooms { pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { self.alias_roomid .get(alias.alias().as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some( - RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))?, - )) + .map(|bytes| { + RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) }) + .transpose() } #[tracing::instrument(skip(self))] @@ -2987,11 +2971,11 @@ impl Rooms { .to_vec(); let prefix_clone = prefix.clone(); - let words = search_string + let words: Vec<_> = search_string .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) .map(str::to_lowercase) - .collect::>(); + .collect(); let iterators = words.clone().into_iter().map(move |word| { let mut prefix2 = prefix.clone(); @@ -3004,12 +2988,7 @@ impl Rooms { self.tokenids .iter_from(&last_possible_id, true) // Newest pdus first .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| { - let pdu_id = key[key.len() - size_of::()..].to_vec(); - - Ok::<_, Error>(pdu_id) - }) - .filter_map(|r| r.ok()) + .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); Ok(( @@ -3241,11 +3220,11 @@ impl Rooms { self.roomuserid_leftcount .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid leftcount in db.") - })?)) + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid leftcount in db.")) }) + .transpose() } /// Returns an iterator over all rooms this user joined. diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 26f22bf..9a27e43 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -162,11 +162,12 @@ impl RoomEdus { Ok(self .roomuserid_lastprivatereadupdate .get(&key)? - .map_or(Ok::<_, Error>(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - })?)) - })? + }) + }) + .transpose()? .unwrap_or(0)) } @@ -286,11 +287,12 @@ impl RoomEdus { Ok(self .roomid_lasttypingupdate .get(room_id.as_bytes())? - .map_or(Ok::<_, Error>(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - })?)) - })? + }) + }) + .transpose()? 
.unwrap_or(0)) } @@ -399,7 +401,7 @@ impl RoomEdus { self.presenceid_presence .get(&presence_id)? .map(|value| { - let mut presence = serde_json::from_slice::(&value) + let mut presence: PresenceEvent = serde_json::from_slice(&value) .map_err(|_| Error::bad_database("Invalid presence event in db."))?; let current_timestamp: UInt = utils::millis_since_unix_epoch() .try_into() @@ -521,7 +523,7 @@ impl RoomEdus { ) .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - let mut presence = serde_json::from_slice::(&value) + let mut presence: PresenceEvent = serde_json::from_slice(&value) .map_err(|_| Error::bad_database("Invalid presence event in db."))?; let current_timestamp: UInt = utils::millis_since_unix_epoch() diff --git a/src/database/sending.rs b/src/database/sending.rs index c1abcde..b4acce1 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -27,7 +27,7 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, + events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType}, push, receipt::ReceiptType, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, @@ -165,13 +165,13 @@ impl Sending { } // Find events that have been added since starting the last request - let new_events = guard.sending.servernameevent_data + let new_events: Vec<_> = guard.sending.servernameevent_data .scan_prefix(prefix.clone()) .filter_map(|(k, v)| { Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) }) .take(30) - .collect::>(); + .collect::<>(); // TODO: find edus @@ -344,8 +344,8 @@ impl Sending { continue; } - let event = - serde_json::from_str::(read_receipt.json().get()) + let event: AnySyncEphemeralRoomEvent = + serde_json::from_str(read_receipt.json().get()) .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; let federation_event = match event { AnySyncEphemeralRoomEvent::Receipt(r) => { @@ -612,9 +612,9 @@ impl Sending { let rules_for_user = db .account_data - .get::(None, &userid, EventType::PushRules) + .get(None, &userid, EventType::PushRules) .unwrap_or_default() - .map(|ev| ev.content.global) + .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); let unread: UInt = db diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 4679646..1c0fb56 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -175,16 +175,14 @@ impl Uiaa { self.userdevicesessionid_uiaarequest .get(&userdevicesessionid)? - .map_or(Ok(None), |bytes| { - Ok::<_, Error>(Some( - serde_json::from_str::( - &utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid uiaa request bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid uiaa request in db."))?, - )) + .map(|bytes| { + serde_json::from_str::( + &utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?, + ) + .map_err(|_| Error::bad_database("Invalid uiaa request in db.")) }) + .transpose() } fn update_uiaa_session( @@ -225,7 +223,7 @@ impl Uiaa { userdevicesessionid.push(0xff); userdevicesessionid.extend_from_slice(session.as_bytes()); - let uiaainfo = serde_json::from_slice::( + serde_json::from_slice( &self .userdevicesessionid_uiaainfo .get(&userdevicesessionid)? 
@@ -234,8 +232,6 @@ impl Uiaa { "UIAA session does not exist.", ))?, ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; - - Ok(uiaainfo) + .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) } } diff --git a/src/database/users.rs b/src/database/users.rs index 63ed071..37a5dd3 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -603,10 +603,11 @@ impl Users { key.push(0xff); key.extend_from_slice(key_id.as_bytes()); - let mut cross_signing_key = - serde_json::from_slice::(&self.keyid_key.get(&key)?.ok_or( - Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."), - )?) + let mut cross_signing_key: serde_json::Value = + serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to sign nonexistent key.", + ))?) .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; let signatures = cross_signing_key diff --git a/src/pdu.rs b/src/pdu.rs index 0a765e1..0f99f43 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -69,8 +69,8 @@ impl PduEvent { _ => &[], }; - let mut old_content = - serde_json::from_str::>(self.content.get()) + let mut old_content: BTreeMap = + serde_json::from_str(self.content.get()) .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; let mut new_content = serde_json::Map::new(); @@ -92,8 +92,8 @@ impl PduEvent { pub fn remove_transaction_id(&mut self) -> crate::Result<()> { if let Some(unsigned) = &self.unsigned { - let mut unsigned = - serde_json::from_str::>>(unsigned.get()) + let mut unsigned: BTreeMap> = + serde_json::from_str(unsigned.get()) .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; unsigned.remove("transaction_id"); self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); diff --git a/src/server_server.rs b/src/server_server.rs index e9a9485..cb00baa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -49,6 +49,7 @@ use ruma::{ }, int, receipt::ReceiptType, + serde::JsonObject, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, @@ -1003,12 +1004,12 @@ pub(crate) async fn handle_incoming_pdu<'a>( // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events let mut graph = HashMap::new(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack = incoming_pdu + let mut todo_outlier_stack: Vec<_> = incoming_pdu .prev_events .iter() .cloned() .map(Arc::new) - .collect::>(); + .collect(); let mut amount = 0; @@ -1150,13 +1151,11 @@ fn handle_outlier_pdu<'a>( // 2. Check signatures, otherwise drop // 3. 
check content hash, redact if doesn't match - let create_event_content = serde_json::from_str::( - create_event.content.get(), - ) - .map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1315,13 +1314,11 @@ async fn upgrade_outlier_to_timeline_pdu( return Err("Event has been soft failed".into()); } - let create_event_content = serde_json::from_str::( - create_event.content.get(), - ) - .map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); @@ -1633,7 +1630,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(*shortstatekey, id, &db.globals) .map_err(|_| "Failed to compress_state_event".to_owned()) }) - .collect::>()?; + .collect::>()?; // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it debug!("starting soft fail auth check"); @@ -1753,7 +1750,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(*k, id, &db.globals) .map_err(|_| "Failed to compress_state_event.".to_owned()) }) - .collect::>()? + .collect::>()? } else { // We do need to force an update to this room's state update_state = true; @@ -1772,7 +1769,7 @@ async fn upgrade_outlier_to_timeline_pdu( ); } - let fork_states = &fork_states + let fork_states: Vec<_> = fork_states .into_iter() .map(|map| { map.into_iter() @@ -1783,12 +1780,12 @@ async fn upgrade_outlier_to_timeline_pdu( }) .collect::>>() }) - .collect::>>() + .collect::>() .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; let state = match state_res::resolve( room_version_id, - fork_states, + &fork_states, auth_chain_sets, |id| { let res = db.rooms.get_pdu(id); @@ -1815,7 +1812,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(shortstatekey, &event_id, &db.globals) .map_err(|_| "Failed to compress state event".to_owned()) }) - .collect::>()? + .collect::>()? }; // Set the new room state to the resolved state @@ -2035,12 +2032,12 @@ pub(crate) async fn fetch_signing_keys( trace!("Loading signing keys for {}", origin); - let mut result = db + let mut result: BTreeMap<_, _> = db .globals .signing_keys_for(origin)? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); + .collect(); if contains_all_ids(&result) { return Ok(result); @@ -2245,11 +2242,7 @@ pub(crate) fn get_auth_chain<'a>( continue; } - let chunk_key = chunk - .iter() - .map(|(short, _)| short) - .copied() - .collect::>(); + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? 
{ hits += 1; full_auth_chain.extend(cached.iter().copied()); @@ -2564,9 +2557,9 @@ pub fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - Ok::<_, Error>(PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id)?.unwrap(), - )) + db.rooms.get_pdu_json(&id).map(|maybe_json| { + PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) + }) }) .filter_map(|r| r.ok()) .collect(), @@ -2650,26 +2643,24 @@ pub fn create_join_event_template_route( )); } - let prev_events = db + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(&body.room_id)? .into_iter() .take(20) - .collect::>(); + .collect(); let create_event = db .rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")?; - let create_event_content = create_event + let create_event_content: Option = create_event .as_ref() .map(|create_event| { - serde_json::from_str::(create_event.content.get()).map_err( - |e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }, - ) + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) }) .transpose()?; @@ -2835,7 +2826,7 @@ async fn create_join_event( } }; - let origin = serde_json::from_value::>( + let origin: Box = serde_json::from_value( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", @@ -3009,15 +3000,12 @@ pub async fn create_invite_route( let mut invite_state = body.invite_room_state.clone(); - let mut event = - serde_json::from_str::>(body.event.get()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes.") - })?; + let mut event: JsonObject = serde_json::from_str(body.event.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; event.insert("event_id".to_owned(), "$dummy".into()); - let pdu = serde_json::from_value::(event.into()).map_err(|e| { + let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { warn!("Invalid invite event: {}", e); Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") })?; @@ -3282,7 +3270,7 @@ fn get_server_keys_from_cache( pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, ) -> Result<()> { - let value = serde_json::from_str::(pdu.get()).map_err(|e| { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; @@ -3343,19 +3331,16 @@ fn get_server_keys_from_cache( trace!("Loading signing keys for {}", origin); - let result = db + let result: BTreeMap<_, _> = db .globals .signing_keys_for(origin)? 
.into_iter() .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); + .collect(); if !contains_all_ids(&result) { trace!("Signing key not loaded for {}", origin); - servers.insert( - origin.clone(), - BTreeMap::::new(), - ); + servers.insert(origin.clone(), BTreeMap::new()); } pub_key_map.insert(origin.to_string(), result); @@ -3370,8 +3355,8 @@ pub(crate) async fn fetch_join_signing_keys( pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { - let mut servers = - BTreeMap::, BTreeMap>::new(); + let mut servers: BTreeMap, BTreeMap> = + BTreeMap::new(); { let mut pkm = pub_key_map @@ -3436,7 +3421,7 @@ pub(crate) async fn fetch_join_signing_keys( } } - let mut futures = servers + let mut futures: FuturesUnordered<_> = servers .into_iter() .map(|(server, _)| async move { ( @@ -3450,16 +3435,16 @@ pub(crate) async fn fetch_join_signing_keys( server, ) }) - .collect::>(); + .collect(); while let Some(result) = futures.next().await { if let (Ok(get_keys_response), origin) = result { - let result = db + let result: BTreeMap<_, _> = db .globals .add_signing_key(&origin, get_keys_response.server_key.clone())? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); + .collect(); pub_key_map .write() From 91afa1e0e025695b677eec0ddade248c184efe8e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 13 Oct 2021 11:56:46 +0200 Subject: [PATCH 0819/1727] Make Result alias usable with any error type --- src/client_server/sync.rs | 4 ++-- src/database/proxy.rs | 4 ++-- src/database/sending.rs | 2 +- src/error.rs | 2 +- src/server_server.rs | 16 +++++++--------- 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 284aeb0..e4c12c4 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -60,7 +60,7 @@ use rocket::{get, tokio}; pub async fn sync_events_route( db: DatabaseGuard, body: Ruma>, -) -> std::result::Result, RumaResponse> { +) -> Result, RumaResponse> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -182,7 +182,7 @@ async fn sync_helper( full_state: bool, timeout: Option, // bool = caching allowed -) -> std::result::Result<(sync_events::Response, bool), Error> { +) -> Result<(sync_events::Response, bool), Error> { // TODO: match body.set_presence { db.rooms.edus.ping_presence(&sender_user)?; diff --git a/src/database/proxy.rs b/src/database/proxy.rs index 33f7f3d..fb0387c 100644 --- a/src/database/proxy.rs +++ b/src/database/proxy.rs @@ -125,7 +125,7 @@ impl WildCardedDomain { } impl std::str::FromStr for WildCardedDomain { type Err = std::convert::Infallible; - fn from_str(s: &str) -> std::result::Result { + fn from_str(s: &str) -> Result { // maybe do some domain validation? 
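Both key-fetching paths above collect one request future per remote server into a FuturesUnordered and drain it in completion order, so a slow homeserver does not hold up the others. A minimal sketch of that pattern with dummy data in place of federation requests (assumes the futures-util and tokio crates):

use futures_util::{stream::FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let servers = vec!["a.example", "b.example", "c.example"];

    let mut futures: FuturesUnordered<_> = servers
        .into_iter()
        .map(|server| async move { (server, server.len()) })
        .collect();

    // Results arrive in whatever order the futures finish, not input order.
    while let Some((server, key_count)) = futures.next().await {
        println!("{}: {} signing keys", server, key_count);
    }
}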
Ok(if s.starts_with("*.") { WildCardedDomain::WildCarded(s[1..].to_owned()) @@ -137,7 +137,7 @@ impl std::str::FromStr for WildCardedDomain { } } impl<'de> Deserialize<'de> for WildCardedDomain { - fn deserialize(deserializer: D) -> std::result::Result + fn deserialize(deserializer: D) -> Result where D: serde::de::Deserializer<'de>, { diff --git a/src/database/sending.rs b/src/database/sending.rs index b4acce1..bf0cc2c 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -485,7 +485,7 @@ impl Sending { kind: OutgoingKind, events: Vec, db: Arc>, - ) -> std::result::Result { + ) -> Result { let db = db.read().await; match &kind { diff --git a/src/error.rs b/src/error.rs index 1ecef3a..7faddc9 100644 --- a/src/error.rs +++ b/src/error.rs @@ -20,7 +20,7 @@ use { tracing::error, }; -pub type Result = std::result::Result; +pub type Result = std::result::Result; #[derive(Error, Debug)] pub enum Error { diff --git a/src/server_server.rs b/src/server_server.rs index cb00baa..68e262b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -65,7 +65,6 @@ use std::{ mem, net::{IpAddr, SocketAddr}, pin::Pin, - result::Result as StdResult, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, }; @@ -956,7 +955,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> StdResult>, String> { +) -> Result>, String> { match db.rooms.exists(room_id) { Ok(true) => {} _ => { @@ -1137,8 +1136,7 @@ fn handle_outlier_pdu<'a>( value: BTreeMap, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, StdResult<(Arc, BTreeMap), String>> -{ +) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -1301,7 +1299,7 @@ async fn upgrade_outlier_to_timeline_pdu( db: &Database, room_id: &RoomId, pub_key_map: &RwLock>>, -) -> StdResult>, String> { +) -> Result>, String> { if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } @@ -1448,7 +1446,7 @@ async fn upgrade_outlier_to_timeline_pdu( .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; Ok((shortstatekey, Arc::new(event_id))) }) - .collect::>()?, + .collect::>()?, ), Err(e) => { warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); @@ -1630,7 +1628,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(*shortstatekey, id, &db.globals) .map_err(|_| "Failed to compress_state_event".to_owned()) }) - .collect::>()?; + .collect::>()?; // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it debug!("starting soft fail auth check"); @@ -1750,7 +1748,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(*k, id, &db.globals) .map_err(|_| "Failed to compress_state_event.".to_owned()) }) - .collect::>()? + .collect::>()? } else { // We do need to force an update to this room's state update_state = true; @@ -1812,7 +1810,7 @@ async fn upgrade_outlier_to_timeline_pdu( .compress_state_event(shortstatekey, &event_id, &db.globals) .map_err(|_| "Failed to compress state event".to_owned()) }) - .collect::>()? + .collect::>()? 
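The src/error.rs change above gives the crate-wide Result alias a defaulted error parameter, so existing Result<T> signatures keep meaning Result<T, Error> while call sites can now name a different error type without reaching for std::result::Result. A compact sketch of how such an alias behaves (Error here is a stand-in for Conduit's error enum):

#[derive(Debug)]
pub struct Error;

// One alias, two uses: the default covers the common case, the second
// parameter accepts any other error type.
pub type Result<T, E = Error> = std::result::Result<T, E>;

fn conduit_op() -> Result<u32> {
    Ok(40)
}

fn parse_op() -> Result<u32, std::num::ParseIntError> {
    "2".parse()
}

fn main() {
    assert_eq!(conduit_op().unwrap() + parse_op().unwrap(), 42);
}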
}; // Set the new room state to the resolved state From 9082a531c99781fba5dd1abadd4dfc4ada518bbd Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 24 Sep 2021 22:44:26 +0000 Subject: [PATCH 0820/1727] Make allow_encryption work again, fixing #115 --- src/client_server/message.rs | 8 ++++++++ src/client_server/state.rs | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 93ead2c..25964cc 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -45,6 +45,14 @@ pub async fn send_message_event_route( ); let state_lock = mutex_state.lock().await; + // Forbid m.room.encrypted if encryption is disabled + if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption has been disabled", + )); + } + // Check if this is a new transaction id if let Some(response) = db.transaction_ids diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 24cc2a1..7618dcc 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -73,6 +73,14 @@ pub async fn send_state_event_for_empty_key_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // Forbid m.room.encryption if encryption is disabled + if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption has been disabled", + )); + } + let event_id = send_state_event_for_key_helper( &db, sender_user, From d996d1b0e65ccce8d1336ef2b382b52d0df73997 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 15 Oct 2021 12:38:20 +0000 Subject: [PATCH 0821/1727] Always send device_one_time_keys_count, fixing #178 --- src/client_server/sync.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2d5ad27..fe8aad1 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -758,13 +758,7 @@ async fn sync_helper( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: if db.users.last_one_time_keys_update(&sender_user)? > since - || since == 0 - { - db.users.count_one_time_keys(&sender_user, &sender_device)? 
- } else { - BTreeMap::new() - }, + device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, to_device: sync_events::ToDevice { events: db .users From 484a044b504dc3458799ee2eca87cd034f0ef8d5 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 15 Oct 2021 23:17:08 +0000 Subject: [PATCH 0822/1727] Remove device_one_time_keys_count from is_empty() sync checks, fixing sync issue as reported by Nekron --- src/client_server/sync.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d98d759..65c07bc 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -769,7 +769,6 @@ async fn sync_helper( && response.presence.is_empty() && response.account_data.is_empty() && response.device_lists.is_empty() - && response.device_one_time_keys_count.is_empty() && response.to_device.is_empty() { // Hang a few seconds so requests are not spammed From 55d78b1914f9800867d4490a53deae00c1f86e31 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 16 Oct 2021 00:45:51 +0000 Subject: [PATCH 0823/1727] Bump Ruma version to fix M_BAD_JSON on login --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0f24673..dae68bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "44cfd0adbc83303c19aef590ad0d71647e19f197", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "58cdcae1f9a8f4824bcbec1de1bb13e659c66804", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From ccf501a420d12d79b803ccf7334d0db978e4724e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 18 Oct 2021 04:51:11 +0000 Subject: [PATCH 0824/1727] Initial implementation of /report, fixing #13 --- src/client_server/mod.rs | 2 + src/client_server/report.rs | 75 +++++++++++++++++++++++++++++++++++++ src/main.rs | 1 + 3 files changed, 78 insertions(+) create mode 100644 src/client_server/report.rs diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index e0c340f..115ddaf 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -16,6 +16,7 @@ mod profile; mod push; mod read_marker; mod redact; +mod report; mod room; mod search; mod session; @@ -47,6 +48,7 @@ pub use profile::*; pub use push::*; pub use read_marker::*; pub use redact::*; +pub use report::*; pub use room::*; pub use search::*; pub 
use session::*; diff --git a/src/client_server/report.rs b/src/client_server/report.rs new file mode 100644 index 0000000..e56cbc9 --- /dev/null +++ b/src/client_server/report.rs @@ -0,0 +1,75 @@ +use std::sync::Arc; + +use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, r0::room::report_content}, + events::room::message, + Int, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::post; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` +/// +/// Reports an inappropriate event to homeserver admins +/// +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn report_event_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let pdu = match db.rooms.get_pdu(&body.event_id) { + Ok(pdu) if !pdu.is_none() => pdu, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid Event ID", + )) + } + } + .unwrap(); + + if body.score >= Int::from(0) && body.score <= Int::from(-100) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid score, must be within 0 to -100", + )); + }; + + if body.reason.chars().count() > 160 { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 160 characters or fewer", + )); + }; + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + db.admin.send(AdminCommand::SendMessage( + message::RoomMessageEventContent::text_plain(format!( + "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}", + sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason, + )), + )); + + drop(state_lock); + + db.flush()?; + + Ok(report_content::Response {}.into()) +} diff --git a/src/main.rs b/src/main.rs index 84dfb1f..56faa3e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -101,6 +101,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::create_typing_event_route, client_server::create_room_route, client_server::redact_event_route, + client_server::report_event_route, client_server::create_alias_route, client_server::delete_alias_route, client_server::get_alias_route, From 1541b93f457de2d5fb8c37739d6791fa3f60312b Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 18 Oct 2021 05:38:41 +0000 Subject: [PATCH 0825/1727] Make reports look nicer and reduce spam potential, increase max report length to 1000 characters --- src/client_server/report.rs | 39 ++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index e56cbc9..7f66fa1 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -8,7 +8,7 @@ use ruma::{ }; #[cfg(feature = "conduit_bin")] -use rocket::post; +use rocket::{http::RawStr, post}; /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` /// @@ -43,10 +43,10 @@ pub async fn report_event_route( )); }; - if body.reason.chars().count() > 160 { + if body.reason.chars().count() > 1000 { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Reason too long, should be 160 characters or fewer", + "Reason too long, should be 1000 
characters or fewer", )); }; @@ -61,10 +61,35 @@ pub async fn report_event_route( let state_lock = mutex_state.lock().await; db.admin.send(AdminCommand::SendMessage( - message::RoomMessageEventContent::text_plain(format!( - "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}", - sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason, - )), + message::RoomMessageEventContent::text_html( + format!( + concat!( + "Report received from: {}\r\n\r\n", + "Event ID: {}\r\n", + "Room ID: {}\r\n", + "Sent By: {}\r\n\r\n", + "Report Score: {}\r\n", + "Report Reason: {}" + ), + sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason + ) + .to_owned(), + format!( + concat!( + "
Report received from: {}
", + "Event Info

Event ID: {}
Room ID: {}
Sent By: {}", + "

Report Info

Report Score: {}", + "
Report Reason: {}

" + ), + sender_user, + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score, + RawStr::new(&body.reason).html_escape() + ) + .to_owned(), + ), )); drop(state_lock); From 50f931a2fda72d94a6190092dac18f2268c96af1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Wed, 20 Oct 2021 11:12:06 +0000 Subject: [PATCH 0826/1727] Cleanup and fix validation in report.rs, lower max report length, better html --- src/client_server/report.rs | 53 +++++++++++++------------------------ 1 file changed, 18 insertions(+), 35 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 7f66fa1..3dcb4d1 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, @@ -25,62 +23,49 @@ pub async fn report_event_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match db.rooms.get_pdu(&body.event_id) { - Ok(pdu) if !pdu.is_none() => pdu, + let pdu = match db.rooms.get_pdu(&body.event_id)? { + Some(pdu) => pdu, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid Event ID", )) } - } - .unwrap(); + }; - if body.score >= Int::from(0) && body.score <= Int::from(-100) { + if body.score > Int::from(0) || body.score < Int::from(-100) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); }; - if body.reason.chars().count() > 1000 { + if body.reason.chars().count() > 250 { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Reason too long, should be 1000 characters or fewer", + "Reason too long, should be 250 characters or fewer", )); }; - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - db.admin.send(AdminCommand::SendMessage( message::RoomMessageEventContent::text_html( format!( - concat!( - "Report received from: {}\r\n\r\n", - "Event ID: {}\r\n", - "Room ID: {}\r\n", - "Sent By: {}\r\n\r\n", - "Report Score: {}\r\n", - "Report Reason: {}" - ), + "Report received from: {}\n\n\ + Event ID: {}\n\ + Room ID: {}\n\ + Sent By: {}\n\n\ + Report Score: {}\n\ + Report Reason: {}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason ) .to_owned(), format!( - concat!( - "
Report received from: {}
", - "Event Info

Event ID: {}
Room ID: {}
Sent By: {}", - "

Report Info

Report Score: {}", - "
Report Reason: {}

" - ), + "
<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
+                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
+                <a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
+                </li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
+                Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
+                </ul></details>
", sender_user, pdu.event_id, pdu.room_id, @@ -92,8 +77,6 @@ pub async fn report_event_route( ), )); - drop(state_lock); - db.flush()?; Ok(report_content::Response {}.into()) From bbe16f84679061f1f4af5c1ab76f519279a234c0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 00:45:02 +0000 Subject: [PATCH 0827/1727] Update Ruma --- Cargo.toml | 2 +- src/client_server/room.rs | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dae68bf..13a7af4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "58cdcae1f9a8f4824bcbec1de1bb13e659c66804", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 2d1fe23..ec09eec 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,10 +22,10 @@ use ruma::{ }, EventType, }, - serde::JsonObject, + serde::{JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; -use serde_json::value::to_raw_value; +use serde_json::{value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; use tracing::{info, warn}; @@ -102,9 +102,14 @@ pub async fn create_room_route( } })?; + let creation_content = match body.creation_content.clone() { + Some(content) => content.deserialize().expect("Invalid creation content"), + None => create_room::CreationContent::new(), + }; + let mut content = RoomCreateEventContent::new(sender_user.clone()); - content.federate = body.creation_content.federate; - content.predecessor = body.creation_content.predecessor.clone(); + content.federate = creation_content.federate; + content.predecessor = creation_content.predecessor.clone(); content.room_version = match body.room_version.clone() { Some(room_version) => { if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { From 8087a26a35fdcd495e28e8bff401fa3ba2afd9ef Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 20:26:51 +0000 Subject: [PATCH 0828/1727] Make createRoom follow spec for m.room.create, allowing creation of spaces --- src/client_server/room.rs | 65 +++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/src/client_server/room.rs 
b/src/client_server/room.rs index ec09eec..5e59e81 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,11 +22,16 @@ use ruma::{ }, EventType, }, - serde::{JsonObject}, + serde::{CanonicalJsonObject, JsonObject, Raw}, RoomAliasId, RoomId, RoomVersionId, }; -use serde_json::{value::to_raw_value}; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; +use serde_json::{json, value::to_raw_value}; +use std::{ + cmp::max, + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, +}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] @@ -102,15 +107,7 @@ pub async fn create_room_route( } })?; - let creation_content = match body.creation_content.clone() { - Some(content) => content.deserialize().expect("Invalid creation content"), - None => create_room::CreationContent::new(), - }; - - let mut content = RoomCreateEventContent::new(sender_user.clone()); - content.federate = creation_content.federate; - content.predecessor = creation_content.predecessor.clone(); - content.room_version = match body.room_version.clone() { + let room_version = match body.room_version.clone() { Some(room_version) => { if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { room_version @@ -124,6 +121,50 @@ pub async fn create_room_route( None => RoomVersionId::Version6, }; + let content = match &body.creation_content { + Some(content) => { + let mut content = content + .deserialize_as::() + .expect("Invalid creation content"); + content.insert( + "creator".into(), + json!(sender_user.clone()).try_into().unwrap(), + ); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().unwrap(), + ); + content + } + None => { + let mut content = Raw::::from_json( + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(), + ) + .deserialize_as::() + .unwrap(); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().unwrap(), + ); + content + } + }; + + // Validate creation content + match Raw::::from_json( + to_raw_value(&content).expect("Invalid creation content"), + ) + .deserialize_as::() + { + Ok(_t) => {} + Err(_e) => { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + )) + } + }; + // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { From d5d25fb064449cb42a0243248e6fc2020bf77fe2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 22:13:08 +0000 Subject: [PATCH 0829/1727] Preserve all m.room.create entries when performing room upgrades --- src/client_server/room.rs | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 5e59e81..0c62d2d 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -478,7 +478,7 @@ pub async fn get_room_aliases_route( .into()) } -/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade` +/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` /// /// Upgrades the room. /// @@ -556,16 +556,15 @@ pub async fn upgrade_room_route( ); let state_lock = mutex_state.lock().await; - // Get the old room federations status - let federate = serde_json::from_str::( + // Get the old room creation event + let mut create_event_content = serde_json::from_str::( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content .get(), ) - .map_err(|_| Error::bad_database("Invalid room event in database."))? - .federate; + .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( @@ -574,10 +573,30 @@ pub async fn upgrade_room_route( )); // Send a m.room.create event containing a predecessor field and the applicable room_version - let mut create_event_content = RoomCreateEventContent::new(sender_user.clone()); - create_event_content.federate = federate; - create_event_content.room_version = body.new_version.clone(); - create_event_content.predecessor = predecessor; + create_event_content.insert( + "creator".into(), + json!(sender_user.clone()).try_into().unwrap(), + ); + create_event_content.insert( + "room_version".into(), + json!(body.new_version.clone()).try_into().unwrap(), + ); + create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap()); + + // Validate creation event content + match Raw::::from_json( + to_raw_value(&create_event_content).expect("Error forming creation event"), + ) + .deserialize_as::() + { + Ok(_t) => {} + Err(_e) => { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + )) + } + }; db.rooms.build_and_append_pdu( PduBuilder { From 743bdbe96125881418feb8583edb75ca703da4fc Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 26 Oct 2021 13:30:02 +0000 Subject: [PATCH 0830/1727] Add 'Federation publicRoom Name/topic keys are correct' test to sytest whitelist --- tests/sytest/sytest-whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index eda851a..5afc3fd 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -510,3 +510,4 @@ remote user can join room with version 5 remote user can join room with version 6 setting 'm.room.name' respects room powerlevel setting 'm.room.power_levels' respects room powerlevel +Federation publicRoom Name/topic keys are correct From 86177faae7f812136d02d08fe2f6533eabe28642 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 7 Nov 2021 07:57:15 +0000 Subject: [PATCH 0831/1727] Fix join panic bug --- src/client_server/membership.rs | 2 +- src/server_server.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 732f616..ec685ec 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -934,7 +934,7 @@ pub(crate) async fn invite_helper<'a>( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), ); } diff --git a/src/server_server.rs b/src/server_server.rs index 68e262b..482edf0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2721,7 +2721,7 @@ pub fn create_join_event_template_route( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), ); } From c4bce1d0c7ee0ba9c88fdccb11ac79112c19075b Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Tue, 9 Nov 2021 16:12:44 +0000 Subject: [PATCH 0832/1727] Cleanup room.rs; 
replace unwraps with map_err --- src/client_server/room.rs | 86 +++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 36 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 0c62d2d..47c7ee6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,7 +22,7 @@ use ruma::{ }, EventType, }, - serde::{CanonicalJsonObject, JsonObject, Raw}, + serde::{CanonicalJsonObject, JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; @@ -128,42 +128,48 @@ pub async fn create_room_route( .expect("Invalid creation content"); content.insert( "creator".into(), - json!(sender_user.clone()).try_into().unwrap(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content.insert( "room_version".into(), - json!(room_version.as_str()).try_into().unwrap(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content } None => { - let mut content = Raw::::from_json( - to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(), + let mut content = serde_json::from_str::( + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? + .get(), ) - .deserialize_as::() .unwrap(); content.insert( "room_version".into(), - json!(room_version.as_str()).try_into().unwrap(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content } }; // Validate creation content - match Raw::::from_json( - to_raw_value(&content).expect("Invalid creation content"), - ) - .deserialize_as::() - { - Ok(_t) => {} - Err(_e) => { - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Invalid creation content", - )) - } - }; + let de_result = serde_json::from_str::( + to_raw_value(&content) + .expect("Invalid creation content") + .get(), + ); + + if let Err(_) = de_result { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + )); + } // 1. 
The room create event db.rooms.build_and_append_pdu( @@ -575,28 +581,36 @@ pub async fn upgrade_room_route( // Send a m.room.create event containing a predecessor field and the applicable room_version create_event_content.insert( "creator".into(), - json!(sender_user.clone()).try_into().unwrap(), + json!(&sender_user) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, ); create_event_content.insert( "room_version".into(), - json!(body.new_version.clone()).try_into().unwrap(), + json!(&body.new_version) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + create_event_content.insert( + "predecessor".into(), + json!(predecessor) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, ); - create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap()); // Validate creation event content - match Raw::::from_json( - to_raw_value(&create_event_content).expect("Error forming creation event"), - ) - .deserialize_as::() - { - Ok(_t) => {} - Err(_e) => { - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Error forming creation event", - )) - } - }; + let de_result = serde_json::from_str::( + to_raw_value(&create_event_content) + .expect("Error forming creation event") + .get(), + ); + + if let Err(_) = de_result { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + )); + } db.rooms.build_and_append_pdu( PduBuilder { From 109892b4b754e1666d4f00d9aec6356b46093668 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Fri, 1 Oct 2021 15:53:16 +0200 Subject: [PATCH 0833/1727] Implement turn server settings this fills out the infos in /_matrix/client/r0/voip/turnServer with values specified in the server config --- src/client_server/voip.rs | 14 +++++++------- src/database.rs | 12 ++++++++++++ src/database/globals.rs | 16 ++++++++++++++++ 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 2a7f28e..83f39a4 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::ConduitResult; +use crate::{database::DatabaseGuard, ConduitResult}; use ruma::api::client::r0::voip::get_turn_server_info; use std::time::Duration; @@ -9,13 +9,13 @@ use rocket::get; /// /// TODO: Returns information about the recommended turn server. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -#[tracing::instrument] -pub async fn turn_server_route() -> ConduitResult { +#[tracing::instrument(skip(db))] +pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult { Ok(get_turn_server_info::Response { - username: "".to_owned(), - password: "".to_owned(), - uris: Vec::new(), - ttl: Duration::from_secs(60 * 60 * 24), + username: db.globals.turn_username().clone(), + password: db.globals.turn_password().clone(), + uris: db.globals.turn_uris().to_vec(), + ttl: Duration::from_secs(db.globals.turn_ttl()), } .into()) } diff --git a/src/database.rs b/src/database.rs index 8cf4f64..85213c0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -74,6 +74,14 @@ pub struct Config { trusted_servers: Vec>, #[serde(default = "default_log")] pub log: String, + #[serde(default)] + turn_username: String, + #[serde(default)] + turn_password: String, + #[serde(default = "Vec::new")] + turn_uris: Vec, + #[serde(default = "default_turn_ttl")] + turn_ttl: u64, #[serde(flatten)] catchall: BTreeMap, @@ -131,6 +139,10 @@ fn default_log() -> String { "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } +fn default_turn_ttl() -> u64 { + 60 * 60 * 24 +} + #[cfg(feature = "sled")] pub type Engine = abstraction::sled::Engine; diff --git a/src/database/globals.rs b/src/database/globals.rs index f1cbbd9..7338f1e 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -226,6 +226,22 @@ impl Globals { self.jwt_decoding_key.as_ref() } + pub fn turn_password(&self) -> &String { + &self.config.turn_password + } + + pub fn turn_ttl(&self) -> u64 { + self.config.turn_ttl + } + + pub fn turn_uris(&self) -> &[String] { + &self.config.turn_uris + } + + pub fn turn_username(&self) -> &String { + &self.config.turn_username + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
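With these options in place, `GET /_matrix/client/r0/voip/turnServer` simply hands back whatever the admin configured. A minimal sketch of the matching `[global]` entries in conduit.toml, using the option names introduced above — the values shown are placeholders, not shipped defaults (the defaults are empty strings, an empty URI list and a 24-hour TTL):

```toml
[global]
# Example TURN settings; point these at your own TURN/STUN deployment.
turn_uris = ["turn:turn.example.com:3478?transport=udp", "turn:turn.example.com:3478?transport=tcp"]
turn_username = "conduit"
turn_password = "change-me"
# How long (in seconds) the handed-out credentials are advertised as valid.
turn_ttl = 86400
```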
/// From 9fccbb014a3297961fd169ce12363564e56afbc3 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 2 Oct 2021 00:37:39 +0200 Subject: [PATCH 0834/1727] Implement TURN server authentication with hmac This is a prefered method to allow limited access to the TURN server --- Cargo.lock | 35 +++++++++++++++++++++++++++ Cargo.toml | 3 +++ src/client_server/voip.rs | 51 +++++++++++++++++++++++++++++++++------ src/database.rs | 2 ++ src/database/globals.rs | 4 +++ 5 files changed, 88 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 293bcff..6829389 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,6 +245,7 @@ dependencies = [ "crossbeam", "directories", "heed", + "hmac", "http", "image", "jsonwebtoken", @@ -266,6 +267,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "sha-1", "sled", "thiserror", "thread_local", @@ -428,6 +430,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crypto-mac" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -897,6 +909,16 @@ dependencies = [ "libc", ] +[[package]] +name = "hmac" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +dependencies = [ + "crypto-mac", + "digest", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2422,6 +2444,19 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer", + "cfg-if 1.0.0", + "cpufeatures", + "digest", + "opaque-debug", +] + [[package]] name = "sha1" version = "0.6.0" diff --git a/Cargo.toml b/Cargo.toml index 13a7af4..fc83d11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,9 @@ num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } thread_local = "1.1.3" +# used for TURN server authentication +hmac = "0.11.0" +sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite"] diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 83f39a4..9c3b20d 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,6 +1,11 @@ -use crate::{database::DatabaseGuard, ConduitResult}; +use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use hmac::{Hmac, Mac, NewMac}; use ruma::api::client::r0::voip::get_turn_server_info; -use std::time::Duration; +use ruma::SecondsSinceUnixEpoch; +use sha1::Sha1; +use std::time::{Duration, SystemTime}; + +type HmacSha1 = Hmac; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -8,12 +13,44 @@ use rocket::get; /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. 
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -#[tracing::instrument(skip(db))] -pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/voip/turnServer", data = "") +)] +#[tracing::instrument(skip(body, db))] +pub async fn turn_server_route( + body: Ruma, + db: DatabaseGuard, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let turn_secret = db.globals.turn_secret(); + + let (username, password) = if turn_secret != "" { + let expiry = SecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), + ) + .expect("time is valid"); + + let username: String = format!("{}:{}", expiry.get(), sender_user); + + let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()) + .expect("HMAC can take key of any size"); + mac.update(username.as_bytes()); + + let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD); + + (username, password) + } else { + ( + db.globals.turn_username().clone(), + db.globals.turn_password().clone(), + ) + }; + Ok(get_turn_server_info::Response { - username: db.globals.turn_username().clone(), - password: db.globals.turn_password().clone(), + username: username, + password: password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), } diff --git a/src/database.rs b/src/database.rs index 85213c0..080e24b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -80,6 +80,8 @@ pub struct Config { turn_password: String, #[serde(default = "Vec::new")] turn_uris: Vec, + #[serde(default)] + turn_secret: String, #[serde(default = "default_turn_ttl")] turn_ttl: u64, diff --git a/src/database/globals.rs b/src/database/globals.rs index 7338f1e..05ecb56 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -242,6 +242,10 @@ impl Globals { &self.config.turn_username } + pub fn turn_secret(&self) -> &String { + &self.config.turn_secret + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
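This is the "TURN REST API" shared-secret scheme: when `turn_secret` is non-empty, the static `turn_username`/`turn_password` pair is bypassed and the route derives short-lived credentials instead — the username is `"<unix expiry>:<matrix user id>"` and the password is `base64(HMAC-SHA1(turn_secret, username))`. A self-contained sketch of that derivation using the same `hmac`/`sha-1` crates added in this patch; the helper name and signature are illustrative, not part of the change:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha1::Sha1;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

type HmacSha1 = Hmac<Sha1>;

/// Derive ephemeral TURN credentials for `user_id`, valid for `ttl`.
fn ephemeral_turn_credentials(turn_secret: &str, user_id: &str, ttl: Duration) -> (String, String) {
    // The expiry timestamp is baked into the username so the TURN server
    // can reject credentials that are too old.
    let expiry = (SystemTime::now() + ttl)
        .duration_since(UNIX_EPOCH)
        .expect("system time is after the unix epoch")
        .as_secs();
    let username = format!("{}:{}", expiry, user_id);

    // password = base64(HMAC-SHA1(shared secret, username))
    let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);

    (username, password)
}
```

Because the TURN server only needs the shared secret to recompute the same HMAC, a coturn deployment can verify these credentials with `use-auth-secret` and `static-auth-secret` set to the same value as `turn_secret`, without any per-user accounts.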
/// From 2fff720df38c83673269fa597361c5631e991c9a Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 21 Nov 2021 17:34:08 +0000 Subject: [PATCH 0835/1727] CI: New Multiarch builds and Docker images + cargo clippy/test output now integrated into GitLab --- .dockerignore | 2 + .gitlab-ci.yml | 376 +++++++++++------------- Cargo.lock | 36 +-- Cargo.toml | 3 +- DEPLOY.md | 52 ++-- Dockerfile | 137 ++++----- docker/README.md | 105 +++---- docker/ci-binaries-packaging.Dockerfile | 48 +-- docker/healthcheck.sh | 6 +- 9 files changed, 356 insertions(+), 409 deletions(-) diff --git a/.dockerignore b/.dockerignore index 80b3072..933b380 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,6 +14,8 @@ docker-compose* # Git folder .git .gitea +.gitlab +.github # Dot files .env diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 386986f..6f2e0fe 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,7 +9,6 @@ variables: FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest - # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # # --------------------------------------------------------------------- # @@ -20,7 +19,7 @@ variables: rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" interruptible: true image: "rust:latest" tags: ["docker"] @@ -28,258 +27,209 @@ variables: paths: - cargohome - target/ - key: "build_cache-$TARGET-release" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release" variables: - CARGO_PROFILE_RELEASE_LTO=true - CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1 + CARGO_PROFILE_RELEASE_LTO: "true" + CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" before_script: - 'echo "Building for target $TARGET"' - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" + - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" script: - time cargo build --target $TARGET --release - - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' artifacts: expire_in: never - -build:release:cargo:x86_64-unknown-linux-gnu: +build:release:cargo:x86_64-unknown-linux-musl-with-debug: extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:x86_64-musl variables: - TARGET: "x86_64-unknown-linux-gnu" + CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling + TARGET: "x86_64-unknown-linux-musl" + after_script: + - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug" artifacts: - name: "conduit-x86_64-unknown-linux-gnu" + name: "conduit-x86_64-unknown-linux-musl-with-debug" paths: - - "conduit-x86_64-unknown-linux-gnu" - expose_as: "Conduit for x86_64-unknown-linux-gnu" - -build:release:cargo:armv7-unknown-linux-gnueabihf: - extends: .build-cargo-shared-settings - variables: - TARGET: "armv7-unknown-linux-gnueabihf" - NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc - CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc - CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ - artifacts: - name: 
"conduit-armv7-unknown-linux-gnueabihf" - paths: - - "conduit-armv7-unknown-linux-gnueabihf" - expose_as: "Conduit for armv7-unknown-linux-gnueabihf" - -build:release:cargo:aarch64-unknown-linux-gnu: - extends: .build-cargo-shared-settings - variables: - TARGET: "aarch64-unknown-linux-gnu" - NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc - CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc - CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ - TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10" - TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10" - artifacts: - name: "conduit-aarch64-unknown-linux-gnu" - paths: - - "conduit-aarch64-unknown-linux-gnu" - expose_as: "Conduit for aarch64-unknown-linux-gnu" + - "conduit-x86_64-unknown-linux-musl-with-debug" + expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug" build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings - image: "rust:alpine" + image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" artifacts: name: "conduit-x86_64-unknown-linux-musl" paths: - "conduit-x86_64-unknown-linux-musl" expose_as: "Conduit for x86_64-unknown-linux-musl" +build:release:cargo:arm-unknown-linux-musleabihf: + extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:arm-musleabihf + variables: + TARGET: "arm-unknown-linux-musleabihf" + artifacts: + name: "conduit-arm-unknown-linux-musleabihf" + paths: + - "conduit-arm-unknown-linux-musleabihf" + expose_as: "Conduit for arm-unknown-linux-musleabihf" +build:release:cargo:armv7-unknown-linux-musleabihf: + extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:armv7-musleabihf + variables: + TARGET: "armv7-unknown-linux-musleabihf" + artifacts: + name: "conduit-armv7-unknown-linux-musleabihf" + paths: + - "conduit-armv7-unknown-linux-musleabihf" + expose_as: "Conduit for armv7-unknown-linux-musleabihf" + +build:release:cargo:aarch64-unknown-linux-musl: + extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:aarch64-musl + variables: + TARGET: "aarch64-unknown-linux-musl" + artifacts: + name: "conduit-aarch64-unknown-linux-musl" + paths: + - "conduit-aarch64-unknown-linux-musl" + expose_as: "Conduit for aarch64-unknown-linux-musl" .cargo-debug-shared-settings: extends: ".build-cargo-shared-settings" rules: - - if: '$CI_COMMIT_BRANCH' - - if: '$CI_COMMIT_TAG' + - if: '$CI_COMMIT_BRANCH != "master"' cache: - key: "build_cache-$TARGET-debug" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: - "time cargo build --target $TARGET" - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' artifacts: expire_in: 4 weeks -build:debug:cargo:x86_64-unknown-linux-gnu: - extends: ".cargo-debug-shared-settings" - variables: - TARGET: "x86_64-unknown-linux-gnu" - artifacts: - name: "conduit-debug-x86_64-unknown-linux-gnu" - paths: - - "conduit-debug-x86_64-unknown-linux-gnu" - expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu" - build:debug:cargo:x86_64-unknown-linux-musl: extends: ".cargo-debug-shared-settings" - image: "rust:alpine" + image: messense/rust-musl-cross:x86_64-musl variables: TARGET: 
"x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" artifacts: name: "conduit-debug-x86_64-unknown-linux-musl" paths: - "conduit-debug-x86_64-unknown-linux-musl" expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" - - -# --------------------------------------------------------------------- # -# Cargo: Compiling deb packages for different architectures # -# --------------------------------------------------------------------- # - - -.build-cargo-deb-shared-settings: - stage: "build" - needs: [ ] - rules: - - if: '$CI_COMMIT_BRANCH == "master"' - - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' - interruptible: true - image: "rust:latest" - tags: ["docker"] - cache: - paths: - - cargohome - - target/ - key: "build_cache-deb-$TARGET" - before_script: - - 'echo "Building debian package for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - - "rustup target add $TARGET" - - "cargo install cargo-deb" - script: - - time cargo deb --target $TARGET - - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"' - -build:cargo-deb:x86_64-unknown-linux-gnu: - extends: .build-cargo-deb-shared-settings - variables: - TARGET: "x86_64-unknown-linux-gnu" - NEEDED_PACKAGES: "" - artifacts: - name: "conduit-x86_64-unknown-linux-gnu.deb" - paths: - - "conduit-x86_64-unknown-linux-gnu.deb" - expose_as: "Debian Package x86_64" - - # --------------------------------------------------------------------- # # Create and publish docker image # # --------------------------------------------------------------------- # -# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image .docker-shared-settings: stage: "build docker image" - needs: [] - interruptible: true - image: - name: "gcr.io/kaniko-project/executor:debug" - entrypoint: [""] + image: jdrouet/docker-with-buildx:stable tags: ["docker"] + services: + - docker:dind + needs: + - "build:release:cargo:x86_64-unknown-linux-musl" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" variables: - # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache - KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 + PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64" + DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" before_script: - - "mkdir -p /kaniko/.docker" - - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' - + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + # Only log in to Dockerhub if the credentials are given: + - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" 
"$DOCKER_HUB"; fi + script: + # Prepare buildx to build multiarch stuff: + - docker context create 'ci-context' + - docker buildx create --name 'multiarch-builder' --use 'ci-context' + # Copy binaries to their docker arch path + - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 + - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 + - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 + - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8 + # Actually create multiarch image: + - > + docker buildx build + --pull + --push + --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) + --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" + --platform "$PLATFORMS" + --tag "$GL_IMAGE_TAG" + --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA" + --file "$DOCKER_FILE" . + # Only try to push to docker hub, if auth data for dockerhub exists: + - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi + - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi build:docker:next: extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --force - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:next" - --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" - --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" rules: - if: '$CI_COMMIT_BRANCH == "next"' - + variables: + GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" + DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" build:docker:master: extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:latest" - --destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine" rules: - if: '$CI_COMMIT_BRANCH == "master"' + variables: + GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" + DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - -build:docker:tags: - extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - 
--destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG" - --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine" - rules: - - if: '$CI_COMMIT_TAG' - - +## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image +#.docker-shared-settings: +# stage: "build docker image" +# needs: [] +# interruptible: true +# image: +# name: "gcr.io/kaniko-project/executor:debug" +# entrypoint: [""] +# tags: ["docker"] +# variables: +# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache +# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" +# before_script: +# - "mkdir -p /kaniko/.docker" +# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' +# +# +#build:docker:next: +# extends: .docker-shared-settings +# needs: +# - "build:release:cargo:x86_64-unknown-linux-musl" +# script: +# - > +# /kaniko/executor +# $KANIKO_CACHE_ARGS +# --force +# --context $CI_PROJECT_DIR +# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') +# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" +# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" +# --destination "$CI_REGISTRY_IMAGE/conduit:next" +# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" +# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" +# rules: +# - if: '$CI_COMMIT_BRANCH == "next"' +# +# # --------------------------------------------------------------------- # # Run tests # @@ -287,9 +237,9 @@ build:docker:tags: test:cargo: stage: "test" - needs: [ ] + needs: [] image: "rust:latest" - tags: [ "docker" ] + tags: ["docker"] variables: CARGO_HOME: "cargohome" cache: @@ -301,13 +251,20 @@ test:cargo: before_script: - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget - rustup component add clippy rustfmt + - wget "https://faulty-storage.de/gitlab-report" + - chmod +x ./gitlab-report script: - - rustc --version && cargo --version # Print version info for debugging + - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - cargo test --workspace --verbose --locked - - cargo clippy + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: + junit: report.xml + codequality: gl-code-quality-report.json test:sytest: stage: "test" @@ -316,8 +273,8 @@ test:sytest: - 
"build:debug:cargo:x86_64-unknown-linux-musl" image: name: "valkum/sytest-conduit:latest" - entrypoint: [ "" ] - tags: [ "docker" ] + entrypoint: [""] + tags: ["docker"] variables: PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" before_script: @@ -330,7 +287,7 @@ test:sytest: script: - "SYTEST_EXIT_CODE=0" - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" + - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap' - "exit $SYTEST_EXIT_CODE" artifacts: when: always @@ -340,7 +297,6 @@ test:sytest: reports: junit: "$CI_PROJECT_DIR/sytest.xml" - # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # @@ -348,25 +304,31 @@ test:sytest: publish:package: stage: "upload artifacts" needs: - - "build:release:cargo:x86_64-unknown-linux-gnu" - - "build:release:cargo:armv7-unknown-linux-gnueabihf" - - "build:release:cargo:aarch64-unknown-linux-gnu" - "build:release:cargo:x86_64-unknown-linux-musl" - - "build:cargo-deb:x86_64-unknown-linux-gnu" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" + # - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" image: curlimages/curl:latest tags: ["docker"] variables: GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts script: - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"' +# Avoid duplicate pipelines +# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' + - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" + when: never + - if: "$CI_COMMIT_BRANCH" diff --git a/Cargo.lock 
b/Cargo.lock index 293bcff..166d67f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1968,7 +1968,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "assign", "js_int", @@ -1989,7 +1989,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "bytes", "http", @@ -2005,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2016,7 +2016,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "ruma-api", "ruma-common", @@ -2030,7 +2030,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "assign", "bytes", @@ -2050,7 +2050,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "indexmap", "js_int", @@ -2065,7 +2065,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "indoc", "js_int", @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2092,7 +2092,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = 
"git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "paste", "percent-encoding", @@ -2122,7 +2122,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2132,7 +2132,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "thiserror", ] @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ -2153,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "bytes", "form_urlencoded", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2193,7 +2193,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2210,7 +2210,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = 
"git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 13a7af4..d0dd641 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,13 +120,12 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } [profile.dev] -lto = 'thin' +lto = 'off' incremental = true [profile.release] lto = 'thin' incremental = true - codegen-units=32 # If you want to make flamegraphs, enable debug info: # debug = true diff --git a/DEPLOY.md b/DEPLOY.md index 84dd2be..6470c90 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -2,25 +2,30 @@ ## Getting help -If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit +Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore +only offer Linux binaries. + You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | -| -------------------- | ------------------------------------- | ----------------------- | -| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] | -| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | -| armv8 / aarch64 | [Download][armv8-gnu] | - | - -[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu +| CPU Architecture | Download link | +| ------------------------------------------- | ----------------------- | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] | +| armv6 | [Download][armv6-musl] | +| armv7 (e.g. 
Raspberry Pi by default) | [Download][armv7-musl] | +| armv8 / aarch64 | [Download][armv8-musl] | [x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl -[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf +[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf -[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu +[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf + +[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -32,15 +37,15 @@ Alternatively, you may compile the binary yourself using ```bash $ cargo build --release ``` + Note that this currently requires Rust 1.50. If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). - ## Adding a Conduit user -While Conduit can run as any user it is usually better to use dedicated users for different services. -This also allows you to make sure that the file permissions are correctly set up. +While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows +you to make sure that the file permissions are correctly set up. In Debian you can use this command to create a Conduit user: @@ -50,9 +55,8 @@ sudo adduser --system conduit --no-create-home ## Setting up a systemd service -Now we'll set up a systemd service for Conduit, so it's easy to start/stop -Conduit and set it to autostart when your server reboots. Simply paste the -default systemd service you can find below into +Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your +server reboots. Simply paste the default systemd service you can find below into `/etc/systemd/system/conduit.service`. ```systemd @@ -77,10 +81,10 @@ Finally, run $ sudo systemctl daemon-reload ``` - ## Creating the Conduit configuration file -Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** +Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment +to read it. You need to change at least the server name.** ```toml [global] @@ -128,8 +132,8 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re ## Setting the correct file permissions -As we are using a Conduit specific user we need to allow it to read the config. -To do that you can run this command on Debian: +As we are using a Conduit specific user we need to allow it to read the config. 
To do that you can run this command on +Debian: ```bash sudo chown -R conduit:nogroup /etc/matrix-conduit @@ -142,7 +146,6 @@ sudo mkdir -p /var/lib/matrix-conduit/conduit_db sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db ``` - ## Setting up the Reverse Proxy This depends on whether you use Apache, Nginx or another web server. @@ -171,11 +174,9 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ $ sudo systemctl reload apache2 ``` - ### Nginx -If you use Nginx and not Apache, add the following server section inside the -http section of `/etc/nginx/nginx.conf` +If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` ```nginx server { @@ -198,13 +199,13 @@ server { include /etc/letsencrypt/options-ssl-nginx.conf; } ``` + **You need to make some edits again.** When you are done, run ```bash $ sudo systemctl reload nginx ``` - ## SSL Certificate The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: @@ -213,7 +214,6 @@ The easiest way to get an SSL certificate, if you don't have one already, is to $ sudo certbot -d your.server.name ``` - ## You're done! Now you can start Conduit with: diff --git a/Dockerfile b/Dockerfile index f4b176f..d137353 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,75 +1,66 @@ -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ +# syntax=docker/dockerfile:1 +FROM docker.io/rust:1.53-alpine AS builder +WORKDIR /usr/src/conduit + +# Install required packages to build Conduit and it's dependencies +RUN apk add musl-dev + +# == Build dependencies without our own code separately for caching == +# +# Need a fake main.rs since Cargo refuses to build anything otherwise. +# +# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature +# request that would allow just dependencies to be compiled, presumably +# regardless of whether source files are available. +RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs +COPY Cargo.toml Cargo.lock ./ +RUN cargo build --release && rm -r src + +# Copy over actual Conduit sources +COPY src src + +# main.rs and lib.rs need their timestamp updated for this to work correctly since +# otherwise the build with the fake main.rs from above is newer than the +# source files (COPY preserves timestamps). +# +# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit +RUN touch src/main.rs && touch src/lib.rs && cargo build --release -########################## BUILD IMAGE ########################## -# Alpine build image to build Conduit's statically compiled binary -FROM alpine:3.14 as builder -# Install packages needed for building all crates -RUN apk add --no-cache \ - cargo \ - openssl-dev -# Specifies if the local project is build or if Conduit gets build -# from the official git repository. Defaults to the git repo. -ARG LOCAL=false -# Specifies which revision/commit is build. Defaults to HEAD -ARG GIT_REF=origin/master +# --------------------------------------------------------------------------------------------------------------- +# Stuff below this line actually ends up in the resulting docker image +# --------------------------------------------------------------------------------------------------------------- +FROM docker.io/alpine:3.14 AS runner -# Copy project files from current folder -COPY . . 
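
As a quick aside on the dependency-caching approach used in the new builder stage above: because the dependencies are compiled against a fake `main.rs` in their own layer, editing only Conduit's own sources should leave that layer untouched. A rough way to observe this locally, assuming a checkout of this repository and a recent Docker with BuildKit (the tag name is arbitrary):

```bash
# First build: compiles all dependencies plus Conduit itself.
docker build -t conduit:cache-test .

# Change only a source file; on rebuild the dependency layer should be reused
# from cache and only the final `cargo build` step runs again.
touch src/main.rs
docker build -t conduit:cache-test .
```
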
-# Build it from the copied local files or from the official git repository -RUN if [[ $LOCAL == "true" ]]; then \ - mv ./docker/healthcheck.sh . ; \ - echo "Building from local source..." ; \ - cargo install --path . ; \ - else \ - echo "Building revision '${GIT_REF}' from online source..." ; \ - cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \ - echo "Loadings healthcheck script from online source..." ; \ - wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \ - fi - -########################## RUNTIME IMAGE ########################## -# Create new stage with a minimal image for the actual -# runtime image/container -FROM alpine:3.14 - -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master - -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" - -# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md -# including a custom label specifying the build command -LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ - org.opencontainers.image.ref.name="" \ - org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ - maintainer="Weasy666" - -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Copy config files from context and the binary from -# the "builder" stage to the current stage into folder -# /srv/conduit and create data folder for database -RUN mkdir -p /srv/conduit/.local/share/conduit -COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ -COPY --from=builder ./healthcheck.sh /srv/conduit/ +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" +# Conduit needs: +# ca-certificates: for https +# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. 
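
Regarding the note above about eventually configuring the Docker image purely through environment variables: the docker README later in this series relies on passing `-e CONDUIT_CONFIG=""` for exactly that. A hedged sketch of what an env-only invocation could look like; apart from `CONDUIT_CONFIG`, the `CONDUIT_*` names are assumptions that mirror keys of the example `conduit.toml` and should be checked against it:

```bash
# Illustrative only: run the image configured via environment variables instead
# of a mounted conduit.toml. CONDUIT_CONFIG="" tells Conduit not to read a file.
docker run -d -p 8448:6167 \
  -e CONDUIT_CONFIG="" \
  -e CONDUIT_SERVER_NAME="your.server.name" \
  -e CONDUIT_ADDRESS="0.0.0.0" \
  -e CONDUIT_PORT="6167" \
  -e CONDUIT_DATABASE_PATH="/srv/conduit/.local/share/conduit" \
  -v db:/srv/conduit/.local/share/conduit \
  matrixconduit/matrix-conduit:latest
```
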
+RUN apk add --no-cache \ + ca-certificates \ + curl \ + libgcc + + +# Created directory for the database and media files +RUN mkdir -p /srv/conduit/.local/share/conduit + +# Test if Conduit is still alive, uses the same endpoint as Element +COPY ./docker/healthcheck.sh /srv/conduit/ +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh + +# Copy over the actual Conduit binary from the builder stage +COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/ + +# Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install RUN set -x ; \ @@ -79,19 +70,13 @@ RUN set -x ; \ # Change ownership of Conduit files to www-data user and group RUN chown -cR www-data:www-data /srv/conduit +RUN chmod +x /srv/conduit/healthcheck.sh -# Install packages needed to run Conduit -RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc - -# Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh - -# Set user to www-data +# Change user to www-data USER www-data # Set container home directory WORKDIR /srv/conduit -# Run Conduit -ENTRYPOINT [ "/srv/conduit/conduit" ] + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 +ENTRYPOINT [ "/srv/conduit/conduit" ] \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index 0e83482..19d9dca 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,53 +2,41 @@ > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. - ## Docker ### Build & Dockerfile The Dockerfile provided by Conduit has two stages, each of which creates an image. + 1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. -2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. - -The Dockerfile includes a few build arguments that should be supplied when building it. - -``` Dockerfile -ARG LOCAL=false -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master -``` - -- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` -- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)` -- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`. -- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`. +2. 
**Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. To build the image you can use the following command -``` bash -docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +```bash +docker build --tag matrixconduit/matrix-conduit:latest . ``` which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. -**Note:** it ommits the two optional `build-arg`s. - ### Run After building the image you can simply run it with -``` bash +```bash docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: -| Registry | Image | Size | -| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| Docker Hub | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | +| Registry | Image | Size | +| --------------- | --------------------------------------------------------------- | --------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | + +[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit +[gl]: https://gitlab.com/famedly/conduit/container_registry/ +[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need @@ -56,29 +44,26 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. - ## Docker-compose If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying Conduit can be found [here](../DEPLOY.md). - ### Build To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. 
Then call docker-compose with: -``` bash -CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up +```bash +docker-compose up ``` -This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section. - +This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. ### Run If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: -``` bash +```bash docker-compose up -d ``` @@ -101,32 +86,36 @@ So...step by step: 3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 5. Create the files needed by the `well-known` service. - - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```nginx - server { - server_name .; - listen 80 default_server; - location /.well-known/matrix/ { - root /var/www; - default_type application/json; - add_header Access-Control-Allow-Origin *; - } - } - ``` - - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.homeserver": { - "base_url": "https://." - } - } - ``` - - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.server": ".:443" - } - ``` + - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) + + ```nginx + server { + server_name .; + listen 80 default_server; + + location /.well-known/matrix/ { + root /var/www; + default_type application/json; + add_header Access-Control-Allow-Origin *; + } + } + ``` + + - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) + ```json + { + "m.homeserver": { + "base_url": "https://." + } + } + ``` + - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) + ```json + { + "m.server": ".:443" + } + ``` + 6. Run `docker-compose up -d` 7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin. diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index fb67439..b51df7c 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # --------------------------------------------------------------------------------------------------------- # This Dockerfile is intended to be built as part of Conduit's CI pipeline. # It does not build Conduit in Docker, but just copies the matching build artifact from the build job. @@ -7,20 +8,26 @@ # Credit's for the original Dockerfile: Weasy666. 
# --------------------------------------------------------------------------------------------------------- -FROM alpine:3.14 +FROM docker.io/alpine:3.14 AS runner -# Install packages needed to run Conduit +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. +EXPOSE 6167 + +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" + +# Conduit needs: +# ca-certificates: for https +# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ ca-certificates \ - curl \ libgcc + ARG CREATED ARG VERSION ARG GIT_REF - -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" - # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ @@ -33,19 +40,24 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ + org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ org.opencontainers.image.ref.name="" -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. -EXPOSE 6167 - -# create data folder for database +# Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit -# Copy the Conduit binary into the image at the latest possible moment to maximise caching: -COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit +# Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/ +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh + +# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") +# copy the matching binary into this docker image +ARG TARGETPLATFORM +COPY ./$TARGETPLATFORM /srv/conduit/conduit + + +# Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install RUN set -x ; \ @@ -57,13 +69,11 @@ RUN set -x ; \ RUN chown -cR www-data:www-data /srv/conduit RUN chmod +x /srv/conduit/healthcheck.sh - -# Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh - -# Set user to www-data +# Change user to www-data USER www-data # Set container home directory WORKDIR /srv/conduit -# Run Conduit + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 568838e..7ca0460 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -7,7 +7,7 @@ fi # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. -# TODO: Change this to a single curl call. Do we have a config value that we can check for that? 
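
For reference, the wget-based check that replaces the curl calls here (see the `+` lines of this hunk) can also be run by hand against a local instance, which is a quick way to verify the versions endpoint independently of Docker's HEALTHCHECK:

```bash
# Manual version of the health check; CONDUIT_PORT is the same variable
# healthcheck.sh reads, 6167 being Conduit's standard port.
CONDUIT_PORT=6167
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
  wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
  echo "Conduit did not respond on either scheme"
```
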
-curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ - curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ +# TODO: Change this to a single wget call. Do we have a config value that we can check for that? +wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ exit 1 From 9bfc7b34b6d72def7da19ccd1decbe1ac2c7e6db Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 25 Nov 2021 22:36:44 +0000 Subject: [PATCH 0836/1727] Fixes for !225 --- .gitlab-ci.yml | 87 +++++++++---------------- DEPLOY.md | 23 +++---- Dockerfile | 4 +- docker/ci-binaries-packaging.Dockerfile | 2 +- 4 files changed, 44 insertions(+), 72 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f2e0fe..a8d4384 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -142,8 +142,12 @@ build:debug:cargo:x86_64-unknown-linux-musl: DOCKER_HOST: tcp://docker:2375/ DOCKER_TLS_CERTDIR: "" DOCKER_DRIVER: overlay2 - PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64" + PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" + cache: + paths: + - docker_cache + key: "$CI_JOB_NAME" before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY # Only log in to Dockerhub if the credentials are given: @@ -156,80 +160,51 @@ build:debug:cargo:x86_64-unknown-linux-musl: - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 - - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8 - # Actually create multiarch image: + - mv ./conduit-aarch64-unknown-linux-musl linux/arm64 + - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"' + # Build and push image: - > docker buildx build --pull --push - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache + --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache + --build-arg CREATED=$CREATED --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" --platform "$PLATFORMS" - --tag "$GL_IMAGE_TAG" - --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA" + --tag "$TAG" + --tag "$TAG-alpine" + --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA" --file "$DOCKER_FILE" . 
- # Only try to push to docker hub, if auth data for dockerhub exists: - - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi - - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi -build:docker:next: +docker:next:gitlab: extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "next"' variables: - GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" - DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" -build:docker:master: +docker:next:dockerhub: + extends: .docker-shared-settings + rules: + - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + +docker:master:gitlab: extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "master"' variables: - GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" - DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" -## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image -#.docker-shared-settings: -# stage: "build docker image" -# needs: [] -# interruptible: true -# image: -# name: "gcr.io/kaniko-project/executor:debug" -# entrypoint: [""] -# tags: ["docker"] -# variables: -# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache -# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" -# before_script: -# - "mkdir -p /kaniko/.docker" -# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' -# -# -#build:docker:next: -# extends: .docker-shared-settings -# needs: -# - "build:release:cargo:x86_64-unknown-linux-musl" -# script: -# - > -# /kaniko/executor -# $KANIKO_CACHE_ARGS -# --force -# --context $CI_PROJECT_DIR -# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') -# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) -# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" -# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" -# --destination "$CI_REGISTRY_IMAGE/conduit:next" -# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" -# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" -# rules: -# - if: '$CI_COMMIT_BRANCH == "next"' -# -# +docker:master:dockerhub: + extends: .docker-shared-settings + rules: + - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" # --------------------------------------------------------------------- # # Run tests # diff --git a/DEPLOY.md b/DEPLOY.md index 6470c90..0058b93 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -12,20 +12,17 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the right url: -| CPU Architecture | Download link | -| ------------------------------------------- | ----------------------- | -| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] | -| armv6 | [Download][armv6-musl] | -| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl] | -| armv8 / aarch64 | [Download][armv8-musl] | +| CPU Architecture | Download stable version | +| ------------------------------------------- | ------------------------------ | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | +| armv6 | [Download][armv6-musl-master] | +| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | +| armv8 / aarch64 | [Download][armv8-musl-master] | -[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl - -[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf - -[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf - -[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit diff --git a/Dockerfile b/Dockerfile index d137353..6a9ea73 100644 --- a/Dockerfile +++ b/Dockerfile @@ -54,11 +54,11 @@ RUN apk add --no-cache \ RUN mkdir -p /srv/conduit/.local/share/conduit # Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/ +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh # Copy over the actual Conduit binary from the builder stage -COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/ +COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit # Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index b51df7c..4ab874d 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -47,7 +47,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ RUN mkdir -p /srv/conduit/.local/share/conduit # Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/ +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh From 
f91216dd3ce5f842c1c441d0bae5a852e689bccf Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Dec 2021 11:16:02 +0100 Subject: [PATCH 0837/1727] CI: Optionally use sccache for compilation This moves compiler caching for incremental builds away from GitLab caching the whole target/ folder to caching each code unit in S3. This aleviates the need to zip and unzip and just caches on the fly. This feature is optional and gated behind the SCCACHE_BIN_URL env --- .gitlab-ci.yml | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a8d4384..664b5ea 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,16 +26,19 @@ variables: cache: paths: - cargohome - - target/ - key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH" variables: CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow + CARGO_HOME: $CI_PROJECT_DIR/cargohome before_script: - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "mkdir -p $CARGO_HOME" - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: - time cargo build --target $TARGET --release - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' @@ -216,20 +219,20 @@ test:cargo: image: "rust:latest" tags: ["docker"] variables: - CARGO_HOME: "cargohome" + CARGO_HOME: "$CI_PROJECT_DIR/cargohome" + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow cache: paths: - - target - cargohome - key: test_cache + key: "test_cache--$CI_COMMIT_BRANCH" interruptible: true before_script: - - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" + - mkdir -p $CARGO_HOME - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget - rustup component add clippy rustfmt - - wget "https://faulty-storage.de/gitlab-report" - - chmod +x ./gitlab-report + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check From adb518fa0df35ba85c2ff1c96a539dda085f8991 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Dec 2021 11:16:40 +0100 Subject: [PATCH 0838/1727] CI: Use curl instead of wget The rust docker image already comes with curl, no need to install wget. 
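
Stepping back to the optional sccache wrapper wired up in the previous patch: run outside CI, the same pattern looks roughly like this. The sccache binary location and the S3 endpoint/credentials are whatever `SCCACHE_BIN_URL` and the usual `SCCACHE_*` variables point at, so treat the paths below as illustrative:

```bash
# Fetch the sccache binary if a URL is provided and route rustc through it,
# mirroring the CI before_script; afterwards inspect the hit/miss statistics.
if [ -n "${SCCACHE_BIN_URL}" ]; then
  curl "$SCCACHE_BIN_URL" --output /tmp/sccache && chmod +x /tmp/sccache
  export RUSTC_WRAPPER=/tmp/sccache
fi
cargo build --release
[ -x /tmp/sccache ] && /tmp/sccache --show-stats
```
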
--- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 664b5ea..1dedd8f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -229,8 +229,9 @@ test:cargo: before_script: - mkdir -p $CARGO_HOME - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - rustup component add clippy rustfmt + - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: From 339a26f56c84da242d753a1894589f5923b0fd7e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 15 Dec 2021 10:14:20 +0000 Subject: [PATCH 0839/1727] Update docker images --- Dockerfile | 7 +++--- docker/ci-binaries-packaging.Dockerfile | 31 ++++++++++++------------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6a9ea73..5812fdf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ RUN touch src/main.rs && touch src/lib.rs && cargo build --release # --------------------------------------------------------------------------------------------------------------- # Stuff below this line actually ends up in the resulting docker image # --------------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.14 AS runner +FROM docker.io/alpine:3.15.0 AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. @@ -45,9 +45,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # ca-certificates: for https # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc + ca-certificates \ + libgcc # Created directory for the database and media files diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 4ab874d..f460310 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -1,14 +1,13 @@ # syntax=docker/dockerfile:1 # --------------------------------------------------------------------------------------------------------- # This Dockerfile is intended to be built as part of Conduit's CI pipeline. -# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. -# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary. +# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs. # # It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.14 AS runner +FROM docker.io/alpine:3.15.0 AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. 
@@ -21,8 +20,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # ca-certificates: for https # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ - ca-certificates \ - libgcc + ca-certificates \ + libgcc ARG CREATED @@ -31,17 +30,17 @@ ARG GIT_REF # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ - org.opencontainers.image.ref.name="" + org.opencontainers.image.authors="Conduit Contributors" \ + org.opencontainers.image.title="Conduit" \ + org.opencontainers.image.version=${VERSION} \ + org.opencontainers.image.vendor="Conduit Contributors" \ + org.opencontainers.image.description="A Matrix homeserver written in Rust" \ + org.opencontainers.image.url="https://conduit.rs/" \ + org.opencontainers.image.revision=${GIT_REF} \ + org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ + org.opencontainers.image.ref.name="" # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit From 1fc616320a2aa8ab02edbfca7620773f69abf797 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 26 Nov 2021 19:28:47 +0100 Subject: [PATCH 0840/1727] Use struct init shorthand --- src/client_server/voip.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 9c3b20d..c9a98d9 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -49,8 +49,8 @@ pub async fn turn_server_route( }; Ok(get_turn_server_info::Response { - username: username, - password: password, + username, + password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), } From 892a0525f20a0a815e7d12f45a7c5a623de7844d Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 26 Nov 2021 20:36:40 +0100 Subject: [PATCH 0841/1727] Upgrade Ruma --- Cargo.lock | 43 ++++------ Cargo.toml | 2 +- src/client_server/account.rs | 15 ++-- src/client_server/capabilities.rs | 6 +- src/client_server/directory.rs | 2 +- src/client_server/keys.rs | 22 ++--- src/client_server/membership.rs | 41 ++++----- src/client_server/message.rs | 2 +- src/client_server/report.rs | 6 +- src/client_server/room.rs | 26 +++--- src/client_server/state.rs | 4 +- src/client_server/sync.rs | 27 +++--- src/client_server/voip.rs | 2 +- src/database.rs | 11 +-- src/database/admin.rs | 17 ++-- src/database/globals.rs | 14 +-- src/database/key_backups.rs | 6 +- src/database/pusher.rs | 4 +- src/database/rooms.rs | 138 +++++++++++++++--------------- src/database/rooms/edus.rs | 21 +++-- src/database/sending.rs | 4 +- src/database/users.rs | 35 ++++---- 
src/pdu.rs | 28 +++--- src/ruma_wrapper.rs | 6 +- src/server_server.rs | 119 ++++++++++++-------------- 25 files changed, 297 insertions(+), 304 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9682f2f..8b25b47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1516,12 +1516,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "paste" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" - [[package]] name = "pear" version = "0.2.3" @@ -1990,7 +1984,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "assign", "js_int", @@ -2011,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "bytes", "http", @@ -2027,7 +2021,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2038,7 +2032,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "ruma-api", "ruma-common", @@ -2052,7 +2046,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "assign", "bytes", @@ -2072,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "indexmap", "js_int", @@ -2087,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "indoc", "js_int", @@ -2103,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = 
"git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2114,7 +2108,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2129,9 +2123,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ - "paste", "percent-encoding", "rand 0.8.4", "ruma-identifiers-macros", @@ -2144,7 +2137,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2154,7 +2147,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "thiserror", ] @@ -2162,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2175,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2190,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "bytes", "form_urlencoded", @@ -2204,7 +2197,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2215,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source 
= "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2232,7 +2225,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 91c7e25..b24afb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "bba7d624425da2c65a834bbd0e633b7577488cdf", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4b3ad0d..d7c2f63 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -11,10 +11,9 @@ use ruma::{ error::ErrorKind, r0::{ account::{ - change_password, deactivate, get_username_availability, register, whoami, - ThirdPartyIdRemovalStatus, + change_password, deactivate, get_3pids, get_username_availability, register, + whoami, ThirdPartyIdRemovalStatus, }, - contact::get_contacts, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, }, @@ -282,7 +281,7 @@ pub async fn register_route( let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; - content.room_version = RoomVersionId::Version6; + content.room_version = RoomVersionId::V6; // 1. 
The room create event db.rooms.build_and_append_pdu( @@ -433,7 +432,7 @@ pub async fn register_route( )?; // Room alias - let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) + let alias: Box = format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); @@ -757,9 +756,9 @@ pub async fn deactivate_route( get("/_matrix/client/r0/account/3pid", data = "") )] pub async fn third_party_route( - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> ConduitResult { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_contacts::Response::new(Vec::new()).into()) + Ok(get_3pids::Response::new(Vec::new()).into()) } diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index f86b23b..c69b7cb 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -22,12 +22,12 @@ pub async fn get_capabilities_route( _body: Ruma, ) -> ConduitResult { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); - available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); + available.insert(RoomVersionId::V5, RoomVersionStability::Stable); + available.insert(RoomVersionId::V6, RoomVersionStability::Stable); let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: RoomVersionId::Version6, + default: RoomVersionId::V6, available, }; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 490f752..5a1bc49 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -167,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, - since: since.as_deref(), + since, filter: Filter { generic_search_term: filter.generic_search_term.as_deref(), }, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index a44f5e9..08ea6e7 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -316,7 +316,7 @@ pub async fn get_key_changes_route( pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, - device_keys_input: &BTreeMap>>, + device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, db: &Database, ) -> Result { @@ -328,6 +328,8 @@ pub(crate) async fn get_keys_helper bool>( let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { + let user_id: &UserId = &**user_id; + if user_id.server_name() != db.globals.server_name() { get_over_federation .entry(user_id.server_name()) @@ -355,11 +357,11 @@ pub(crate) async fn get_keys_helper bool>( container.insert(device_id, keys); } } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { + if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? 
{ let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, @@ -371,24 +373,24 @@ pub(crate) async fn get_keys_helper bool>( device_display_name: metadata.display_name, }; - container.insert(device_id.clone(), keys); + container.insert(device_id.to_owned(), keys); } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } } if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { - master_keys.insert(user_id.clone(), master_key); + master_keys.insert(user_id.to_owned(), master_key); } if let Some(self_signing_key) = db .users .get_self_signing_key(user_id, &allowed_signatures)? { - self_signing_keys.insert(user_id.clone(), self_signing_key); + self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { - user_signing_keys.insert(user_id.clone(), user_signing_key); + user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } } @@ -400,7 +402,7 @@ pub(crate) async fn get_keys_helper bool>( .map(|(server, vec)| async move { let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { - device_keys_input_fed.insert(user_id.clone(), keys.clone()); + device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } ( server, @@ -440,7 +442,7 @@ pub(crate) async fn get_keys_helper bool>( } pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, + one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, ) -> Result { let mut one_time_keys = BTreeMap::new(); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ec685ec..f65287d 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -64,7 +64,7 @@ pub async fn join_room_by_id_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -72,7 +72,7 @@ pub async fn join_room_by_id_route( let ret = join_room_by_id_helper( &db, - body.sender_user.as_ref(), + body.sender_user.as_deref(), &body.room_id, &servers, body.third_party_signed.as_ref(), @@ -101,7 +101,7 @@ pub async fn join_room_by_id_or_alias_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { + let (servers, room_id) = match Box::::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => { let mut servers: HashSet<_> = db .rooms @@ -111,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -127,7 +127,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - body.sender_user.as_ref(), + body.sender_user.as_deref(), &room_id, &servers, body.third_party_signed.as_ref(), 
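A minimal sketch of the two conversions these membership hunks repeat, assuming the ruma revision pinned in Cargo.toml above (identifiers are unsized there, so owned values are Box<UserId>); the helper names are illustrative only:

use std::convert::TryFrom;
use ruma::UserId;

// String -> Box<UserId>, replacing the old UserId::try_from(sender).
fn parse_sender(sender: String) -> Option<Box<UserId>> {
    Box::<UserId>::try_from(sender).ok()
}

// Option<Box<UserId>> -> Option<&UserId>, which is why .as_ref() becomes .as_deref().
fn borrow_sender(sender_user: &Option<Box<UserId>>) -> Option<&UserId> {
    sender_user.as_deref()
}

The same pair of steps shows up wherever a sender string pulled from an event body has to become a server name: parse to Box<UserId>, then call .server_name() on the borrow.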
@@ -531,7 +531,7 @@ async fn join_room_by_id_helper( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -551,7 +551,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + ver: &[RoomVersionId::V5, RoomVersionId::V6], }, ) .await; @@ -567,8 +567,7 @@ async fn join_room_by_id_helper( let room_version = match make_join_response.room_version { Some(room_version) - if room_version == RoomVersionId::Version5 - || room_version == RoomVersionId::Version6 => + if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 => { room_version } @@ -620,7 +619,7 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"); // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") @@ -776,7 +775,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.clone()).into()) + Ok(join_room_by_id::Response::new(room_id.to_owned()).into()) } fn validate_and_add_event_id( @@ -784,12 +783,12 @@ fn validate_and_add_event_id( room_version: &RoomVersionId, pub_key_map: &RwLock>>, db: &Database, -) -> Result<(EventId, CanonicalJsonObject)> { +) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -856,7 +855,7 @@ pub(crate) async fn invite_helper<'a>( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -892,9 +891,7 @@ pub(crate) async fn invite_helper<'a>( // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + .map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); @@ -939,9 +936,9 @@ pub(crate) async fn invite_helper<'a>( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender_user.clone(), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + room_id: room_id.to_owned(), + sender: sender_user.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -1014,7 +1011,7 @@ pub(crate) async fn invite_helper<'a>( }; // Generate event id - let expected_event_id = EventId::try_from(&*format!( + let expected_event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -1100,7 +1097,7 @@ pub(crate) async fn invite_helper<'a>( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = 
mutex_state.lock().await; diff --git a/src/client_server/message.rs b/src/client_server/message.rs index abbbe8e..0d00610 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -67,7 +67,7 @@ pub async fn send_message_event_route( )); } - let event_id = EventId::try_from( + let event_id = Box::::try_from( utils::string_from_bytes(&response) .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, ) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 3dcb4d1..2e6527d 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -57,8 +57,7 @@ pub async fn report_event_route( Report Score: {}\n\ Report Reason: {}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason - ) - .to_owned(), + ), format!( "
Report received from: {0}\
  • Event Info
    • Event ID: {1}\ @@ -72,8 +71,7 @@ pub async fn report_event_route( pdu.sender, body.score, RawStr::new(&body.reason).html_escape() - ) - .to_owned(), + ), ), )); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 47c7ee6..97b3f48 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -88,14 +88,17 @@ pub async fn create_room_route( )); } - let alias: Option = + let alias: Option> = body.room_alias_name .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let alias = Box::::try_from(format!( + "#{}:{}", + localpart, + db.globals.server_name(), + )) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; if db.rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( @@ -109,7 +112,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { + if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 { room_version } else { return Err(Error::BadRequest( @@ -118,7 +121,7 @@ pub async fn create_room_route( )); } } - None => RoomVersionId::Version6, + None => RoomVersionId::V6, }; let content = match &body.creation_content { @@ -164,7 +167,7 @@ pub async fn create_room_route( .get(), ); - if let Err(_) = de_result { + if de_result.is_err() { return Err(Error::BadRequest( ErrorKind::BadJson, "Invalid creation content", @@ -269,7 +272,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.clone()), + alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], }) .expect("We checked that alias earlier, it must be fine"), @@ -505,10 +508,7 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!( - body.new_version, - RoomVersionId::Version5 | RoomVersionId::Version6 - ) { + if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -605,7 +605,7 @@ pub async fn upgrade_room_route( .get(), ); - if let Err(_) = de_result { + if de_result.is_err() { return Err(Error::BadRequest( ErrorKind::BadJson, "Error forming creation event", diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 307bcca..0ba2062 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -267,7 +267,7 @@ async fn send_state_event_for_key_helper( event_type: EventType, json: &Raw, state_key: String, -) -> Result { +) -> Result> { let sender_user = sender; // TODO: Review this check, error if event is unparsable, use event type, allow alias if it @@ -303,7 +303,7 @@ async fn send_state_event_for_key_helper( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 65c07bc..1060d91 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,15 +54,17 @@ use rocket::{get, tokio}; /// `since` will be cached #[cfg_attr( feature = 
"conduit_bin", - get("/_matrix/client/r0/sync", data = "") + get("/_matrix/client/r0/sync", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, + req: Ruma>, ) -> Result, RumaResponse> { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let body = req.body; + + let sender_user = req.sender_user.expect("user is authenticated"); + let sender_device = req.sender_device.expect("user is authenticated"); let arc_db = Arc::new(db); @@ -132,7 +134,7 @@ pub async fn sync_events_route( async fn sync_helper_wrapper( db: Arc, - sender_user: UserId, + sender_user: Box, sender_device: Box, since: Option, full_state: bool, @@ -176,7 +178,7 @@ async fn sync_helper_wrapper( async fn sync_helper( db: Arc, - sender_user: UserId, + sender_user: Box, sender_device: Box, since: Option, full_state: bool, @@ -296,9 +298,10 @@ async fn sync_helper( })?; if let Some(state_key) = &pdu.state_key { - let user_id = UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; + let user_id = + Box::::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; // The membership was and still is invite or join if matches!( @@ -424,7 +427,7 @@ async fn sync_helper( } if let Some(state_key) = &state_event.state_key { - let user_id = UserId::try_from(state_key.clone()) + let user_id = Box::::try_from(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; if user_id == sender_user { @@ -793,7 +796,7 @@ fn share_encrypted_room( ) -> Result { Ok(db .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index c9a98d9..66a85f0 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -26,7 +26,7 @@ pub async fn turn_server_route( let turn_secret = db.globals.turn_secret(); - let (username, password) = if turn_secret != "" { + let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), ) diff --git a/src/database.rs b/src/database.rs index 080e24b..056d49a 100644 --- a/src/database.rs +++ b/src/database.rs @@ -477,7 +477,8 @@ impl Database { // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { let room_id = - RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); + Box::::try_from(utils::string_from_bytes(&roomid).unwrap()) + .unwrap(); db.rooms.update_joined_count(&room_id, &db)?; } @@ -489,7 +490,7 @@ impl Database { if db.globals.database_version()? 
< 7 { // Upgrade state store - let mut last_roomstates: HashMap = HashMap::new(); + let mut last_roomstates: HashMap, u64> = HashMap::new(); let mut current_sstatehash: Option = None; let mut current_room = None; let mut current_state = HashSet::new(); @@ -570,7 +571,7 @@ impl Database { if let Some(current_sstatehash) = current_sstatehash { handle_state( current_sstatehash, - current_room.as_ref().unwrap(), + current_room.as_deref().unwrap(), current_state, &mut last_roomstates, )?; @@ -587,7 +588,7 @@ impl Database { .unwrap() .unwrap(); let event_id = - EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) + Box::::try_from(utils::string_from_bytes(&event_id).unwrap()) .unwrap(); let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); @@ -604,7 +605,7 @@ impl Database { if let Some(current_sstatehash) = current_sstatehash { handle_state( current_sstatehash, - current_room.as_ref().unwrap(), + current_room.as_deref().unwrap(), current_state, &mut last_roomstates, )?; diff --git a/src/database/admin.rs b/src/database/admin.rs index 8d8559a..07a487e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,13 +1,10 @@ -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{convert::TryFrom, sync::Arc}; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - UserId, + RoomAliasId, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -37,15 +34,17 @@ impl Admin { let guard = db.read().await; let conduit_user = - UserId::try_from(format!("@conduit:{}", guard.globals.server_name())) + Box::::try_from(format!("@conduit:{}", guard.globals.server_name())) .expect("@conduit:server_name is valid"); let conduit_room = guard .rooms .id_from_alias( - &format!("#admins:{}", guard.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), + &Box::::try_from(format!( + "#admins:{}", + guard.globals.server_name() + )) + .expect("#admins:server_name is a valid room alias"), ) .unwrap(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 05ecb56..098d819 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -40,13 +40,13 @@ pub struct Globals { dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, pub(super) server_signingkeys: Arc, - pub bad_event_ratelimiter: Arc>>, + pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex_insert: RwLock>>>, - pub roomid_mutex_state: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub sync_receivers: RwLock, Box), SyncHandle>>, + pub roomid_mutex_insert: RwLock, Arc>>>, + pub roomid_mutex_state: RwLock, Arc>>>, + pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer pub rotate: RotationHandler, } @@ -254,7 +254,7 @@ impl Globals { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { + ) -> Result, VerifyKey>> { // Not atomic, but this is not critical let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; @@ -293,7 +293,7 @@ impl Globals { pub fn signing_keys_for( &self, origin: &ServerName, - ) -> Result> { + ) -> Result, VerifyKey>> { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? 
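The globals.rs hunk above re-keys the per-room mutex maps by owned Box<RoomId> values, which is why later hunks turn .entry(room_id.clone()) into .entry(room_id.to_owned()). A sketch of that locking pattern under the same assumptions (tokio's async Mutex, as in the surrounding code; lock_room is an illustrative name):

use std::{collections::HashMap, sync::{Arc, RwLock}};
use ruma::RoomId;
use tokio::sync::Mutex;

type RoomMutexMap = RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>;

// Fetch (or lazily create) the per-room mutex; the borrowed &RoomId becomes the
// owned Box<RoomId> map key via to_owned(), matching the .entry(room_id.to_owned()) hunks.
async fn lock_room(map: &RoomMutexMap, room_id: &RoomId) {
    let mutex = Arc::clone(
        map.write()
            .unwrap()
            .entry(room_id.to_owned())
            .or_default(),
    );
    let _state_lock = mutex.lock().await;
}

Cloning the Arc out of the map lets the short-lived write lock on the map drop before the per-room mutex itself is awaited.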
diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 98ea011..3010a37 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -209,13 +209,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result> { + ) -> Result, RoomKeyBackup>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); + let mut rooms = BTreeMap::, RoomKeyBackup>::new(); for result in self .backupkeyid_backup @@ -231,7 +231,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup session_id is invalid.") })?; - let room_id = RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f53f137..97ca85d 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -234,7 +234,7 @@ pub fn get_actions<'a>( db: &Database, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { - room_id: room_id.clone(), + room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users @@ -277,7 +277,7 @@ async fn send_notice( let mut data_minus_url = pusher.data.clone(); // The url must be stripped off according to spec data_minus_url.url = None; - device.data = Some(data_minus_url); + device.data = data_minus_url; // Tweaks are only added if the format is NOT event_id_only if !event_id_only { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c5b795b..ebd0941 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -107,14 +107,14 @@ pub struct Rooms { /// RoomId + EventId -> Parent PDU EventId. pub(super) referencedevents: Arc, - pub(super) pdu_cache: Mutex>>, + pub(super) pdu_cache: Mutex, Arc>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex>, + pub(super) eventidshort_cache: Mutex, u64>>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock>>>, - pub(super) appservice_in_room_cache: RwLock>>, + pub(super) our_real_users_cache: RwLock, Arc>>>>, + pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -434,7 +434,7 @@ impl Rooms { None => continue, }; - let user_id = match UserId::try_from(state_key) { + let user_id = match Box::::try_from(state_key) { Ok(id) => id, Err(_) => continue, }; @@ -742,7 +742,7 @@ impl Rooms { self.eventidshort_cache .lock() .unwrap() - .insert(event_id.clone(), short); + .insert(event_id.to_owned(), short); Ok(short) } @@ -871,8 +871,8 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = Arc::new( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + let event_id = Arc::from( + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, @@ -1112,7 +1112,7 @@ impl Rooms { self.pdu_cache .lock() .unwrap() - .insert(event_id.clone(), Arc::clone(&pdu)); + .insert(event_id.to_owned(), Arc::clone(&pdu)); Ok(Some(pdu)) } else { Ok(None) @@ -1162,14 +1162,14 @@ impl Rooms { /// Returns the leaf pdus of a room. 
#[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1178,7 +1178,7 @@ impl Rooms { } #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1193,7 +1193,7 @@ impl Rooms { /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1261,7 +1261,7 @@ impl Rooms { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: &[EventId], + leaves: &[Box], db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -1420,7 +1420,7 @@ impl Rooms { } // if the state_key fails - let target_user_id = UserId::try_from(state_key.clone()) + let target_user_id = Box::::try_from(state_key.clone()) .expect("This state_key was previously validated"); let content = serde_json::from_str::(pdu.content.get()) @@ -1476,9 +1476,11 @@ impl Rooms { if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), + &Box::::try_from(format!( + "#admins:{}", + db.globals.server_name() + )) + .expect("#admins:server_name is a valid room alias"), )? .as_ref() == Some(&pdu.room_id) @@ -1528,7 +1530,7 @@ impl Rooms { } "get_auth_chain" => { if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { + if let Ok(event_id) = Box::::try_from(args[0]) { if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") @@ -1539,12 +1541,12 @@ impl Rooms { ) })?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; let start = Instant::now(); let count = server_server::get_auth_chain( &room_id, - vec![Arc::new(event_id)], + vec![Arc::from(event_id)], db, )? 
.count(); @@ -1567,12 +1569,12 @@ impl Rooms { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash( &value, - &RoomVersionId::Version6 + &RoomVersionId::V6 ) .expect("ruma can calculate reference hashes") )) @@ -1622,7 +1624,7 @@ impl Rooms { } "get_pdu" => { if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { + if let Ok(event_id) = Box::::try_from(args[0]) { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; @@ -1948,7 +1950,7 @@ impl Rooms { room_id: &RoomId, db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result { + ) -> Result> { let PduBuilder { event_type, content, @@ -1985,9 +1987,7 @@ impl Rooms { // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + .map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = @@ -2016,9 +2016,9 @@ impl Rooms { } let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + room_id: room_id.to_owned(), + sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -2083,7 +2083,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = EventId::try_from(&*format!( + pdu.event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2206,7 +2206,7 @@ impl Rooms { let mut first_pdu_id = prefix.clone(); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2243,7 +2243,7 @@ impl Rooms { let current: &[u8] = ¤t; - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2280,7 +2280,7 @@ impl Rooms { let current: &[u8] = ¤t; - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2412,7 +2412,7 @@ impl Rooms { for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.clone()); + room_ids.push(room_id.to_owned()); room_ids_updated = true; } } @@ -2451,7 +2451,11 @@ impl Rooms { EventType::IgnoredUserList, )? 
.map_or(false, |ignored| { - ignored.content.ignored_users.contains(sender) + ignored + .content + .ignored_users + .iter() + .any(|user| user == sender) }); if is_ignored { @@ -2537,7 +2541,7 @@ impl Rooms { self.our_real_users_cache .write() .unwrap() - .insert(room_id.clone(), Arc::new(real_users)); + .insert(room_id.to_owned(), Arc::new(real_users)); for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { if !joined_servers.remove(&old_joined_server) { @@ -2582,7 +2586,7 @@ impl Rooms { &self, room_id: &RoomId, db: &Database, - ) -> Result>> { + ) -> Result>>> { let maybe = self .our_real_users_cache .read() @@ -2650,7 +2654,7 @@ impl Rooms { self.appservice_in_room_cache .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default() .insert(appservice.0.clone(), in_room); @@ -2694,7 +2698,7 @@ impl Rooms { .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -2754,7 +2758,7 @@ impl Rooms { .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -2778,9 +2782,7 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) - if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 => - { + Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => { version } _ => return Err(Error::BadServerResponse("Room version is not supported")), @@ -2817,7 +2819,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2902,11 +2904,11 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { + pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in alias_roomid is invalid unicode.") })?) 
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) @@ -2918,7 +2920,7 @@ impl Rooms { pub fn room_aliases<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -2947,9 +2949,9 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator> + '_ { + pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, @@ -3009,8 +3011,8 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, - users: Vec, - ) -> Result> + 'a> { + users: Vec>, + ) -> Result>> + 'a> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -3037,7 +3039,7 @@ impl Rooms { Ok(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) @@ -3082,12 +3084,12 @@ impl Rooms { pub fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = server.as_bytes().to_vec(); prefix.push(0xff); self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3104,12 +3106,12 @@ impl Rooms { pub fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3150,14 +3152,14 @@ impl Rooms { pub fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuseroncejoinedids .scan_prefix(prefix) .map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3176,14 +3178,14 @@ impl Rooms { pub fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuserid_invitecount .scan_prefix(prefix) .map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3232,11 +3234,11 @@ impl Rooms { pub fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) .map(|(key, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3255,14 +3257,14 @@ impl Rooms { pub fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>)>> + 'a { + ) -> impl Iterator, Vec>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.userroomid_invitestate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = 
RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3328,14 +3330,14 @@ impl Rooms { pub fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>)>> + 'a { + ) -> impl Iterator, Vec>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.userroomid_leftstate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 9a27e43..365211b 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -76,8 +76,13 @@ impl RoomEdus { &'a self, room_id: &RoomId, since: u64, - ) -> impl Iterator)>> + 'a - { + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); @@ -92,7 +97,7 @@ impl RoomEdus { let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::try_from( + let user_id = Box::::try_from( utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) .map_err(|_| { Error::bad_database("Invalid readreceiptid userid bytes in db.") @@ -309,7 +314,7 @@ impl RoomEdus { .typingid_userid .scan_prefix(prefix) .map(|(_, user_id)| { - UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) @@ -449,7 +454,7 @@ impl RoomEdus { { // Send new presence events to set the user offline let count = globals.next_count()?.to_be_bytes(); - let user_id = utils::string_from_bytes(&user_id_bytes) + let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) .map_err(|_| { Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") })? 
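The edus.rs hunks around here read identifiers back out of raw database keys; a std-only sketch of that two-step parse (conduit's utils::string_from_bytes is approximated with String::from_utf8, and the Error::bad_database mapping is elided):

use std::convert::TryFrom;
use ruma::UserId;

// Database keys and values store identifiers as UTF-8 bytes, so reading one back
// is "bytes -> String -> Box<UserId>", as in the typing/presence hunks above.
fn user_id_from_bytes(bytes: &[u8]) -> Option<Box<UserId>> {
    let s = String::from_utf8(bytes.to_vec()).ok()?;
    Box::<UserId>::try_from(s).ok()
}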
@@ -475,7 +480,7 @@ impl RoomEdus { presence: PresenceState::Offline, status_msg: None, }, - sender: user_id.clone(), + sender: user_id.to_owned(), }) .expect("PresenceEvent can be serialized"), )?; @@ -498,7 +503,7 @@ impl RoomEdus { since: u64, _rooms: &super::Rooms, _globals: &super::super::globals::Globals, - ) -> Result> { + ) -> Result, PresenceEvent>> { //self.presence_maintain(rooms, globals)?; let mut prefix = room_id.as_bytes().to_vec(); @@ -513,7 +518,7 @@ impl RoomEdus { .iter_from(&*first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { - let user_id = UserId::try_from( + let user_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/sending.rs b/src/database/sending.rs index bf0cc2c..c27b573 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -397,7 +397,7 @@ impl Sending { // Because synapse resyncs, we can just insert dummy data let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, - device_id: device_id!("dummy"), + device_id: device_id!("dummy").to_owned(), device_display_name: Some("Dummy".to_owned()), stream_id: uint!(1), prev_id: Vec::new(), @@ -584,7 +584,7 @@ impl Sending { } let userid = - UserId::try_from(utils::string_from_bytes(user).map_err(|_| { + Box::::try_from(utils::string_from_bytes(user).map_err(|_| { ( kind.clone(), Error::bad_database("Invalid push user string in db."), diff --git a/src/database/users.rs b/src/database/users.rs index d0da071..4a08472 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -63,11 +63,11 @@ impl Users { globals: &super::globals::Globals, ) -> Result { let admin_room_alias_id = - RoomAliasId::try_from(format!("#admins:{}", globals.server_name())) + Box::::try_from(format!("#admins:{}", globals.server_name())) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - Ok(rooms.is_joined(user_id, &admin_room_id)?) + rooms.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. @@ -85,7 +85,7 @@ impl Users { /// Find out which user an access token belongs to. #[tracing::instrument(skip(self, token))] - pub fn find_from_token(&self, token: &str) -> Result> { + pub fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -98,9 +98,11 @@ impl Users { })?; Ok(Some(( - UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) + Box::::try_from(utils::string_from_bytes(user_bytes).map_err( + |_| { + Error::bad_database("User ID in token_userdeviceid is invalid unicode.") + }, + )?) .map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid.") })?, @@ -113,9 +115,9 @@ impl Users { /// Returns an iterator over all users on this homeserver. #[tracing::instrument(skip(self))] - pub fn iter(&self) -> impl Iterator> + '_ { + pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) @@ -181,20 +183,21 @@ impl Users { /// Get the avatar_url of a user. 
#[tracing::instrument(skip(self, user_id))] - pub fn avatar_url(&self, user_id: &UserId) -> Result> { + pub fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) + Box::::try_from(s) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) .transpose() } /// Sets a new avatar_url or removes it if avatar_url is None. #[tracing::instrument(skip(self, user_id, avatar_url))] - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -409,7 +412,7 @@ impl Users { device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result> { + ) -> Result, OneTimeKey)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -459,7 +462,7 @@ impl Users { .scan_prefix(userdeviceid) .map(|(bytes, _)| { Ok::<_, Error>( - serde_json::from_slice::( + serde_json::from_slice::>( &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, @@ -632,7 +635,7 @@ impl Users { .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .as_object_mut() .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.clone()) + .entry(sender_id.to_owned()) .or_insert_with(|| serde_json::Map::new().into()); signatures @@ -657,7 +660,7 @@ impl Users { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -683,7 +686,7 @@ impl Users { } }) .map(|(_, bytes)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) diff --git a/src/pdu.rs b/src/pdu.rs index 0f99f43..3c95597 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, ops::Deref}; use tracing::warn; /// Content hashes of a PDU. 
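The pdu.rs hunks below, like the membership and rooms hunks earlier, derive the event ID from the event's reference hash and then parse it into an owned Box<EventId>. A sketch of that step, assuming the ruma revision above (the exact import path of CanonicalJsonObject may differ at this revision):

use std::convert::TryFrom;
use ruma::{serde::CanonicalJsonObject, EventId, RoomVersionId};

// "$" followed by the reference hash is the event ID (anything newer than room
// version 3 behaves the same), parsed into an owned Box<EventId> as in
// gen_event_id_canonical_json below.
fn event_id_for(value: &CanonicalJsonObject, room_version: &RoomVersionId) -> Box<EventId> {
    Box::<EventId>::try_from(&*format!(
        "${}",
        ruma::signatures::reference_hash(value, room_version)
            .expect("ruma can calculate reference hashes")
    ))
    .expect("ruma's reference hashes are valid event ids")
}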
@@ -25,20 +25,20 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { - pub event_id: EventId, - pub room_id: RoomId, - pub sender: UserId, + pub event_id: Box, + pub room_id: Box, + pub sender: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: EventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec, + pub prev_events: Vec>, pub depth: UInt, - pub auth_events: Vec, + pub auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option, + pub redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, @@ -295,15 +295,15 @@ impl state_res::Event for PduEvent { } fn prev_events(&self) -> Box + '_> { - Box::new(self.prev_events.iter()) + Box::new(self.prev_events.iter().map(Deref::deref)) } fn auth_events(&self) -> Box + '_> { - Box::new(self.auth_events.iter()) + Box::new(self.auth_events.iter().map(Deref::deref)) } fn redacts(&self) -> Option<&EventId> { - self.redacts.as_ref() + self.redacts.as_deref() } } @@ -331,16 +331,16 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, -) -> crate::Result<(EventId, CanonicalJsonObject)> { +) -> crate::Result<(Box, CanonicalJsonObject)> { let value = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&value, &RoomVersionId::V6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -356,7 +356,7 @@ pub struct PduBuilder { pub content: Box, pub unsigned: Option>, pub state_key: Option, - pub redacts: Option, + pub redacts: Option>, } /// Direct conversion prevents loss of the empty `state_key` that ruma requires. diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 03c115c..2cff2f5 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -29,7 +29,7 @@ use { /// first. 
pub struct Ruma { pub body: T::Incoming, - pub sender_user: Option, + pub sender_user: Option>, pub sender_device: Option>, pub sender_servername: Option>, // This is None when body is not a valid string @@ -86,7 +86,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) + .map_or(false, |as_token| token == Some(as_token)) }) { match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { @@ -103,7 +103,7 @@ where .unwrap() }, |string| { - UserId::try_from(string.expect("parsing to string always works")) + Box::::try_from(string.expect("parsing to string always works")) .unwrap() }, ); diff --git a/src/server_server.rs b/src/server_server.rs index 482edf0..ec5bc34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -552,7 +552,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( - ServerSigningKeyId::try_from( + Box::::try_from( format!("ed25519:{}", db.globals.keypair().version()).as_str(), ) .expect("found invalid server signing keys in DB"), @@ -736,7 +736,7 @@ pub async fn send_transaction_message_route( // 0. Check the server is in the room let room_id = match value .get("room_id") - .and_then(|id| RoomId::try_from(id.as_str()?).ok()) + .and_then(|id| Box::::try_from(id.as_str()?).ok()) { Some(id) => id, None => { @@ -1003,11 +1003,10 @@ pub(crate) async fn handle_incoming_pdu<'a>( // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events let mut graph = HashMap::new(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec<_> = incoming_pdu + let mut todo_outlier_stack: Vec> = incoming_pdu .prev_events .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect(); let mut amount = 0; @@ -1027,7 +1026,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); continue; } @@ -1038,27 +1037,27 @@ pub(crate) async fn handle_incoming_pdu<'a>( amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone()))); + todo_outlier_stack.push(dbg!(Arc::from(&**prev_prev))); } } graph.insert( - (*prev_event_id).clone(), + (*prev_event_id).to_owned(), pdu.prev_events.iter().cloned().collect(), ); } else { // Time based check failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Get json failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } } else { // Fetch and handle failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } } @@ -1074,7 +1073,6 @@ pub(crate) async fn handle_incoming_pdu<'a>( .get(event_id) .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), - ruma::event_id!("$notimportant"), )) }) .map_err(|_| "Error sorting prev events".to_owned())?; @@ -1084,7 +1082,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if errors >= 5 { break; } - if let Some((pdu, json)) = eventid_info.remove(&prev_id) { + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { 
if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { continue; } @@ -1200,8 +1198,7 @@ fn handle_outlier_pdu<'a>( &incoming_pdu .auth_events .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect::>(), create_event, room_id, @@ -1331,7 +1328,7 @@ async fn upgrade_outlier_to_timeline_pdu( let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { - let prev_event = &incoming_pdu.prev_events[0]; + let prev_event = &*incoming_pdu.prev_events[0]; let prev_event_sstatehash = db .rooms .pdu_shortstatehash(prev_event) @@ -1353,7 +1350,7 @@ async fn upgrade_outlier_to_timeline_pdu( .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - state.insert(shortstatekey, Arc::new(prev_event.clone())); + state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu } @@ -1397,7 +1394,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -1410,14 +1407,14 @@ async fn upgrade_outlier_to_timeline_pdu( .get_statekey_from_short(k) .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - state.insert(k, (*id).clone()); + state.insert(k, (*id).to_owned()); starting_events.push(id); } auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).clone()) + .map(|event_id| (*event_id).to_owned()) .collect(), ); @@ -1444,7 +1441,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, Arc::new(event_id))) + Ok((shortstatekey, Arc::from(event_id))) }) .collect::>()?, ), @@ -1479,8 +1476,7 @@ async fn upgrade_outlier_to_timeline_pdu( origin, &res.pdu_ids .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect::>(), create_event, room_id, @@ -1488,7 +1484,7 @@ async fn upgrade_outlier_to_timeline_pdu( ) .await; - let mut state = BTreeMap::new(); + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { let state_key = pdu .state_key @@ -1502,7 +1498,7 @@ async fn upgrade_outlier_to_timeline_pdu( match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { - v.insert(Arc::new(pdu.event_id.clone())); + v.insert(Arc::from(&*pdu.event_id)); } btree_map::Entry::Occupied(_) => return Err( "State event's type and state_key combination exists multiple times." 
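The state-resolution code here shares event IDs as Arc<EventId>, built with Arc::from on a borrow rather than Arc::new on a clone, since EventId is unsized at this ruma revision. A sketch of both directions (helper names are illustrative):

use std::sync::Arc;
use ruma::EventId;

// Arc<EventId> holds the unsized identifier directly, so it is built from a
// borrow (Arc::from copies the bytes), as in Arc::from(&*pdu.event_id) above.
fn share(event_id: &EventId) -> Arc<EventId> {
    Arc::from(event_id)
}

// Back to an owned ID for APIs that want Box<EventId>, as in
// .map(|event_id| (*event_id).to_owned()) above.
fn unshare(event_id: &Arc<EventId>) -> Box<EventId> {
    (**event_id).to_owned()
}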
@@ -1577,7 +1573,7 @@ async fn upgrade_outlier_to_timeline_pdu( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -1715,7 +1711,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone())); + leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id)); // Now it's the state after the pdu } @@ -1730,7 +1726,7 @@ async fn upgrade_outlier_to_timeline_pdu( .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone())); + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } fork_states.push(state_after); @@ -1762,7 +1758,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).clone()) + .map(|event_id| (*event_id).to_owned()) .collect(), ); } @@ -1774,7 +1770,7 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|(k, id)| { db.rooms .get_statekey_from_short(k) - .map(|k| (k, (*id).clone())) + .map(|k| (k, (*id).to_owned())) }) .collect::>>() }) @@ -1874,7 +1870,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { @@ -1914,7 +1911,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } }; @@ -1939,14 +1936,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>( Ok((pdu, json)) => (pdu, Some(json)), Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } } } Err(_) => { warn!("Failed to fetch event: {}", id); - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } } @@ -2128,7 +2125,7 @@ fn append_incoming_pdu( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: HashSet, + new_room_leaves: HashSet>, state_ids_compressed: HashSet, soft_fail: bool, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex @@ -2298,13 +2295,13 @@ fn get_auth_chain_inner( event_id: &EventId, db: &Database, ) -> Result> { - let mut todo = vec![event_id.clone()]; + let mut todo = vec![event_id.to_owned()]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { match db.rooms.get_pdu(&event_id) { Ok(Some(pdu)) => { - if &pdu.room_id != room_id { + if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { @@ -2314,7 +2311,7 @@ fn get_auth_chain_inner( if !found.contains(&sauthevent) { found.insert(sauthevent); - todo.push(auth_event.clone()); + todo.push(auth_event.to_owned()); } } } @@ -2363,7 +2360,7 @@ pub fn get_event_route( .and_then(|val| val.as_str()) .ok_or_else(|| 
Error::bad_database("Invalid event in database"))?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if !db.rooms.server_in_room(sender_servername, &room_id)? { @@ -2417,7 +2414,7 @@ pub fn get_missing_events_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let event_room_id = RoomId::try_from(room_id_str) + let event_room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if event_room_id != body.room_id { @@ -2436,7 +2433,7 @@ pub fn get_missing_events_route( continue; } queued_events.extend_from_slice( - &serde_json::from_value::>( + &serde_json::from_value::>>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { Error::bad_database("Event in db has no prev_events field.") })?) @@ -2485,14 +2482,14 @@ pub fn get_event_authorization_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if !db.rooms.server_in_room(sender_servername, &room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } - let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2550,7 +2547,7 @@ pub fn get_room_state_route( }) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -2606,13 +2603,13 @@ pub fn get_room_state_ids_route( .rooms .state_full_ids(shortstatehash)? 
.into_iter() - .map(|(_, id)| (*id).clone()) + .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), pdu_ids, } .into()) @@ -2671,9 +2668,8 @@ pub fn create_join_event_template_route( }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + let room_version_id = + create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); if !body.ver.contains(&room_version_id) { @@ -2726,7 +2722,7 @@ pub fn create_join_event_template_route( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), room_id: body.room_id.clone(), sender: body.user_id.clone(), origin_server_ts: utils::millis_since_unix_epoch() @@ -2838,7 +2834,7 @@ async fn create_join_event( .roomid_mutex_federation .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let mutex_lock = mutex.lock().await; @@ -2937,8 +2933,7 @@ pub async fn create_invite_route( return Err(Error::bad_config("Federation is disabled.")); } - if body.room_version != RoomVersionId::Version5 && body.room_version != RoomVersionId::Version6 - { + if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -2959,7 +2954,7 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) .expect("ruma can calculate reference hashes") @@ -2972,7 +2967,7 @@ pub async fn create_invite_route( CanonicalJsonValue::String(event_id.into()), ); - let sender = serde_json::from_value( + let sender: Box<_> = serde_json::from_value( signed_event .get("sender") .ok_or(Error::BadRequest( @@ -2984,7 +2979,7 @@ pub async fn create_invite_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; - let invited_user = serde_json::from_value( + let invited_user: Box<_> = serde_json::from_value( signed_event .get("state_key") .ok_or(Error::BadRequest( @@ -3263,7 +3258,7 @@ pub(crate) async fn fetch_required_signing_keys( // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
fn get_server_keys_from_cache( pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap>, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, @@ -3273,7 +3268,7 @@ fn get_server_keys_from_cache( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -3353,7 +3348,7 @@ pub(crate) async fn fetch_join_signing_keys( pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { - let mut servers: BTreeMap, BTreeMap> = + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = BTreeMap::new(); { @@ -3387,10 +3382,6 @@ pub(crate) async fn fetch_join_signing_keys( server, get_remote_server_keys_batch::v2::Request { server_keys: servers.clone(), - minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(60), - ) - .expect("time is valid"), }, ) .await From 41fef1da64ea792b9ae8827f04d72cf7bbc1c960 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 00:30:00 +0100 Subject: [PATCH 0842/1727] Remove unnecessary .to_string() calls --- src/server_server.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ec5bc34..8a50d23 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -396,10 +396,7 @@ async fn find_actual_destination( } if let Some(port) = force_port { - FedDest::Named( - delegated_hostname, - format!(":{}", port.to_string()), - ) + FedDest::Named(delegated_hostname, format!(":{}", port)) } else { add_port_to_hostname(&delegated_hostname) } @@ -432,10 +429,7 @@ async fn find_actual_destination( } if let Some(port) = force_port { - FedDest::Named( - hostname.clone(), - format!(":{}", port.to_string()), - ) + FedDest::Named(hostname.clone(), format!(":{}", port)) } else { add_port_to_hostname(&hostname) } From bffddbd4879e950a52647126284acafde4c46df4 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 00:30:28 +0100 Subject: [PATCH 0843/1727] Simplify identifier parsing code --- src/client_server/account.rs | 11 ++---- src/client_server/membership.rs | 39 +++++++++++--------- src/client_server/message.rs | 16 +++----- src/client_server/room.rs | 18 +++------ src/client_server/sync.rs | 11 +++--- src/database.rs | 15 +++----- src/database/admin.rs | 18 ++++----- src/database/key_backups.rs | 4 +- src/database/rooms.rs | 65 ++++++++++++++++----------------- src/database/rooms/edus.rs | 24 +++++------- src/database/sending.rs | 31 ++++++++-------- src/database/users.rs | 21 +++++------ src/pdu.rs | 7 ++-- src/ruma_wrapper.rs | 6 +-- src/server_server.rs | 40 ++++++++++---------- 15 files changed, 147 insertions(+), 179 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index d7c2f63..3149187 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,8 +1,4 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; @@ -396,9 +392,8 @@ pub async fn register_route( )?; // 6. 
Events implied by name and topic - let room_name = - Box::::try_from(format!("{} Admin Room", db.globals.server_name())) - .expect("Room name is valid"); + let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + .expect("Room name is valid"); db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f65287d..6c7b721 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -64,7 +64,7 @@ pub async fn join_room_by_id_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -92,16 +92,17 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") + post("/_matrix/client/r0/join/<_>", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, + req: Ruma>, ) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = req.body; + let sender_user = req.sender_user.as_ref().expect("user is authenticated"); - let (servers, room_id) = match Box::::try_from(body.room_id_or_alias.clone()) { + let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { let mut servers: HashSet<_> = db .rooms @@ -111,7 +112,7 @@ pub async fn join_room_by_id_or_alias_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -127,7 +128,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - body.sender_user.as_deref(), + req.sender_user.as_deref(), &room_id, &servers, body.third_party_signed.as_ref(), @@ -619,12 +620,13 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"); // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); // Add event_id back join_event_stub.insert( @@ -642,7 +644,7 @@ async fn join_room_by_id_helper( remote_server, federation::membership::create_join_event::v2::Request { room_id, - event_id: &event_id, + event_id, pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) @@ -650,7 +652,7 @@ async fn join_room_by_id_helper( db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; - let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + let pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event 
PDU."))?; let mut state = HashMap::new(); @@ -788,7 +790,7 @@ fn validate_and_add_event_id( error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -1011,12 +1013,13 @@ pub(crate) async fn invite_helper<'a>( }; // Generate event id - let expected_event_id = Box::::try_from(&*format!( + let expected_event_id = format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); let response = db .sending @@ -1025,7 +1028,7 @@ pub(crate) async fn invite_helper<'a>( user_id.server_name(), create_invite::v2::Request { room_id, - event_id: &expected_event_id, + event_id: expected_event_id, room_version: &room_version_id, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 0d00610..e521943 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -5,13 +5,8 @@ use ruma::{ r0::message::{get_message_events, send_message_event}, }, events::EventType, - EventId, -}; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, }; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -67,11 +62,10 @@ pub async fn send_message_event_route( )); } - let event_id = Box::::try_from( - utils::string_from_bytes(&response) - .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, - ) - .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; + let event_id = utils::string_from_bytes(&response) + .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? 
+ .try_into() + .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; return Ok(send_message_event::Response { event_id }.into()); } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 97b3f48..83571f1 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -26,12 +26,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use std::{ - cmp::max, - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] @@ -93,12 +88,11 @@ pub async fn create_room_route( .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = Box::::try_from(format!( - "#{}:{}", - localpart, - db.globals.server_name(), - )) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let alias = + RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name())) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") + })?; if db.rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 1060d91..2e372f9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -10,7 +10,7 @@ use ruma::{ }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, sync::Arc, time::Duration, }; @@ -298,10 +298,9 @@ async fn sync_helper( })?; if let Some(state_key) = &pdu.state_key { - let user_id = - Box::::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; // The membership was and still is invite or join if matches!( @@ -427,7 +426,7 @@ async fn sync_helper( } if let Some(state_key) = &state_event.state_key { - let user_id = Box::::try_from(state_key.clone()) + let user_id = UserId::parse(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; if user_id == sender_user { diff --git a/src/database.rs b/src/database.rs index 056d49a..84ca68d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -476,11 +476,9 @@ impl Database { if db.globals.database_version()? 
< 6 { // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { - let room_id = - Box::::try_from(utils::string_from_bytes(&roomid).unwrap()) - .unwrap(); - - db.rooms.update_joined_count(&room_id, &db)?; + let string = utils::string_from_bytes(&roomid).unwrap(); + let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); + db.rooms.update_joined_count(room_id, &db)?; } db.globals.bump_database_version(6)?; @@ -587,10 +585,9 @@ impl Database { .get(&seventid) .unwrap() .unwrap(); - let event_id = - Box::::try_from(utils::string_from_bytes(&event_id).unwrap()) - .unwrap(); - let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); + let string = utils::string_from_bytes(&event_id).unwrap(); + let event_id = <&EventId>::try_from(string.as_str()).unwrap(); + let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); diff --git a/src/database/admin.rs b/src/database/admin.rs index 07a487e..1e5c47c 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,10 +1,10 @@ -use std::{convert::TryFrom, sync::Arc}; +use std::{convert::TryInto, sync::Arc}; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - RoomAliasId, UserId, + UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -33,18 +33,16 @@ impl Admin { let guard = db.read().await; - let conduit_user = - Box::::try_from(format!("@conduit:{}", guard.globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = guard .rooms .id_from_alias( - &Box::::try_from(format!( - "#admins:{}", - guard.globals.server_name() - )) - .expect("#admins:server_name is a valid room alias"), + format!("#admins:{}", guard.globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), ) .unwrap(); diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 3010a37..56963c0 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -6,7 +6,7 @@ use ruma::{ }, RoomId, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -231,7 +231,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup session_id is invalid.") })?; - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ebd0941..f8d2cad 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -434,7 +434,7 @@ impl Rooms { None => continue, }; - let user_id = match Box::::try_from(state_key) { + let user_id = match UserId::parse(state_key) { Ok(id) => id, Err(_) => continue, }; @@ -871,12 +871,10 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = Arc::from( - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, - ); + let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; self.shorteventid_cache .lock() @@ -1169,7 +1167,7 @@ impl Rooms { self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + EventId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1420,7 +1418,7 @@ impl Rooms { } // if the state_key fails - let target_user_id = Box::::try_from(state_key.clone()) + let target_user_id = UserId::parse(state_key.clone()) .expect("This state_key was previously validated"); let content = serde_json::from_str::(pdu.content.get()) @@ -1476,10 +1474,9 @@ impl Rooms { if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &Box::::try_from(format!( - "#admins:{}", - db.globals.server_name() - )) + <&RoomAliasId>::try_from( + format!("#admins:{}", db.globals.server_name()).as_str(), + ) .expect("#admins:server_name is a valid room alias"), )? .as_ref() @@ -1530,7 +1527,7 @@ impl Rooms { } "get_auth_chain" => { if args.len() == 1 { - if let Ok(event_id) = Box::::try_from(args[0]) { + if let Ok(event_id) = EventId::parse_arc(args[0]) { if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") @@ -1541,12 +1538,12 @@ impl Rooms { ) })?; - let room_id = Box::::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; let start = Instant::now(); let count = server_server::get_auth_chain( - &room_id, - vec![Arc::from(event_id)], + room_id, + vec![event_id], db, )? 
.count(); @@ -1569,7 +1566,7 @@ impl Rooms { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash( @@ -1624,7 +1621,7 @@ impl Rooms { } "get_pdu" => { if args.len() == 1 { - if let Ok(event_id) = Box::::try_from(args[0]) { + if let Ok(event_id) = EventId::parse(args[0]) { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; @@ -2083,7 +2080,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = Box::::try_from(&*format!( + pdu.event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2758,7 +2755,7 @@ impl Rooms { .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -2819,7 +2816,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2908,7 +2905,7 @@ impl Rooms { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in alias_roomid is invalid unicode.") })?) .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) @@ -2951,7 +2948,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, @@ -3039,7 +3036,7 @@ impl Rooms { Ok(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - Box::::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) 
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) @@ -3056,7 +3053,7 @@ impl Rooms { prefix.push(0xff); self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + ServerName::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3089,7 +3086,7 @@ impl Rooms { prefix.push(0xff); self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3111,7 +3108,7 @@ impl Rooms { prefix.push(0xff); self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3159,7 +3156,7 @@ impl Rooms { self.roomuseroncejoinedids .scan_prefix(prefix) .map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3185,7 +3182,7 @@ impl Rooms { self.roomuserid_invitecount .scan_prefix(prefix) .map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3238,7 +3235,7 @@ impl Rooms { self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) .map(|(key, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3264,7 +3261,7 @@ impl Rooms { self.userroomid_invitestate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3337,7 +3334,7 @@ impl Rooms { self.userroomid_leftstate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 365211b..eb2d342 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -11,7 +11,7 @@ use ruma::{ }; use std::{ collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, mem, sync::Arc, }; @@ -97,7 +97,7 @@ impl RoomEdus { let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = Box::::try_from( + let user_id = UserId::parse( utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) .map_err(|_| { Error::bad_database("Invalid readreceiptid userid bytes in db.") @@ -310,17 +310,13 @@ impl RoomEdus { let mut user_ids = HashSet::new(); - for user_id in self - .typingid_userid - .scan_prefix(prefix) - .map(|(_, user_id)| { - Box::::try_from(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) - }) - { - user_ids.insert(user_id?); + for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { + let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + + user_ids.insert(user_id); } Ok(SyncEphemeralRoomEvent { @@ -518,7 +514,7 @@ impl RoomEdus { .iter_from(&*first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { - let user_id = Box::::try_from( + let user_id = UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/sending.rs b/src/database/sending.rs index c27b573..1e180d4 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, fmt::Debug, sync::Arc, time::{Duration, Instant}, @@ -583,19 +583,18 @@ impl Sending { } } - let userid = - Box::::try_from(utils::string_from_bytes(user).map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user id in db."), - ) - })?; + let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| { + ( + kind.clone(), + Error::bad_database("Invalid push user string in db."), + ) + })?) + .map_err(|_| { + ( + kind.clone(), + Error::bad_database("Invalid push user id in db."), + ) + })?; let mut senderkey = user.clone(); senderkey.push(0xff); @@ -732,7 +731,7 @@ impl Sending { })?; ( - OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { + OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { @@ -771,7 +770,7 @@ impl Sending { })?; ( - OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { + OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { diff --git a/src/database/users.rs b/src/database/users.rs index 4a08472..d4bf489 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; +use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; use super::abstraction::Tree; @@ -62,9 +62,8 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result { - let admin_room_alias_id = - Box::::try_from(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); rooms.is_joined(user_id, &admin_room_id) @@ -98,11 +97,9 @@ impl Users { })?; Ok(Some(( - Box::::try_from(utils::string_from_bytes(user_bytes).map_err( - |_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - }, - )?) + UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { + Error::bad_database("User ID in token_userdeviceid is invalid unicode.") + })?) 
.map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid.") })?, @@ -117,7 +114,7 @@ impl Users { #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) @@ -189,7 +186,7 @@ impl Users { .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - Box::::try_from(s) + s.try_into() .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) .transpose() @@ -686,7 +683,7 @@ impl Users { } }) .map(|(_, bytes)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) diff --git a/src/pdu.rs b/src/pdu.rs index 3c95597..c1f3d27 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, ops::Deref}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, ops::Deref}; use tracing::warn; /// Content hashes of a PDU. @@ -337,12 +337,13 @@ pub(crate) fn gen_event_id_canonical_json( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash(&value, &RoomVersionId::V6) .expect("ruma can calculate reference hashes") - )) + ) + .try_into() .expect("ruma's reference hashes are valid event ids"); Ok((event_id, value)) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 2cff2f5..4b8d5de 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -20,7 +20,6 @@ use { }, ruma::api::{AuthScheme, IncomingRequest}, std::collections::BTreeMap, - std::convert::TryFrom, std::io::Cursor, tracing::{debug, warn}, }; @@ -103,8 +102,7 @@ where .unwrap() }, |string| { - Box::::try_from(string.expect("parsing to string always works")) - .unwrap() + UserId::parse(string.expect("parsing to string always works")).unwrap() }, ); @@ -171,7 +169,7 @@ where } }; - let origin = match Box::::try_from(origin_str) { + let origin = match ServerName::parse(origin_str) { Ok(s) => s, _ => { warn!( diff --git a/src/server_server.rs b/src/server_server.rs index 8a50d23..b0e3f0f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -544,12 +544,11 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { return Json("Federation is disabled.".to_owned()); } - let mut verify_keys = BTreeMap::new(); + let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); verify_keys.insert( - Box::::try_from( - format!("ed25519:{}", db.globals.keypair().version()).as_str(), - ) - .expect("found invalid server signing keys in DB"), + format!("ed25519:{}", db.globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), VerifyKey { key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, @@ -730,7 +729,7 @@ pub async fn send_transaction_message_route( // 0. 
Check the server is in the room let room_id = match value .get("room_id") - .and_then(|id| Box::::try_from(id.as_str()?).ok()) + .and_then(|id| RoomId::parse(id.as_str()?).ok()) { Some(id) => id, None => { @@ -2354,10 +2353,10 @@ pub fn get_event_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = Box::::try_from(room_id_str) + let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } @@ -2408,7 +2407,7 @@ pub fn get_missing_events_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let event_room_id = Box::::try_from(room_id_str) + let event_room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if event_room_id != body.room_id { @@ -2476,14 +2475,14 @@ pub fn get_event_authorization_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = Box::::try_from(room_id_str) + let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } - let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2948,7 +2947,7 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) .expect("ruma can calculate reference hashes") @@ -3224,7 +3223,7 @@ pub(crate) async fn fetch_required_signing_keys( let fetch_res = fetch_signing_keys( db, - &Box::::try_from(&**signature_server).map_err(|_| { + signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, signature_ids, @@ -3262,19 +3261,20 @@ fn get_server_keys_from_cache( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); if let Some((time, tries)) = db .globals .bad_event_ratelimiter .read() .unwrap() - .get(&event_id) + .get(event_id) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); @@ -3308,7 +3308,7 @@ fn get_server_keys_from_cache( let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - let origin = &Box::::try_from(&**signature_server).map_err(|_| { + let origin = 
<&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?; @@ -3327,7 +3327,7 @@ fn get_server_keys_from_cache( if !contains_all_ids(&result) { trace!("Signing key not loaded for {}", origin); - servers.insert(origin.clone(), BTreeMap::new()); + servers.insert(origin.to_owned(), BTreeMap::new()); } pub_key_map.insert(origin.to_string(), result); From 58ea081762adb5f14ecaadc3e16f7b6dddcaed43 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 16:04:19 +0100 Subject: [PATCH 0844/1727] Use int! macro instead of Int::from --- src/client_server/report.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 2e6527d..ae06984 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,8 +1,11 @@ -use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{ + database::{admin::AdminCommand, DatabaseGuard}, + ConduitResult, Error, Ruma, +}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, - Int, + int, }; #[cfg(feature = "conduit_bin")] @@ -33,7 +36,7 @@ pub async fn report_event_route( } }; - if body.score > Int::from(0) || body.score < Int::from(-100) { + if body.score > int!(0) || body.score < int!(-100) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", From f71245504726a717c8a85ecd99f83a7395bd3c2b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 16:35:59 +0100 Subject: [PATCH 0845/1727] Reduce EventId copying --- src/client_server/membership.rs | 3 ++- src/database/rooms.rs | 14 ++++++++++---- src/server_server.rs | 22 ++++++++-------------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6c7b721..e6c9d4b 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -31,6 +31,7 @@ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, + iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; @@ -740,7 +741,7 @@ async fn join_room_by_id_helper( db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - &[pdu.event_id.clone()], + iter::once(&*pdu.event_id), db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f8d2cad..4c092bf 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -36,6 +36,8 @@ use std::{ borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, + fmt::Debug, + iter, mem::size_of, sync::{Arc, Mutex, RwLock}, time::Instant, @@ -1191,7 +1193,11 @@ impl Rooms { /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. 
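Note on the identifier changes in the "Simplify identifier parsing code" patch above: the verbose `Box::<T>::try_from(..)` calls give way to the `parse` constructors on ruma's identifier types, `<&T>::try_from(..)` is kept where only a borrowed identifier is needed, and `parse_arc` covers the shared case. A small sketch of the three forms, assuming the pinned ruma revision; the function and its parameters are illustrative, the constructors themselves all appear in the hunks above:

    use std::{convert::TryFrom, sync::Arc};

    use ruma::{EventId, RoomId, UserId};

    fn parse_identifiers(user: String, room: &str, event: &str) {
        // Owned form: replaces the old `Box::<UserId>::try_from(user)` spelling;
        // at this revision the owned identifier is still a Box.
        let user_id: Box<UserId> = UserId::parse(user).expect("valid user id");

        // Borrowed form: validates in place without allocating, as the
        // `<&RoomId>::try_from(room_id_str)` call sites above do.
        let room_id: &RoomId = <&RoomId>::try_from(room).expect("valid room id");

        // Shared form: `parse_arc` yields an Arc for identifiers that end up in caches
        // or in `get_auth_chain` calls.
        let event_id: Arc<EventId> = EventId::parse_arc(event).expect("valid event id");

        println!("{} / {} / {}", user_id, room_id, event_id);
    }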
#[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { + pub fn replace_pdu_leaves<'a>( + &self, + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1255,11 +1261,11 @@ impl Rooms { /// /// Returns pdu id #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu( + pub fn append_pdu<'a>( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: &[Box], + leaves: impl IntoIterator + Debug, db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -2104,7 +2110,7 @@ impl Rooms { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - &[pdu.event_id.clone()], + iter::once(&*pdu.event_id), db, )?; diff --git a/src/server_server.rs b/src/server_server.rs index b0e3f0f..ca6bb3f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -64,6 +64,7 @@ use std::{ future::Future, mem, net::{IpAddr, SocketAddr}, + ops::Deref, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, @@ -1636,7 +1637,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, &incoming_pdu, val, - extremities, + extremities.iter().map(Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -1821,7 +1822,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, &incoming_pdu, val, - extremities, + extremities.iter().map(Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -2114,11 +2115,11 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] -fn append_incoming_pdu( +fn append_incoming_pdu<'a>( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: HashSet>, + new_room_leaves: impl IntoIterator + Clone + Debug, state_ids_compressed: HashSet, soft_fail: bool, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex @@ -2135,19 +2136,12 @@ fn append_incoming_pdu( if soft_fail { db.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves( - &pdu.room_id, - &new_room_leaves.into_iter().collect::>(), - )?; + db.rooms + .replace_pdu_leaves(&pdu.room_id, new_room_leaves.clone())?; return Ok(None); } - let pdu_id = db.rooms.append_pdu( - pdu, - pdu_json, - &new_room_leaves.into_iter().collect::>(), - db, - )?; + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; for appservice in db.appservice.all()? { if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ From 0183d003d0bfd864eab08499fb7385b4c8e9df0a Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 15 Dec 2021 13:58:25 +0100 Subject: [PATCH 0846/1727] Revert rename of Ruma<_> parameters --- src/client_server/membership.rs | 12 ++++++------ src/client_server/push.rs | 10 +++++----- src/client_server/sync.rs | 13 ++++++------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e6c9d4b..e28f9a3 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -93,15 +93,15 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") + post("/_matrix/client/r0/join/<_>", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> ConduitResult { - let body = req.body; - let sender_user = req.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let body = body.body; let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { @@ -129,7 +129,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - req.sender_user.as_deref(), + Some(sender_user), &room_id, &servers, body.third_party_signed.as_ref(), diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 64f27f1..a8ba1a2 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -105,15 +105,15 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. 
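Note on the "Revert rename of Ruma<_> parameters" patch above: the handlers settle on one idiom, where the extractor argument is always named `body`, the auth data is taken out of it first, and only then is the inner request moved into a local that shadows the name. A minimal sketch of that idiom; `Ruma<T>`, `SomeRequest` and the route function here are hypothetical stand-ins reduced to the fields the hunks above touch, not Conduit's real types:

    use ruma::UserId;

    // Stand-in for Conduit's request extractor, cut down to the two fields used below.
    struct Ruma<T> {
        body: T,
        sender_user: Option<Box<UserId>>,
    }

    // Stand-in for a request type.
    struct SomeRequest {
        since: Option<String>,
    }

    fn some_route(body: Ruma<SomeRequest>) {
        // Pull the auth data out first...
        let sender_user = body.sender_user.expect("user is authenticated");
        // ...then shadow `body` with the inner request, as the hunks above do.
        let body = body.body;

        println!("handling request for {} since {:?}", sender_user, body.since);
    }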
#[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> ConduitResult { - let sender_user = req.sender_user.as_ref().expect("user is authenticated"); - let body = req.body; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; if body.scope != "global" { return Err(Error::BadRequest( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2e372f9..9ba3b7f 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,17 +54,16 @@ use rocket::{get, tokio}; /// `since` will be cached #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") + get("/_matrix/client/r0/sync", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> Result, RumaResponse> { - let body = req.body; - - let sender_user = req.sender_user.expect("user is authenticated"); - let sender_device = req.sender_device.expect("user is authenticated"); + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let body = body.body; let arc_db = Arc::new(db); From 34d3f74f363719ab60263da62477cf0cd56bbbb0 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 17:44:52 +0100 Subject: [PATCH 0847/1727] Use Arc for EventIds in PDUs Upgrades Ruma again to make this work. --- Cargo.lock | 36 ++++++++++++++-------------- Cargo.toml | 4 ++-- src/client_server/account.rs | 4 ++++ src/client_server/membership.rs | 7 +++++- src/client_server/message.rs | 2 +- src/client_server/redact.rs | 4 +++- src/client_server/room.rs | 14 +++++------ src/client_server/state.rs | 4 +++- src/database/rooms.rs | 12 +++++----- src/pdu.rs | 28 ++++++++++++---------- src/server_server.rs | 42 +++++++++++++-------------------- 11 files changed, 81 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b25b47..fbf4b3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1984,7 +1984,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "assign", "js_int", @@ -2005,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "bytes", "http", @@ -2021,7 +2021,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2032,7 +2032,7 @@ dependencies = [ 
[[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "ruma-api", "ruma-common", @@ -2046,7 +2046,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "assign", "bytes", @@ -2066,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "indexmap", "js_int", @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "indoc", "js_int", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2108,7 +2108,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2123,7 +2123,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2137,7 +2137,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" 
dependencies = [ "thiserror", ] @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "bytes", "form_urlencoded", @@ -2197,7 +2197,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2208,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2225,7 +2225,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b24afb5..02159e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "bba7d624425da2c65a834bbd0e633b7577488cdf", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = 
["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -40,7 +40,7 @@ serde_json = { version = "1.0.67", features = ["raw_value"] } # Used for appservice registration files serde_yaml = "0.8.20" # Used for pdu definition -serde = "1.0.130" +serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers rand = "0.8.4" # Used to hash passwords diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 3149187..c4e118c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -306,6 +306,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -463,6 +464,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -485,6 +487,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -701,6 +704,7 @@ pub async fn deactivate_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }; let mutex_state = Arc::clone( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e28f9a3..cede51f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -286,6 +286,7 @@ pub async fn ban_user_route( third_party_invite: None, blurhash: db.users.blurhash(&body.user_id)?, reason: None, + join_authorized_via_users_server: None, }), |event| { serde_json::from_str(event.content.get()) @@ -604,6 +605,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), ); @@ -757,6 +759,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }; db.rooms.build_and_append_pdu( @@ -906,6 +909,7 @@ pub(crate) async fn invite_helper<'a>( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("member event is valid value"); @@ -939,7 +943,7 @@ pub(crate) async fn invite_helper<'a>( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender_user.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() @@ -1117,6 +1121,7 @@ pub(crate) async fn invite_helper<'a>( third_party_invite: None, blurhash: db.users.blurhash(user_id)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index e521943..60c756a 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -98,7 +98,7 @@ pub async fn send_message_event_route( db.flush()?; - 
Ok(send_message_event::Response::new(event_id).into()) + Ok(send_message_event::Response::new((*event_id).to_owned()).into()) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 7435c5c..85de233 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -25,6 +25,7 @@ pub async fn redact_event_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; let mutex_state = Arc::clone( db.globals @@ -45,7 +46,7 @@ pub async fn redact_event_route( .expect("event is valid, we just created it"), unsigned: None, state_key: None, - redacts: Some(body.event_id.clone()), + redacts: Some(body.event_id.into()), }, sender_user, &body.room_id, @@ -57,5 +58,6 @@ pub async fn redact_event_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 83571f1..52d2542 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,6 +22,7 @@ use ruma::{ }, EventType, }, + int, serde::{CanonicalJsonObject, JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; @@ -195,6 +196,7 @@ pub async fn create_room_route( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -220,11 +222,11 @@ pub async fn create_room_route( }); let mut users = BTreeMap::new(); - users.insert(sender_user.clone(), 100.into()); + users.insert(sender_user.clone(), int!(100)); if preset == create_room::RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); + users.insert(invite_.clone(), int!(100)); } } @@ -569,7 +571,7 @@ pub async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - tombstone_event_id, + (*tombstone_event_id).to_owned(), )); // Send a m.room.create event containing a predecessor field and the applicable room_version @@ -633,6 +635,7 @@ pub async fn upgrade_room_route( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -697,10 +700,7 @@ pub async fn upgrade_room_route( .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max( - 50.into(), - power_levels_event_content.users_default + 1.into(), - ); + let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); power_levels_event_content.events_default = new_level; power_levels_event_content.invite = new_level; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 0ba2062..e42694a 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -52,6 +52,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(send_state_event::Response { event_id }.into()) } @@ -93,6 +94,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(send_state_event::Response { event_id }.into()) } @@ -267,7 +269,7 @@ async fn send_state_event_for_key_helper( event_type: 
EventType, json: &Raw, state_key: String, -) -> Result> { +) -> Result> { let sender_user = sender; // TODO: Review this check, error if event is unparsable, use event type, allow alias if it diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4c092bf..fb9ecbf 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1162,14 +1162,14 @@ impl Rooms { /// Returns the leaf pdus of a room. #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - EventId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1178,7 +1178,7 @@ impl Rooms { } #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1953,7 +1953,7 @@ impl Rooms { room_id: &RoomId, db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { + ) -> Result> { let PduBuilder { event_type, content, @@ -2019,7 +2019,7 @@ impl Rooms { } let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() @@ -2086,7 +2086,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = EventId::parse(format!( + pdu.event_id = EventId::parse_arc(format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") diff --git a/src/pdu.rs b/src/pdu.rs index c1f3d27..db9375e 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, ops::Deref}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc}; use tracing::warn; /// Content hashes of a PDU. 
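The hunks above and below replace boxed event ids with reference-counted ones (`EventId::parse_arc`, `Arc` throughout `PduEvent` and the leaf/reference tables). Two points are worth spelling out with a minimal stand-alone sketch rather than Conduit code: serde only ships `Serialize`/`Deserialize` impls for `Rc`/`Arc` behind its `rc` cargo feature (enabled in the Cargo.toml change above), and cloning an `Arc` id is a cheap refcount bump instead of a fresh allocation, which matters because the same event id ends up in `prev_events`, `auth_events` and the state-resolution graph at once. `Arc<str>` stands in for the actual event-id type here:

```rust
use std::sync::Arc;

fn main() {
    // Box-style id: every clone reallocates and copies the underlying string.
    let boxed: Box<str> = "$someevent:server.name".into();
    let _copy = boxed.clone();

    // Arc-style id: a clone only bumps a reference count, so the same id can
    // be held by prev_events, auth_events and the state-res graph at once.
    let shared: Arc<str> = Arc::from("$someevent:server.name");
    let alias = Arc::clone(&shared);
    assert_eq!(Arc::strong_count(&shared), 2);
    assert_eq!(&*alias, "$someevent:server.name");
}
```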
@@ -25,7 +25,7 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { - pub event_id: Box, + pub event_id: Arc, pub room_id: Box, pub sender: Box, pub origin_server_ts: UInt, @@ -34,11 +34,11 @@ pub struct PduEvent { pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec>, + pub prev_events: Vec>, pub depth: UInt, - pub auth_events: Vec>, + pub auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option>, + pub redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, @@ -266,7 +266,9 @@ impl PduEvent { } impl state_res::Event for PduEvent { - fn event_id(&self) -> &EventId { + type Id = Arc; + + fn event_id(&self) -> &Self::Id { &self.event_id } @@ -294,16 +296,16 @@ impl state_res::Event for PduEvent { self.state_key.as_deref() } - fn prev_events(&self) -> Box + '_> { - Box::new(self.prev_events.iter().map(Deref::deref)) + fn prev_events(&self) -> Box + '_> { + Box::new(self.prev_events.iter()) } - fn auth_events(&self) -> Box + '_> { - Box::new(self.auth_events.iter().map(Deref::deref)) + fn auth_events(&self) -> Box + '_> { + Box::new(self.auth_events.iter()) } - fn redacts(&self) -> Option<&EventId> { - self.redacts.as_deref() + fn redacts(&self) -> Option<&Self::Id> { + self.redacts.as_ref() } } @@ -357,7 +359,7 @@ pub struct PduBuilder { pub content: Box, pub unsigned: Option>, pub state_key: Option, - pub redacts: Option>, + pub redacts: Option>, } /// Direct conversion prevents loss of the empty `state_key` that ruma requires. diff --git a/src/server_server.rs b/src/server_server.rs index ca6bb3f..594152a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -995,13 +995,9 @@ pub(crate) async fn handle_incoming_pdu<'a>( } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let mut graph = HashMap::new(); + let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu - .prev_events - .iter() - .map(|x| Arc::from(&**x)) - .collect(); + let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); let mut amount = 0; @@ -1020,7 +1016,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); continue; } @@ -1031,27 +1027,27 @@ pub(crate) async fn handle_incoming_pdu<'a>( amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(Arc::from(&**prev_prev))); + todo_outlier_stack.push(dbg!(prev_prev.clone())); } } graph.insert( - (*prev_event_id).to_owned(), + prev_event_id.clone(), pdu.prev_events.iter().cloned().collect(), ); } else { // Time based check failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Get json failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } } else { // Fetch and handle failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } } @@ -1401,14 +1397,13 @@ async fn upgrade_outlier_to_timeline_pdu( .get_statekey_from_short(k) .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - state.insert(k, (*id).to_owned()); + state.insert(k, id.clone()); starting_events.push(id); } auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).to_owned()) .collect(), ); @@ -1435,7 +1430,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, Arc::from(event_id))) + Ok((shortstatekey, event_id)) }) .collect::>()?, ), @@ -1752,7 +1747,6 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .map(|event_id| (*event_id).to_owned()) .collect(), ); } @@ -1761,11 +1755,7 @@ async fn upgrade_outlier_to_timeline_pdu( .into_iter() .map(|map| { map.into_iter() - .map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - .map(|k| (k, (*id).to_owned())) - }) + .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id))) .collect::>>() }) .collect::>() @@ -2136,8 +2126,7 @@ fn append_incoming_pdu<'a>( if soft_fail { db.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms - .replace_pdu_leaves(&pdu.room_id, new_room_leaves.clone())?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } @@ -2282,7 +2271,7 @@ fn get_auth_chain_inner( event_id: &EventId, db: &Database, ) -> Result> { - let mut todo = vec![event_id.to_owned()]; + let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { @@ -2298,7 +2287,7 @@ fn get_auth_chain_inner( if !found.contains(&sauthevent) { found.insert(sauthevent); - todo.push(auth_event.to_owned()); + todo.push(auth_event.clone()); } } } @@ -2676,6 +2665,7 @@ pub fn create_join_event_template_route( membership: MembershipState::Join, third_party_invite: None, reason: None, + join_authorized_via_users_server: None, }) .expect("member event is valid value"); @@ -2709,7 +2699,7 @@ pub fn create_join_event_template_route( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: body.room_id.clone(), sender: body.user_id.clone(), origin_server_ts: utils::millis_since_unix_epoch() From 3d25d46dc5b14c506692ea8a82151b6e4f39fafd Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Wed, 20 Oct 2021 06:20:34 +0200 Subject: [PATCH 0848/1727] Use simple BTreeMap to store uiaa requests some uiaa requests contain plaintext passwords which should never be persisted to disk. 
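Before the diff below, a reduced sketch of the storage pattern this commit switches to: the user-interactive auth request is held in a process-local `RwLock<BTreeMap<...>>`, keyed the same `user \xff device \xff session` way the old tree was, so the plaintext auth payload only ever lives in memory and is gone after a restart. The type and method names here are illustrative stand-ins, not the real Conduit items:

```rust
use std::collections::BTreeMap;
use std::sync::RwLock;

// Hypothetical stand-in for the UIAA request store: data lives only in memory,
// so plaintext auth payloads never reach the database files on disk.
struct RequestStore {
    requests: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl RequestStore {
    fn key(user: &str, device: &str, session: &str) -> Vec<u8> {
        // Same layout as the old on-disk tree: fields joined by 0xff separators.
        let mut key = user.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(device.as_bytes());
        key.push(0xff);
        key.extend_from_slice(session.as_bytes());
        key
    }

    fn set_request(&self, user: &str, device: &str, session: &str, request: &[u8]) {
        self.requests
            .write()
            .unwrap()
            .insert(Self::key(user, device, session), request.to_vec());
    }

    fn get_request(&self, user: &str, device: &str, session: &str) -> Option<Vec<u8>> {
        self.requests
            .read()
            .unwrap()
            .get(&Self::key(user, device, session))
            .cloned()
    }
}

fn main() {
    let store = RequestStore { requests: RwLock::new(BTreeMap::new()) };
    store.set_request("@alice:server.name", "DEVICE", "session1", br#"{"auth": "..."}"#);
    assert!(store.get_request("@alice:server.name", "DEVICE", "session1").is_some());
}
```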
Currently there is no cleanup implemented (you have to restart conduit) --- src/database.rs | 3 +-- src/database/uiaa.rs | 16 +++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/database.rs b/src/database.rs index 84ca68d..83b0fd5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -250,8 +250,7 @@ impl Database { }, uiaa: uiaa::Uiaa { userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: builder - .open_tree("userdevicesessionid_uiaarequest")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), }, rooms: rooms::Rooms { edus: rooms::RoomEdus { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 1c0fb56..2ecca93 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,4 +1,6 @@ use std::sync::Arc; +use std::sync::RwLock; +use std::collections::BTreeMap; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ @@ -18,7 +20,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: Arc, // UiaaRequest = canonical json value + pub(super) userdevicesessionid_uiaarequest: RwLock, Vec>>, // UiaaRequest = canonical json value } impl Uiaa { @@ -153,10 +155,10 @@ impl Uiaa { userdevicesessionid.push(0xff); userdevicesessionid.extend_from_slice(session.as_bytes()); - self.userdevicesessionid_uiaarequest.insert( - &userdevicesessionid, - &serde_json::to_vec(request).expect("json value to vec always works"), - )?; + self.userdevicesessionid_uiaarequest.write().unwrap().insert( + userdevicesessionid, + serde_json::to_vec(request).expect("json value to vec always works"), + ); Ok(()) } @@ -173,8 +175,8 @@ impl Uiaa { userdevicesessionid.push(0xff); userdevicesessionid.extend_from_slice(session.as_bytes()); - self.userdevicesessionid_uiaarequest - .get(&userdevicesessionid)? + self.userdevicesessionid_uiaarequest.read().unwrap() + .get(&userdevicesessionid) .map(|bytes| { serde_json::from_str::( &utils::string_from_bytes(&bytes) From fe8cfe05569e667b03ee855a2463964a5a029661 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Tue, 14 Dec 2021 17:55:28 +0100 Subject: [PATCH 0849/1727] Add database migration to remove stored passwords uiaarequests can contain plaintext passwords, which were stored on disk --- src/database.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/database.rs b/src/database.rs index 83b0fd5..8b29b22 100644 --- a/src/database.rs +++ b/src/database.rs @@ -754,6 +754,15 @@ impl Database { println!("Migration: 9 -> 10 finished"); } + + if db.globals.database_version()? < 11 { + db._db + .open_tree("userdevicesessionid_uiaarequest")? 
+ .clear()?; + db.globals.bump_database_version(11)?; + + println!("Migration: 10 -> 11 finished"); + } } let guard = db.read().await; From 0725b69abb7453df534a764947b6015ffe8293c4 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 18 Dec 2021 18:46:38 +0100 Subject: [PATCH 0850/1727] Clean up userdevicesessionid_uiaarequest BTreeMap There is no need to encode or decode anything as we are not saving to disk --- src/database/uiaa.rs | 52 ++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 2ecca93..461a3e2 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,6 +1,6 @@ +use std::collections::BTreeMap; use std::sync::Arc; use std::sync::RwLock; -use std::collections::BTreeMap; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ @@ -20,7 +20,8 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: RwLock, Vec>>, // UiaaRequest = canonical json value + pub(super) userdevicesessionid_uiaarequest: + RwLock>, } impl Uiaa { @@ -149,16 +150,17 @@ impl Uiaa { session: &str, request: &CanonicalJsonValue, ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest.write().unwrap().insert( - userdevicesessionid, - serde_json::to_vec(request).expect("json value to vec always works"), - ); + self.userdevicesessionid_uiaarequest + .write() + .unwrap() + .insert( + ( + user_id.to_owned(), + device_id.to_string(), + session.to_string(), + ), + request.to_owned(), + ); Ok(()) } @@ -169,22 +171,16 @@ impl Uiaa { device_id: &DeviceId, session: &str, ) -> Result> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest.read().unwrap() - .get(&userdevicesessionid) - .map(|bytes| { - serde_json::from_str::( - &utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?, - ) - .map_err(|_| Error::bad_database("Invalid uiaa request in db.")) - }) - .transpose() + Ok(self + .userdevicesessionid_uiaarequest + .read() + .unwrap() + .get(&( + user_id.to_owned(), + device_id.to_string(), + session.to_string(), + )) + .map(|j| j.to_owned())) } fn update_uiaa_session( From 720a54b3bb74301eaf08f54edd163995bf5ef7fa Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 18 Dec 2021 19:05:18 +0100 Subject: [PATCH 0851/1727] Use String to store UserId for uiaa request Fixes compilation error after ruma upgrade --- src/database/uiaa.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 461a3e2..6a5f7a3 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -21,7 +21,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock>, + RwLock>, } impl Uiaa { @@ -155,7 +155,7 @@ impl Uiaa { .unwrap() .insert( ( - user_id.to_owned(), + 
user_id.to_string(), device_id.to_string(), session.to_string(), ), @@ -176,7 +176,7 @@ impl Uiaa { .read() .unwrap() .get(&( - user_id.to_owned(), + user_id.to_string(), device_id.to_string(), session.to_string(), )) From 7857da8a0b6322618b12e4b41c6945bcd7dee9ef Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 20 Dec 2021 15:46:36 +0100 Subject: [PATCH 0852/1727] Add ability to remove an appservice --- APPSERVICES.md | 8 ++++++++ src/database/admin.rs | 4 ++++ src/database/appservice.rs | 9 +++++++++ src/database/rooms.rs | 9 +++++++++ 4 files changed, 30 insertions(+) diff --git a/APPSERVICES.md b/APPSERVICES.md index 26c34cc..894bc6f 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -42,6 +42,14 @@ could help. ## Appservice-specific instructions +### Remove an appservice + +To remove an appservice go to your admin room and execute + +```@conduit:your.server.name: unregister_appservice ``` + +where `` one of the output of `list_appservices`. + ### Tested appservices These appservices have been tested and work with Conduit without any extra steps: diff --git a/src/database/admin.rs b/src/database/admin.rs index 1e5c47c..0702bcd 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -12,6 +12,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), + UnregisterAppservice(String), ListAppservices, SendMessage(RoomMessageEventContent), } @@ -96,6 +97,9 @@ impl Admin { AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } + AdminCommand::UnregisterAppservice(service_name) => { + guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { let count = appservices.len(); diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 7cc9137..caa48ad 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -27,6 +27,15 @@ impl Appservice { Ok(()) } + /** + * Remove an appservice registration + * service_name is the name you send to register the service + */ + pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.id_appserviceregistrations.remove(service_name.as_bytes())?; + Ok(()) + } + pub fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fb9ecbf..612bd51 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1528,6 +1528,15 @@ impl Rooms { )); } } + "unregister_appservice" => { + if args.len() == 1 { + db.admin.send(AdminCommand::UnregisterAppservice(args[0].to_owned())); + } else { + db.admin.send(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain("Missing appservice identifier"), + )); + } + } "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } From b6c9582cf4e9255e0610a63849bb3c5113be16e2 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Wed, 22 Dec 2021 13:09:56 +0100 Subject: [PATCH 0853/1727] Fix doc style comment according to Rust; VSCode added line breaks --- src/database/appservice.rs | 12 +++++++----- src/database/rooms.rs | 8 ++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index caa48ad..910964a 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -27,12 +27,14 @@ impl Appservice { Ok(()) } - /** - * Remove an appservice 
registration - * service_name is the name you send to register the service - */ + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { - self.id_appserviceregistrations.remove(service_name.as_bytes())?; + self.id_appserviceregistrations + .remove(service_name.as_bytes())?; Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 612bd51..775e2f8 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1530,10 +1530,14 @@ impl Rooms { } "unregister_appservice" => { if args.len() == 1 { - db.admin.send(AdminCommand::UnregisterAppservice(args[0].to_owned())); + db.admin.send(AdminCommand::UnregisterAppservice( + args[0].to_owned(), + )); } else { db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain("Missing appservice identifier"), + RoomMessageEventContent::text_plain( + "Missing appservice identifier", + ), )); } } From 7f2445be6ca7798ec25458e5447b23e7aeea1f7f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Wed, 22 Dec 2021 16:48:27 +0100 Subject: [PATCH 0854/1727] On unregister_appservice(service_name), remove the appservice service_name from cache too --- src/database/appservice.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 910964a..847d747 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -35,6 +35,10 @@ impl Appservice { pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; + self.cached_registrations. + write(). + unwrap(). + remove(service_name); Ok(()) } From c4a438460e0537e465f5b93514fd05b66a03ad37 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Wed, 22 Dec 2021 19:26:23 +0100 Subject: [PATCH 0855/1727] Use Box to store UserID and DeviceID Userid and DeviceID are of unknown size, use Box to be able to store them into the userdevicesessionid_uiaarequest BTreeMap --- src/database/uiaa.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 6a5f7a3..772dab9 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -21,7 +21,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock>, + RwLock, Box, String), CanonicalJsonValue>>, } impl Uiaa { @@ -155,8 +155,8 @@ impl Uiaa { .unwrap() .insert( ( - user_id.to_string(), - device_id.to_string(), + user_id.to_owned(), + device_id.to_owned(), session.to_string(), ), request.to_owned(), @@ -176,8 +176,8 @@ impl Uiaa { .read() .unwrap() .get(&( - user_id.to_string(), - device_id.to_string(), + user_id.to_owned(), + device_id.to_owned(), session.to_string(), )) .map(|j| j.to_owned())) From aba95b20f3b3c252e72ac87312b10df8068f7419 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 22 Dec 2021 19:41:33 +0100 Subject: [PATCH 0856/1727] Upgrade Ruma --- Cargo.lock | 56 +++++++++------- Cargo.toml | 4 +- src/client_server/keys.rs | 45 +++++++------ src/client_server/sync.rs | 2 + src/database/key_backups.rs | 65 +++++++++++------- src/database/users.rs | 129 ++++++++++++++++++++++-------------- 6 files changed, 182 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fbf4b3f..69a026b 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -938,7 +938,7 @@ checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.8", ] [[package]] @@ -979,7 +979,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 0.4.8", "pin-project-lite", "socket2 0.4.1", "tokio", @@ -1114,6 +1114,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "jobserver" version = "0.1.24" @@ -1984,7 +1990,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "assign", "js_int", @@ -2005,7 +2011,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "bytes", "http", @@ -2021,7 +2027,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2032,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "ruma-api", "ruma-common", @@ -2046,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "assign", "bytes", @@ -2066,7 +2072,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "indexmap", "js_int", @@ -2081,7 +2087,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "indoc", "js_int", @@ -2097,7 +2103,7 @@ dependencies = [ 
[[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2108,7 +2114,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2123,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2137,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2147,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "thiserror", ] @@ -2155,7 +2161,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2174,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2183,11 +2189,11 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "bytes", "form_urlencoded", - "itoa", + "itoa 0.4.8", "js_int", "ruma-serde-macros", "serde", @@ -2197,7 +2203,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = 
"git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2208,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2225,7 +2231,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "itertools 0.10.1", "js_int", @@ -2404,11 +2410,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.67" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" +checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -2420,7 +2426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] diff --git a/Cargo.toml b/Cargo.toml index 02159e3..e64e275 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "7cf3abbaf02995b03db74429090ca5af1cd71edc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -36,7 +36,7 @@ http = "0.2.4" # Used to find data directory for default db path directories = "3.0.2" # Used for ruma wrapper -serde_json = { version = "1.0.67", features = ["raw_value"] } +serde_json = { version = "1.0.70", features = ["raw_value"] } # Used for appservice registration files serde_yaml = "0.8.20" # Used for pdu definition diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 08ea6e7..be0675d 100644 --- 
a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -15,7 +15,7 @@ use ruma::{ }, federation, }, - encryption::UnsignedDeviceInfo, + serde::Raw, DeviceId, DeviceKeyAlgorithm, UserId, }; use serde_json::json; @@ -42,16 +42,9 @@ pub async fn upload_keys_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if let Some(one_time_keys) = &body.one_time_keys { - for (key_key, key_value) in one_time_keys { - db.users.add_one_time_key( - sender_user, - sender_device, - key_key, - key_value, - &db.globals, - )?; - } + for (key_key, key_value) in &body.one_time_keys { + db.users + .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?; } if let Some(device_keys) = &body.device_keys { @@ -350,10 +343,8 @@ pub(crate) async fn get_keys_helper bool>( Error::bad_database("all_device_keys contained nonexistent device.") })?; - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; container.insert(device_id, keys); } } @@ -369,10 +360,8 @@ pub(crate) async fn get_keys_helper bool>( ), )?; - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; container.insert(device_id.to_owned(), keys); } device_keys.insert(user_id.to_owned(), container); @@ -441,6 +430,24 @@ pub(crate) async fn get_keys_helper bool>( }) } +fn add_unsigned_device_display_name( + keys: &mut Raw, + metadata: ruma::api::client::r0::device::Device, +) -> serde_json::Result<()> { + if let Some(display_name) = metadata.display_name { + let mut object = keys.deserialize_as::>()?; + + let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = unsigned { + unsigned_object.insert("device_display_name".to_owned(), display_name.into()); + } + + *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); + } + + Ok(()) +} + pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 9ba3b7f..64588a2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -762,6 +762,8 @@ async fn sync_helper( .users .get_to_device_events(&sender_user, &sender_device)?, }, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, }; // TODO: Retry the endpoint instead of returning (waiting for #118) diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 56963c0..b74bc40 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -4,8 +4,10 @@ use ruma::{ error::ErrorKind, r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, + serde::Raw, RoomId, UserId, }; +use serde_json::json; use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -20,7 +22,7 @@ impl KeyBackups { pub fn create_backup( &self, user_id: &UserId, - backup_metadata: &BackupAlgorithm, + backup_metadata: &Raw, globals: &super::globals::Globals, ) -> Result { let version = globals.next_count()?.to_string(); @@ -59,7 +61,7 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - backup_metadata: &BackupAlgorithm, + 
backup_metadata: &Raw, globals: &super::globals::Globals, ) -> Result { let mut key = user_id.as_bytes().to_vec(); @@ -73,12 +75,8 @@ impl KeyBackups { )); } - self.backupid_algorithm.insert( - &key, - serde_json::to_string(backup_metadata) - .expect("BackupAlgorithm::to_string always works") - .as_bytes(), - )?; + self.backupid_algorithm + .insert(&key, backup_metadata.json().get().as_bytes())?; self.backupid_etag .insert(&key, &globals.next_count()?.to_be_bytes())?; Ok(version.to_owned()) @@ -105,7 +103,10 @@ impl KeyBackups { .transpose() } - pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { + pub fn get_latest_backup( + &self, + user_id: &UserId, + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -133,7 +134,11 @@ impl KeyBackups { .transpose() } - pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { + pub fn get_backup( + &self, + user_id: &UserId, + version: &str, + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -152,7 +157,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyBackupData, + key_data: &Raw, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -174,10 +179,8 @@ impl KeyBackups { key.push(0xff); key.extend_from_slice(session_id.as_bytes()); - self.backupkeyid_backup.insert( - &key, - &serde_json::to_vec(&key_data).expect("KeyBackupData::to_vec always works"), - )?; + self.backupkeyid_backup + .insert(&key, key_data.json().get().as_bytes())?; Ok(()) } @@ -209,13 +212,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result, Raw>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); + let mut rooms = BTreeMap::, Raw>::new(); for result in self .backupkeyid_backup @@ -241,7 +244,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup room_id is invalid room id.") })?; - let key_data = serde_json::from_slice(&value).map_err(|_| { + let key_data: serde_json::Value = serde_json::from_slice(&value).map_err(|_| { Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; @@ -249,13 +252,25 @@ impl KeyBackups { }) { let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { + let room_key_backup = rooms.entry(room_id).or_insert_with(|| { + Raw::new(&RoomKeyBackup { sessions: BTreeMap::new(), }) - .sessions - .insert(session_id, key_data); + .expect("RoomKeyBackup serialization") + }); + + let mut object = room_key_backup + .deserialize_as::>() + .map_err(|_| Error::bad_database("RoomKeyBackup is not an object"))?; + + let sessions = object.entry("session").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = sessions { + unsigned_object.insert(session_id, key_data); + } + + *room_key_backup = Raw::from_json( + serde_json::value::to_raw_value(&object).expect("Value => RawValue serialization"), + ); } Ok(rooms) @@ -266,7 +281,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -304,7 +319,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - ) 
-> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); diff --git a/src/database/users.rs b/src/database/users.rs index d4bf489..63a63f0 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,12 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + mem, + sync::Arc, +}; use tracing::warn; use super::abstraction::Tree; @@ -359,7 +364,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, - one_time_key_value: &OneTimeKey, + one_time_key_value: &Raw, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -409,7 +414,7 @@ impl Users { device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result, OneTimeKey)>> { + ) -> Result, Raw)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -480,7 +485,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - device_keys: &DeviceKeys, + device_keys: &Raw, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -509,9 +514,9 @@ impl Users { pub fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &CrossSigningKey, - self_signing_key: &Option, - user_signing_key: &Option, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -521,7 +526,12 @@ impl Users { prefix.push(0xff); // Master key - let mut master_key_ids = master_key.keys.values(); + let master_key_map = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? + .keys; + let mut master_key_ids = master_key_map.values(); + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Master key contained no key.", @@ -537,17 +547,21 @@ impl Users { let mut master_key_key = prefix.clone(); master_key_key.extend_from_slice(master_key_id.as_bytes()); - self.keyid_key.insert( - &master_key_key, - &serde_json::to_vec(&master_key).expect("CrossSigningKey::to_vec always works"), - )?; + self.keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes())?; self.userid_masterkeyid .insert(user_id.as_bytes(), &master_key_key)?; // Self-signing key if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key.keys.values(); + let self_signing_key_map = self_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") + })? 
+ .keys; + let mut self_signing_key_ids = self_signing_key_map.values(); let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Self signing key contained no key.", @@ -565,8 +579,7 @@ impl Users { self.keyid_key.insert( &self_signing_key_key, - &serde_json::to_vec(&self_signing_key) - .expect("CrossSigningKey::to_vec always works"), + self_signing_key.json().get().as_bytes(), )?; self.userid_selfsigningkeyid @@ -575,7 +588,13 @@ impl Users { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key.keys.values(); + let user_signing_key_map = user_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") + })? + .keys; + let mut user_signing_key_ids = user_signing_key_map.values(); let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User signing key contained no key.", @@ -593,8 +612,7 @@ impl Users { self.keyid_key.insert( &user_signing_key_key, - &serde_json::to_vec(&user_signing_key) - .expect("CrossSigningKey::to_vec always works"), + user_signing_key.json().get().as_bytes(), )?; self.userid_usersigningkeyid @@ -727,7 +745,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -744,25 +762,19 @@ impl Users { &self, user_id: &UserId, allowed_signatures: F, - ) -> Result> { + ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } @@ -772,31 +784,25 @@ impl Users { &self, user_id: &UserId, allowed_signatures: F, - ) -> Result> { + ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? 
.map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| user == user_id || allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } #[tracing::instrument(skip(self, user_id))] - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { @@ -991,3 +997,30 @@ impl Users { Ok(()) } } + +/// Ensure that a user only sees signatures from themselves and the target user +fn clean_signatures bool>( + cross_signing_key: &mut serde_json::Value, + user_id: &UserId, + allowed_signatures: F, +) -> Result<(), Error> { + if let Some(signatures) = cross_signing_key + .get_mut("signatures") + .and_then(|v| v.as_object_mut()) + { + // Don't allocate for the full size of the current signatures, but require + // at most one resize if nothing is dropped + let new_capacity = signatures.len() / 2; + for (user, signature) in + mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) + { + let id = <&UserId>::try_from(user.as_str()) + .map_err(|_| Error::bad_database("Invalid user ID in database."))?; + if id == user_id || allowed_signatures(id) { + signatures.insert(user, signature); + } + } + } + + Ok(()) +} From a889e884e684aa433772d8d61ee965c062a38790 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 23 Dec 2021 22:16:40 +0000 Subject: [PATCH 0857/1727] refactor:moved key watch wake logic to specific module --- Cargo.toml | 2 +- src/database/abstraction.rs | 3 ++ src/database/abstraction/heed.rs | 48 ++++--------------------- src/database/abstraction/sqlite.rs | 46 ++++-------------------- src/database/abstraction/watchers.rs | 54 ++++++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 83 deletions(-) create mode 100644 src/database/abstraction/watchers.rs diff --git a/Cargo.toml b/Cargo.toml index 02159e3..ceae6ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,7 +87,7 @@ sha-1 = "0.9.8" default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam"] +backend_heed = ["heed", "crossbeam", "parking_lot"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 11bbc3b..67b80d1 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,6 +12,9 @@ pub mod sqlite; #[cfg(feature = "heed")] pub mod heed; +#[cfg(any(feature = "sqlite", feature = "heed"))] +pub mod watchers; + pub trait DatabaseEngine: Sized { fn open(config: &Config) -> Result>; fn open_tree(self: &Arc, name: &'static str) -> Result>; diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs 
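The following hunks pull the duplicated prefix-watcher bookkeeping out of the heed and sqlite backends into the new shared `watchers.rs`: each watched prefix owns a `tokio::sync::watch` channel, and a write with key `K` notifies every watcher whose prefix is a prefix of `K`. A condensed, stand-alone sketch of that idea (assuming a `tokio` dependency with the `sync`, `macros` and `rt` features; the real module additionally de-registers watchers when it wakes them and uses `parking_lot` locks):

```rust
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::sync::watch;

#[derive(Default)]
struct Watchers {
    // One watch channel per registered prefix.
    inner: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}

impl Watchers {
    /// Register interest in `prefix` immediately and return a future that
    /// resolves once any key starting with that prefix is written.
    fn watch(&self, prefix: &[u8]) -> impl std::future::Future<Output = ()> {
        let mut rx = {
            let mut map = self.inner.write().unwrap();
            map.entry(prefix.to_vec())
                .or_insert_with(|| watch::channel(()))
                .1
                .clone()
        };
        async move {
            let _ = rx.changed().await;
        }
    }

    /// Called after an insert: wake every watcher whose prefix matches `key`.
    fn wake(&self, key: &[u8]) {
        let map = self.inner.read().unwrap();
        for length in 0..=key.len() {
            if let Some((tx, _)) = map.get(&key[..length]) {
                let _ = tx.send(());
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let watchers = Watchers::default();
    let notified = watchers.watch(b"@alice");        // registers before returning
    watchers.wake(b"@alice:server.name\xffdevice");  // key starts with the prefix,
    notified.await;                                  // so the watcher resolves
}
```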
index e767e22..83dafc5 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -1,15 +1,13 @@ -use super::super::Config; +use super::{super::Config, watchers::Watchers}; use crossbeam::channel::{bounded, Sender as ChannelSender}; use threadpool::ThreadPool; use crate::{Error, Result}; use std::{ - collections::HashMap, future::Future, pin::Pin, - sync::{Arc, Mutex, RwLock}, + sync::{Arc, Mutex}, }; -use tokio::sync::oneshot::Sender; use super::{DatabaseEngine, Tree}; @@ -23,7 +21,7 @@ pub struct Engine { pub struct EngineTree { engine: Arc, tree: Arc, - watchers: RwLock, Vec>>>, + watchers: Watchers, } fn convert_error(error: heed::Error) -> Error { @@ -60,7 +58,7 @@ impl DatabaseEngine for Engine { .create_database(Some(name)) .map_err(convert_error)?, ), - watchers: RwLock::new(HashMap::new()), + watchers: Default::default(), })) } @@ -145,29 +143,7 @@ impl Tree for EngineTree { .put(&mut txn, &key, &value) .map_err(convert_error)?; txn.commit().map_err(convert_error)?; - - let watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - }; - + self.watchers.wake(key); Ok(()) } @@ -223,18 +199,6 @@ impl Tree for EngineTree { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) + self.watchers.watch(prefix) } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1d2038c..1e6a2d8 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,17 +1,15 @@ -use super::{DatabaseEngine, Tree}; +use super::{watchers::Watchers, DatabaseEngine, Tree}; use crate::{database::Config, Result}; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::{Mutex, MutexGuard}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ cell::RefCell, - collections::{hash_map, HashMap}, future::Future, path::{Path, PathBuf}, pin::Pin, sync::Arc, }; use thread_local::ThreadLocal; -use tokio::sync::watch; use tracing::debug; thread_local! 
{ @@ -113,7 +111,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(SqliteTable { engine: Arc::clone(self), name: name.to_owned(), - watchers: RwLock::new(HashMap::new()), + watchers: Watchers::default(), })) } @@ -126,7 +124,7 @@ impl DatabaseEngine for Engine { pub struct SqliteTable { engine: Arc, name: String, - watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, + watchers: Watchers, } type TupleOfBytes = (Vec, Vec); @@ -200,27 +198,7 @@ impl Tree for SqliteTable { let guard = self.engine.write_lock(); self.insert_with_guard(&guard, key, value)?; drop(guard); - - let watchers = self.watchers.read(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write(); - for prefix in triggered { - if let Some(tx) = watchers.remove(prefix) { - let _ = tx.0.send(()); - } - } - }; - + self.watchers.wake(key); Ok(()) } @@ -365,19 +343,7 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().entry(prefix.to_vec()) { - hash_map::Entry::Occupied(o) => o.get().1.clone(), - hash_map::Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(()); - v.insert((tx, rx.clone())); - rx - } - }; - - Box::pin(async move { - // Tx is never destroyed - rx.changed().await.unwrap(); - }) + self.watchers.watch(prefix) } #[tracing::instrument(skip(self))] diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs new file mode 100644 index 0000000..404f3f0 --- /dev/null +++ b/src/database/abstraction/watchers.rs @@ -0,0 +1,54 @@ +use parking_lot::RwLock; +use std::{ + collections::{hash_map, HashMap}, + future::Future, + pin::Pin, +}; +use tokio::sync::watch; + +#[derive(Default)] +pub(super) struct Watchers { + watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, +} + +impl Watchers { + pub(super) fn watch<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { + let mut rx = match self.watchers.write().entry(prefix.to_vec()) { + hash_map::Entry::Occupied(o) => o.get().1.clone(), + hash_map::Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(()); + v.insert((tx, rx.clone())); + rx + } + }; + + Box::pin(async move { + // Tx is never destroyed + rx.changed().await.unwrap(); + }) + } + pub(super) fn wake(&self, key: &[u8]) { + let watchers = self.watchers.read(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write(); + for prefix in triggered { + if let Some(tx) = watchers.remove(prefix) { + let _ = tx.0.send(()); + } + } + }; + } +} From 7c1b2625cf8f315bced5e560574c0c64eedd368f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 24 Dec 2021 23:06:54 +0100 Subject: [PATCH 0858/1727] Prepare to add an option to list local user accounts from your homeserver --- src/database/admin.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1e5c47c..5ea872e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,6 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, + CountUsers, 
SendMessage(RoomMessageEventContent), } @@ -93,6 +94,16 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { + AdminCommand::CountUsers => { + // count() does not return an error on failure... + if let Ok(usercount) = guard.users.count() { + let message = format!("Found {} total user accounts", usercount); + send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); + } else { + // ... so we simply spit out a generic non-explaining-info in case count() did not return Ok() + send_message(RoomMessageEventContent::text_plain("Unable to count users"), guard, &state_lock); + } + } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } From 567cf6dbe970ee5422cd38439498f7e5a86b89ac Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 25 Dec 2021 20:51:22 +0100 Subject: [PATCH 0859/1727] Add command count_local_users to database/rooms.rs --- src/database/rooms.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fb9ecbf..0236c83 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1531,6 +1531,9 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } + "count_local_users" => { + db.admin.send(AdminCommand::CountUsers); + } "get_auth_chain" => { if args.len() == 1 { if let Ok(event_id) = EventId::parse_arc(args[0]) { From d21030566c174509c4030d2f6428ffe1109e6c1d Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 25 Dec 2021 21:29:03 +0100 Subject: [PATCH 0860/1727] Rename/Add count methods to count_local_users --- src/database/admin.rs | 2 +- src/database/users.rs | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 5ea872e..b18e50c 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -96,7 +96,7 @@ impl Admin { match event { AdminCommand::CountUsers => { // count() does not return an error on failure... - if let Ok(usercount) = guard.users.count() { + if let Ok(usercount) = guard.users.count_local_users() { let message = format!("Found {} total user accounts", usercount); send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); } else { diff --git a/src/database/users.rs b/src/database/users.rs index d4bf489..5a32f16 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -77,11 +77,23 @@ impl Users { } /// Returns the number of users registered on this server. + /// It really returns all users, not only real ones with a + /// password to login but also bridge puppets... #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } + /// This method will only count those local user accounts with + /// a password thus returning only real accounts on this instance. + #[tracing::instrument(skip(self))] + pub fn count_local_users(&self) -> Result { + self.userid_password.iter().map(|(key, value)| { + + }); + Ok(1) + } + /// Find out which user an access token belongs to. 
#[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { From 2281bcefc631e02c83800297a4838e127ded7973 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 11:06:28 +0100 Subject: [PATCH 0861/1727] Finalize count_local_users function --- src/database/users.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 5a32f16..1e103fa 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -88,10 +88,8 @@ impl Users { /// a password thus returning only real accounts on this instance. #[tracing::instrument(skip(self))] pub fn count_local_users(&self) -> Result { - self.userid_password.iter().map(|(key, value)| { - - }); - Ok(1) + let n = self.userid_password.iter().filter(|(_, bytes)| bytes.len() > 0).count(); + Ok(n) } /// Find out which user an access token belongs to. From 39787b41cb341ca3d270cc00c9ac46b8f4bd384d Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 12:04:38 +0100 Subject: [PATCH 0862/1727] Rename admin command CountUsers -> CountLocalUsers; Update comments --- src/database/admin.rs | 12 ++++++------ src/database/rooms.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index b18e50c..330fecb 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, - CountUsers, + CountLocalUsers, SendMessage(RoomMessageEventContent), } @@ -94,14 +94,14 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::CountUsers => { - // count() does not return an error on failure... + AdminCommand::CountLocalUsers => { + // count_local_users() only returns with OK(x) where x is the number of found accounts if let Ok(usercount) = guard.users.count_local_users() { - let message = format!("Found {} total user accounts", usercount); + let message = format!("Found {} local user account(s)", usercount); send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); } else { - // ... so we simply spit out a generic non-explaining-info in case count() did not return Ok() - send_message(RoomMessageEventContent::text_plain("Unable to count users"), guard, &state_lock); + // if count_local_users() only returns with OK(x), then why is this? 
;-) + send_message(RoomMessageEventContent::text_plain("Unable to count local users"), guard, &state_lock); } } AdminCommand::RegisterAppservice(yaml) => { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0236c83..b1dd103 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1532,7 +1532,7 @@ impl Rooms { db.admin.send(AdminCommand::ListAppservices); } "count_local_users" => { - db.admin.send(AdminCommand::CountUsers); + db.admin.send(AdminCommand::CountLocalUsers); } "get_auth_chain" => { if args.len() == 1 { From a69eb277d46d074d2bb4fef82f4111f70845f874 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 20:00:31 +0100 Subject: [PATCH 0863/1727] Update count users: It's now list_local_users and contains the number and the usernames --- src/database/admin.rs | 22 ++++++++++++---------- src/database/rooms.rs | 4 ++-- src/database/users.rs | 14 ++++++++++++++ 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 330fecb..58d9e83 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, - CountLocalUsers, + ListLocalUsers, SendMessage(RoomMessageEventContent), } @@ -94,15 +94,17 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::CountLocalUsers => { - // count_local_users() only returns with OK(x) where x is the number of found accounts - if let Ok(usercount) = guard.users.count_local_users() { - let message = format!("Found {} local user account(s)", usercount); - send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); - } else { - // if count_local_users() only returns with OK(x), then why is this? ;-) - send_message(RoomMessageEventContent::text_plain("Unable to count local users"), guard, &state_lock); - } + AdminCommand::ListLocalUsers => { + // collect all local users + let users = guard.users.iter_locals(); + + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + + // send number of local users as plain text: + // TODO: send as Markdown + send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b1dd103..4d839d3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1531,8 +1531,8 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } - "count_local_users" => { - db.admin.send(AdminCommand::CountLocalUsers); + "list_local_users" => { + db.admin.send(AdminCommand::ListLocalUsers); } "get_auth_chain" => { if args.len() == 1 { diff --git a/src/database/users.rs b/src/database/users.rs index 1e103fa..d3e1fe4 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,6 +84,8 @@ impl Users { Ok(self.userid_password.iter().count()) } + /// The method is DEPRECATED and was replaced by iter_locals() + /// /// This method will only count those local user accounts with /// a password thus returning only real accounts on this instance. #[tracing::instrument(skip(self))] @@ -92,6 +94,7 @@ impl Users { Ok(n) } + /// Find out which user an access token belongs to. 
#[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { @@ -131,6 +134,17 @@ impl Users { }) } + /// Returns a vector of local usernames + #[tracing::instrument(skip(self))] + pub fn iter_locals(&self) -> Vec { + self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { + match utils::string_from_bytes(&username) { + Ok(s) => s, + Err(e) => e.to_string() + } + }).collect::>() + } + /// Returns the password hash for the given user. #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { From 8d51359668199585175e7e0095de66e96bc1a3e1 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 20:49:19 +0100 Subject: [PATCH 0864/1727] Fix typo and remove unneeded newline --- src/database/admin.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 58d9e83..5418f53 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,7 +95,7 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - // collect all local users + // collect local users only let users = guard.users.iter_locals(); let mut msg: String = format!("Found {} local user account(s):\n", users.len()); @@ -104,7 +104,6 @@ impl Admin { // send number of local users as plain text: // TODO: send as Markdown send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); - } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error From b746f17e562ba02d9471d23c42c9bb8c9f4ee070 Mon Sep 17 00:00:00 2001 From: Ticho 34782694 Date: Fri, 7 Jan 2022 13:06:21 +0000 Subject: [PATCH 0865/1727] Make traefik+nginx config more self-contained The nginx instance which is serving the .well-known endpoints can serve the simple JSON replies directly from memory, instead of having them as external files on disk. --- docker/README.md | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/docker/README.md b/docker/README.md index 19d9dca..1f38d66 100644 --- a/docker/README.md +++ b/docker/README.md @@ -94,26 +94,20 @@ So...step by step: server_name .; listen 80 default_server; - location /.well-known/matrix/ { - root /var/www; - default_type application/json; - add_header Access-Control-Allow-Origin *; + location /.well-known/matrix/server { + return 200 '{"m.server": ".:443"}'; + add_header Content-Type application/json; } - } - ``` - - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.homeserver": { - "base_url": "https://." 
- } - } - ``` - - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.server": ".:443" + location /.well-known/matrix/client { + return 200 '{"m.homeserver": {"base_url": "https://."}}'; + add_header Content-Type application/json; + add_header "Access-Control-Allow-Origin" *; + } + + location / { + return 404; + } } ``` From 349865d3ccb9ee78c9410de28e0d5d8c043ae0c8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:44:23 +0100 Subject: [PATCH 0866/1727] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/client_server/message.rs | 4 ++-- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69a026b..07cae94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1990,7 +1990,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "assign", "js_int", @@ -2011,7 +2011,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "bytes", "http", @@ -2027,7 +2027,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2038,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "ruma-api", "ruma-common", @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "assign", "bytes", @@ -2072,7 +2072,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "indexmap", "js_int", @@ -2087,7 +2087,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = 
"git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "indoc", "js_int", @@ -2103,7 +2103,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2114,7 +2114,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2129,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2143,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2153,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "thiserror", ] @@ -2161,7 +2161,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2174,7 +2174,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2189,7 +2189,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "bytes", "form_urlencoded", @@ -2203,7 +2203,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = 
"git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2214,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2231,7 +2231,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 7b3432c..5e09dee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "7cf3abbaf02995b03db74429090ca5af1cd71edc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f8ba7f795765bf4aeb4db06849f9fdde9c162ac3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 60c756a..da6ae87 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -160,7 +160,7 @@ pub async fn get_message_events_route( .collect(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); + resp.start = body.from.to_owned(); resp.end = end_token; resp.chunk = events_after; resp.state = Vec::new(); @@ -190,7 +190,7 @@ pub async fn get_message_events_route( .collect(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); + resp.start = body.from.to_owned(); resp.end = start_token; resp.chunk = events_before; resp.state = Vec::new(); From cf54185a1cfe6b7cbed4c8c472198360aa705663 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:48:18 +0100 Subject: [PATCH 0867/1727] Use struct literals for consistency --- src/client_server/context.rs | 25 
+++++++++++++------------ src/client_server/message.rs | 22 ++++++++++++---------- src/client_server/unversioned.rs | 9 ++++----- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 97fc4fd..9bfec9e 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -92,18 +92,19 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_context::Response::new(); - resp.start = start_token; - resp.end = end_token; - resp.events_before = events_before; - resp.event = Some(base_event); - resp.events_after = events_after; - resp.state = db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(); + let resp = get_context::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state: db // TODO: State at event + .rooms + .room_state_full(&body.room_id)? + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + }; Ok(resp.into()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index da6ae87..cbce019 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -159,11 +159,12 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_message_events::Response::new(); - resp.start = body.from.to_owned(); - resp.end = end_token; - resp.chunk = events_after; - resp.state = Vec::new(); + let resp = get_message_events::Response { + start: body.from.to_owned(), + end: end_token, + chunk: events_after, + state: Vec::new(), + }; Ok(resp.into()) } @@ -189,11 +190,12 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_message_events::Response::new(); - resp.start = body.from.to_owned(); - resp.end = start_token; - resp.chunk = events_before; - resp.state = Vec::new(); + let resp = get_message_events::Response { + start: body.from.to_owned(), + end: start_token, + chunk: events_before, + state: Vec::new(), + }; Ok(resp.into()) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index f2624bb..f17d8cf 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -17,11 +17,10 @@ use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] #[tracing::instrument] pub async fn get_supported_versions_route() -> ConduitResult { - let mut resp = - get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); - - resp.unstable_features - .insert("org.matrix.e2e_cross_signing".to_owned(), true); + let resp = get_supported_versions::Response { + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + unstable_features: [("org.matrix.e2e_cross_signing".to_owned(), true)].into(), + }; Ok(resp.into()) } From 84862352bacd7172602f1b8200a774d668a9f087 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:48:40 +0100 Subject: [PATCH 0868/1727] Replace to_string calls on string literals with to_owned --- src/database/uiaa.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 772dab9..5e11467 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -154,11 +154,7 @@ impl Uiaa { .write() .unwrap() .insert( - ( - user_id.to_owned(), - device_id.to_owned(), - session.to_string(), - ), + 
(user_id.to_owned(), device_id.to_owned(), session.to_owned()), request.to_owned(), ); @@ -175,11 +171,7 @@ impl Uiaa { .userdevicesessionid_uiaarequest .read() .unwrap() - .get(&( - user_id.to_owned(), - device_id.to_owned(), - session.to_string(), - )) + .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) .map(|j| j.to_owned())) } From bcf4ede0bc356efb4bd8b8909ca3db0ab157f97e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 12:06:20 +0100 Subject: [PATCH 0869/1727] Restore compatibility with Rust 1.53 --- src/client_server/unversioned.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index f17d8cf..ea685b4 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,3 +1,5 @@ +use std::{collections::BTreeMap, iter::FromIterator}; + use crate::ConduitResult; use ruma::api::client::unversioned::get_supported_versions; @@ -19,7 +21,7 @@ use rocket::get; pub async fn get_supported_versions_route() -> ConduitResult { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], - unstable_features: [("org.matrix.e2e_cross_signing".to_owned(), true)].into(), + unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; Ok(resp.into()) From eecd664c43c652f7fe4afc06154b346fc6a45b58 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Thu, 13 Jan 2022 12:26:23 +0100 Subject: [PATCH 0870/1727] Reformat code --- src/database/appservice.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 847d747..88de1f3 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -28,17 +28,17 @@ impl Appservice { } /// Remove an appservice registration - /// + /// /// # Arguments - /// + /// /// * `service_name` - the name you send to register the service previously pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; - self.cached_registrations. - write(). - unwrap(). 
- remove(service_name); + self.cached_registrations + .write() + .unwrap() + .remove(service_name); Ok(()) } From 1d647a1a9a0a3075ee1bdbe2a039d22ee73baa2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 16 Oct 2021 15:19:25 +0200 Subject: [PATCH 0871/1727] improvement: allow rocksdb again --- Cargo.lock | 727 +++++++++++++++++----------- Cargo.toml | 4 +- src/database.rs | 11 +- src/database/abstraction.rs | 5 +- src/database/abstraction/rocksdb.rs | 183 +++++++ src/database/abstraction/sqlite.rs | 14 +- src/error.rs | 6 + src/utils.rs | 11 + 8 files changed, 664 insertions(+), 297 deletions(-) create mode 100644 src/database/abstraction/rocksdb.rs diff --git a/Cargo.lock b/Cargo.lock index 07cae94..794445f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,9 +10,9 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.3", "once_cell", @@ -78,9 +78,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -89,9 +89,9 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" dependencies = [ "autocfg", ] @@ -146,6 +146,25 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -174,15 +193,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "bytemuck" -version = "1.7.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" +checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" [[package]] name = "byteorder" @@ -198,13 +217,22 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.70" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -230,6 +258,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "clang-sys" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -259,11 +298,12 @@ dependencies = [ "reqwest", "ring", "rocket", + "rocksdb", "ruma", "rusqlite", "rust-argon2", - "rustls", - "rustls-native-certs", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", "serde", "serde_json", "serde_yaml", @@ -275,22 +315,22 @@ dependencies = [ "tokio", "tracing", "tracing-flame", - "tracing-subscriber", + "tracing-subscriber 0.2.25", "trust-dns-resolver", "webpki 0.22.0", ] [[package]] name = "const-oid" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c32f031ea41b4291d695026c023b95d59db2d8a2c7640800ed56bc8f510f22" +checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" [[package]] name = "const_fn" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "constant_time_eq" @@ -311,9 +351,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -321,9 +361,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" @@ -336,9 +376,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if 1.0.0", ] @@ -353,18 +393,18 @@ dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.2", - "crossbeam-utils 0.8.5", + "crossbeam-queue 0.3.3", + "crossbeam-utils 0.8.6", ] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] @@ -375,17 +415,17 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] 
name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", "lazy_static", "memoffset", "scopeguard", @@ -402,12 +442,12 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +checksum = "b979d76c9fcb84dffc80a73f7290da0f83e4c95773494674cb44b76d13a7a110" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] @@ -422,9 +462,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if 1.0.0", "lazy_static", @@ -471,9 +511,9 @@ dependencies = [ [[package]] name = "der" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e21d2d0f22cde6e88694108429775c0219760a07779bf96503b434a03d7412" +checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" dependencies = [ "const-oid", ] @@ -546,17 +586,11 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "ed25519" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ "signature", ] @@ -583,9 +617,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if 1.0.0", ] @@ -614,6 +648,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "figment" version = "0.10.6" @@ -656,9 +699,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", 
"futures-core", @@ -671,9 +714,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -681,15 +724,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -698,18 +741,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -717,23 +758,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -743,8 +783,6 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -772,9 +810,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -804,9 +842,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" +checksum = "c3a7187e78088aead22ceedeee99779455b23fc231fe13ec443f99bb71694e5b" dependencies = [ "color_quant", "weezl", @@ -820,9 +858,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.4" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ "bytes", "fnv", @@ -932,20 +970,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 0.4.8", + "itoa 1.0.1", ] [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", @@ -960,15 +998,15 @@ checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -981,7 +1019,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite", - "socket2 0.4.1", + "socket2 0.4.2", "tokio", "tower-service", "tracing", @@ -990,17 +1028,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.22.1" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ - "futures-util", + "http", "hyper", - "log", - "rustls", + "rustls 0.20.2", "tokio", - "tokio-rustls", - "webpki 0.21.4", + "tokio-rustls 0.23.2", ] [[package]] @@ -1033,9 +1069,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", @@ -1053,15 +1089,15 @@ dependencies = [ [[package]] name = "inlinable_string" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", ] @@ -1092,18 +1128,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -1137,9 +1164,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.53" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -1174,10 +1201,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] -name = "libc" -version = "0.2.101" +name = "lazycell" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" + +[[package]] +name = "libloading" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "6.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] [[package]] name = "libsqlite3-sys" @@ -1227,15 +1282,17 @@ dependencies = [ [[package]] name = "loom" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111607c723d7857e0d8299d5ce7a0bf4b844d3e44f8de136b13da513eaf8fc4" +checksum = "edc5c7d328e32cc4954e8e01193d7f0ef5ab257b5090b70a964e099a36034309" dependencies = [ "cfg-if 1.0.0", "generator", "scoped-tls", "serde", "serde_json", + "tracing", + "tracing-subscriber 0.3.5", ] [[package]] @@ -1268,6 +1325,15 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.9" @@ -1282,9 +1348,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -1295,6 +1361,12 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1306,9 +1378,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", "log", @@ -1328,9 +1400,9 @@ dependencies = [ [[package]] name = "multer" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "408327e2999b839cd1af003fc01b2019a6c10a1361769542203f6fedc5179680" +checksum = "5f8f35e687561d5c1667590911e6698a8cb714a134a7505718a182e7bc9d3836" dependencies = [ "bytes", "encoding_rs", @@ -1338,11 +1410,22 @@ dependencies = [ "http", "httparse", "log", + "memchr", "mime", "spin 0.9.2", "tokio", "tokio-util", - "twoway", + "version_check", +] + +[[package]] +name = "nom" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +dependencies = [ + "memchr", + "minimal-lexical", "version_check", ] @@ -1409,9 +1492,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1419,9 +1502,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "opaque-debug" @@ -1431,9 +1514,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" @@ -1545,6 +1628,12 @@ dependencies = [ "syn", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" version = "0.8.3" @@ -1564,18 +1653,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -1584,9 +1673,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1596,9 +1685,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee84ed13e44dd82689fa18348a49934fa79cc774a344c42fc9b301c71b140a" +checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" dependencies = [ "der", "spki", @@ -1607,9 +1696,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "png" @@ -1625,15 +1714,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" dependencies = [ "thiserror", "toml", @@ -1645,17 +1734,11 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -1681,9 +1764,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ "proc-macro2", ] @@ -1845,15 +1928,16 @@ dependencies = [ [[package]] 
name = "reqwest" -version = "0.11.4" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -1865,12 +1949,14 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-native-certs", + "rustls 0.20.2", + "rustls-native-certs 0.6.1", + "rustls-pemfile", "serde", + "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.2", "tokio-socks", "url", "wasm-bindgen", @@ -1983,10 +2069,20 @@ dependencies = [ "state", "time 0.2.27", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "uncased", ] +[[package]] +name = "rocksdb" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "ruma" version = "0.4.0" @@ -2233,7 +2329,7 @@ name = "ruma-state-res" version = "0.4.1" source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ - "itertools 0.10.1", + "itertools", "js_int", "ruma-common", "ruma-events", @@ -2247,9 +2343,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.3" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3" +checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" dependencies = [ "bitflags", "fallible-iterator", @@ -2269,9 +2365,15 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.2.3" @@ -2290,10 +2392,22 @@ dependencies = [ "base64 0.13.0", "log", "ring", - "sct", + "sct 0.6.1", "webpki 0.21.4", ] +[[package]] +name = "rustls" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", +] + [[package]] name = "rustls-native-certs" version = "0.5.0" @@ -2301,22 +2415,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.19.1", "schannel", "security-framework", ] [[package]] -name = "rustversion" -version = "1.0.5" +name = "rustls-native-certs" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64 0.13.0", +] + +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "schannel" @@ -2350,6 +2485,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" version = "2.4.2" @@ -2390,18 +2535,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -2410,9 +2555,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ "itoa 1.0.1", "ryu", @@ -2433,12 +2578,12 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.20" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad104641f3c958dab30eb3010e834c2622d1f3f4c530fef1dee20ad9485f3c09" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] @@ -2464,9 +2609,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.6" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9204c41a1597a8c5af23c82d1c921cb01ec0a4c59e07a9c7306062829a3903f3" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -2477,13 +2622,19 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = 
"signal-hook-registry" version = "1.4.0" @@ -2495,9 +2646,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" +checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" [[package]] name = "simple_asn1" @@ -2512,19 +2663,19 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "sled" -version = "0.34.6" +version = "0.34.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" +checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", "fs2", "fxhash", "libc", @@ -2535,9 +2686,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "socket2" @@ -2552,9 +2703,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ "libc", "winapi", @@ -2574,9 +2725,9 @@ checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" [[package]] name = "spki" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987637c5ae6b3121aba9d513f869bd2bff11c4cc086c22473befd6649c0bd521" +checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" dependencies = [ "der", ] @@ -2665,9 +2816,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.75" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ "proc-macro2", "quote", @@ -2685,9 +2836,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", @@ -2697,13 +2848,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if 1.0.0", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -2711,18 
+2862,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "283d5230e63df9608ac7d9691adc1dfb6e701225436eb64d0b9a7f0a5a04f6ec" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa3884228611f5cd3608e2d409bf7dce832e4eb3135e3f11addbd7e41bd68e71" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -2810,9 +2961,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2825,11 +2976,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.11.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg", "bytes", "libc", "memchr", @@ -2844,9 +2994,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.3.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -2859,11 +3009,22 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "rustls", + "rustls 0.19.1", "tokio", "webpki 0.21.4", ] +[[package]] +name = "tokio-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +dependencies = [ + "rustls 0.20.2", + "tokio", + "webpki 0.22.0", +] + [[package]] name = "tokio-socks" version = "0.5.1" @@ -2878,9 +3039,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2889,9 +3050,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2918,9 +3079,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" 
+checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", "pin-project-lite", @@ -2930,9 +3091,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", @@ -2941,9 +3102,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -2956,7 +3117,7 @@ checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.2.25", ] [[package]] @@ -2982,14 +3143,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.20" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cbe87a2fa7e35900ce5de20220a582a9483a7063811defce79d7cbd59d4cfe" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers", + "matchers 0.0.1", "regex", "serde", "serde_json", @@ -3002,6 +3163,24 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-subscriber" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" +dependencies = [ + "ansi_term", + "lazy_static", + "matchers 0.1.0", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + [[package]] name = "trust-dns-proto" version = "0.20.3" @@ -3053,21 +3232,11 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "twoway" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" -dependencies = [ - "memchr", - "unchecked-index", -] - [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "ubyte" @@ -3088,17 +3257,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "unchecked-index" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" - [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -3153,9 +3316,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = 
"version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -3181,21 +3344,19 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -3208,9 +3369,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" +checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3220,9 +3381,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3230,9 +3391,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -3243,15 +3404,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.53" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3352,18 +3513,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zeroize" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.1.0" +version = "1.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" dependencies = [ "proc-macro2", "quote", @@ -3373,18 +3534,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.9.2+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "2390ea1bf6c038c39674f22d95f0564725fc06034a47129179810b2fc58caa54" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "4.1.3+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "e99d81b99fb3c2c2c794e3fe56c305c63d5173a16a46b5850b07c935ffc7db79" dependencies = [ "libc", "zstd-sys", @@ -3392,12 +3553,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.6.2+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "2daf2f248d9ea44454bfcb2516534e8b8ad2fc91bf818a1885495fc42bc8ac9f" dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 5e09dee..5cc6a83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,6 +78,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" @@ -87,7 +88,8 @@ sha-1 = "0.9.8" default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam", "parking_lot"] +backend_heed = ["heed", "crossbeam"] +backend_rocksdb = ["rocksdb"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/database.rs b/src/database.rs index 8b29b22..4c377f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -154,6 +154,9 @@ pub type Engine = abstraction::sqlite::Engine; #[cfg(feature = "heed")] pub type Engine = abstraction::heed::Engine; +#[cfg(feature = "rocksdb")] +pub type Engine = abstraction::rocksdb::Engine; + pub struct Database { _db: Arc, pub globals: globals::Globals, @@ -314,10 +317,10 @@ impl Database { .expect("pdu cache capacity fits into usize"), )), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), + shorteventid_cache: Mutex::new(LruCache::new(100_000_000)), + eventidshort_cache: Mutex::new(LruCache::new(100_000_000)), + shortstatekey_cache: Mutex::new(LruCache::new(100_000_000)), + statekeyshort_cache: Mutex::new(LruCache::new(100_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs 
index 67b80d1..a347f83 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,7 +12,10 @@ pub mod sqlite; #[cfg(feature = "heed")] pub mod heed; -#[cfg(any(feature = "sqlite", feature = "heed"))] +#[cfg(feature = "rocksdb")] +pub mod rocksdb; + +#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] pub mod watchers; pub trait DatabaseEngine: Sized { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs new file mode 100644 index 0000000..3ff6ab8 --- /dev/null +++ b/src/database/abstraction/rocksdb.rs @@ -0,0 +1,183 @@ +use super::super::Config; +use crate::{utils, Result}; + +use std::{future::Future, pin::Pin, sync::Arc}; + +use super::{DatabaseEngine, Tree}; + +use std::{collections::HashMap, sync::RwLock}; + +pub struct Engine { + rocks: rocksdb::DBWithThreadMode, + old_cfs: Vec, +} + +pub struct RocksDbEngineTree<'a> { + db: Arc, + name: &'a str, + watchers: Watchers, +} + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + let mut db_opts = rocksdb::Options::default(); + db_opts.create_if_missing(true); + db_opts.set_max_open_files(16); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); + db_opts.set_target_file_size_base(256 << 20); + db_opts.set_write_buffer_size(256 << 20); + + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_size(512 << 10); + db_opts.set_block_based_table_factory(&block_based_options); + + let cfs = rocksdb::DBWithThreadMode::::list_cf( + &db_opts, + &config.database_path, + ) + .unwrap_or_default(); + + let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( + &db_opts, + &config.database_path, + cfs.iter().map(|name| { + let mut options = rocksdb::Options::default(); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + options.set_prefix_extractor(prefix_extractor); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + rocksdb::ColumnFamilyDescriptor::new(name, options) + }), + )?; + + Ok(Arc::new(Engine { + rocks: db, + old_cfs: cfs, + })) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + if !self.old_cfs.contains(&name.to_owned()) { + // Create if it didn't exist + let mut options = rocksdb::Options::default(); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + options.set_prefix_extractor(prefix_extractor); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + let _ = self.rocks.create_cf(name, &options); + println!("created cf"); + } + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: Watchers::default(), + })) + } + + fn flush(self: &Arc) -> Result<()> { + // TODO? + Ok(()) + } +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { + self.db.rocks.cf_handle(self.name).unwrap() + } +} + +impl Tree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.db.rocks.get_cf(self.cf(), key)?) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.db.rocks.put_cf(self.cf(), key, value)?; + self.watchers.wake(key); + Ok(()) + } + + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + for (key, value) in iter { + self.db.rocks.put_cf(self.cf(), key, value)?; + } + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + Ok(self.db.rocks.delete_cf(self.cf(), key)?) 
+ } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf(self.cf(), rocksdb::IteratorMode::Start) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From( + from, + if backwards { + rocksdb::Direction::Reverse + } else { + rocksdb::Direction::Forward + }, + ), + ) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn increment(&self, key: &[u8]) -> Result> { + // TODO: make atomic + let old = self.db.rocks.get_cf(self.cf(), &key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(self.cf(), key, &new)?; + Ok(new) + } + + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + for key in iter { + let old = self.db.rocks.get_cf(self.cf(), &key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(self.cf(), key, new)?; + } + + Ok(()) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), + ) + .map(|(k, v)| (Vec::from(k), Vec::from(v))) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } +} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1e6a2d8..3187566 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -132,7 +132,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { - //dbg!(&self.name); + dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? .query_row([key], |row| row.get(0)) @@ -141,7 +141,7 @@ impl SqliteTable { #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { - //dbg!(&self.name); + dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -168,14 +168,14 @@ impl SqliteTable { let statement_ref = NonAliasingBox(statement); - //let name = self.name.clone(); + let name = self.name.clone(); let iterator = Box::new( statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); @@ -263,7 +263,7 @@ impl Tree for SqliteTable { let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? 
- //let name = self.name.clone(); + let name = self.name.clone(); if backwards { let statement = Box::leak(Box::new( @@ -282,7 +282,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); @@ -307,7 +307,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); diff --git a/src/error.rs b/src/error.rs index 7faddc9..4d427da 100644 --- a/src/error.rs +++ b/src/error.rs @@ -39,6 +39,12 @@ pub enum Error { #[cfg(feature = "heed")] #[error("There was a problem with the connection to the heed database: {error}")] HeedError { error: String }, + #[cfg(feature = "rocksdb")] + #[error("There was a problem with the connection to the rocksdb database: {source}")] + RocksDbError { + #[from] + source: rocksdb::Error, + }, #[error("Could not generate an image.")] ImageError { #[from] diff --git a/src/utils.rs b/src/utils.rs index 26d71a8..4702d05 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,6 +29,17 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } +#[cfg(feature = "rocksdb")] +pub fn increment_rocksdb( + _new_key: &[u8], + old: Option<&[u8]>, + _operands: &mut rocksdb::MergeOperands, +) -> Option> { + dbg!(_new_key); + dbg!(old); + increment(old) +} + pub fn generate_keypair() -> Vec { let mut value = random_string(8).as_bytes().to_vec(); value.push(0xff); From a30b588ede6135642946afd575a2411c6d0d21e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 10 Dec 2021 21:34:45 +0100 Subject: [PATCH 0872/1727] rocksdb as default --- Cargo.toml | 2 +- src/database.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5cc6a83..0a2b445 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,7 +85,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" [features] -default = ["conduit_bin", "backend_sqlite"] +default = ["conduit_bin", "backend_rocksdb"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/src/database.rs b/src/database.rs index 4c377f0..af6136b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -317,10 +317,10 @@ impl Database { .expect("pdu cache capacity fits into usize"), )), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(100_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(100_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(100_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(100_000_000)), + shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), + eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), + shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), + statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), From c9c99746412155fcdce6a6430bd5ef9c567cc3fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 14:52:19 +0100 Subject: [PATCH 0873/1727] fix: stack overflows when fetching auth events --- src/database/abstraction/rocksdb.rs | 18 ++-- src/server_server.rs | 157 +++++++++++++++------------- 2 files changed, 94 insertions(+), 81 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs 
b/src/database/abstraction/rocksdb.rs index 3ff6ab8..825c02e 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -22,14 +22,20 @@ impl DatabaseEngine for Engine { fn open(config: &Config) -> Result> { let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); - db_opts.set_max_open_files(16); + db_opts.set_max_open_files(512); db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); - db_opts.set_target_file_size_base(256 << 20); - db_opts.set_write_buffer_size(256 << 20); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_target_file_size_base(2 << 22); + db_opts.set_max_bytes_for_level_base(2 << 24); + db_opts.set_max_bytes_for_level_multiplier(2.0); + db_opts.set_num_levels(8); + db_opts.set_write_buffer_size(2 << 27); + + let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize).unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(512 << 10); + block_based_options.set_block_size(2 << 19); + block_based_options.set_block_cache(&rocksdb_cache); db_opts.set_block_based_table_factory(&block_based_options); let cfs = rocksdb::DBWithThreadMode::::list_cf( @@ -45,7 +51,6 @@ impl DatabaseEngine for Engine { let mut options = rocksdb::Options::default(); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); options.set_prefix_extractor(prefix_extractor); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); rocksdb::ColumnFamilyDescriptor::new(name, options) }), @@ -63,7 +68,6 @@ impl DatabaseEngine for Engine { let mut options = rocksdb::Options::default(); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); options.set_prefix_extractor(prefix_extractor); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); let _ = self.rocks.create_cf(name, &options); println!("created cf"); diff --git a/src/server_server.rs b/src/server_server.rs index 594152a..d6bc9b9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1392,12 +1392,11 @@ async fn upgrade_outlier_to_timeline_pdu( let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - let k = db - .rooms - .get_statekey_from_short(k) - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - - state.insert(k, id.clone()); + if let Ok(k) = db.rooms.get_statekey_from_short(k) { + state.insert(k, id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } starting_events.push(id); } @@ -1755,11 +1754,16 @@ async fn upgrade_outlier_to_timeline_pdu( .into_iter() .map(|map| { map.into_iter() - .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id))) - .collect::>>() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + .map(|k| (k, id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() }) - .collect::>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + .collect(); let state = match state_res::resolve( room_version_id, @@ -1871,73 +1875,78 @@ pub(crate) fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - let local_pdu = db.rooms.get_pdu(id); - let pdu = match local_pdu { - Ok(Some(pdu)) => { + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![id]; + let mut events_in_reverse_order = Vec::new(); + while let Some(next_id) = todo_auth_events.pop() { + if let Ok(Some(_)) = db.rooms.get_pdu(next_id) { trace!("Found {} in db", id); - (pdu, None) - } - Ok(None) => { - // c. Ask origin server over federation - warn!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - warn!("Got {} over federation", id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu) { - Ok(t) => t, - Err(_) => { - back_off((**id).to_owned()); - continue; - } - }; - - if calculated_event_id != **id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - id, calculated_event_id, &res.pdu); - } - - // This will also fetch the auth chain - match handle_outlier_pdu( - origin, - create_event, - id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => (pdu, Some(json)), - Err(e) => { - warn!("Authentication of event {} failed: {:?}", id, e); - back_off((**id).to_owned()); - continue; - } - } - } - Err(_) => { - warn!("Failed to fetch event: {}", id); - back_off((**id).to_owned()); - continue; - } - } - } - Err(e) => { - warn!("Error loading {}: {}", id, e); continue; } - }; - pdus.push(pdu); + + warn!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: next_id }, + ) + .await + { + Ok(res) => { + warn!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu) { + Ok(t) => t, + Err(_) => { + back_off((**next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != **next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + events_in_reverse_order.push((next_id, value)); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((**next_id).to_owned()); + } + } + } + + while let Some((next_id, value)) = events_in_reverse_order.pop() { + match handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + pdus.push((pdu, Some(json))); + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } + } + } } pdus }) From 4b4afea2abb4289d6fa31e02bd2be2799f51e0ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 15:54:42 +0100 Subject: [PATCH 0874/1727] fix auth event fetching --- src/server_server.rs | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index d6bc9b9..28c3ea0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1878,15 +1878,16 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); + continue; } // c. Ask origin server over federation // We also handle its auth chain here so we don't get a stack overflow in // handle_outlier_pdu. - let mut todo_auth_events = vec![id]; + let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); while let Some(next_id) = todo_auth_events.pop() { - if let Ok(Some(_)) = db.rooms.get_pdu(next_id) { + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } @@ -1897,7 +1898,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .send_federation_request( &db.globals, origin, - get_event::v1::Request { event_id: next_id }, + get_event::v1::Request { event_id: &next_id }, ) .await { @@ -1907,21 +1908,35 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); continue; } }; - if calculated_event_id != **next_id { + if calculated_event_id != *next_id { warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", next_id, calculated_event_id, &res.pdu); } + + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + for auth_event in auth_events { + if let Some(Ok(auth_event)) = auth_event.as_str() + .map(|e| serde_json::from_str(e)) { + todo_auth_events.push(auth_event); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + events_in_reverse_order.push((next_id, value)); } Err(_) => { warn!("Failed to fetch event: {}", next_id); - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); } } } @@ -1930,7 +1945,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match handle_outlier_pdu( origin, create_event, - next_id, + &next_id, room_id, value.clone(), db, @@ -1943,7 +1958,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); } } } From 74951cb239b5ec7ef41ba080729bc93df046fb66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 21:42:53 +0100 Subject: [PATCH 0875/1727] dbg --- src/server_server.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 28c3ea0..b6bea0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1922,8 +1922,9 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { for auth_event in auth_events { if let Some(Ok(auth_event)) = auth_event.as_str() - .map(|e| serde_json::from_str(e)) { - todo_auth_events.push(auth_event); + .map(|e| {let ev: std::result::Result, _> = dbg!(serde_json::from_str(dbg!(e))); ev}) { + let a: Arc = auth_event; + todo_auth_events.push(a); } else { warn!("Auth event id is not valid"); } From 83a9095cdc3febd617d9bfd2d8cacf0fe3e89990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 22:25:24 +0100 Subject: [PATCH 0876/1727] fix? 
--- src/server_server.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index b6bea0c..8c5c09f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1921,8 +1921,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { for auth_event in auth_events { - if let Some(Ok(auth_event)) = auth_event.as_str() - .map(|e| {let ev: std::result::Result, _> = dbg!(serde_json::from_str(dbg!(e))); ev}) { + if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { let a: Arc = auth_event; todo_auth_events.push(a); } else { From ee3d2db8e061bcdac43674aa050bcd3aad79d4a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Dec 2021 10:48:28 +0100 Subject: [PATCH 0877/1727] improvement, maybe not safe --- src/server_server.rs | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 8c5c09f..57f5586 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1686,25 +1686,6 @@ async fn upgrade_outlier_to_timeline_pdu( // We do this by adding the current state to the list of fork states extremity_sstatehashes.remove(¤t_sstatehash); fork_states.push(current_state_ids); - dbg!(&extremity_sstatehashes); - - for (sstatehash, leaf_pdu) in extremity_sstatehashes { - let mut leaf_state = db - .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &leaf_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id)); - // Now it's the state after the pdu - } - - fork_states.push(leaf_state); - } // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); @@ -1941,7 +1922,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } } - while let Some((next_id, value)) = events_in_reverse_order.pop() { + for (next_id, value) in events_in_reverse_order { match handle_outlier_pdu( origin, create_event, @@ -1954,7 +1935,9 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok((pdu, json)) => { - pdus.push((pdu, Some(json))); + if next_id == *id { + pdus.push((pdu, Some(json))); + } } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); From b1d9ec3efccafaf887da1b54e4b3ef2bfa4d84a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Dec 2021 10:16:22 +0100 Subject: [PATCH 0878/1727] fix: atomic increment --- Cargo.toml | 2 +- src/database/abstraction/rocksdb.rs | 24 ++++++++++++++++-------- src/database/abstraction/watchers.rs | 8 ++++---- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0a2b445..6241b6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,7 @@ backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] -sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] +sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional [[bin]] diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 825c02e..b2142df 100644 --- a/src/database/abstraction/rocksdb.rs +++ 
b/src/database/abstraction/rocksdb.rs @@ -1,11 +1,6 @@ -use super::super::Config; +use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; - -use std::{future::Future, pin::Pin, sync::Arc}; - -use super::{DatabaseEngine, Tree}; - -use std::{collections::HashMap, sync::RwLock}; +use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, @@ -16,6 +11,7 @@ pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, watchers: Watchers, + write_lock: RwLock<()> } impl DatabaseEngine for Engine { @@ -77,6 +73,7 @@ impl DatabaseEngine for Engine { name, db: Arc::clone(self), watchers: Watchers::default(), + write_lock: RwLock::new(()), })) } @@ -98,8 +95,12 @@ impl Tree for RocksDbEngineTree<'_> { } fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let lock = self.write_lock.read().unwrap(); self.db.rocks.put_cf(self.cf(), key, value)?; + drop(lock); + self.watchers.wake(key); + Ok(()) } @@ -148,20 +149,27 @@ impl Tree for RocksDbEngineTree<'_> { } fn increment(&self, key: &[u8]) -> Result> { - // TODO: make atomic + let lock = self.write_lock.write().unwrap(); + let old = self.db.rocks.get_cf(self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(self.cf(), key, &new)?; + + drop(lock); Ok(new) } fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let lock = self.write_lock.write().unwrap(); + for key in iter { let old = self.db.rocks.get_cf(self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(self.cf(), key, new)?; } + drop(lock); + Ok(()) } diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 404f3f0..fec1f27 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -1,6 +1,6 @@ -use parking_lot::RwLock; use std::{ collections::{hash_map, HashMap}, + sync::RwLock, future::Future, pin::Pin, }; @@ -16,7 +16,7 @@ impl Watchers { &'a self, prefix: &[u8], ) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().entry(prefix.to_vec()) { + let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { hash_map::Entry::Occupied(o) => o.get().1.clone(), hash_map::Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(()); @@ -31,7 +31,7 @@ impl Watchers { }) } pub(super) fn wake(&self, key: &[u8]) { - let watchers = self.watchers.read(); + let watchers = self.watchers.read().unwrap(); let mut triggered = Vec::new(); for length in 0..=key.len() { @@ -43,7 +43,7 @@ impl Watchers { drop(watchers); if !triggered.is_empty() { - let mut watchers = self.watchers.write(); + let mut watchers = self.watchers.write().unwrap(); for prefix in triggered { if let Some(tx) = watchers.remove(prefix) { let _ = tx.0.send(()); From 54f4d39e3ed92106ec3a902de22d2366cfd8e8be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Dec 2021 16:02:12 +0100 Subject: [PATCH 0879/1727] improvement: don't fetch event multiple times --- src/database/abstraction/rocksdb.rs | 4 +++- src/server_server.rs | 17 +++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b2142df..397047b 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -27,7 +27,9 @@ impl DatabaseEngine for Engine { db_opts.set_num_levels(8); 
db_opts.set_write_buffer_size(2 << 27); - let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize).unwrap(); + let rocksdb_cache = + rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) + .unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_size(2 << 19); diff --git a/src/server_server.rs b/src/server_server.rs index 57f5586..6e8ebf3 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1867,7 +1867,12 @@ pub(crate) fn fetch_and_handle_outliers<'a>( // handle_outlier_pdu. let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; @@ -1899,10 +1904,13 @@ pub(crate) fn fetch_and_handle_outliers<'a>( next_id, calculated_event_id, &res.pdu); } - - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { for auth_event in auth_events { - if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { let a: Arc = auth_event; todo_auth_events.push(a); } else { @@ -1913,7 +1921,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>( warn!("Auth event list invalid"); } - events_in_reverse_order.push((next_id, value)); + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); } Err(_) => { warn!("Failed to fetch event: {}", next_id); From 5bcc1324ed3936444ba189c399e906482cc67d3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Dec 2021 22:10:31 +0100 Subject: [PATCH 0880/1727] fix: auth event fetch order --- src/server_server.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 6e8ebf3..c76afd3 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1931,7 +1931,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } } - for (next_id, value) in events_in_reverse_order { + for (next_id, value) in events_in_reverse_order.iter().rev() { match handle_outlier_pdu( origin, create_event, @@ -1944,13 +1944,13 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok((pdu, json)) => { - if next_id == *id { + if next_id == id { pdus.push((pdu, Some(json))); } } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((*next_id).to_owned()); + back_off((**next_id).to_owned()); } } } From 68e910bb77f7bbc93269dd1dfd0f70a26f1e8ef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 4 Jan 2022 14:30:13 +0100 Subject: [PATCH 0881/1727] feat: lazy loading --- src/client_server/context.rs | 62 ++++++++++++++++--- src/client_server/message.rs | 85 +++++++++++++++++++------ src/client_server/sync.rs | 117 +++++++++++++++++++++++++++++++---- src/database.rs | 3 + src/database/rooms.rs | 96 +++++++++++++++++++++++++++- 5 files changed, 321 insertions(+), 42 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 9bfec9e..94a44e3 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,5 +1,9 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; -use 
ruma::api::client::{error::ErrorKind, r0::context::get_context}; +use ruma::{ + api::client::{error::ErrorKind, r0::context::get_context}, + events::EventType, +}; +use std::collections::HashSet; use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] @@ -21,6 +25,7 @@ pub async fn get_context_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( @@ -29,6 +34,8 @@ pub async fn get_context_route( )); } + let mut lazy_loaded = HashSet::new(); + let base_pdu_id = db .rooms .get_pdu_id(&body.event_id)? @@ -45,8 +52,18 @@ pub async fn get_context_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event not found.", - ))? - .to_room_event(); + ))?; + + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &base_event.sender, + )? { + lazy_loaded.insert(base_event.sender.clone()); + } + + let base_event = base_event.to_room_event(); let events_before: Vec<_> = db .rooms @@ -60,6 +77,17 @@ pub async fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect(); + for (_, event) in &events_before { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + let start_token = events_before .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -82,6 +110,17 @@ pub async fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect(); + for (_, event) in &events_after { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + let end_token = events_after .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -92,18 +131,23 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); + let mut state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? + { + state.push(member_event.to_state_event()); + } + } + let resp = get_context::Response { start: start_token, end: end_token, events_before, event: Some(base_event), events_after, - state: db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), + state, }; Ok(resp.into()) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index cbce019..48ca4ae 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -6,7 +6,11 @@ use ruma::{ }, events::EventType, }; -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{ + collections::{BTreeMap, HashSet}, + convert::TryInto, + sync::Arc, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -117,6 +121,7 @@ pub async fn get_message_events_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( @@ -136,6 +141,12 @@ pub async fn get_message_events_route( // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); + let next_token; + + let mut resp = get_message_events::Response::new(); + + let mut lazy_loaded = HashSet::new(); + match body.dir { get_message_events::Direction::Forward => { let events_after: Vec<_> = db @@ -152,21 +163,27 @@ pub async fn get_message_events_route( .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); - let end_token = events_after.last().map(|(count, _)| count.to_string()); + for (_, event) in &events_after { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + + next_token = events_after.last().map(|(count, _)| count).copied(); let events_after: Vec<_> = events_after .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let resp = get_message_events::Response { - start: body.from.to_owned(), - end: end_token, - chunk: events_after, - state: Vec::new(), - }; - - Ok(resp.into()) + resp.start = body.from.to_owned(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_after; } get_message_events::Direction::Backward => { let events_before: Vec<_> = db @@ -183,21 +200,51 @@ pub async fn get_message_events_route( .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); - let start_token = events_before.last().map(|(count, _)| count.to_string()); + for (_, event) in &events_before { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + + next_token = events_before.last().map(|(count, _)| count).copied(); let events_before: Vec<_> = events_before .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let resp = get_message_events::Response { - start: body.from.to_owned(), - end: start_token, - chunk: events_before, - state: Vec::new(), - }; - - Ok(resp.into()) + resp.start = body.from.to_owned(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_before; } } + + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + resp.state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? 
+ { + resp.state.push(member_event.to_state_event()); + } + } + + if let Some(next_token) = next_token { + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &body.room_id, + lazy_loaded.into_iter().collect(), + next_token, + ); + } + + Ok(resp.into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 64588a2..88bf861 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -264,6 +264,14 @@ async fn sync_helper( // limited unless there are events in non_timeline_pdus let limited = non_timeline_pdus.next().is_some(); + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; + // Database queries: let current_shortstatehash = db @@ -344,14 +352,51 @@ async fn sync_helper( state_events, ) = if since_shortstatehash.is_none() { // Probably since = 0, we will do an initial sync + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let state_events: Vec<_> = current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = Vec::new(); + + for (_, id) in current_state_ids { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + let state_key = pdu + .state_key + .as_ref() + .expect("state events have state keys"); + if pdu.kind != EventType::RoomMember { + state_events.push(pdu); + } else if full_state || timeline_users.contains(state_key) { + // TODO: check filter: is ll enabled? + lazy_loaded.push( + UserId::parse(state_key.as_ref()) + .expect("they are in timeline_users, so they should be correct"), + ); + state_events.push(pdu); + } + } + + // Reset lazy loading because this is an initial sync + db.rooms + .lazy_load_reset(&sender_user, &sender_device, &room_id)?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); ( heroes, @@ -387,20 +432,66 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - let state_events = if joined_since_last_sync { + /* + let state_events = if joined_since_last_sync || full_state { current_state_ids .iter() .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect::>() } else { - current_state_ids - .iter() - .filter(|(key, id)| since_state_ids.get(key) != Some(id)) - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect() - }; + */ + let mut state_events = Vec::new(); + let mut lazy_loaded = Vec::new(); + + for (key, id) in current_state_ids { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + let state_key = pdu + .state_key + .as_ref() + .expect("state events have state keys"); + + if pdu.kind != EventType::RoomMember { + if full_state || since_state_ids.get(&key) != Some(&id) { + state_events.push(pdu); + } + continue; + } + + // Pdu has to be a member event + let state_key_userid = UserId::parse(state_key.as_ref()) + .expect("they are in timeline_users, so they should be correct"); + + if full_state || since_state_ids.get(&key) != Some(&id) { + lazy_loaded.push(state_key_userid); + state_events.push(pdu); + } else if timeline_users.contains(state_key) + && !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &state_key_userid, + )? + { + lazy_loaded.push(state_key_userid); + state_events.push(pdu); + } + } + + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); let encrypted_room = db .rooms diff --git a/src/database.rs b/src/database.rs index af6136b..9e02019 100644 --- a/src/database.rs +++ b/src/database.rs @@ -288,6 +288,8 @@ impl Database { userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + lazyloadedids: builder.open_tree("lazyloadedids")?, + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, @@ -323,6 +325,7 @@ impl Database { statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 775e2f8..b957b55 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -28,7 +28,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res::{self, RoomVersion, StateMap}, - uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; @@ -79,6 +79,8 @@ pub struct Rooms { pub(super) userroomid_leftstate: Arc, pub(super) roomuserid_leftcount: Arc, + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 @@ -117,6 +119,8 @@ pub struct Rooms { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, + pub(super) lazy_load_waiting: + Mutex, Box, Box, u64), Vec>>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -3466,4 +3470,94 @@ impl Rooms { Ok(()) } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + + 
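The lazy-loading bookkeeping added around this point is a two-phase handshake. `lazy_load_mark_sent` (below) only records which member IDs went into a response, in the in-memory `lazy_load_waiting` map keyed by `(user, device, room, token)`; `lazy_load_confirm_delivery` persists that set into the `lazyloadedids` tree once the client comes back with the same token as its `since`/`from` parameter, i.e. only after the previous response is known to have been delivered; and `lazy_load_was_sent_before` (above) is the lookup /sync and /messages use to decide whether a membership event still needs to be included. The following is a condensed sketch of that idea using plain standard-library collections; the struct, field and method names are illustrative only and are not Conduit's actual `Tree`-based API:

use std::collections::{HashMap, HashSet};

struct LazyLoadTracker {
    // Stand-in for the persistent `lazyloadedids` tree: members already sent to a device.
    sent: HashSet<(String, String, String, String)>,
    // Stand-in for `lazy_load_waiting`: members sent in a response not yet acknowledged.
    waiting: HashMap<(String, String, String, u64), HashSet<String>>,
}

impl LazyLoadTracker {
    fn was_sent_before(&self, user: &str, device: &str, room: &str, member: &str) -> bool {
        self.sent.contains(&(
            user.to_owned(),
            device.to_owned(),
            room.to_owned(),
            member.to_owned(),
        ))
    }

    // Phase 1: remember what went into the response generated for token `count`.
    fn mark_sent(
        &mut self,
        user: &str,
        device: &str,
        room: &str,
        members: HashSet<String>,
        count: u64,
    ) {
        self.waiting.insert(
            (user.to_owned(), device.to_owned(), room.to_owned(), count),
            members,
        );
    }

    // Phase 2: the client requested the next batch with `since == count`, so the
    // previous response was delivered and its members become permanently "sent".
    fn confirm_delivery(&mut self, user: &str, device: &str, room: &str, since: u64) {
        if let Some(members) = self.waiting.remove(&(
            user.to_owned(),
            device.to_owned(),
            room.to_owned(),
            since,
        )) {
            for m in members {
                self.sent
                    .insert((user.to_owned(), device.to_owned(), room.to_owned(), m));
            }
        }
    }
}

fn main() {
    let mut t = LazyLoadTracker {
        sent: HashSet::new(),
        waiting: HashMap::new(),
    };
    t.mark_sent(
        "@a:srv",
        "DEV",
        "!room:srv",
        HashSet::from(["@b:srv".to_owned()]),
        5,
    );
    assert!(!t.was_sent_before("@a:srv", "DEV", "!room:srv", "@b:srv"));
    t.confirm_delivery("@a:srv", "DEV", "!room:srv", 5); // client synced with since=5
    assert!(t.was_sent_before("@a:srv", "DEV", "!room:srv", "@b:srv"));
}

The point of the in-memory staging step is that a response which never reaches the client does not mark its members as sent; the trade-off is that unconfirmed entries are lost on restart and those members may simply be sent again.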
#[tracing::instrument(skip(self))] + pub fn lazy_load_mark_sent( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: Vec>, + count: u64, + ) { + self.lazy_load_waiting.lock().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + count, + ), + lazy_load, + ); + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(&room_id.as_bytes()); + prefix.push(0xff); + + for ll_id in user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(&ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset( + &self, + user_id: &Box, + device_id: &Box, + room_id: &Box, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(&room_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } } From 1bd9fd74b31383105526ea27b6df8d461aacc223 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Jan 2022 18:15:00 +0100 Subject: [PATCH 0882/1727] feat: partially support sync filters --- src/client_server/filter.rs | 57 ++++++++++++++++--------- src/client_server/message.rs | 5 ++- src/client_server/sync.rs | 83 ++++++++++++++++++++---------------- src/database.rs | 1 + src/database/users.rs | 48 ++++++++++++++++++++- 5 files changed, 133 insertions(+), 61 deletions(-) diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index dfb5377..f8845f1 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,32 +1,47 @@ -use crate::{utils, ConduitResult}; -use ruma::api::client::r0::filter::{self, create_filter, get_filter}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::filter::{create_filter, get_filter}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// -/// TODO: Loads a filter that was previously created. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] -#[tracing::instrument] -pub async fn get_filter_route() -> ConduitResult { - // TODO - Ok(get_filter::Response::new(filter::IncomingFilterDefinition { - event_fields: None, - event_format: filter::EventFormat::default(), - account_data: filter::IncomingFilter::default(), - room: filter::IncomingRoomFilter::default(), - presence: filter::IncomingFilter::default(), - }) - .into()) +/// Loads a filter that was previously created. 
+/// +/// - A user can only access their own filters +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/filter/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_filter_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let filter = match db.users.get_filter(sender_user, &body.filter_id)? { + Some(filter) => filter, + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), + }; + + Ok(get_filter::Response::new(filter).into()) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// -/// TODO: Creates a new filter to be used by other endpoints. -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] -#[tracing::instrument] -pub async fn create_filter_route() -> ConduitResult { - // TODO - Ok(create_filter::Response::new(utils::random_string(10)).into()) +/// Creates a new filter to be used by other endpoints. +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/user/<_>/filter", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn create_filter_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 48ca4ae..899c45a 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -138,6 +138,9 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -224,8 +227,6 @@ pub async fn get_message_events_route( } } - db.rooms - .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 88bf861..6d8ac28 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,10 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ - api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, + api::client::r0::{ + filter::{IncomingFilterDefinition, LazyLoadOptions}, + sync::sync_events, + uiaa::UiaaResponse, + }, events::{ room::member::{MembershipState, RoomMemberEventContent}, AnySyncEphemeralRoomEvent, EventType, @@ -77,34 +81,32 @@ pub async fn sync_events_route( Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(None); + v.insert((body.since.clone(), rx.clone())); + tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, + body, tx, )); - v.insert((body.since.clone(), rx)).1.clone() + rx } Entry::Occupied(mut o) => { if o.get().0 != body.since { let (tx, rx) = tokio::sync::watch::channel(None); + o.insert((body.since.clone(), rx.clone())); + tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, + body, tx, )); - o.insert((body.since.clone(), rx.clone())); - rx } else { o.get().1.clone() @@ -135,18 +137,16 @@ async fn 
sync_helper_wrapper( db: Arc, sender_user: Box, sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, + body: sync_events::IncomingRequest, tx: Sender>>, ) { + let since = body.since.clone(); + let r = sync_helper( Arc::clone(&db), sender_user.clone(), sender_device.clone(), - since.clone(), - full_state, - timeout, + body, ) .await; @@ -179,9 +179,7 @@ async fn sync_helper( db: Arc, sender_user: Box, sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, + body: sync_events::IncomingRequest, // bool = caching allowed ) -> Result<(sync_events::Response, bool), Error> { // TODO: match body.set_presence { @@ -193,8 +191,26 @@ async fn sync_helper( let next_batch = db.globals.current_count()?; let next_batch_string = next_batch.to_string(); + // Load filter + let filter = match body.filter { + None => IncomingFilterDefinition::default(), + Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter, + Some(sync_events::IncomingFilter::FilterId(filter_id)) => db + .users + .get_filter(&sender_user, &filter_id)? + .unwrap_or_default(), + }; + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + _ => (false, false), + }; + let mut joined_rooms = BTreeMap::new(); - let since = since + let since = body + .since .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); @@ -374,8 +390,10 @@ async fn sync_helper( .expect("state events have state keys"); if pdu.kind != EventType::RoomMember { state_events.push(pdu); - } else if full_state || timeline_users.contains(state_key) { - // TODO: check filter: is ll enabled? + } else if !lazy_load_enabled + || body.full_state + || timeline_users.contains(state_key) + { lazy_loaded.push( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), @@ -432,15 +450,6 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - /* - let state_events = if joined_since_last_sync || full_state { - current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect::>() - } else { - */ let mut state_events = Vec::new(); let mut lazy_loaded = Vec::new(); @@ -459,7 +468,7 @@ async fn sync_helper( .expect("state events have state keys"); if pdu.kind != EventType::RoomMember { - if full_state || since_state_ids.get(&key) != Some(&id) { + if body.full_state || since_state_ids.get(&key) != Some(&id) { state_events.push(pdu); } continue; @@ -469,16 +478,16 @@ async fn sync_helper( let state_key_userid = UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"); - if full_state || since_state_ids.get(&key) != Some(&id) { + if body.full_state || since_state_ids.get(&key) != Some(&id) { lazy_loaded.push(state_key_userid); state_events.push(pdu); } else if timeline_users.contains(state_key) - && !db.rooms.lazy_load_was_sent_before( + && (!db.rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &state_key_userid, - )? + )? 
|| lazy_load_send_redundant) { lazy_loaded.push(state_key_userid); state_events.push(pdu); @@ -858,7 +867,7 @@ async fn sync_helper( }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !full_state + if !body.full_state && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() @@ -867,7 +876,7 @@ async fn sync_helper( { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives - let mut duration = timeout.unwrap_or_default(); + let mut duration = body.timeout.unwrap_or_default(); if duration.as_secs() > 30 { duration = Duration::from_secs(30); } diff --git a/src/database.rs b/src/database.rs index 9e02019..ddf701b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -249,6 +249,7 @@ impl Database { userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, todeviceid_events: builder.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { diff --git a/src/database/users.rs b/src/database/users.rs index 63a63f0..c4fcee3 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,6 +1,9 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::{error::ErrorKind, r0::device::Device}, + api::client::{ + error::ErrorKind, + r0::{device::Device, filter::IncomingFilterDefinition}, + }, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, @@ -36,6 +39,8 @@ pub struct Users { pub(super) userid_selfsigningkeyid: Arc, pub(super) userid_usersigningkeyid: Arc, + pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count } @@ -996,6 +1001,47 @@ impl Users { // TODO: Unhook 3PID Ok(()) } + + /// Creates a new sync filter. Returns the filter id. 
+ #[tracing::instrument(skip(self))] + pub fn create_filter( + &self, + user_id: &UserId, + filter: &IncomingFilterDefinition, + ) -> Result { + let filter_id = utils::random_string(4); + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); + + self.userfilterid_filter.insert( + &key, + &serde_json::to_vec(&filter).expect("filter is valid json"), + )?; + + Ok(filter_id) + } + + #[tracing::instrument(skip(self))] + pub fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); + + let raw = self.userfilterid_filter.get(&key)?; + + if let Some(raw) = raw { + serde_json::from_slice(&raw) + .map_err(|_| Error::bad_database("Invalid filter event in db.")) + } else { + Ok(None) + } + } } /// Ensure that a user only sees signatures from themselves and the target user From 93d225fd1ec186d1957c670b0e6f7f161888dd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Jan 2022 20:31:20 +0100 Subject: [PATCH 0883/1727] improvement: faster way to load required state --- src/client_server/sync.rs | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6d8ac28..a41e728 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -376,24 +376,29 @@ async fn sync_helper( let mut state_events = Vec::new(); let mut lazy_loaded = Vec::new(); - for (_, id) in current_state_ids { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - let state_key = pdu - .state_key - .as_ref() - .expect("state events have state keys"); - if pdu.kind != EventType::RoomMember { + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + + if event_type != EventType::RoomMember { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; state_events.push(pdu); } else if !lazy_load_enabled || body.full_state - || timeline_users.contains(state_key) + || timeline_users.contains(&state_key) { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; lazy_loaded.push( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), From f285c89006e48b0644f421ae399f1a8eb47e37f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 6 Jan 2022 00:15:34 +0100 Subject: [PATCH 0884/1727] fix: make incremental sync efficient again --- src/client_server/message.rs | 2 +- src/client_server/sync.rs | 79 ++++++++++++++++++++---------------- src/database/rooms.rs | 4 +- 3 files changed, 48 insertions(+), 37 deletions(-) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 899c45a..9705e4c 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -242,7 +242,7 @@ pub async fn get_message_events_route( &sender_user, &sender_device, &body.room_id, - lazy_loaded.into_iter().collect(), + lazy_loaded, next_token, ); } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index a41e728..c201440 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -374,7 +374,7 @@ async fn sync_helper( let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let mut state_events = Vec::new(); - let mut lazy_loaded = Vec::new(); + let mut lazy_loaded = HashSet::new(); for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; @@ -399,7 +399,7 @@ async fn sync_helper( continue; } }; - lazy_loaded.push( + lazy_loaded.insert( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), ); @@ -456,46 +456,57 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; let mut state_events = Vec::new(); - let mut lazy_loaded = Vec::new(); + let mut lazy_loaded = HashSet::new(); for (key, id) in current_state_ids { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == EventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + } + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { continue; } - }; - let state_key = pdu - .state_key - .as_ref() - .expect("state events have state keys"); - - if pdu.kind != EventType::RoomMember { - if body.full_state || since_state_ids.get(&key) != Some(&id) { - state_events.push(pdu); - } - continue; - } - - // Pdu has to be a member event - let state_key_userid = UserId::parse(state_key.as_ref()) - .expect("they are in timeline_users, so they should be correct"); - - if body.full_state || since_state_ids.get(&key) != Some(&id) { - lazy_loaded.push(state_key_userid); - state_events.push(pdu); - } else if timeline_users.contains(state_key) - && (!db.rooms.lazy_load_was_sent_before( + if !db.rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, - &state_key_userid, - )? || lazy_load_send_redundant) - { - lazy_loaded.push(state_key_userid); - state_events.push(pdu); + &event.sender, + )? 
|| lazy_load_send_redundant + { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + lazy_loaded.insert(event.sender.clone()); + state_events.push(pdu); + } } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b957b55..600f46d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -120,7 +120,7 @@ pub struct Rooms { pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), Vec>>>, + Mutex, Box, Box, u64), HashSet>>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -3495,7 +3495,7 @@ impl Rooms { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - lazy_load: Vec>, + lazy_load: HashSet>, count: u64, ) { self.lazy_load_waiting.lock().unwrap().insert( From c6d88359d7aae985f9688dddb321d07ef2043708 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 7 Jan 2022 09:56:09 +0100 Subject: [PATCH 0885/1727] fix: incremental lazy loading --- src/client_server/sync.rs | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index c201440..a612289 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -484,28 +484,27 @@ async fn sync_helper( state_events.push(pdu); } - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } + } - if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = db.rooms.room_state_get( &room_id, - &event.sender, - )? || lazy_load_send_redundant - { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - + &EventType::RoomMember, + event.sender.as_str(), + )? 
{ lazy_loaded.insert(event.sender.clone()); - state_events.push(pdu); + state_events.push(member_event); } } } From 4f39d36e980d8f4e6fcc7ae7c9a292db52d915e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Jan 2022 13:42:25 +0100 Subject: [PATCH 0886/1727] docs: lazy loading --- src/client_server/sync.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index a612289..bd2f48a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -40,13 +40,15 @@ use rocket::{get, tokio}; /// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: /// For joined rooms: /// - Some of the most recent events of each timeline that happened after since -/// - If user joined the room after since: All state events and device list updates in that room +/// - If user joined the room after since: All state events (unless lazy loading is activated) and +/// all device list updates in that room /// - If the user was already in the room: A list of all events that are in the state now, but were /// not in the state at `since` /// - If the state we send contains a member event: Joined and invited member counts, heroes /// - Device list updates that happened after `since` /// - If there are events in the timeline we send or the user send updated his read mark: Notification counts /// - EDUs that are active now (read receipts, typing updates, presence) +/// - TODO: Allow multiple sync streams to support Pantalaimon /// /// For invited rooms: /// - If the user was invited after `since`: A subset of the state of the room at the point of the invite From fa6d7f7ccd14426f1fc2d802fff021b06f39bf02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Jan 2022 16:44:44 +0100 Subject: [PATCH 0887/1727] feat: database backend selection at runtime --- Cargo.toml | 2 +- conduit-example.toml | 15 ++- src/database.rs | 140 +++++++++++++++++----------- src/database/abstraction.rs | 13 ++- src/database/abstraction/rocksdb.rs | 9 +- src/database/abstraction/sqlite.rs | 14 ++- src/utils.rs | 11 --- 7 files changed, 116 insertions(+), 88 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6241b6a..c898d4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,7 +85,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" [features] -default = ["conduit_bin", "backend_rocksdb"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/conduit-example.toml b/conduit-example.toml index 4275f52..c0274a4 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -1,11 +1,15 @@ [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. 
See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS @@ -13,6 +17,7 @@ # This is the only directory where Conduit will save its data database_path = "/var/lib/conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port diff --git a/src/database.rs b/src/database.rs index ddf701b..c2b3e2b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -44,13 +44,15 @@ use self::proxy::ProxyConfig; #[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, + #[serde(default = "default_database_backend")] + database_backend: String, database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, #[serde(default = "default_pdu_cache_capacity")] pdu_cache_capacity: u32, - #[serde(default = "default_sqlite_wal_clean_second_interval")] - sqlite_wal_clean_second_interval: u32, + #[serde(default = "default_cleanup_second_interval")] + cleanup_second_interval: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -117,6 +119,10 @@ fn true_fn() -> bool { true } +fn default_database_backend() -> String { + "sqlite".to_owned() +} + fn default_db_cache_capacity_mb() -> f64 { 200.0 } @@ -125,7 +131,7 @@ fn default_pdu_cache_capacity() -> u32 { 100_000 } -fn default_sqlite_wal_clean_second_interval() -> u32 { +fn default_cleanup_second_interval() -> u32 { 1 * 60 // every minute } @@ -145,20 +151,8 @@ fn default_turn_ttl() -> u64 { 60 * 60 * 24 } -#[cfg(feature = "sled")] -pub type Engine = abstraction::sled::Engine; - -#[cfg(feature = "sqlite")] -pub type Engine = abstraction::sqlite::Engine; - -#[cfg(feature = "heed")] -pub type Engine = abstraction::heed::Engine; - -#[cfg(feature = "rocksdb")] -pub type Engine = abstraction::rocksdb::Engine; - pub struct Database { - _db: Arc, + _db: Arc, pub globals: globals::Globals, pub users: users::Users, pub uiaa: uiaa::Uiaa, @@ -186,27 +180,53 @@ impl Database { Ok(()) } - fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { - #[cfg(feature = "backend_sqlite")] - { - let path = Path::new(&config.database_path); + fn check_db_setup(config: &Config) -> Result<()> { + let path = Path::new(&config.database_path); - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - if sled_exists { - if sqlite_exists { - // most likely an in-place directory, only warn - warn!("Both sled and sqlite databases are detected in database directory"); - warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") - } else { - error!( - "Sled database detected, conduit now uses sqlite for database operations" - ); - error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); - return Err(Error::bad_config( - "sled database detected, migrate to sqlite", - )); - } + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + let rocksdb_exists = path.join("IDENTITY").exists(); + + let mut count = 0; + + if sled_exists { + count += 1; + } + + if sqlite_exists { + count += 1; 
+ } + + if rocksdb_exists { + count += 1; + } + + if count > 1 { + warn!("Multiple databases at database_path detected"); + return Ok(()); + } + + if sled_exists { + if config.database_backend != "sled" { + return Err(Error::bad_config( + "Found sled at database_path, but is not specified in config.", + )); + } + } + + if sqlite_exists { + if config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); + } + } + + if rocksdb_exists { + if config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); } } @@ -215,14 +235,30 @@ impl Database { /// Load an existing database or create a new one. pub async fn load_or_create(config: &Config) -> Result>> { - Self::check_sled_or_sqlite_db(config)?; + Self::check_db_setup(config)?; if !Path::new(&config.database_path).exists() { std::fs::create_dir_all(&config.database_path) .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } - let builder = Engine::open(config)?; + let builder: Arc = match &*config.database_backend { + "sqlite" => { + #[cfg(not(feature = "sqlite"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "sqlite")] + Arc::new(Arc::::open(config)?) + } + "rocksdb" => { + #[cfg(not(feature = "rocksdb"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "rocksdb")] + Arc::new(Arc::::open(config)?) + } + _ => { + return Err(Error::BadConfig("Database backend not found.")); + } + }; if config.max_request_size < 1024 { eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); @@ -784,10 +820,7 @@ impl Database { drop(guard); - #[cfg(feature = "sqlite")] - { - Self::start_wal_clean_task(Arc::clone(&db), config).await; - } + Self::start_cleanup_task(Arc::clone(&db), config).await; Ok(db) } @@ -925,15 +958,8 @@ impl Database { res } - #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(self))] - pub fn flush_wal(&self) -> Result<()> { - self._db.flush_wal() - } - - #[cfg(feature = "sqlite")] #[tracing::instrument(skip(db, config))] - pub async fn start_wal_clean_task(db: Arc>, config: &Config) { + pub async fn start_cleanup_task(db: Arc>, config: &Config) { use tokio::time::interval; #[cfg(unix)] @@ -942,7 +968,7 @@ impl Database { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64); + let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); @@ -953,23 +979,23 @@ impl Database { #[cfg(unix)] tokio::select! 
{ _ = i.tick() => { - info!("wal-trunc: Timer ticked"); + info!("cleanup: Timer ticked"); } _ = s.recv() => { - info!("wal-trunc: Received SIGHUP"); + info!("cleanup: Received SIGHUP"); } }; #[cfg(not(unix))] { i.tick().await; - info!("wal-trunc: Timer ticked") + info!("cleanup: Timer ticked") } let start = Instant::now(); - if let Err(e) = db.read().await.flush_wal() { - error!("wal-trunc: Errored: {}", e); + if let Err(e) = db.read().await._db.cleanup() { + error!("cleanup: Errored: {}", e); } else { - info!("wal-trunc: Flushed in {:?}", start.elapsed()); + info!("cleanup: Finished in {:?}", start.elapsed()); } } }); diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index a347f83..45627bb 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -18,10 +18,15 @@ pub mod rocksdb; #[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] pub mod watchers; -pub trait DatabaseEngine: Sized { - fn open(config: &Config) -> Result>; - fn open_tree(self: &Arc, name: &'static str) -> Result>; - fn flush(self: &Arc) -> Result<()>; +pub trait DatabaseEngine: Send + Sync { + fn open(config: &Config) -> Result + where + Self: Sized; + fn open_tree(&self, name: &'static str) -> Result>; + fn flush(self: &Self) -> Result<()>; + fn cleanup(self: &Self) -> Result<()> { + Ok(()) + } } pub trait Tree: Send + Sync { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 397047b..a41ed1f 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -14,8 +14,8 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()> } -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); db_opts.set_max_open_files(512); @@ -60,7 +60,7 @@ impl DatabaseEngine for Engine { })) } - fn open_tree(self: &Arc, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist let mut options = rocksdb::Options::default(); @@ -68,7 +68,6 @@ impl DatabaseEngine for Engine { options.set_prefix_extractor(prefix_extractor); let _ = self.rocks.create_cf(name, &options); - println!("created cf"); } Ok(Arc::new(RocksDbEngineTree { @@ -79,7 +78,7 @@ impl DatabaseEngine for Engine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { // TODO? 
Ok(()) } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 3187566..d4fd0bd 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -80,8 +80,8 @@ impl Engine { } } -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let path = Path::new(&config.database_path).join("conduit.db"); // calculates cache-size per permanent connection @@ -92,7 +92,7 @@ impl DatabaseEngine for Engine { / ((num_cpus::get().max(1) * 2) + 1) as f64) as u32; - let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?); + let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { writer, @@ -105,7 +105,7 @@ impl DatabaseEngine for Engine { Ok(arc) } - fn open_tree(self: &Arc, name: &str) -> Result> { + fn open_tree(&self, name: &str) -> Result> { self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { @@ -115,10 +115,14 @@ impl DatabaseEngine for Engine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { // we enabled PRAGMA synchronous=normal, so this should not be necessary Ok(()) } + + fn cleanup(&self) -> Result<()> { + self.flush_wal() + } } pub struct SqliteTable { diff --git a/src/utils.rs b/src/utils.rs index 4702d05..26d71a8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,17 +29,6 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } -#[cfg(feature = "rocksdb")] -pub fn increment_rocksdb( - _new_key: &[u8], - old: Option<&[u8]>, - _operands: &mut rocksdb::MergeOperands, -) -> Option> { - dbg!(_new_key); - dbg!(old); - increment(old) -} - pub fn generate_keypair() -> Vec { let mut value = random_string(8).as_bytes().to_vec(); value.push(0xff); From 71431f330aadb1ee92cd63a36351af834aa65215 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:07:03 +0100 Subject: [PATCH 0888/1727] Add memory_usage() to DatabaseEngine trait --- src/database/abstraction.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 45627bb..17bd971 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -27,6 +27,9 @@ pub trait DatabaseEngine: Send + Sync { fn cleanup(self: &Self) -> Result<()> { Ok(()) } + fn memory_usage(self: &Self) -> Result { + Ok("Current database engine does not support memory usage reporting.".to_string()) + } } pub trait Tree: Send + Sync { From ff243870f850c07907a6944151fd909c234da662 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:07:50 +0100 Subject: [PATCH 0889/1727] Add "database_memory_usage" AdminCommand --- src/database/admin.rs | 8 ++++++++ src/database/rooms.rs | 3 +++ 2 files changed, 11 insertions(+) diff --git a/src/database/admin.rs b/src/database/admin.rs index 0702bcd..c308330 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -14,6 +14,7 @@ pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), UnregisterAppservice(String), ListAppservices, + ShowMemoryUsage, SendMessage(RoomMessageEventContent), } @@ -113,6 +114,13 @@ impl Admin { send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); } } + AdminCommand::ShowMemoryUsage => { + if let Ok(response) = guard._db.memory_usage() { + 
send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); + } else { + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage".to_string()), guard, &state_lock); + } + } AdminCommand::SendMessage(message) => { send_message(message, guard, &state_lock); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 600f46d..0ba6c9b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1693,6 +1693,9 @@ impl Rooms { )); } } + "database_memory_usage" => { + db.admin.send(AdminCommand::ShowMemoryUsage); + } _ => { db.admin.send(AdminCommand::SendMessage( RoomMessageEventContent::text_plain(format!( From 68ee1a5408595804625a6dd0ebab5f333e7f0fe6 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:08:15 +0100 Subject: [PATCH 0890/1727] Add rocksdb implementation of memory_usage() --- src/database/abstraction/rocksdb.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index a41ed1f..f0affd3 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -82,6 +82,19 @@ impl DatabaseEngine for Arc { // TODO? Ok(()) } + + fn memory_usage(&self) -> Result { + let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), None)?; + Ok(format!("Approximate memory usage of all the mem-tables: {:.3} MB\n\ + Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ + Approximate memory usage of all the table readers: {:.3} MB\n\ + Approximate memory usage by cache: {:.3} MB", + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + stats.cache_total as f64 / 1024.0 / 1024.0 + )) + } } impl RocksDbEngineTree<'_> { From 077e9ad4380715688a8ad5a2f40afd7331157bd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 15:53:28 +0100 Subject: [PATCH 0891/1727] improvement: memory usage for caches --- Cargo.lock | 4 +-- Cargo.toml | 2 +- src/database/abstraction/rocksdb.rs | 40 ++++++++++++++++------------- src/database/admin.rs | 2 +- 4 files changed, 26 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794445f..d297102 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2075,9 +2075,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index c898d4d..c87d949 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } +rocksdb = { version = "0.17.0", features = ["multi-threaded-cf"], optional = true } thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index f0affd3..a7dd6e1 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,6 +4,7 @@ use 
std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLoc pub struct Engine { rocks: rocksdb::DBWithThreadMode, + cache: rocksdb::Cache, old_cfs: Vec, } @@ -56,6 +57,7 @@ impl DatabaseEngine for Arc { Ok(Arc::new(Engine { rocks: db, + cache: rocksdb_cache, old_cfs: cfs, })) } @@ -84,33 +86,35 @@ impl DatabaseEngine for Arc { } fn memory_usage(&self) -> Result { - let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), None)?; - Ok(format!("Approximate memory usage of all the mem-tables: {:.3} MB\n\ + let stats = + rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; + Ok(format!( + "Approximate memory usage of all the mem-tables: {:.3} MB\n\ Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ Approximate memory usage of all the table readers: {:.3} MB\n\ Approximate memory usage by cache: {:.3} MB", - stats.mem_table_total as f64 / 1024.0 / 1024.0, - stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, - stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, - stats.cache_total as f64 / 1024.0 / 1024.0 + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + stats.cache_total as f64 / 1024.0 / 1024.0 )) } } impl RocksDbEngineTree<'_> { - fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { + fn cf(&self) -> Arc> { self.db.rocks.cf_handle(self.name).unwrap() } } impl Tree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { - Ok(self.db.rocks.get_cf(self.cf(), key)?) + Ok(self.db.rocks.get_cf(&self.cf(), key)?) } fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let lock = self.write_lock.read().unwrap(); - self.db.rocks.put_cf(self.cf(), key, value)?; + self.db.rocks.put_cf(&self.cf(), key, value)?; drop(lock); self.watchers.wake(key); @@ -120,21 +124,21 @@ impl Tree for RocksDbEngineTree<'_> { fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { for (key, value) in iter { - self.db.rocks.put_cf(self.cf(), key, value)?; + self.db.rocks.put_cf(&self.cf(), key, value)?; } Ok(()) } fn remove(&self, key: &[u8]) -> Result<()> { - Ok(self.db.rocks.delete_cf(self.cf(), key)?) + Ok(self.db.rocks.delete_cf(&self.cf(), key)?) 
} fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { Box::new( self.db .rocks - .iterator_cf(self.cf(), rocksdb::IteratorMode::Start) + .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -148,7 +152,7 @@ impl Tree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf( - self.cf(), + &self.cf(), rocksdb::IteratorMode::From( from, if backwards { @@ -165,9 +169,9 @@ impl Tree for RocksDbEngineTree<'_> { fn increment(&self, key: &[u8]) -> Result> { let lock = self.write_lock.write().unwrap(); - let old = self.db.rocks.get_cf(self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(self.cf(), key, &new)?; + self.db.rocks.put_cf(&self.cf(), key, &new)?; drop(lock); Ok(new) @@ -177,9 +181,9 @@ impl Tree for RocksDbEngineTree<'_> { let lock = self.write_lock.write().unwrap(); for key in iter { - let old = self.db.rocks.get_cf(self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(self.cf(), key, new)?; + self.db.rocks.put_cf(&self.cf(), key, new)?; } drop(lock); @@ -195,7 +199,7 @@ impl Tree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf( - self.cf(), + &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) .map(|(k, v)| (Vec::from(k), Vec::from(v))) diff --git a/src/database/admin.rs b/src/database/admin.rs index c308330..7d2301d 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -118,7 +118,7 @@ impl Admin { if let Ok(response) = guard._db.memory_usage() { send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage".to_string()), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_string()), guard, &state_lock); } } AdminCommand::SendMessage(message) => { From 0bb7d76dec4b3f54b1cbb37e57ddbe54e1dbd38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 20:20:45 +0100 Subject: [PATCH 0892/1727] improvement: rocksdb configuration --- src/database/abstraction/rocksdb.rs | 33 +++++++++++++++-------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index a7dd6e1..3f1793a 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -17,25 +17,21 @@ pub struct RocksDbEngineTree<'a> { impl DatabaseEngine for Arc { fn open(config: &Config) -> Result { - let mut db_opts = rocksdb::Options::default(); - db_opts.create_if_missing(true); - db_opts.set_max_open_files(512); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); - db_opts.set_target_file_size_base(2 << 22); - db_opts.set_max_bytes_for_level_base(2 << 24); - db_opts.set_max_bytes_for_level_multiplier(2.0); - db_opts.set_num_levels(8); - db_opts.set_write_buffer_size(2 << 27); - let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) .unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(2 << 19); block_based_options.set_block_cache(&rocksdb_cache); + + let mut db_opts = rocksdb::Options::default(); 
db_opts.set_block_based_table_factory(&block_based_options); + db_opts.create_if_missing(true); + db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts.set_max_open_files(512); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.optimize_level_style_compaction((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -90,13 +86,18 @@ impl DatabaseEngine for Arc { rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; Ok(format!( "Approximate memory usage of all the mem-tables: {:.3} MB\n\ - Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ - Approximate memory usage of all the table readers: {:.3} MB\n\ - Approximate memory usage by cache: {:.3} MB", + Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ + Approximate memory usage of all the table readers: {:.3} MB\n\ + Approximate memory usage by cache: {:.3} MB\n\ + self.cache.get_usage(): {:.3} MB\n\ + self.cache.get_pinned_usage(): {:.3} MB\n\ + ", stats.mem_table_total as f64 / 1024.0 / 1024.0, stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, - stats.cache_total as f64 / 1024.0 / 1024.0 + stats.cache_total as f64 / 1024.0 / 1024.0, + self.cache.get_usage() as f64 / 1024.0 / 1024.0, + self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } } From b96822b6174de4d404bf0b9013a39f8fd2a06f87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 21:20:29 +0100 Subject: [PATCH 0893/1727] fix: use db options for column families too --- src/database/abstraction/rocksdb.rs | 55 ++++++++++++++++------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 3f1793a..c82e4bc 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,6 +4,7 @@ use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLoc pub struct Engine { rocks: rocksdb::DBWithThreadMode, + cache_capacity_bytes: usize, cache: rocksdb::Cache, old_cfs: Vec, } @@ -15,23 +16,31 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()> } +fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_cache(rocksdb_cache); + + let mut db_opts = rocksdb::Options::default(); + db_opts.set_block_based_table_factory(&block_based_options); + db_opts.create_if_missing(true); + db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts.set_max_open_files(512); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.optimize_level_style_compaction(cache_capacity_bytes); + + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + db_opts.set_prefix_extractor(prefix_extractor); + + db_opts +} + impl DatabaseEngine for Arc { fn open(config: &Config) -> Result { - let rocksdb_cache = - rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) - .unwrap(); + let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; + let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); - let mut block_based_options = 
rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_cache(&rocksdb_cache); - - let mut db_opts = rocksdb::Options::default(); - db_opts.set_block_based_table_factory(&block_based_options); - db_opts.create_if_missing(true); - db_opts.increase_parallelism(num_cpus::get() as i32); - db_opts.set_max_open_files(512); - db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.optimize_level_style_compaction((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize); + let db_opts = db_options(cache_capacity_bytes, &rocksdb_cache); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -43,16 +52,16 @@ impl DatabaseEngine for Arc { &db_opts, &config.database_path, cfs.iter().map(|name| { - let mut options = rocksdb::Options::default(); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); - options.set_prefix_extractor(prefix_extractor); - - rocksdb::ColumnFamilyDescriptor::new(name, options) + rocksdb::ColumnFamilyDescriptor::new( + name, + db_options(cache_capacity_bytes, &rocksdb_cache), + ) }), )?; Ok(Arc::new(Engine { rocks: db, + cache_capacity_bytes, cache: rocksdb_cache, old_cfs: cfs, })) @@ -61,11 +70,9 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let mut options = rocksdb::Options::default(); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); - options.set_prefix_extractor(prefix_extractor); - - let _ = self.rocks.create_cf(name, &options); + let _ = self + .rocks + .create_cf(name, &db_options(self.cache_capacity_bytes, &self.cache)); } Ok(Arc::new(RocksDbEngineTree { From 7f27af032b7d0cb79248607decd1bb5f2a818507 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Jan 2022 10:07:10 +0100 Subject: [PATCH 0894/1727] improvement: optimize rocksdb for spinning disks --- src/database/abstraction/rocksdb.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index c82e4bc..3209556 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -20,8 +20,20 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); + // "Difference of spinning disk" + // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html + block_based_options.set_block_size(64 * 1024); + block_based_options.set_cache_index_and_filter_blocks(true); + let mut db_opts = rocksdb::Options::default(); db_opts.set_block_based_table_factory(&block_based_options); + db_opts.set_optimize_filters_for_hits(true); + db_opts.set_skip_stats_update_on_db_open(true); + db_opts.set_level_compaction_dynamic_level_bytes(true); + db_opts.set_target_file_size_base(256 * 1024 * 1024); + db_opts.set_compaction_readahead_size(2 * 1024 * 1024); + db_opts.set_use_direct_reads(true); + db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); db_opts.set_max_open_files(512); From 9e77f7617cfcdc8d1c0e1b3146cbef6566ed0dc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Jan 2022 12:27:02 +0100 Subject: [PATCH 0895/1727] fix: disable direct IO again --- 
src/database/abstraction/rocksdb.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 3209556..b7f6d3b 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -22,7 +22,7 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro // "Difference of spinning disk" // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html - block_based_options.set_block_size(64 * 1024); + block_based_options.set_block_size(4 * 1024); block_based_options.set_cache_index_and_filter_blocks(true); let mut db_opts = rocksdb::Options::default(); @@ -31,9 +31,9 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro db_opts.set_skip_stats_update_on_db_open(true); db_opts.set_level_compaction_dynamic_level_bytes(true); db_opts.set_target_file_size_base(256 * 1024 * 1024); - db_opts.set_compaction_readahead_size(2 * 1024 * 1024); - db_opts.set_use_direct_reads(true); - db_opts.set_use_direct_io_for_flush_and_compaction(true); + //db_opts.set_compaction_readahead_size(2 * 1024 * 1024); + //db_opts.set_use_direct_reads(true); + //db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); db_opts.set_max_open_files(512); From 447639054e21523aed76e408667c5263ccde85ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:03:53 +0100 Subject: [PATCH 0896/1727] improvement: higher default pdu capacity --- src/client_server/device.rs | 2 +- src/database.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 03a3004..f240f2e 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -85,7 +85,7 @@ pub async fn update_device_route( Ok(update_device::Response {}.into()) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `DELETE /_matrix/client/r0/devices/{deviceId}` /// /// Deletes the given device. 
/// diff --git a/src/database.rs b/src/database.rs index c2b3e2b..9a71e73 100644 --- a/src/database.rs +++ b/src/database.rs @@ -128,7 +128,7 @@ fn default_db_cache_capacity_mb() -> f64 { } fn default_pdu_cache_capacity() -> u32 { - 100_000 + 1_000_000 } fn default_cleanup_second_interval() -> u32 { From a336027b0e45b512c55e4c0b68e095d40ebd01ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:11:45 +0100 Subject: [PATCH 0897/1727] fix: better memory usage message --- src/database/abstraction/rocksdb.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b7f6d3b..d1706d4 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -108,14 +108,12 @@ impl DatabaseEngine for Arc { Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ Approximate memory usage of all the table readers: {:.3} MB\n\ Approximate memory usage by cache: {:.3} MB\n\ - self.cache.get_usage(): {:.3} MB\n\ - self.cache.get_pinned_usage(): {:.3} MB\n\ + Approximate memory usage by cache pinned: {:.3} MB\n\ ", stats.mem_table_total as f64 / 1024.0 / 1024.0, stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, stats.cache_total as f64 / 1024.0 / 1024.0, - self.cache.get_usage() as f64 / 1024.0 / 1024.0, self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } From 6fa01aa9826c2a4f7643289e0b86aee40efc59d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:46:20 +0100 Subject: [PATCH 0898/1727] fix: remove dbg --- src/database/abstraction/sqlite.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index d4fd0bd..f80f50e 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -136,7 +136,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { - dbg!(&self.name); + //dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? 
.query_row([key], |row| row.get(0)) @@ -145,7 +145,7 @@ impl SqliteTable { #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { - dbg!(&self.name); + //dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -179,7 +179,7 @@ impl SqliteTable { .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); @@ -286,7 +286,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); @@ -311,7 +311,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); From 16f826773bc26dd388f04e3e862bef7d1be9cdeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 22:47:30 +0100 Subject: [PATCH 0899/1727] refactor: fix warnings --- src/database/abstraction/rocksdb.rs | 4 ++-- src/database/abstraction/sqlite.rs | 4 ++-- src/database/abstraction/watchers.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index d1706d4..79a3d82 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,6 +1,6 @@ use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; -use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLock}; +use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, @@ -13,7 +13,7 @@ pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, watchers: Watchers, - write_lock: RwLock<()> + write_lock: RwLock<()>, } fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f80f50e..d4aab7d 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -172,7 +172,7 @@ impl SqliteTable { let statement_ref = NonAliasingBox(statement); - let name = self.name.clone(); + //let name = self.name.clone(); let iterator = Box::new( statement @@ -267,7 +267,7 @@ impl Tree for SqliteTable { let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? 
- let name = self.name.clone(); + //let name = self.name.clone(); if backwards { let statement = Box::leak(Box::new( diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index fec1f27..55cb60b 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -1,8 +1,8 @@ use std::{ collections::{hash_map, HashMap}, - sync::RwLock, future::Future, pin::Pin, + sync::RwLock, }; use tokio::sync::watch; From f67785caaf6a4be5c7d330df0f7a89781aa21f91 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 13 Jan 2022 22:24:47 +0000 Subject: [PATCH 0900/1727] Fix(ci): Disable CARGO_HOME caching --- .gitlab-ci.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1dedd8f..f47327b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,18 +23,12 @@ variables: interruptible: true image: "rust:latest" tags: ["docker"] - cache: - paths: - - cargohome - key: "build_cache--$TARGET--$CI_COMMIT_BRANCH" variables: CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - CARGO_HOME: $CI_PROJECT_DIR/cargohome before_script: - 'echo "Building for target $TARGET"' - - "mkdir -p $CARGO_HOME" - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: @@ -219,15 +213,10 @@ test:cargo: image: "rust:latest" tags: ["docker"] variables: - CARGO_HOME: "$CI_PROJECT_DIR/cargohome" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - cache: - paths: - - cargohome - key: "test_cache--$CI_COMMIT_BRANCH" interruptible: true before_script: - - mkdir -p $CARGO_HOME + # - mkdir -p $CARGO_HOME - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - rustup component add clippy rustfmt From 80e51986c42ea449a3f1d7860c16722431f4fcaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 14 Jan 2022 11:08:31 +0100 Subject: [PATCH 0901/1727] improvement: better default cache capacity --- src/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 9a71e73..d688ff9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -124,7 +124,7 @@ fn default_database_backend() -> String { } fn default_db_cache_capacity_mb() -> f64 { - 200.0 + 10.0 } fn default_pdu_cache_capacity() -> u32 { From d434dfb3a56afde239023685ca0a8d191355314b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 14 Jan 2022 11:40:49 +0100 Subject: [PATCH 0902/1727] feat: config option for rocksdb max open files --- src/database.rs | 6 ++++++ src/database/abstraction/rocksdb.rs | 29 ++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/database.rs b/src/database.rs index d688ff9..fd7a145 100644 --- a/src/database.rs +++ b/src/database.rs @@ -49,6 +49,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_rocksdb_max_open_files")] + rocksdb_max_open_files: i32, #[serde(default = "default_pdu_cache_capacity")] pdu_cache_capacity: u32, #[serde(default = "default_cleanup_second_interval")] @@ -127,6 +129,10 @@ fn 
default_db_cache_capacity_mb() -> f64 { 10.0 } +fn default_rocksdb_max_open_files() -> i32 { + 512 +} + fn default_pdu_cache_capacity() -> u32 { 1_000_000 } diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 79a3d82..adda678 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -5,6 +5,7 @@ use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, cache_capacity_bytes: usize, + max_open_files: i32, cache: rocksdb::Cache, old_cfs: Vec, } @@ -16,7 +17,11 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()>, } -fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { +fn db_options( + cache_capacity_bytes: usize, + max_open_files: i32, + rocksdb_cache: &rocksdb::Cache, +) -> rocksdb::Options { let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); @@ -36,7 +41,7 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro //db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); - db_opts.set_max_open_files(512); + db_opts.set_max_open_files(max_open_files); db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); db_opts.optimize_level_style_compaction(cache_capacity_bytes); @@ -52,7 +57,11 @@ impl DatabaseEngine for Arc { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); - let db_opts = db_options(cache_capacity_bytes, &rocksdb_cache); + let db_opts = db_options( + cache_capacity_bytes, + config.rocksdb_max_open_files, + &rocksdb_cache, + ); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -66,7 +75,11 @@ impl DatabaseEngine for Arc { cfs.iter().map(|name| { rocksdb::ColumnFamilyDescriptor::new( name, - db_options(cache_capacity_bytes, &rocksdb_cache), + db_options( + cache_capacity_bytes, + config.rocksdb_max_open_files, + &rocksdb_cache, + ), ) }), )?; @@ -74,6 +87,7 @@ impl DatabaseEngine for Arc { Ok(Arc::new(Engine { rocks: db, cache_capacity_bytes, + max_open_files: config.rocksdb_max_open_files, cache: rocksdb_cache, old_cfs: cfs, })) @@ -82,9 +96,10 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let _ = self - .rocks - .create_cf(name, &db_options(self.cache_capacity_bytes, &self.cache)); + let _ = self.rocks.create_cf( + name, + &db_options(self.cache_capacity_bytes, self.max_open_files, &self.cache), + ); } Ok(Arc::new(RocksDbEngineTree { From ab15ec6c32f2e5463369fe7a29f5ea2e6d9c4f2d Mon Sep 17 00:00:00 2001 From: Tglman Date: Fri, 18 Jun 2021 00:38:32 +0100 Subject: [PATCH 0903/1727] feat: Integration with persy using background ops --- Cargo.toml | 5 + src/database.rs | 6 + src/database/abstraction.rs | 5 +- src/database/abstraction/persy.rs | 245 ++++++++++++++++++++++++++++++ src/error.rs | 15 ++ 5 files changed, 275 insertions(+), 1 deletion(-) create mode 100644 src/database/abstraction/persy.rs diff --git a/Cargo.toml b/Cargo.toml index c87d949..2dbd3fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,10 @@ tokio = "1.11.0" # Used for storing data permanently sled = { version = "0.34.6", features = 
["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } +# Used by the persy write cache for background flush +timer = "0.2" +chrono = "0.4" # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -87,6 +91,7 @@ sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] +backend_persy = ["persy"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] diff --git a/src/database.rs b/src/database.rs index d688ff9..c2cd9f2 100644 --- a/src/database.rs +++ b/src/database.rs @@ -255,6 +255,12 @@ impl Database { #[cfg(feature = "rocksdb")] Arc::new(Arc::::open(config)?) } + "persy" => { + #[cfg(not(feature = "persy"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "persy")] + Arc::new(Arc::::open(config)?) + } _ => { return Err(Error::BadConfig("Database backend not found.")); } diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 17bd971..9a3771f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -15,7 +15,10 @@ pub mod heed; #[cfg(feature = "rocksdb")] pub mod rocksdb; -#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] +#[cfg(feature = "persy")] +pub mod persy; + +#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed", feature="persy"))] pub mod watchers; pub trait DatabaseEngine: Send + Sync { diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs new file mode 100644 index 0000000..5d633ab --- /dev/null +++ b/src/database/abstraction/persy.rs @@ -0,0 +1,245 @@ +use crate::{ + database::{ + abstraction::{DatabaseEngine, Tree}, + Config, + }, + Result, +}; +use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; + +use std::{ + collections::HashMap, + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; + +use tokio::sync::oneshot::Sender; +use tracing::warn; + +pub struct PersyEngine { + persy: Persy, +} + +impl DatabaseEngine for PersyEngine { + fn open(config: &Config) -> Result> { + let mut cfg = persy::Config::new(); + cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); + + let persy = OpenOptions::new() + .create(true) + .config(cfg) + .open(&format!("{}/db.persy", config.database_path))?; + Ok(Arc::new(PersyEngine { persy })) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + // Create if it doesn't exist + if !self.persy.exists_index(name)? { + let mut tx = self.persy.begin()?; + tx.create_index::(name, ValueMode::Replace)?; + tx.prepare()?.commit()?; + } + + Ok(Arc::new(PersyTree { + persy: self.persy.clone(), + name: name.to_owned(), + watchers: RwLock::new(HashMap::new()), + })) + } + + fn flush(self: &Arc) -> Result<()> { + Ok(()) + } +} + +pub struct PersyTree { + persy: Persy, + name: String, + watchers: RwLock, Vec>>>, +} + +impl PersyTree { + fn begin(&self) -> Result { + Ok(self + .persy + .begin_with(TransactionConfig::new().set_background_sync(true))?) + } +} + +impl Tree for PersyTree { + #[tracing::instrument(skip(self, key))] + fn get(&self, key: &[u8]) -> Result>> { + let result = self + .persy + .get::(&self.name, &ByteVec::from(key))? 
+ .next() + .map(|v| (*v).to_owned()); + Ok(result) + } + + #[tracing::instrument(skip(self, key, value))] + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + } + Ok(()) + } + + #[tracing::instrument(skip(self, iter))] + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let mut tx = self.begin()?; + for (key, value) in iter { + tx.put::( + &self.name, + ByteVec::from(key.clone()), + ByteVec::from(value), + )?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self, iter))] + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let mut tx = self.begin()?; + for key in iter { + let old = tx + .get::(&self.name, &ByteVec::from(key.clone()))? + .next() + .map(|v| (*v).to_owned()); + let new = crate::utils::increment(old.as_deref()).unwrap(); + tx.put::(&self.name, ByteVec::from(key), ByteVec::from(new))?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self, key))] + fn remove(&self, key: &[u8]) -> Result<()> { + let mut tx = self.begin()?; + tx.remove::(&self.name, ByteVec::from(key), None)?; + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self))] + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + let iter = self.persy.range::(&self.name, ..); + match iter { + Ok(iter) => Box::new(iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + })), + Err(e) => { + warn!("error iterating {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, from, backwards))] + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + let range = if backwards { + self.persy + .range::(&self.name, ..=ByteVec::from(from)) + } else { + self.persy + .range::(&self.name, ByteVec::from(from)..) 
+ }; + match range { + Ok(iter) => { + let map = iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }); + if backwards { + Box::new(map.rev()) + } else { + Box::new(map) + } + } + Err(e) => { + warn!("error iterating with prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, key))] + fn increment(&self, key: &[u8]) -> Result> { + self.increment_batch(&mut Some(key.to_owned()).into_iter())?; + Ok(self.get(key)?.unwrap()) + } + + #[tracing::instrument(skip(self, prefix))] + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + let range_prefix = ByteVec::from(prefix.clone()); + let range = self + .persy + .range::(&self.name, range_prefix..); + + match range { + Ok(iter) => { + let owned_prefix = prefix.clone(); + Box::new( + iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) + .filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }), + ) + } + Err(e) => { + warn!("error scanning prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, prefix))] + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .unwrap() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } +} diff --git a/src/error.rs b/src/error.rs index 4d427da..5ffe48c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -8,6 +8,9 @@ use ruma::{ use thiserror::Error; use tracing::warn; +#[cfg(feature = "persy")] +use persy::PersyError; + #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, @@ -36,6 +39,9 @@ pub enum Error { #[from] source: rusqlite::Error, }, + #[cfg(feature = "persy")] + #[error("There was a problem with the connection to the persy database.")] + PersyError { source: PersyError }, #[cfg(feature = "heed")] #[error("There was a problem with the connection to the heed database: {error}")] HeedError { error: String }, @@ -142,3 +148,12 @@ where self.to_response().respond_to(r) } } + +#[cfg(feature = "persy")] +impl> From> for Error { + fn from(err: persy::PE) -> Self { + Error::PersyError { + source: err.error().into(), + } + } +} From 1cc41937bd9ae7679aa48c10074ac1041d3a94b5 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 23 Dec 2021 22:59:17 +0000 Subject: [PATCH 0904/1727] refactor:use generic watcher in persy implementation --- Cargo.lock | 46 ++++++++++++++++++++++++++++ Cargo.toml | 5 +--- src/database/abstraction/persy.rs | 50 ++++--------------------------- 3 files changed, 53 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d297102..df37fd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -293,6 +293,7 @@ dependencies = [ "opentelemetry", "opentelemetry-jaeger", "parking_lot", + "persy", "rand 0.8.4", "regex", "reqwest", @@ -374,6 +375,21 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" + [[package]] name = "crc32fast" version = "1.3.0" @@ -1651,6 +1667,21 @@ 
version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "persy" +version = "1.2.0" +source = "git+https://gitlab.com/tglman/persy.git?branch=master#ff102d6edeaf14d30a846c2e2376a814685d09e7" +dependencies = [ + "crc", + "data-encoding", + "fs2", + "linked-hash-map", + "rand 0.8.4", + "thiserror", + "unsigned-varint", + "zigzag", +] + [[package]] name = "pin-project" version = "1.0.10" @@ -3290,6 +3321,12 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +[[package]] +name = "unsigned-varint" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" + [[package]] name = "untrusted" version = "0.7.1" @@ -3532,6 +3569,15 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zigzag" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" +dependencies = [ + "num-traits", +] + [[package]] name = "zstd" version = "0.9.2+zstd.1.5.1" diff --git a/Cargo.toml b/Cargo.toml index 2dbd3fd..7c94a69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,9 +29,6 @@ tokio = "1.11.0" sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } -# Used by the persy write cache for background flush -timer = "0.2" -chrono = "0.4" # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -91,7 +88,7 @@ sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] -backend_persy = ["persy"] +backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 5d633ab..71efed3 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -1,20 +1,14 @@ use crate::{ database::{ - abstraction::{DatabaseEngine, Tree}, + abstraction::{watchers::Watchers, DatabaseEngine, Tree}, Config, }, Result, }; use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; -use std::{ - collections::HashMap, - future::Future, - pin::Pin, - sync::{Arc, RwLock}, -}; +use std::{future::Future, pin::Pin, sync::Arc}; -use tokio::sync::oneshot::Sender; use tracing::warn; pub struct PersyEngine { @@ -44,7 +38,7 @@ impl DatabaseEngine for PersyEngine { Ok(Arc::new(PersyTree { persy: self.persy.clone(), name: name.to_owned(), - watchers: RwLock::new(HashMap::new()), + watchers: Watchers::default(), })) } @@ -56,7 +50,7 @@ impl DatabaseEngine for PersyEngine { pub struct PersyTree { persy: Persy, name: String, - watchers: RwLock, Vec>>>, + watchers: Watchers, } impl PersyTree { @@ -81,27 +75,7 @@ impl Tree for PersyTree { #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; - let watchers = 
self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - } + self.watchers.wake(key); Ok(()) } @@ -228,18 +202,6 @@ impl Tree for PersyTree { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) + self.watchers.watch(prefix) } } From f9977ca64f84768c0a71535f6038f4a6487ddc17 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 13 Jan 2022 22:37:19 +0000 Subject: [PATCH 0905/1727] fix: changes to update to the last database engine trait definition --- src/database/abstraction.rs | 7 ++++++- src/database/abstraction/persy.rs | 12 ++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 9a3771f..0908182 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -18,7 +18,12 @@ pub mod rocksdb; #[cfg(feature = "persy")] pub mod persy; -#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed", feature="persy"))] +#[cfg(any( + feature = "sqlite", + feature = "rocksdb", + feature = "heed", + feature = "persy" +))] pub mod watchers; pub trait DatabaseEngine: Send + Sync { diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 71efed3..628cf32 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -11,12 +11,12 @@ use std::{future::Future, pin::Pin, sync::Arc}; use tracing::warn; -pub struct PersyEngine { +pub struct Engine { persy: Persy, } -impl DatabaseEngine for PersyEngine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let mut cfg = persy::Config::new(); cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); @@ -24,10 +24,10 @@ impl DatabaseEngine for PersyEngine { .create(true) .config(cfg) .open(&format!("{}/db.persy", config.database_path))?; - Ok(Arc::new(PersyEngine { persy })) + Ok(Arc::new(Engine { persy })) } - fn open_tree(self: &Arc, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { // Create if it doesn't exist if !self.persy.exists_index(name)? 
{ let mut tx = self.persy.begin()?; @@ -42,7 +42,7 @@ impl DatabaseEngine for PersyEngine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { Ok(()) } } From c1cd4b5e26c68d1c5e91f85df2a65591f774d13c Mon Sep 17 00:00:00 2001 From: Tglman Date: Fri, 14 Jan 2022 21:00:13 +0000 Subject: [PATCH 0906/1727] chore: set the released version of persy in Cargo.toml --- Cargo.lock | 3 ++- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df37fd5..469c566 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1670,7 +1670,8 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" version = "1.2.0" -source = "git+https://gitlab.com/tglman/persy.git?branch=master#ff102d6edeaf14d30a846c2e2376a814685d09e7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c6aa7d7f093620a28b74fcf5f5da73ba17a9e52fcbbdbb4ecc89e61cb2d673" dependencies = [ "crc", "data-encoding", diff --git a/Cargo.toml b/Cargo.toml index 7c94a69..df879c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ tokio = "1.11.0" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } +persy = { version = "1.2" , optional = true, features=["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" From fb19114bd9bbfc9bcc50caaab72074247bbe726b Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 15:52:47 +0100 Subject: [PATCH 0907/1727] rename iter_locals to get_local_users; make get_local_users skip on parse errors; remove deprecated function count_local_users --- src/database/admin.rs | 5 +---- src/database/users.rs | 22 +++++++++------------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 5418f53..859977e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,14 +95,11 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - // collect local users only - let users = guard.users.iter_locals(); + let users = guard.users.get_local_users(); let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); - // send number of local users as plain text: - // TODO: send as Markdown send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); } AdminCommand::RegisterAppservice(yaml) => { diff --git a/src/database/users.rs b/src/database/users.rs index d3e1fe4..021c710 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,16 +84,6 @@ impl Users { Ok(self.userid_password.iter().count()) } - /// The method is DEPRECATED and was replaced by iter_locals() - /// - /// This method will only count those local user accounts with - /// a password thus returning only real accounts on this instance. - #[tracing::instrument(skip(self))] - pub fn count_local_users(&self) -> Result { - let n = self.userid_password.iter().filter(|(_, bytes)| bytes.len() > 0).count(); - Ok(n) - } - /// Find out which user an access token belongs to. 
#[tracing::instrument(skip(self, token))] @@ -134,13 +124,19 @@ impl Users { }) } - /// Returns a vector of local usernames + /// Returns a list of local usernames, that is, a parseable username + /// with a password of length greater then zero bytes. + /// If utils::string_from_bytes returns an error that username will be skipped + /// and the function will log the error #[tracing::instrument(skip(self))] - pub fn iter_locals(&self) -> Vec { + pub fn get_local_users(&self) -> Vec { self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { match utils::string_from_bytes(&username) { Ok(s) => s, - Err(e) => e.to_string() + Err(e) => { + Error::bad_database(format!("Failed to parse username: {}", e.to_string())); + None + } } }).collect::>() } From 91eb6c4d08c5293f8af6436489923871fb2477a9 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 17:10:23 +0100 Subject: [PATCH 0908/1727] Return a Result instead of a vector --- src/database/admin.rs | 16 ++++++++++------ src/database/users.rs | 18 +++++++++++------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 859977e..7799ffa 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,12 +95,16 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - let users = guard.users.get_local_users(); - - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - - send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + match guard.users.get_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + } + Err(e) => { + send_message(RoomMessageEventContent::text_plain(e.to_string()), guard, &state_lock); + } + } } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error diff --git a/src/database/users.rs b/src/database/users.rs index 021c710..7d14b3e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,7 +84,6 @@ impl Users { Ok(self.userid_password.iter().count()) } - /// Find out which user an access token belongs to. #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { @@ -129,16 +128,21 @@ impl Users { /// If utils::string_from_bytes returns an error that username will be skipped /// and the function will log the error #[tracing::instrument(skip(self))] - pub fn get_local_users(&self) -> Vec { - self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { - match utils::string_from_bytes(&username) { + pub fn get_local_users(&self) -> Result> { + self.userid_password + .iter() + .filter(|(_, pw)| pw.len() > 0) + .map(|(username, _)| match utils::string_from_bytes(&username) { Ok(s) => s, Err(e) => { - Error::bad_database(format!("Failed to parse username: {}", e.to_string())); + Error::bad_database(format!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + )); None } - } - }).collect::>() + }) + .collect::>>() } /// Returns the password hash for the given user. 
From 217e3789929b7a1b227058b3b88664ee5f74ca75 Mon Sep 17 00:00:00 2001 From: Julius de Bruijn Date: Sat, 15 Jan 2022 17:34:13 +0000 Subject: [PATCH 0909/1727] Add mautrix-signal to tested appservices --- APPSERVICES.md | 36 +----------------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 894bc6f..f23918b 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -57,38 +57,4 @@ These appservices have been tested and work with Conduit without any extra steps - [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - -### [mautrix-signal](https://github.com/mautrix/signal) - -There are a few things you need to do, in order for the Signal bridge (at least -up to version `0.2.0`) to work. How you do this depends on whether you use -Docker or `virtualenv` to run it. In either case you need to modify -[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py). -Do this **before** following the bridge installation guide. - -1. **Create a copy of `portal.py`**. Go to - [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) -at [mautrix-signal](https://github.com/mautrix/signal) (make sure you change to -the correct commit/version of mautrix-signal you're using) and copy its -content. Create a new `portal.py` on your system and paste the content in. -2. **Patch the copy**. Exact line numbers may be slightly different, look nearby if they don't match: - - [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) - ```diff - --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 - +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 - ``` - - [Between lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) add a new line: - ```diff - "type": str(EventType.ROOM_POWER_LEVELS), - +++ "state_key": "", - "content": power_levels.serialize(), - ``` -3. **Deploy the patch**. This is different depending on how you have `mautrix-signal` deployed: - - [*If using virtualenv*] Copy your patched `portal.py` to `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). - - [*If using Docker*] Map the patched `portal.py` into the `mautrix-signal` container: - - ```yaml - volumes: - - ./your/path/on/host/portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py - ``` -4. Now continue with the [bridge installation instructions ](https://docs.mau.fi/bridges/index.html) and the general bridge notes above. +- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. 
From c03bf6ef11bd88459d6dc1eed75d23879ae2fa1a Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 22:20:51 +0100 Subject: [PATCH 0910/1727] name the function after its purpose: iter_locals -> get_local_users --- src/database/users.rs | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 7d14b3e..6f51e1f 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -129,20 +129,35 @@ impl Users { /// and the function will log the error #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { - self.userid_password + let users: Vec = self + .userid_password .iter() - .filter(|(_, pw)| pw.len() > 0) - .map(|(username, _)| match utils::string_from_bytes(&username) { - Ok(s) => s, - Err(e) => { - Error::bad_database(format!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - )); + .filter_map(|(username, pw)| self.get_username_on_valid_password(&username, &pw)) + .collect(); + Ok(users) + } + + /// A private helper to avoid double filtering the iterator + fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.len() > 0 { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(_) => { + // TODO: add error cause! + // let msg: String = format!( + // "Failed to parse username while calling get_local_users(): {}", + // e.to_string() + // ); + Error::bad_database( + "Failed to parse username while calling get_username_on_valid_password", + ); None } - }) - .collect::>>() + } + } else { + None + } } /// Returns the password hash for the given user. From 9205c070485a234463119182bd976a0d45c1ace0 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 22:37:39 +0100 Subject: [PATCH 0911/1727] Update get_local_users description --- src/database/users.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 6f51e1f..e510140 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -123,10 +123,11 @@ impl Users { }) } - /// Returns a list of local usernames, that is, a parseable username - /// with a password of length greater then zero bytes. + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password + /// is greater then zero. 
/// If utils::string_from_bytes returns an error that username will be skipped - /// and the function will log the error #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { let users: Vec = self @@ -138,6 +139,7 @@ impl Users { } /// A private helper to avoid double filtering the iterator + #[tracing::instrument(skip(self))] fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.len() > 0 { From 13ae036ca04b4ebd427444252ef9856b3028b7ac Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sun, 16 Jan 2022 13:52:23 +0200 Subject: [PATCH 0912/1727] Move and refactor admin commands into admin module --- src/database/admin.rs | 239 +++++++++++++++++++++++++++++++++++++++++- src/database/rooms.rs | 220 +------------------------------------- 2 files changed, 240 insertions(+), 219 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7d2301d..518d758 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,10 +1,17 @@ -use std::{convert::TryInto, sync::Arc}; +use std::{convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; -use crate::{pdu::PduBuilder, Database}; -use rocket::futures::{channel::mpsc, stream::StreamExt}; +use crate::{ + error::{Error, Result}, + pdu::PduBuilder, + server_server, Database, PduEvent, +}; +use rocket::{ + futures::{channel::mpsc, stream::StreamExt}, + http::RawStr, +}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - UserId, + EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -137,3 +144,227 @@ impl Admin { self.sender.unbounded_send(command).unwrap(); } } + +pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { + let mut parts = command_line.split_whitespace().skip(1); + + let command_name = match parts.next() { + Some(command) => command, + None => { + let message = "No command given. Use help for a list of commands."; + return AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(message), + message, + )); + } + }; + + let args: Vec<_> = parts.collect(); + + match try_parse_admin_command(db, command_name, args, body) { + Ok(admin_command) => admin_command, + Err(error) => { + let message = format!( + "Encountered error while handling {} command:\n\ +
                <pre>{}</pre>",
+                command_name, error,
+            );
+
+            AdminCommand::SendMessage(RoomMessageEventContent::text_html(
+                html_to_markdown(&message),
+                message,
+            ))
+        }
+    }
+}
+
+// Helper for `RoomMessageEventContent::text_html`, which needs the content as
+// both markdown and HTML.
+fn html_to_markdown(text: &str) -> String {
+    text.replace("<p>", "")
+        .replace("</p>", "\n")
+        .replace("<pre>", "```\n")
+        .replace("</pre>", "\n```")
+        .replace("<code>", "`")
+        .replace("</code>", "`")
+        .replace("<li>", "* ")
+        .replace("</li>", "")
+        .replace("<ul>\n", "")
+        .replace("</ul>\n", "")
+}
+
+const HELP_TEXT: &'static str = r#"
+<p>The following commands are available:</p>
+<ul>
+<li><code>register_appservice</code>: Register a bridge using its registration YAML</li>
+<li><code>unregister_appservice</code>: Unregister a bridge using its ID</li>
+<li><code>list_appservices</code>: List all the currently registered bridges</li>
+<li><code>get_auth_chain</code>: Get the `auth_chain` of a PDU</li>
+<li><code>parse_pdu</code>: Parse and print a PDU from a JSON</li>
+<li><code>get_pdu</code>: Retrieve and print a PDU by ID from the Conduit database</li>
+<li><code>database_memory_usage</code>: Print database memory usage statistics</li>
+</ul>
          +"#; + +pub fn try_parse_admin_command( + db: &Database, + command: &str, + args: Vec<&str>, + body: Vec<&str>, +) -> Result { + let command = match command { + "help" => AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(HELP_TEXT), + HELP_TEXT, + )), + "register_appservice" => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => AdminCommand::RegisterAppservice(yaml), + Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("Could not parse appservice config: {}", e), + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Expected code block in command body.", + )) + } + } + "unregister_appservice" => { + if args.len() == 1 { + AdminCommand::UnregisterAppservice(args[0].to_owned()) + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Missing appservice identifier", + )) + } + } + "list_appservices" => AdminCommand::ListAppservices, + "get_auth_chain" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::parse_arc(args[0]) { + if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = + server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let elapsed = start.elapsed(); + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), + )); + } + } + } + + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Usage: get_auth_chain ", + )) + } + "parse_pdu" => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let event_id = EventId::parse(format!( + "${}", + // Anything higher than version3 behaves the same + ruma::signatures::reference_hash(&value, &RoomVersionId::V6) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("EventId: {:?}\n{:#?}", event_id, pdu), + )) + } + Err(e) => AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + ), + } + } + Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("Invalid json in command body: {}", e), + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Expected code block in command body.", + )) + } + } + "get_pdu" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::parse(args[0]) { + let mut outlier = false; + let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = db.rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = 
serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); + AdminCommand::SendMessage( + RoomMessageEventContent::text_html( + format!("{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, json_text), + format!("

<p>{}</p>\n<pre><code>{}\n</code></pre>
          \n", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) + ), + ) + } + None => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "PDU not found.", + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Event ID could not be parsed.", + )) + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Usage: get_pdu ", + )) + } + } + "database_memory_usage" => AdminCommand::ShowMemoryUsage, + _ => { + let message = format!( + "Unrecognized command {}, try help for a list of commands.", + command, + ); + AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(&message), + message, + )) + } + }; + + Ok(command) +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0ba6c9b..14df8f5 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,13 +3,13 @@ mod edus; pub use edus::RoomEdus; use crate::{ + database::admin::parse_admin_command, pdu::{EventHash, PduBuilder}, - server_server, utils, Database, Error, PduEvent, Result, + utils, Database, Error, PduEvent, Result, }; use lru_cache::LruCache; use regex::Regex; use ring::digest; -use rocket::http::RawStr; use ruma::{ api::{client::error::ErrorKind, federation}, events::{ @@ -19,7 +19,6 @@ use ruma::{ room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::TagEvent, @@ -40,12 +39,11 @@ use std::{ iter, mem::size_of, sync::{Arc, Mutex, RwLock}, - time::Instant, }; use tokio::sync::MutexGuard; use tracing::{error, warn}; -use super::{abstraction::Tree, admin::AdminCommand, pusher}; +use super::{abstraction::Tree, pusher}; /// The unique identifier of each state group. /// @@ -1496,216 +1494,8 @@ impl Rooms { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let mut parts = command_line.split_whitespace().skip(1); - if let Some(command) = parts.next() { - let args: Vec<_> = parts.collect(); - - match command { - "register_appservice" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::( - &appservice_config, - ); - match parsed_config { - Ok(yaml) => { - db.admin - .send(AdminCommand::RegisterAppservice(yaml)); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "unregister_appservice" => { - if args.len() == 1 { - db.admin.send(AdminCommand::UnregisterAppservice( - args[0].to_owned(), - )); - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Missing appservice identifier", - ), - )); - } - } - "list_appservices" => { - db.admin.send(AdminCommand::ListAppservices); - } - "get_auth_chain" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse_arc(args[0]) { - if let Some(event) = db.rooms.get_pdu_json(&event_id)? 
{ - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| { - Error::bad_database( - "Invalid event in database", - ) - })?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let start = Instant::now(); - let count = server_server::get_auth_chain( - room_id, - vec![event_id], - db, - )? - .count(); - let elapsed = start.elapsed(); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); - } - } - } - } - "parse_pdu" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let event_id = EventId::parse(format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash( - &value, - &RoomVersionId::V6 - ) - .expect("ruma can calculate reference hashes") - )) - .expect( - "ruma's reference hashes are valid event ids", - ); - - match serde_json::from_value::( - serde_json::to_value(value) - .expect("value is json"), - ) { - Ok(pdu) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - ), - ), - )); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!("EventId: {:?}\nCould not parse event: {}", event_id, e), - ), - )); - } - } - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "get_pdu" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse(args[0]) { - let mut outlier = false; - let mut pdu_json = - db.rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = - serde_json::to_string_pretty(&json) - .expect("canonical json is valid json"); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, json_text), - format!("

<p>{}</p>\n<pre><code>{}\n</code></pre>
          \n", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) - ), - )); - } - None => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "PDU not found.", - ), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Event ID could not be parsed.", - ), - )); - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Usage: get_pdu ", - ), - )); - } - } - "database_memory_usage" => { - db.admin.send(AdminCommand::ShowMemoryUsage); - } - _ => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Unrecognized command: {}", - command - )), - )); - } - } - } + let command = parse_admin_command(db, command_line, body); + db.admin.send(command); } } } From 3e79d154957211b98343f18077282b6ab5e6d36e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 20:15:53 +0100 Subject: [PATCH 0913/1727] Updated function documentation --- src/database/users.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index e510140..9fe4a4e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -125,28 +125,27 @@ impl Users { /// Returns a list of local users as list of usernames. /// - /// A user account is considered `local` if the length of it's password - /// is greater then zero. - /// If utils::string_from_bytes returns an error that username will be skipped + /// A user account is considered `local` if the length of it's password is greater then zero. #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() - .filter_map(|(username, pw)| self.get_username_on_valid_password(&username, &pw)) + .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) .collect(); Ok(users) } - /// A private helper to avoid double filtering the iterator + /// A private helper to avoid double filtering the iterator in get_local_users(). + /// If utils::string_from_bytes(...) returns an error that username will be skipped + /// and the error will be logged. TODO: add error cause. #[tracing::instrument(skip(self))] - fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { + fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.len() > 0 { match utils::string_from_bytes(username) { Ok(u) => Some(u), Err(_) => { - // TODO: add error cause! 
// let msg: String = format!( // "Failed to parse username while calling get_local_users(): {}", // e.to_string() From 52284ef9e2de88798408913b73d6b783768e13f3 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 20:25:16 +0100 Subject: [PATCH 0914/1727] Add some debug/info if user was found --- src/database/users.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/database/users.rs b/src/database/users.rs index 9fe4a4e..f73c1c8 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -10,6 +10,7 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; +use tracing::info; use super::abstraction::Tree; @@ -144,7 +145,10 @@ impl Users { // A valid password is not empty if password.len() > 0 { match utils::string_from_bytes(username) { - Ok(u) => Some(u), + Ok(u) => { + info!("list_local_users_test: found user {}", u); + Some(u) + }, Err(_) => { // let msg: String = format!( // "Failed to parse username while calling get_local_users(): {}", From 50430cf4ab8c742b3942b2735f0d264b17be936e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 21:22:57 +0100 Subject: [PATCH 0915/1727] Name function after command: list_local_users --- src/database/admin.rs | 2 +- src/database/users.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7799ffa..3b347b1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,7 +95,7 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - match guard.users.get_local_users() { + match guard.users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); diff --git a/src/database/users.rs b/src/database/users.rs index 9fe4a4e..645e54c 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -127,7 +127,7 @@ impl Users { /// /// A user account is considered `local` if the length of it's password is greater then zero. 
#[tracing::instrument(skip(self))] - pub fn get_local_users(&self) -> Result> { + pub fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() From 10f1da12bfa17c05ae219913c411fd3c27dc3a29 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 16 Jan 2022 20:57:23 +0000 Subject: [PATCH 0916/1727] CI: Fix cargo-test --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f47327b..73a1a92 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -218,7 +218,7 @@ test:cargo: before_script: # - mkdir -p $CARGO_HOME - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: From ee8e72f7a809cfbe58697ad69aff437d35e1404f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:35:38 +0100 Subject: [PATCH 0917/1727] feat: implement server ACLs --- Cargo.lock | 48 +++++++---- Cargo.toml | 2 +- src/client_server/membership.rs | 4 +- src/client_server/message.rs | 4 +- src/client_server/state.rs | 4 +- src/client_server/to_device.rs | 4 +- src/database/abstraction/rocksdb.rs | 2 +- src/database/sending.rs | 8 +- src/database/transaction_ids.rs | 6 +- src/server_server.rs | 126 +++++++++++++++++++++++----- 10 files changed, 150 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d297102..5be10f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2086,7 +2086,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "assign", "js_int", @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "bytes", "http", @@ -2123,7 +2123,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2134,7 +2134,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "ruma-api", "ruma-common", @@ -2148,7 +2148,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = 
"git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "assign", "bytes", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "indexmap", "js_int", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "indoc", "js_int", @@ -2194,12 +2194,13 @@ dependencies = [ "serde", "serde_json", "thiserror", + "wildmatch", ] [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2210,7 +2211,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2225,7 +2226,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2234,12 +2235,13 @@ dependencies = [ "ruma-serde", "ruma-serde-macros", "serde", + "uuid", ] [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2249,7 +2251,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "thiserror", ] @@ -2257,7 +2259,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = 
"git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2270,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2285,8 +2287,9 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ + "base64 0.13.0", "bytes", "form_urlencoded", "itoa 0.4.8", @@ -2299,7 +2302,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2310,7 +2313,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2327,7 +2330,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "itertools", "js_int", @@ -3308,6 +3311,15 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.3", +] + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index c87d949..29a090c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f8ba7f795765bf4aeb4db06849f9fdde9c162ac3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "08d60b3d376b63462f769d4b9bd3bbfb560d501a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = 
"https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index cede51f..7035278 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -23,7 +23,7 @@ use ruma::{ }, EventType, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -787,7 +787,7 @@ async fn join_room_by_id_helper( fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 9705e4c..36653fa 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -74,11 +74,11 @@ pub async fn send_message_event_route( } let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); + unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::from(&body.event_type), + event_type: EventType::from(&*body.event_type), content: serde_json::from_str(body.body.body.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, unsigned: Some(unsigned), diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e42694a..c07d482 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -44,7 +44,7 @@ pub async fn send_state_event_for_key_route( &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + EventType::from(&*body.event_type), &body.body.body, // Yes, I hate it too body.state_key.to_owned(), ) @@ -86,7 +86,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + EventType::from(&*body.event_type), &body.body.body, body.state_key.to_owned(), ) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 177b123..6e764de 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -53,8 +53,8 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: EventType::from(&body.event_type), - message_id: body.txn_id.clone(), + ev_type: EventType::from(&*body.event_type), + message_id: body.txn_id.to_string(), messages, }, )) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index adda678..15ea9f7 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -44,7 +44,7 @@ fn db_options( db_opts.set_max_open_files(max_open_files); db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); 
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.optimize_level_style_compaction(cache_capacity_bytes); + db_opts.optimize_level_style_compaction(10 * 1024 * 1024); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); db_opts.set_prefix_extractor(prefix_extractor); diff --git a/src/database/sending.rs b/src/database/sending.rs index 1e180d4..65284a4 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -524,7 +524,7 @@ impl Sending { .unwrap(), // TODO: handle error appservice::event::push_events::v1::Request { events: &pdu_jsons, - txn_id: &base64::encode_config( + txn_id: (&*base64::encode_config( Self::calculate_hash( &events .iter() @@ -534,7 +534,7 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - ), + )).into(), }, ) .await @@ -682,7 +682,7 @@ impl Sending { pdus: &pdu_jsons, edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: &base64::encode_config( + transaction_id: (&*base64::encode_config( Self::calculate_hash( &events .iter() @@ -692,7 +692,7 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - ), + )).into(), }, ) .await diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index f346757..d576083 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{DeviceId, UserId}; +use ruma::{DeviceId, UserId, identifiers::TransactionId}; use super::abstraction::Tree; @@ -14,7 +14,7 @@ impl TransactionIds { &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, data: &[u8], ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -32,7 +32,7 @@ impl TransactionIds { &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index c76afd3..5cd43d8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,6 +42,7 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ + server_acl::RoomServerAclEventContent, create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, @@ -49,7 +50,7 @@ use ruma::{ }, int, receipt::ReceiptType, - serde::JsonObject, + serde::{Base64, JsonObject}, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, @@ -551,7 +552,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { .try_into() .expect("found invalid server signing keys in DB"), VerifyKey { - key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), + key: Base64::new(db.globals.keypair().public_key().to_vec()), }, ); let mut response = serde_json::from_slice( @@ -740,6 +741,8 @@ pub async fn send_transaction_message_route( } }; + acl_check(&body.origin, &room_id, &db)?; + let mutex = Arc::clone( db.globals .roomid_mutex_federation @@ -854,7 +857,7 @@ pub async fn send_transaction_message_route( // Check if this is a new transaction id if db .transaction_ids - .existing_txnid(&sender, None, &message_id)? + .existing_txnid(&sender, None, (&*message_id).into())? 
.is_some() { continue; @@ -902,7 +905,7 @@ pub async fn send_transaction_message_route( // Save transaction id with empty data db.transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; + .add_txnid(&sender, None, (&*message_id).into(), &[])?; } Edu::_Custom(_) => {} } @@ -948,7 +951,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> Result>, String> { match db.rooms.exists(room_id) { Ok(true) => {} @@ -1123,7 +1126,7 @@ fn handle_outlier_pdu<'a>( room_id: &'a RoomId, value: BTreeMap, db: &'a Database, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -1285,7 +1288,7 @@ async fn upgrade_outlier_to_timeline_pdu( origin: &ServerName, db: &Database, room_id: &RoomId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, ) -> Result>, String> { if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); @@ -1827,7 +1830,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { @@ -1966,9 +1969,9 @@ pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, signature_ids: Vec, -) -> Result> { +) -> Result> { let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); let permit = db .globals @@ -2355,8 +2358,11 @@ pub fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? { - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); + if !db.rooms.server_in_room(sender_servername, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); } Ok(get_event::v1::Response { @@ -2395,6 +2401,8 @@ pub fn get_missing_events_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); @@ -2464,6 +2472,15 @@ pub fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + let event = db .rooms .get_pdu_json(&body.event_id)? @@ -2477,10 +2494,6 @@ pub fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? 
{ - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); - } - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { @@ -2520,6 +2533,8 @@ pub fn get_room_state_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2583,6 +2598,8 @@ pub fn get_room_state_ids_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2626,10 +2643,17 @@ pub fn create_join_event_template_route( if !db.rooms.exists(&body.room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, - "Server is not in room.", + "Room is unknown to this server.", )); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(&body.room_id)? @@ -2782,6 +2806,7 @@ pub fn create_join_event_template_route( async fn create_join_event( db: &DatabaseGuard, + sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { @@ -2789,6 +2814,15 @@ async fn create_join_event( return Err(Error::bad_config("Federation is disabled.")); } + if !db.rooms.exists(room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + acl_check(sender_servername, room_id, &db)?; + // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db .rooms @@ -2888,7 +2922,12 @@ pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v1::Response { room_state }.into()) } @@ -2905,7 +2944,12 @@ pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v2::Response { room_state }.into()) } @@ -2926,6 +2970,13 @@ pub async fn create_invite_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -3199,7 +3250,7 @@ pub async fn claim_keys_route( #[tracing::instrument(skip(event, pub_key_map, db))] pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { let signatures = event @@ -3253,7 +3304,7 @@ fn get_server_keys_from_cache( pdu: &RawJsonValue, servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, ) -> Result<()> { let 
value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { @@ -3306,7 +3357,7 @@ fn get_server_keys_from_cache( let signature_ids = signature_object.keys().cloned().collect::>(); let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") @@ -3339,7 +3390,7 @@ fn get_server_keys_from_cache( pub(crate) async fn fetch_join_signing_keys( event: &create_join_event::v2::Response, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = @@ -3439,6 +3490,35 @@ pub(crate) async fn fetch_join_signing_keys( Ok(()) } +/// Returns Ok if the acl allows the server +fn acl_check( + server_name: &ServerName, + room_id: &RoomId, + db: &Database, +) -> Result<()> { + let acl_event = match db + .rooms + .room_state_get(room_id, &EventType::RoomServerAcl, "")? { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = match + serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest(ErrorKind::Forbidden, "Server was denied by ACL")) + } +} + #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; From 8c90e7adfb0d06164d17921e6e686cdaab0d8f1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:39:37 +0100 Subject: [PATCH 0918/1727] refactor: fix warnings --- src/database/abstraction/rocksdb.rs | 27 ++++++--------------------- src/database/sending.rs | 6 ++++-- src/database/transaction_ids.rs | 2 +- src/server_server.rs | 26 +++++++++++++------------- 4 files changed, 24 insertions(+), 37 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 15ea9f7..d615713 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,7 +4,6 @@ use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, - cache_capacity_bytes: usize, max_open_files: i32, cache: rocksdb::Cache, old_cfs: Vec, @@ -17,11 +16,7 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()>, } -fn db_options( - cache_capacity_bytes: usize, - max_open_files: i32, - rocksdb_cache: &rocksdb::Cache, -) -> rocksdb::Options { +fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); @@ -57,11 +52,7 @@ impl DatabaseEngine for Arc { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); - let db_opts = db_options( - cache_capacity_bytes, - config.rocksdb_max_open_files, - &rocksdb_cache, - ); + let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -75,18 +66,13 @@ impl DatabaseEngine for Arc { cfs.iter().map(|name| { rocksdb::ColumnFamilyDescriptor::new( name, - db_options( - 
cache_capacity_bytes, - config.rocksdb_max_open_files, - &rocksdb_cache, - ), + db_options(config.rocksdb_max_open_files, &rocksdb_cache), ) }), )?; Ok(Arc::new(Engine { rocks: db, - cache_capacity_bytes, max_open_files: config.rocksdb_max_open_files, cache: rocksdb_cache, old_cfs: cfs, @@ -96,10 +82,9 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let _ = self.rocks.create_cf( - name, - &db_options(self.cache_capacity_bytes, self.max_open_files, &self.cache), - ); + let _ = self + .rocks + .create_cf(name, &db_options(self.max_open_files, &self.cache)); } Ok(Arc::new(RocksDbEngineTree { diff --git a/src/database/sending.rs b/src/database/sending.rs index 65284a4..69f7c44 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -534,7 +534,8 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - )).into(), + )) + .into(), }, ) .await @@ -692,7 +693,8 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - )).into(), + )) + .into(), }, ) .await diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index d576083..12b838b 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{DeviceId, UserId, identifiers::TransactionId}; +use ruma::{identifiers::TransactionId, DeviceId, UserId}; use super::abstraction::Tree; diff --git a/src/server_server.rs b/src/server_server.rs index 5cd43d8..54ae025 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,9 +42,9 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ - server_acl::RoomServerAclEventContent, create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, AnyEphemeralRoomEvent, EventType, }, @@ -3491,20 +3491,17 @@ pub(crate) async fn fetch_join_signing_keys( } /// Returns Ok if the acl allows the server -fn acl_check( - server_name: &ServerName, - room_id: &RoomId, - db: &Database, -) -> Result<()> { +fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { let acl_event = match db .rooms - .room_state_get(room_id, &EventType::RoomServerAcl, "")? { - Some(acl) => acl, - None => return Ok(()), - }; + .room_state_get(room_id, &EventType::RoomServerAcl, "")? 
+ { + Some(acl) => acl, + None => return Ok(()), + }; - let acl_event_content: RoomServerAclEventContent = match - serde_json::from_str(acl_event.content.get()) { + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { Ok(content) => content, Err(_) => { warn!("Invalid ACL event"); @@ -3515,7 +3512,10 @@ fn acl_check( if acl_event_content.is_allowed(server_name) { Ok(()) } else { - Err(Error::BadRequest(ErrorKind::Forbidden, "Server was denied by ACL")) + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) } } From 03b174335cfc472c3ecaba7068ead74f0e2268be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:46:53 +0100 Subject: [PATCH 0919/1727] improvement: lower default pdu cache capacity --- src/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index fd7a145..1997dc0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -134,7 +134,7 @@ fn default_rocksdb_max_open_files() -> i32 { } fn default_pdu_cache_capacity() -> u32 { - 1_000_000 + 150_000 } fn default_cleanup_second_interval() -> u32 { From fc39b3447c5add8b8c8d2b188751969d752d1ee1 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 17 Jan 2022 19:43:45 +0100 Subject: [PATCH 0920/1727] Little bit of refactoring --- src/database/users.rs | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 83c1520..e608673 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -10,7 +10,6 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; -use tracing::info; use super::abstraction::Tree; @@ -137,31 +136,23 @@ impl Users { Ok(users) } - /// A private helper to avoid double filtering the iterator in get_local_users(). + /// Will only return with Some(username) if the password was not empty and the + /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. TODO: add error cause. + /// and the error will be logged. #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty - if password.len() > 0 { + if password.is_empty() { + None + } else { match utils::string_from_bytes(username) { - Ok(u) => { - info!("list_local_users_test: found user {}", u); - Some(u) - }, - Err(_) => { - // let msg: String = format!( - // "Failed to parse username while calling get_local_users(): {}", - // e.to_string() - // ); - Error::bad_database( - "Failed to parse username while calling get_username_on_valid_password", - ); + Ok(u) => Some(u), + Err(e) => { + warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); None } } - } else { - None } } From fd6427a83fef08b7f8f6f74d3a4c88af3171aa77 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 17 Jan 2022 22:34:34 +0100 Subject: [PATCH 0921/1727] Update/Revert code comment --- src/database/users.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index e608673..9b986d4 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -77,8 +77,6 @@ impl Users { } /// Returns the number of users registered on this server. 
- /// It really returns all users, not only real ones with a - /// password to login but also bridge puppets... #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) From 53de3509087f46b6a45ca20d27e8fa2884269535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 23:24:27 +0100 Subject: [PATCH 0922/1727] fix: less load when lazy loading --- src/client_server/sync.rs | 53 ++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index bd2f48a..14aac3a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -453,38 +453,39 @@ async fn sync_helper( let joined_since_last_sync = since_sender_member .map_or(true, |member| member.membership != MembershipState::Join); - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); - for (key, id) in current_state_ids { - if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; + if since_shortstatehash != current_shortstatehash { + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - if pdu.kind == EventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); + for (key, id) in current_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; } - Err(e) => error!("Invalid state key for member event: {}", e), - } - } + }; - state_events.push(pdu); + if pdu.kind == EventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + } } } From 13a48c45776de19912ecd040a6434c75152802f7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 18 Jan 2022 21:04:44 +0100 Subject: [PATCH 0923/1727] Clean up mod and use statements in lib.rs and main.rs --- src/lib.rs | 10 ++++++---- src/main.rs | 22 ++++------------------ 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 82b8f34..745eb39 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,21 +7,23 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; +use std::ops::Deref; + mod database; mod error; mod pdu; mod ruma_wrapper; -pub mod server_server; mod utils; +pub mod appservice_server; +pub mod client_server; +pub mod server_server; + pub use database::{Config, Database}; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::Config as RocketConfig; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use std::ops::Deref; pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); diff --git a/src/main.rs b/src/main.rs index 56faa3e..d9bbc24 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,27 +7,9 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; -pub mod server_server; - -mod database; -mod error; -mod pdu; -mod ruma_wrapper; -mod utils; - use std::sync::Arc; -use database::Config; -pub use database::Database; -pub use error::{Error, Result}; use opentelemetry::trace::{FutureExt, Tracer}; -pub use pdu::PduEvent; -pub use rocket::State; -use ruma::api::client::error::ErrorKind; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - use rocket::{ catch, catchers, figment::{ @@ -36,9 +18,13 @@ use rocket::{ }, routes, Request, }; +use ruma::api::client::error::ErrorKind; use tokio::sync::RwLock; use tracing_subscriber::{prelude::*, EnvFilter}; +pub use conduit::*; // Re-export everything from the library crate +pub use rocket::State; + fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) .manage(data) From c6277c72a1f75d889b47708769adf376cac9d1ea Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 18 Jan 2022 21:05:40 +0100 Subject: [PATCH 0924/1727] Fix warnings in database::abstraction --- src/database/abstraction.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 17bd971..321b064 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -23,12 +23,12 @@ pub trait DatabaseEngine: Send + Sync { where Self: Sized; fn open_tree(&self, name: &'static str) -> Result>; - fn flush(self: &Self) -> Result<()>; - fn cleanup(self: &Self) -> Result<()> { + fn flush(&self) -> Result<()>; + fn cleanup(&self) -> Result<()> { Ok(()) } - fn memory_usage(self: &Self) -> Result { - Ok("Current database engine does not support memory usage reporting.".to_string()) + fn memory_usage(&self) -> Result { + Ok("Current database 
engine does not support memory usage reporting.".to_owned()) } } From d4eb3e3295ee1b0947b66d1d45ef10bb4d152839 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 19 Jan 2022 07:09:25 +0100 Subject: [PATCH 0925/1727] fix: rocksdb does not use zstd compression unless we disable everything else --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 29a090c..3223330 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,8 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", features = ["multi-threaded-cf"], optional = true } +rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } + thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" From a0fc5eba72a7b364cfe91d5b188b136fa555b7e1 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 19 Jan 2022 23:56:55 +0100 Subject: [PATCH 0926/1727] Remove unnecessary Result --- src/database/uiaa.rs | 7 +++---- src/ruma_wrapper.rs | 13 +++++-------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 5e11467..b0c8d6d 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -166,13 +166,12 @@ impl Uiaa { user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Result> { - Ok(self - .userdevicesessionid_uiaarequest + ) -> Option { + self.userdevicesessionid_uiaarequest .read() .unwrap() .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned())) + .map(|j| j.to_owned()) } fn update_uiaa_session( diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4b8d5de..1bd921d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -296,14 +296,11 @@ where .and_then(|auth| auth.get("session")) .and_then(|session| session.as_str()) .and_then(|session| { - db.uiaa - .get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - .ok() - .flatten() + db.uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) }) { for (key, value) in initial_request { From 756a41f22d24c89682eea826e138f8c3896433fb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 00:10:39 +0100 Subject: [PATCH 0927/1727] Fix rustc / clippy warnings --- src/client_server/context.rs | 15 +++++++-------- src/client_server/keys.rs | 2 +- src/client_server/message.rs | 14 +++++++------- src/client_server/profile.rs | 4 ++-- src/database.rs | 30 ++++++++++++------------------ src/database/admin.rs | 2 +- src/database/rooms.rs | 27 +++++++++++++-------------- src/server_server.rs | 6 +++--- 8 files changed, 46 insertions(+), 54 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 94a44e3..e117766 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -3,8 +3,7 @@ use ruma::{ api::client::{error::ErrorKind, r0::context::get_context}, events::EventType, }; -use std::collections::HashSet; -use std::convert::TryFrom; +use std::{collections::HashSet, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -55,8 +54,8 @@ pub async fn get_context_route( ))?; if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + 
sender_device, &body.room_id, &base_event.sender, )? { @@ -79,8 +78,8 @@ pub async fn get_context_route( for (_, event) in &events_before { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -112,8 +111,8 @@ pub async fn get_context_route( for (_, event) in &events_after { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index be0675d..e7aec26 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -272,7 +272,7 @@ pub async fn get_key_changes_route( device_list_updates.extend( db.users .keys_changed( - &sender_user.to_string(), + sender_user.as_str(), body.from .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 36653fa..7d904f9 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -139,7 +139,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); db.rooms - .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -168,8 +168,8 @@ pub async fn get_message_events_route( for (_, event) in &events_after { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -205,8 +205,8 @@ pub async fn get_message_events_route( for (_, event) in &events_before { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -239,8 +239,8 @@ pub async fn get_message_events_route( if let Some(next_token) = next_token { db.rooms.lazy_load_mark_sent( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, lazy_loaded, next_token, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 29b1ae8..71e61da 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -52,7 +52,7 @@ pub async fn set_displayname_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_user.to_string(), + sender_user.as_str(), )? .ok_or_else(|| { Error::bad_database( @@ -195,7 +195,7 @@ pub async fn set_avatar_url_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_user.to_string(), + sender_user.as_str(), )? 
.ok_or_else(|| { Error::bad_database( diff --git a/src/database.rs b/src/database.rs index 1997dc0..7a4ddc6 100644 --- a/src/database.rs +++ b/src/database.rs @@ -212,28 +212,22 @@ impl Database { return Ok(()); } - if sled_exists { - if config.database_backend != "sled" { - return Err(Error::bad_config( - "Found sled at database_path, but is not specified in config.", - )); - } + if sled_exists && config.database_backend != "sled" { + return Err(Error::bad_config( + "Found sled at database_path, but is not specified in config.", + )); } - if sqlite_exists { - if config.database_backend != "sqlite" { - return Err(Error::bad_config( - "Found sqlite at database_path, but is not specified in config.", - )); - } + if sqlite_exists && config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); } - if rocksdb_exists { - if config.database_backend != "rocksdb" { - return Err(Error::bad_config( - "Found rocksdb at database_path, but is not specified in config.", - )); - } + if rocksdb_exists && config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); } Ok(()) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7d2301d..bf38bd8 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -118,7 +118,7 @@ impl Admin { if let Ok(response) = guard._db.memory_usage() { send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_string()), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_owned()), guard, &state_lock); } } AdminCommand::SendMessage(message) => { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0ba6c9b..c9a3c20 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2727,7 +2727,7 @@ impl Rooms { let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? + self.room_state_get(room_id, &EventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -3462,8 +3462,7 @@ impl Rooms { &key[0].to_be_bytes(), &chain .iter() - .map(|s| s.to_be_bytes().to_vec()) - .flatten() + .flat_map(|s| s.to_be_bytes().to_vec()) .collect::>(), )?; } @@ -3484,11 +3483,11 @@ impl Rooms { ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&device_id.as_bytes()); + key.extend_from_slice(device_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&ll_user.as_bytes()); + key.extend_from_slice(ll_user.as_bytes()); Ok(self.lazyloadedids.get(&key)?.is_some()) } @@ -3528,14 +3527,14 @@ impl Rooms { )) { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); - prefix.extend_from_slice(&room_id.as_bytes()); + prefix.extend_from_slice(room_id.as_bytes()); prefix.push(0xff); for ll_id in user_ids { let mut key = prefix.clone(); - key.extend_from_slice(&ll_id.as_bytes()); + key.extend_from_slice(ll_id.as_bytes()); self.lazyloadedids.insert(&key, &[])?; } } @@ -3546,15 +3545,15 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn lazy_load_reset( &self, - user_id: &Box, - device_id: &Box, - room_id: &Box, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, ) -> Result<()> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); - prefix.extend_from_slice(&room_id.as_bytes()); + prefix.extend_from_slice(room_id.as_bytes()); prefix.push(0xff); for (key, _) in self.lazyloadedids.scan_prefix(prefix) { diff --git a/src/server_server.rs b/src/server_server.rs index 54ae025..9129951 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1938,7 +1938,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match handle_outlier_pdu( origin, create_event, - &next_id, + next_id, room_id, value.clone(), db, @@ -2358,7 +2358,7 @@ pub fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -2821,7 +2821,7 @@ async fn create_join_event( )); } - acl_check(sender_servername, room_id, &db)?; + acl_check(sender_servername, room_id, db)?; // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db From 6e322716caf6f9181bf21444b552bf05d5f5a774 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:10 +0100 Subject: [PATCH 0928/1727] Delete rust-toolchain file --- rust-toolchain | 1 - 1 file changed, 1 deletion(-) delete mode 100644 rust-toolchain diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 74df8b1..0000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.53 From 5afb27a5a9ae887dea042e3ca9f0ecef98feff47 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:24 +0100 Subject: [PATCH 0929/1727] Use latest stable for Docker image --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5812fdf..b629690 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.53-alpine AS builder +FROM docker.io/rust:1.58-alpine AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies @@ -38,7 +38,7 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: @@ -78,4 +78,4 @@ WORKDIR /srv/conduit # Run Conduit and print backtraces on panics ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/srv/conduit/conduit" ] \ No newline at end of file +ENTRYPOINT [ "/srv/conduit/conduit" ] From ff5fec9e74b4ed12c4dae579344a94f1c1f22f29 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:52 +0100 Subject: [PATCH 0930/1727] Raise minimum supported Rust version to 1.56 --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 29a090c..b6a2a2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,8 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.2.0" -edition = "2018" +rust-version = "1.56" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 6bb1081b7127a38cdc85614e4250f52b557753c8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 13:13:14 +0100 Subject: [PATCH 0931/1727] Use BTreeMap::into_values Stable under new MSRV. --- src/database/users.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index c4fcee3..69a277c 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -531,11 +531,11 @@ impl Users { prefix.push(0xff); // Master key - let master_key_map = master_key + let mut master_key_ids = master_key .deserialize() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
- .keys; - let mut master_key_ids = master_key_map.values(); + .keys + .into_values(); let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -560,13 +560,14 @@ impl Users { // Self-signing key if let Some(self_signing_key) = self_signing_key { - let self_signing_key_map = self_signing_key + let mut self_signing_key_ids = self_signing_key .deserialize() .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") })? - .keys; - let mut self_signing_key_ids = self_signing_key_map.values(); + .keys + .into_values(); + let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Self signing key contained no key.", @@ -593,13 +594,14 @@ impl Users { // User-signing key if let Some(user_signing_key) = user_signing_key { - let user_signing_key_map = user_signing_key + let mut user_signing_key_ids = user_signing_key .deserialize() .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") })? - .keys; - let mut user_signing_key_ids = user_signing_key_map.values(); + .keys + .into_values(); + let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User signing key contained no key.", From 8d81c1c0722ad2f608adea44d7b4ceb1a8f645ae Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 13:23:58 +0100 Subject: [PATCH 0932/1727] Use MSRV for build CI jobs The test job will use the latest stable so all stable lints are included. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73a1a92..cdc1d4c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,7 @@ variables: - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" interruptible: true - image: "rust:latest" + image: "rust:1.56" tags: ["docker"] variables: CARGO_PROFILE_RELEASE_LTO: "true" From e378bc4a2c5590047b42cd4f8e244396125cb428 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Tue, 18 Jan 2022 13:53:17 +0200 Subject: [PATCH 0933/1727] Refactor admin commands to use structopt --- APPSERVICES.md | 8 +- Cargo.toml | 3 + src/database/admin.rs | 302 +++++++++++++++++++++++------------------- 3 files changed, 175 insertions(+), 138 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 894bc6f..257166e 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -18,7 +18,7 @@ First, go into the #admins room of your homeserver. The first person that registered on the homeserver automatically joins it. Then send a message into the room like this: - @conduit:your.server.name: register_appservice + @conduit:your.server.name: register-appservice ``` paste the @@ -31,7 +31,7 @@ the room like this: ``` You can confirm it worked by sending a message like this: -`@conduit:your.server.name: list_appservices` +`@conduit:your.server.name: list-appservices` The @conduit bot should answer with `Appservices (1): your-bridge` @@ -46,9 +46,9 @@ could help. To remove an appservice go to your admin room and execute -```@conduit:your.server.name: unregister_appservice ``` +```@conduit:your.server.name: unregister-appservice ``` -where `` one of the output of `list_appservices`. +where `` one of the output of `list-appservices`. 
### Tested appservices diff --git a/Cargo.toml b/Cargo.toml index c87d949..08afe1f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,9 @@ thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" sha-1 = "0.9.8" +# used for conduit's CLI and admin room command parsing +structopt = { version = "0.3.25", default-features = false } +pulldown-cmark = "0.9.1" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] diff --git a/src/database/admin.rs b/src/database/admin.rs index 518d758..55724db 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -5,6 +5,7 @@ use crate::{ pdu::PduBuilder, server_server, Database, PduEvent, }; +use regex::Regex; use rocket::{ futures::{channel::mpsc, stream::StreamExt}, http::RawStr, @@ -14,6 +15,7 @@ use ruma::{ EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; +use structopt::StructOpt; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; @@ -146,78 +148,98 @@ impl Admin { } pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { - let mut parts = command_line.split_whitespace().skip(1); + let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); - let command_name = match parts.next() { - Some(command) => command, + let command_name = match argv.get(0) { + Some(command) => *command, None => { - let message = "No command given. Use help for a list of commands."; + let markdown_message = "No command given. Use `help` for a list of commands."; + let html_message = markdown_to_html(&markdown_message); + return AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(message), - message, + markdown_message, + html_message, )); } }; - let args: Vec<_> = parts.collect(); + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if command_line.contains("_") { + command_with_dashes = command_name.replace("_", "-"); + argv[0] = &command_with_dashes; + } - match try_parse_admin_command(db, command_name, args, body) { + match try_parse_admin_command(db, argv, body) { Ok(admin_command) => admin_command, Err(error) => { - let message = format!( - "Encountered error while handling {} command:\n\ -
<pre>{}</pre>
          ", + let markdown_message = format!( + "Encountered an error while handling the `{}` command:\n\ + ```\n{}\n```", command_name, error, ); + let html_message = markdown_to_html(&markdown_message); AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(&message), - message, + markdown_message, + html_message, )) } } } -// Helper for `RoomMessageEventContent::text_html`, which needs the content as -// both markdown and HTML. -fn html_to_markdown(text: &str) -> String { - text.replace("
<p>", "")
-        .replace("</p>", "\n")
-        .replace("<pre>", "```\n")
-        .replace("</pre>", "\n```")
-        .replace("<code>", "`")
-        .replace("</code>", "`")
-        .replace("<li>", "* ")
-        .replace("</li>", "")
-        .replace("<ul>\n", "")
-        .replace("</ul>
          \n", "") +#[derive(StructOpt)] +enum AdminCommands { + #[structopt(verbatim_doc_comment)] + /// Register a bridge using its registration YAML + /// + /// This command needs a YAML generated by an appservice (such as a mautrix + /// bridge), which must be provided in a code-block below the command. + /// + /// Example: + /// ```` + /// @conduit:example.com: register-appservice + /// ``` + /// yaml content here + /// ``` + /// ```` + RegisterAppservice, + /// Unregister a bridge using its ID + UnregisterAppservice { appservice_identifier: String }, + /// List all the currently registered bridges + ListAppservices, + /// Get the auth_chain of a PDU + GetAuthChain { event_id: Box }, + /// Parse and print a PDU from a JSON + ParsePdu, + /// Retrieve and print a PDU by ID from the Conduit database + GetPdu { event_id: Box }, + /// Print database memory usage statistics + DatabaseMemoryUsage, } -const HELP_TEXT: &'static str = r#" -

<p>The following commands are available:</p>
-<ul>
-<li><code>register_appservice</code>: Register a bridge using its registration YAML</li>
-<li><code>unregister_appservice</code>: Unregister a bridge using its ID</li>
-<li><code>list_appservices</code>: List all the currently registered bridges</li>
-<li><code>get_auth_chain</code>: Get the `auth_chain` of a PDU</li>
-<li><code>parse_pdu</code>: Parse and print a PDU from a JSON</li>
-<li><code>get_pdu</code>: Retrieve and print a PDU by ID from the Conduit database</li>
-<li><code>database_memory_usage</code>: Print database memory usage statistics</li>
-</ul>
              -"#; - pub fn try_parse_admin_command( db: &Database, - command: &str, - args: Vec<&str>, + mut argv: Vec<&str>, body: Vec<&str>, ) -> Result { - let command = match command { - "help" => AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(HELP_TEXT), - HELP_TEXT, - )), - "register_appservice" => { + argv.insert(0, "@conduit:example.com:"); + let command = match AdminCommands::from_iter_safe(argv) { + Ok(command) => command, + Err(error) => { + println!("Before:\n{}\n", error.to_string()); + let markdown_message = usage_to_markdown(&error.to_string()) + .replace("example.com", db.globals.server_name().as_str()); + let html_message = markdown_to_html(&markdown_message); + + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_html(markdown_message, html_message), + )); + } + }; + + let admin_command = match command { + AdminCommands::RegisterAppservice => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); @@ -233,47 +255,35 @@ pub fn try_parse_admin_command( )) } } - "unregister_appservice" => { - if args.len() == 1 { - AdminCommand::UnregisterAppservice(args[0].to_owned()) + AdminCommands::UnregisterAppservice { + appservice_identifier, + } => AdminCommand::UnregisterAppservice(appservice_identifier), + AdminCommands::ListAppservices => AdminCommand::ListAppservices, + AdminCommands::GetAuthChain { event_id } => { + let event_id = Arc::::from(event_id); + if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let elapsed = start.elapsed(); + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), + )); } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Missing appservice identifier", - )) + AdminCommand::SendMessage(RoomMessageEventContent::text_plain("Event not found.")) } } - "list_appservices" => AdminCommand::ListAppservices, - "get_auth_chain" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse_arc(args[0]) { - if let Some(event) = db.rooms.get_pdu_json(&event_id)? 
{ - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { - Error::bad_database("Invalid room id field in event in database") - })?; - let start = Instant::now(); - let count = - server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); - let elapsed = start.elapsed(); - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); - } - } - } - - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Usage: get_auth_chain ", - )) - } - "parse_pdu" => { + AdminCommands::ParsePdu => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -312,59 +322,83 @@ pub fn try_parse_admin_command( )) } } - "get_pdu" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse(args[0]) { - let mut outlier = false; - let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = serde_json::to_string_pretty(&json) - .expect("canonical json is valid json"); - AdminCommand::SendMessage( - RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, json_text), - format!("

<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
              \n", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) - ), - ) - } - None => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "PDU not found.", - )), - } - } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Event ID could not be parsed.", + AdminCommands::GetPdu { event_id } => { + let mut outlier = false; + let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = db.rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + AdminCommand::SendMessage(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + json_text + ), + format!( + "

<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
              \n", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + RawStr::new(&json_text).html_escape() + ), )) } - } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Usage: get_pdu ", - )) + None => { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain("PDU not found.")) + } } } - "database_memory_usage" => AdminCommand::ShowMemoryUsage, - _ => { - let message = format!( - "Unrecognized command {}, try help for a list of commands.", - command, - ); - AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(&message), - message, - )) - } + AdminCommands::DatabaseMemoryUsage => AdminCommand::ShowMemoryUsage, }; - Ok(command) + Ok(admin_command) +} + +fn usage_to_markdown(text: &str) -> String { + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Put the first line (command name and version text) on its own paragraph + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "*$1*\n\n"); + + // Wrap command names in backticks + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ ([a-z-]+) +(.*)$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, " `$1`: $2"); + + // Add * to list items + let re = Regex::new("(?m)^ (.*)$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "* $1"); + + // Turn section names to headings + let re = Regex::new("(?m)^([A-Z-]+):$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "#### $1"); + + text.to_string() +} + +fn markdown_to_html(text: &str) -> String { + // CommonMark's spec allows HTML tags; however, CLI required arguments look + // very much like tags so escape them. + let text = text.replace("<", "<").replace(">", ">"); + + let mut html_output = String::new(); + + let parser = pulldown_cmark::Parser::new(&text); + pulldown_cmark::html::push_html(&mut html_output, parser); + + html_output } From cc3ef1a8be08b9212a16957062304d7bd5da1111 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Fri, 21 Jan 2022 11:06:16 +0200 Subject: [PATCH 0934/1727] Improve help text for admin commands --- src/database/admin.rs | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index f690bdf..362ef29 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -147,6 +147,7 @@ impl Admin { } } +// Parse chat messages from the admin room into an AdminCommand object pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); @@ -191,10 +192,13 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - #[derive(StructOpt)] enum AdminCommands { #[structopt(verbatim_doc_comment)] - /// Register a bridge using its registration YAML + /// Register an appservice using its registration YAML /// - /// This command needs a YAML generated by an appservice (such as a mautrix - /// bridge), which must be provided in a code-block below the command. + /// This command needs a YAML generated by an appservice (such as a bridge), + /// which must be provided in a Markdown code-block below the command. 
+ /// + /// Registering a new bridge using the ID of an existing bridge will replace + /// the old one. /// /// Example: /// ```` @@ -204,16 +208,27 @@ enum AdminCommands { /// ``` /// ```` RegisterAppservice, - /// Unregister a bridge using its ID + + /// Unregister an appservice using its ID + /// + /// You can find the ID using the `list-appservices` command. UnregisterAppservice { appservice_identifier: String }, - /// List all the currently registered bridges + + /// List all the currently registered appservices ListAppservices, + /// Get the auth_chain of a PDU GetAuthChain { event_id: Box }, + /// Parse and print a PDU from a JSON + /// + /// The PDU event is only checked for validity and is not added to the + /// database. ParsePdu, + /// Retrieve and print a PDU by ID from the Conduit database GetPdu { event_id: Box }, + /// Print database memory usage statistics DatabaseMemoryUsage, } @@ -365,6 +380,7 @@ pub fn try_parse_admin_command( Ok(admin_command) } +// Utility to turn structopt's `--help` text to markdown. fn usage_to_markdown(text: &str) -> String { // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); @@ -390,6 +406,7 @@ fn usage_to_markdown(text: &str) -> String { text.to_string() } +// Convert markdown to HTML using the CommonMark flavor fn markdown_to_html(text: &str) -> String { // CommonMark's spec allows HTML tags; however, CLI required arguments look // very much like tags so escape them. From ba6d72f3f93aeb96c9ca98daab3c34e969c76008 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 21 Jan 2022 14:28:07 +0100 Subject: [PATCH 0935/1727] Reformatted --- src/database/users.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/database/users.rs b/src/database/users.rs index 9b986d4..a6b6fab 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -147,7 +147,10 @@ impl Users { match utils::string_from_bytes(username) { Ok(u) => Some(u), Err(e) => { - warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); + warn!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + ); None } } From 57979da28c0af4bc14787575d94308d5762e7dc6 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Fri, 21 Jan 2022 17:34:21 +0200 Subject: [PATCH 0936/1727] Change structopt to clap, remove markdown dependency --- Cargo.lock | 75 +++++++++++++++++++++++- Cargo.toml | 3 +- src/database/admin.rs | 131 ++++++++++++++++++++++++++---------------- 3 files changed, 156 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5be10f1..ae385fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -269,6 +269,33 @@ dependencies = [ "libloading", ] +[[package]] +name = "clap" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a30c3bf9ff12dfe5dae53f0a96e0febcd18420d1c0e7fad77796d9d5c4b5375" +dependencies = [ + "bitflags", + "clap_derive", + "indexmap", + "lazy_static", + "os_str_bytes", + "textwrap", +] + +[[package]] +name = "clap_derive" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "517358c28fcef6607bf6f76108e02afad7e82297d132a6b846dcc1fc3efcd153" +dependencies = [ + "heck 0.4.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -281,6 +308,7 @@ version = "0.2.0" dependencies = [ "base64 0.13.0", "bytes", + "clap", "crossbeam", "directories", "heed", @@ -630,7 +658,7 @@ 
version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro2", "quote", "syn", @@ -902,6 +930,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "heed" version = "0.10.6" @@ -1570,6 +1604,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_str_bytes" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +dependencies = [ + "memchr", +] + [[package]] name = "page_size" version = "0.4.2" @@ -1728,6 +1771,30 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2863,6 +2930,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "textwrap" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" + [[package]] name = "thiserror" version = "1.0.30" diff --git a/Cargo.toml b/Cargo.toml index 9a2d2fd..3f8677d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,7 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing -structopt = { version = "0.3.25", default-features = false } -pulldown-cmark = "0.9.1" +clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] diff --git a/src/database/admin.rs b/src/database/admin.rs index 362ef29..59b8acd 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -5,6 +5,7 @@ use crate::{ pdu::PduBuilder, server_server, Database, PduEvent, }; +use clap::Parser; use regex::Regex; use rocket::{ futures::{channel::mpsc, stream::StreamExt}, @@ -15,7 +16,6 @@ use ruma::{ EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; -use structopt::StructOpt; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; @@ -155,7 +155,7 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - Some(command) => *command, None => { let markdown_message = "No command given. Use `help` for a list of commands."; - let html_message = markdown_to_html(&markdown_message); + let html_message = "No command given. 
Use help for a list of commands."; return AdminCommand::SendMessage(RoomMessageEventContent::text_html( markdown_message, @@ -164,10 +164,17 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - } }; + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv[0] == "help" { + argv.remove(0); + argv.push("--help"); + } + // Backwards compatibility with `register_appservice`-style commands let command_with_dashes; - if command_line.contains("_") { - command_with_dashes = command_name.replace("_", "-"); + if argv[0].contains("_") { + command_with_dashes = argv[0].replace("_", "-"); argv[0] = &command_with_dashes; } @@ -179,7 +186,11 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - ```\n{}\n```", command_name, error, ); - let html_message = markdown_to_html(&markdown_message); + let html_message = format!( + "Encountered an error while handling the {} command:\n\ +
<pre>\n{}\n</pre>
              ", + command_name, error, + ); AdminCommand::SendMessage(RoomMessageEventContent::text_html( markdown_message, @@ -189,9 +200,10 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - } } -#[derive(StructOpt)] +#[derive(Parser)] +#[clap(name = "@conduit:example.com", version = env!("CARGO_PKG_VERSION"))] enum AdminCommands { - #[structopt(verbatim_doc_comment)] + #[clap(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// /// This command needs a YAML generated by an appservice (such as a bridge), @@ -200,25 +212,25 @@ enum AdminCommands { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. /// - /// Example: - /// ```` - /// @conduit:example.com: register-appservice - /// ``` - /// yaml content here - /// ``` - /// ```` + /// [add-yaml-block-to-usage] RegisterAppservice, /// Unregister an appservice using its ID - /// + /// /// You can find the ID using the `list-appservices` command. - UnregisterAppservice { appservice_identifier: String }, + UnregisterAppservice { + /// The appservice to unregister + appservice_identifier: String, + }, /// List all the currently registered appservices ListAppservices, /// Get the auth_chain of a PDU - GetAuthChain { event_id: Box }, + GetAuthChain { + /// An event ID (the $ character followed by the base64 reference hash) + event_id: Box, + }, /// Parse and print a PDU from a JSON /// @@ -227,7 +239,10 @@ enum AdminCommands { ParsePdu, /// Retrieve and print a PDU by ID from the Conduit database - GetPdu { event_id: Box }, + GetPdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + }, /// Print database memory usage statistics DatabaseMemoryUsage, @@ -239,16 +254,16 @@ pub fn try_parse_admin_command( body: Vec<&str>, ) -> Result { argv.insert(0, "@conduit:example.com:"); - let command = match AdminCommands::from_iter_safe(argv) { + let command = match AdminCommands::try_parse_from(argv) { Ok(command) => command, Err(error) => { - println!("Before:\n{}\n", error.to_string()); - let markdown_message = usage_to_markdown(&error.to_string()) + let message = error + .to_string() .replace("example.com", db.globals.server_name().as_str()); - let html_message = markdown_to_html(&markdown_message); + let html_message = usage_to_html(&message); return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_html(markdown_message, html_message), + RoomMessageEventContent::text_html(message, html_message), )); } }; @@ -380,42 +395,58 @@ pub fn try_parse_admin_command( Ok(admin_command) } -// Utility to turn structopt's `--help` text to markdown. -fn usage_to_markdown(text: &str) -> String { +// Utility to turn clap's `--help` text to HTML. +fn usage_to_html(text: &str) -> String { // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); let text = text.replace("subcommand", "command"); - // Put the first line (command name and version text) on its own paragraph + // Escape option names (e.g. ``) since they look like HTML tags + let text = text.replace("<", "<").replace(">", ">"); + + // Italicize the first line (command name and version text) let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "*$1*\n\n"); + let text = re.replace_all(&text, "$1\n"); - // Wrap command names in backticks + // Unmerge wrapped lines + let text = text.replace("\n ", " "); + + // Wrap option names in backticks. 
The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information // (?m) enables multi-line mode for ^ and $ - let re = Regex::new("(?m)^ ([a-z-]+) +(.*)$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, " `$1`: $2"); + let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") + .expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1: $4"); - // Add * to list items - let re = Regex::new("(?m)^ (.*)$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "* $1"); + // // Enclose examples in code blocks + // // (?ms) enables multi-line mode and dot-matches-all + // let re = + // Regex::new("(?ms)^Example:\n(.*?)\nUSAGE:$").expect("Regex compilation should not fail"); + // let text = re.replace_all(&text, "EXAMPLE:\n
<pre>$1</pre>
              \nUSAGE:"); - // Turn section names to headings - let re = Regex::new("(?m)^([A-Z-]+):$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "#### $1"); + let has_yaml_block_marker = text.contains("\n[add-yaml-block-to-usage]\n"); + let text = text.replace("\n[add-yaml-block-to-usage]\n", ""); + + // Add HTML line-breaks + let text = text.replace("\n", "
              \n"); + + let text = if !has_yaml_block_marker { + // Wrap the usage line in code tags + let re = Regex::new("(?m)^USAGE:
<br>\n    (@conduit:.*)<br>$")
+            .expect("Regex compilation should not fail");
+        re.replace_all(&text, "USAGE:<br>\n<code>$1</code>")
+    } else {
+        // Wrap the usage line in a code block, and add a yaml block example
+        // This makes the usage of e.g. `register-appservice` more accurate
+        let re = Regex::new("(?m)^USAGE:<br>\n    (.*?)<br>\n<br>\n")
+            .expect("Regex compilation should not fail");
+        re.replace_all(
+            &text,
+            "USAGE:<br>\n<pre>$1\n```\nyaml content here\n```</pre>
              ", + ) + }; text.to_string() } - -// Convert markdown to HTML using the CommonMark flavor -fn markdown_to_html(text: &str) -> String { - // CommonMark's spec allows HTML tags; however, CLI required arguments look - // very much like tags so escape them. - let text = text.replace("<", "<").replace(">", ">"); - - let mut html_output = String::new(); - - let parser = pulldown_cmark::Parser::new(&text); - pulldown_cmark::html::push_html(&mut html_output, parser); - - html_output -} From 97d56af5bd48474efc9aa1ac94ed4295e282d8ca Mon Sep 17 00:00:00 2001 From: Reiner Herrmann Date: Sat, 15 Jan 2022 17:23:14 +0000 Subject: [PATCH 0937/1727] Add heisenbridge to tested appservices --- APPSERVICES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/APPSERVICES.md b/APPSERVICES.md index f23918b..5ff223e 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -58,3 +58,4 @@ These appservices have been tested and work with Conduit without any extra steps - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. +- [heisenbridge](https://github.com/hifi/heisenbridge/) From d94f3c1e9aad363aff552933b104944094f7ddc2 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Fri, 21 Jan 2022 17:06:15 +0100 Subject: [PATCH 0938/1727] fix: make sure cc-rs and bindgen use the correct paths when cross-compiling --- .gitlab-ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cdc1d4c..d0d4f3e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,6 +33,12 @@ variables: - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) + # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information + - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib -latomic"' + # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) + # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information + - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: - time cargo build --target $TARGET --release - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' From bfef94f5f4f465156317e5a6d60fffc8e1fd9240 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Fri, 21 Jan 2022 17:26:25 +0100 Subject: [PATCH 0939/1727] fix: linking against libatomic is no longer required since the library path is fixed --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d0d4f3e..236ce0a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ variables: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib -latomic"' + - 'export 
TARGET_CFLAGS="-L$TARGET_HOME/lib"' # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' From f88523988e23a09ffc5e1b9ab19e435863be3a9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 21 Jan 2022 09:19:19 +0100 Subject: [PATCH 0940/1727] improvement: use jemalloc for lower memory usage --- Cargo.lock | 258 ++++++++++++++++++++++++---------------------------- Cargo.toml | 10 +- src/main.rs | 7 ++ 3 files changed, 132 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fe767e..493ac08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,7 +14,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "once_cell", "version_check", ] @@ -303,8 +303,6 @@ dependencies = [ "ruma", "rusqlite", "rust-argon2", - "rustls 0.19.1", - "rustls-native-certs 0.5.0", "serde", "serde_json", "serde_yaml", @@ -313,12 +311,13 @@ dependencies = [ "thiserror", "thread_local", "threadpool", + "tikv-jemalloc-ctl", + "tikv-jemallocator", "tokio", "tracing", "tracing-flame", "tracing-subscriber 0.2.25", "trust-dns-resolver", - "webpki 0.22.0", ] [[package]] @@ -350,22 +349,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "core-foundation" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" - [[package]] name = "cpufeatures" version = "0.2.1" @@ -392,9 +375,9 @@ checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" [[package]] name = "crc32fast" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" +checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if 1.0.0", ] @@ -713,6 +696,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + [[package]] name = "futures" version = "0.3.19" @@ -847,9 +836,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1035,7 +1024,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite", - "socket2 0.4.2", + "socket2 0.4.3", "tokio", "tower-service", "tracing", @@ -1180,9 +1169,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" dependencies = [ "wasm-bindgen", ] @@ -1224,15 +1213,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.112" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = "eef78b64d87775463c549fbd80e19249ef436ea3bf1de2a1eb7e717ec7fab1e9" [[package]] name = "libloading" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ "cfg-if 1.0.0", "winapi", @@ -1308,7 +1297,7 @@ dependencies = [ "serde", "serde_json", "tracing", - "tracing-subscriber 0.3.5", + "tracing-subscriber 0.3.6", ] [[package]] @@ -1528,12 +1517,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - [[package]] name = "opentelemetry" version = "0.16.0" @@ -1621,6 +1604,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "paste" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" + [[package]] name = "pear" version = "0.2.3" @@ -1669,9 +1658,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c6aa7d7f093620a28b74fcf5f5da73ba17a9e52fcbbdbb4ecc89e61cb2d673" +checksum = "b71907e1dfa6844b657f5ca59e9a076e7d6281efb4885526ba9e235a18e7e3b3" dependencies = [ "crc", "data-encoding", @@ -1863,7 +1852,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -1899,7 +1888,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "redox_syscall", ] @@ -1982,7 +1971,6 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.20.2", - "rustls-native-certs 0.6.1", "rustls-pemfile", "serde", "serde_json", @@ -1994,6 +1982,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.7.0", ] @@ -2443,30 +2432,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "rustls-native-certs" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" -dependencies = [ - "openssl-probe", - "rustls 0.19.1", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -2488,16 +2453,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", -] - [[package]] name = "scoped-tls" version = "1.0.0" @@ -2530,29 +2485,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "security-framework" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "0.9.0" @@ -2570,18 +2502,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +checksum = "96b3c34c1690edf8174f5b289a336ab03f568a4460d8c6df75f2f3a692b3bc6a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" +checksum = "784ed1fbfa13fe191077537b0d70ec8ad1e903cfe04831da608aa36457cb653d" dependencies = [ "proc-macro2", "quote", @@ -2590,9 +2522,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" +checksum = "c059c05b48c5c0067d4b4b2b4f0732dd65feb52daf7e0ea09cd87e7dadc1af79" dependencies = [ "itoa 1.0.1", "ryu", @@ -2601,12 +2533,12 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 0.4.8", + "itoa 1.0.1", "ryu", "serde", ] @@ -2638,9 +2570,18 @@ dependencies = [ [[package]] name = "sha1" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" 
@@ -2721,9 +2662,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" @@ -2738,9 +2679,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "0f82496b90c36d70af5fcd482edaa2e0bd16fade569de1330405fecbbdac736b" dependencies = [ "libc", "winapi", @@ -2851,9 +2792,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", @@ -2946,6 +2887,38 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tikv-jemalloc-ctl" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb833c46ecbf8b6daeccb347cefcabf9c1beb5c9b0f853e1cec45632d9963e69" +dependencies = [ + "libc", + "paste", + "tikv-jemalloc-sys", +] + +[[package]] +name = "tikv-jemalloc-sys" +version = "0.4.2+5.2.1-patched.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +dependencies = [ + "cc", + "fs_extra", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c14a5a604eb8715bc5785018a37d00739b180bcf609916ddf4393d33d49ccdf" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.1.43" @@ -3200,9 +3173,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" +checksum = "77be66445c4eeebb934a7340f227bfe7b338173d3f8c00a60a5a58005c9faecf" dependencies = [ "ansi_term", "lazy_static", @@ -3355,7 +3328,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -3394,9 +3367,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3404,9 +3377,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", @@ -3419,9 
+3392,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" +checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3431,9 +3404,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3441,9 +3414,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" dependencies = [ "proc-macro2", "quote", @@ -3454,15 +3427,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3488,6 +3461,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +dependencies = [ + "webpki 0.22.0", +] + [[package]] name = "weezl" version = "0.1.5" @@ -3563,18 +3545,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "cc222aec311c323c717f56060324f32b82da1ce1dd81d9a09aa6a9030bfe08db" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.2.2" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" +checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e3614ec..78a4c8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,11 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] } -# Custom TLS verifier -rustls = { version = "0.19.1", features = ["dangerous_configuration"] } -rustls-native-certs = "0.5.0" -webpki = "0.22.0" +reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"] } # Used for conduit::Error type thiserror = 
"1.0.28" # Used to generate thumbnails for images @@ -87,6 +83,10 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" +[target.'cfg(not(target_env = "msvc"))'.dependencies] +tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } +tikv-jemallocator = { version = "0.4.1", features = ['unprefixed_malloc_on_supported_platforms'] } + [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] diff --git a/src/main.rs b/src/main.rs index d9bbc24..b18ca80 100644 --- a/src/main.rs +++ b/src/main.rs @@ -25,6 +25,13 @@ use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate pub use rocket::State; +#[cfg(not(target_env = "msvc"))] +use tikv_jemallocator::Jemalloc; + +#[cfg(not(target_env = "msvc"))] +#[global_allocator] +static GLOBAL: Jemalloc = Jemalloc; + fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) .manage(data) From 3e9abfedb43e6f52ebb3f24adaf8bf0871712181 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 00:14:19 +0100 Subject: [PATCH 0941/1727] fix: make sure libstdc++ is linked statically when cross-compiling --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 236ce0a..5cae743 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,10 +33,10 @@ variables: - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) + # Make sure that cc-rs links the correct libraries statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib"' - # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) + - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib" CXXSTDLIB="static=stdc++"' + # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: From a021680591cf581fccd05a9dbb0914163f69e8ba Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 01:14:36 +0100 Subject: [PATCH 0942/1727] fix: make sure libatomic is always linked because it's skipped on arm targets --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5cae743..b863de9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ variables: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # Make sure that cc-rs links the correct libraries statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib" CXXSTDLIB="static=stdc++"' + - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib -latomic" CXXSTDLIB="static=stdc++"' # Make sure that rust-bindgen uses the correct include path when 
cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' From cd9902637ddf3f8e7711f01a5cf044725704e28a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 17:34:30 +0100 Subject: [PATCH 0943/1727] feat: use rustembedded/cross images and use static relocation model to fix cross-compile --- .gitlab-ci.yml | 39 ++++++++++++++++++++++++--------------- CROSS_COMPILE.md | 11 ----------- Cross.toml | 11 +++++++++++ DEPLOY.md | 2 +- cross/README.md | 37 +++++++++++++++++++++++++++++++++++++ cross/build.sh | 31 +++++++++++++++++++++++++++++++ cross/test.sh | 8 ++++++++ 7 files changed, 112 insertions(+), 27 deletions(-) delete mode 100644 CROSS_COMPILE.md create mode 100644 Cross.toml create mode 100644 cross/README.md create mode 100755 cross/build.sh create mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b863de9..993145a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,33 +21,46 @@ variables: - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" interruptible: true - image: "rust:1.56" + image: "rust:1.58" tags: ["docker"] + services: ["docker:dind"] variables: + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 + SHARED_PATH: $CI_PROJECT_DIR/shared/ CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow before_script: - 'echo "Building for target $TARGET"' - - "rustc --version && cargo --version && rustup show" # Print version info for debugging - - "rustup target add $TARGET" + - "rustup show && rustc --version && cargo --version" # Print version info for debugging # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - # Make sure that cc-rs links the correct libraries statically when cross-compiling - # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib -latomic" CXXSTDLIB="static=stdc++"' - # Make sure that rust-bindgen uses the correct include path when cross-compiling - # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: - - time cargo build --target $TARGET --release + # install cross-compiling prerequisites + - 'apt-get update && apt-get install -y docker.io && docker version' # install docker + - 'cargo install cross && cross --version' # install cross + # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) + - 'mkdir -p $SHARED_PATH/cargo' + - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' + - 'cp -r $RUSTUP_HOME $SHARED_PATH' + - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + # cross-compile conduit for target + - 'time ./cross/build.sh --locked --release' - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' + cache: + # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci + - key: 'cargo-cache-$TARGET' + paths: + - $SHARED_PATH/cargo/registry/index + - $SHARED_PATH/cargo/registry/cache + - $SHARED_PATH/cargo/git/db artifacts: expire_in: 
never build:release:cargo:x86_64-unknown-linux-musl-with-debug: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:x86_64-musl variables: CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling TARGET: "x86_64-unknown-linux-musl" @@ -61,7 +74,6 @@ build:release:cargo:x86_64-unknown-linux-musl-with-debug: build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" artifacts: @@ -72,7 +84,6 @@ build:release:cargo:x86_64-unknown-linux-musl: build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:arm-musleabihf variables: TARGET: "arm-unknown-linux-musleabihf" artifacts: @@ -83,7 +94,6 @@ build:release:cargo:arm-unknown-linux-musleabihf: build:release:cargo:armv7-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:armv7-musleabihf variables: TARGET: "armv7-unknown-linux-musleabihf" artifacts: @@ -94,7 +104,6 @@ build:release:cargo:armv7-unknown-linux-musleabihf: build:release:cargo:aarch64-unknown-linux-musl: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:aarch64-musl variables: TARGET: "aarch64-unknown-linux-musl" artifacts: diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md deleted file mode 100644 index e38a6ad..0000000 --- a/CROSS_COMPILE.md +++ /dev/null @@ -1,11 +0,0 @@ -Install docker: - -``` -$ sudo apt install docker -$ sudo usermod -aG docker $USER -$ exec sudo su -l $USER -$ sudo systemctl start docker -$ cargo install cross -$ cross build --release --target armv7-unknown-linux-musleabihf -``` -The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 0000000..491efcb --- /dev/null +++ b/Cross.toml @@ -0,0 +1,11 @@ +[target.aarch64-unknown-linux-musl] +image = "rust-cross:aarch64-unknown-linux-musl" + +[target.arm-unknown-linux-musleabihf] +image = "rust-cross:arm-unknown-linux-musleabihf" + +[target.armv7-unknown-linux-musleabihf] +image = "rust-cross:armv7-unknown-linux-musleabihf" + +[target.x86_64-unknown-linux-musl] +image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/DEPLOY.md b/DEPLOY.md index 0058b93..38e1e28 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -37,7 +37,7 @@ $ cargo build --release Note that this currently requires Rust 1.50. -If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). +If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). ## Adding a Conduit user diff --git a/cross/README.md b/cross/README.md new file mode 100644 index 0000000..2829d23 --- /dev/null +++ b/cross/README.md @@ -0,0 +1,37 @@ +## Cross compilation + +The `cross` folder contains a set of convenience scripts (`build.sh` and `test.sh`) for cross-compiling Conduit. + +Currently supported targets are + +- aarch64-unknown-linux-musl +- arm-unknown-linux-musleabihf +- armv7-unknown-linux-musleabihf +- x86\_64-unknown-linux-musl + +### Install prerequisites +#### Docker +[Installation guide](https://docs.docker.com/get-docker/). +```sh +$ sudo apt install docker +$ sudo systemctl start docker +$ sudo usermod -aG docker $USER +$ newgrp docker +``` + +#### Cross +[Installation guide](https://github.com/rust-embedded/cross/#installation). 
+```sh +$ cargo install cross +``` + +### Buiding Conduit +```sh +$ TARGET=armv7-unknown-linux-musleabihf ./cross/build.sh --release +``` +The cross-compiled binary is at `target/armv7-unknown-linux-musleabihf/release/conduit` + +### Testing Conduit +```sh +$ TARGET=armv7-unknown-linux-musleabihf ./cross/test.sh --release +``` diff --git a/cross/build.sh b/cross/build.sh new file mode 100755 index 0000000..4a6d449 --- /dev/null +++ b/cross/build.sh @@ -0,0 +1,31 @@ +#!/bin/bash +set -ex + +# build custom container with libclang and static compilation +tag="rust-cross:${TARGET:?}" +docker build --tag="$tag" - << EOF +FROM rustembedded/cross:$TARGET + +# Install libclang for generating bindings with rust-bindgen +# The architecture is not relevant here since it's not used for compilation +RUN apt-get update && \ + apt-get install --assume-yes libclang-dev + +# Set the target prefix +ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" + +# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling +# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information +ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" +# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +# Strip symbols while compiling in release mode +$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') + +# Make sure that rust-bindgen uses the correct include path when cross-compiling +# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information +ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" +EOF + +# build conduit for a specific target +cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh new file mode 100755 index 0000000..0aa0909 --- /dev/null +++ b/cross/test.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh +set -ex + +# Build conduit for a specific target +cross/build.sh $@ + +# Test conduit for a specific target +cross test --target="$TARGET" $@ From fd67cd7450e33b97050372bdd13832828fa75458 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 15:10:42 +0100 Subject: [PATCH 0944/1727] feat: support targetting i686 --- .gitlab-ci.yml | 28 +++++++++++++++++++--------- Cross.toml | 3 +++ cross/build.sh | 6 ++++-- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 993145a..b5a12f3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -37,7 +37,6 @@ variables: - "rustup show && rustc --version && cargo --version" # Print version info for debugging # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - script: # install cross-compiling prerequisites - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - 'cargo install cross && cross --version' # install cross @@ -46,16 +45,17 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' 
+ - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - - key: 'cargo-cache-$TARGET' - paths: - - $SHARED_PATH/cargo/registry/index - - $SHARED_PATH/cargo/registry/cache - - $SHARED_PATH/cargo/git/db + key: 'cargo-cache-$TARGET' + paths: + - $SHARED_PATH/cargo/registry/index + - $SHARED_PATH/cargo/registry/cache + - $SHARED_PATH/cargo/git/db artifacts: expire_in: never @@ -82,6 +82,16 @@ build:release:cargo:x86_64-unknown-linux-musl: - "conduit-x86_64-unknown-linux-musl" expose_as: "Conduit for x86_64-unknown-linux-musl" +build:release:cargo:i686-unknown-linux-musl: + extends: .build-cargo-shared-settings + variables: + TARGET: "i686-unknown-linux-musl" + artifacts: + name: "conduit-i686-unknown-linux-musl" + paths: + - "conduit-i686-unknown-linux-musl" + expose_as: "Conduit for i686-unknown-linux-musl" + build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings variables: @@ -119,14 +129,14 @@ build:release:cargo:aarch64-unknown-linux-musl: cache: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: - - "time cargo build --target $TARGET" + # cross-compile conduit for target + - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' artifacts: expire_in: 4 weeks build:debug:cargo:x86_64-unknown-linux-musl: extends: ".cargo-debug-shared-settings" - image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" artifacts: diff --git a/Cross.toml b/Cross.toml index 491efcb..22c84b9 100644 --- a/Cross.toml +++ b/Cross.toml @@ -7,5 +7,8 @@ image = "rust-cross:arm-unknown-linux-musleabihf" [target.armv7-unknown-linux-musleabihf] image = "rust-cross:armv7-unknown-linux-musleabihf" +[target.i686-unknown-linux-musl] +image = "rust-cross:i686-unknown-linux-musl" + [target.x86_64-unknown-linux-musl] image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 4a6d449..24a2224 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -17,8 +17,10 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # Make sure that cc-rs links libc/libstdc++ statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +# Forcefully linking against libatomic and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic"') +# Forcefully linking against libc is required for 32-bit, otherwise symbols are missing +$([[ $TARGET =~ arm|i686 ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') From 219dfbabd58ff6008f8a85c291f8cf4f6da1318a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 17:31:12 +0100 Subject: [PATCH 0945/1727] fix: pass RUSTC_WRAPPER to the cross container and enforce static builds --- .gitlab-ci.yml | 18 ++++++------------ Cross.toml | 3 --- cross/build.sh | 9 +++++---- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/.gitlab-ci.yml 
b/.gitlab-ci.yml index b5a12f3..fac678c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,8 +35,6 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # install cross-compiling prerequisites - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - 'cargo install cross && cross --version' # install cross @@ -45,10 +43,14 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then export RUSTC_WRAPPER=$SHARED_PATH/cargo/bin/sccache && curl $SCCACHE_BIN_URL --output $RUSTC_WRAPPER && chmod +x $RUSTC_WRAPPER; fi script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + # assert that the binary is statically linked + - 'file conduit-$TARGET | grep "static\(-pie\|ally\) linked"' cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -82,16 +84,6 @@ build:release:cargo:x86_64-unknown-linux-musl: - "conduit-x86_64-unknown-linux-musl" expose_as: "Conduit for x86_64-unknown-linux-musl" -build:release:cargo:i686-unknown-linux-musl: - extends: .build-cargo-shared-settings - variables: - TARGET: "i686-unknown-linux-musl" - artifacts: - name: "conduit-i686-unknown-linux-musl" - paths: - - "conduit-i686-unknown-linux-musl" - expose_as: "Conduit for i686-unknown-linux-musl" - build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings variables: @@ -132,6 +124,8 @@ build:release:cargo:aarch64-unknown-linux-musl: # cross-compile conduit for target - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' + # assert that the binary is statically linked + - 'file conduit-debug-$TARGET | grep "static\(-pie\|ally\) linked"' artifacts: expire_in: 4 weeks diff --git a/Cross.toml b/Cross.toml index 22c84b9..491efcb 100644 --- a/Cross.toml +++ b/Cross.toml @@ -7,8 +7,5 @@ image = "rust-cross:arm-unknown-linux-musleabihf" [target.armv7-unknown-linux-musleabihf] image = "rust-cross:armv7-unknown-linux-musleabihf" -[target.i686-unknown-linux-musl] -image = "rust-cross:i686-unknown-linux-musl" - [target.x86_64-unknown-linux-musl] image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 24a2224..3408260 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -17,13 +17,14 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # Make sure that cc-rs links libc/libstdc++ statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic"') -# 
Forcefully linking against libc is required for 32-bit, otherwise symbols are missing -$([[ $TARGET =~ arm|i686 ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -lstatic=c"') +# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') +# Support a rustc wrapper like sccache when cross-compiling +ENV RUSTC_WRAPPER="$RUSTC_WRAPPER" + # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" From c2ad2b3dd747e7a8baa5ff2f9ade8edb92204aa6 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 17:38:13 +0100 Subject: [PATCH 0946/1727] fix: pass sccache variables to cross container with build.env.passthrough --- Cross.toml | 12 ++++++++++++ cross/build.sh | 3 --- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/Cross.toml b/Cross.toml index 491efcb..a989a98 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,3 +1,15 @@ +[build.env] +# CI uses an S3 endpoint to store sccache artifacts, so their config needs to +# be available in the cross container as well +passthrough = [ + "RUSTC_WRAPPER", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "SCCACHE_BUCKET", + "SCCACHE_ENDPOINT", + "SCCACHE_S3_USE_SSL", +] + [target.aarch64-unknown-linux-musl] image = "rust-cross:aarch64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 3408260..4a6d449 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -22,9 +22,6 @@ $([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clin # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') -# Support a rustc wrapper like sccache when cross-compiling -ENV RUSTC_WRAPPER="$RUSTC_WRAPPER" - # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" From c7560b3502d27a49f935c347458df6421459c485 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 18:09:14 +0100 Subject: [PATCH 0947/1727] fix: remove libgcc dependency in ci builds since the binary is ensured to be statically compiled --- docker/ci-binaries-packaging.Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index f460310..a6339be 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -9,6 +9,7 @@ FROM docker.io/alpine:3.15.0 AS runner + # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 @@ -18,10 +19,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: # ca-certificates: for https -# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. 
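Because the RUSTFLAGS conditionals above are expanded while the builder Dockerfile is generated, it is not always obvious which extra flags a given target ends up with. A small bash sketch that only mirrors the same regex checks as cross/build.sh and prints the result, without building anything:

```bash
#!/bin/bash
# Illustration only: mirrors the target checks in cross/build.sh to show which
# extra RUSTFLAGS a target would receive.
TARGET="${1:?usage: $0 <target-triple>}"
EXTRA=""

# arm32 needs libgcc and libatomic pulled in explicitly.
[[ $TARGET =~ arm ]] && EXTRA="$EXTRA -Clink-arg=-lgcc -Clink-arg=-latomic"
# 32-bit targets (arm, i686) additionally need libc linked statically.
[[ $TARGET =~ arm|i686 ]] && EXTRA="$EXTRA -lstatic=c"

echo "extra RUSTFLAGS for $TARGET:$EXTRA"
```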
RUN apk add --no-cache \ - ca-certificates \ - libgcc + ca-certificates ARG CREATED From 64c25ea4a15739f75ebe2811e84dc00280ba5fb0 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 18:31:40 +0100 Subject: [PATCH 0948/1727] fix: always print ELF information --- .gitlab-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fac678c..defd66e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -50,7 +50,8 @@ variables: - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # assert that the binary is statically linked - - 'file conduit-$TARGET | grep "static\(-pie\|ally\) linked"' + - 'ldd conduit-$TARGET' # print linking information + - 'file conduit-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -125,7 +126,8 @@ build:release:cargo:aarch64-unknown-linux-musl: - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # assert that the binary is statically linked - - 'file conduit-debug-$TARGET | grep "static\(-pie\|ally\) linked"' + - 'ldd conduit-debug-$TARGET' # print linking information + - 'file conduit-debug-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information artifacts: expire_in: 4 weeks From 77ad4cb8f8c69b563c890494b5d203d96195253d Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 19:24:36 +0100 Subject: [PATCH 0949/1727] fix: use readelf for checking static compilation --- .gitlab-ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index defd66e..9e584a2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,9 +49,9 @@ variables: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' - # assert that the binary is statically linked - - 'ldd conduit-$TARGET' # print linking information - - 'file conduit-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information + # print information about linking for debugging + - 'file conduit-$TARGET' # print file information + - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -125,9 +125,9 @@ build:release:cargo:aarch64-unknown-linux-musl: # cross-compile conduit for target - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' - # assert that the binary is statically linked - - 'ldd conduit-debug-$TARGET' # print linking information - - 'file conduit-debug-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information + # print information about linking for debugging + - 'file conduit-debug-$TARGET' # print file information + - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks From 067fcfc0e40ced1c2ed28f32b04940c6e74d6f5a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 21:19:19 +0100 Subject: [PATCH 0950/1727] fix: remove trailing slash from shared path --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9e584a2..aac773c 100644 --- a/.gitlab-ci.yml +++ 
b/.gitlab-ci.yml @@ -28,7 +28,7 @@ variables: DOCKER_HOST: tcp://docker:2375/ DOCKER_TLS_CERTDIR: "" DOCKER_DRIVER: overlay2 - SHARED_PATH: $CI_PROJECT_DIR/shared/ + SHARED_PATH: $CI_PROJECT_DIR/shared CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow From acf1585fc35e7851df9c30208f654d5c085267d6 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Mon, 24 Jan 2022 11:45:07 +0100 Subject: [PATCH 0951/1727] fix: make sure that libatomic is linked statically --- cross/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cross/build.sh b/cross/build.sh index 4a6d449..8f64ff8 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -18,7 +18,7 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" # Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') From ff167299766d41c361398243db6bf97e9d45fa65 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Tue, 25 Jan 2022 22:36:51 +0100 Subject: [PATCH 0952/1727] fix: correct RUSTC_WRAPPER path in cross container --- .gitlab-ci.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index aac773c..741b532 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,6 +8,10 @@ variables: GIT_SUBMODULE_STRATEGY: recursive FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest + # Docker in Docker + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # @@ -25,9 +29,6 @@ variables: tags: ["docker"] services: ["docker:dind"] variables: - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 SHARED_PATH: $CI_PROJECT_DIR/shared CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" @@ -43,8 +44,9 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then export RUSTC_WRAPPER=$SHARED_PATH/cargo/bin/sccache && curl $SCCACHE_BIN_URL --output $RUSTC_WRAPPER && chmod +x $RUSTC_WRAPPER; fi + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. + # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. 
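A rough way to confirm that wiring, assuming the sccache binary has already been downloaded as described, is to check that it really sits inside the rustc sysroot and behaves like a normal executable:

```sh
# Sketch: verify the sccache placement described in the comment above.
RUSTC_SYSROOT=$(rustc --print sysroot)
ls -l "$RUSTC_SYSROOT/bin/sccache"        # downloaded wrapper next to rustc's own binaries
"$RUSTC_SYSROOT/bin/sccache" --version    # should print the sccache version
echo "RUSTC_WRAPPER=$RUSTC_WRAPPER"       # set to 'sccache' by the CI step when SCCACHE_BIN_URL is provided
```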
+ - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' @@ -157,9 +159,6 @@ build:debug:cargo:x86_64-unknown-linux-musl: - "build:release:cargo:armv7-unknown-linux-musleabihf" - "build:release:cargo:aarch64-unknown-linux-musl" variables: - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" cache: From 8472eff277faf55808dc794a8eb9023fafb75763 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 00:25:20 +0100 Subject: [PATCH 0953/1727] Implement media download with custom filename --- src/client_server/media.rs | 64 +++++++++++++++++++++++++++++++++++++- src/main.rs | 1 + 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0a7f4bb..2e3cf05 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -4,7 +4,10 @@ use crate::{ }; use ruma::api::client::{ error::ErrorKind, - r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, + r0::media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, + get_media_config, + }, }; use std::convert::TryInto; @@ -129,6 +132,65 @@ pub async fn get_content_route( } } +/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +/// +/// - Only allows federation if `allow_remote` is true +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/media/r0/download/<_>/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_content_as_filename_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_disposition: _, + content_type, + file, + }) = db.media.get(&db.globals, &mxc).await? + { + Ok(get_content_as_filename::Response { + file, + content_type, + content_disposition: Some(format!("inline; filename={}", body.filename)), + } + .into()) + } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + let get_content_response = db + .sending + .send_federation_request( + &db.globals, + &body.server_name, + get_content_as_filename::Request { + allow_remote: false, + server_name: &body.server_name, + media_id: &body.media_id, + filename: &body.filename, + }, + ) + .await?; + + db.media + .create( + mxc, + &db.globals, + &get_content_response.content_disposition.as_deref(), + &get_content_response.content_type.as_deref(), + &get_content_response.file, + ) + .await?; + + Ok(get_content_response.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + /// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. 
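The added route mirrors the plain download endpoint but lets the requester choose the filename placed in the Content-Disposition header. A hedged example of exercising it, with the homeserver name and media ID as placeholders:

```sh
# Hypothetical request against the route added above; host and media ID are placeholders.
curl -fsS \
  "https://conduit.example.com/_matrix/media/r0/download/conduit.example.com/<media_id>/backup.png" \
  --output backup.png
# Per the handler above, the response carries: Content-Disposition: inline; filename=backup.png
```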
diff --git a/src/main.rs b/src/main.rs index 56faa3e..514c244 100644 --- a/src/main.rs +++ b/src/main.rs @@ -136,6 +136,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::send_event_to_device_route, client_server::get_media_config_route, client_server::create_content_route, + client_server::get_content_as_filename_route, client_server::get_content_route, client_server::get_content_thumbnail_route, client_server::get_devices_route, From 52873c88b7e204f5b3c9295448c15780fff8084c Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 00:27:13 +0100 Subject: [PATCH 0954/1727] Fix incorrect HTTP method in doc comments of two media routes --- src/client_server/media.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 2e3cf05..5b196df 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -74,7 +74,7 @@ pub async fn create_content_route( .into()) } -/// # `POST /_matrix/media/r0/download/{serverName}/{mediaId}` +/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. /// @@ -191,7 +191,7 @@ pub async fn get_content_as_filename_route( } } -/// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` +/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. /// From 9c2000cb8973894512940b96996a2b5937f5cc8f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 27 Jan 2022 16:17:55 +0100 Subject: [PATCH 0955/1727] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/database/key_backups.rs | 29 ++++++++--------------------- 3 files changed, 27 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 493ac08..794a025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "assign", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "bytes", "http", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "ruma-api", "ruma-common", @@ -2169,7 +2169,7 @@ dependencies = [ [[package]] name = 
"ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "assign", "bytes", @@ -2189,7 +2189,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "indexmap", "js_int", @@ -2204,7 +2204,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "indoc", "js_int", @@ -2221,7 +2221,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2232,7 +2232,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", "ruma-api", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2262,7 +2262,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2272,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "thiserror", ] @@ -2280,7 +2280,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", 
"ruma-api", @@ -2293,7 +2293,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", "ruma-api", @@ -2308,7 +2308,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "base64 0.13.0", "bytes", @@ -2323,7 +2323,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2334,7 +2334,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2351,7 +2351,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 78a4c8f..9ba1ac0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "08d60b3d376b63462f769d4b9bd3bbfb560d501a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "82becb86c837570224964425929d1b5305784435", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/database/key_backups.rs 
b/src/database/key_backups.rs index b74bc40..2eefe48 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -7,7 +7,6 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use serde_json::json; use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -212,13 +211,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result, Raw>> { + ) -> Result, RoomKeyBackup>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, Raw>::new(); + let mut rooms = BTreeMap::, RoomKeyBackup>::new(); for result in self .backupkeyid_backup @@ -244,7 +243,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup room_id is invalid room id.") })?; - let key_data: serde_json::Value = serde_json::from_slice(&value).map_err(|_| { + let key_data = serde_json::from_slice(&value).map_err(|_| { Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; @@ -252,25 +251,13 @@ impl KeyBackups { }) { let (room_id, session_id, key_data) = result?; - let room_key_backup = rooms.entry(room_id).or_insert_with(|| { - Raw::new(&RoomKeyBackup { + rooms + .entry(room_id) + .or_insert_with(|| RoomKeyBackup { sessions: BTreeMap::new(), }) - .expect("RoomKeyBackup serialization") - }); - - let mut object = room_key_backup - .deserialize_as::>() - .map_err(|_| Error::bad_database("RoomKeyBackup is not an object"))?; - - let sessions = object.entry("session").or_insert_with(|| json!({})); - if let serde_json::Value::Object(unsigned_object) = sessions { - unsigned_object.insert(session_id, key_data); - } - - *room_key_backup = Raw::from_json( - serde_json::value::to_raw_value(&object).expect("Value => RawValue serialization"), - ); + .sessions + .insert(session_id, key_data); } Ok(rooms) From c4317a7a962fcc4b41b1abfc273034d9827a6563 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 16:12:39 +0100 Subject: [PATCH 0956/1727] Reduce code duplication in media download route handlers --- src/client_server/media.rs | 99 ++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 48 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 5b196df..bd73cff 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -74,6 +74,38 @@ pub async fn create_content_route( .into()) } +pub async fn get_remote_content( + db: &DatabaseGuard, + mxc: &str, + server_name: &ruma::ServerName, + media_id: &str +) -> ConduitResult { + let content_response = db + .sending + .send_federation_request( + &db.globals, + server_name, + get_content::Request { + allow_remote: false, + server_name, + media_id + }, + ) + .await?; + + db.media + .create( + mxc.to_string(), + &db.globals, + &content_response.content_disposition.as_deref(), + &content_response.content_type.as_deref(), + &content_response.file, + ) + .await?; + + Ok(content_response.into()) +} + /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. 
@@ -103,30 +135,13 @@ pub async fn get_content_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; - - db.media - .create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ) - .await?; - - Ok(get_content_response.into()) + let remote_content_response = get_remote_content( + &db, + &mxc, + &body.server_name, + &body.media_id + ).await?; + Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -161,31 +176,19 @@ pub async fn get_content_as_filename_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content_as_filename::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - filename: &body.filename, - }, - ) - .await?; + let remote_content_response = get_remote_content( + &db, + &mxc, + &body.server_name, + &body.media_id + ).await?; - db.media - .create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ) - .await?; - - Ok(get_content_response.into()) + Ok(get_content_as_filename::Response { + content_disposition: Some(format!("inline: filename={}", body.filename)), + content_type: remote_content_response.0.content_type, + file: remote_content_response.0.file + } + .into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } From ccfc243c2c1fec9b859ab6accec1246fa63aef94 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:00:08 +0100 Subject: [PATCH 0957/1727] Make get_remote_content() return Result instead of ConduitResult --- src/client_server/media.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index bd73cff..dd8e7b0 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -79,7 +79,7 @@ pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, media_id: &str -) -> ConduitResult { +) -> Result { let content_response = db .sending .send_federation_request( @@ -103,7 +103,7 @@ pub async fn get_remote_content( ) .await?; - Ok(content_response.into()) + Ok(content_response) } /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` @@ -141,7 +141,7 @@ pub async fn get_content_route( &body.server_name, &body.media_id ).await?; - Ok(remote_content_response) + Ok(remote_content_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -185,8 +185,8 @@ pub async fn get_content_as_filename_route( Ok(get_content_as_filename::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), - content_type: remote_content_response.0.content_type, - file: remote_content_response.0.file + content_type: remote_content_response.content_type, + file: remote_content_response.file } .into()) } else { From 0f6d232cb1117d959a1984551ee2872558172767 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:08:04 +0100 Subject: [PATCH 
0958/1727] Style fixes from 'cargo fmt' --- src/client_server/media.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index dd8e7b0..a827d64 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -78,7 +78,7 @@ pub async fn get_remote_content( db: &DatabaseGuard, mxc: &str, server_name: &ruma::ServerName, - media_id: &str + media_id: &str, ) -> Result { let content_response = db .sending @@ -88,7 +88,7 @@ pub async fn get_remote_content( get_content::Request { allow_remote: false, server_name, - media_id + media_id, }, ) .await?; @@ -135,12 +135,8 @@ pub async fn get_content_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let remote_content_response = get_remote_content( - &db, - &mxc, - &body.server_name, - &body.media_id - ).await?; + let remote_content_response = + get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; Ok(remote_content_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -176,17 +172,13 @@ pub async fn get_content_as_filename_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let remote_content_response = get_remote_content( - &db, - &mxc, - &body.server_name, - &body.media_id - ).await?; + let remote_content_response = + get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; Ok(get_content_as_filename::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, - file: remote_content_response.file + file: remote_content_response.file, } .into()) } else { From f8d1c1a8af122d8955b4b08fa564723badbb3f77 Mon Sep 17 00:00:00 2001 From: "Aode (Lion)" Date: Mon, 24 Jan 2022 18:42:15 -0600 Subject: [PATCH 0959/1727] Re-use a basic request in all possible cases --- src/appservice_server.rs | 6 +----- src/database/globals.rs | 40 ++++++++++++++++++++++++++++++---------- src/database/pusher.rs | 6 +----- src/server_server.rs | 23 ++++++++++++----------- 4 files changed, 44 insertions(+), 31 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ed886d6..a5d795f 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -46,11 +46,7 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals - .reqwest_client()? - .build()? 
- .execute(reqwest_request) - .await?; + let mut response = globals.reqwest_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 098d819..da91c1f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -39,6 +39,7 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + basic_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -132,6 +133,8 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); + let basic_client = reqwest_client_builder(&config, None)?.build()?; + let s = Self { globals, config, @@ -141,6 +144,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, + basic_client, server_signingkeys, jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -163,17 +167,15 @@ impl Globals { &self.keypair } - /// Returns a reqwest client which can be used to send requests. - pub fn reqwest_client(&self) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)) - .pool_max_idle_per_host(1); - if let Some(proxy) = self.config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } + /// Returns a reqwest client which can be used to send requests + pub fn reqwest_client(&self) -> reqwest::Client { + // can't return &Client or else we'll hold a lock around the DB across an await + self.basic_client.clone() + } - Ok(reqwest_client_builder) + /// Returns a reqwest client builder which can be customized and used to send requests. + pub fn reqwest_client_builder(&self) -> Result { + reqwest_client_builder(&self.config, Some(1)) } #[tracing::instrument(skip(self))] @@ -340,3 +342,21 @@ impl Globals { r } } + +fn reqwest_client_builder( + config: &Config, + max_idle: Option, +) -> Result { + let mut reqwest_client_builder = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)); + + if let Some(max_idle) = max_idle { + reqwest_client_builder = reqwest_client_builder.pool_max_idle_per_host(max_idle); + } + if let Some(proxy) = config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + + Ok(reqwest_client_builder) +} diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 97ca85d..d63db1d 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -115,11 +115,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals - .reqwest_client()? - .build()? 
- .execute(reqwest_request) - .await; + let response = globals.reqwest_client().execute(reqwest_request).await; match response { Ok(mut response) => { diff --git a/src/server_server.rs b/src/server_server.rs index 9129951..205355f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,21 +237,25 @@ where let url = reqwest_request.url().clone(); - let mut client = globals.reqwest_client()?; - if let Some((override_name, port)) = globals + let client = if let Some((override_name, port)) = globals .tls_name_override .read() .unwrap() .get(&actual_destination.hostname()) { - client = client.resolve( - &actual_destination.hostname(), - SocketAddr::new(override_name[0], *port), - ); + globals + .reqwest_client_builder()? + .resolve( + &actual_destination.hostname(), + SocketAddr::new(override_name[0], *port), + ) + .build()? // port will be ignored - } + } else { + globals.reqwest_client() + }; - let response = client.build()?.execute(reqwest_request).await; + let response = client.execute(reqwest_request).await; match response { Ok(mut response) => { @@ -492,9 +496,6 @@ async fn request_well_known( let body: serde_json::Value = serde_json::from_str( &globals .reqwest_client() - .ok()? - .build() - .ok()? .get(&format!( "https://{}/.well-known/matrix/server", destination From 1059f35fdcf4942fd253748121c883ea38b427a7 Mon Sep 17 00:00:00 2001 From: "Aode (lion)" Date: Thu, 27 Jan 2022 10:19:28 -0600 Subject: [PATCH 0960/1727] use pre-constructed client for well-known requests also --- Cargo.lock | 3 +-- Cargo.toml | 2 +- src/database/globals.rs | 30 ++++++++++++++++++------------ src/server_server.rs | 20 +------------------- 4 files changed, 21 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794a025..21c2770 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1950,8 +1950,7 @@ dependencies = [ [[package]] name = "reqwest" version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" +source = "git+https://github.com/niuhuan/reqwest?branch=dns-resolver-fn#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 9ba1ac0..974b4ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"] } +reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/niuhuan/reqwest", branch = "dns-resolver-fn" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images diff --git a/src/database/globals.rs b/src/database/globals.rs index da91c1f..3278b7f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -10,7 +10,7 @@ use std::{ collections::{BTreeMap, HashMap}, fs, future::Future, - net::IpAddr, + net::{IpAddr, SocketAddr}, path::PathBuf, sync::{Arc, Mutex, RwLock}, time::{Duration, Instant}, @@ -39,6 +39,7 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + well_known_client: reqwest::Client, basic_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, @@ -133,7 +134,16 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - let basic_client = 
reqwest_client_builder(&config, None)?.build()?; + let basic_client = reqwest_client_builder(&config)?.build()?; + let name_override = Arc::clone(&tls_name_override); + let well_known_client = reqwest_client_builder(&config)? + .resolve_fn(move |domain| { + let read_guard = name_override.read().unwrap(); + let (override_name, port) = read_guard.get(&domain)?; + let first_name = override_name.get(0)?; + Some(SocketAddr::new(*first_name, *port)) + }) + .build()?; let s = Self { globals, @@ -144,6 +154,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, + well_known_client, basic_client, server_signingkeys, jwt_decoding_key, @@ -173,9 +184,10 @@ impl Globals { self.basic_client.clone() } - /// Returns a reqwest client builder which can be customized and used to send requests. - pub fn reqwest_client_builder(&self) -> Result { - reqwest_client_builder(&self.config, Some(1)) + /// Returns a client used for resolving .well-knowns + pub fn well_known_client(&self) -> reqwest::Client { + // can't return &Client or else we'll hold a lock around the DB across an await + self.well_known_client.clone() } #[tracing::instrument(skip(self))] @@ -343,17 +355,11 @@ impl Globals { } } -fn reqwest_client_builder( - config: &Config, - max_idle: Option, -) -> Result { +fn reqwest_client_builder(config: &Config) -> Result { let mut reqwest_client_builder = reqwest::Client::builder() .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)); - if let Some(max_idle) = max_idle { - reqwest_client_builder = reqwest_client_builder.pool_max_idle_per_host(max_idle); - } if let Some(proxy) = config.proxy.to_proxy()? { reqwest_client_builder = reqwest_client_builder.proxy(proxy); } diff --git a/src/server_server.rs b/src/server_server.rs index 205355f..978eb67 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,25 +237,7 @@ where let url = reqwest_request.url().clone(); - let client = if let Some((override_name, port)) = globals - .tls_name_override - .read() - .unwrap() - .get(&actual_destination.hostname()) - { - globals - .reqwest_client_builder()? - .resolve( - &actual_destination.hostname(), - SocketAddr::new(override_name[0], *port), - ) - .build()? - // port will be ignored - } else { - globals.reqwest_client() - }; - - let response = client.execute(reqwest_request).await; + let response = globals.well_known_client().execute(reqwest_request).await; match response { Ok(mut response) => { From 529e88c7f9d8997a4fbbc84f98120d1b31d2e39e Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:47:09 +0100 Subject: [PATCH 0961/1727] Do not copy mxc string unnecessarily in db.get_thumbnail() --- src/client_server/media.rs | 2 +- src/database/media.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index a827d64..8524c57 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -207,7 +207,7 @@ pub async fn get_content_thumbnail_route( }) = db .media .get_thumbnail( - mxc.clone(), + &mxc, &db.globals, body.width .try_into() diff --git a/src/database/media.rs b/src/database/media.rs index 4663013..a4bb402 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -171,7 +171,7 @@ impl Media { /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
pub async fn get_thumbnail( &self, - mxc: String, + mxc: &str, globals: &Globals, width: u32, height: u32, From b39ddf7be9150b8baa6cecabed7730d2ab610a72 Mon Sep 17 00:00:00 2001 From: "Aode (lion)" Date: Fri, 28 Jan 2022 12:42:47 -0600 Subject: [PATCH 0962/1727] Rename reqwest clients, mention cheap client clones in comment --- src/appservice_server.rs | 2 +- src/database/globals.rs | 24 ++++++++++++------------ src/database/pusher.rs | 2 +- src/server_server.rs | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index a5d795f..e78fb34 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -46,7 +46,7 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals.reqwest_client().execute(reqwest_request).await?; + let mut response = globals.default_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 3278b7f..decd84c 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -39,8 +39,8 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - well_known_client: reqwest::Client, - basic_client: reqwest::Client, + federation_client: reqwest::Client, + default_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -134,9 +134,9 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - let basic_client = reqwest_client_builder(&config)?.build()?; + let default_client = reqwest_client_builder(&config)?.build()?; let name_override = Arc::clone(&tls_name_override); - let well_known_client = reqwest_client_builder(&config)? + let federation_client = reqwest_client_builder(&config)? 
.resolve_fn(move |domain| { let read_guard = name_override.read().unwrap(); let (override_name, port) = read_guard.get(&domain)?; @@ -154,8 +154,8 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, - well_known_client, - basic_client, + federation_client, + default_client, server_signingkeys, jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -179,15 +179,15 @@ impl Globals { } /// Returns a reqwest client which can be used to send requests - pub fn reqwest_client(&self) -> reqwest::Client { - // can't return &Client or else we'll hold a lock around the DB across an await - self.basic_client.clone() + pub fn default_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.default_client.clone() } /// Returns a client used for resolving .well-knowns - pub fn well_known_client(&self) -> reqwest::Client { - // can't return &Client or else we'll hold a lock around the DB across an await - self.well_known_client.clone() + pub fn federation_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.federation_client.clone() } #[tracing::instrument(skip(self))] diff --git a/src/database/pusher.rs b/src/database/pusher.rs index d63db1d..bbe85a8 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -115,7 +115,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals.reqwest_client().execute(reqwest_request).await; + let response = globals.default_client().execute(reqwest_request).await; match response { Ok(mut response) => { diff --git a/src/server_server.rs b/src/server_server.rs index 978eb67..c5e0b1a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,7 +237,7 @@ where let url = reqwest_request.url().clone(); - let response = globals.well_known_client().execute(reqwest_request).await; + let response = globals.federation_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -477,7 +477,7 @@ async fn request_well_known( ) -> Option { let body: serde_json::Value = serde_json::from_str( &globals - .reqwest_client() + .default_client() .get(&format!( "https://{}/.well-known/matrix/server", destination From 44f7a85077e5c249ec618004b0386f3d66f01911 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 28 Jan 2022 22:19:19 +0100 Subject: [PATCH 0963/1727] fix: Use default port for healthcheck as fallback Conduit can start without a specific port being configured. This adjusts the healthcheck script to tolerate that state. Closes https://gitlab.com/famedly/conduit/-/issues/222 --- docker/healthcheck.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 7ca0460..efc9491 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -1,10 +1,15 @@ #!/bin/sh # If the port is not specified as env var, take it from the config file -if [ -z ${CONDUIT_PORT} ]; then +if [ -z "${CONDUIT_PORT}" ]; then CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') fi +# If the config file also does not contain a default port, just use the default one: 6167. +if [ -z "${CONDUIT_PORT}" ]; then + CONDUIT_PORT=6167 +fi + # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. 
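The same probe can be run by hand against a container to see what the healthcheck sees; a sketch using curl instead of the script's wget, assuming the standard client versions endpoint:

```sh
# Manual equivalent of the container healthcheck (endpoint assumed, not taken from the script).
CONDUIT_PORT="${CONDUIT_PORT:-6167}"   # 6167 is the port the Dockerfile EXPOSEs
curl -fsS  "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" \
  || curl -fsSk "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" \
  || exit 1
```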
# TODO: Change this to a single wget call. Do we have a config value that we can check for that? From 401b88d16d43f0c58e5a4ccf777815fd8d538ff8 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 28 Jan 2022 23:23:58 +0100 Subject: [PATCH 0964/1727] fix: Healtcheck use netstat for port as fallback --- docker/healthcheck.sh | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index efc9491..df7f18a 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -1,13 +1,9 @@ #!/bin/sh -# If the port is not specified as env var, take it from the config file +# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create +# try to get port from process list if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') -fi - -# If the config file also does not contain a default port, just use the default one: 6167. -if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=6167 + CONDUIT_PORT=$(netstat -tlp | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi # The actual health check. From 8ff95a5a48c055549b3652a33faddc3d89351d91 Mon Sep 17 00:00:00 2001 From: user Date: Fri, 28 Jan 2022 22:26:56 -0800 Subject: [PATCH 0965/1727] fix: mention dependencies to build from source --- DEPLOY.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 38e1e28..d9f91e0 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -29,7 +29,11 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself using +Alternatively, you may compile the binary yourself + +```bash +$ sudo apt install libclang-dev build-essential +``` ```bash $ cargo build --release From 677f044d13985f794afecdb0bbf62fbab3a52dec Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sun, 30 Jan 2022 23:15:53 +0200 Subject: [PATCH 0966/1727] Refactor admin code to always defer command processing --- src/client_server/report.rs | 12 +- src/database/admin.rs | 304 ++++++++++++++++++------------------ src/database/rooms.rs | 8 +- 3 files changed, 155 insertions(+), 169 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index ae06984..032e446 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,7 +1,4 @@ -use crate::{ - database::{admin::AdminCommand, DatabaseGuard}, - ConduitResult, Error, Ruma, -}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, @@ -50,8 +47,8 @@ pub async fn report_event_route( )); }; - db.admin.send(AdminCommand::SendMessage( - message::RoomMessageEventContent::text_html( + db.admin + .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ Event ID: {}\n\ @@ -75,8 +72,7 @@ pub async fn report_event_route( body.score, RawStr::new(&body.reason).html_escape() ), - ), - )); + )); db.flush()?; diff --git a/src/database/admin.rs b/src/database/admin.rs index dbd20e4..ea08f65 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -19,25 +19,21 @@ use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; -pub enum AdminCommand { - RegisterAppservice(serde_yaml::Value), - UnregisterAppservice(String), - ListAppservices, - ListLocalUsers, - ShowMemoryUsage, +pub enum 
AdminRoomEvent { + ProcessMessage(String), SendMessage(RoomMessageEventContent), } #[derive(Clone)] pub struct Admin { - pub sender: mpsc::UnboundedSender, + pub sender: mpsc::UnboundedSender, } impl Admin { pub fn start_handler( &self, db: Arc>, - mut receiver: mpsc::UnboundedReceiver, + mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands @@ -56,7 +52,7 @@ impl Admin { .try_into() .expect("#admins:server_name is a valid room alias"), ) - .unwrap(); + .expect("Admin room must exist"); let conduit_room = match conduit_room { None => { @@ -105,46 +101,13 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::ListLocalUsers => { - match guard.users.list_local_users() { - Ok(users) => { - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); - } - Err(e) => { - send_message(RoomMessageEventContent::text_plain(e.to_string()), guard, &state_lock); - } - } + AdminRoomEvent::SendMessage(content) => { + send_message(content, guard, &state_lock); } - AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error - } - AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above - } - AdminCommand::ListAppservices => { - if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") - ); - send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock); - } else { - send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); - } - } - AdminCommand::ShowMemoryUsage => { - if let Ok(response) = guard._db.memory_usage() { - send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); - } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_owned()), guard, &state_lock); - } - } - AdminCommand::SendMessage(message) => { - send_message(message, guard, &state_lock); + AdminRoomEvent::ProcessMessage(room_message) => { + let reply_message = process_admin_message(&*guard, room_message); + + send_message(reply_message, guard, &state_lock); } } @@ -155,67 +118,81 @@ impl Admin { }); } - pub fn send(&self, command: AdminCommand) { - self.sender.unbounded_send(command).unwrap(); + pub fn process_message(&self, room_message: String) { + self.sender + .unbounded_send(AdminRoomEvent::ProcessMessage(room_message)) + .unwrap(); + } + + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .unbounded_send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); + } +} + +// Parse and process a message from the admin room +pub fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body: Vec<_> = lines.collect(); + + let admin_command = match parse_admin_command(&command_line) { + Ok(command) => command, + Err(error) => { + let message = error + .to_string() + .replace("example.com", db.globals.server_name().as_str()); + let html_message = 
usage_to_html(&message); + + return RoomMessageEventContent::text_html(message, html_message); + } + }; + + match process_admin_command(db, admin_command, body) { + Ok(reply_message) => reply_message, + Err(error) => { + let markdown_message = format!( + "Encountered an error while handling the command:\n\ + ```\n{}\n```", + error, + ); + let html_message = format!( + "Encountered an error while handling the command:\n\ +
                <pre>\n{}\n</pre>
              ", + error, + ); + + RoomMessageEventContent::text_html(markdown_message, html_message) + } } } // Parse chat messages from the admin room into an AdminCommand object -pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { - let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); - - let command_name = match argv.get(0) { - Some(command) => *command, - None => { - let markdown_message = "No command given. Use `help` for a list of commands."; - let html_message = "No command given. Use help for a list of commands."; - - return AdminCommand::SendMessage(RoomMessageEventContent::text_html( - markdown_message, - html_message, - )); - } - }; +fn parse_admin_command(command_line: &str) -> std::result::Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv: Vec<_> = command_line.split_whitespace().collect(); // Replace `help command` with `command --help` // Clap has a help subcommand, but it omits the long help description. - if argv[0] == "help" { - argv.remove(0); + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); argv.push("--help"); } // Backwards compatibility with `register_appservice`-style commands let command_with_dashes; - if argv[0].contains("_") { - command_with_dashes = argv[0].replace("_", "-"); - argv[0] = &command_with_dashes; + if argv.len() > 1 && argv[1].contains("_") { + command_with_dashes = argv[1].replace("_", "-"); + argv[1] = &command_with_dashes; } - match try_parse_admin_command(db, argv, body) { - Ok(admin_command) => admin_command, - Err(error) => { - let markdown_message = format!( - "Encountered an error while handling the `{}` command:\n\ - ```\n{}\n```", - command_name, error, - ); - let html_message = format!( - "Encountered an error while handling the {} command:\n\ -
                <pre>\n{}\n</pre>
              ", - command_name, error, - ); - - AdminCommand::SendMessage(RoomMessageEventContent::text_html( - markdown_message, - html_message, - )) - } - } + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) } #[derive(Parser)] #[clap(name = "@conduit:example.com", version = env!("CARGO_PKG_VERSION"))] -enum AdminCommands { +enum AdminCommand { #[clap(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// @@ -264,49 +241,70 @@ enum AdminCommands { DatabaseMemoryUsage, } -pub fn try_parse_admin_command( +fn process_admin_command( db: &Database, - mut argv: Vec<&str>, + command: AdminCommand, body: Vec<&str>, -) -> Result { - argv.insert(0, "@conduit:example.com:"); - let command = match AdminCommands::try_parse_from(argv) { - Ok(command) => command, - Err(error) => { - let message = error - .to_string() - .replace("example.com", db.globals.server_name().as_str()); - let html_message = usage_to_html(&message); - - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_html(message, html_message), - )); - } - }; - - let admin_command = match command { - AdminCommands::RegisterAppservice => { +) -> Result { + let reply_message_content = match command { + AdminCommand::RegisterAppservice => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); match parsed_config { - Ok(yaml) => AdminCommand::RegisterAppservice(yaml), - Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("Could not parse appservice config: {}", e), + Ok(yaml) => match db.appservice.register_appservice(yaml) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice registered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {}", + e + )), + }, + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {}", + e )), } } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Expected code block in command body.", - )) + RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + ) } } - AdminCommands::UnregisterAppservice { + AdminCommand::UnregisterAppservice { appservice_identifier, - } => AdminCommand::UnregisterAppservice(appservice_identifier), - AdminCommands::ListAppservices => AdminCommand::ListAppservices, - AdminCommands::ListLocalUsers => AdminCommand::ListLocalUsers, - AdminCommands::GetAuthChain { event_id } => { + } => match db.appservice.unregister_appservice(&appservice_identifier) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {}", + e + )), + }, + AdminCommand::ListAppservices => { + if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices + .into_iter() + .filter_map(|r| r.ok()) + .collect::>() + .join(", ") + ); + RoomMessageEventContent::text_plain(output) + } else { + RoomMessageEventContent::text_plain("Failed to get appservices.") + } + } + AdminCommand::ListLocalUsers => match db.users.list_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + RoomMessageEventContent::text_plain(&msg) + } + Err(e) => RoomMessageEventContent::text_plain(e.to_string()), + }, + AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event @@ -320,17 +318,15 @@ pub fn try_parse_admin_command( let start = Instant::now(); let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); let elapsed = start.elapsed(); - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )) } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain("Event not found.")) + RoomMessageEventContent::text_plain("Event not found.") } } - AdminCommands::ParsePdu => { + AdminCommand::ParsePdu => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -346,30 +342,26 @@ pub fn try_parse_admin_command( match serde_json::from_value::( serde_json::to_value(value).expect("value is json"), ) { - Ok(pdu) => { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("EventId: {:?}\n{:#?}", event_id, pdu), - )) - } - Err(e) => AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e - )), - ), + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), } } - Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("Invalid json in command body: {}", e), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e )), } } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Expected code block in command body.", - )) + RoomMessageEventContent::text_plain("Expected code block in command body.") } } - AdminCommands::GetPdu { 
event_id } => { + AdminCommand::GetPdu { event_id } => { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { @@ -380,7 +372,7 @@ pub fn try_parse_admin_command( Some(json) => { let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - AdminCommand::SendMessage(RoomMessageEventContent::text_html( + RoomMessageEventContent::text_html( format!( "{}\n```json\n{}\n```", if outlier { @@ -399,17 +391,21 @@ pub fn try_parse_admin_command( }, RawStr::new(&json_text).html_escape() ), - )) - } - None => { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain("PDU not found.")) + ) } + None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommands::DatabaseMemoryUsage => AdminCommand::ShowMemoryUsage, + AdminCommand::DatabaseMemoryUsage => match db._db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to get database memory usage: {}", + e + )), + }, }; - Ok(admin_command) + Ok(reply_message_content) } // Utility to turn clap's `--help` text to HTML. diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1f4566f..2303b0d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,6 @@ mod edus; pub use edus::RoomEdus; use crate::{ - database::admin::parse_admin_command, pdu::{EventHash, PduBuilder}, utils, Database, Error, PduEvent, Result, }; @@ -1490,12 +1489,7 @@ impl Rooms { .as_ref() == Some(&pdu.room_id) { - let mut lines = body.lines(); - let command_line = lines.next().expect("each string has at least one line"); - let body: Vec<_> = lines.collect(); - - let command = parse_admin_command(db, command_line, body); - db.admin.send(command); + db.admin.process_message(body.to_string()); } } } From cc13112592b1666e75b4e5d0d340d6124afe4071 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 09:27:31 +0100 Subject: [PATCH 0967/1727] Cleanup appservice events after removing the appservice --- src/database/admin.rs | 13 ++++++++++++- src/database/sending.rs | 31 +++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 81e9839..9895a83 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -112,7 +112,18 @@ impl Admin { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { + if let Ok(_) = guard.sending.cleanup_events(&service_name) { + let msg: String = format!("OK. Appservice {} removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } else { + let msg: String = format!("WARN: Appservice {} removed, but failed to cleanup events", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + } else { + let msg: String = format!("ERR. 
Appservice {} not removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { diff --git a/src/database/sending.rs b/src/database/sending.rs index 69f7c44..af4ac67 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -480,6 +480,26 @@ impl Sending { hash.as_ref().to_owned() } + /// Cleanup event data + /// Used for instance after we remove an appservice registration + /// + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, key_id: &str) -> Result<()> { + let mut prefix = b"+".to_vec(); + prefix.extend_from_slice(key_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + #[tracing::instrument(skip(db, events, kind))] async fn handle_events( kind: OutgoingKind, @@ -520,8 +540,15 @@ impl Sending { &db.globals, db.appservice .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Could not load registration from db.", + ), + ) + })?, appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( From 78502aa6b10d97f3af3fe006fcdbd19b585d3b58 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 10:07:49 +0100 Subject: [PATCH 0968/1727] add error handling for register_appservice too --- src/database/admin.rs | 13 ++++++++++++- src/database/appservice.rs | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9895a83..eef6ce1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -109,7 +109,18 @@ impl Admin { } } AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error + match guard.appservice.register_appservice(yaml) { + Ok(Some(id)) => { + let msg: String = format!("OK. Appservice {} created", id); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + Ok(None) => { + send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); + } + Err(_) => { + send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); + } + } } AdminCommand::UnregisterAppservice(service_name) => { if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 88de1f3..8b29aca 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,7 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -24,7 +24,7 @@ impl Appservice { .unwrap() .insert(id.to_owned(), yaml); - Ok(()) + Ok(Some(id.to_owned())) } /// Remove an appservice registration From 28d3b348d2fc23e6b2b78c468f682018ab472652 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 11:52:33 +0100 Subject: [PATCH 0969/1727] Return the ID of the appservice that was created by register_appservice --- src/database/admin.rs | 5 +---- src/database/appservice.rs | 8 +++++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eef6ce1..a214796 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -110,13 +110,10 @@ impl Admin { } AdminCommand::RegisterAppservice(yaml) => { match guard.appservice.register_appservice(yaml) { - Ok(Some(id)) => { + Ok(id) => { let msg: String = format!("OK. Appservice {} created", id); send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); } - Ok(None) => { - send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); - } Err(_) => { send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 8b29aca..edd5009 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,9 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { + /// Registers an appservice and returns the ID to the caller + /// + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -22,9 +24,9 @@ impl Appservice { self.cached_registrations .write() .unwrap() - .insert(id.to_owned(), yaml); + .insert(id.to_owned(), yaml.to_owned()); - Ok(Some(id.to_owned())) + Ok(id.to_owned()) } /// Remove an appservice registration From e17bbdd42d3245f2fb3730753f1feb51cd452207 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 28 Jan 2022 17:26:43 +0100 Subject: [PATCH 0970/1727] tests --- Cargo.toml | 2 +- src/database.rs | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 78a4c8f..0089e7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/database.rs b/src/database.rs index 4f230f3..79b8208 100644 --- a/src/database.rs +++ b/src/database.rs @@ -130,7 +130,7 @@ fn default_db_cache_capacity_mb() -> f64 { } fn default_rocksdb_max_open_files() -> i32 { - 512 + 20 } fn default_pdu_cache_capacity() -> u32 { @@ -361,15 +361,15 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), + auth_chain_cache: Mutex::new(LruCache::new(100_000)), + shorteventid_cache: Mutex::new(LruCache::new(100_000)), + eventidshort_cache: Mutex::new(LruCache::new(100_000)), + shortstatekey_cache: Mutex::new(LruCache::new(100_000)), + statekeyshort_cache: Mutex::new(LruCache::new(100_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new(1000)), + stateinfo_cache: Mutex::new(LruCache::new(100)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From 23aecb78c7c5ba5872a058f806cd722787eefc10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 31 Jan 2022 15:39:46 +0100 Subject: [PATCH 0971/1727] fix: use to_lowercase on /register/available username --- src/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index c4e118c..80c6f70 100644 --- 
a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -60,7 +60,7 @@ pub async fn get_register_available_route( body: Ruma>, ) -> ConduitResult { // Validate user id - let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) + let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) .ok() .filter(|user_id| { !user_id.is_historical() && user_id.server_name() == db.globals.server_name() From caf9834e50b540fc48bf8cf50dc439c57c503de9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 1 Feb 2022 14:42:13 +0100 Subject: [PATCH 0972/1727] feat: cache capacity modifier --- src/database.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/database.rs b/src/database.rs index 79b8208..449d71b 100644 --- a/src/database.rs +++ b/src/database.rs @@ -49,6 +49,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_conduit_cache_capacity_modifier")] + conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] rocksdb_max_open_files: i32, #[serde(default = "default_pdu_cache_capacity")] @@ -129,6 +131,10 @@ fn default_db_cache_capacity_mb() -> f64 { 10.0 } +fn default_conduit_cache_capacity_modifier() -> f64 { + 1.0 +} + fn default_rocksdb_max_open_files() -> i32 { 20 } @@ -361,15 +367,15 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new(100_000)), - shorteventid_cache: Mutex::new(LruCache::new(100_000)), - eventidshort_cache: Mutex::new(LruCache::new(100_000)), - shortstatekey_cache: Mutex::new(LruCache::new(100_000)), - statekeyshort_cache: Mutex::new(LruCache::new(100_000)), + auth_chain_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + shorteventid_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + eventidshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + shortstatekey_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + statekeyshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new(100)), + stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From fa4099b138b3a4cdf6727acb14c47f20ace5f38e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 1 Feb 2022 23:51:38 +0000 Subject: [PATCH 0973/1727] Use prebuilt CI-containers from https://gitlab.com/jfowl/conduit-containers Also run all builds on approved MRs --- .gitlab-ci.yml | 39 ++++++++++++++++----------------------- Cross.toml | 8 ++++---- cross/build.sh | 31 ------------------------------- cross/test.sh | 8 -------- 4 files changed, 20 insertions(+), 66 deletions(-) delete mode 100755 cross/build.sh delete mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 741b532..6f1a19f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,8 +24,9 @@ variables: - if: 
'$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" + - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. interruptible: true - image: "rust:1.58" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] services: ["docker:dind"] variables: @@ -36,27 +37,23 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # install cross-compiling prerequisites - - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - - 'cargo install cross && cross --version' # install cross # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) - - 'mkdir -p $SHARED_PATH/cargo' - - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - - 'cp -r $RUSTUP_HOME $SHARED_PATH' - - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + - "mkdir -p $SHARED_PATH/cargo" + - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" + - "cp -r $RUSTUP_HOME $SHARED_PATH" + - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. - # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. - - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked --release' + - 'time cross build --target="$TARGET" --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # print information about linking for debugging - - 'file conduit-$TARGET' # print file information + - "file conduit-$TARGET" # print file information - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - key: 'cargo-cache-$TARGET' + key: "cargo-cache-$TARGET" paths: - $SHARED_PATH/cargo/registry/index - $SHARED_PATH/cargo/registry/cache @@ -125,10 +122,10 @@ build:release:cargo:aarch64-unknown-linux-musl: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked' + - 'time time cross build --target="$TARGET" --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # print information about linking for debugging - - 'file conduit-debug-$TARGET' # print file information + - "file conduit-debug-$TARGET" # print file information - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks @@ -230,24 +227,20 @@ docker:master:dockerhub: test:cargo: stage: "test" needs: [] - image: "rust:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true before_script: - # - mkdir -p $CARGO_HOME - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends 
build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: diff --git a/Cross.toml b/Cross.toml index a989a98..a1387b4 100644 --- a/Cross.toml +++ b/Cross.toml @@ -11,13 +11,13 @@ passthrough = [ ] [target.aarch64-unknown-linux-musl] -image = "rust-cross:aarch64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" [target.arm-unknown-linux-musleabihf] -image = "rust-cross:arm-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" [target.armv7-unknown-linux-musleabihf] -image = "rust-cross:armv7-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "rust-cross:x86_64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" diff --git a/cross/build.sh b/cross/build.sh deleted file mode 100755 index 8f64ff8..0000000 --- a/cross/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -set -ex - -# build custom container with libclang and static compilation -tag="rust-cross:${TARGET:?}" -docker build --tag="$tag" - << EOF -FROM rustembedded/cross:$TARGET - -# Install libclang for generating bindings with rust-bindgen -# The architecture is not relevant here since it's not used for compilation -RUN apt-get update && \ - apt-get install --assume-yes libclang-dev - -# Set the target prefix -ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" - -# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling -# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information -ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') -# Strip symbols while compiling in release mode -$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') - -# Make sure that rust-bindgen uses the correct include path when 
cross-compiling -# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information -ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" -EOF - -# build conduit for a specific target -cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh deleted file mode 100755 index 0aa0909..0000000 --- a/cross/test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh -set -ex - -# Build conduit for a specific target -cross/build.sh $@ - -# Test conduit for a specific target -cross test --target="$TARGET" $@ From a5f004d7e9c783caf280884d7fd332c7bafa67ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 2 Feb 2022 12:36:55 +0100 Subject: [PATCH 0974/1727] fix: signature mismatch on odd send_join servers --- Cargo.toml | 2 +- src/client_server/account.rs | 19 ++++++++++--------- src/client_server/membership.rs | 17 +++++++++-------- src/database.rs | 24 ++++++++++++++++++------ 4 files changed, 38 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0089e7f..78a4c8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 80c6f70..ff34854 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -60,15 +60,16 @@ pub async fn get_register_available_route( body: Ruma>, ) -> ConduitResult { // Validate user id - let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let user_id = + UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough if db.users.exists(&user_id)? 
{ diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 7035278..216c4c0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -655,7 +655,7 @@ async fn join_room_by_id_helper( db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; - let pdu = PduEvent::from_id_val(event_id, join_event.clone()) + let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); @@ -695,14 +695,15 @@ async fn join_room_by_id_helper( } let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( - &pdu.kind, - pdu.state_key + &parsed_pdu.kind, + parsed_pdu + .state_key .as_ref() .expect("Pdu is a membership state event"), &db.globals, )?; - state.insert(incoming_shortstatekey, pdu.event_id.clone()); + state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); let create_shortstatekey = db .rooms @@ -738,12 +739,12 @@ async fn join_room_by_id_helper( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + let statehashid = db.rooms.append_to_state(&parsed_pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - iter::once(&*pdu.event_id), + &parsed_pdu, + join_event, + iter::once(&*parsed_pdu.event_id), db, )?; diff --git a/src/database.rs b/src/database.rs index 449d71b..8d245b7 100644 --- a/src/database.rs +++ b/src/database.rs @@ -367,15 +367,27 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - shorteventid_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - eventidshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - shortstatekey_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - statekeyshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize)), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From bfcf2db497ffab518b946922205fb9a5661d8c27 Mon Sep 17 00:00:00 2001 From: user Date: Fri, 28 Jan 2022 22:26:56 -0800 Subject: [PATCH 0975/1727] 
fix: mention dependencies to build from source --- DEPLOY.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 38e1e28..d9f91e0 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -29,7 +29,11 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself using +Alternatively, you may compile the binary yourself + +```bash +$ sudo apt install libclang-dev build-essential +``` ```bash $ cargo build --release From da7b55b39c1ea592c0d5ec86a1988465bedaad0e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 09:27:31 +0100 Subject: [PATCH 0976/1727] Cleanup appservice events after removing the appservice --- src/database/admin.rs | 13 ++++++++++++- src/database/sending.rs | 31 +++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 81e9839..9895a83 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -112,7 +112,18 @@ impl Admin { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { + if let Ok(_) = guard.sending.cleanup_events(&service_name) { + let msg: String = format!("OK. Appservice {} removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } else { + let msg: String = format!("WARN: Appservice {} removed, but failed to cleanup events", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + } else { + let msg: String = format!("ERR. Appservice {} not removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { diff --git a/src/database/sending.rs b/src/database/sending.rs index 69f7c44..af4ac67 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -480,6 +480,26 @@ impl Sending { hash.as_ref().to_owned() } + /// Cleanup event data + /// Used for instance after we remove an appservice registration + /// + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, key_id: &str) -> Result<()> { + let mut prefix = b"+".to_vec(); + prefix.extend_from_slice(key_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + #[tracing::instrument(skip(db, events, kind))] async fn handle_events( kind: OutgoingKind, @@ -520,8 +540,15 @@ impl Sending { &db.globals, db.appservice .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error + .map_err(|e| (kind.clone(), e))? 
+ .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Could not load registration from db.", + ), + ) + })?, appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( From 8f69f02e592299dbe3713e238b94b19bfc445ec8 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 10:07:49 +0100 Subject: [PATCH 0977/1727] add error handling for register_appservice too --- src/database/admin.rs | 13 ++++++++++++- src/database/appservice.rs | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9895a83..eef6ce1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -109,7 +109,18 @@ impl Admin { } } AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error + match guard.appservice.register_appservice(yaml) { + Ok(Some(id)) => { + let msg: String = format!("OK. Appservice {} created", id); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + Ok(None) => { + send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); + } + Err(_) => { + send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. Check server log"), guard, &state_lock); + } + } } AdminCommand::UnregisterAppservice(service_name) => { if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 88de1f3..8b29aca 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,7 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -24,7 +24,7 @@ impl Appservice { .unwrap() .insert(id.to_owned(), yaml); - Ok(()) + Ok(Some(id.to_owned())) } /// Remove an appservice registration From e24d75cffc8f00d526848a93a4e2cfce54bf69a2 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 11:52:33 +0100 Subject: [PATCH 0978/1727] Return the ID of the appservice that was created by register_appservice --- src/database/admin.rs | 5 +---- src/database/appservice.rs | 8 +++++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eef6ce1..a214796 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -110,13 +110,10 @@ impl Admin { } AdminCommand::RegisterAppservice(yaml) => { match guard.appservice.register_appservice(yaml) { - Ok(Some(id)) => { + Ok(id) => { let msg: String = format!("OK. Appservice {} created", id); send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); } - Ok(None) => { - send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); - } Err(_) => { send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 8b29aca..edd5009 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,9 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { + /// Registers an appservice and returns the ID to the caller + /// + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -22,9 +24,9 @@ impl Appservice { self.cached_registrations .write() .unwrap() - .insert(id.to_owned(), yaml); + .insert(id.to_owned(), yaml.to_owned()); - Ok(Some(id.to_owned())) + Ok(id.to_owned()) } /// Remove an appservice registration From 9478c75f9dcd040cb9f03deb5ea809f117985de2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 1 Feb 2022 23:51:38 +0000 Subject: [PATCH 0979/1727] Use prebuilt CI-containers from https://gitlab.com/jfowl/conduit-containers Also run all builds on approved MRs --- .gitlab-ci.yml | 39 ++++++++++--------------- Cross.toml | 8 ++--- cross/build.sh | 31 -------------------- cross/test.sh | 8 ----- docker/ci-binaries-packaging.Dockerfile | 4 ++- docker/healthcheck.sh | 2 +- 6 files changed, 24 insertions(+), 68 deletions(-) delete mode 100755 cross/build.sh delete mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 741b532..6f1a19f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,8 +24,9 @@ variables: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" + - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. interruptible: true - image: "rust:1.58" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] services: ["docker:dind"] variables: @@ -36,27 +37,23 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # install cross-compiling prerequisites - - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - - 'cargo install cross && cross --version' # install cross # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) - - 'mkdir -p $SHARED_PATH/cargo' - - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - - 'cp -r $RUSTUP_HOME $SHARED_PATH' - - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + - "mkdir -p $SHARED_PATH/cargo" + - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" + - "cp -r $RUSTUP_HOME $SHARED_PATH" + - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. - # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. 
- - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked --release' + - 'time cross build --target="$TARGET" --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # print information about linking for debugging - - 'file conduit-$TARGET' # print file information + - "file conduit-$TARGET" # print file information - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - key: 'cargo-cache-$TARGET' + key: "cargo-cache-$TARGET" paths: - $SHARED_PATH/cargo/registry/index - $SHARED_PATH/cargo/registry/cache @@ -125,10 +122,10 @@ build:release:cargo:aarch64-unknown-linux-musl: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked' + - 'time time cross build --target="$TARGET" --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # print information about linking for debugging - - 'file conduit-debug-$TARGET' # print file information + - "file conduit-debug-$TARGET" # print file information - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks @@ -230,24 +227,20 @@ docker:master:dockerhub: test:cargo: stage: "test" needs: [] - image: "rust:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true before_script: - # - mkdir -p $CARGO_HOME - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: diff --git a/Cross.toml b/Cross.toml index a989a98..a1387b4 100644 --- a/Cross.toml +++ b/Cross.toml @@ -11,13 +11,13 @@ passthrough = [ ] [target.aarch64-unknown-linux-musl] -image = "rust-cross:aarch64-unknown-linux-musl" 
+image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" [target.arm-unknown-linux-musleabihf] -image = "rust-cross:arm-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" [target.armv7-unknown-linux-musleabihf] -image = "rust-cross:armv7-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "rust-cross:x86_64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" diff --git a/cross/build.sh b/cross/build.sh deleted file mode 100755 index 8f64ff8..0000000 --- a/cross/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -set -ex - -# build custom container with libclang and static compilation -tag="rust-cross:${TARGET:?}" -docker build --tag="$tag" - << EOF -FROM rustembedded/cross:$TARGET - -# Install libclang for generating bindings with rust-bindgen -# The architecture is not relevant here since it's not used for compilation -RUN apt-get update && \ - apt-get install --assume-yes libclang-dev - -# Set the target prefix -ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" - -# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling -# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information -ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') -# Strip symbols while compiling in release mode -$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') - -# Make sure that rust-bindgen uses the correct include path when cross-compiling -# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information -ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" -EOF - -# build conduit for a specific target -cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh deleted file mode 100755 index 0aa0909..0000000 --- a/cross/test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh -set -ex - -# Build conduit for a specific target -cross/build.sh $@ - -# Test conduit for a specific target -cross test --target="$TARGET" $@ diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index a6339be..bb67bb2 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -19,8 +19,10 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: # ca-certificates: for https +# iproute2: for `ss` for the healthcheck script RUN apk add --no-cache \ - ca-certificates + ca-certificates \ + iproute2 ARG CREATED diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index df7f18a..42b2e10 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -3,7 +3,7 @@ # If the config file does not contain a default port and the CONDUIT_PORT env is not set, create # try to get port from process list if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=$(netstat -tlp | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') + CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi # 
The actual health check. From e5bac5e4f53fa3e6565cca96b687dc8ff976f7f0 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 2 Feb 2022 14:07:35 +0100 Subject: [PATCH 0980/1727] fix: Running in Docker --- Dockerfile | 44 ++++++++++++------------- conduit-example.toml | 1 + docker/ci-binaries-packaging.Dockerfile | 5 +-- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/Dockerfile b/Dockerfile index b629690..0da4aac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,9 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.58-alpine AS builder +FROM docker.io/rust:1.58-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apk add musl-dev +RUN apt update && apt -y install libclang-11-dev # == Build dependencies without our own code separately for caching == # @@ -26,28 +26,28 @@ COPY src src # Builds conduit and places the binary at /usr/src/conduit/target/release/conduit RUN touch src/main.rs && touch src/lib.rs && cargo build --release - - - # --------------------------------------------------------------------------------------------------------------- # Stuff below this line actually ends up in the resulting docker image # --------------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.0 AS runner +FROM docker.io/debian:bullseye-slim AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" +# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ + CONDUIT_PORT=6167 # Conduit needs: # ca-certificates: for https -# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. -RUN apk add --no-cache \ +# iproute2 & wget: for the healthcheck script +RUN apt update && apt -y install \ ca-certificates \ - libgcc + iproute2 \ + wget +RUN rm -rf /var/lib/apt/lists/* # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit @@ -59,20 +59,20 @@ HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh # Copy over the actual Conduit binary from the builder stage COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit -# Improve security: Don't run stuff as root, that does not need to run as root: -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +# Improve security: Don't run stuff as root, that does not need to run as root +# Add 'conduit' user and group (100:82). The UID:GID choice is to be compatible +# with previous, Alpine-based containers, where the user and group were both +# named 'www-data'. 
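# As a quick illustration of that compatibility (not part of this patch; the path and
# expected numbers are assumed from the comment above), data created by the old image
# should still show the expected owner:
#   stat -c '%u:%g' /srv/conduit   # expect: 100:82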
RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + groupadd -r -g 82 conduit ; \ + useradd -r -M -d /srv/conduit -o -u 100 -g conduit conduit && exit 0 ; exit 1 -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit -RUN chmod +x /srv/conduit/healthcheck.sh +# Change ownership of Conduit files to conduit user and group and make the healthcheck executable: +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh -# Change user to www-data -USER www-data +# Change user to conduit, no root permissions afterwards: +USER conduit # Set container home directory WORKDIR /srv/conduit diff --git a/conduit-example.toml b/conduit-example.toml index c0274a4..f157807 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -22,6 +22,7 @@ database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = 6167 # Max size for uploads diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index bb67bb2..3731bac 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -14,8 +14,9 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" +# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. 
+ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ + CONDUIT_PORT=6167 # Conduit needs: # ca-certificates: for https From c4733676cf16267ffbb0b348848e87a7d103cf37 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 2 Feb 2022 13:35:15 +0000 Subject: [PATCH 0981/1727] Apply feedback from Ticho --- Dockerfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0da4aac..b631f29 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM docker.io/rust:1.58-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apt update && apt -y install libclang-11-dev +RUN apt update && apt -y install libclang-dev # == Build dependencies without our own code separately for caching == # @@ -45,9 +45,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ RUN apt update && apt -y install \ ca-certificates \ iproute2 \ - wget - -RUN rm -rf /var/lib/apt/lists/* + wget \ + && rm -rf /var/lib/apt/lists/* # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit From 87225e70c3441c9ddd96d9fe0c4dd4e5a2c1289e Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Wed, 2 Feb 2022 21:35:57 +0200 Subject: [PATCH 0982/1727] Parse admin command body templates from doc comments --- src/database/admin.rs | 69 ++++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index ea08f65..c715049 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -132,7 +132,7 @@ impl Admin { } // Parse and process a message from the admin room -pub fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -202,7 +202,10 @@ enum AdminCommand { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. /// - /// [add-yaml-block-to-usage] + /// [commandbody] + /// # ``` + /// # yaml content here + /// # ``` RegisterAppservice, /// Unregister an appservice using its ID @@ -225,10 +228,16 @@ enum AdminCommand { event_id: Box, }, + #[clap(verbatim_doc_comment)] /// Parse and print a PDU from a JSON /// /// The PDU event is only checked for validity and is not added to the /// database. + /// + /// [commandbody] + /// # ``` + /// # PDU json content here + /// # ``` ParsePdu, /// Retrieve and print a PDU by ID from the Conduit database @@ -433,33 +442,51 @@ fn usage_to_html(text: &str) -> String { .expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1: $4"); - // // Enclose examples in code blocks - // // (?ms) enables multi-line mode and dot-matches-all - // let re = - // Regex::new("(?ms)^Example:\n(.*?)\nUSAGE:$").expect("Regex compilation should not fail"); - // let text = re.replace_all(&text, "EXAMPLE:\n
              $1
              \nUSAGE:"); + // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // start with a `#` in the USAGE section. + let mut text_lines: Vec<&str> = text.lines().collect(); + let mut command_body = String::new(); - let has_yaml_block_marker = text.contains("\n[add-yaml-block-to-usage]\n"); - let text = text.replace("\n[add-yaml-block-to-usage]\n", ""); + if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + text_lines.remove(line_index); - // Add HTML line-breaks - let text = text.replace("\n", "
              \n"); + while text_lines + .get(line_index) + .map(|line| line.starts_with("#")) + .unwrap_or(false) + { + command_body += if text_lines[line_index].starts_with("# ") { + &text_lines[line_index][2..] + } else { + &text_lines[line_index][1..] + }; + command_body += "[nobr]\n"; + text_lines.remove(line_index); + } + } - let text = if !has_yaml_block_marker { + let text = text_lines.join("\n"); + + // Improve the usage section + let text = if command_body.is_empty() { // Wrap the usage line in code tags - let re = Regex::new("(?m)^USAGE:
              \n (@conduit:.*)
              $") + let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") .expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:
              \n$1
              ") + re.replace_all(&text, "USAGE:\n$1").to_string() } else { // Wrap the usage line in a code block, and add a yaml block example // This makes the usage of e.g. `register-appservice` more accurate - let re = Regex::new("(?m)^USAGE:
              \n (.*?)
              \n
              \n") - .expect("Regex compilation should not fail"); - re.replace_all( - &text, - "USAGE:
              \n
              $1\n```\nyaml content here\n```
              ", - ) + let re = + Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n
              $1[nobr]\n[commandbodyblock]
              ") + .replace("[commandbodyblock]", &command_body) }; - text.to_string() + // Add HTML line-breaks + let text = text + .replace("\n\n\n", "\n\n") + .replace("\n", "
              \n") + .replace("[nobr]
              ", ""); + + text } From 9ef3abacd43571300a7fbd7d35ba05d040816d8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 2 Feb 2022 18:03:50 +0100 Subject: [PATCH 0983/1727] fix: initial state deserialize->serialize error --- src/client_server/room.rs | 7 +++++-- src/pdu.rs | 19 ++----------------- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 52d2542..a233963 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -344,10 +344,13 @@ pub async fn create_room_route( // 6. Events listed in initial_state for event in &body.initial_state { - let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| { + let mut pdu_builder = event.deserialize_as::().map_err(|e| { warn!("Invalid initial state event: {:?}", e); Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") - })?); + })?; + + // Implicit state key defaults to "" + pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { diff --git a/src/pdu.rs b/src/pdu.rs index db9375e..fe00460 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,9 +1,8 @@ use crate::Error; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent, - AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, - EventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, @@ -361,17 +360,3 @@ pub struct PduBuilder { pub state_key: Option, pub redacts: Option>, } - -/// Direct conversion prevents loss of the empty `state_key` that ruma requires. -impl From for PduBuilder { - fn from(event: AnyInitialStateEvent) -> Self { - Self { - event_type: EventType::from(event.event_type()), - content: to_raw_value(&event.content()) - .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), - unsigned: None, - state_key: Some(event.state_key().to_owned()), - redacts: None, - } - } -} From abb4b4cf0b0868fe7e5ee21298278b8c3deacb0e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 13:24:04 +0100 Subject: [PATCH 0984/1727] Remove TryFrom, TryInto imports They are no longer needed in the 2021 edition. 
--- src/appservice_server.rs | 7 +------ src/client_server/account.rs | 2 +- src/client_server/context.rs | 2 +- src/client_server/directory.rs | 2 -- src/client_server/media.rs | 1 - src/client_server/membership.rs | 1 - src/client_server/message.rs | 1 - src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 2 +- src/client_server/room.rs | 2 +- src/client_server/sync.rs | 1 - src/database.rs | 1 - src/database/account_data.rs | 2 +- src/database/admin.rs | 2 +- src/database/pusher.rs | 2 +- src/database/rooms.rs | 1 - src/database/rooms/edus.rs | 1 - src/database/sending.rs | 1 - src/database/users.rs | 7 +------ src/pdu.rs | 2 +- src/server_server.rs | 1 - src/utils.rs | 1 - 22 files changed, 11 insertions(+), 33 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ed886d6..0152c38 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,12 +1,7 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; -use std::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - mem, - time::Duration, -}; +use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; pub(crate) async fn send_request( diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ff34854..47e2a6a 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; diff --git a/src/client_server/context.rs b/src/client_server/context.rs index e117766..7ded48d 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::{error::ErrorKind, r0::context::get_context}, events::EventType, }; -use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; #[cfg(feature = "conduit_bin")] use rocket::get; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 5a1bc49..719d9af 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,5 +1,3 @@ -use std::convert::TryInto; - use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::{ diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 8524c57..deea319 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -9,7 +9,6 @@ use ruma::api::client::{ get_media_config, }, }; -use std::convert::TryInto; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 216c4c0..e855dba 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -30,7 +30,6 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 7d904f9..cf4f0cb 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,6 @@ use ruma::{ }; use std::{ collections::{BTreeMap, HashSet}, - convert::TryInto, sync::Arc, }; diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 
aaa78a9..cdc1e1f 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; -use std::{convert::TryInto, time::Duration}; +use std::time::Duration; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 71e61da..ef58a98 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -12,7 +12,7 @@ use ruma::{ events::{room::member::RoomMemberEventContent, EventType}, }; use serde_json::value::to_raw_value; -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index a233963..7ea31d8 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -27,7 +27,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 14aac3a..7cfea5a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -14,7 +14,6 @@ use ruma::{ }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::TryInto, sync::Arc, time::Duration, }; diff --git a/src/database.rs b/src/database.rs index 8d245b7..c9cbad4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -28,7 +28,6 @@ use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fs::{self, remove_dir_all}, io::Write, mem::size_of, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 456283b..ec9d09e 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -6,7 +6,7 @@ use ruma::{ RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, convert::TryFrom, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use super::abstraction::Tree; diff --git a/src/database/admin.rs b/src/database/admin.rs index a214796..32972de 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,4 +1,4 @@ -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 97ca85d..f401834 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -19,7 +19,7 @@ use ruma::{ }; use tracing::{error, info, warn}; -use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc}; +use std::{fmt::Debug, mem, sync::Arc}; use super::abstraction::Tree; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c0cb1ce..a139853 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -35,7 +35,6 @@ use serde_json::value::to_raw_value; use std::{ borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fmt::Debug, iter, mem::size_of, diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index eb2d342..289a00a 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -11,7 +11,6 @@ use ruma::{ }; use 
std::{ collections::{HashMap, HashSet}, - convert::TryInto, mem, sync::Arc, }; diff --git a/src/database/sending.rs b/src/database/sending.rs index af4ac67..4a03285 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::TryInto, fmt::Debug, sync::Arc, time::{Duration, Instant}, diff --git a/src/database/users.rs b/src/database/users.rs index 13f9b15..681ee28 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -11,12 +11,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - mem, - sync::Arc, -}; +use std::{collections::BTreeMap, mem, sync::Arc}; use tracing::warn; use super::abstraction::Tree; diff --git a/src/pdu.rs b/src/pdu.rs index fe00460..ec6c961 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -12,7 +12,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; use tracing::warn; /// Content hashes of a PDU. diff --git a/src/server_server.rs b/src/server_server.rs index 9129951..e730210 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -60,7 +60,6 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fmt::Debug, future::Future, mem, diff --git a/src/utils.rs b/src/utils.rs index 26d71a8..e2d71f4 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -4,7 +4,6 @@ use rand::prelude::*; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, - convert::TryInto, str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; From ce60fc6859ea698ed8341beea8321a949d90ad39 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:27:54 +0100 Subject: [PATCH 0985/1727] Stop using set_env to configure tracing-subscriber --- src/main.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/main.rs b/src/main.rs index 63b2219..5fda573 100644 --- a/src/main.rs +++ b/src/main.rs @@ -184,9 +184,6 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< #[rocket::main] async fn main() { - // Force log level off, so we can use our own logger - std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - let raw_config = Figment::from(default_config()) .merge( @@ -197,8 +194,6 @@ async fn main() { ) .merge(Env::prefixed("CONDUIT_").global()); - std::env::set_var("RUST_LOG", "warn"); - let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { @@ -244,8 +239,6 @@ async fn main() { println!("exporting"); opentelemetry::global::shutdown_tracer_provider(); } else { - std::env::set_var("RUST_LOG", &config.log); - let registry = tracing_subscriber::Registry::default(); if config.tracing_flame { let (flame_layer, _guard) = @@ -259,7 +252,7 @@ async fn main() { start.await; } else { let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = EnvFilter::try_from_default_env() + let filter_layer = EnvFilter::try_new(&config.log) .or_else(|_| EnvFilter::try_new("info")) .unwrap(); From 974c10e739b70c5798450f5e51819e9d2beed5d3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 13:30:04 +0100 Subject: [PATCH 0986/1727] Move Config out of database module --- src/config.rs | 131 
++++++++++++++++++++++++++++++ src/{database => config}/proxy.rs | 0 src/database.rs | 130 +---------------------------- src/lib.rs | 4 +- 4 files changed, 136 insertions(+), 129 deletions(-) create mode 100644 src/config.rs rename src/{database => config}/proxy.rs (100%) diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 0000000..4c0fcc2 --- /dev/null +++ b/src/config.rs @@ -0,0 +1,131 @@ +use std::collections::BTreeMap; + +use ruma::ServerName; +use serde::{de::IgnoredAny, Deserialize}; +use tracing::warn; + +mod proxy; + +use self::proxy::ProxyConfig; + +#[derive(Clone, Debug, Deserialize)] +pub struct Config { + pub server_name: Box, + #[serde(default = "default_database_backend")] + pub database_backend: String, + pub database_path: String, + #[serde(default = "default_db_cache_capacity_mb")] + pub db_cache_capacity_mb: f64, + #[serde(default = "default_conduit_cache_capacity_modifier")] + pub conduit_cache_capacity_modifier: f64, + #[serde(default = "default_rocksdb_max_open_files")] + pub rocksdb_max_open_files: i32, + #[serde(default = "default_pdu_cache_capacity")] + pub pdu_cache_capacity: u32, + #[serde(default = "default_cleanup_second_interval")] + pub cleanup_second_interval: u32, + #[serde(default = "default_max_request_size")] + pub max_request_size: u32, + #[serde(default = "default_max_concurrent_requests")] + pub max_concurrent_requests: u16, + #[serde(default = "false_fn")] + pub allow_registration: bool, + #[serde(default = "true_fn")] + pub allow_encryption: bool, + #[serde(default = "false_fn")] + pub allow_federation: bool, + #[serde(default = "true_fn")] + pub allow_room_creation: bool, + #[serde(default = "false_fn")] + pub allow_jaeger: bool, + #[serde(default = "false_fn")] + pub tracing_flame: bool, + #[serde(default)] + pub proxy: ProxyConfig, + pub jwt_secret: Option, + #[serde(default = "Vec::new")] + pub trusted_servers: Vec>, + #[serde(default = "default_log")] + pub log: String, + #[serde(default)] + pub turn_username: String, + #[serde(default)] + pub turn_password: String, + #[serde(default = "Vec::new")] + pub turn_uris: Vec, + #[serde(default)] + pub turn_secret: String, + #[serde(default = "default_turn_ttl")] + pub turn_ttl: u64, + + #[serde(flatten)] + pub catchall: BTreeMap, +} + +const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; + +impl Config { + pub fn warn_deprecated(&self) { + let mut was_deprecated = false; + for key in self + .catchall + .keys() + .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) + { + warn!("Config parameter {} is deprecated", key); + was_deprecated = true; + } + + if was_deprecated { + warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); + } + } +} + +fn false_fn() -> bool { + false +} + +fn true_fn() -> bool { + true +} + +fn default_database_backend() -> String { + "sqlite".to_owned() +} + +fn default_db_cache_capacity_mb() -> f64 { + 10.0 +} + +fn default_conduit_cache_capacity_modifier() -> f64 { + 1.0 +} + +fn default_rocksdb_max_open_files() -> i32 { + 20 +} + +fn default_pdu_cache_capacity() -> u32 { + 150_000 +} + +fn default_cleanup_second_interval() -> u32 { + 1 * 60 // every minute +} + +fn default_max_request_size() -> u32 { + 20 * 1024 * 1024 // Default to 20 MB +} + +fn default_max_concurrent_requests() -> u16 { + 100 +} + +fn default_log() -> String { + "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() +} + +fn default_turn_ttl() -> u64 { + 60 * 60 * 24 +} diff --git a/src/database/proxy.rs 
b/src/config/proxy.rs similarity index 100% rename from src/database/proxy.rs rename to src/config/proxy.rs diff --git a/src/database.rs b/src/database.rs index c9cbad4..5deedcf 100644 --- a/src/database.rs +++ b/src/database.rs @@ -6,7 +6,6 @@ pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; -pub mod proxy; pub mod pusher; pub mod rooms; pub mod sending; @@ -14,7 +13,7 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; -use crate::{utils, Error, Result}; +use crate::{utils, Config, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; use lru_cache::LruCache; @@ -24,8 +23,7 @@ use rocket::{ request::{FromRequest, Request}, Shutdown, State, }; -use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; -use serde::{de::IgnoredAny, Deserialize}; +use ruma::{DeviceId, EventId, RoomId, UserId}; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -38,130 +36,6 @@ use std::{ use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, warn}; -use self::proxy::ProxyConfig; - -#[derive(Clone, Debug, Deserialize)] -pub struct Config { - server_name: Box, - #[serde(default = "default_database_backend")] - database_backend: String, - database_path: String, - #[serde(default = "default_db_cache_capacity_mb")] - db_cache_capacity_mb: f64, - #[serde(default = "default_conduit_cache_capacity_modifier")] - conduit_cache_capacity_modifier: f64, - #[serde(default = "default_rocksdb_max_open_files")] - rocksdb_max_open_files: i32, - #[serde(default = "default_pdu_cache_capacity")] - pdu_cache_capacity: u32, - #[serde(default = "default_cleanup_second_interval")] - cleanup_second_interval: u32, - #[serde(default = "default_max_request_size")] - max_request_size: u32, - #[serde(default = "default_max_concurrent_requests")] - max_concurrent_requests: u16, - #[serde(default = "false_fn")] - allow_registration: bool, - #[serde(default = "true_fn")] - allow_encryption: bool, - #[serde(default = "false_fn")] - allow_federation: bool, - #[serde(default = "true_fn")] - allow_room_creation: bool, - #[serde(default = "false_fn")] - pub allow_jaeger: bool, - #[serde(default = "false_fn")] - pub tracing_flame: bool, - #[serde(default)] - proxy: ProxyConfig, - jwt_secret: Option, - #[serde(default = "Vec::new")] - trusted_servers: Vec>, - #[serde(default = "default_log")] - pub log: String, - #[serde(default)] - turn_username: String, - #[serde(default)] - turn_password: String, - #[serde(default = "Vec::new")] - turn_uris: Vec, - #[serde(default)] - turn_secret: String, - #[serde(default = "default_turn_ttl")] - turn_ttl: u64, - - #[serde(flatten)] - catchall: BTreeMap, -} - -const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; - -impl Config { - pub fn warn_deprecated(&self) { - let mut was_deprecated = false; - for key in self - .catchall - .keys() - .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) - { - warn!("Config parameter {} is deprecated", key); - was_deprecated = true; - } - - if was_deprecated { - warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); - } - } -} - -fn false_fn() -> bool { - false -} - -fn true_fn() -> bool { - true -} - -fn default_database_backend() -> String { - "sqlite".to_owned() -} - -fn default_db_cache_capacity_mb() -> f64 { - 10.0 -} - -fn default_conduit_cache_capacity_modifier() -> f64 { - 1.0 -} - -fn default_rocksdb_max_open_files() -> i32 { - 20 -} - -fn 
default_pdu_cache_capacity() -> u32 { - 150_000 -} - -fn default_cleanup_second_interval() -> u32 { - 1 * 60 // every minute -} - -fn default_max_request_size() -> u32 { - 20 * 1024 * 1024 // Default to 20 MB -} - -fn default_max_concurrent_requests() -> u16 { - 100 -} - -fn default_log() -> String { - "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() -} - -fn default_turn_ttl() -> u64 { - 60 * 60 * 24 -} - pub struct Database { _db: Arc, pub globals: globals::Globals, diff --git a/src/lib.rs b/src/lib.rs index 745eb39..030dfc3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ use std::ops::Deref; +mod config; mod database; mod error; mod pdu; @@ -19,7 +20,8 @@ pub mod appservice_server; pub mod client_server; pub mod server_server; -pub use database::{Config, Database}; +pub use config::Config; +pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::Config as RocketConfig; From 6399a7fe4e07f9992ac8ca0412dc48c87d4d0456 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Thu, 3 Feb 2022 20:21:04 +0200 Subject: [PATCH 0987/1727] Remove dash from admin command help --- src/database/admin.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 8f90e4d..34bef5f 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use rocket::{ }; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - EventId, RoomId, RoomVersionId, UserId, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -140,10 +140,11 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven let admin_command = match parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { + let server_name = db.globals.server_name(); let message = error .to_string() - .replace("example.com", db.globals.server_name().as_str()); - let html_message = usage_to_html(&message); + .replace("server.name", server_name.as_str()); + let html_message = usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); } @@ -191,7 +192,7 @@ fn parse_admin_command(command_line: &str) -> std::result::Result String { +fn usage_to_html(text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + let text = text.replace( + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), + ); + // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); let text = text.replace("subcommand", "command"); From 92571d961f8ec0ce72c0c40433e2487032643060 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 19:54:29 +0100 Subject: [PATCH 0988/1727] Remove mutation from default_config and set default log_level to off --- Cargo.lock | 1 + Cargo.toml | 1 + src/main.rs | 37 ++++++++++++++++++------------------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6dbb658..8548771 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -317,6 +317,7 @@ dependencies = [ "image", "jsonwebtoken", "lru-cache", + "maplit", "num_cpus", "opentelemetry", "opentelemetry-jaeger", diff --git a/Cargo.toml b/Cargo.toml index 05782e7..fe60f6e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,6 +84,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" # used for 
conduit's CLI and admin room command parsing clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } +maplit = "1.0.2" [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } diff --git a/src/main.rs b/src/main.rs index 5fda573..b3e85c9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,6 +9,7 @@ use std::sync::Arc; +use maplit::hashset; use opentelemetry::trace::{FutureExt, Tracer}; use rocket::{ catch, catchers, @@ -292,28 +293,26 @@ fn bad_json_catcher() -> Result<()> { } fn default_config() -> rocket::Config { - let mut config = rocket::Config::release_default(); + use rocket::config::{LogLevel, Shutdown, Sig}; - { - let mut shutdown = &mut config.shutdown; + rocket::Config { + // Disable rocket's logging to get only tracing-subscriber's log output + log_level: LogLevel::Off, + shutdown: Shutdown { + // Once shutdown is triggered, this is the amount of seconds before rocket + // will forcefully start shutting down connections, this gives enough time to /sync + // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. + grace: 35, - #[cfg(unix)] - { - use rocket::config::Sig; + // After the grace period, rocket starts shutting down connections, and waits at least this + // many seconds before forcefully shutting all of them down. + mercy: 10, - shutdown.signals.insert(Sig::Term); - shutdown.signals.insert(Sig::Int); - } + #[cfg(unix)] + signals: hashset![Sig::Term, Sig::Int], - // Once shutdown is triggered, this is the amount of seconds before rocket - // will forcefully start shutting down connections, this gives enough time to /sync - // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. - shutdown.grace = 35; - - // After the grace period, rocket starts shutting down connections, and waits at least this - // many seconds before forcefully shutting all of them down. 
- shutdown.mercy = 10; + ..Shutdown::default() + }, + ..rocket::Config::release_default() } - - config } From d23d6fbb371c4aa263d47ff430f6491283d49915 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 20:23:35 +0100 Subject: [PATCH 0989/1727] Upgrade Ruma --- Cargo.lock | 36 +++++++++++++++++----------------- Cargo.toml | 2 +- src/client_server/to_device.rs | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8548771..19df999 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2175,7 +2175,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "assign", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "bytes", "http", @@ -2212,7 +2212,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2223,7 +2223,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "ruma-api", "ruma-common", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "assign", "bytes", @@ -2257,7 +2257,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "indexmap", "js_int", @@ -2272,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "indoc", "js_int", @@ -2289,7 +2289,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = 
"git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2300,7 +2300,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2315,7 +2315,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2330,7 +2330,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2340,7 +2340,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "thiserror", ] @@ -2348,7 +2348,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2361,7 +2361,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2376,7 +2376,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "base64 0.13.0", "bytes", @@ -2391,7 +2391,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2402,7 +2402,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2419,7 +2419,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index fe60f6e..1e1b188 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "82becb86c837570224964425929d1b5305784435", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c2a05d407d80df1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 6e764de..e0aa9e9 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -54,7 +54,7 @@ pub async fn send_event_to_device_route( DirectDeviceContent { sender: sender_user.clone(), ev_type: EventType::from(&*body.event_type), - message_id: body.txn_id.to_string(), + message_id: body.txn_id.clone(), messages, }, )) From e1c0dcb6bb45432c0638f5eced7b719ea2ff1afe Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Thu, 3 Feb 2022 20:52:41 +0200 Subject: [PATCH 0990/1727] Create admin room and hide migration messages on first run --- src/client_server/account.rs | 302 ++---------------------------- src/database.rs | 81 +++++--- src/database/admin.rs | 353 +++++++++++++++++++++++++++++++++-- 3 files changed, 419 insertions(+), 317 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 47e2a6a..a210e8a 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,7 +1,11 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{ + database::{admin::make_user_admin, DatabaseGuard}, + pdu::PduBuilder, + utils, ConduitResult, Error, 
Ruma, +}; use ruma::{ api::client::{ error::ErrorKind, @@ -14,25 +18,13 @@ use ruma::{ }, }, events::{ - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, - }, + room::member::{MembershipState, RoomMemberEventContent}, EventType, }, - identifiers::RoomName, - push, RoomAliasId, RoomId, RoomVersionId, UserId, + push, UserId, }; use serde_json::value::to_raw_value; -use tracing::info; +use tracing::{info, warn}; use register::RegistrationKind; #[cfg(feature = "conduit_bin")] @@ -253,276 +245,16 @@ pub async fn register_route( body.initial_device_display_name.clone(), )?; - // If this is the first user on this server, create the admin room - if db.users.count()? == 1 { - // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) - .expect("@conduit:server_name is valid"); - - db.users.create(&conduit_user, None)?; - - let room_id = RoomId::new(db.globals.server_name()); - - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut content = RoomCreateEventContent::new(conduit_user.clone()); - content.federate = true; - content.predecessor = None; - content.room_version = RoomVersionId::V6; - - // 1. The room create event - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 2. Make conduit bot join - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 3. 
Power levels - let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); - users.insert(user_id.clone(), 100.into()); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.1 Join Rules - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.2 History Visibility - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.3 Guest Access - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 6. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) - .expect("Room name is valid"); - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", db.globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // Room alias - let alias: Box = format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; - - // Invite and join the real user - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just 
created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - &db, - &state_lock, - )?; - - // Send welcome message - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

              Thank you for trying out Conduit!

              \n

              Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

              \n

              Helpful links:

              \n
              \n

              Website: https://conduit.rs
              Git and Documentation: https://gitlab.com/famedly/conduit
              Report issues: https://gitlab.com/famedly/conduit/-/issues

              \n
              \n

              Here are some rooms you can join (by typing the command):

              \n

              Conduit room (Ask questions and get notified on updates):
              /join #conduit:fachschaften.org

              \n

              Conduit lounge (Off-topic, only Conduit users are allowed to join)
              /join #conduit-lounge:conduit.rs

              \n".to_owned(), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - } - info!("{} registered on this server", user_id); + // If this is the first real user, grant them admin privileges + // Note: the server user, @conduit:servername, is generated first + if db.users.count()? == 2 { + make_user_admin(&db, &user_id, displayname).await?; + + warn!("Granting {} admin privileges as the first user", user_id); + } + db.flush()?; Ok(register::Response { diff --git a/src/database.rs b/src/database.rs index 5deedcf..2b1671c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -34,7 +34,9 @@ use std::{ sync::{Arc, Mutex, RwLock}, }; use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; + +use self::admin::create_admin_room; pub struct Database { _db: Arc, @@ -301,10 +303,32 @@ impl Database { )?, })); - { - let db = db.read().await; + let guard = db.read().await; + + // Matrix resource ownership is based on the server name; changing it + // requires recreating the database from scratch. + if guard.users.count()? > 0 { + let conduit_user = + UserId::parse_with_server_name("conduit", guard.globals.server_name()) + .expect("@conduit:server_name is valid"); + + if !guard.users.exists(&conduit_user)? { + error!( + "The {} server user does not exist, and the database is not new.", + conduit_user + ); + return Err(Error::bad_database( + "Cannot reuse an existing database after changing the server name, please delete the old one first." + )); + } + } + + // If the database has any data, perform data migrations before starting + let latest_database_version = 11; + + if guard.users.count()? > 0 { + let db = &*guard; // MIGRATIONS - // TODO: database versions of new dbs should probably not be 0 if db.globals.database_version()? < 1 { for (roomserverid, _) in db.rooms.roomserverids.iter() { let mut parts = roomserverid.split(|&b| b == 0xff); @@ -325,7 +349,7 @@ impl Database { db.globals.bump_database_version(1)?; - println!("Migration: 0 -> 1 finished"); + warn!("Migration: 0 -> 1 finished"); } if db.globals.database_version()? < 2 { @@ -344,7 +368,7 @@ impl Database { db.globals.bump_database_version(2)?; - println!("Migration: 1 -> 2 finished"); + warn!("Migration: 1 -> 2 finished"); } if db.globals.database_version()? < 3 { @@ -362,7 +386,7 @@ impl Database { db.globals.bump_database_version(3)?; - println!("Migration: 2 -> 3 finished"); + warn!("Migration: 2 -> 3 finished"); } if db.globals.database_version()? < 4 { @@ -385,7 +409,7 @@ impl Database { db.globals.bump_database_version(4)?; - println!("Migration: 3 -> 4 finished"); + warn!("Migration: 3 -> 4 finished"); } if db.globals.database_version()? < 5 { @@ -409,7 +433,7 @@ impl Database { db.globals.bump_database_version(5)?; - println!("Migration: 4 -> 5 finished"); + warn!("Migration: 4 -> 5 finished"); } if db.globals.database_version()? < 6 { @@ -422,7 +446,7 @@ impl Database { db.globals.bump_database_version(6)?; - println!("Migration: 5 -> 6 finished"); + warn!("Migration: 5 -> 6 finished"); } if db.globals.database_version()? < 7 { @@ -549,7 +573,7 @@ impl Database { db.globals.bump_database_version(7)?; - println!("Migration: 6 -> 7 finished"); + warn!("Migration: 6 -> 7 finished"); } if db.globals.database_version()? 
< 8 { @@ -557,7 +581,7 @@ impl Database { for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { let shortroomid = db.globals.next_count()?.to_be_bytes(); db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; - println!("Migration: 8"); + info!("Migration: 8"); } // Update pduids db layout let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { @@ -608,7 +632,7 @@ impl Database { db.globals.bump_database_version(8)?; - println!("Migration: 7 -> 8 finished"); + warn!("Migration: 7 -> 8 finished"); } if db.globals.database_version()? < 9 { @@ -650,7 +674,7 @@ impl Database { println!("smaller batch done"); } - println!("Deleting starts"); + info!("Deleting starts"); let batch2: Vec<_> = db .rooms @@ -673,7 +697,7 @@ impl Database { db.globals.bump_database_version(9)?; - println!("Migration: 8 -> 9 finished"); + warn!("Migration: 8 -> 9 finished"); } if db.globals.database_version()? < 10 { @@ -692,7 +716,7 @@ impl Database { db.globals.bump_database_version(10)?; - println!("Migration: 9 -> 10 finished"); + warn!("Migration: 9 -> 10 finished"); } if db.globals.database_version()? < 11 { @@ -701,11 +725,28 @@ impl Database { .clear()?; db.globals.bump_database_version(11)?; - println!("Migration: 10 -> 11 finished"); + warn!("Migration: 10 -> 11 finished"); } - } - let guard = db.read().await; + assert_eq!(11, latest_database_version); + + info!( + "Loaded {} database with version {}", + config.database_backend, latest_database_version + ); + } else { + guard + .globals + .bump_database_version(latest_database_version)?; + + // Create the admin room and server user on first run + create_admin_room(&guard).await?; + + warn!( + "Created new {} database with version {}", + config.database_backend, latest_database_version + ); + } // This data is probably outdated guard.rooms.edus.presenceid_presence.clear()?; @@ -724,8 +765,6 @@ impl Database { #[cfg(feature = "conduit_bin")] pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { - use tracing::info; - tokio::spawn(async move { shutdown.await; diff --git a/src/database/admin.rs b/src/database/admin.rs index 34bef5f..9bbfd4e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,4 +1,4 @@ -use std::{convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; use crate::{ error::{Error, Result}, @@ -12,12 +12,22 @@ use rocket::{ http::RawStr, }; use ruma::{ + events::room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, events::{room::message::RoomMessageEventContent, EventType}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; -use tracing::warn; pub enum AdminRoomEvent { ProcessMessage(String), @@ -52,16 +62,9 @@ impl Admin { .try_into() .expect("#admins:server_name is a valid room alias"), ) + .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - let conduit_room = match 
conduit_room { - None => { - warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); - return; - } - Some(r) => r, - }; - drop(guard); let send_message = |message: RoomMessageEventContent, @@ -500,3 +503,331 @@ fn usage_to_html(text: &str, server_name: &ServerName) -> String { text } + +/// Create the admin room. +/// +/// Users in this room are considered admins by conduit, and the room can be +/// used to issue admin commands by talking to the server user inside it. +pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { + let room_id = RoomId::new(db.globals.server_name()); + + db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is valid"); + + db.users.create(&conduit_user, None)?; + + let mut content = RoomCreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = RoomVersionId::V6; + + // 1. The room create event + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 2. Make conduit bot join + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 3. 
Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.1 Join Rules + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.2 History Visibility + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.3 Guest Access + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 5. Events implied by name and topic + let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + .expect("Room name is valid"); + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", db.globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 6. Room alias + let alias: Box = format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + + Ok(()) +} + +/// Invite the user to the conduit admin room. +/// +/// In conduit, this is equivalent to granting admin privileges. 
+pub(crate) async fn make_user_admin( + db: &Database, + user_id: &UserId, + displayname: String, +) -> Result<()> { + let admin_room_alias: Box = format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + let room_id = db + .rooms + .id_from_alias(&admin_room_alias)? + .expect("Admin room must exist"); + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Use the server user to grant the new admin's power level + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &user_id, + &room_id, + &db, + &state_lock, + )?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // Send welcome message + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + Ok(()) +} From 72cd52e57c7afa1f051b488e6385b59617fffa4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 13:30:42 +0100 Subject: [PATCH 0991/1727] fix: lazy loading for /context --- src/client_server/context.rs | 99 +++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 25 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 7ded48d..02148f4 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,9 +1,13 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::context::get_context}, + api::client::{ + error::ErrorKind, + r0::{context::get_context, filter::LazyLoadOptions}, + }, events::EventType, }; -use std::collections::HashSet; +use std::{collections::HashSet, convert::TryFrom}; +use tracing::error; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -26,12 +30,15 @@ pub async fn get_context_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } + // Load filter + let filter = body.filter.clone().unwrap_or_default(); + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + _ => (false, false), + }; let mut lazy_loaded = HashSet::new(); @@ -53,20 +60,30 @@ pub async fn get_context_route( "Base event not found.", ))?; + let room_id = base_event.room_id.clone(); + + if !db.rooms.is_joined(sender_user, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &base_event.sender, - )? { - lazy_loaded.insert(base_event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(base_event.sender.as_str().to_owned()); } let base_event = base_event.to_room_event(); let events_before: Vec<_> = db .rooms - .pdus_until(sender_user, &body.room_id, base_token)? + .pdus_until(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -80,10 +97,11 @@ pub async fn get_context_route( if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &event.sender, - )? { - lazy_loaded.insert(event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); } } @@ -99,7 +117,7 @@ pub async fn get_context_route( let events_after: Vec<_> = db .rooms - .pdus_after(sender_user, &body.room_id, base_token)? + .pdus_after(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -113,13 +131,28 @@ pub async fn get_context_route( if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &event.sender, - )? 
{ - lazy_loaded.insert(event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); } } + let shortstatehash = match db.rooms.pdu_shortstatehash( + events_after + .last() + .map_or(&*body.event_id, |(_, e)| &*e.event_id), + )? { + Some(s) => s, + None => db + .rooms + .current_shortstatehash(&room_id)? + .expect("All rooms have state"), + }; + + let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let end_token = events_after .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -131,12 +164,28 @@ pub async fn get_context_route( .collect(); let mut state = Vec::new(); - for ll_id in &lazy_loaded { - if let Some(member_event) = - db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? - { - state.push(member_event.to_state_event()); + + for (shortstatekey, id) in state_ids { + let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + + if event_type != EventType::RoomMember { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); + } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); } } From 8d8edddb2e1074bf36a14b244c7ed9a0eec054dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 16:59:30 +0100 Subject: [PATCH 0992/1727] feat: allow disabling jemalloc via feature --- Cargo.toml | 8 ++++---- src/main.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index aac840b..dd31c84 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,17 +86,17 @@ sha-1 = "0.9.8" clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } maplit = "1.0.2" -[target.'cfg(not(target_env = "msvc"))'.dependencies] -tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } -tikv-jemallocator = { version = "0.4.1", features = ['unprefixed_malloc_on_supported_platforms'] } +tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } +tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] +jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/main.rs b/src/main.rs index b3e85c9..ea09dd5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,10 +26,10 @@ use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate pub use rocket::State; -#[cfg(not(target_env = "msvc"))] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use tikv_jemallocator::Jemalloc; -#[cfg(not(target_env = "msvc"))] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; From eb0b2c429faf9bfa3c472c198deca8f7f07f46f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= 
Date: Fri, 4 Feb 2022 17:15:21 +0100 Subject: [PATCH 0993/1727] fix: crash on empty search --- src/client_server/search.rs | 9 +++++---- src/database/rooms.rs | 29 +++++++++++++++-------------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 59c9480..f492292 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -44,11 +44,12 @@ pub async fn search_events_route( )); } - let search = db + if let Some(search) = db .rooms - .search_pdus(&room_id, &search_criteria.search_term)?; - - searches.push(search.0.peekable()); + .search_pdus(&room_id, &search_criteria.search_term)? + { + searches.push(search.0.peekable()); + } } let skip = match body.next_batch.as_ref().map(|s| s.parse()) { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2c271d1..0abd2e7 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2771,7 +2771,7 @@ impl Rooms { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result<(impl Iterator> + 'a, Vec)> { + ) -> Result> + 'a, Vec)>> { let prefix = self .get_shortroomid(room_id)? .expect("room exists") @@ -2799,19 +2799,20 @@ impl Rooms { .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); - Ok(( - utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .unwrap() - .map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - )) + Ok(utils::common_elements(iterators, |a, b| { + // We compare b with a because we reversed the iterator earlier + b.cmp(a) + }) + .map(|iter| { + ( + iter.map(move |id| { + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }), + words, + ) + })) } #[tracing::instrument(skip(self))] From dd03608f173b1cc450183eae2b2be46684e868fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 17:24:45 +0100 Subject: [PATCH 0994/1727] use our own reqwest fork --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 209adea..859564b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2018,7 +2018,7 @@ dependencies = [ [[package]] name = "reqwest" version = "0.11.9" -source = "git+https://github.com/niuhuan/reqwest?branch=dns-resolver-fn#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" +source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index aac840b..fb86138 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/niuhuan/reqwest", branch = "dns-resolver-fn" } +reqwest = { default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images From 103dc7e09b4cdcefca6817b448f6b45677988e84 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 4 Feb 2022 17:57:59 +0100 Subject: [PATCH 0995/1727] Pre-0.3 doc adjustments --- APPSERVICES.md | 4 ++-- Cargo.lock | 2 +- Cargo.toml | 2 +- DEPLOY.md | 27 ++++++++++++++++----------- README.md | 18 
++++++------------ docker/README.md | 2 +- 6 files changed, 27 insertions(+), 28 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 545772a..8ca015a 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -2,7 +2,7 @@ ## Getting help -If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Set up the appservice - general instructions @@ -46,7 +46,7 @@ could help. To remove an appservice go to your admin room and execute -```@conduit:your.server.name: unregister-appservice ``` +`@conduit:your.server.name: unregister-appservice ` where `` one of the output of `list-appservices`. diff --git a/Cargo.lock b/Cargo.lock index 859564b..632b4ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -304,7 +304,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.2.0" +version = "0.3.0" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index fb86138..587e26b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.2.0" +version = "0.3.0" rust-version = "1.56" edition = "2021" diff --git a/DEPLOY.md b/DEPLOY.md index d9f91e0..c3da697 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -1,9 +1,9 @@ # Deploying Conduit -## Getting help - -If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us -in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +> ## Getting help +> +> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit @@ -12,17 +12,21 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -| CPU Architecture | Download stable version | -| ------------------------------------------- | ------------------------------ | -| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | -| armv6 | [Download][armv6-musl-master] | -| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | -| armv8 / aarch64 | [Download][armv8-musl-master] | +| CPU Architecture | Download stable version | Download development version | +| ------------------------------------------- | ------------------------------ | ---------------------------- | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] | +| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] | +| armv7 (e.g. 
Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] | +| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] | [x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl [armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf [armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf [armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -240,4 +244,5 @@ $ curl https://your.server.name/_matrix/client/versions $ curl https://your.server.name:8448/_matrix/client/versions ``` -If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) +- If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/README.md b/README.md index e667d18..a4f0929 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Conduit + ### A Matrix homeserver written in Rust #### What is the goal? @@ -7,7 +8,6 @@ An efficient Matrix homeserver that's easy to set up and just works. You can ins it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. - #### Can I try it out? Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for @@ -17,7 +17,6 @@ It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which was used in the Samsung Galaxy S5. It joined many big rooms including Matrix HQ. - #### What is the current status? As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most @@ -31,26 +30,23 @@ There are still a few important features missing: Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). - #### How can I deploy my own? -Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)\ -Debian package: [debian/README.Debian](debian/README.Debian)\ -Docker: [docker/README.md](docker/README.md) +- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) +- Debian package: [debian/README.Debian](debian/README.Debian) +- Docker: [docker/README.md](docker/README.md) If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). - #### How can I contribute? 1. 
Look for an issue you would like to work on and make sure it's not assigned to other users 2. Ask someone to assign the issue to you (comment on the issue or chat in - #conduit:nordgedanken.dev) -3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :) + [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)) +3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :) 4. Submit a MR - #### Thanks to Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. @@ -60,13 +56,11 @@ Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust - Rocket: A flexible web framework - #### Donate Liberapay: \ Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` - #### Logo Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \ diff --git a/docker/README.md b/docker/README.md index 1f38d66..d886738 100644 --- a/docker/README.md +++ b/docker/README.md @@ -35,7 +35,7 @@ or you can skip the build step and pull the image from one of the following regi | GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | [dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit -[gl]: https://gitlab.com/famedly/conduit/container_registry/ +[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). From 826b077e218d5d698995d786e1a6457fc3b79dad Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 4 Feb 2022 18:43:13 +0100 Subject: [PATCH 0996/1727] fix(ci): Always build debug version for sytest --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f1a19f..8d701c2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -117,7 +117,7 @@ build:release:cargo:aarch64-unknown-linux-musl: .cargo-debug-shared-settings: extends: ".build-cargo-shared-settings" rules: - - if: '$CI_COMMIT_BRANCH != "master"' + - when: "always" cache: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: From 63a2c6cce5f01cfca8295a2ea5ad7f639bc257b8 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 4 Feb 2022 19:11:29 +0100 Subject: [PATCH 0997/1727] Add new TURN Readme and reference it from DEPLOY.md --- DEPLOY.md | 14 ++++++++++++++ TURN.md | 25 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 TURN.md diff --git a/DEPLOY.md b/DEPLOY.md index d9f91e0..4a0d0ab 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -241,3 +241,17 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). + +# What's next? + +## Audio/Video calls + +For Audio/Video call functionality see the [TURN Guide](TURN.md). 
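The `turn_secret` described in the TURN guide below follows coturn's shared-secret (`use-auth-secret`) scheme: the homeserver mints short-lived credentials by HMAC-signing an expiry-tagged username with the shared secret. As a rough, hedged sketch only — not Conduit's actual code; the helper name `make_turn_credentials` and the `ttl` parameter are made up here, and the `hmac` 0.11 / `sha-1` 0.9 / `base64` 0.13 APIs from Conduit's dependency list are assumed — the derivation looks something like this:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha_1::Sha1; // the `sha-1` crate, imported as `sha_1`
use std::time::{Duration, SystemTime, UNIX_EPOCH};

type HmacSha1 = Hmac<Sha1>;

/// Hypothetical helper: derive ephemeral TURN credentials from the shared secret.
/// With `use-auth-secret`, coturn accepts `username = <unix_expiry>:<user_id>` and
/// `password = base64(HMAC-SHA1(turn_secret, username))`.
fn make_turn_credentials(turn_secret: &str, user_id: &str, ttl: Duration) -> (String, String) {
    // Credentials expire `ttl` seconds from now.
    let expiry = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("time is after the unix epoch")
        .as_secs()
        + ttl.as_secs();
    let username = format!("{}:{}", expiry, user_id);

    // Sign the username with the shared secret configured in both
    // conduit.toml (`turn_secret`) and coturn (`static-auth-secret`).
    let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode(mac.finalize().into_bytes());

    (username, password)
}
```

Because both sides derive the password from the same secret, no credentials need to be stored or synchronised between Conduit and coturn; the expiry baked into the username bounds how long a leaked credential stays useful.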
+As of 2022, Clients known to support a/v calls are + +* Element/Android +* SchildiChat + +Clients known to not support a/v calls are + +* FluffyChat diff --git a/TURN.md b/TURN.md new file mode 100644 index 0000000..ed96295 --- /dev/null +++ b/TURN.md @@ -0,0 +1,25 @@ +# Setting up TURN/STURN + +## General instructions + +* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). + +## Edit/Add a few settings to your existing conduit.toml + +``` +# Refer to your Coturn settings. +# `server.name` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:server.name?transport=udp", "turn:server.name?transport=tcp"] + +# static-auth-secret of your turnserver +turn_secret = "ADD SECRET HERE" + +# If you have your TURN server configured to use a username and password +# you can provide these information too. In this case comment out `turn_secret above`! +#turn_username = "" +#turn_password = "" +``` + +## Apply settings + +Restart Conduit. \ No newline at end of file From f110b5710a182192231d5ae40f8374bb5dda332f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 4 Feb 2022 21:11:50 +0100 Subject: [PATCH 0998/1727] Move appservice howto into whats-next; again, rename placeholder TURN url --- DEPLOY.md | 5 ++++- TURN.md | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 8259b3f..eecf513 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -245,10 +245,13 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` - To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) -- If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). # What's next? ## Audio/Video calls For Audio/Video call functionality see the [TURN Guide](TURN.md). + +## Appservices + +If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/TURN.md b/TURN.md index ed96295..63c1e99 100644 --- a/TURN.md +++ b/TURN.md @@ -8,8 +8,8 @@ ``` # Refer to your Coturn settings. -# `server.name` has to match the REALM setting of your Coturn as well as `transport`. -turn_uris = ["turn:server.name?transport=udp", "turn:server.name?transport=tcp"] +# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"] # static-auth-secret of your turnserver turn_secret = "ADD SECRET HERE" From 31918bb9908c3917273a70acb6be9ea1b3b1b6ed Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sat, 5 Feb 2022 08:57:15 +0200 Subject: [PATCH 0999/1727] Fix admin room processing commands from its own messages --- src/database/rooms.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0abd2e7..aff39dd 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1477,17 +1477,18 @@ impl Rooms { self.tokenids.insert_batch(&mut batch)?; - if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) - && self - .id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )? 
- .as_ref() - == Some(&pdu.room_id) - { + let admin_room = self.id_from_alias( + <&RoomAliasId>::try_from( + format!("#admins:{}", db.globals.server_name()).as_str(), + ) + .expect("#admins:server_name is a valid room alias"), + )?; + let server_user = format!("@conduit:{}", db.globals.server_name()); + + let to_conduit = body.starts_with(&format!("{}: ", server_user)); + let from_conduit = pdu.sender == server_user; + + if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { db.admin.process_message(body.to_string()); } } From bfbefb0cd2e90549c41247b407d40ad9e1b128b8 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Mon, 7 Feb 2022 12:55:21 +0100 Subject: [PATCH 1000/1727] Display actual error message from TokioAsyncResolver, if any --- src/database/globals.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index decd84c..f38f32c 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,11 @@ impl Globals { globals, config, keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { + error!( + "Failed to set up trust dns resolver with system config: {}", + e + ); Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), From f2b8aa28f303c49a57a282d94c1a003cbbc403eb Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:26:56 +0100 Subject: [PATCH 1001/1727] feat: add a line with the help command to the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9bbfd4e..664aabb 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From a6976e6d2d3878c93a42799d832fe34016e29860 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:40:51 +0100 Subject: [PATCH 1002/1727] feat: add 'available' to the help command line in the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 664aabb..eae8aa5 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From f602d32aaa0e4fbc2b5d9eb0d0d89d04bdca21d7 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:51:28 +0100 Subject: [PATCH 1003/1727] feat: add the actual server name to the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eae8aa5..20fb42a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), + format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n", db.globals.server_name()).to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From 583ec51f9fd5590d8a6982b91bd8c7b878903b3f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 01:55:53 +0100 Subject: [PATCH 1004/1727] Remove unnecessary use of event enum --- src/client_server/sync.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7cfea5a..2b814f5 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - AnySyncEphemeralRoomEvent, EventType, + EventType, }, serde::Raw, DeviceId, RoomId, UserId, @@ -656,10 +656,8 @@ async fn sync_helper( if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.typings_all(&room_id)?, - )) - .expect("event is valid, we just created it"), + &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?) + .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), ); From 5db4c001d1e385513bb27f1484e1eaf5b1497374 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 01:58:36 +0100 Subject: [PATCH 1005/1727] Remove another unnecessary use of an event enum --- src/client_server/read_marker.rs | 10 +++++----- src/database/rooms/edus.rs | 5 +++-- src/server_server.rs | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 60aa4ce..502a612 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::{read_marker::set_read_marker, receipt::create_receipt}, }, - events::{AnyEphemeralRoomEvent, EventType}, + events::EventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, }; @@ -73,10 +73,10 @@ pub async fn set_read_marker_route( db.rooms.edus.readreceipt_update( sender_user, &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), - }), + }, &db.globals, )?; } @@ -130,10 +130,10 @@ pub async fn create_receipt_route( db.rooms.edus.readreceipt_update( sender_user, &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), - }), + }, &db.globals, )?; diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 289a00a..118efd4 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -2,7 +2,8 @@ use crate::{database::abstraction::Tree, utils, Error, Result}; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, - AnyEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::ReceiptEvent, + SyncEphemeralRoomEvent, }, presence::PresenceState, serde::Raw, @@ -31,7 +32,7 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - event: AnyEphemeralRoomEvent, + event: ReceiptEvent, globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/server_server.rs b/src/server_server.rs index 2c682f6..a39b3a5 100644 --- a/src/server_server.rs +++ 
b/src/server_server.rs @@ -46,7 +46,7 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, - AnyEphemeralRoomEvent, EventType, + EventType, }, int, receipt::ReceiptType, @@ -795,10 +795,10 @@ pub async fn send_transaction_message_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event_id.to_owned(), receipts); - let event = AnyEphemeralRoomEvent::Receipt(ReceiptEvent { + let event = ReceiptEvent { content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), - }); + }; db.rooms.edus.readreceipt_update( &user_id, &room_id, From 1f7b3fa4acd13ea4962ba93c5bc96bd8aa9f44b3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 11:51:31 +0100 Subject: [PATCH 1006/1727] Port from Rocket to axum --- .gitignore | 1 - Cargo.lock | 775 ++++++++-------------------- Cargo.toml | 20 +- README.md | 2 +- conduit-example.toml | 2 +- debian/postinst | 2 +- docker-compose.yml | 2 +- docker/docker-compose.traefik.yml | 2 +- src/client_server/account.rs | 26 - src/client_server/alias.rs | 15 - src/client_server/backup.rs | 59 --- src/client_server/capabilities.rs | 7 - src/client_server/config.rs | 25 - src/client_server/context.rs | 7 - src/client_server/device.rs | 22 - src/client_server/directory.rs | 19 - src/client_server/filter.rs | 11 - src/client_server/keys.rs | 29 +- src/client_server/media.rs | 23 +- src/client_server/membership.rs | 47 -- src/client_server/message.rs | 11 - src/client_server/mod.rs | 17 - src/client_server/presence.rs | 11 - src/client_server/profile.rs | 23 - src/client_server/push.rs | 43 -- src/client_server/read_marker.rs | 11 - src/client_server/redact.rs | 6 - src/client_server/report.rs | 11 +- src/client_server/room.rs | 19 - src/client_server/search.rs | 6 - src/client_server/session.rs | 22 +- src/client_server/state.rs | 23 - src/client_server/sync.rs | 7 - src/client_server/tag.rs | 15 - src/client_server/thirdparty.rs | 14 +- src/client_server/to_device.rs | 7 - src/client_server/typing.rs | 9 +- src/client_server/unversioned.rs | 12 +- src/client_server/user_directory.rs | 7 - src/client_server/voip.rs | 9 +- src/config.rs | 19 +- src/database.rs | 48 +- src/database/admin.rs | 51 +- src/database/sending.rs | 19 +- src/error.rs | 45 +- src/lib.rs | 14 - src/main.rs | 515 ++++++++++-------- src/ruma_wrapper.rs | 362 +------------ src/ruma_wrapper/axum.rs | 338 ++++++++++++ src/server_server.rs | 113 +--- src/utils.rs | 39 +- tests/Complement.Dockerfile | 7 +- 52 files changed, 1064 insertions(+), 1885 deletions(-) create mode 100644 src/ruma_wrapper/axum.rs diff --git a/.gitignore b/.gitignore index 1f5f395..f5e9505 100644 --- a/.gitignore +++ b/.gitignore @@ -57,7 +57,6 @@ $RECYCLE.BIN/ *.lnk # Conduit -Rocket.toml conduit.toml conduit.db diff --git a/Cargo.lock b/Cargo.lock index 632b4ce..f84c982 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + [[package]] name = "adler32" version = "1.2.0" @@ -28,6 +34,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -56,24 +77,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-stream" -version = "0.3.2" +name = "async-compression" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" dependencies = [ - "async-stream-impl", + "brotli", + "flate2", "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "memchr", + "pin-project-lite", + "tokio", ] [[package]] @@ -96,17 +110,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.0.1" @@ -114,10 +117,50 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] -name = "base-x" -version = "0.2.8" +name = "axum" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "310a147401c66e79fc78636e4db63ac68cd6acb9ece056de806ea173a15bce32" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "headers", + "http", + "http-body", + "hyper", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca6c0b218388a7ed6a8d25e94f7dea5498daaa4fd8c711fb3ff166041b06fda" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", +] [[package]] name = "base64" @@ -131,12 +174,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "binascii" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" - [[package]] name = "bincode" version = "1.3.3" @@ -191,6 +228,27 @@ dependencies = [ "generic-array", ] +[[package]] +name = "brotli" +version = "3.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -254,7 +312,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.43", + "time", "winapi", ] @@ -306,18 +364,21 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.3.0" dependencies = [ + "axum", "base64 0.13.0", "bytes", "clap", "crossbeam", "directories", + "figment", + "futures-util", "heed", "hmac", "http", + "hyper", "image", "jsonwebtoken", "lru-cache", - "maplit", "num_cpus", "opentelemetry", "opentelemetry-jaeger", @@ -327,7 +388,6 @@ dependencies = [ "regex", "reqwest", "ring", - "rocket", "rocksdb", "ruma", "rusqlite", @@ -343,9 +403,11 @@ dependencies = [ "tikv-jemalloc-ctl", "tikv-jemallocator", "tokio", + "tower", + "tower-http", "tracing", "tracing-flame", - "tracing-subscriber 0.2.25", + "tracing-subscriber", "trust-dns-resolver", ] @@ -355,29 +417,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "cookie" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" -dependencies = [ - "percent-encoding", - "time 0.2.27", - "version_check", -] - [[package]] name = "cpufeatures" version = "0.2.1" @@ -546,39 +591,6 @@ dependencies = [ "const-oid", ] -[[package]] -name = "devise" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c7580b072f1c8476148f16e0a0d5dedddab787da98d86c5082c5e9ed8ab595" -dependencies = [ - "devise_codegen", - "devise_core", -] - -[[package]] -name = "devise_codegen" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "123c73e7a6e51b05c75fe1a1b2f4e241399ea5740ed810b0e3e6cacd9db5e7b2" -dependencies = [ - "devise_core", - "quote", -] - -[[package]] -name = "devise_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841ef46f4787d9097405cac4e70fb8644fc037b526e8c14054247c0263c400d0" -dependencies = [ - "bitflags", - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn", -] - [[package]] name = "digest" version = "0.9.0" @@ -608,12 +620,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "discard" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "ed25519" version = "1.3.0" @@ -676,15 +682,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" -[[package]] -name = "fastrand" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" -dependencies = [ - "instant", -] - [[package]] name = "figment" version = "0.10.6" @@ -699,6 +696,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "flate2" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "miniz_oxide 0.4.4", +] + [[package]] name = "fnv" version = "1.0.7" @@ -829,19 +838,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generator" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1d9279ca822891c1a4dae06d185612cf8fc6acfe5dff37781b41297811b12ee" -dependencies = [ - "cc", - "libc", - "log", - "rustversion", - "winapi", -] - [[package]] name = "generic-array" version = "0.14.5" @@ -927,6 +923,31 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "headers" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" +dependencies = [ + "base64 0.13.0", + "bitflags", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha-1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http", +] + [[package]] name = "heck" version = "0.3.3" @@ -1030,6 +1051,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.5.1" @@ -1074,9 +1101,9 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.2", + "rustls", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls", ] [[package]] @@ -1320,21 +1347,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "loom" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc5c7d328e32cc4954e8e01193d7f0ef5ab257b5090b70a964e099a36034309" -dependencies = [ - "cfg-if 1.0.0", - "generator", - "scoped-tls", - "serde", - "serde_json", - "tracing", - "tracing-subscriber 0.3.6", -] - [[package]] name = "lru-cache" version = "0.1.2" @@ -1365,21 +1377,18 @@ dependencies = [ "regex-automata", ] -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata", -] - [[package]] name = "matches" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" 
+[[package]] +name = "matchit" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58b6f41fdfbec185dd3dff58b51e323f5bc61692c0de38419a957b0dcfccca3c" + [[package]] name = "memchr" version = "2.4.1" @@ -1416,6 +1425,16 @@ dependencies = [ "adler32", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + [[package]] name = "mio" version = "0.7.14" @@ -1438,26 +1457,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "multer" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8f35e687561d5c1667590911e6698a8cb714a134a7505718a182e7bc9d3836" -dependencies = [ - "bytes", - "encoding_rs", - "futures-util", - "http", - "httparse", - "log", - "memchr", - "mime", - "spin 0.9.2", - "tokio", - "tokio-util", - "version_check", -] - [[package]] name = "nom" version = "7.1.0" @@ -1774,7 +1773,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide", + "miniz_oxide 0.3.7", ] [[package]] @@ -1817,12 +1816,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - [[package]] name = "proc-macro2" version = "1.0.36" @@ -1960,26 +1953,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "regex" version = "1.5.4" @@ -2006,15 +1979,6 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.9" @@ -2037,13 +2001,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls 0.20.2", + "rustls", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls", "tokio-socks", "url", "wasm-bindgen", @@ -2072,95 +2036,12 @@ dependencies = [ "cc", "libc", "once_cell", - "spin 0.5.2", + "spin", "untrusted", "web-sys", "winapi", ] -[[package]] -name = "rocket" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a71c18c42a0eb15bf3816831caf0dad11e7966f2a41aaf486a701979c4dd1f2" -dependencies = [ - "async-stream", - "async-trait", - "atomic", - "atty", - "binascii", - "bytes", - "either", - "figment", - "futures", - "indexmap", - "log", - "memchr", - "multer", - "num_cpus", - "parking_lot", - "pin-project-lite", - "rand 0.8.4", - "ref-cast", - "rocket_codegen", - "rocket_http", - "serde", - "state", - "tempfile", - "time 0.2.27", - "tokio", - 
"tokio-stream", - "tokio-util", - "ubyte", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_codegen" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66f5fa462f7eb958bba8710c17c5d774bbbd59809fa76fb1957af7e545aea8bb" -dependencies = [ - "devise", - "glob", - "indexmap", - "proc-macro2", - "quote", - "rocket_http", - "syn", - "unicode-xid", -] - -[[package]] -name = "rocket_http" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c8b7d512d2fcac2316ebe590cde67573844b99e6cc9ee0f53375fa16e25ebd" -dependencies = [ - "cookie", - "either", - "http", - "hyper", - "indexmap", - "log", - "memchr", - "mime", - "parking_lot", - "pear", - "percent-encoding", - "pin-project-lite", - "ref-cast", - "serde", - "smallvec", - "stable-pattern", - "state", - "time 0.2.27", - "tokio", - "tokio-rustls 0.22.0", - "uncased", -] - [[package]] name = "rocksdb" version = "0.17.0" @@ -2465,28 +2346,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.0", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - [[package]] name = "rustls" version = "0.20.2" @@ -2495,8 +2354,8 @@ checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] @@ -2508,40 +2367,18 @@ dependencies = [ "base64 0.13.0", ] -[[package]] -name = "rustversion" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" - [[package]] name = "ryu" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -2552,21 +2389,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" version = "1.0.134" @@ -2635,21 +2457,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" version = "0.9.9" @@ -2760,12 +2567,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spin" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" - [[package]] name = "spki" version = "0.4.1" @@ -2775,82 +2576,6 @@ dependencies = [ "der", ] -[[package]] -name = "stable-pattern" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4564168c00635f88eaed410d5efa8131afa8d8699a612c80c455a0ba05c21045" -dependencies = [ - "memchr", -] - -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "state" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cf4f5369e6d3044b5e365c9690f451516ac8f0954084622b49ea3fde2f6de5" -dependencies = [ - "loom", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - [[package]] name = "subtle" version = "2.4.1" @@ -2868,6 +2593,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synchronoise" version = "1.0.0" @@ -2889,20 +2620,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if 1.0.0", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - [[package]] name = "textwrap" version = "0.14.2" @@ -3002,44 +2719,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - [[package]] name = "tinyvec" version = "1.5.1" @@ -3084,26 +2763,15 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - [[package]] name = "tokio-rustls" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" dependencies = [ - "rustls 0.20.2", + "rustls", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -3152,6 +2820,52 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03650267ad175b51c47d02ed9547fc7d4ba2c7e5cb76df0bed67edd1825ae297" +dependencies = [ + "async-compression", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tokio", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.1" @@ -3165,6 +2879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -3198,7 +2913,7 @@ checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -3231,7 +2946,7 @@ dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers 0.0.1", + "matchers", 
"regex", "serde", "serde_json", @@ -3244,24 +2959,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "tracing-subscriber" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77be66445c4eeebb934a7340f227bfe7b338173d3f8c00a60a5a58005c9faecf" -dependencies = [ - "ansi_term", - "lazy_static", - "matchers 0.1.0", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - [[package]] name = "trust-dns-proto" version = "0.20.3" @@ -3319,22 +3016,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" -[[package]] -name = "ubyte" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" -dependencies = [ - "serde", -] - [[package]] name = "uncased" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" dependencies = [ - "serde", "version_check", ] @@ -3514,16 +3201,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -3540,7 +3217,7 @@ version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7f84343..5fb75dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,10 +13,11 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -# Used to handle requests -# TODO: This can become optional as soon as proper configs are supported -# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests -rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests +# Web framework +axum = { version = "0.4.4", features = ["headers"], optional = true } +hyper = "0.14.16" +tower = { version = "0.4.11", features = ["util"] } +tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -24,8 +25,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -# Used for long polling and federation sender, 
should be the same as rocket::tokio -tokio = "1.11.0" +# Async runtime and utilities +tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } @@ -33,7 +34,6 @@ persy = { version = "1.2" , optional = true, features=["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" -# Used for rocket<->ruma conversions http = "0.2.4" # Used to find data directory for default db path directories = "3.0.2" @@ -84,7 +84,9 @@ hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } -maplit = "1.0.2" +futures-util = { version = "0.3.19", default-features = false } +# Used for reading the configuration from conduit.toml & environment variables +figment = { version = "0.10.6", features = ["env", "toml"] } tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } @@ -98,7 +100,7 @@ backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] -conduit_bin = [] # TODO: add rocket to this when it is optional +conduit_bin = ["axum"] [[bin]] name = "conduit" diff --git a/README.md b/README.md index a4f0929..45b16fd 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individual Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust -- Rocket: A flexible web framework +- axum: A modular web framework #### Donate diff --git a/conduit-example.toml b/conduit-example.toml index f157807..c22c862 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -42,7 +42,7 @@ allow_registration = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/debian/postinst b/debian/postinst index 6bd1a3a..29a9367 100644 --- a/debian/postinst +++ b/debian/postinst @@ -74,7 +74,7 @@ allow_registration = true #allow_jaeger = false #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,_=off,sled=off" #workers = 4 # default: cpu core count * 2 # The total amount of memory that the database will use. 
diff --git a/docker-compose.yml b/docker-compose.yml index 530fc19..88d5c3f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 392b382..f625080 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/src/client_server/account.rs b/src/client_server/account.rs index a210e8a..bf1a74d 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -27,8 +27,6 @@ use serde_json::value::to_raw_value; use tracing::{info, warn}; use register::RegistrationKind; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; const GUEST_NAME_LENGTH: usize = 10; @@ -42,10 +40,6 @@ const GUEST_NAME_LENGTH: usize = 10; /// - No user or appservice on this server already claimed this username /// /// Note: This will not reserve the username, so the username might become invalid when trying to register -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/register/available", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: DatabaseGuard, @@ -90,10 +84,6 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/register", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn register_route( db: DatabaseGuard, @@ -279,10 +269,6 @@ pub async fn register_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/password", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: DatabaseGuard, @@ -348,10 +334,6 @@ pub async fn change_password_route( /// Get user_id of the sender user. 
/// /// Note: Also works for Application Services -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/account/whoami", data = "") -)] #[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -371,10 +353,6 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult, ) -> ConduitResult { diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 129ac16..6e1b43e 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -12,16 +12,9 @@ use ruma::{ RoomAliasId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: DatabaseGuard, @@ -52,10 +45,6 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: DatabaseGuard, @@ -82,10 +71,6 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. /// /// - TODO: Suggest more servers to join via -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: DatabaseGuard, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index bbb8672..cc2d7c4 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -9,16 +9,9 @@ use ruma::api::client::{ }, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/room_keys/version", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: DatabaseGuard, @@ -37,10 +30,6 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: DatabaseGuard, @@ -58,10 +47,6 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: DatabaseGuard, @@ -89,10 +74,6 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: DatabaseGuard, @@ -121,10 +102,6 @@ pub async fn get_backup_route( /// Delete an existing key backup. 
/// /// - Deletes both information about the backup, as well as all key data related to the backup -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: DatabaseGuard, @@ -146,10 +123,6 @@ pub async fn delete_backup_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: DatabaseGuard, @@ -198,10 +171,6 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: DatabaseGuard, @@ -248,10 +217,6 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: DatabaseGuard, @@ -292,10 +257,6 @@ pub async fn add_backup_key_session_route( /// # `GET /_matrix/client/r0/room_keys/keys` /// /// Retrieves all keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: DatabaseGuard, @@ -311,10 +272,6 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: DatabaseGuard, @@ -332,10 +289,6 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: DatabaseGuard, @@ -357,10 +310,6 @@ pub async fn get_backup_key_session_route( /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: DatabaseGuard, @@ -382,10 +331,6 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. 
-#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, @@ -408,10 +353,6 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: DatabaseGuard, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index c69b7cb..8da6855 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -7,16 +7,9 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/capabilities` /// /// Get information on the supported feature set and other relevent capabilities of this server. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/capabilities", data = "<_body>") -)] #[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 0c668ff..0df0dec 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -13,16 +13,9 @@ use ruma::{ use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Sets some account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: DatabaseGuard, @@ -54,13 +47,6 @@ pub async fn set_global_account_data_route( /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - put( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( db: DatabaseGuard, @@ -92,10 +78,6 @@ pub async fn set_room_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: DatabaseGuard, @@ -118,13 +100,6 @@ pub async fn get_global_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. 
-#[cfg_attr( - feature = "conduit_bin", - get( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( db: DatabaseGuard, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 02148f4..1fbfee9 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -9,19 +9,12 @@ use ruma::{ use std::{collections::HashSet, convert::TryFrom}; use tracing::error; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/rooms/{roomId}/context` /// /// Allows loading room history around an event. /// /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: DatabaseGuard, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index f240f2e..82d1168 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -8,16 +8,10 @@ use ruma::api::client::{ }; use super::SESSION_ID_LENGTH; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, @@ -37,10 +31,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: DatabaseGuard, @@ -59,10 +49,6 @@ pub async fn get_device_route( /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: DatabaseGuard, @@ -94,10 +80,6 @@ pub async fn update_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: DatabaseGuard, @@ -157,10 +139,6 @@ pub async fn delete_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/delete_devices", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: DatabaseGuard, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 719d9af..06d7a27 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -29,18 +29,11 @@ use ruma::{ }; use tracing::{info, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; - /// # `POST /_matrix/client/r0/publicRooms` /// /// Lists the public rooms on this server. 
/// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, @@ -62,10 +55,6 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, @@ -96,10 +85,6 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. /// /// - TODO: Access control checks -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( db: DatabaseGuard, @@ -129,10 +114,6 @@ pub async fn set_room_visibility_route( /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` /// /// Gets the visibility of a given room in the room directory. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: DatabaseGuard, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index f8845f1..6c42edd 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -4,18 +4,11 @@ use ruma::api::client::{ r0::filter::{create_filter, get_filter}, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// /// Loads a filter that was previously created. /// /// - A user can only access their own filters -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/filter/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_filter_route( db: DatabaseGuard, @@ -33,10 +26,6 @@ pub async fn get_filter_route( /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// /// Creates a new filter to be used by other endpoints. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user/<_>/filter", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_filter_route( db: DatabaseGuard, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index e7aec26..9a7a4e7 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,6 +1,6 @@ use super::SESSION_ID_LENGTH; use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; -use rocket::futures::{prelude::*, stream::FuturesUnordered}; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::{ @@ -21,19 +21,12 @@ use ruma::{ use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/keys/upload` /// /// Publish end-to-end encryption keys for the sender device. /// /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: DatabaseGuard, @@ -80,10 +73,6 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. /// - The master and self-signing keys contain signatures that the user is allowed to see -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/query", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, @@ -105,10 +94,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/claim", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, @@ -126,10 +111,6 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. /// /// - Requires UIAA to verify password -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/device_signing/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: DatabaseGuard, @@ -190,10 +171,6 @@ pub async fn upload_signing_keys_route( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/signatures/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: DatabaseGuard, @@ -256,10 +233,6 @@ pub async fn upload_signatures_route( /// Gets a list of users who have updated their device identity keys since the previous sync token. /// /// - TODO: left users -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/keys/changes", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: DatabaseGuard, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index deea319..5eba17b 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -10,18 +10,15 @@ use ruma::api::client::{ }, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - const MXC_LENGTH: usize = 32; /// # `GET /_matrix/media/r0/config` /// /// Returns max upload size. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] -#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, _body))] pub async fn get_media_config_route( db: DatabaseGuard, + _body: Ruma, ) -> ConduitResult { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), @@ -35,10 +32,6 @@ pub async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/media/r0/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: DatabaseGuard, @@ -110,10 +103,6 @@ pub async fn get_remote_content( /// Load media from our server or over federation. 
/// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/download/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: DatabaseGuard, @@ -147,10 +136,6 @@ pub async fn get_content_route( /// Load media from our server or over federation, permitting desired filename. /// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/download/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_as_filename_route( db: DatabaseGuard, @@ -190,10 +175,6 @@ pub async fn get_content_as_filename_route( /// Load media thumbnail from our server or over federation. /// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: DatabaseGuard, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e855dba..c16065e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -36,19 +36,12 @@ use std::{ }; use tracing::{debug, error, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// /// Tries to join the sender user into a room. /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/join", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: DatabaseGuard, @@ -90,10 +83,6 @@ pub async fn join_room_by_id_route( /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, @@ -148,10 +137,6 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/leave", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: DatabaseGuard, @@ -169,10 +154,6 @@ pub async fn leave_room_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/invite", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: DatabaseGuard, @@ -192,10 +173,6 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/kick", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: DatabaseGuard, @@ -256,10 +233,6 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/ban", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: DatabaseGuard, @@ -331,10 +304,6 @@ pub async fn ban_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` /// /// Tries to send an unban event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/unban", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: DatabaseGuard, @@ -399,10 +368,6 @@ pub async fn unban_user_route( /// /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/forget", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: DatabaseGuard, @@ -420,10 +385,6 @@ pub async fn forget_room_route( /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/joined_rooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: DatabaseGuard, @@ -446,10 +407,6 @@ pub async fn joined_rooms_route( /// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). /// /// - Only works if the user is currently joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/members", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: DatabaseGuard, @@ -483,10 +440,6 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: DatabaseGuard, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index cf4f0cb..4fb8771 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -11,9 +11,6 @@ use std::{ sync::Arc, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` /// /// Send a message event into the room. 
@@ -21,10 +18,6 @@ use rocket::{get, put}; /// - Is a NOOP if the txn id was already used before and returns the same event id again /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: DatabaseGuard, @@ -110,10 +103,6 @@ pub async fn send_message_event_route( /// /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/messages", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: DatabaseGuard, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 115ddaf..a7241b0 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -62,23 +62,6 @@ pub use unversioned::*; pub use user_directory::*; pub use voip::*; -#[cfg(not(feature = "conduit_bin"))] -use super::State; -#[cfg(feature = "conduit_bin")] -use { - crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device, -}; - pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; - -/// # `OPTIONS` -/// -/// Web clients use this to get CORS headers. -#[cfg(feature = "conduit_bin")] -#[options("/<_..>")] -#[tracing::instrument] -pub async fn options_route() -> ConduitResult { - Ok(send_event_to_device::Response {}.into()) -} diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index cdc1e1f..0d58ebf 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -2,16 +2,9 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; use std::time::Duration; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// /// Sets the presence state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/presence/<_>/status", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: DatabaseGuard, @@ -54,10 +47,6 @@ pub async fn set_presence_route( /// Gets the presence state of the given user. /// /// - Only works if you share a room with the user -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/presence/<_>/status", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_presence_route( db: DatabaseGuard, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index ef58a98..bb13b44 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -14,18 +14,11 @@ use ruma::{ use serde_json::value::to_raw_value; use std::sync::Arc; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/profile/{userId}/displayname` /// /// Updates the displayname. 
/// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: DatabaseGuard, @@ -124,10 +117,6 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. /// /// - If user is on another server: Fetches displayname over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: DatabaseGuard, @@ -163,10 +152,6 @@ pub async fn get_displayname_route( /// Updates the avatar_url and blurhash. /// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: DatabaseGuard, @@ -267,10 +252,6 @@ pub async fn set_avatar_url_route( /// Returns the avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches avatar_url and blurhash over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: DatabaseGuard, @@ -308,10 +289,6 @@ pub async fn get_avatar_url_route( /// Returns the displayname, avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches profile over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: DatabaseGuard, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index a8ba1a2..322cf89 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -12,16 +12,9 @@ use ruma::{ push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: DatabaseGuard, @@ -46,10 +39,6 @@ pub async fn get_pushrules_all_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: DatabaseGuard, @@ -103,10 +92,6 @@ pub async fn get_pushrule_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, @@ -204,10 +189,6 @@ pub async fn set_pushrule_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: DatabaseGuard, @@ -266,10 +247,6 @@ pub async fn get_pushrule_actions_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: DatabaseGuard, @@ -338,10 +315,6 @@ pub async fn set_pushrule_actions_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: DatabaseGuard, @@ -402,10 +375,6 @@ pub async fn get_pushrule_enabled_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: DatabaseGuard, @@ -479,10 +448,6 @@ pub async fn set_pushrule_enabled_route( /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: DatabaseGuard, @@ -546,10 +511,6 @@ pub async fn delete_pushrule_route( /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushers", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( db: DatabaseGuard, @@ -568,10 +529,6 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. /// /// - TODO: Handle `append` -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/pushers/set", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( db: DatabaseGuard, diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 502a612..c9480f0 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -10,19 +10,12 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::post; - /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// /// Sets different types of read markers. /// /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: DatabaseGuard, @@ -89,10 +82,6 @@ pub async fn set_read_marker_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` /// /// Sets private read marker and public read receipt EDU. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: DatabaseGuard, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 85de233..2b442fc 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -6,8 +6,6 @@ use ruma::{ events::{room::redaction::RoomRedactionEventContent, EventType}, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; use serde_json::value::to_raw_value; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` @@ -15,10 +13,6 @@ use serde_json::value::to_raw_value; /// Tries to send a redaction event into the room. /// /// - TODO: Handle txn id -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: DatabaseGuard, diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 032e446..441e33d 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,21 +1,14 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils::HtmlEscape, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, int, }; -#[cfg(feature = "conduit_bin")] -use rocket::{http::RawStr, post}; - /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` /// /// Reports an inappropriate event to homeserver admins /// -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn report_event_route( db: DatabaseGuard, @@ -70,7 +63,7 @@ pub async fn report_event_route( pdu.room_id, pdu.sender, body.score, - RawStr::new(&body.reason).html_escape() + HtmlEscape(&body.reason) ), )); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 7ea31d8..475c5b4 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -30,9 +30,6 @@ use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/createRoom` /// /// Creates a new room. @@ -49,10 +46,6 @@ use rocket::{get, post}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/createRoom", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: DatabaseGuard, @@ -425,10 +418,6 @@ pub async fn create_room_route( /// Gets a single event. /// /// - You have to currently be joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: DatabaseGuard, @@ -458,10 +447,6 @@ pub async fn get_room_event_route( /// Lists all aliases of the room. 
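The report.rs hunk above swaps Rocket's `RawStr::html_escape()` for a new `utils::HtmlEscape` wrapper when interpolating the user-supplied reason into the admin-room message. The wrapper itself is not part of this excerpt; a rough, self-contained sketch of how such a Display adapter can work (names and escaping rules here are assumptions, not the actual src/utils.rs code):

use std::fmt;

// Hypothetical stand-in for utils::HtmlEscape: escape HTML metacharacters
// while the value is formatted, so it can be used directly in format! strings.
struct HtmlEscape<'a>(&'a str);

impl fmt::Display for HtmlEscape<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for c in self.0.chars() {
            match c {
                '&' => f.write_str("&amp;")?,
                '<' => f.write_str("&lt;")?,
                '>' => f.write_str("&gt;")?,
                '"' => f.write_str("&quot;")?,
                '\'' => f.write_str("&#39;")?,
                _ => write!(f, "{}", c)?,
            }
        }
        Ok(())
    }
}

fn main() {
    // The reported reason can no longer inject markup into the admin room.
    println!("{}", HtmlEscape("<script>alert(1)</script>"));
}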
/// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/aliases", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_aliases_route( db: DatabaseGuard, @@ -496,10 +481,6 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/upgrade", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: DatabaseGuard, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index f492292..3f8a701 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,8 +1,6 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; -#[cfg(feature = "conduit_bin")] -use rocket::post; use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; @@ -11,10 +9,6 @@ use std::collections::BTreeMap; /// Searches rooms for messages. /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/search", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: DatabaseGuard, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 61e5519..264eac0 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -19,16 +19,14 @@ struct Claims { exp: usize, } -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `GET /_matrix/client/r0/login` /// /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -#[tracing::instrument] -pub async fn get_login_types_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_login_types_route( + _body: Ruma, +) -> ConduitResult { Ok( get_login_types::Response::new(vec![get_login_types::LoginType::Password( Default::default(), @@ -48,10 +46,6 @@ pub async fn get_login_types_route() -> ConduitResult /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/login", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn login_route( db: DatabaseGuard, @@ -173,10 +167,6 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn logout_route( db: DatabaseGuard, @@ -203,10 +193,6 @@ pub async fn logout_route( /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout/all", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: DatabaseGuard, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index c07d482..96b2184 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -19,9 +19,6 @@ use ruma::{ EventId, RoomId, UserId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// /// Sends a state event into the room. @@ -29,10 +26,6 @@ use rocket::{get, put}; /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: DatabaseGuard, @@ -63,10 +56,6 @@ pub async fn send_state_event_for_key_route( /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, @@ -103,10 +92,6 @@ pub async fn send_state_event_for_empty_key_route( /// Get all state events for a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: DatabaseGuard, @@ -155,10 +140,6 @@ pub async fn get_state_events_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: DatabaseGuard, @@ -211,10 +192,6 @@ pub async fn get_state_events_for_key_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2b814f5..6ba68b0 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -20,9 +20,6 @@ use std::{ use tokio::sync::watch::Sender; use tracing::error; -#[cfg(feature = "conduit_bin")] -use rocket::{get, tokio}; - /// # `GET /_matrix/client/r0/sync` /// /// Synchronize the client's state with the latest state on the server. 
@@ -57,10 +54,6 @@ use rocket::{get, tokio}; /// /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 42bad4c..cad3421 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -8,18 +8,11 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// /// Adds a tag to the room. /// /// - Inserts the tag into the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: DatabaseGuard, @@ -58,10 +51,6 @@ pub async fn update_tag_route( /// Deletes a tag from the room. /// /// - Removes the tag from the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: DatabaseGuard, @@ -97,10 +86,6 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: DatabaseGuard, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 4305902..d8b7972 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,19 +1,15 @@ -use crate::ConduitResult; +use crate::{ConduitResult, Ruma}; use ruma::api::client::r0::thirdparty::get_protocols; -#[cfg(feature = "conduit_bin")] -use rocket::get; use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// /// TODO: Fetches all metadata about protocols supported by the homeserver. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/thirdparty/protocols") -)] -#[tracing::instrument] -pub async fn get_protocols_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_protocols_route( + _body: Ruma, +) -> ConduitResult { // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e0aa9e9..1269118 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -10,16 +10,9 @@ use ruma::{ to_device::DeviceIdOrAllDevices, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; - /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. 
-#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: DatabaseGuard, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 15e74b3..3a61c58 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -2,18 +2,11 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; -#[cfg(feature = "conduit_bin")] -use rocket::put; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn create_typing_event_route( +pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index ea685b4..8b1b66f 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,11 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use crate::ConduitResult; +use crate::{ConduitResult, Ruma}; use ruma::api::client::unversioned::get_supported_versions; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/versions` /// /// Get the versions of the specification and unstable features supported by this server. @@ -16,9 +13,10 @@ use rocket::get; /// /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] -#[tracing::instrument] -pub async fn get_supported_versions_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_supported_versions_route( + _body: Ruma, +) -> ConduitResult { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index cfcb9bb..c923cee 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,18 +1,11 @@ use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use ruma::api::client::r0::user_directory::search_users; -#[cfg(feature = "conduit_bin")] -use rocket::post; - /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. /// /// - TODO: Hide users that are not in any public rooms? -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user_directory/search", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: DatabaseGuard, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 66a85f0..6abebdc 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -7,20 +7,13 @@ use std::time::{Duration, SystemTime}; type HmacSha1 = Hmac; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/voip/turnServer", data = "") -)] #[tracing::instrument(skip(body, db))] pub async fn turn_server_route( - body: Ruma, db: DatabaseGuard, + body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/config.rs b/src/config.rs index 4c0fcc2..48ac981 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,7 @@ -use std::collections::BTreeMap; +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr}, +}; use ruma::ServerName; use serde::{de::IgnoredAny, Deserialize}; @@ -10,6 +13,10 @@ use self::proxy::ProxyConfig; #[derive(Clone, Debug, Deserialize)] pub struct Config { + #[serde(default = "default_address")] + pub address: IpAddr, + #[serde(default = "default_port")] + pub port: u16, pub server_name: Box, #[serde(default = "default_database_backend")] pub database_backend: String, @@ -90,6 +97,14 @@ fn true_fn() -> bool { true } +fn default_address() -> IpAddr { + Ipv4Addr::LOCALHOST.into() +} + +fn default_port() -> u16 { + 8000 +} + fn default_database_backend() -> String { "sqlite".to_owned() } @@ -123,7 +138,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() + "info,state_res=warn,_=off,sled=off".to_owned() } fn default_turn_ttl() -> u64 { diff --git a/src/database.rs b/src/database.rs index 2b1671c..9eb8bd5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -13,16 +13,12 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; +use self::admin::create_admin_room; use crate::{utils, Config, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; +use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; -use rocket::{ - futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, - outcome::{try_outcome, IntoOutcome}, - request::{FromRequest, Request}, - Shutdown, State, -}; use ruma::{DeviceId, EventId, RoomId, UserId}; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -33,11 +29,9 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; +use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -use self::admin::create_admin_room; - pub struct Database { _db: Arc, pub globals: globals::Globals, @@ -151,8 +145,8 @@ impl Database { eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); } - let (admin_sender, admin_receiver) = mpsc::unbounded(); - let (sending_sender, sending_receiver) = mpsc::unbounded(); + let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); + let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); let db = Arc::new(TokioRwLock::from(Self { _db: builder.clone(), @@ -764,14 +758,9 @@ impl Database { } #[cfg(feature = "conduit_bin")] - pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { - tokio::spawn(async move { - shutdown.await; - - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - - db.read().await.globals.rotate.fire(); - }); + pub async fn on_shutdown(db: Arc>) { + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + db.read().await.globals.rotate.fire(); } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { @@ -948,14 +937,23 @@ impl Deref for DatabaseGuard { } } -#[rocket::async_trait] -impl<'r> FromRequest<'r> for DatabaseGuard { - type Error = (); +#[cfg(feature = "conduit_bin")] +#[axum::async_trait] +impl axum::extract::FromRequest for DatabaseGuard +where + B: Send, +{ + type Rejection = axum::extract::rejection::ExtensionRejection; - async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { - let db = try_outcome!(req.guard::<&State>>>().await); + async fn from_request( + req: &mut axum::extract::RequestParts, + ) -> Result { + use axum::extract::Extension; - Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(()) + let Extension(db): Extension>> = + Extension::from_request(req).await?; + + Ok(DatabaseGuard(db.read_owned().await)) } } diff --git a/src/database/admin.rs b/src/database/admin.rs index 9bbfd4e..e4b7e0f 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,34 +1,41 @@ -use std::{collections::BTreeMap, convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use crate::{ error::{Error, Result}, pdu::PduBuilder, - server_server, Database, PduEvent, + server_server, + utils::HtmlEscape, + Database, PduEvent, }; use clap::Parser; use regex::Regex; -use rocket::{ - futures::{channel::mpsc, stream::StreamExt}, - http::RawStr, -}; use ruma::{ - events::room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, + events::{ + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, + EventType, }, - events::{room::message::RoomMessageEventContent, EventType}, identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, }; use serde_json::value::to_raw_value; -use tokio::sync::{MutexGuard, RwLock, 
RwLockReadGuard}; +use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +#[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), SendMessage(RoomMessageEventContent), @@ -91,7 +98,7 @@ impl Admin { loop { tokio::select! { - Some(event) = receiver.next() => { + Some(event) = receiver.recv() => { let guard = db.read().await; let mutex_state = Arc::clone( guard.globals @@ -123,13 +130,13 @@ impl Admin { pub fn process_message(&self, room_message: String) { self.sender - .unbounded_send(AdminRoomEvent::ProcessMessage(room_message)) + .send(AdminRoomEvent::ProcessMessage(room_message)) .unwrap(); } pub fn send_message(&self, message_content: RoomMessageEventContent) { self.sender - .unbounded_send(AdminRoomEvent::SendMessage(message_content)) + .send(AdminRoomEvent::SendMessage(message_content)) .unwrap(); } } @@ -405,7 +412,7 @@ fn process_admin_command( } else { "PDU was accepted" }, - RawStr::new(&json_text).html_escape() + HtmlEscape(&json_text) ), ) } diff --git a/src/database/sending.rs b/src/database/sending.rs index 4a03285..2d64be1 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -9,11 +9,8 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ring::digest; -use rocket::futures::{ - channel::mpsc, - stream::{FuturesUnordered, StreamExt}, -}; use ruma::{ api::{ appservice, @@ -33,7 +30,7 @@ use ruma::{ }; use tokio::{ select, - sync::{RwLock, Semaphore}, + sync::{mpsc, RwLock, Semaphore}, }; use tracing::{error, warn}; @@ -170,7 +167,7 @@ impl Sending { Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) }) .take(30) - .collect::<>(); + .collect(); // TODO: find edus @@ -207,7 +204,7 @@ impl Sending { } }; }, - Some((key, value)) = receiver.next() => { + Some((key, value)) = receiver.recv() => { if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { let guard = db.read().await; @@ -417,7 +414,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); + self.sender.send((key, vec![])).unwrap(); Ok(()) } @@ -433,7 +430,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); - self.sender.unbounded_send((key.clone(), vec![])).unwrap(); + self.sender.send((key.clone(), vec![])).unwrap(); (key, Vec::new()) }); @@ -454,7 +451,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(&id.to_be_bytes()); self.servernameevent_data.insert(&key, &serialized)?; - self.sender.unbounded_send((key, serialized)).unwrap(); + self.sender.send((key, serialized)).unwrap(); Ok(()) } @@ -466,7 +463,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); + self.sender.send((key, vec![])).unwrap(); Ok(()) } diff --git a/src/error.rs b/src/error.rs index 5ffe48c..817ef50 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,27 +1,20 @@ +use std::convert::Infallible; + +use http::StatusCode; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, - r0::uiaa::UiaaInfo, + r0::uiaa::{UiaaInfo, UiaaResponse}, }, ServerName, }; use thiserror::Error; -use tracing::warn; +use tracing::{error, warn}; #[cfg(feature = "persy")] use persy::PersyError; -#[cfg(feature = "conduit_bin")] -use { - crate::RumaResponse, - http::StatusCode, - 
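The admin.rs and sending.rs hunks above move the internal work queues from `rocket::futures`' mpsc channels to `tokio::sync::mpsc`: `unbounded()` becomes `unbounded_channel()`, `unbounded_send()` becomes the synchronous `send()`, and `StreamExt::next()` becomes `recv().await`. A minimal standalone sketch of that pattern (the event type is a made-up stand-in for AdminRoomEvent):

use tokio::sync::mpsc;

#[derive(Debug)]
enum Event {
    Ping,
}

#[tokio::main]
async fn main() {
    // unbounded_channel() replaces futures' mpsc::unbounded().
    let (sender, mut receiver) = mpsc::unbounded_channel();

    // send() is synchronous and replaces unbounded_send().
    sender.send(Event::Ping).unwrap();
    drop(sender); // close the channel so the receive loop below ends

    // recv().await replaces StreamExt::next() on the futures receiver.
    while let Some(event) = receiver.recv().await {
        println!("received {:?}", event);
    }
}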
rocket::{ - response::{self, Responder}, - Request, - }, - ruma::api::client::r0::uiaa::UiaaResponse, - tracing::error, -}; +use crate::RumaResponse; pub type Result = std::result::Result; @@ -81,6 +74,9 @@ pub enum Error { BadRequest(ErrorKind, &'static str), #[error("{0}")] Conflict(&'static str), // This is only needed for when a room alias already exists + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), } impl Error { @@ -139,16 +135,6 @@ impl Error { } } -#[cfg(feature = "conduit_bin")] -impl<'r, 'o> Responder<'r, 'o> for Error -where - 'o: 'r, -{ - fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { - self.to_response().respond_to(r) - } -} - #[cfg(feature = "persy")] impl> From> for Error { fn from(err: persy::PE) -> Self { @@ -157,3 +143,16 @@ impl> From> for Error { } } } + +impl From for Error { + fn from(i: Infallible) -> Self { + match i {} + } +} + +#[cfg(feature = "conduit_bin")] +impl axum::response::IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + self.to_response().into_response() + } +} diff --git a/src/lib.rs b/src/lib.rs index 030dfc3..135ab85 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,8 +7,6 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::ops::Deref; - mod config; mod database; mod error; @@ -24,16 +22,4 @@ pub use config::Config; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use rocket::Config as RocketConfig; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - -pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); - -impl<'r, T: Send + Sync + 'static> Deref for State<'r, T> { - type Target = T; - - #[inline(always)] - fn deref(&self) -> &T { - self.0 - } -} diff --git a/src/main.rs b/src/main.rs index ea09dd5..3ab1294 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,24 +7,37 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::sync::Arc; +use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; -use maplit::hashset; -use opentelemetry::trace::{FutureExt, Tracer}; -use rocket::{ - catch, catchers, - figment::{ - providers::{Env, Format, Toml}, - Figment, - }, - routes, Request, +use axum::{ + extract::{FromRequest, MatchedPath}, + handler::Handler, + routing::{get, on, MethodFilter}, + Router, +}; +use figment::{ + providers::{Env, Format, Toml}, + Figment, +}; +use http::{ + header::{self, HeaderName}, + Method, +}; +use opentelemetry::trace::{FutureExt, Tracer}; +use ruma::{ + api::{IncomingRequest, Metadata}, + Outgoing, +}; +use tokio::{signal, sync::RwLock}; +use tower::ServiceBuilder; +use tower_http::{ + cors::{self, CorsLayer}, + trace::TraceLayer, + ServiceBuilderExt as _, }; -use ruma::api::client::error::ErrorKind; -use tokio::sync::RwLock; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate -pub use rocket::State; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use tikv_jemallocator::Jemalloc; @@ -33,160 +46,10 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { - rocket::custom(config) - .manage(data) - .mount( - "/", - routes![ - client_server::get_supported_versions_route, - client_server::get_register_available_route, - client_server::register_route, - client_server::get_login_types_route, - 
client_server::login_route, - client_server::whoami_route, - client_server::logout_route, - client_server::logout_all_route, - client_server::change_password_route, - client_server::deactivate_route, - client_server::third_party_route, - client_server::get_capabilities_route, - client_server::get_pushrules_all_route, - client_server::set_pushrule_route, - client_server::get_pushrule_route, - client_server::set_pushrule_enabled_route, - client_server::get_pushrule_enabled_route, - client_server::get_pushrule_actions_route, - client_server::set_pushrule_actions_route, - client_server::delete_pushrule_route, - client_server::get_room_event_route, - client_server::get_room_aliases_route, - client_server::get_filter_route, - client_server::create_filter_route, - client_server::set_global_account_data_route, - client_server::set_room_account_data_route, - client_server::get_global_account_data_route, - client_server::get_room_account_data_route, - client_server::set_displayname_route, - client_server::get_displayname_route, - client_server::set_avatar_url_route, - client_server::get_avatar_url_route, - client_server::get_profile_route, - client_server::set_presence_route, - client_server::get_presence_route, - client_server::upload_keys_route, - client_server::get_keys_route, - client_server::claim_keys_route, - client_server::create_backup_route, - client_server::update_backup_route, - client_server::delete_backup_route, - client_server::get_latest_backup_route, - client_server::get_backup_route, - client_server::add_backup_key_sessions_route, - client_server::add_backup_keys_route, - client_server::delete_backup_key_session_route, - client_server::delete_backup_key_sessions_route, - client_server::delete_backup_keys_route, - client_server::get_backup_key_session_route, - client_server::get_backup_key_sessions_route, - client_server::get_backup_keys_route, - client_server::set_read_marker_route, - client_server::create_receipt_route, - client_server::create_typing_event_route, - client_server::create_room_route, - client_server::redact_event_route, - client_server::report_event_route, - client_server::create_alias_route, - client_server::delete_alias_route, - client_server::get_alias_route, - client_server::join_room_by_id_route, - client_server::join_room_by_id_or_alias_route, - client_server::joined_members_route, - client_server::leave_room_route, - client_server::forget_room_route, - client_server::joined_rooms_route, - client_server::kick_user_route, - client_server::ban_user_route, - client_server::unban_user_route, - client_server::invite_user_route, - client_server::set_room_visibility_route, - client_server::get_room_visibility_route, - client_server::get_public_rooms_route, - client_server::get_public_rooms_filtered_route, - client_server::search_users_route, - client_server::get_member_events_route, - client_server::get_protocols_route, - client_server::send_message_event_route, - client_server::send_state_event_for_key_route, - client_server::send_state_event_for_empty_key_route, - client_server::get_state_events_route, - client_server::get_state_events_for_key_route, - client_server::get_state_events_for_empty_key_route, - client_server::sync_events_route, - client_server::get_context_route, - client_server::get_message_events_route, - client_server::search_events_route, - client_server::turn_server_route, - client_server::send_event_to_device_route, - client_server::get_media_config_route, - client_server::create_content_route, - client_server::get_content_as_filename_route, - 
client_server::get_content_route, - client_server::get_content_thumbnail_route, - client_server::get_devices_route, - client_server::get_device_route, - client_server::update_device_route, - client_server::delete_device_route, - client_server::delete_devices_route, - client_server::get_tags_route, - client_server::update_tag_route, - client_server::delete_tag_route, - client_server::options_route, - client_server::upload_signing_keys_route, - client_server::upload_signatures_route, - client_server::get_key_changes_route, - client_server::get_pushers_route, - client_server::set_pushers_route, - // client_server::third_party_route, - client_server::upgrade_room_route, - server_server::get_server_version_route, - server_server::get_server_keys_route, - server_server::get_server_keys_deprecated_route, - server_server::get_public_rooms_route, - server_server::get_public_rooms_filtered_route, - server_server::send_transaction_message_route, - server_server::get_event_route, - server_server::get_missing_events_route, - server_server::get_event_authorization_route, - server_server::get_room_state_route, - server_server::get_room_state_ids_route, - server_server::create_join_event_template_route, - server_server::create_join_event_v1_route, - server_server::create_join_event_v2_route, - server_server::create_invite_route, - server_server::get_devices_route, - server_server::get_room_information_route, - server_server::get_profile_information_route, - server_server::get_keys_route, - server_server::claim_keys_route, - ], - ) - .register( - "/", - catchers![ - not_found_catcher, - forbidden_catcher, - unknown_token_catcher, - missing_token_catcher, - bad_json_catcher - ], - ) -} - -#[rocket::main] +#[tokio::main] async fn main() { let raw_config = - Figment::from(default_config()) + Figment::new() .merge( Toml::file(Env::var("CONDUIT_CONFIG").expect( "The CONDUIT_CONFIG env var needs to be set. 
Example: /etc/conduit.toml", @@ -217,14 +80,7 @@ async fn main() { } }; - let rocket = setup_rocket(raw_config, Arc::clone(&db)) - .ignite() - .await - .unwrap(); - - Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; - - rocket.launch().await.unwrap(); + run_server(&config, db).await.unwrap(); }; if config.allow_jaeger { @@ -264,55 +120,282 @@ async fn main() { } } -#[catch(404)] -fn not_found_catcher(_: &Request<'_>) -> String { - "404 Not Found".to_owned() +async fn run_server(config: &Config, db: Arc>) -> hyper::Result<()> { + let listen_addr = SocketAddr::from((config.address, config.port)); + + let x_requested_with = HeaderName::from_static("x-requested-with"); + + let middlewares = ServiceBuilder::new() + .sensitive_headers([header::AUTHORIZATION]) + .layer( + TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { + let path = if let Some(path) = request.extensions().get::() { + path.as_str() + } else { + request.uri().path() + }; + + tracing::info_span!("http_request", %path) + }), + ) + .compression() + .layer( + CorsLayer::new() + .allow_origin(cors::any()) + .allow_methods([ + Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + ]) + .allow_headers([ + header::ORIGIN, + x_requested_with, + header::CONTENT_TYPE, + header::ACCEPT, + header::AUTHORIZATION, + ]) + .max_age(Duration::from_secs(86400)), + ) + .add_extension(db.clone()); + + axum::Server::bind(&listen_addr) + .serve(routes().layer(middlewares).into_make_service()) + .with_graceful_shutdown(shutdown_signal()) + .await?; + + // After serve exits and before exiting, shutdown the DB + Database::on_shutdown(db).await; + + Ok(()) } -#[catch(580)] -fn forbidden_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::Forbidden, "Forbidden.")) +fn routes() -> Router { + Router::new() + .ruma_route(client_server::get_supported_versions_route) + .ruma_route(client_server::get_register_available_route) + .ruma_route(client_server::register_route) + .ruma_route(client_server::get_login_types_route) + .ruma_route(client_server::login_route) + .ruma_route(client_server::whoami_route) + .ruma_route(client_server::logout_route) + .ruma_route(client_server::logout_all_route) + .ruma_route(client_server::change_password_route) + .ruma_route(client_server::deactivate_route) + .ruma_route(client_server::third_party_route) + .ruma_route(client_server::get_capabilities_route) + .ruma_route(client_server::get_pushrules_all_route) + .ruma_route(client_server::set_pushrule_route) + .ruma_route(client_server::get_pushrule_route) + .ruma_route(client_server::set_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_actions_route) + .ruma_route(client_server::set_pushrule_actions_route) + .ruma_route(client_server::delete_pushrule_route) + .ruma_route(client_server::get_room_event_route) + .ruma_route(client_server::get_room_aliases_route) + .ruma_route(client_server::get_filter_route) + .ruma_route(client_server::create_filter_route) + .ruma_route(client_server::set_global_account_data_route) + .ruma_route(client_server::set_room_account_data_route) + .ruma_route(client_server::get_global_account_data_route) + .ruma_route(client_server::get_room_account_data_route) + .ruma_route(client_server::set_displayname_route) + .ruma_route(client_server::get_displayname_route) + .ruma_route(client_server::set_avatar_url_route) + .ruma_route(client_server::get_avatar_url_route) + .ruma_route(client_server::get_profile_route) 
+ .ruma_route(client_server::set_presence_route) + .ruma_route(client_server::get_presence_route) + .ruma_route(client_server::upload_keys_route) + .ruma_route(client_server::get_keys_route) + .ruma_route(client_server::claim_keys_route) + .ruma_route(client_server::create_backup_route) + .ruma_route(client_server::update_backup_route) + .ruma_route(client_server::delete_backup_route) + .ruma_route(client_server::get_latest_backup_route) + .ruma_route(client_server::get_backup_route) + .ruma_route(client_server::add_backup_key_sessions_route) + .ruma_route(client_server::add_backup_keys_route) + .ruma_route(client_server::delete_backup_key_session_route) + .ruma_route(client_server::delete_backup_key_sessions_route) + .ruma_route(client_server::delete_backup_keys_route) + .ruma_route(client_server::get_backup_key_session_route) + .ruma_route(client_server::get_backup_key_sessions_route) + .ruma_route(client_server::get_backup_keys_route) + .ruma_route(client_server::set_read_marker_route) + .ruma_route(client_server::create_receipt_route) + .ruma_route(client_server::create_typing_event_route) + .ruma_route(client_server::create_room_route) + .ruma_route(client_server::redact_event_route) + .ruma_route(client_server::report_event_route) + .ruma_route(client_server::create_alias_route) + .ruma_route(client_server::delete_alias_route) + .ruma_route(client_server::get_alias_route) + .ruma_route(client_server::join_room_by_id_route) + .ruma_route(client_server::join_room_by_id_or_alias_route) + .ruma_route(client_server::joined_members_route) + .ruma_route(client_server::leave_room_route) + .ruma_route(client_server::forget_room_route) + .ruma_route(client_server::joined_rooms_route) + .ruma_route(client_server::kick_user_route) + .ruma_route(client_server::ban_user_route) + .ruma_route(client_server::unban_user_route) + .ruma_route(client_server::invite_user_route) + .ruma_route(client_server::set_room_visibility_route) + .ruma_route(client_server::get_room_visibility_route) + .ruma_route(client_server::get_public_rooms_route) + .ruma_route(client_server::get_public_rooms_filtered_route) + .ruma_route(client_server::search_users_route) + .ruma_route(client_server::get_member_events_route) + .ruma_route(client_server::get_protocols_route) + .ruma_route(client_server::send_message_event_route) + .ruma_route(client_server::send_state_event_for_key_route) + .ruma_route(client_server::send_state_event_for_empty_key_route) + .ruma_route(client_server::get_state_events_route) + .ruma_route(client_server::get_state_events_for_key_route) + .ruma_route(client_server::get_state_events_for_empty_key_route) + .route( + "/_matrix/client/r0/sync", + get(client_server::sync_events_route), + ) + .ruma_route(client_server::get_context_route) + .ruma_route(client_server::get_message_events_route) + .ruma_route(client_server::search_events_route) + .ruma_route(client_server::turn_server_route) + .ruma_route(client_server::send_event_to_device_route) + .ruma_route(client_server::get_media_config_route) + .ruma_route(client_server::create_content_route) + .ruma_route(client_server::get_content_route) + .ruma_route(client_server::get_content_as_filename_route) + .ruma_route(client_server::get_content_thumbnail_route) + .ruma_route(client_server::get_devices_route) + .ruma_route(client_server::get_device_route) + .ruma_route(client_server::update_device_route) + .ruma_route(client_server::delete_device_route) + .ruma_route(client_server::delete_devices_route) + .ruma_route(client_server::get_tags_route) + 
.ruma_route(client_server::update_tag_route) + .ruma_route(client_server::delete_tag_route) + .ruma_route(client_server::upload_signing_keys_route) + .ruma_route(client_server::upload_signatures_route) + .ruma_route(client_server::get_key_changes_route) + .ruma_route(client_server::get_pushers_route) + .ruma_route(client_server::set_pushers_route) + // .ruma_route(client_server::third_party_route) + .ruma_route(client_server::upgrade_room_route) + .ruma_route(server_server::get_server_version_route) + .route( + "/_matrix/key/v2/server", + get(server_server::get_server_keys_route), + ) + .route( + "/_matrix/key/v2/server/:key_id", + get(server_server::get_server_keys_deprecated_route), + ) + .ruma_route(server_server::get_public_rooms_route) + .ruma_route(server_server::get_public_rooms_filtered_route) + .ruma_route(server_server::send_transaction_message_route) + .ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_missing_events_route) + .ruma_route(server_server::get_event_authorization_route) + .ruma_route(server_server::get_room_state_route) + .ruma_route(server_server::get_room_state_ids_route) + .ruma_route(server_server::create_join_event_template_route) + .ruma_route(server_server::create_join_event_v1_route) + .ruma_route(server_server::create_join_event_v2_route) + .ruma_route(server_server::create_invite_route) + .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_room_information_route) + .ruma_route(server_server::get_profile_information_route) + .ruma_route(server_server::get_keys_route) + .ruma_route(server_server::claim_keys_route) } -#[catch(581)] -fn unknown_token_catcher() -> Result<()> { - Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown token.", - )) -} +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; -#[catch(582)] -fn missing_token_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::MissingToken, "Missing token.")) -} + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; -#[catch(583)] -fn bad_json_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) -} + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); -fn default_config() -> rocket::Config { - use rocket::config::{LogLevel, Shutdown, Sig}; - - rocket::Config { - // Disable rocket's logging to get only tracing-subscriber's log output - log_level: LogLevel::Off, - shutdown: Shutdown { - // Once shutdown is triggered, this is the amount of seconds before rocket - // will forcefully start shutting down connections, this gives enough time to /sync - // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. - grace: 35, - - // After the grace period, rocket starts shutting down connections, and waits at least this - // many seconds before forcefully shutting all of them down. - mercy: 10, - - #[cfg(unix)] - signals: hashset![Sig::Term, Sig::Int], - - ..Shutdown::default() - }, - ..rocket::Config::release_default() + tokio::select! 
{ + _ = ctrl_c => {}, + _ = terminate => {}, } } + +trait RouterExt { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static; +} + +impl RouterExt for Router { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static, + { + let meta = H::METADATA; + let method_filter = match meta.method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + _ => panic!(""), + }; + + self.route(meta.path, on(method_filter, handler)) + } +} + +pub trait RumaHandler: Handler { + const METADATA: Metadata; +} + +macro_rules! impl_ruma_handler { + ( $($ty:ident),* $(,)? ) => { + #[axum::async_trait] + #[allow(non_snake_case)] + impl RumaHandler<($($ty,)* Ruma,)> for F + where + Req: Outgoing, + Req::Incoming: IncomingRequest + Send, + F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, + Fut: Future::OutgoingResponse + >> + Send, + $( $ty: FromRequest + Send, )* + { + const METADATA: Metadata = Req::Incoming::METADATA; + } + }; +} + +impl_ruma_handler!(); +impl_ruma_handler!(T1); +impl_ruma_handler!(T1, T2); +impl_ruma_handler!(T1, T2, T3); +impl_ruma_handler!(T1, T2, T3, T4); +impl_ruma_handler!(T1, T2, T3, T4, T5); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1bd921d..12be79a 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ -use crate::{database::DatabaseGuard, Error}; +use crate::Error; use ruma::{ - api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, + api::client::r0::uiaa::UiaaResponse, identifiers::{DeviceId, UserId}, signatures::CanonicalJsonValue, Outgoing, ServerName, @@ -8,24 +8,9 @@ use ruma::{ use std::ops::Deref; #[cfg(feature = "conduit_bin")] -use { - crate::server_server, - rocket::{ - data::{self, ByteUnit, Data, FromData}, - http::Status, - outcome::Outcome::*, - response::{self, Responder}, - tokio::io::AsyncReadExt, - Request, - }, - ruma::api::{AuthScheme, IncomingRequest}, - std::collections::BTreeMap, - std::io::Cursor, - tracing::{debug, warn}, -}; +mod axum; -/// This struct converts rocket requests into ruma structs by converting them into http requests -/// first. 
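The `RouterExt::ruma_route` helper above translates each ruma request's `Metadata` (method and path) into a call to axum's `on(MethodFilter, handler)`. A minimal, self-contained sketch of that underlying axum API, with a made-up path and handler:

use axum::{
    routing::{on, MethodFilter},
    Router,
};

async fn versions() -> &'static str {
    "{\"versions\": []}"
}

// Because the method is an ordinary value here, one generic code path can
// register any HTTP verb at runtime, which is what lets ruma_route() work
// for every ruma request type.
fn route_with(method: MethodFilter) -> Router {
    Router::new().route("/_matrix/client/versions", on(method, versions))
}

fn main() {
    let _app = route_with(MethodFilter::GET);
}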
+/// Extractor for Ruma request structs pub struct Ruma { pub body: T::Incoming, pub sender_user: Option>, @@ -36,300 +21,6 @@ pub struct Ruma { pub from_appservice: bool, } -#[cfg(feature = "conduit_bin")] -#[rocket::async_trait] -impl<'a, T: Outgoing> FromData<'a> for Ruma -where - T::Incoming: IncomingRequest, -{ - type Error = (); - - #[tracing::instrument(skip(request, data))] - async fn from_data( - request: &'a Request<'_>, - data: Data<'a>, - ) -> data::Outcome<'a, Self, Self::Error> { - let metadata = T::Incoming::METADATA; - let db = request - .guard::() - .await - .expect("database was loaded"); - - // Get token from header or query value - let token = request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(7..)) // Split off "Bearer " - .or_else(|| request.query_value("access_token").and_then(|r| r.ok())); - - let limit = db.globals.max_request_size(); - let mut handle = data.open(ByteUnit::Byte(limit.into())); - let mut body = Vec::new(); - if handle.read_to_end(&mut body).await.is_err() { - // Client disconnected - // Missing Token - return Failure((Status::new(582), ())); - } - - let mut json_body = serde_json::from_slice::(&body).ok(); - - let (sender_user, sender_device, sender_servername, from_appservice) = if let Some(( - _id, - registration, - )) = db - .appservice - .all() - .unwrap() - .iter() - .find(|(_id, registration)| { - registration - .get("as_token") - .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token == Some(as_token)) - }) { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = request.query_value::("user_id").map_or_else( - || { - UserId::parse_with_server_name( - registration - .get("sender_localpart") - .unwrap() - .as_str() - .unwrap(), - db.globals.server_name(), - ) - .unwrap() - }, - |string| { - UserId::parse(string.expect("parsing to string always works")).unwrap() - }, - ); - - if !db.users.exists(&user_id).unwrap() { - // Forbidden - return Failure((Status::new(580), ())); - } - - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, None, true) - } - AuthScheme::ServerSignatures => (None, None, None, true), - AuthScheme::None => (None, None, None, true), - } - } else { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - if let Some(token) = token { - match db.users.find_from_token(token).unwrap() { - // Unknown Token - None => return Failure((Status::new(581), ())), - Some((user_id, device_id)) => ( - Some(user_id), - Some(Box::::from(device_id)), - None, - false, - ), - } - } else { - // Missing Token - return Failure((Status::new(582), ())); - } - } - AuthScheme::ServerSignatures => { - // Get origin from header - let x_matrix = match request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(9..)) // Split off "X-Matrix " and parse the rest - .map(|s| { - s.split_terminator(',') - .map(|field| { - let mut splits = field.splitn(2, '='); - (splits.next(), splits.next().map(|s| s.trim_matches('"'))) - }) - .collect::>() - }) { - Some(t) => t, - None => { - warn!("No Authorization header"); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let origin_str = match x_matrix.get(&Some("origin")) { - Some(Some(o)) => *o, - _ => { - warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let origin = match ServerName::parse(origin_str) { - Ok(s) => s, - _ => { - warn!( 
- "Invalid server name in X-Matrix header origin field: {:?}", - x_matrix - ); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let key = match x_matrix.get(&Some("key")) { - Some(Some(k)) => *k, - _ => { - warn!("Invalid X-Matrix header key field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let sig = match x_matrix.get(&Some("sig")) { - Some(Some(s)) => *s, - _ => { - warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut request_map = BTreeMap::::new(); - - if let Some(json_body) = &json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - request_map.insert( - "method".to_owned(), - CanonicalJsonValue::String(request.method().to_string()), - ); - request_map.insert( - "uri".to_owned(), - CanonicalJsonValue::String(request.uri().to_string()), - ); - request_map.insert( - "origin".to_owned(), - CanonicalJsonValue::String(origin.as_str().to_owned()), - ); - request_map.insert( - "destination".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - let mut origin_signatures = BTreeMap::new(); - origin_signatures - .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); - - let mut signatures = BTreeMap::new(); - signatures.insert( - origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - ); - - request_map.insert( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ); - - let keys = - match server_server::fetch_signing_keys(&db, &origin, vec![key.to_owned()]) - .await - { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert(origin.as_str().to_owned(), keys); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(origin), false), - Err(e) => { - warn!( - "Failed to verify json request from {}: {}\n{:?}", - origin, e, request_map - ); - - if request.uri().to_string().contains('@') { - warn!("Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)"); - } - - // Forbidden - return Failure((Status::new(580), ())); - } - } - } - AuthScheme::None => (None, None, None, false), - } - }; - - let mut http_request = http::Request::builder() - .uri(request.uri().to_string()) - .method(&*request.method().to_string()); - for header in request.headers().iter() { - http_request = http_request.header(header.name.as_str(), &*header.value); - } - - if let Some(json_body) = json_body.as_mut().and_then(|val| val.as_object_mut()) { - let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid") - }); - - if let Some(CanonicalJsonValue::Object(initial_request)) = json_body - .get("auth") - .and_then(|auth| auth.as_object()) - .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) - .and_then(|session| { - db.uiaa.get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - }) - { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } - } - body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); - } - - let http_request = http_request.body(&*body).unwrap(); - debug!("{:?}", http_request); - match ::try_from_http_request(http_request) { - Ok(t) => Success(Ruma { - body: t, - sender_user, - sender_device, - sender_servername, - from_appservice, - json_body, - }), - Err(e) => { - warn!("{:?}", e); - // Bad Json - Failure((Status::new(583), ())) - } - } - } -} - impl Deref for Ruma { type Target = T::Incoming; @@ -338,41 +29,9 @@ impl Deref for Ruma { } } -/// This struct converts ruma responses into rocket http responses. +/// This struct converts ruma structs to http responses. 
pub type ConduitResult = Result, Error>; -pub fn response(response: RumaResponse) -> response::Result<'static> { - let http_response = response - .0 - .try_into_http_response::>() - .map_err(|_| Status::InternalServerError)?; - - let mut response = rocket::response::Response::build(); - - let status = http_response.status(); - response.status(Status::new(status.as_u16())); - - for header in http_response.headers() { - response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); - } - - let http_body = http_response.into_body(); - - response.sized_body(http_body.len(), Cursor::new(http_body)); - - response.raw_header("Access-Control-Allow-Origin", "*"); - response.raw_header( - "Access-Control-Allow-Methods", - "GET, POST, PUT, DELETE, OPTIONS", - ); - response.raw_header( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization", - ); - response.raw_header("Access-Control-Max-Age", "86400"); - response.ok() -} - #[derive(Clone)] pub struct RumaResponse(pub T); @@ -387,14 +46,3 @@ impl From for RumaResponse { t.to_response() } } - -#[cfg(feature = "conduit_bin")] -impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse -where - 'o: 'r, - T: OutgoingResponse, -{ - fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - response(self) - } -} diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs new file mode 100644 index 0000000..d2cf3f1 --- /dev/null +++ b/src/ruma_wrapper/axum.rs @@ -0,0 +1,338 @@ +use std::{collections::BTreeMap, iter::FromIterator, str}; + +use axum::{ + async_trait, + body::{Full, HttpBody}, + extract::{FromRequest, RequestParts, TypedHeader}, + headers::{ + authorization::{Bearer, Credentials}, + Authorization, + }, + response::{IntoResponse, Response}, + BoxError, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use http::StatusCode; +use ruma::{ + api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, + signatures::CanonicalJsonValue, + DeviceId, Outgoing, ServerName, UserId, +}; +use tracing::{debug, warn}; + +use super::{Ruma, RumaResponse}; +use crate::{database::DatabaseGuard, server_server, Error, Result}; + +#[async_trait] +impl FromRequest for Ruma +where + T: Outgoing, + T::Incoming: IncomingRequest, + B: HttpBody + Send, + B::Data: Send, + B::Error: Into, +{ + type Rejection = Error; + + async fn from_request(req: &mut RequestParts) -> Result { + let metadata = T::Incoming::METADATA; + let db = DatabaseGuard::from_request(req).await?; + let auth_header = Option::>>::from_request(req).await?; + + // FIXME: Do this more efficiently + let query: BTreeMap = + ruma::serde::urlencoded::from_str(req.uri().query().unwrap_or_default()) + .expect("Query to string map deserialization should be fine"); + + let token = match &auth_header { + Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + None => query.get("access_token").map(|tok| tok.as_str()), + }; + + let mut body = Bytes::from_request(req) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + + let mut json_body = serde_json::from_slice::(&body).ok(); + + let appservices = db.appservice.all().unwrap(); + let appservice_registration = appservices.iter().find(|(_id, registration)| { + registration + .get("as_token") + .and_then(|as_token| as_token.as_str()) + .map_or(false, |as_token| token == Some(as_token)) + }); + + let (sender_user, sender_device, sender_servername, from_appservice) = + if let Some((_id, registration)) = appservice_registration { + match 
metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let user_id = query.get("user_id").map_or_else( + || { + UserId::parse_with_server_name( + registration + .get("sender_localpart") + .unwrap() + .as_str() + .unwrap(), + db.globals.server_name(), + ) + .unwrap() + }, + |s| UserId::parse(s.as_str()).unwrap(), + ); + + if !db.users.exists(&user_id).unwrap() { + return Err(forbidden()); + } + + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, None, true) + } + AuthScheme::ServerSignatures => (None, None, None, true), + AuthScheme::None => (None, None, None, true), + } + } else { + match metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let token = match token { + Some(token) => token, + _ => return Err(missing_token()), + }; + + match db.users.find_from_token(token).unwrap() { + None => return Err(unknown_token()), + Some((user_id, device_id)) => ( + Some(user_id), + Some(Box::::from(device_id)), + None, + false, + ), + } + } + AuthScheme::ServerSignatures => { + let TypedHeader(Authorization(x_matrix)) = + TypedHeader::>::from_request(req) + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); + forbidden() + })?; + + let origin_signatures = BTreeMap::from_iter([( + x_matrix.key.clone(), + CanonicalJsonValue::String(x_matrix.sig), + )]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let mut request_map = BTreeMap::from_iter([ + ( + "method".to_owned(), + CanonicalJsonValue::String(req.method().to_string()), + ), + ( + "uri".to_owned(), + CanonicalJsonValue::String(req.uri().to_string()), + ), + ( + "origin".to_owned(), + CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), + ), + ( + "destination".to_owned(), + CanonicalJsonValue::String( + db.globals.server_name().as_str().to_owned(), + ), + ), + ( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + }; + + let keys_result = server_server::fetch_signing_keys( + &db, + &x_matrix.origin, + vec![x_matrix.key.to_owned()], + ) + .await; + + let keys = match keys_result { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); + return Err(forbidden()); + } + }; + + let pub_key_map = + BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, Some(x_matrix.origin), false), + Err(e) => { + warn!( + "Failed to verify json request from {}: {}\n{:?}", + x_matrix.origin, e, request_map + ); + + if req.uri().to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make sure your \ + reverse proxy gives Conduit the raw uri (apache: use \ + nocanon)" + ); + } + + return Err(forbidden()); + } + } + } + AuthScheme::None => (None, None, None, false), + } + }; + + let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); + *http_request.headers_mut().unwrap() = + req.headers().expect("Headers already extracted").clone(); + + if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid") + }); + + let uiaa_request = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + db.uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + }); + + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); + } + } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); + body = buf.into_inner().freeze(); + } + + let http_request = http_request.body(&*body).unwrap(); + + debug!("{:?}", http_request); + + let body = + ::try_from_http_request(http_request).map_err(|e| { + warn!("{:?}", e); + bad_json() + })?; + + Ok(Ruma { + body, + sender_user, + sender_device, + sender_servername, + from_appservice, + json_body, + }) + } +} + +fn forbidden() -> Error { + Error::BadRequest(ErrorKind::Forbidden, "Forbidden.") +} + +fn unknown_token() -> Error { + Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown token.", + ) +} + +fn missing_token() -> Error { + Error::BadRequest(ErrorKind::MissingToken, "Missing token.") +} + +fn bad_json() -> Error { + Error::BadRequest(ErrorKind::BadJson, "Bad json.") +} + +struct XMatrix { + origin: Box, + key: String, // KeyName? + sig: String, +} + +impl Credentials for XMatrix { + const SCHEME: &'static str = "X-Matrix"; + + fn decode(value: &http::HeaderValue) -> Option { + debug_assert!( + value.as_bytes().starts_with(b"X-Matrix "), + "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}", + value, + ); + + let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) + .ok()? 
+ .trim_start(); + + let mut origin = None; + let mut key = None; + let mut sig = None; + + for entry in parameters.split_terminator(',') { + let (name, value) = entry.split_once('=')?; + + // FIXME: Catch multiple fields of the same name + match name { + "origin" => origin = Some(value.try_into().ok()?), + "key" => key = Some(value.to_owned()), + "sig" => sig = Some(value.to_owned()), + _ => warn!( + "Unexpected field `{}` in X-Matrix Authorization header", + name + ), + } + } + + Some(Self { + origin: origin?, + key: key?, + sig: sig?, + }) + } + + fn encode(&self) -> http::HeaderValue { + todo!() + } +} + +impl IntoResponse for RumaResponse +where + T: OutgoingResponse, +{ + fn into_response(self) -> Response { + match self.0.try_into_http_response::() { + Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), + } + } +} diff --git a/src/server_server.rs b/src/server_server.rs index a39b3a5..5e6fab0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,13 +4,11 @@ use crate::{ pdu::EventHash, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; +use axum::{response::IntoResponse, Json}; +use futures_util::{stream::FuturesUnordered, StreamExt}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; use regex::Regex; -use rocket::{ - futures::{prelude::*, stream::FuturesUnordered}, - response::content::Json, -}; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -72,9 +70,6 @@ use std::{ use tokio::sync::{MutexGuard, Semaphore}; use tracing::{debug, error, info, trace, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; - /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). /// @@ -495,10 +490,10 @@ async fn request_well_known( /// # `GET /_matrix/federation/v1/version` /// /// Get version information on this server. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] -#[tracing::instrument(skip(db))] -pub fn get_server_version_route( +#[tracing::instrument(skip(db, _body))] +pub async fn get_server_version_route( db: DatabaseGuard, + _body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -520,12 +515,11 @@ pub fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. // Response type for this endpoint is Json because we need to calculate a signature for the response -#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_route(db: DatabaseGuard) -> Json { +pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { if !db.globals.allow_federation() { // TODO: Use proper types - return Json("Federation is disabled.".to_owned()); + return Json("Federation is disabled.").into_response(); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); @@ -563,7 +557,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { ) .unwrap(); - Json(serde_json::to_string(&response).expect("JSON is canonical")) + Json(response).into_response() } /// # `GET /_matrix/key/v2/server/{keyId}` @@ -572,19 +566,14 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { /// /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. 
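The X-Matrix parsing above replaces the hand-rolled header splitting from the Rocket extractor. A minimal sketch of how XMatrix::decode is expected to behave, written as a hypothetical in-module test (the module name, test name and header value are made up; note that decode as written does not strip surrounding quotes, so the fields are given unquoted):

#[cfg(test)]
mod xmatrix_tests {
    use super::XMatrix;
    use axum::headers::authorization::Credentials;

    #[test]
    fn decodes_origin_key_and_sig() {
        // Hypothetical header value; a real request carries a signature over
        // the canonical request JSON.
        let value = http::HeaderValue::from_static(
            "X-Matrix origin=other.server.name,key=ed25519:abc123,sig=dGVzdHNpZw",
        );

        let creds = XMatrix::decode(&value).expect("well-formed X-Matrix header");
        assert_eq!(creds.origin.as_str(), "other.server.name");
        assert_eq!(creds.key, "ed25519:abc123");
        assert_eq!(creds.sig, "dGVzdHNpZw");
    }
}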
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json { - get_server_keys_route(db) +pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { + get_server_keys_route(db).await } /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, @@ -628,10 +617,6 @@ pub async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, @@ -675,10 +660,6 @@ pub async fn get_public_rooms_route( /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route( db: DatabaseGuard, @@ -2309,12 +2290,8 @@ fn get_auth_chain_inner( /// Retrieves a single event from the server. /// /// - Only works if a user of this server is currently invited or joined the room -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_event_route( +pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2358,12 +2335,8 @@ pub fn get_event_route( /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/get_missing_events/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_missing_events_route( +pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2436,12 +2409,8 @@ pub fn get_missing_events_route( /// Retrieves the auth chain for a given event. /// /// - This does not include the event itself -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event_auth/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_event_authorization_route( +pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2490,12 +2459,8 @@ pub fn get_event_authorization_route( /// # `GET /_matrix/federation/v1/state/{roomId}` /// /// Retrieves the current state of the room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_state_route( +pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2555,12 +2520,8 @@ pub fn get_room_state_route( /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// /// Retrieves the current state of the room. 
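All of the conversions in this file follow the same pattern: the Rocket routing attribute is dropped and the function becomes a plain async fn that takes its extractors directly and is registered centrally on the axum Router. A distilled sketch of that shape, with a hypothetical endpoint name and the endpoint-specific work elided:

// Hypothetical endpoint, shown only to illustrate the common post-migration shape.
pub async fn example_federation_route(
    db: DatabaseGuard,
    body: Ruma<example_endpoint::v1::IncomingRequest>,
) -> ConduitResult<example_endpoint::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    // Auth data resolved by the Ruma extractor:
    let _sender_servername = body
        .sender_servername
        .as_ref()
        .expect("server is authenticated");

    // ... endpoint-specific work using `db` and `*body` (Deref to the incoming request) ...

    Ok(example_endpoint::v1::Response::default().into())
}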
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state_ids/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_state_ids_route( +pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2609,12 +2570,8 @@ pub fn get_room_state_ids_route( /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// /// Creates a join template. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/make_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn create_join_event_template_route( +pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2895,10 +2852,6 @@ async fn create_join_event( /// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_join_event_v1_route( db: DatabaseGuard, @@ -2917,10 +2870,6 @@ pub async fn create_join_event_v1_route( /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_join_event_v2_route( db: DatabaseGuard, @@ -2939,10 +2888,6 @@ pub async fn create_join_event_v2_route( /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` /// /// Invites a remote user to a room. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/invite/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_invite_route( db: DatabaseGuard, @@ -3055,12 +3000,8 @@ pub async fn create_invite_route( /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/user/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_devices_route( +pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3098,12 +3039,8 @@ pub fn get_devices_route( /// # `GET /_matrix/federation/v1/query/directory` /// /// Resolve a room alias to a room id. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/directory", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_information_route( +pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3129,12 +3066,8 @@ pub fn get_room_information_route( /// # `GET /_matrix/federation/v1/query/profile` /// /// Gets information on a profile. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/profile", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_profile_information_route( +pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3172,10 +3105,6 @@ pub fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/user/keys/query", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, @@ -3206,10 +3135,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/federation/v1/user/keys/claim` /// /// Claims one-time keys. 
-#[cfg_attr(
-    feature = "conduit_bin",
-    post("/_matrix/federation/v1/user/keys/claim", data = "<body>")
-)]
 #[tracing::instrument(skip(db, body))]
 pub async fn claim_keys_route(
     db: DatabaseGuard,
diff --git a/src/utils.rs b/src/utils.rs
index e2d71f4..7142b3f 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -3,7 +3,7 @@ use cmp::Ordering;
 use rand::prelude::*;
 use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
 use std::{
-    cmp,
+    cmp, fmt,
     str::FromStr,
     time::{SystemTime, UNIX_EPOCH},
 };
@@ -140,3 +140,40 @@ pub fn deserialize_from_str<
     }
     deserializer.deserialize_str(Visitor(std::marker::PhantomData))
 }
+
+// Copied from librustdoc:
+// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs
+
+/// Wrapper struct which will emit the HTML-escaped version of the contained
+/// string when passed to a format string.
+pub struct HtmlEscape<'a>(pub &'a str);
+
+impl<'a> fmt::Display for HtmlEscape<'a> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Because the internet is always right, turns out there's not that many
+        // characters to escape: http://stackoverflow.com/questions/7381974
+        let HtmlEscape(s) = *self;
+        let pile_o_bits = s;
+        let mut last = 0;
+        for (i, ch) in s.char_indices() {
+            let s = match ch {
+                '>' => "&gt;",
+                '<' => "&lt;",
+                '&' => "&amp;",
+                '\'' => "&#39;",
+                '"' => "&quot;",
+                _ => continue,
+            };
+            fmt.write_str(&pile_o_bits[last..i])?;
+            fmt.write_str(s)?;
+            // NOTE: we only expect single byte characters here - which is fine as long as we
+            // only match single byte characters
+            last = i + 1;
+        }
+
+        if last < s.len() {
+            fmt.write_str(&pile_o_bits[last..])?;
+        }
+        Ok(())
+    }
+}
diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile
index f6c62fe..22016e9 100644
--- a/tests/Complement.Dockerfile
+++ b/tests/Complement.Dockerfile
@@ -27,19 +27,18 @@ RUN chmod +x /workdir/caddy
 COPY conduit-example.toml conduit.toml
 
 ENV SERVER_NAME=localhost
-ENV ROCKET_LOG=normal
 ENV CONDUIT_CONFIG=/workdir/conduit.toml
 
 RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
 RUN echo "allow_federation = true" >> conduit.toml
 RUN echo "allow_encryption = true" >> conduit.toml
 RUN echo "allow_registration = true" >> conduit.toml
-RUN echo "log = \"info,rocket=info,_=off,sled=off\"" >> conduit.toml
+RUN echo "log = \"info,_=off,sled=off\"" >> conduit.toml
 RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
 
 # Enabled Caddy auto cert generation for complement provided CA.
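Back in src/utils.rs, the HtmlEscape wrapper added above escapes lazily through its Display impl, so it can be interpolated straight into a format string. A short usage sketch (the calling function is made up):

use crate::utils::HtmlEscape;

// Hypothetical caller: wrap untrusted text before splicing it into HTML.
fn render_user_content(raw: &str) -> String {
    format!("<pre>{}</pre>", HtmlEscape(raw))
}

// render_user_content(r#"<b>hi</b> & "bye""#)
//     == "<pre>&lt;b&gt;hi&lt;/b&gt; &amp; &quot;bye&quot;</pre>"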
-RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json - +RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json + EXPOSE 8008 8448 CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ From d1d22170199fd5e8827ad71b9cad621e129ba519 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:32:21 +0100 Subject: [PATCH 1007/1727] Clean up error handling for server_server::get_server_keys_route --- src/server_server.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 5e6fab0..fc3681b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -516,10 +516,9 @@ pub async fn get_server_version_route( /// forever. 
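The cleanup just below relies on axum rendering a Result through whichever arm it holds, so the handler can return Json on success and let Conduit's Error type produce the Matrix error response (its IntoResponse impl lives elsewhere in the tree, not in this patch). A sketch of the principle with a made-up function and values:

use axum::Json;

// Hypothetical function with the same return shape as the cleaned-up route:
// the error path no longer has to be hand-converted into a JSON string.
async fn example_keys_route(federation_enabled: bool) -> Result<Json<&'static str>, Error> {
    if !federation_enabled {
        return Err(Error::bad_config("Federation is disabled."));
    }

    Ok(Json("signed server keys would go here"))
}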
// Response type for this endpoint is Json because we need to calculate a signature for the response #[tracing::instrument(skip(db))] -pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { +pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { if !db.globals.allow_federation() { - // TODO: Use proper types - return Json("Federation is disabled.").into_response(); + return Err(Error::bad_config("Federation is disabled.")); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); @@ -557,7 +556,7 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { ) .unwrap(); - Json(response).into_response() + Ok(Json(response)) } /// # `GET /_matrix/key/v2/server/{keyId}` From a5757ab1950b3e498793a14e359412851542d1ac Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:51:55 +0100 Subject: [PATCH 1008/1727] Generalize RumaHandler --- src/main.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/main.rs b/src/main.rs index 3ab1294..8a9d2c5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,6 +12,7 @@ use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, handler::Handler, + response::IntoResponse, routing::{get, on, MethodFilter}, Router, }; @@ -256,10 +257,7 @@ fn routes() -> Router { .ruma_route(client_server::get_state_events_route) .ruma_route(client_server::get_state_events_for_key_route) .ruma_route(client_server::get_state_events_for_empty_key_route) - .route( - "/_matrix/client/r0/sync", - get(client_server::sync_events_route), - ) + .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) .ruma_route(client_server::search_events_route) @@ -375,14 +373,16 @@ macro_rules! impl_ruma_handler { ( $($ty:ident),* $(,)? 
) => { #[axum::async_trait] #[allow(non_snake_case)] - impl RumaHandler<($($ty,)* Ruma,)> for F + impl RumaHandler<($($ty,)* Ruma,)> for F where Req: Outgoing, Req::Incoming: IncomingRequest + Send, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse + Fut: Future::OutgoingResponse>, + E, >> + Send, + E: IntoResponse, $( $ty: FromRequest + Send, )* { const METADATA: Metadata = Req::Incoming::METADATA; From 7bf538f5498fa8affa80700ec7e91c5266e9961b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 14:45:12 +0100 Subject: [PATCH 1009/1727] Fix axum route conflicts --- src/main.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 8a9d2c5..46df5d6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -253,10 +253,15 @@ fn routes() -> Router { .ruma_route(client_server::get_protocols_route) .ruma_route(client_server::send_message_event_route) .ruma_route(client_server::send_state_event_for_key_route) - .ruma_route(client_server::send_state_event_for_empty_key_route) .ruma_route(client_server::get_state_events_route) .ruma_route(client_server::get_state_events_for_key_route) - .ruma_route(client_server::get_state_events_for_empty_key_route) + // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes + // share one Ruma request / response type pair with {get,send}_state_event_for_key_route + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) From 77a87881c913bca384403e377239c95b2dfa19f5 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 15:03:58 +0100 Subject: [PATCH 1010/1727] Add message to unsupported HTTP method panic --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 46df5d6..53b1825 100644 --- a/src/main.rs +++ b/src/main.rs @@ -363,7 +363,7 @@ impl RouterExt for Router { Method::POST => MethodFilter::POST, Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, - _ => panic!(""), + m => panic!("Unsupported HTTP method: {:?}", m), }; self.route(meta.path, on(method_filter, handler)) From 5fa9190117805ff1040c69b65a3b9caacb6c965b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 16:58:32 +0100 Subject: [PATCH 1011/1727] Simplify return type of most route handlers --- src/client_server/account.rs | 34 +++++------ src/client_server/alias.rs | 21 ++++--- src/client_server/backup.rs | 66 +++++++++----------- src/client_server/capabilities.rs | 6 +- src/client_server/config.rs | 18 +++--- src/client_server/context.rs | 6 +- src/client_server/device.rs | 22 +++---- src/client_server/directory.rs | 29 ++++----- src/client_server/filter.rs | 12 ++-- src/client_server/keys.rs | 28 ++++----- src/client_server/media.rs | 33 +++++----- src/client_server/membership.rs | 55 ++++++++--------- src/client_server/message.rs | 12 ++-- src/client_server/presence.rs | 11 ++-- src/client_server/profile.rs | 34 +++++------ src/client_server/push.rs | 45 +++++++------- src/client_server/read_marker.rs | 10 +-- src/client_server/redact.rs | 6 +- src/client_server/report.rs | 6 +- src/client_server/room.rs | 21 +++---- src/client_server/search.rs | 7 +-- src/client_server/session.rs | 26 ++++---- 
src/client_server/state.rs | 20 +++--- src/client_server/sync.rs | 8 +-- src/client_server/tag.rs | 15 +++-- src/client_server/thirdparty.rs | 7 +-- src/client_server/to_device.rs | 6 +- src/client_server/typing.rs | 6 +- src/client_server/unversioned.rs | 6 +- src/client_server/user_directory.rs | 6 +- src/client_server/voip.rs | 10 ++- src/database/abstraction/rocksdb.rs | 6 +- src/database/globals.rs | 6 +- src/database/uiaa.rs | 7 ++- src/lib.rs | 2 +- src/main.rs | 62 ++++++++++--------- src/ruma_wrapper.rs | 3 - src/server_server.rs | 94 ++++++++++++----------------- 38 files changed, 358 insertions(+), 414 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index bf1a74d..2b2e6e6 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -4,7 +4,7 @@ use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ database::{admin::make_user_admin, DatabaseGuard}, pdu::PduBuilder, - utils, ConduitResult, Error, Ruma, + utils, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -44,7 +44,7 @@ const GUEST_NAME_LENGTH: usize = 10; pub async fn get_register_available_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { // Validate user id let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) @@ -68,7 +68,7 @@ pub async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. - Ok(get_username_availability::Response { available: true }.into()) + Ok(get_username_availability::Response { available: true }) } /// # `POST /_matrix/client/r0/register` @@ -88,7 +88,7 @@ pub async fn get_register_available_route( pub async fn register_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -212,8 +212,7 @@ pub async fn register_route( access_token: None, user_id, device_id: None, - } - .into()); + }); } // Generate new device id if the user didn't specify one @@ -251,8 +250,7 @@ pub async fn register_route( access_token: Some(token), user_id, device_id: Some(device_id), - } - .into()) + }) } /// # `POST /_matrix/client/r0/account/password` @@ -273,7 +271,7 @@ pub async fn register_route( pub async fn change_password_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -326,7 +324,7 @@ pub async fn change_password_route( db.flush()?; - Ok(change_password::Response {}.into()) + Ok(change_password::Response {}) } /// # `GET _matrix/client/r0/account/whoami` @@ -335,12 +333,11 @@ pub async fn change_password_route( /// /// Note: Also works for Application Services #[tracing::instrument(skip(body))] -pub async fn whoami_route(body: Ruma) -> ConduitResult { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { user_id: sender_user.clone(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/account/deactivate` @@ -357,7 +354,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is 
authenticated"); @@ -452,8 +449,7 @@ pub async fn deactivate_route( Ok(deactivate::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - } - .into()) + }) } /// # `GET _matrix/client/r0/account/3pid` @@ -461,10 +457,8 @@ pub async fn deactivate_route( /// Get a list of third party identifiers associated with this account. /// /// - Currently always returns empty list -pub async fn third_party_route( - body: Ruma, -) -> ConduitResult { +pub async fn third_party_route(body: Ruma) -> Result { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_3pids::Response::new(Vec::new()).into()) + Ok(get_3pids::Response::new(Vec::new())) } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 6e1b43e..eecd72a 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use regex::Regex; use ruma::{ api::{ @@ -19,7 +19,7 @@ use ruma::{ pub async fn create_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -36,7 +36,7 @@ pub async fn create_alias_route( db.flush()?; - Ok(create_alias::Response::new().into()) + Ok(create_alias::Response::new()) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -49,7 +49,7 @@ pub async fn create_alias_route( pub async fn delete_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -63,7 +63,7 @@ pub async fn delete_alias_route( db.flush()?; - Ok(delete_alias::Response::new().into()) + Ok(delete_alias::Response::new()) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -75,14 +75,14 @@ pub async fn delete_alias_route( pub async fn get_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { get_alias_helper(&db, &body.room_alias).await } pub(crate) async fn get_alias_helper( db: &Database, room_alias: &RoomAliasId, -) -> ConduitResult { +) -> Result { if room_alias.server_name() != db.globals.server_name() { let response = db .sending @@ -93,7 +93,7 @@ pub(crate) async fn get_alias_helper( ) .await?; - return Ok(get_alias::Response::new(response.room_id, response.servers).into()); + return Ok(get_alias::Response::new(response.room_id, response.servers)); } let mut room_id = None; @@ -144,5 +144,8 @@ pub(crate) async fn get_alias_helper( } }; - Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into()) + Ok(get_alias::Response::new( + room_id, + vec![db.globals.server_name().to_owned()], + )) } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index cc2d7c4..acff437 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::backup::{ @@ -16,7 +16,7 @@ use ruma::api::client::{ pub async fn create_backup_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -24,7 +24,7 @@ pub async fn 
create_backup_route( db.flush()?; - Ok(create_backup::Response { version }.into()) + Ok(create_backup::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -34,14 +34,14 @@ pub async fn create_backup_route( pub async fn update_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::Response {}.into()) + Ok(update_backup::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -51,7 +51,7 @@ pub async fn update_backup_route( pub async fn get_latest_backup_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -67,8 +67,7 @@ pub async fn get_latest_backup_route( count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, version, - } - .into()) + }) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -78,7 +77,7 @@ pub async fn get_latest_backup_route( pub async fn get_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -93,8 +92,7 @@ pub async fn get_backup_route( count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, version: body.version.to_owned(), - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/version/{version}` @@ -106,14 +104,14 @@ pub async fn get_backup_route( pub async fn delete_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::Response {}.into()) + Ok(delete_backup::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -127,7 +125,7 @@ pub async fn delete_backup_route( pub async fn add_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -160,8 +158,7 @@ pub async fn add_backup_keys_route( Ok(add_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` @@ -175,7 +172,7 @@ pub async fn add_backup_keys_route( pub async fn add_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -206,8 +203,7 @@ pub async fn add_backup_key_sessions_route( Ok(add_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -221,7 +217,7 @@ pub async fn add_backup_key_sessions_route( pub async fn add_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -250,8 +246,7 @@ pub async fn add_backup_key_session_route( Ok(add_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/room_keys/keys` @@ -261,12 +256,12 @@ pub async fn add_backup_key_session_route( pub async fn get_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let rooms = db.key_backups.get_all(sender_user, &body.version)?; - Ok(get_backup_keys::Response { rooms }.into()) + Ok(get_backup_keys::Response { rooms }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` @@ -276,14 +271,14 @@ pub async fn get_backup_keys_route( pub async fn get_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::Response { sessions }.into()) + Ok(get_backup_key_sessions::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -293,7 +288,7 @@ pub async fn get_backup_key_sessions_route( pub async fn get_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -304,7 +299,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::Response { key_data }.into()) + Ok(get_backup_key_session::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -314,7 +309,7 @@ pub async fn get_backup_key_session_route( pub async fn delete_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_all_keys(sender_user, &body.version)?; @@ -324,8 +319,7 @@ pub async fn delete_backup_keys_route( Ok(delete_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` @@ -335,7 +329,7 @@ pub async fn delete_backup_keys_route( pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -346,8 +340,7 @@ pub async fn delete_backup_key_sessions_route( Ok(delete_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -357,7 +350,7 @@ pub async fn delete_backup_key_sessions_route( pub async fn delete_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -368,6 +361,5 @@ pub async fn delete_backup_key_session_route( Ok(delete_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 8da6855..3f779dc 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::{ api::client::r0::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, @@ -13,7 +13,7 @@ use std::collections::BTreeMap; #[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, -) -> ConduitResult { +) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); available.insert(RoomVersionId::V6, RoomVersionStability::Stable); @@ -24,5 +24,5 @@ pub async fn get_capabilities_route( available, }; - Ok(get_capabilities::Response { capabilities }.into()) + Ok(get_capabilities::Response { capabilities }) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 0df0dec..14a665e 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -20,7 +20,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; pub async fn set_global_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -41,7 +41,7 @@ pub async fn set_global_account_data_route( db.flush()?; - Ok(set_global_account_data::Response {}.into()) + Ok(set_global_account_data::Response {}) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -51,7 +51,7 @@ pub async fn set_global_account_data_route( pub async fn set_room_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -72,7 +72,7 @@ pub async fn set_room_account_data_route( db.flush()?; - Ok(set_room_account_data::Response {}.into()) + Ok(set_room_account_data::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` @@ -82,7 +82,7 @@ pub async fn set_room_account_data_route( pub async fn get_global_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -94,7 +94,7 @@ pub async fn get_global_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
.content; - Ok(get_global_account_data::Response { account_data }.into()) + Ok(get_global_account_data::Response { account_data }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -104,7 +104,7 @@ pub async fn get_global_account_data_route( pub async fn get_room_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -120,7 +120,7 @@ pub async fn get_room_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_room_account_data::Response { account_data }.into()) + Ok(get_room_account_data::Response { account_data }) } #[derive(Deserialize)] diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 1fbfee9..3d884e0 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,7 +19,7 @@ use tracing::error; pub async fn get_context_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -191,5 +191,5 @@ pub async fn get_context_route( state, }; - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 82d1168..e35da97 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::{ @@ -16,7 +16,7 @@ use super::SESSION_ID_LENGTH; pub async fn get_devices_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices: Vec = db @@ -25,7 +25,7 @@ pub async fn get_devices_route( .filter_map(|r| r.ok()) // Filter out buggy devices .collect(); - Ok(get_devices::Response { devices }.into()) + Ok(get_devices::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` @@ -35,7 +35,7 @@ pub async fn get_devices_route( pub async fn get_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = db @@ -43,7 +43,7 @@ pub async fn get_device_route( .get_device_metadata(sender_user, &body.body.device_id)? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::Response { device }.into()) + Ok(get_device::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -53,7 +53,7 @@ pub async fn get_device_route( pub async fn update_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = db @@ -68,7 +68,7 @@ pub async fn update_device_route( db.flush()?; - Ok(update_device::Response {}.into()) + Ok(update_device::Response {}) } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -84,7 +84,7 @@ pub async fn update_device_route( pub async fn delete_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -125,7 +125,7 @@ pub async fn delete_device_route( db.flush()?; - Ok(delete_device::Response {}.into()) + Ok(delete_device::Response {}) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -143,7 +143,7 @@ pub async fn delete_device_route( pub async fn delete_devices_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -186,5 +186,5 @@ pub async fn delete_devices_route( db.flush()?; - Ok(delete_devices::Response {}.into()) + Ok(delete_devices::Response {}) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 06d7a27..0f3ae30 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -38,7 +38,7 @@ use tracing::{info, warn}; pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -59,7 +59,7 @@ pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let response = get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -68,16 +68,14 @@ pub async fn get_public_rooms_route( &IncomingFilter::default(), &IncomingRoomNetwork::Matrix, ) - .await? 
- .0; + .await?; Ok(get_public_rooms::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` @@ -89,7 +87,7 @@ pub async fn get_public_rooms_route( pub async fn set_room_visibility_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { @@ -108,7 +106,7 @@ pub async fn set_room_visibility_route( db.flush()?; - Ok(set_room_visibility::Response {}.into()) + Ok(set_room_visibility::Response {}) } /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` @@ -118,15 +116,14 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { Ok(get_room_visibility::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private }, - } - .into()) + }) } pub(crate) async fn get_public_rooms_filtered_helper( @@ -136,7 +133,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, -) -> ConduitResult { +) -> Result { if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) { let response = db @@ -172,8 +169,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()); + }); } let limit = limit.map_or(10, u64::from); @@ -353,6 +349,5 @@ pub(crate) async fn get_public_rooms_filtered_helper( prev_batch, next_batch, total_room_count_estimate: Some(total_room_count_estimate), - } - .into()) + }) } diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 6c42edd..28610ec 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::filter::{create_filter, get_filter}, @@ -13,14 +13,14 @@ use ruma::api::client::{ pub async fn get_filter_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? 
{ Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; - Ok(get_filter::Response::new(filter).into()) + Ok(get_filter::Response::new(filter)) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` @@ -30,7 +30,9 @@ pub async fn get_filter_route( pub async fn create_filter_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into()) + Ok(create_filter::Response::new( + db.users.create_filter(sender_user, &body.filter)?, + )) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 9a7a4e7..d272ff4 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; +use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -31,7 +31,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; pub async fn upload_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -62,8 +62,7 @@ pub async fn upload_keys_route( Ok(upload_keys::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, - } - .into()) + }) } /// # `POST /_matrix/client/r0/keys/query` @@ -77,7 +76,7 @@ pub async fn upload_keys_route( pub async fn get_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = get_keys_helper( @@ -88,7 +87,7 @@ pub async fn get_keys_route( ) .await?; - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/claim` @@ -98,12 +97,12 @@ pub async fn get_keys_route( pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let response = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush()?; - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/device_signing/upload` @@ -115,7 +114,7 @@ pub async fn claim_keys_route( pub async fn upload_signing_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -165,7 +164,7 @@ pub async fn upload_signing_keys_route( db.flush()?; - Ok(upload_signing_keys::Response {}.into()) + Ok(upload_signing_keys::Response {}) } /// # `POST /_matrix/client/r0/keys/signatures/upload` @@ -175,7 +174,7 @@ pub async fn upload_signing_keys_route( pub async fn upload_signatures_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { @@ -225,7 +224,7 @@ pub async fn upload_signatures_route( db.flush()?; - Ok(upload_signatures::Response {}.into()) + Ok(upload_signatures::Response {}) } /// # `POST /_matrix/client/r0/keys/changes` @@ -237,7 +236,7 @@ pub async fn upload_signatures_route( pub async fn get_key_changes_route( db: DatabaseGuard, body: Ruma>, -) -> 
ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); @@ -276,8 +275,7 @@ pub async fn get_key_changes_route( Ok(get_key_changes::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO - } - .into()) + }) } pub(crate) async fn get_keys_helper bool>( diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 5eba17b..615f760 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,6 +1,6 @@ use crate::{ database::{media::FileMeta, DatabaseGuard}, - utils, ConduitResult, Error, Ruma, + utils, Error, Result, Ruma, }; use ruma::api::client::{ error::ErrorKind, @@ -19,11 +19,10 @@ const MXC_LENGTH: usize = 32; pub async fn get_media_config_route( db: DatabaseGuard, _body: Ruma, -) -> ConduitResult { +) -> Result { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), - } - .into()) + }) } /// # `POST /_matrix/media/r0/upload` @@ -36,7 +35,7 @@ pub async fn get_media_config_route( pub async fn create_content_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!( "mxc://{}/{}", db.globals.server_name(), @@ -62,8 +61,7 @@ pub async fn create_content_route( Ok(create_content::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, - } - .into()) + }) } pub async fn get_remote_content( @@ -107,7 +105,7 @@ pub async fn get_remote_content( pub async fn get_content_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -120,12 +118,11 @@ pub async fn get_content_route( file, content_type, content_disposition, - } - .into()) + }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; - Ok(remote_content_response.into()) + Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -140,7 +137,7 @@ pub async fn get_content_route( pub async fn get_content_as_filename_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -153,8 +150,7 @@ pub async fn get_content_as_filename_route( file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), - } - .into()) + }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; @@ -163,8 +159,7 @@ pub async fn get_content_as_filename_route( content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, - } - .into()) + }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -179,7 +174,7 @@ pub async fn get_content_as_filename_route( pub async fn get_content_thumbnail_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -198,7 +193,7 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::Response { file, content_type }.into()) + Ok(get_content_thumbnail::Response { file, content_type }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db .sending @@ -228,7 +223,7 @@ pub async fn get_content_thumbnail_route( ) .await?; - Ok(get_thumbnail_response.into()) + Ok(get_thumbnail_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c16065e..efdf774 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use crate::{ client_server, database::DatabaseGuard, pdu::{EventHash, PduBuilder, PduEvent}, - server_server, utils, ConduitResult, Database, Error, Result, Ruma, + server_server, utils, Database, Error, Result, Ruma, }; use ruma::{ api::{ @@ -46,7 +46,7 @@ use tracing::{debug, error, warn}; pub async fn join_room_by_id_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers: HashSet<_> = db @@ -87,7 +87,7 @@ pub async fn join_room_by_id_route( pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -111,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route( Err(room_alias) => { let response = client_server::get_alias_helper(&db, &room_alias).await?; - (response.0.servers.into_iter().collect(), response.0.room_id) + (response.servers.into_iter().collect(), response.room_id) } }; @@ -127,9 +127,8 @@ pub async fn join_room_by_id_or_alias_route( db.flush()?; Ok(join_room_by_id_or_alias::Response { - room_id: join_room_response.0.room_id, - } - .into()) + room_id: join_room_response.room_id, + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/leave` @@ -141,14 +140,14 @@ pub async fn join_room_by_id_or_alias_route( pub async fn leave_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush()?; - Ok(leave_room::Response::new().into()) + Ok(leave_room::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -158,13 +157,13 @@ pub async fn leave_room_route( pub async fn invite_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush()?; - Ok(invite_user::Response {}.into()) + Ok(invite_user::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -177,7 +176,7 @@ pub async fn invite_user_route( pub async fn kick_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -227,7 +226,7 @@ pub async fn kick_user_route( db.flush()?; - Ok(kick_user::Response::new().into()) + Ok(kick_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` @@ -237,7 +236,7 @@ pub async fn kick_user_route( pub async fn 
ban_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -298,7 +297,7 @@ pub async fn ban_user_route( db.flush()?; - Ok(ban_user::Response::new().into()) + Ok(ban_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` @@ -308,7 +307,7 @@ pub async fn ban_user_route( pub async fn unban_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -357,7 +356,7 @@ pub async fn unban_user_route( db.flush()?; - Ok(unban_user::Response::new().into()) + Ok(unban_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` @@ -372,14 +371,14 @@ pub async fn unban_user_route( pub async fn forget_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, sender_user)?; db.flush()?; - Ok(forget_room::Response::new().into()) + Ok(forget_room::Response::new()) } /// # `POST /_matrix/client/r0/joined_rooms` @@ -389,7 +388,7 @@ pub async fn forget_room_route( pub async fn joined_rooms_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(joined_rooms::Response { @@ -398,8 +397,7 @@ pub async fn joined_rooms_route( .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/members` @@ -411,7 +409,7 @@ pub async fn joined_rooms_route( pub async fn get_member_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? @@ -430,8 +428,7 @@ pub async fn get_member_events_route( .filter(|(key, _)| key.0 == EventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` @@ -444,7 +441,7 @@ pub async fn get_member_events_route( pub async fn joined_members_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -468,7 +465,7 @@ pub async fn joined_members_route( ); } - Ok(joined_members::Response { joined }.into()) + Ok(joined_members::Response { joined }) } #[tracing::instrument(skip(db))] @@ -478,7 +475,7 @@ async fn join_room_by_id_helper( room_id: &RoomId, servers: &HashSet>, _third_party_signed: Option<&IncomingThirdPartySigned>, -) -> ConduitResult { +) -> Result { let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( @@ -734,7 +731,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.to_owned()).into()) + Ok(join_room_by_id::Response::new(room_id.to_owned())) } fn validate_and_add_event_id( diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 4fb8771..c5982de 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -22,7 +22,7 @@ use std::{ pub async fn send_message_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -62,7 +62,7 @@ pub async fn send_message_event_route( .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? .try_into() .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::Response { event_id }.into()); + return Ok(send_message_event::Response { event_id }); } let mut unsigned = BTreeMap::new(); @@ -94,7 +94,7 @@ pub async fn send_message_event_route( db.flush()?; - Ok(send_message_event::Response::new((*event_id).to_owned()).into()) + Ok(send_message_event::Response::new((*event_id).to_owned())) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` @@ -107,7 +107,7 @@ pub async fn send_message_event_route( pub async fn get_message_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -235,5 +235,5 @@ pub async fn get_message_events_route( ); } - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 0d58ebf..aedff55 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, utils, Result, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; use std::time::Duration; @@ -9,7 +9,7 @@ use std::time::Duration; pub async fn set_presence_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for room_id in db.rooms.rooms_joined(sender_user) { @@ -39,7 +39,7 @@ pub async fn set_presence_route( db.flush()?; - Ok(set_presence::Response {}.into()) + Ok(set_presence::Response {}) } /// # `GET /_matrix/client/r0/presence/{userId}/status` @@ -51,7 +51,7 @@ pub async fn set_presence_route( pub async fn get_presence_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; @@ -82,8 
+82,7 @@ pub async fn get_presence_route( .last_active_ago .map(|millis| Duration::from_millis(millis.into())), presence: presence.content.presence, - } - .into()) + }) } else { todo!(); } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index bb13b44..f520d2c 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -23,7 +23,7 @@ use std::sync::Arc; pub async fn set_displayname_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -109,7 +109,7 @@ pub async fn set_displayname_route( db.flush()?; - Ok(set_display_name::Response {}.into()) + Ok(set_display_name::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -121,7 +121,7 @@ pub async fn set_displayname_route( pub async fn get_displayname_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -137,14 +137,12 @@ pub async fn get_displayname_route( return Ok(get_display_name::Response { displayname: response.displayname, - } - .into()); + }); } Ok(get_display_name::Response { displayname: db.users.displayname(&body.user_id)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url` @@ -156,7 +154,7 @@ pub async fn get_displayname_route( pub async fn set_avatar_url_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -244,7 +242,7 @@ pub async fn set_avatar_url_route( db.flush()?; - Ok(set_avatar_url::Response {}.into()) + Ok(set_avatar_url::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` @@ -256,7 +254,7 @@ pub async fn set_avatar_url_route( pub async fn get_avatar_url_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -273,15 +271,13 @@ pub async fn get_avatar_url_route( return Ok(get_avatar_url::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/profile/{userId}` @@ -293,7 +289,7 @@ pub async fn get_avatar_url_route( pub async fn get_profile_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -311,8 +307,7 @@ pub async fn get_profile_route( displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } if !db.users.exists(&body.user_id)? 
{ @@ -327,6 +322,5 @@ pub async fn get_profile_route( avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?, - } - .into()) + }) } diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 322cf89..3bc46b8 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,7 +19,7 @@ use ruma::{ pub async fn get_pushrules_all_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -32,8 +32,7 @@ pub async fn get_pushrules_all_route( Ok(get_pushrules_all::Response { global: event.content.global, - } - .into()) + }) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -43,7 +42,7 @@ pub async fn get_pushrules_all_route( pub async fn get_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -80,7 +79,7 @@ pub async fn get_pushrule_route( }; if let Some(rule) = rule { - Ok(get_pushrule::Response { rule }.into()) + Ok(get_pushrule::Response { rule }) } else { Err(Error::BadRequest( ErrorKind::NotFound, @@ -96,7 +95,7 @@ pub async fn get_pushrule_route( pub async fn set_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -183,7 +182,7 @@ pub async fn set_pushrule_route( db.flush()?; - Ok(set_pushrule::Response {}.into()) + Ok(set_pushrule::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -193,7 +192,7 @@ pub async fn set_pushrule_route( pub async fn get_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -240,8 +239,7 @@ pub async fn get_pushrule_actions_route( Ok(get_pushrule_actions::Response { actions: actions.unwrap_or_default(), - } - .into()) + }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -251,7 +249,7 @@ pub async fn get_pushrule_actions_route( pub async fn set_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -309,7 +307,7 @@ pub async fn set_pushrule_actions_route( db.flush()?; - Ok(set_pushrule_actions::Response {}.into()) + Ok(set_pushrule_actions::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -319,7 +317,7 @@ pub async fn set_pushrule_actions_route( pub async fn get_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -369,7 +367,7 @@ pub async fn get_pushrule_enabled_route( db.flush()?; - Ok(get_pushrule_enabled::Response { enabled }.into()) + Ok(get_pushrule_enabled::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -379,7 +377,7 @@ pub async fn 
get_pushrule_enabled_route( pub async fn set_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -442,7 +440,7 @@ pub async fn set_pushrule_enabled_route( db.flush()?; - Ok(set_pushrule_enabled::Response {}.into()) + Ok(set_pushrule_enabled::Response {}) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -452,7 +450,7 @@ pub async fn set_pushrule_enabled_route( pub async fn delete_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -505,7 +503,7 @@ pub async fn delete_pushrule_route( db.flush()?; - Ok(delete_pushrule::Response {}.into()) + Ok(delete_pushrule::Response {}) } /// # `GET /_matrix/client/r0/pushers` @@ -515,13 +513,12 @@ pub async fn delete_pushrule_route( pub async fn get_pushers_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { pushers: db.pusher.get_pushers(sender_user)?, - } - .into()) + }) } /// # `POST /_matrix/client/r0/pushers/set` @@ -533,7 +530,7 @@ pub async fn get_pushers_route( pub async fn set_pushers_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); @@ -541,5 +538,5 @@ pub async fn set_pushers_route( db.flush()?; - Ok(set_pusher::Response::default().into()) + Ok(set_pusher::Response::default()) } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index c9480f0..fa2627b 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -20,7 +20,7 @@ use std::collections::BTreeMap; pub async fn set_read_marker_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { @@ -76,7 +76,7 @@ pub async fn set_read_marker_route( db.flush()?; - Ok(set_read_marker::Response {}.into()) + Ok(set_read_marker::Response {}) } /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` @@ -86,7 +86,7 @@ pub async fn set_read_marker_route( pub async fn create_receipt_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.edus.private_read_set( @@ -128,5 +128,5 @@ pub async fn create_receipt_route( db.flush()?; - Ok(create_receipt::Response {}.into()) + Ok(create_receipt::Response {}) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 2b442fc..0a343e5 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ api::client::r0::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, EventType}, @@ -17,7 +17,7 @@ use serde_json::value::to_raw_value; 
pub async fn redact_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -53,5 +53,5 @@ pub async fn redact_event_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(redact_event::Response { event_id }.into()) + Ok(redact_event::Response { event_id }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 441e33d..680ad5a 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils::HtmlEscape, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, @@ -13,7 +13,7 @@ use ruma::{ pub async fn report_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pdu = match db.rooms.get_pdu(&body.event_id)? { @@ -69,5 +69,5 @@ pub async fn report_event_route( db.flush()?; - Ok(report_content::Response {}.into()) + Ok(report_content::Response {}) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 475c5b4..4640cda 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,6 +1,5 @@ use crate::{ - client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error, - Ruma, + client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -50,7 +49,7 @@ use tracing::{info, warn}; pub async fn create_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -410,7 +409,7 @@ pub async fn create_room_route( db.flush()?; - Ok(create_room::Response::new(room_id).into()) + Ok(create_room::Response::new(room_id)) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` @@ -422,7 +421,7 @@ pub async fn create_room_route( pub async fn get_room_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -438,8 +437,7 @@ pub async fn get_room_event_route( .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? .to_room_event(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` @@ -451,7 +449,7 @@ pub async fn get_room_event_route( pub async fn get_room_aliases_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -467,8 +465,7 @@ pub async fn get_room_aliases_route( .room_aliases(&body.room_id) .filter_map(|a| a.ok()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` @@ -485,7 +482,7 @@ pub async fn get_room_aliases_route( pub async fn upgrade_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { @@ -709,5 +706,5 @@ pub async fn upgrade_room_route( db.flush()?; // Return the replacement room id - Ok(upgrade_room::Response { replacement_room }.into()) + Ok(upgrade_room::Response { replacement_room }) } diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 3f8a701..78ac51a 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; @@ -13,7 +13,7 @@ use std::collections::BTreeMap; pub async fn search_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -111,6 +111,5 @@ pub async fn search_events_route( .map(str::to_lowercase) .collect(), }, - }) - .into()) + })) } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 264eac0..dbcd28c 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -26,13 +26,10 @@ struct Claims { #[tracing::instrument(skip(_body))] pub async fn get_login_types_route( _body: Ruma, -) -> ConduitResult { - Ok( - get_login_types::Response::new(vec![get_login_types::LoginType::Password( - Default::default(), - )]) - .into(), - ) +) -> Result { + Ok(get_login_types::Response::new(vec![ + get_login_types::LoginType::Password(Default::default()), + ])) } /// # `POST /_matrix/client/r0/login` @@ -50,7 +47,7 @@ pub async fn get_login_types_route( pub async fn login_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -155,8 +152,7 @@ pub async fn login_route( home_server: Some(db.globals.server_name().to_owned()), device_id, well_known: None, - } - .into()) + }) } /// # `POST /_matrix/client/r0/logout` @@ -171,7 +167,7 @@ pub async fn login_route( pub async fn logout_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -179,7 +175,7 @@ pub async fn logout_route( db.flush()?; - Ok(logout::Response::new().into()) + Ok(logout::Response::new()) } /// # `POST /_matrix/client/r0/logout/all` @@ -197,7 +193,7 @@ pub async fn logout_route( pub async fn logout_all_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); for device_id in db.users.all_device_ids(sender_user).flatten() { @@ -206,5 +202,5 @@ pub async fn logout_all_route( db.flush()?; - Ok(logout_all::Response::new().into()) + Ok(logout_all::Response::new()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 96b2184..acc362f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::{ - database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, + database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse, }; use ruma::{ api::client::{ @@ -30,7 +30,7 @@ use ruma::{ pub async fn send_state_event_for_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( @@ -46,7 +46,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -60,7 +60,7 @@ pub async fn send_state_event_for_key_route( pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled @@ -96,7 +96,7 @@ pub async fn send_state_event_for_empty_key_route( pub async fn get_state_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -131,8 +131,7 @@ pub async fn get_state_events_route( .values() .map(|pdu| pdu.to_state_event()) .collect(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}` @@ -144,7 +143,7 @@ pub async fn get_state_events_route( pub async fn get_state_events_for_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -183,8 +182,7 @@ pub async fn get_state_events_for_key_route( Ok(get_state_events_for_key::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}` @@ -196,7 +194,7 @@ pub async fn get_state_events_for_key_route( pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6ba68b0..6410ce5 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::r0::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -58,7 +58,7 @@ use tracing::error; pub async fn sync_events_route( db: DatabaseGuard, body: Ruma>, -) -> Result, RumaResponse> { +) -> 
Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; @@ -132,7 +132,7 @@ async fn sync_helper_wrapper( sender_user: Box, sender_device: Box, body: sync_events::IncomingRequest, - tx: Sender>>, + tx: Sender>>, ) { let since = body.since.clone(); @@ -166,7 +166,7 @@ async fn sync_helper_wrapper( drop(db); - let _ = tx.send(Some(r.map(|(r, _)| r.into()))); + let _ = tx.send(Some(r.map(|(r, _)| r))); } async fn sync_helper( diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index cad3421..edf8690 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ api::client::r0::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -17,7 +17,7 @@ use std::collections::BTreeMap; pub async fn update_tag_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -43,7 +43,7 @@ pub async fn update_tag_route( db.flush()?; - Ok(create_tag::Response {}.into()) + Ok(create_tag::Response {}) } /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` @@ -55,7 +55,7 @@ pub async fn update_tag_route( pub async fn delete_tag_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -78,7 +78,7 @@ pub async fn delete_tag_route( db.flush()?; - Ok(delete_tag::Response {}.into()) + Ok(delete_tag::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` @@ -90,7 +90,7 @@ pub async fn delete_tag_route( pub async fn get_tags_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::Response { @@ -104,6 +104,5 @@ pub async fn get_tags_route( }) .content .tags, - } - .into()) + }) } diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index d8b7972..929503e 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,4 +1,4 @@ -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::api::client::r0::thirdparty::get_protocols; use std::collections::BTreeMap; @@ -9,10 +9,9 @@ use std::collections::BTreeMap; #[tracing::instrument(skip(_body))] pub async fn get_protocols_route( _body: Ruma, -) -> ConduitResult { +) -> Result { // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), - } - .into()) + }) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 1269118..9f67bf0 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::{ client::{error::ErrorKind, r0::to_device::send_event_to_device}, @@ -17,7 +17,7 @@ use ruma::{ pub async fn send_event_to_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -94,5 +94,5 @@ pub async fn send_event_to_device_route( db.flush()?; - 
Ok(send_event_to_device::Response {}.into()) + Ok(send_event_to_device::Response {}) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 3a61c58..6c1939a 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, utils, Result, Ruma}; use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; @@ -9,7 +9,7 @@ use ruma::api::client::r0::typing::create_typing_event; pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Typing::Yes(duration) = body.state { @@ -25,5 +25,5 @@ pub async fn create_typing_event_route( .typing_remove(sender_user, &body.room_id, &db.globals)?; } - Ok(create_typing_event::Response {}.into()) + Ok(create_typing_event::Response {}) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 8b1b66f..65becda 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::api::client::unversioned::get_supported_versions; /// # `GET /_matrix/client/versions` @@ -16,11 +16,11 @@ use ruma::api::client::unversioned::get_supported_versions; #[tracing::instrument(skip(_body))] pub async fn get_supported_versions_route( _body: Ruma, -) -> ConduitResult { +) -> Result { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index c923cee..a3df583 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::api::client::r0::user_directory::search_users; /// # `POST /_matrix/client/r0/user_directory/search` @@ -10,7 +10,7 @@ use ruma::api::client::r0::user_directory::search_users; pub async fn search_users_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { @@ -48,5 +48,5 @@ pub async fn search_users_route( let results = users.by_ref().take(limit).collect(); let limited = users.next().is_some(); - Ok(search_users::Response { results, limited }.into()) + Ok(search_users::Response { results, limited }) } diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 6abebdc..f3262ab 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,7 +1,6 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; -use ruma::api::client::r0::voip::get_turn_server_info; -use ruma::SecondsSinceUnixEpoch; +use ruma::{api::client::r0::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; @@ -14,7 +13,7 @@ type HmacSha1 = Hmac; pub async fn turn_server_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); let turn_secret = db.globals.turn_secret(); @@ -46,6 +45,5 @@ pub async fn turn_server_route( password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), - } - .into()) + }) } diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index d615713..2cf9d5e 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,6 +1,10 @@ use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; -use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; +use std::{ + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, diff --git a/src/database/globals.rs b/src/database/globals.rs index decd84c..1014511 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,4 +1,4 @@ -use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result}; +use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ client::r0::sync::sync_events, @@ -27,8 +27,8 @@ type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( - Option, // since - Receiver>>, // rx + Option, // since + Receiver>>, // rx ); pub struct Globals { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index b0c8d6d..b2244b5 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,6 +1,7 @@ -use std::collections::BTreeMap; -use std::sync::Arc; -use std::sync::RwLock; +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, +}; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ diff --git a/src/lib.rs b/src/lib.rs index 135ab85..c35a129 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,4 +22,4 @@ pub use config::Config; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; +pub use ruma_wrapper::{Ruma, RumaResponse}; diff --git a/src/main.rs b/src/main.rs index 53b1825..40122cf 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,7 +11,6 @@ use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, - handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -25,10 +24,7 @@ use http::{ Method, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{ - api::{IncomingRequest, Metadata}, - Outgoing, -}; +use ruma::{api::IncomingRequest, Outgoing}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -353,25 +349,15 @@ impl RouterExt for Router { H: RumaHandler, T: 'static, { - let meta = H::METADATA; - let method_filter = match meta.method { - Method::DELETE => MethodFilter::DELETE, - Method::GET => MethodFilter::GET, - Method::HEAD => MethodFilter::HEAD, - Method::OPTIONS => MethodFilter::OPTIONS, - Method::PATCH => MethodFilter::PATCH, - Method::POST => MethodFilter::POST, - Method::PUT => MethodFilter::PUT, - Method::TRACE => MethodFilter::TRACE, - m => panic!("Unsupported HTTP method: {:?}", m), - }; - - self.route(meta.path, on(method_filter, handler)) + handler.add_to_router(self) } } -pub trait RumaHandler: Handler { - const METADATA: Metadata; +pub trait RumaHandler { + // Can't transform to a handler without boxing or relying on 
the nightly-only + // impl-trait-in-traits feature. Moving a small amount of extra logic into the trait + // allows bypassing both. + fn add_to_router(self, router: Router) -> Router; } macro_rules! impl_ruma_handler { @@ -380,17 +366,22 @@ macro_rules! impl_ruma_handler { #[allow(non_snake_case)] impl RumaHandler<($($ty,)* Ruma,)> for F where - Req: Outgoing, + Req: Outgoing + 'static, Req::Incoming: IncomingRequest + Send, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse>, - E, - >> + Send, + Fut: Future::OutgoingResponse, E>> + + Send, E: IntoResponse, - $( $ty: FromRequest + Send, )* + $( $ty: FromRequest + Send + 'static, )* { - const METADATA: Metadata = Req::Incoming::METADATA; + fn add_to_router(self, router: Router) -> Router { + let meta = Req::Incoming::METADATA; + let method_filter = method_to_filter(meta.method); + + router.route(meta.path, on(method_filter, |$( $ty: $ty, )* req| async move { + self($($ty,)* req).await.map(RumaResponse) + })) + } } }; } @@ -404,3 +395,18 @@ impl_ruma_handler!(T1, T2, T3, T4, T5); impl_ruma_handler!(T1, T2, T3, T4, T5, T6); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); + +fn method_to_filter(method: Method) -> MethodFilter { + let method_filter = match method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + m => panic!("Unsupported HTTP method: {:?}", m), + }; + method_filter +} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 12be79a..ee89cc2 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -29,9 +29,6 @@ impl Deref for Ruma { } } -/// This struct converts ruma structs to http responses. -pub type ConduitResult = Result, Error>; - #[derive(Clone)] pub struct RumaResponse(pub T); diff --git a/src/server_server.rs b/src/server_server.rs index fc3681b..e17449e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,7 +2,7 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, database::{rooms::CompressedStateEvent, DatabaseGuard}, pdu::EventHash, - utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, + utils, Database, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -494,7 +494,7 @@ async fn request_well_known( pub async fn get_server_version_route( db: DatabaseGuard, _body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -504,8 +504,7 @@ pub async fn get_server_version_route( name: Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), }), - } - .into()) + }) } /// # `GET /_matrix/key/v2/server` @@ -577,7 +576,7 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -590,8 +589,7 @@ pub async fn get_public_rooms_filtered_route( &body.filter, &body.room_network, ) - .await? 
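The src/main.rs hunk above is the other half of that change: RumaHandler no longer exposes a METADATA const for RouterExt to interpret; each generated impl now registers itself through add_to_router and wraps the handler's Ok value in RumaResponse at that point. Roughly, for a handler taking one DatabaseGuard plus the Ruma request extractor, the impl produced by impl_ruma_handler! boils down to the following (simplified from the macro body above, not compilable on its own):

    fn add_to_router(self, router: Router) -> Router {
        let meta = Req::Incoming::METADATA;
        router.route(
            meta.path,
            on(method_to_filter(meta.method), |db: DatabaseGuard, req| async move {
                // Handlers now return Result<T>; the wrapper is applied here instead.
                self(db, req).await.map(RumaResponse)
            }),
        )
    }
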
- .0; + .await?; Ok(get_public_rooms_filtered::v1::Response { chunk: response @@ -609,8 +607,7 @@ pub async fn get_public_rooms_filtered_route( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/publicRooms` @@ -620,7 +617,7 @@ pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -633,8 +630,7 @@ pub async fn get_public_rooms_route( &IncomingFilter::default(), &IncomingRoomNetwork::Matrix, ) - .await? - .0; + .await?; Ok(get_public_rooms::v1::Response { chunk: response @@ -652,8 +648,7 @@ pub async fn get_public_rooms_route( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `PUT /_matrix/federation/v1/send/{txnId}` @@ -663,7 +658,7 @@ pub async fn get_public_rooms_route( pub async fn send_transaction_message_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -875,7 +870,7 @@ pub async fn send_transaction_message_route( db.flush()?; - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } /// An async function that can recursively call itself. @@ -2293,7 +2288,7 @@ fn get_auth_chain_inner( pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2327,8 +2322,7 @@ pub async fn get_event_route( origin: db.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event(event), - } - .into()) + }) } /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` @@ -2338,7 +2332,7 @@ pub async fn get_event_route( pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2400,7 +2394,7 @@ pub async fn get_missing_events_route( i += 1; } - Ok(get_missing_events::v1::Response { events }.into()) + Ok(get_missing_events::v1::Response { events }) } /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` @@ -2412,7 +2406,7 @@ pub async fn get_missing_events_route( pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2451,8 +2445,7 @@ pub async fn get_event_authorization_route( .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) 
.map(PduEvent::convert_to_outgoing_federation_event) .collect(), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/state/{roomId}` @@ -2462,7 +2455,7 @@ pub async fn get_event_authorization_route( pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2512,8 +2505,7 @@ pub async fn get_room_state_route( .filter_map(|r| r.ok()) .collect(), pdus, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/state_ids/{roomId}` @@ -2523,7 +2515,7 @@ pub async fn get_room_state_route( pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2562,8 +2554,7 @@ pub async fn get_room_state_ids_route( Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), pdu_ids, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` @@ -2573,7 +2564,7 @@ pub async fn get_room_state_ids_route( pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2738,8 +2729,7 @@ pub async fn create_join_event_template_route( Ok(create_join_event_template::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - } - .into()) + }) } async fn create_join_event( @@ -2855,7 +2845,7 @@ async fn create_join_event( pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_servername = body .sender_servername .as_ref() @@ -2863,7 +2853,7 @@ pub async fn create_join_event_v1_route( let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v1::Response { room_state }.into()) + Ok(create_join_event::v1::Response { room_state }) } /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` @@ -2873,7 +2863,7 @@ pub async fn create_join_event_v1_route( pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_servername = body .sender_servername .as_ref() @@ -2881,7 +2871,7 @@ pub async fn create_join_event_v2_route( let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v2::Response { room_state }.into()) + Ok(create_join_event::v2::Response { room_state }) } /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` @@ -2891,7 +2881,7 @@ pub async fn create_join_event_v2_route( pub async fn create_invite_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2992,8 +2982,7 @@ pub async fn create_invite_route( Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/user/devices/{userId}` @@ -3003,7 +2992,7 @@ pub async fn create_invite_route( pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3031,8 +3020,7 @@ pub 
async fn get_devices_route( }) }) .collect(), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/query/directory` @@ -3042,7 +3030,7 @@ pub async fn get_devices_route( pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3058,8 +3046,7 @@ pub async fn get_room_information_route( Ok(get_room_information::v1::Response { room_id, servers: vec![db.globals.server_name().to_owned()], - } - .into()) + }) } /// # `GET /_matrix/federation/v1/query/profile` @@ -3069,7 +3056,7 @@ pub async fn get_room_information_route( pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3097,8 +3084,7 @@ pub async fn get_profile_information_route( blurhash, displayname, avatar_url, - } - .into()) + }) } /// # `POST /_matrix/federation/v1/user/keys/query` @@ -3108,7 +3094,7 @@ pub async fn get_profile_information_route( pub async fn get_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3127,8 +3113,7 @@ pub async fn get_keys_route( device_keys: result.device_keys, master_keys: result.master_keys, self_signing_keys: result.self_signing_keys, - } - .into()) + }) } /// # `POST /_matrix/federation/v1/user/keys/claim` @@ -3138,7 +3123,7 @@ pub async fn get_keys_route( pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3149,8 +3134,7 @@ pub async fn claim_keys_route( Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys, - } - .into()) + }) } #[tracing::instrument(skip(event, pub_key_map, db))] From c8951a1d9cc05a8c138be06f520a78b4cbb053c7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 18:38:39 +0100 Subject: [PATCH 1012/1727] Use axum-server for direct TLS support --- Cargo.lock | 28 +++++++++++++++++++++++++++- Cargo.toml | 2 +- src/config.rs | 8 ++++++++ src/main.rs | 29 +++++++++++++++++++++-------- 4 files changed, 57 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f84c982..41105b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,6 +58,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "arc-swap" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" + [[package]] name = "arrayref" version = "0.3.6" @@ -162,6 +168,26 @@ dependencies = [ "mime", ] +[[package]] +name = "axum-server" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cfd9dbe28ebde5c0460067ea27c6f3b1d514b699c4e0a5aab0fb63e452a8a8" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "base64" version = "0.12.3" @@ -365,6 +391,7 @@ name = "conduit" version = "0.3.0" dependencies = [ "axum", + "axum-server", "base64 0.13.0", "bytes", "clap", @@ -375,7 +402,6 @@ dependencies = [ "heed", "hmac", "http", - "hyper", "image", "jsonwebtoken", "lru-cache", diff --git a/Cargo.toml b/Cargo.toml 
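The Cargo.toml, src/config.rs and src/main.rs hunks below wire axum-server in: TLS becomes an optional `tls` section with `certs` and `key` paths, and `run_server` picks `bind_rustls` or plain `bind` depending on whether that section is present. A minimal sketch of how such an optional section deserializes, assuming the toml crate is used directly (Conduit itself loads its config through figment) and with purely hypothetical file paths:

```rust
use std::net::IpAddr;

use serde::Deserialize;

// Mirrors the TlsConfig struct added to src/config.rs in this patch.
#[derive(Clone, Debug, Deserialize)]
struct TlsConfig {
    certs: String,
    key: String,
}

// Cut-down stand-in for the real Config, limited to the listener fields.
#[derive(Debug, Deserialize)]
struct ListenConfig {
    address: IpAddr,
    port: u16,
    tls: Option<TlsConfig>,
}

fn main() {
    // Hypothetical conduit.toml excerpt; leaving out the [tls] table keeps
    // `tls` at None, which makes run_server fall back to plain-HTTP bind().
    let excerpt = r#"
        address = "0.0.0.0"
        port = 8448

        [tls]
        certs = "/etc/conduit/fullchain.pem"
        key = "/etc/conduit/privkey.pem"
    "#;

    let cfg: ListenConfig = toml::from_str(excerpt).expect("valid TOML excerpt");
    assert_eq!(cfg.address.to_string(), "0.0.0.0");
    assert_eq!(cfg.port, 8448);
    assert!(cfg.tls.is_some());
}
```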
index 5fb75dc..6dedfa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ edition = "2021" [dependencies] # Web framework axum = { version = "0.4.4", features = ["headers"], optional = true } -hyper = "0.14.16" +axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } diff --git a/src/config.rs b/src/config.rs index 48ac981..155704b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -17,6 +17,8 @@ pub struct Config { pub address: IpAddr, #[serde(default = "default_port")] pub port: u16, + pub tls: Option, + pub server_name: Box, #[serde(default = "default_database_backend")] pub database_backend: String, @@ -69,6 +71,12 @@ pub struct Config { pub catchall: BTreeMap, } +#[derive(Clone, Debug, Deserialize)] +pub struct TlsConfig { + pub certs: String, + pub key: String, +} + const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; impl Config { diff --git a/src/main.rs b/src/main.rs index 40122cf..22ddf3e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,7 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; +use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, @@ -15,6 +15,7 @@ use axum::{ routing::{get, on, MethodFilter}, Router, }; +use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; use figment::{ providers::{Env, Format, Toml}, Figment, @@ -117,8 +118,8 @@ async fn main() { } } -async fn run_server(config: &Config, db: Arc>) -> hyper::Result<()> { - let listen_addr = SocketAddr::from((config.address, config.port)); +async fn run_server(config: &Config, db: Arc>) -> io::Result<()> { + let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); @@ -157,10 +158,20 @@ async fn run_server(config: &Config, db: Arc>) -> hyper::Result ) .add_extension(db.clone()); - axum::Server::bind(&listen_addr) - .serve(routes().layer(middlewares).into_make_service()) - .with_graceful_shutdown(shutdown_signal()) - .await?; + let app = routes().layer(middlewares).into_make_service(); + let handle = ServerHandle::new(); + + tokio::spawn(shutdown_signal(handle.clone())); + + match &config.tls { + Some(tls) => { + let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; + bind_rustls(addr, conf).handle(handle).serve(app).await?; + } + None => { + bind(addr).handle(handle).serve(app).await?; + } + } // After serve exits and before exiting, shutdown the DB Database::on_shutdown(db).await; @@ -312,7 +323,7 @@ fn routes() -> Router { .ruma_route(server_server::claim_keys_route) } -async fn shutdown_signal() { +async fn shutdown_signal(handle: ServerHandle) { let ctrl_c = async { signal::ctrl_c() .await @@ -334,6 +345,8 @@ async fn shutdown_signal() { _ = ctrl_c => {}, _ = terminate => {}, } + + handle.graceful_shutdown(Some(Duration::from_secs(30))); } trait RouterExt { From 21ae63d46b744a73a3497dddde2e336993981b38 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 12:32:18 +0100 Subject: [PATCH 1013/1727] Rewrite query parameter parsing --- src/ruma_wrapper/axum.rs | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index 
d2cf3f1..cec8212 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -18,7 +18,8 @@ use ruma::{ signatures::CanonicalJsonValue, DeviceId, Outgoing, ServerName, UserId, }; -use tracing::{debug, warn}; +use serde::Deserialize; +use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; use crate::{database::DatabaseGuard, server_server, Error, Result}; @@ -35,18 +36,31 @@ where type Rejection = Error; async fn from_request(req: &mut RequestParts) -> Result { + #[derive(Deserialize)] + struct QueryParams { + access_token: Option, + user_id: Option, + } + let metadata = T::Incoming::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; - // FIXME: Do this more efficiently - let query: BTreeMap = - ruma::serde::urlencoded::from_str(req.uri().query().unwrap_or_default()) - .expect("Query to string map deserialization should be fine"); + let query = req.uri().query().unwrap_or_default(); + let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { + Ok(params) => params, + Err(e) => { + error!(%query, "Failed to deserialize query parameters: {}", e); + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Failed to read query parameters", + )); + } + }; let token = match &auth_header { Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), - None => query.get("access_token").map(|tok| tok.as_str()), + None => query_params.access_token.as_deref(), }; let mut body = Bytes::from_request(req) @@ -67,7 +81,7 @@ where if let Some((_id, registration)) = appservice_registration { match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = query.get("user_id").map_or_else( + let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( registration @@ -79,7 +93,7 @@ where ) .unwrap() }, - |s| UserId::parse(s.as_str()).unwrap(), + |s| UserId::parse(s).unwrap(), ); if !db.users.exists(&user_id).unwrap() { From 5d8c80b170292bf444c018fd5a73eaade87b171d Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 14:01:44 +0100 Subject: [PATCH 1014/1727] Strip quotes from X-Matrix fields --- src/ruma_wrapper/axum.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index cec8212..c4e1d29 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -315,6 +315,13 @@ impl Credentials for XMatrix { for entry in parameters.split_terminator(',') { let (name, value) = entry.split_once('=')?; + // It's not at all clear why some fields are quoted and others not in the spec, + // let's simply accept either form for every field. 
+ let value = value + .strip_prefix('"') + .and_then(|rest| rest.strip_suffix('"')) + .unwrap_or(value); + // FIXME: Catch multiple fields of the same name match name { "origin" => origin = Some(value.try_into().ok()?), From 9db0473ed5926ee962652be0643794241773df8e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 14:03:38 +0100 Subject: [PATCH 1015/1727] Improve error messages in Ruma wrapper FromRequest impl --- src/ruma_wrapper/axum.rs | 64 +++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index c4e1d29..7178619 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -3,7 +3,7 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{FromRequest, RequestParts, TypedHeader}, + extract::{rejection::TypedHeaderRejectionReason, FromRequest, RequestParts, TypedHeader}, headers::{ authorization::{Bearer, Credentials}, Authorization, @@ -97,7 +97,10 @@ where ); if !db.users.exists(&user_id).unwrap() { - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User does not exist.", + )); } // TODO: Check if appservice is allowed to be that user @@ -111,11 +114,21 @@ where AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { let token = match token { Some(token) => token, - _ => return Err(missing_token()), + _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing access token.", + )) + } }; match db.users.find_from_token(token).unwrap() { - None => return Err(unknown_token()), + None => { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown access token.", + )) + } Some((user_id, device_id)) => ( Some(user_id), Some(Box::::from(device_id)), @@ -130,7 +143,17 @@ where .await .map_err(|e| { warn!("Missing or invalid Authorization header: {}", e); - forbidden() + + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." + } + }; + + Error::BadRequest(ErrorKind::Forbidden, msg) })?; let origin_signatures = BTreeMap::from_iter([( @@ -183,7 +206,10 @@ where Ok(b) => b, Err(e) => { warn!("Failed to fetch signing keys: {}", e); - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to fetch signing keys.", + )); } }; @@ -206,7 +232,10 @@ where ); } - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to verify X-Matrix signatures.", + )); } } } @@ -255,7 +284,7 @@ where let body = ::try_from_http_request(http_request).map_err(|e| { warn!("{:?}", e); - bad_json() + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; Ok(Ruma { @@ -269,25 +298,6 @@ where } } -fn forbidden() -> Error { - Error::BadRequest(ErrorKind::Forbidden, "Forbidden.") -} - -fn unknown_token() -> Error { - Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown token.", - ) -} - -fn missing_token() -> Error { - Error::BadRequest(ErrorKind::MissingToken, "Missing token.") -} - -fn bad_json() -> Error { - Error::BadRequest(ErrorKind::BadJson, "Bad json.") -} - struct XMatrix { origin: Box, key: String, // KeyName? 
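The query-parameter rewrite a couple of patches above replaces the untyped string map with a struct deserialized by serde, so unknown parameters are ignored and the two fields the wrapper actually needs come out already typed. A standalone sketch of the same pattern, using the serde_urlencoded crate directly rather than ruma's re-export and with made-up values:

```rust
use serde::Deserialize;

// Same shape as the QueryParams struct introduced in src/ruma_wrapper/axum.rs:
// only the parameters the extractor cares about are declared.
#[derive(Debug, Deserialize)]
struct QueryParams {
    access_token: Option<String>,
    user_id: Option<String>,
}

fn main() {
    // Hypothetical query string; in the patch this comes from req.uri().query().
    let query = "access_token=syt_abc123&user_id=%40alice%3Aexample.org&foo=ignored";

    match serde_urlencoded::from_str::<QueryParams>(query) {
        Ok(params) => {
            assert_eq!(params.access_token.as_deref(), Some("syt_abc123"));
            // Percent-encoding is decoded for us.
            assert_eq!(params.user_id.as_deref(), Some("@alice:example.org"));
        }
        // The patch turns this case into Error::BadRequest(ErrorKind::Unknown, ...)
        // instead of the old expect() panic.
        Err(e) => eprintln!("Failed to deserialize query parameters: {}", e),
    }
}
```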
From 50b24b37c264cad5c0281ebb05eeddf6095cbdf8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 02:06:30 +0100 Subject: [PATCH 1016/1727] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/client_server/context.rs | 9 +++------ src/client_server/search.rs | 2 +- src/error.rs | 3 +++ src/ruma_wrapper/axum.rs | 14 ++++++++------ 6 files changed, 34 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41105b3..042f6f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "bytes", "http", @@ -2118,7 +2118,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2129,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "ruma-api", "ruma-common", @@ -2143,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "assign", "bytes", @@ -2163,7 +2163,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "indexmap", "js_int", @@ -2178,7 +2178,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "indoc", "js_int", @@ -2195,7 +2195,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = 
"git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2206,7 +2206,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2221,7 +2221,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2236,7 +2236,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2246,7 +2246,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "thiserror", ] @@ -2254,7 +2254,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2267,7 +2267,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2282,7 +2282,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "base64 0.13.0", "bytes", @@ -2297,7 +2297,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2308,7 +2308,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2325,7 +2325,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 6dedfa8..8ce097d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c2a05d407d80df1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f130d09daabf021ad30750eed89483a0f45f820a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 3d884e0..6f3e777 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -23,13 +23,10 @@ pub async fn get_context_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - // Load filter - let filter = body.filter.clone().unwrap_or_default(); - - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.lazy_load_options { + let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { LazyLoadOptions::Enabled { - include_redundant_members: redundant, - } => (true, redundant), + include_redundant_members, + } => (true, *include_redundant_members), _ => (false, false), }; diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 78ac51a..067eddc 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -17,7 +17,7 @@ pub async fn search_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = search_criteria.filter.clone().unwrap_or_default(); + let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { 
db.rooms diff --git a/src/error.rs b/src/error.rs index 817ef50..a16a3ab 100644 --- a/src/error.rs +++ b/src/error.rs @@ -77,6 +77,9 @@ pub enum Error { #[cfg(feature = "conduit_bin")] #[error("{0}")] ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + PathError(#[from] axum::extract::rejection::PathRejection), } impl Error { diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index 7178619..d8e7f51 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -3,7 +3,9 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{rejection::TypedHeaderRejectionReason, FromRequest, RequestParts, TypedHeader}, + extract::{ + rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader, + }, headers::{ authorization::{Bearer, Credentials}, Authorization, @@ -45,6 +47,7 @@ where let metadata = T::Incoming::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; + let path_params = Path::>::from_request(req).await?; let query = req.uri().query().unwrap_or_default(); let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { @@ -281,11 +284,10 @@ where debug!("{:?}", http_request); - let body = - ::try_from_http_request(http_request).map_err(|e| { - warn!("{:?}", e); - Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") - })?; + let body = T::Incoming::try_from_http_request(http_request, &path_params).map_err(|e| { + warn!("{:?}", e); + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") + })?; Ok(Ruma { body, From ce714cfd07c95843d18a90f5596bc4597d4b5577 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 13:20:55 +0100 Subject: [PATCH 1017/1727] Bump version --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 042f6f3..a56103e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -388,7 +388,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.3.0" +version = "0.3.0-next" dependencies = [ "axum", "axum-server", diff --git a/Cargo.toml b/Cargo.toml index 8ce097d..aa6bdbb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.3.0" +version = "0.3.0-next" rust-version = "1.56" edition = "2021" From d74074ad53e7a02fde5c78600ea47e96bf061826 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:01:28 +0100 Subject: [PATCH 1018/1727] Remove tracing::instrument attribute from util functions They don't ever log anything, so the extra context is never used. --- src/utils.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 7142b3f..1ad0aa3 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -8,7 +8,6 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; -#[tracing::instrument] pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) @@ -39,19 +38,16 @@ pub fn generate_keypair() -> Vec { } /// Parses the bytes into an u64. -#[tracing::instrument(skip(bytes))] pub fn u64_from_bytes(bytes: &[u8]) -> Result { let array: [u8; 8] = bytes.try_into()?; Ok(u64::from_be_bytes(array)) } /// Parses the bytes into a string. 
-#[tracing::instrument(skip(bytes))] pub fn string_from_bytes(bytes: &[u8]) -> Result { String::from_utf8(bytes.to_vec()) } -#[tracing::instrument(skip(length))] pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) @@ -61,7 +57,6 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -#[tracing::instrument(skip(password))] pub fn calculate_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, @@ -72,7 +67,6 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -#[tracing::instrument(skip(iterators, check_order))] pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, @@ -100,7 +94,6 @@ pub fn common_elements( /// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. /// /// `value` must serialize to an `serde_json::Value::Object`. -#[tracing::instrument(skip(value))] pub fn to_canonical_object( value: T, ) -> Result { @@ -114,7 +107,6 @@ pub fn to_canonical_object( } } -#[tracing::instrument(skip(deserializer))] pub fn deserialize_from_str< 'de, D: serde::de::Deserializer<'de>, From adeb8ee425176643e49d1817b95d9d8cdee325e8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:03:07 +0100 Subject: [PATCH 1019/1727] Remove no-op conversions --- src/server_server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e17449e..42e44c6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -814,7 +814,7 @@ pub async fn send_transaction_message_route( // Check if this is a new transaction id if db .transaction_ids - .existing_txnid(&sender, None, (&*message_id).into())? + .existing_txnid(&sender, None, &message_id)? .is_some() { continue; @@ -862,7 +862,7 @@ pub async fn send_transaction_message_route( // Save transaction id with empty data db.transaction_ids - .add_txnid(&sender, None, (&*message_id).into(), &[])?; + .add_txnid(&sender, None, &message_id, &[])?; } Edu::_Custom(_) => {} } From 2a00c547a1baca5e2ca57966ef5ce5c7f063f367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 8 Feb 2022 09:25:44 +0100 Subject: [PATCH 1020/1727] improvement: faster /syncs --- src/client_server/sync.rs | 61 ++++++++++++++++++++++----------------- src/database.rs | 1 + src/database/rooms.rs | 37 +++++++++++++++++++++++- 3 files changed, 71 insertions(+), 28 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7cfea5a..1ccf798 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -245,30 +245,41 @@ async fn sync_helper( let insert_lock = mutex_insert.lock().unwrap(); drop(insert_lock); - let mut non_timeline_pdus = db - .rooms - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pduid, _)| { - db.rooms - .pdu_count(pduid) - .map_or(false, |count| count > since) - }); + let timeline_pdus; + let limited; + if db.rooms.last_timeline_count(&sender_user, &room_id)? > since { + let mut non_timeline_pdus = db + .rooms + .pdus_until(&sender_user, &room_id, u64::MAX)? 
+ .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pduid, _)| { + db.rooms + .pdu_count(pduid) + .map_or(false, |count| count > since) + }); - // Take the last 10 events for the timeline - let timeline_pdus: Vec<_> = non_timeline_pdus - .by_ref() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect(); + // Take the last 10 events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } let send_notification_counts = !timeline_pdus.is_empty() || db @@ -277,10 +288,6 @@ async fn sync_helper( .last_privateread_update(&sender_user, &room_id)? > since; - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - let limited = non_timeline_pdus.next().is_some(); - let mut timeline_users = HashSet::new(); for (_, event) in &timeline_pdus { timeline_users.insert(event.sender.as_str().to_owned()); diff --git a/src/database.rs b/src/database.rs index 2b1671c..8e95b1e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -263,6 +263,7 @@ impl Database { stateinfo_cache: Mutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0abd2e7..17c9b74 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -32,7 +32,7 @@ use serde::Deserialize; use serde_json::value::to_raw_value; use std::{ borrow::Cow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{hash_map, BTreeMap, HashMap, HashSet}, fmt::Debug, iter, mem::size_of, @@ -128,6 +128,7 @@ pub struct Rooms { )>, >, >, + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl Rooms { @@ -1331,6 +1332,10 @@ impl Rooms { &pdu_id, &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), count2); self.eventid_pduid .insert(pdu.event_id.as_bytes(), &pdu_id)?; @@ -1498,6 +1503,36 @@ impl Rooms { Ok(pdu_id) } + #[tracing::instrument(skip(self))] + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache + .lock() + .unwrap() + .entry(room_id.to_owned()) + { + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(&sender_user, &room_id, u64::MAX)? 
+ .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() + { + Ok(*v.insert(last_count?)) + } else { + Ok(0) + } + } + hash_map::Entry::Occupied(o) => Ok(*o.get()), + } + } + #[tracing::instrument(skip(self))] pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); From accdb773158e78801548b696121b1f60e5bf264f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:03:30 +0100 Subject: [PATCH 1021/1727] Clean up tracing::instrument attributes Remove it from request handler since there's already the context of the request path, added through TraceLayer. --- src/appservice_server.rs | 1 + src/client_server/account.rs | 5 ----- src/client_server/alias.rs | 3 --- src/client_server/backup.rs | 14 ------------ src/client_server/capabilities.rs | 1 - src/client_server/config.rs | 4 ---- src/client_server/context.rs | 1 - src/client_server/device.rs | 5 ----- src/client_server/directory.rs | 4 ---- src/client_server/filter.rs | 2 -- src/client_server/keys.rs | 6 ----- src/client_server/media.rs | 5 ----- src/client_server/membership.rs | 11 ---------- src/client_server/message.rs | 2 -- src/client_server/presence.rs | 2 -- src/client_server/profile.rs | 5 ----- src/client_server/push.rs | 10 --------- src/client_server/read_marker.rs | 2 -- src/client_server/redact.rs | 1 - src/client_server/report.rs | 1 - src/client_server/room.rs | 4 ---- src/client_server/search.rs | 1 - src/client_server/session.rs | 4 ---- src/client_server/state.rs | 5 ----- src/client_server/sync.rs | 1 - src/client_server/tag.rs | 3 --- src/client_server/thirdparty.rs | 1 - src/client_server/to_device.rs | 1 - src/client_server/typing.rs | 1 - src/client_server/unversioned.rs | 1 - src/client_server/user_directory.rs | 1 - src/client_server/voip.rs | 1 - src/server_server.rs | 34 +++++------------------------ 33 files changed, 7 insertions(+), 136 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index b2154b8..8d6d052 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -4,6 +4,7 @@ use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; +#[tracing::instrument(skip(globals, request))] pub(crate) async fn send_request( globals: &crate::database::globals::Globals, registration: serde_yaml::Value, diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 2b2e6e6..c15d820 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -40,7 +40,6 @@ const GUEST_NAME_LENGTH: usize = 10; /// - No user or appservice on this server already claimed this username /// /// Note: This will not reserve the username, so the username might become invalid when trying to register -#[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: DatabaseGuard, body: Ruma>, @@ -84,7 +83,6 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -#[tracing::instrument(skip(db, body))] pub async fn register_route( db: DatabaseGuard, body: Ruma>, @@ -267,7 +265,6 @@ pub async fn register_route( /// - Deletes 
device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: DatabaseGuard, body: Ruma>, @@ -332,7 +329,6 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -#[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { @@ -350,7 +346,6 @@ pub async fn whoami_route(body: Ruma) -> Result>, diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index eecd72a..509372c 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -15,7 +15,6 @@ use ruma::{ /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. -#[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: DatabaseGuard, body: Ruma>, @@ -45,7 +44,6 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -#[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: DatabaseGuard, body: Ruma>, @@ -71,7 +69,6 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. /// /// - TODO: Suggest more servers to join via -#[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index acff437..14c239b 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -12,7 +12,6 @@ use ruma::api::client::{ /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -#[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: DatabaseGuard, body: Ruma, @@ -30,7 +29,6 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -#[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +45,6 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. -#[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: DatabaseGuard, body: Ruma, @@ -73,7 +70,6 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: DatabaseGuard, body: Ruma>, @@ -100,7 +96,6 @@ pub async fn get_backup_route( /// Delete an existing key backup. 
/// /// - Deletes both information about the backup, as well as all key data related to the backup -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: DatabaseGuard, body: Ruma>, @@ -121,7 +116,6 @@ pub async fn delete_backup_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -168,7 +162,6 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -213,7 +206,6 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: DatabaseGuard, body: Ruma>, @@ -252,7 +244,6 @@ pub async fn add_backup_key_session_route( /// # `GET /_matrix/client/r0/room_keys/keys` /// /// Retrieves all keys from the backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -267,7 +258,6 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -284,7 +274,6 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: DatabaseGuard, body: Ruma>, @@ -305,7 +294,6 @@ pub async fn get_backup_key_session_route( /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -325,7 +313,6 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -346,7 +333,6 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 3f779dc..b1e072e 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -10,7 +10,6 @@ use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/capabilities` /// /// Get information on the supported feature set and other relevent capabilities of this server. 
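The attribute removals in this patch rely on the span that tower-http's TraceLayer already opens for every request: the request path is recorded there, so a per-handler #[tracing::instrument] adds no extra context. A rough sketch of that layering, with an illustrative handler body and route registration rather than Conduit's actual router setup:

```rust
use axum::{routing::get, Router};
use tower_http::trace::TraceLayer;

// Illustrative handler: events emitted here land inside the per-request span
// that TraceLayer creates, so no handler-level #[tracing::instrument] is needed.
async fn get_capabilities_route() -> &'static str {
    tracing::debug!("building capabilities response");
    "{}"
}

fn routes() -> Router {
    Router::new()
        .route("/_matrix/client/r0/capabilities", get(get_capabilities_route))
        // One span per request, carrying method and path, for every handler above.
        .layer(TraceLayer::new_for_http())
}

fn main() {
    // Build the router so the sketch type-checks; actually serving it is out of scope here.
    let _app = routes();
}
```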
-#[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 14a665e..83bb7a5 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,7 +16,6 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Sets some account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +46,6 @@ pub async fn set_global_account_data_route( /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -78,7 +76,6 @@ pub async fn set_room_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -100,7 +97,6 @@ pub async fn get_global_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 6f3e777..167d0cc 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -15,7 +15,6 @@ use tracing::error; /// /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index e35da97..76172d2 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -12,7 +12,6 @@ use super::SESSION_ID_LENGTH; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, body: Ruma, @@ -31,7 +30,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: DatabaseGuard, body: Ruma>, @@ -49,7 +47,6 @@ pub async fn get_device_route( /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. 
-#[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: DatabaseGuard, body: Ruma>, @@ -80,7 +77,6 @@ pub async fn update_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: DatabaseGuard, body: Ruma>, @@ -139,7 +135,6 @@ pub async fn delete_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 0f3ae30..75601fe 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -34,7 +34,6 @@ use tracing::{info, warn}; /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, @@ -55,7 +54,6 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, @@ -83,7 +81,6 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. /// /// - TODO: Access control checks -#[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( db: DatabaseGuard, body: Ruma>, @@ -112,7 +109,6 @@ pub async fn set_room_visibility_route( /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` /// /// Gets the visibility of a given room in the room directory. -#[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 28610ec..a606aeb 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -9,7 +9,6 @@ use ruma::api::client::{ /// Loads a filter that was previously created. /// /// - A user can only access their own filters -#[tracing::instrument(skip(db, body))] pub async fn get_filter_route( db: DatabaseGuard, body: Ruma>, @@ -26,7 +25,6 @@ pub async fn get_filter_route( /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// /// Creates a new filter to be used by other endpoints. -#[tracing::instrument(skip(db, body))] pub async fn create_filter_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index d272ff4..2ea62a8 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -27,7 +27,6 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) -#[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: DatabaseGuard, body: Ruma, @@ -72,7 +71,6 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. 
/// - The master and self-signing keys contain signatures that the user is allowed to see -#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, body: Ruma>, @@ -93,7 +91,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, @@ -110,7 +107,6 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. /// /// - Requires UIAA to verify password -#[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: DatabaseGuard, body: Ruma>, @@ -170,7 +166,6 @@ pub async fn upload_signing_keys_route( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -#[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: DatabaseGuard, body: Ruma, @@ -232,7 +227,6 @@ pub async fn upload_signatures_route( /// Gets a list of users who have updated their device identity keys since the previous sync token. /// /// - TODO: left users -#[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 615f760..dcdea05 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -15,7 +15,6 @@ const MXC_LENGTH: usize = 32; /// # `GET /_matrix/media/r0/config` /// /// Returns max upload size. -#[tracing::instrument(skip(db, _body))] pub async fn get_media_config_route( db: DatabaseGuard, _body: Ruma, @@ -31,7 +30,6 @@ pub async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -#[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: DatabaseGuard, body: Ruma>, @@ -101,7 +99,6 @@ pub async fn get_remote_content( /// Load media from our server or over federation. /// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: DatabaseGuard, body: Ruma>, @@ -133,7 +130,6 @@ pub async fn get_content_route( /// Load media from our server or over federation, permitting desired filename. /// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_as_filename_route( db: DatabaseGuard, body: Ruma>, @@ -170,7 +166,6 @@ pub async fn get_content_as_filename_route( /// Load media thumbnail from our server or over federation. 
/// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index efdf774..447f829 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -42,7 +42,6 @@ use tracing::{debug, error, warn}; /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: DatabaseGuard, body: Ruma>, @@ -83,7 +82,6 @@ pub async fn join_room_by_id_route( /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, body: Ruma>, @@ -136,7 +134,6 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -#[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: DatabaseGuard, body: Ruma>, @@ -153,7 +150,6 @@ pub async fn leave_room_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. -#[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: DatabaseGuard, body: Ruma>, @@ -172,7 +168,6 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -#[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: DatabaseGuard, body: Ruma>, @@ -232,7 +227,6 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -#[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: DatabaseGuard, body: Ruma>, @@ -303,7 +297,6 @@ pub async fn ban_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` /// /// Tries to send an unban event into the room. -#[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: DatabaseGuard, body: Ruma>, @@ -367,7 +360,6 @@ pub async fn unban_user_route( /// /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device -#[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: DatabaseGuard, body: Ruma>, @@ -384,7 +376,6 @@ pub async fn forget_room_route( /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. -#[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: DatabaseGuard, body: Ruma, @@ -405,7 +396,6 @@ pub async fn joined_rooms_route( /// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). 
/// /// - Only works if the user is currently joined -#[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: DatabaseGuard, body: Ruma>, @@ -437,7 +427,6 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -#[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c5982de..93d5b3b 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -18,7 +18,6 @@ use std::{ /// - Is a NOOP if the txn id was already used before and returns the same event id again /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed -#[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: DatabaseGuard, body: Ruma>, @@ -103,7 +102,6 @@ pub async fn send_message_event_route( /// /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index aedff55..7549b1a 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -5,7 +5,6 @@ use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// /// Sets the presence state of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +46,6 @@ pub async fn set_presence_route( /// Gets the presence state of the given user. /// /// - Only works if you share a room with the user -#[tracing::instrument(skip(db, body))] pub async fn get_presence_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index f520d2c..33bfbb5 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -19,7 +19,6 @@ use std::sync::Arc; /// Updates the displayname. /// /// - Also makes sure other users receive the update using presence EDUs -#[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: DatabaseGuard, body: Ruma>, @@ -117,7 +116,6 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. /// /// - If user is on another server: Fetches displayname over federation -#[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: DatabaseGuard, body: Ruma>, @@ -150,7 +148,6 @@ pub async fn get_displayname_route( /// Updates the avatar_url and blurhash. /// /// - Also makes sure other users receive the update using presence EDUs -#[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: DatabaseGuard, body: Ruma>, @@ -250,7 +247,6 @@ pub async fn set_avatar_url_route( /// Returns the avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches avatar_url and blurhash over federation -#[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: DatabaseGuard, body: Ruma>, @@ -285,7 +281,6 @@ pub async fn get_avatar_url_route( /// Returns the displayname, avatar_url and blurhash of the user. 
/// /// - If user is on another server: Fetches profile over federation -#[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 3bc46b8..67b70d2 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -15,7 +15,6 @@ use ruma::{ /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: DatabaseGuard, body: Ruma, @@ -38,7 +37,6 @@ pub async fn get_pushrules_all_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -91,7 +89,6 @@ pub async fn get_pushrule_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -188,7 +185,6 @@ pub async fn set_pushrule_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, @@ -245,7 +241,6 @@ pub async fn get_pushrule_actions_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, @@ -313,7 +308,6 @@ pub async fn set_pushrule_actions_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, @@ -373,7 +367,6 @@ pub async fn get_pushrule_enabled_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, @@ -446,7 +439,6 @@ pub async fn set_pushrule_enabled_route( /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -509,7 +501,6 @@ pub async fn delete_pushrule_route( /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( db: DatabaseGuard, body: Ruma, @@ -526,7 +517,6 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. 
/// /// - TODO: Handle `append` -#[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( db: DatabaseGuard, body: Ruma, diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index fa2627b..cc6928d 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -16,7 +16,6 @@ use std::collections::BTreeMap; /// /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU -#[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: DatabaseGuard, body: Ruma>, @@ -82,7 +81,6 @@ pub async fn set_read_marker_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` /// /// Sets private read marker and public read receipt EDU. -#[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 0a343e5..1e05bfe 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -13,7 +13,6 @@ use serde_json::value::to_raw_value; /// Tries to send a redaction event into the room. /// /// - TODO: Handle txn id -#[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 680ad5a..6274172 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -9,7 +9,6 @@ use ruma::{ /// /// Reports an inappropriate event to homeserver admins /// -#[tracing::instrument(skip(db, body))] pub async fn report_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 4640cda..54559e2 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -45,7 +45,6 @@ use tracing::{info, warn}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -#[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: DatabaseGuard, body: Ruma>, @@ -417,7 +416,6 @@ pub async fn create_room_route( /// Gets a single event. /// /// - You have to currently be joined to the room (TODO: Respect history visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: DatabaseGuard, body: Ruma>, @@ -445,7 +443,6 @@ pub async fn get_room_event_route( /// Lists all aliases of the room. /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_room_aliases_route( db: DatabaseGuard, body: Ruma>, @@ -478,7 +475,6 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -#[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 067eddc..5860484 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -9,7 +9,6 @@ use std::collections::BTreeMap; /// Searches rooms for messages. 
/// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -#[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index dbcd28c..c2259c2 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -23,7 +23,6 @@ struct Claims { /// /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. -#[tracing::instrument(skip(_body))] pub async fn get_login_types_route( _body: Ruma, ) -> Result { @@ -43,7 +42,6 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -#[tracing::instrument(skip(db, body))] pub async fn login_route( db: DatabaseGuard, body: Ruma>, @@ -163,7 +161,6 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn logout_route( db: DatabaseGuard, body: Ruma, @@ -189,7 +186,6 @@ pub async fn logout_route( /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. -#[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: DatabaseGuard, body: Ruma, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index acc362f..e334e7d 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -26,7 +26,6 @@ use ruma::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: DatabaseGuard, body: Ruma>, @@ -56,7 +55,6 @@ pub async fn send_state_event_for_key_route( /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, body: Ruma>, @@ -92,7 +90,6 @@ pub async fn send_state_event_for_empty_key_route( /// Get all state events for a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: DatabaseGuard, body: Ruma>, @@ -139,7 +136,6 @@ pub async fn get_state_events_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: DatabaseGuard, body: Ruma>, @@ -190,7 +186,6 @@ pub async fn get_state_events_for_key_route( /// Get single state event of a room. 
/// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6410ce5..360f015 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,7 +54,6 @@ use tracing::error; /// /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached -#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index edf8690..29bd9a0 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -13,7 +13,6 @@ use std::collections::BTreeMap; /// Adds a tag to the room. /// /// - Inserts the tag into the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: DatabaseGuard, body: Ruma>, @@ -51,7 +50,6 @@ pub async fn update_tag_route( /// Deletes a tag from the room. /// /// - Removes the tag from the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: DatabaseGuard, body: Ruma>, @@ -86,7 +84,6 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 929503e..524f3ba 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -6,7 +6,6 @@ use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// /// TODO: Fetches all metadata about protocols supported by the homeserver. -#[tracing::instrument(skip(_body))] pub async fn get_protocols_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 9f67bf0..e57998f 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -13,7 +13,6 @@ use ruma::{ /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. -#[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 6c1939a..bbc852d 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -5,7 +5,6 @@ use ruma::api::client::r0::typing::create_typing_event; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 65becda..168f172 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -13,7 +13,6 @@ use ruma::api::client::unversioned::get_supported_versions; /// /// Note: Unstable features are used while developing new features. 
Clients should avoid using /// unstable features in their stable releases -#[tracing::instrument(skip(_body))] pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index a3df583..cecba7f 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -6,7 +6,6 @@ use ruma::api::client::r0::user_directory::search_users; /// Searches all known users for a match. /// /// - TODO: Hide users that are not in any public rooms? -#[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index f3262ab..e9a553a 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -9,7 +9,6 @@ type HmacSha1 = Hmac; /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. -#[tracing::instrument(skip(body, db))] pub async fn turn_server_route( db: DatabaseGuard, body: Ruma, diff --git a/src/server_server.rs b/src/server_server.rs index 42e44c6..5c00aab 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -301,7 +301,6 @@ where } } -#[tracing::instrument] fn get_ip_with_port(destination_str: &str) -> Option { if let Ok(destination) = destination_str.parse::() { Some(FedDest::Literal(destination)) @@ -312,7 +311,6 @@ fn get_ip_with_port(destination_str: &str) -> Option { } } -#[tracing::instrument] fn add_port_to_hostname(destination_str: &str) -> FedDest { let (host, port) = match destination_str.find(':') { None => (destination_str, ":8448"), @@ -490,7 +488,6 @@ async fn request_well_known( /// # `GET /_matrix/federation/v1/version` /// /// Get version information on this server. -#[tracing::instrument(skip(db, _body))] pub async fn get_server_version_route( db: DatabaseGuard, _body: Ruma, @@ -514,7 +511,6 @@ pub async fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. // Response type for this endpoint is Json because we need to calculate a signature for the response -#[tracing::instrument(skip(db))] pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -564,7 +560,6 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result impl IntoResponse { get_server_keys_route(db).await } @@ -572,7 +567,6 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, @@ -613,7 +607,6 @@ pub async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, @@ -654,7 +647,6 @@ pub async fn get_public_rooms_route( /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. 
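The `add_port_to_hostname` helper in the server_server.rs hunk above appends the default federation port 8448 when a destination has no explicit port. A minimal standalone sketch of that behaviour, simplified to plain strings instead of the crate's `FedDest` type (illustrative only, not code from this patch):

```rust
// Sketch only: mirrors the intent of `add_port_to_hostname`, but returns a
// plain String instead of building a `FedDest`.
fn with_default_port(destination: &str) -> String {
    match destination.find(':') {
        // No port given: fall back to the default federation port.
        None => format!("{}:8448", destination),
        // A port is already present: keep the destination as-is.
        Some(_) => destination.to_owned(),
    }
}

fn main() {
    assert_eq!(with_default_port("matrix.org"), "matrix.org:8448");
    assert_eq!(with_default_port("matrix.org:443"), "matrix.org:443");
}
```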
-#[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route( db: DatabaseGuard, body: Ruma>, @@ -1075,7 +1067,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .await } -#[tracing::instrument(skip(origin, create_event, event_id, room_id, value, db, pub_key_map))] +#[tracing::instrument(skip_all)] fn handle_outlier_pdu<'a>( origin: &'a ServerName, create_event: &'a PduEvent, @@ -1237,7 +1229,7 @@ fn handle_outlier_pdu<'a>( }) } -#[tracing::instrument(skip(incoming_pdu, val, create_event, origin, db, room_id, pub_key_map))] +#[tracing::instrument(skip_all)] async fn upgrade_outlier_to_timeline_pdu( incoming_pdu: Arc, val: BTreeMap, @@ -1780,7 +1772,7 @@ async fn upgrade_outlier_to_timeline_pdu( /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? -#[tracing::instrument(skip(db, origin, events, create_event, room_id, pub_key_map))] +#[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( db: &'a Database, origin: &'a ServerName, @@ -1921,7 +1913,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -#[tracing::instrument(skip(db, origin, signature_ids))] +#[tracing::instrument(skip_all)] pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, @@ -2080,7 +2072,7 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. -#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] +#[tracing::instrument(skip_all)] fn append_incoming_pdu<'a>( db: &Database, pdu: &PduEvent, @@ -2284,7 +2276,6 @@ fn get_auth_chain_inner( /// Retrieves a single event from the server. /// /// - Only works if a user of this server is currently invited or joined the room -#[tracing::instrument(skip(db, body))] pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, @@ -2328,7 +2319,6 @@ pub async fn get_event_route( /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. -#[tracing::instrument(skip(db, body))] pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, @@ -2402,7 +2392,6 @@ pub async fn get_missing_events_route( /// Retrieves the auth chain for a given event. /// /// - This does not include the event itself -#[tracing::instrument(skip(db, body))] pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, @@ -2451,7 +2440,6 @@ pub async fn get_event_authorization_route( /// # `GET /_matrix/federation/v1/state/{roomId}` /// /// Retrieves the current state of the room. -#[tracing::instrument(skip(db, body))] pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, @@ -2511,7 +2499,6 @@ pub async fn get_room_state_route( /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// /// Retrieves the current state of the room. -#[tracing::instrument(skip(db, body))] pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, @@ -2560,7 +2547,6 @@ pub async fn get_room_state_ids_route( /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// /// Creates a join template. 
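The functions that stay instrumented in the hunks above switch from listing skipped arguments one by one to `skip_all`. Both forms are valid `tracing` usage; a small illustration of the difference, using made-up placeholder types rather than code from this patch:

```rust
use tracing::instrument;

// Placeholder types standing in for parameters like `DatabaseGuard` or `Ruma<...>`.
struct Db;
struct Body;

// Explicit skips: each argument that must not be recorded as a span field is
// named, and the list has to track every signature change.
#[instrument(skip(db, body))]
async fn explicitly_skipped(db: Db, body: Body) {
    let _ = (db, body);
}

// `skip_all`: no arguments are recorded, so the attribute stays valid no
// matter how the parameters change.
#[instrument(skip_all)]
async fn skip_everything(db: Db, body: Body) {
    let _ = (db, body);
}
```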
-#[tracing::instrument(skip(db, body))] pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, @@ -2841,7 +2827,6 @@ async fn create_join_event( /// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[tracing::instrument(skip(db, body))] pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, @@ -2859,7 +2844,6 @@ pub async fn create_join_event_v1_route( /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[tracing::instrument(skip(db, body))] pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, @@ -2877,7 +2861,6 @@ pub async fn create_join_event_v2_route( /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` /// /// Invites a remote user to a room. -#[tracing::instrument(skip(db, body))] pub async fn create_invite_route( db: DatabaseGuard, body: Ruma>, @@ -2988,7 +2971,6 @@ pub async fn create_invite_route( /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. -#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, @@ -3026,7 +3008,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/federation/v1/query/directory` /// /// Resolve a room alias to a room id. -#[tracing::instrument(skip(db, body))] pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, @@ -3052,7 +3033,6 @@ pub async fn get_room_information_route( /// # `GET /_matrix/federation/v1/query/profile` /// /// Gets information on a profile. -#[tracing::instrument(skip(db, body))] pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, @@ -3090,7 +3070,6 @@ pub async fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. -#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, body: Ruma, @@ -3119,7 +3098,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/federation/v1/user/keys/claim` /// /// Claims one-time keys. 
-#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, @@ -3137,7 +3115,7 @@ pub async fn claim_keys_route( }) } -#[tracing::instrument(skip(event, pub_key_map, db))] +#[tracing::instrument(skip_all)] pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, From 0ad6eac4f820c8a69dabff984f66b95abbcfb597 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 16:28:43 +0100 Subject: [PATCH 1022/1727] Remove all tracing::instrument attributes from database::abstraction::* --- src/database/abstraction/heed.rs | 10 ---------- src/database/abstraction/persy.rs | 10 ---------- src/database/abstraction/sled.rs | 1 - src/database/abstraction/sqlite.rs | 13 ------------- 4 files changed, 34 deletions(-) diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs index 83dafc5..9cca097 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -69,7 +69,6 @@ impl DatabaseEngine for Engine { } impl EngineTree { - #[tracing::instrument(skip(self, tree, from, backwards))] fn iter_from_thread( &self, tree: Arc, @@ -94,7 +93,6 @@ impl EngineTree { } } -#[tracing::instrument(skip(tree, txn, from, backwards))] fn iter_from_thread_work( tree: Arc, txn: &heed::RoTxn<'_>, @@ -126,7 +124,6 @@ fn iter_from_thread_work( } impl Tree for EngineTree { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { let txn = self.engine.env.read_txn().map_err(convert_error)?; Ok(self @@ -136,7 +133,6 @@ impl Tree for EngineTree { .map(|s| s.to_vec())) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree @@ -147,7 +143,6 @@ impl Tree for EngineTree { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree.delete(&mut txn, &key).map_err(convert_error)?; @@ -155,12 +150,10 @@ impl Tree for EngineTree { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { self.iter_from(&[], false) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from( &self, from: &[u8], @@ -169,7 +162,6 @@ impl Tree for EngineTree { self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards) } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; @@ -186,7 +178,6 @@ impl Tree for EngineTree { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, @@ -197,7 +188,6 @@ impl Tree for EngineTree { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 628cf32..e78e731 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -62,7 +62,6 @@ impl PersyTree { } impl Tree for PersyTree { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { let result = self .persy @@ -72,14 +71,12 @@ impl Tree for PersyTree { Ok(result) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; 
self.watchers.wake(key); Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { let mut tx = self.begin()?; for (key, value) in iter { @@ -93,7 +90,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { let mut tx = self.begin()?; for key in iter { @@ -108,7 +104,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let mut tx = self.begin()?; tx.remove::(&self.name, ByteVec::from(key), None)?; @@ -116,7 +111,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { let iter = self.persy.range::(&self.name, ..); match iter { @@ -132,7 +126,6 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -165,13 +158,11 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { self.increment_batch(&mut Some(key.to_owned()).into_iter())?; Ok(self.get(key)?.unwrap()) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, @@ -200,7 +191,6 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 35ba1b2..87defc5 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -39,7 +39,6 @@ impl Tree for SledEngineTree { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { for (key, value) in iter { self.0.insert(key, value)?; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index d4aab7d..730c1bc 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -134,7 +134,6 @@ pub struct SqliteTable { type TupleOfBytes = (Vec, Vec); impl SqliteTable { - #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { //dbg!(&self.name); Ok(guard @@ -143,7 +142,6 @@ impl SqliteTable { .optional()?) 
} - #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { //dbg!(&self.name); guard.execute( @@ -192,12 +190,10 @@ impl SqliteTable { } impl Tree for SqliteTable { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); self.insert_with_guard(&guard, key, value)?; @@ -206,7 +202,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { let guard = self.engine.write_lock(); @@ -221,7 +216,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { let guard = self.engine.write_lock(); @@ -239,7 +233,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); @@ -251,14 +244,12 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box + 'a> { let guard = self.engine.read_lock_iterator(); self.iter_with_guard(guard) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -323,7 +314,6 @@ impl Tree for SqliteTable { } } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let guard = self.engine.write_lock(); @@ -337,7 +327,6 @@ impl Tree for SqliteTable { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { Box::new( self.iter_from(&prefix, false) @@ -345,12 +334,10 @@ impl Tree for SqliteTable { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } - #[tracing::instrument(skip(self))] fn clear(&self) -> Result<()> { debug!("clear: running"); self.engine From 0ed1e42aed9c88d467f05252177df18a69a0fae1 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 12 Feb 2022 21:01:53 +0100 Subject: [PATCH 1023/1727] update ruma --- Cargo.lock | 37 +++++++++++++++++++------------------ Cargo.toml | 2 +- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a56103e..dbc29ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "bytes", "http", @@ -2113,12 +2113,13 @@ dependencies = [ "serde", "serde_json", "thiserror", + "tracing", ] [[package]] name = "ruma-api-macros" version = "0.18.5" -source = 
"git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2129,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "ruma-api", "ruma-common", @@ -2143,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "assign", "bytes", @@ -2163,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "indexmap", "js_int", @@ -2178,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "indoc", "js_int", @@ -2195,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2206,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2221,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2236,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2246,7 +2247,7 @@ 
dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "thiserror", ] @@ -2254,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2267,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2282,7 +2283,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "base64 0.13.0", "bytes", @@ -2297,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2308,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2325,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index aa6bdbb..bcdf01a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f130d09daabf021ad30750eed89483a0f45f820a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = 
"https://github.com/ruma/ruma", rev = "f72d6601fcf2ce4382a7c02b740d60a6e803f4d9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From 35b82d51cf3a76ab2ab2c240c061a9b421f046d7 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 12 Feb 2022 21:04:38 +0100 Subject: [PATCH 1024/1727] fix compilations --- src/appservice_server.rs | 8 ++++++-- src/database/pusher.rs | 8 ++++++-- src/server_server.rs | 9 +++++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 8d6d052..ce122da 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; -use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; +use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; @@ -17,7 +17,11 @@ where let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); let mut http_request = request - .try_into_http_request::(destination, SendAccessToken::IfRequired("")) + .try_into_http_request::( + destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .unwrap() .map(|body| body.freeze()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index e73ab06..bc7017b 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -7,7 +7,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, @@ -101,7 +101,11 @@ where let destination = destination.replace("/_matrix/push/v1/notify", ""); let http_request = request - .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .map_err(|e| { warn!("Failed to find destination {}: {}", destination, e); Error::BadServerResponse("Invalid destination") diff --git a/src/server_server.rs b/src/server_server.rs index 5c00aab..3921055 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -34,7 +34,8 @@ use ruma::{ send_transaction_message, }, }, - EndpointError, IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, + SendAccessToken, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -155,7 +156,11 @@ where let actual_destination_str = actual_destination.clone().into_https_string(); let mut http_request = request - .try_into_http_request::>(&actual_destination_str, SendAccessToken::IfRequired("")) + .try_into_http_request::>( + &actual_destination_str, + 
SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .map_err(|e| { warn!( "Failed to find destination {}: {}", From d4217007fe311896e8e685e0c95c50a50336d486 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Sat, 12 Feb 2022 21:40:07 +0100 Subject: [PATCH 1025/1727] fix: do not panic on a JSON not containing the PDU Do not panic on a JSON not containing the PDU when executing the parse-pdu admin command. --- src/database/admin.rs | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 50fac3e..f9d4f42 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -354,24 +354,26 @@ fn process_admin_command( let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = EventId::parse(format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::V6) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${}", hash)); - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - )), + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + } + } Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e + "Could not parse PDU JSON: {:?}", + e )), } } From b8d92d3cec4905265c1ef6aa9b03f1433e7d5637 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sun, 13 Feb 2022 12:07:00 +0100 Subject: [PATCH 1026/1727] take advantage of multiple paths --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/main.rs | 14 ++++++++++---- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbc29ad..e7ffe5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "bytes", "http", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", 
"proc-macro2", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "ruma-api", "ruma-common", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "assign", "bytes", @@ -2164,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "indexmap", "js_int", @@ -2179,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "indoc", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2207,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2222,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = 
"git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "thiserror", ] @@ -2255,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2268,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2283,7 +2283,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "base64 0.13.0", "bytes", @@ -2298,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2309,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2326,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bcdf01a..ab7b47d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f72d6601fcf2ce4382a7c02b740d60a6e803f4d9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "aed09886946f8817a478981cae1b6b8b5d4e7b7d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", 
"unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/main.rs b/src/main.rs index 22ddf3e..828d7dc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -387,13 +387,19 @@ macro_rules! impl_ruma_handler { E: IntoResponse, $( $ty: FromRequest + Send + 'static, )* { - fn add_to_router(self, router: Router) -> Router { + fn add_to_router(self, mut router: Router) -> Router { let meta = Req::Incoming::METADATA; let method_filter = method_to_filter(meta.method); - router.route(meta.path, on(method_filter, |$( $ty: $ty, )* req| async move { - self($($ty,)* req).await.map(RumaResponse) - })) + for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { + let this = self.clone(); + + router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { + this($($ty,)* req).await.map(RumaResponse) + })) + } + + router } } }; From aee6bf7e7aedb250911f79f43d56bac934c64381 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Feb 2022 11:30:04 +0000 Subject: [PATCH 1027/1727] Change this to handler --- src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 828d7dc..a96bef5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -392,10 +392,10 @@ macro_rules! impl_ruma_handler { let method_filter = method_to_filter(meta.method); for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { - let this = self.clone(); + let handler = self.clone(); router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { - this($($ty,)* req).await.map(RumaResponse) + handler($($ty,)* req).await.map(RumaResponse) })) } From 3aece38e9dfab762efc52afe066ce418c36e673a Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Feb 2022 13:59:27 +0100 Subject: [PATCH 1028/1727] Add a not-found route --- src/main.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index a96bef5..6aa0870 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,6 +11,7 @@ use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, + handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -22,10 +23,13 @@ use figment::{ }; use http::{ header::{self, HeaderName}, - Method, + Method, Uri, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{api::IncomingRequest, Outgoing}; +use ruma::{ + api::{client::error::ErrorKind, IncomingRequest}, + Outgoing, +}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -321,6 +325,7 @@ fn routes() -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .fallback(not_found.into_service()) } async fn shutdown_signal(handle: ServerHandle) { @@ -349,6 +354,10 @@ async fn shutdown_signal(handle: ServerHandle) { handle.graceful_shutdown(Some(Duration::from_secs(30))); } +async fn 
not_found(_uri: Uri) -> impl IntoResponse { + Error::BadRequest(ErrorKind::NotFound, "Unknown or unimplemented route") +} + trait RouterExt { fn ruma_route(self, handler: H) -> Self where From 6602f6114c59e47d0c2ff605493a0f7e4ffeba3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Feb 2022 20:59:11 +0100 Subject: [PATCH 1029/1727] fix: redacts can't error anymore --- src/database/rooms.rs | 8 ++------ src/server_server.rs | 10 ++++++++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1f6b431..c751167 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2147,13 +2147,9 @@ impl Rooms { .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; pdu.redact(reason)?; self.replace_pdu(&pdu_id, &pdu)?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Event ID does not exist.", - )) } + // If event does not exist, just noop + Ok(()) } /// Update current membership data. diff --git a/src/server_server.rs b/src/server_server.rs index 3921055..9f0e922 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1593,7 +1593,10 @@ async fn upgrade_outlier_to_timeline_pdu( soft_fail, &state_lock, ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); @@ -1759,7 +1762,10 @@ async fn upgrade_outlier_to_timeline_pdu( soft_fail, &state_lock, ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; debug!("Appended incoming pdu."); From 77f4b68c8e9f610a6960daaaf6502a7da9936130 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 11:17:32 +0100 Subject: [PATCH 1030/1727] fix(ci): Also create versioned docker image --- .gitlab-ci.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8d701c2..40716fa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -220,6 +220,20 @@ docker:master:dockerhub: variables: TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" +docker:tags:gitlab: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG" + variables: + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG" + +docker:tags:dockerhub: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG && $DOCKER_HUB" + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" + # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # From 2645494582f75a8b51391f0b270d5131ba59df34 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 11:17:46 +0100 Subject: [PATCH 1031/1727] fix(ci): Also run CI for git tags --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 40716fa..71511ef 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -327,3 +327,4 @@ workflow: - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" when: never - if: "$CI_COMMIT_BRANCH" + - if: "$CI_COMMIT_TAG" From b21a44ca4cd5c5064f2991f62bd3c48074c4148b Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 18:33:20 +0100 Subject: [PATCH 1032/1727] feat(ci): Lint dockerfiles with 
hadolint --- .gitlab-ci.yml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 71511ef..bd4ce79 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -272,6 +272,7 @@ test:sytest: tags: ["docker"] variables: PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" + interruptible: true before_script: - "mkdir -p /app" - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit" @@ -292,6 +293,40 @@ test:sytest: reports: junit: "$CI_PROJECT_DIR/sytest.xml" +test:dockerlint: + stage: "test" + needs: [] + image: "ghcr.io/hadolint/hadolint:latest-alpine" + interruptible: true + script: + # First pass: Print for CI log: + - > + hadolint + --no-fail --verbose + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile + # Then output the results into a json for GitLab to pretty-print this in the MR: + - > + hadolint + --format gitlab_codeclimate + --failure-threshold error + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile > dockerlint.json + artifacts: + when: always + reports: + codequality: dockerlint.json + paths: + - dockerlint.json + rules: + - if: '$CI_COMMIT_REF_NAME != "master"' + changes: + - docker/*Dockerfile + - Dockerfile + - .gitlab-ci.yml + - if: '$CI_COMMIT_REF_NAME == "master"' + - if: '$CI_COMMIT_REF_NAME == "next"' + # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # From de6c3312ceca9d0f9c0d2041c16a46d6b538b2a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Feb 2022 10:29:04 +0100 Subject: [PATCH 1033/1727] docs: make all configs match --- DEPLOY.md | 27 +++++++++++----------- Dockerfile | 15 +++++++++--- conduit-example.toml | 18 +++------------ debian/postinst | 38 ++++++++++++++++--------------- docker-compose.yml | 31 ++++++++++++------------- docker/README.md | 2 +- docker/docker-compose.traefik.yml | 31 ++++++++++--------------- 7 files changed, 76 insertions(+), 86 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index eecf513..0657c0c 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -93,24 +93,30 @@ to read it. You need to change at least the server name.** ```toml [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/matrix-conduit/conduit_db" +database_path = "/var/lib/matrix-conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. 
You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = 6167 # Max size for uploads @@ -119,20 +125,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -allow_encryption = true allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. ``` ## Setting the correct file permissions diff --git a/Dockerfile b/Dockerfile index b631f29..34a0766 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,9 +35,18 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ - CONDUIT_PORT=6167 +ENV CONDUIT_SERVER_NAME=your.server.name # EDIT THIS +ENV CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit +ENV CONDUIT_DATABASE_BACKEND=rocksdb +ENV CONDUIT_PORT=6167 +ENV CONDUIT_MAX_REQUEST_SIZE=20_000_000 # in bytes, ~20 MB +ENV CONDUIT_ALLOW_REGISTRATION=true +ENV CONDUIT_ALLOW_FEDERATION=true +ENV CONDUIT_TRUSTED_SERVERS=["matrix.org"] +#ENV CONDUIT_MAX_CONCURRENT_REQUESTS=100 +#ENV CONDUIT_LOG=info,rocket=off,_=off,sled=off +ENV CONDUIT_ADDRESS=0.0.0.0 +ENV CONDUIT_CONFIG='' # Ignore this # Conduit needs: # ca-certificates: for https diff --git a/conduit-example.toml b/conduit-example.toml index c22c862..23c1844 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -16,7 +16,7 @@ #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/conduit/" +database_path = "/var/lib/matrix-conduit/" database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in @@ -31,24 +31,12 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. 
allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -#allow_encryption = false -#allow_federation = false - -# Enable jaeger to support monitoring and troubleshooting through jaeger -#allow_jaeger = false +allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. - -proxy = "none" # more examples can be found at src/database/proxy.rs:6 - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 diff --git a/debian/postinst b/debian/postinst index 29a9367..10d5561 100644 --- a/debian/postinst +++ b/debian/postinst @@ -36,18 +36,24 @@ case "$1" in mkdir -p "$CONDUIT_CONFIG_PATH" cat > "$CONDUIT_CONFIG_FILE" << EOF [global] -# The server_name is the name of this server. It is used as a suffix for user -# and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See +# The server_name is the pretty name of this server. It is used as a suffix for +# user and room ids. Examples: matrix.org, conduit.rs + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information. +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information + server_name = "${CONDUIT_SERVER_NAME}" # This is the only directory where Conduit will save its data. database_path = "${CONDUIT_DATABASE_PATH}" +database_backend = "rocksdb" # The address Conduit will be listening on. # By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to @@ -56,7 +62,8 @@ address = "${CONDUIT_ADDRESS}" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port. +# 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = ${CONDUIT_PORT} # Max size for uploads @@ -65,20 +72,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created. -# Note: Existing rooms will continue to work. -#allow_encryption = false -#allow_federation = false +allow_federation = true -# Enable jaeger to support monitoring and troubleshooting through jaeger. 
-#allow_jaeger = false +trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 +address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. EOF fi ;; diff --git a/docker-compose.yml b/docker-compose.yml index 88d5c3f..5a17a8d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,27 +20,24 @@ services: ports: - 8448:6167 volumes: - - db:/srv/conduit/.local/share/conduit + - db:/var/lib/matrix-conduit/ ### Uncomment if you want to use conduit.toml to configure Conduit ### Note: Set env vars will override conduit.toml values # - ./conduit.toml:/srv/conduit/conduit.toml environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" - # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: info,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this + # ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit @@ -56,4 +53,4 @@ services: # - homeserver volumes: - db: + db: diff --git a/docker/README.md b/docker/README.md index d886738..14758fd 100644 --- a/docker/README.md +++ b/docker/README.md @@ -112,4 +112,4 @@ So...step by step: ``` 6. Run `docker-compose up -d` -7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin. +7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. 
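The CONDUIT_* environment variables used throughout this patch mirror the keys of the [global] section in conduit.toml; with CONDUIT_CONFIG set to an empty string, Conduit reads its configuration from the environment alone. A minimal sketch of the same settings outside of docker-compose, assuming the variable names shown above (the concrete values are illustrative, not prescriptive):

```bash
# Illustrative only: the docker-compose configuration above, expressed as plain
# environment variables (e.g. for a shell session or a systemd unit).
export CONDUIT_CONFIG=''                       # empty string = configure purely via env vars
export CONDUIT_SERVER_NAME="your.server.name"  # EDIT THIS
export CONDUIT_DATABASE_PATH="/var/lib/matrix-conduit/"
export CONDUIT_DATABASE_BACKEND="rocksdb"
export CONDUIT_ADDRESS="0.0.0.0"
export CONDUIT_PORT="6167"
export CONDUIT_MAX_REQUEST_SIZE="20_000_000"   # in bytes, ~20 MB
export CONDUIT_ALLOW_REGISTRATION="true"
export CONDUIT_ALLOW_FEDERATION="true"
export CONDUIT_TRUSTED_SERVERS='["matrix.org"]'
./conduit
```
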
diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index f625080..ca560b8 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -18,28 +18,22 @@ services: # GIT_REF: origin/master restart: unless-stopped volumes: - - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml + - db:/var/lib/matrix-conduit/ networks: - proxy environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_ALLOW_REGISTRATION: 'true' + CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUIT_ALLOW_REGISTRATION : 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" - # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: info,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. If you want to use a different way, delete or comment the below service, here @@ -50,7 +44,6 @@ services: volumes: - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files - ./nginx/www:/var/www/ # location of the client and server .well-known-files - ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit From c4353405a5c457b8301de123c646e748a07f8a22 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:15:40 +0000 Subject: [PATCH 1034/1727] Suggestions from Jonas Zohren --- DEPLOY.md | 4 ++-- debian/postinst | 2 +- docker-compose.yml | 3 --- docker/README.md | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 0657c0c..a28218d 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -148,8 +148,8 @@ sudo chown -R conduit:nogroup /etc/matrix-conduit If you use the default database path you also need to run this: ```bash -sudo mkdir -p /var/lib/matrix-conduit/conduit_db -sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db +sudo mkdir -p /var/lib/matrix-conduit/ +sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ ``` ## Setting up the Reverse Proxy diff --git a/debian/postinst b/debian/postinst index 10d5561..378f99e 100644 --- a/debian/postinst +++ b/debian/postinst @@ -5,7 +5,7 @@ set -e CONDUIT_CONFIG_PATH=/etc/matrix-conduit CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ case "$1" in configure) diff --git a/docker-compose.yml b/docker-compose.yml index 5a17a8d..0a9d8f4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,9 +21,6 @@ services: - 8448:6167 volumes: - db:/var/lib/matrix-conduit/ - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml environment: CONDUIT_SERVER_NAME: your.server.name # EDIT THIS CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ diff --git a/docker/README.md b/docker/README.md index 14758fd..28ad06f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -24,7 +24,7 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ```bash -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/var/lib/matrix-conduit/ matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: From 97507d28806e7a10cb4ffc9ab4cc64b902b267ef Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:25:19 +0000 Subject: [PATCH 1035/1727] Remove most env vars from Dockerfile --- Dockerfile | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 34a0766..82ee951 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,18 +35,9 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. 
EXPOSE 6167 -ENV CONDUIT_SERVER_NAME=your.server.name # EDIT THIS -ENV CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit -ENV CONDUIT_DATABASE_BACKEND=rocksdb -ENV CONDUIT_PORT=6167 -ENV CONDUIT_MAX_REQUEST_SIZE=20_000_000 # in bytes, ~20 MB -ENV CONDUIT_ALLOW_REGISTRATION=true -ENV CONDUIT_ALLOW_FEDERATION=true -ENV CONDUIT_TRUSTED_SERVERS=["matrix.org"] -#ENV CONDUIT_MAX_CONCURRENT_REQUESTS=100 -#ENV CONDUIT_LOG=info,rocket=off,_=off,sled=off -ENV CONDUIT_ADDRESS=0.0.0.0 -ENV CONDUIT_CONFIG='' # Ignore this +ENV CONDUIT_PORT=6167 \ + CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ + CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https From 0be8500c4fec53d2442da7f3cb98ecc6cbe198da Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:38:13 +0000 Subject: [PATCH 1036/1727] Set all env vars in docker README --- Dockerfile | 1 + docker/README.md | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 82ee951..e6cdaf5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,6 +36,7 @@ FROM docker.io/debian:bullseye-slim AS runner EXPOSE 6167 ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars diff --git a/docker/README.md b/docker/README.md index 28ad06f..f9d94ab 100644 --- a/docker/README.md +++ b/docker/README.md @@ -24,7 +24,17 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ```bash -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/var/lib/matrix-conduit/ matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 \ + -v db:/var/lib/matrix-conduit/ \ + -e CONDUIT_SERVER_NAME="your.server.name" \ + -e CONDUIT_DATABASE_BACKEND="rocksdb" \ + -e CONDUIT_ALLOW_REGISTRATION=true \ + -e CONDUIT_ALLOW_FEDERATION=true \ + -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ + -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ + -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ + -e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \ + --name conduit matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: From 98b67da649c602574b4c4b304b3c52fdd0450641 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 16 Feb 2022 13:04:45 +0000 Subject: [PATCH 1037/1727] fix: Docker syntax --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index e6cdaf5..49c3224 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,7 +38,8 @@ EXPOSE 6167 ENV CONDUIT_PORT=6167 \ CONDUIT_ADDRESS="0.0.0.0" \ CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ - CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https From b4225cb0fca88636e0a4d6213cfcea30c800ec1e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 16 Feb 2022 15:04:32 +0100 Subject: [PATCH 1038/1727] fix(docker): use user 1000 and standard db path --- Dockerfile | 15 ++++---- docker/ci-binaries-packaging.Dockerfile | 48 +++++++++++++------------ 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/Dockerfile b/Dockerfile index 49c3224..76d10ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,8 @@ FROM docker.io/rust:1.58-bullseye AS builder 
WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apt update && apt -y install libclang-dev +RUN apt-get update && \ + apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 # == Build dependencies without our own code separately for caching == # @@ -44,7 +45,7 @@ ENV CONDUIT_PORT=6167 \ # Conduit needs: # ca-certificates: for https # iproute2 & wget: for the healthcheck script -RUN apt update && apt -y install \ +RUN apt-get update && apt-get -y --no-install-recommends install \ ca-certificates \ iproute2 \ wget \ @@ -61,12 +62,12 @@ HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit # Improve security: Don't run stuff as root, that does not need to run as root -# Add 'conduit' user and group (100:82). The UID:GID choice is to be compatible -# with previous, Alpine-based containers, where the user and group were both -# named 'www-data'. +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. +ARG USER_ID=1000 +ARG GROUP_ID=1000 RUN set -x ; \ - groupadd -r -g 82 conduit ; \ - useradd -r -M -d /srv/conduit -o -u 100 -g conduit conduit && exit 0 ; exit 1 + groupadd -r -g ${GROUP_ID} conduit ; \ + useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 # Change ownership of Conduit files to conduit user and group and make the healthcheck executable: RUN chown -cR conduit:conduit /srv/conduit && \ diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 3731bac..ee1ca4c 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -14,9 +14,14 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ - CONDUIT_PORT=6167 +# Users are expected to mount a volume to this directory: +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit + +ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https @@ -25,7 +30,6 @@ RUN apk add --no-cache \ ca-certificates \ iproute2 - ARG CREATED ARG VERSION ARG GIT_REF @@ -45,36 +49,36 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.ref.name="" # Created directory for the database and media files -RUN mkdir -p /srv/conduit/.local/share/conduit +RUN mkdir -p ${DEFAULT_DB_PATH} # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh - -# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") -# copy the matching binary into this docker image -ARG TARGETPLATFORM -COPY ./$TARGETPLATFORM /srv/conduit/conduit - - # Improve security: Don't run stuff as root, that does not need to run as root: -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. 
+ARG USER_ID=1000 +ARG GROUP_ID=1000 RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + deluser --remove-home www-data ; \ + addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \ + adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \ + addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1 -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit -RUN chmod +x /srv/conduit/healthcheck.sh +# Change ownership of Conduit files to conduit user and group +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh -# Change user to www-data -USER www-data +# Change user to conduit +USER conduit # Set container home directory WORKDIR /srv/conduit # Run Conduit and print backtraces on panics ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] + +# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") +# copy the matching binary into this docker image +ARG TARGETPLATFORM +COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit From bcd6c0bf532930b31873431ebdf601f4699d7d69 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 17 Feb 2022 11:14:50 +0000 Subject: [PATCH 1039/1727] feat: Provide sane defaults for vscode developing This includes some extensions and a debug profile --- .vscode/extensions.json | 11 +++++++++++ .vscode/launch.json | 35 +++++++++++++++++++++++++++++++++++ .vscode/settings.json | 2 +- 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/launch.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..7963e9d --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,11 @@ +{ + "recommendations": [ + "matklad.rust-analyzer", + "bungcip.better-toml", + "ms-azuretools.vscode-docker", + "eamodio.gitlens", + "serayuzgur.crates", + "vadimcn.vscode-lldb", + "timonwong.shellcheck" + ] +} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..da52160 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,35 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug conduit", + "sourceLanguages": ["rust"], + "cargo": { + "args": [ + "build", + "--bin=conduit", + "--package=conduit" + ], + "filter": { + "name": "conduit", + "kind": "bin" + } + }, + "args": [], + "env": { + "RUST_BACKTRACE": "1", + "CONDUIT_CONFIG": "", + "CONDUIT_SERVER_NAME": "localhost", + "CONDUIT_DATABASE_PATH": "/tmp", + "CONDUIT_ADDRESS": "0.0.0.0", + "CONDUIT_PORT": "6167" + }, + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index c3f6605..95294d4 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,3 @@ { - "rust-analyzer.procMacro.enable": true + "rust-analyzer.procMacro.enable": true, } \ No newline at end of file From e57cd437d4cfc55757220ffa02a3f6312a792567 Mon Sep 17 00:00:00 2001 From: TomZ Date: Thu, 17 Feb 2022 21:59:55 +0100 Subject: [PATCH 1040/1727] Slight clarification Which version it started being beta in is quite irrelevant here. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 45b16fd..730b251 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ HQ. #### What is the current status? -As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most +Conduit is Beta, meaning you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time. From 27692a2f149c010dc08a610599a9c1035c815f91 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 18 Feb 2022 11:52:00 +0100 Subject: [PATCH 1041/1727] Remove useless serde roundtrips --- src/client_server/directory.rs | 14 +------------- src/server_server.rs | 26 ++------------------------ 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 75601fe..62bf566 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -149,19 +149,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .await?; return Ok(get_public_rooms_filtered::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, diff --git a/src/server_server.rs b/src/server_server.rs index 3921055..372a76f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -591,18 +591,7 @@ pub async fn get_public_rooms_filtered_route( .await?; Ok(get_public_rooms_filtered::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: 
response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, @@ -631,18 +620,7 @@ pub async fn get_public_rooms_route( .await?; Ok(get_public_rooms::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, From e9f87e1952b8ae1588347d26d146986df623afe2 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Feb 2022 15:33:14 +0100 Subject: [PATCH 1042/1727] update ruma --- Cargo.lock | 38 ++++++------- Cargo.toml | 2 +- src/client_server/account.rs | 48 ++++++++-------- src/client_server/alias.rs | 27 +++++---- src/client_server/backup.rs | 88 ++++++++++++++--------------- src/client_server/capabilities.rs | 8 +-- src/client_server/config.rs | 28 ++++----- src/client_server/context.rs | 11 ++-- src/client_server/device.rs | 36 ++++++------ src/client_server/directory.rs | 41 +++++++------- src/client_server/filter.rs | 14 ++--- src/client_server/keys.rs | 56 +++++++++--------- src/client_server/media.rs | 40 ++++++------- src/client_server/membership.rs | 76 ++++++++++++------------- src/client_server/message.rs | 22 ++++---- src/client_server/presence.rs | 14 ++--- src/client_server/profile.rs | 38 ++++++------- src/client_server/push.rs | 62 ++++++++++---------- src/client_server/read_marker.rs | 17 +++--- src/client_server/redact.rs | 8 +-- src/client_server/report.rs | 8 +-- src/client_server/room.rs | 40 ++++++------- src/client_server/search.rs | 15 +++-- src/client_server/session.rs | 36 ++++++------ src/client_server/state.rs | 32 +++++------ src/client_server/sync.rs | 62 +++++++++++--------- src/client_server/tag.rs | 20 +++---- src/client_server/thirdparty.rs | 8 +-- src/client_server/to_device.rs | 10 ++-- src/client_server/typing.rs | 11 ++-- src/client_server/unversioned.rs | 2 +- src/client_server/user_directory.rs | 10 ++-- src/client_server/voip.rs | 8 +-- src/database/globals.rs | 6 +- src/database/key_backups.rs | 2 +- src/database/pusher.rs | 12 ++-- src/database/rooms.rs | 2 +- src/database/uiaa.rs | 2 +- src/database/users.rs | 5 +- src/error.rs | 2 +- src/ruma_wrapper.rs | 2 +- src/server_server.rs | 45 +++++++++------ 42 files changed, 514 insertions(+), 500 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7ffe5b..997cedc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" 
dependencies = [ "bytes", "http", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "ruma-api", "ruma-common", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "assign", "bytes", @@ -2164,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "indexmap", "js_int", @@ -2179,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "indoc", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2207,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2222,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = 
"git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "thiserror", ] @@ -2255,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2268,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2283,12 +2283,12 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "base64 0.13.0", "bytes", "form_urlencoded", - "itoa 0.4.8", + "itoa 1.0.1", "js_int", "ruma-serde-macros", "serde", @@ -2298,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2309,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2326,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ab7b47d..b9affa7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = 
"https://github.com/ruma/ruma", rev = "aed09886946f8817a478981cae1b6b8b5d4e7b7d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index c15d820..1ff0fa0 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -8,14 +8,12 @@ use crate::{ }; use ruma::{ api::client::{ - error::ErrorKind, - r0::{ - account::{ - change_password, deactivate, get_3pids, get_username_availability, register, - whoami, ThirdPartyIdRemovalStatus, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, + account::{ + change_password, deactivate, get_3pids, get_username_availability, register, whoami, + ThirdPartyIdRemovalStatus, }, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ room::member::{MembershipState, RoomMemberEventContent}, @@ -42,8 +40,8 @@ const GUEST_NAME_LENGTH: usize = 10; /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { // Validate user id let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) @@ -67,7 +65,7 @@ pub async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. 
- Ok(get_username_availability::Response { available: true }) + Ok(get_username_availability::v3::Response { available: true }) } /// # `POST /_matrix/client/r0/register` @@ -85,8 +83,8 @@ pub async fn get_register_available_route( /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -206,7 +204,7 @@ pub async fn register_route( // Inhibit login does not work for guests if !is_guest && body.inhibit_login { - return Ok(register::Response { + return Ok(register::v3::Response { access_token: None, user_id, device_id: None, @@ -244,7 +242,7 @@ pub async fn register_route( db.flush()?; - Ok(register::Response { + Ok(register::v3::Response { access_token: Some(token), user_id, device_id: Some(device_id), @@ -267,8 +265,8 @@ pub async fn register_route( /// - Triggers device list updates pub async fn change_password_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -321,7 +319,7 @@ pub async fn change_password_route( db.flush()?; - Ok(change_password::Response {}) + Ok(change_password::v3::Response {}) } /// # `GET _matrix/client/r0/account/whoami` @@ -329,9 +327,9 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -pub async fn whoami_route(body: Ruma) -> Result { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(whoami::Response { + Ok(whoami::v3::Response { user_id: sender_user.clone(), }) } @@ -348,8 +346,8 @@ pub async fn whoami_route(body: Ruma) -> Result>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -442,7 +440,7 @@ pub async fn deactivate_route( db.flush()?; - Ok(deactivate::Response { + Ok(deactivate::v3::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, }) } @@ -452,8 +450,10 @@ pub async fn deactivate_route( /// Get a list of third party identifiers associated with this account. /// /// - Currently always returns empty list -pub async fn third_party_route(body: Ruma) -> Result { +pub async fn third_party_route( + body: Ruma, +) -> Result { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_3pids::Response::new(Vec::new())) + Ok(get_3pids::v3::Response::new(Vec::new())) } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 509372c..75cf85e 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -4,8 +4,8 @@ use ruma::{ api::{ appservice, client::{ + alias::{create_alias, delete_alias, get_alias}, error::ErrorKind, - r0::alias::{create_alias, delete_alias, get_alias}, }, federation, }, @@ -17,8 +17,8 @@ use ruma::{ /// Creates a new room alias on this server. 
pub async fn create_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -35,7 +35,7 @@ pub async fn create_alias_route( db.flush()?; - Ok(create_alias::Response::new()) + Ok(create_alias::v3::Response::new()) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -46,8 +46,8 @@ pub async fn create_alias_route( /// - TODO: Update canonical alias event pub async fn delete_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -61,7 +61,7 @@ pub async fn delete_alias_route( db.flush()?; - Ok(delete_alias::Response::new()) + Ok(delete_alias::v3::Response::new()) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -71,15 +71,15 @@ pub async fn delete_alias_route( /// - TODO: Suggest more servers to join via pub async fn get_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { get_alias_helper(&db, &body.room_alias).await } pub(crate) async fn get_alias_helper( db: &Database, room_alias: &RoomAliasId, -) -> Result { +) -> Result { if room_alias.server_name() != db.globals.server_name() { let response = db .sending @@ -90,7 +90,10 @@ pub(crate) async fn get_alias_helper( ) .await?; - return Ok(get_alias::Response::new(response.room_id, response.servers)); + return Ok(get_alias::v3::Response::new( + response.room_id, + response.servers, + )); } let mut room_id = None; @@ -141,7 +144,7 @@ pub(crate) async fn get_alias_helper( } }; - Ok(get_alias::Response::new( + Ok(get_alias::v3::Response::new( room_id, vec![db.globals.server_name().to_owned()], )) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 14c239b..808d886 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,12 +1,12 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ - error::ErrorKind, - r0::backup::{ + backup::{ add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, get_latest_backup, update_backup, }, + error::ErrorKind, }; /// # `POST /_matrix/client/r0/room_keys/version` @@ -14,8 +14,8 @@ use ruma::api::client::{ /// Creates a new backup. pub async fn create_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -23,7 +23,7 @@ pub async fn create_backup_route( db.flush()?; - Ok(create_backup::Response { version }) + Ok(create_backup::v3::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -31,15 +31,15 @@ pub async fn create_backup_route( /// Update information about an existing backup. Only `auth_data` can be modified. 
pub async fn update_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::Response {}) + Ok(update_backup::v3::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -47,8 +47,8 @@ pub async fn update_backup_route( /// Get information about the latest backup version. pub async fn get_latest_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -59,7 +59,7 @@ pub async fn get_latest_backup_route( "Key backup does not exist.", ))?; - Ok(get_latest_backup::Response { + Ok(get_latest_backup::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, @@ -72,8 +72,8 @@ pub async fn get_latest_backup_route( /// Get information about an existing backup. pub async fn get_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -83,7 +83,7 @@ pub async fn get_backup_route( "Key backup does not exist.", ))?; - Ok(get_backup::Response { + Ok(get_backup::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -98,15 +98,15 @@ pub async fn get_backup_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::Response {}) + Ok(delete_backup::v3::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -118,8 +118,8 @@ pub async fn delete_backup_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -149,7 +149,7 @@ pub async fn add_backup_keys_route( db.flush()?; - Ok(add_backup_keys::Response { + Ok(add_backup_keys::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -164,8 +164,8 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -193,7 +193,7 @@ pub async fn add_backup_key_sessions_route( db.flush()?; - Ok(add_backup_key_sessions::Response { + Ok(add_backup_key_sessions::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -208,8 +208,8 @@ pub async fn add_backup_key_sessions_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -235,7 +235,7 @@ pub async fn add_backup_key_session_route( db.flush()?; - Ok(add_backup_key_session::Response { + Ok(add_backup_key_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -246,13 +246,13 @@ pub async fn add_backup_key_session_route( /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let rooms = db.key_backups.get_all(sender_user, &body.version)?; - Ok(get_backup_keys::Response { rooms }) + Ok(get_backup_keys::v3::Response { rooms }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` @@ -260,15 +260,15 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::Response { sessions }) + Ok(get_backup_key_sessions::v3::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -276,8 +276,8 @@ pub async fn get_backup_key_sessions_route( /// Retrieves a key from the backup. pub async fn get_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -288,7 +288,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::Response { key_data }) + Ok(get_backup_key_session::v3::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -296,15 +296,15 @@ pub async fn get_backup_key_session_route( /// Delete the keys from the backup. pub async fn delete_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_all_keys(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup_keys::Response { + Ok(delete_backup_keys::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -315,8 +315,8 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. 
pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -324,7 +324,7 @@ pub async fn delete_backup_key_sessions_route( db.flush()?; - Ok(delete_backup_key_sessions::Response { + Ok(delete_backup_key_sessions::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -335,8 +335,8 @@ pub async fn delete_backup_key_sessions_route( /// Delete a key from the backup. pub async fn delete_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -344,7 +344,7 @@ pub async fn delete_backup_key_session_route( db.flush()?; - Ok(delete_backup_key_session::Response { + Ok(delete_backup_key_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index b1e072e..ac2e59f 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,6 +1,6 @@ use crate::{Result, Ruma}; use ruma::{ - api::client::r0::capabilities::{ + api::client::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, }, RoomVersionId, @@ -11,8 +11,8 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - _body: Ruma, -) -> Result { + _body: Ruma, +) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); available.insert(RoomVersionId::V6, RoomVersionStability::Stable); @@ -23,5 +23,5 @@ pub async fn get_capabilities_route( available, }; - Ok(get_capabilities::Response { capabilities }) + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 83bb7a5..a9a2fb1 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,11 +1,11 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, - r0::config::{ + config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, }, + error::ErrorKind, }, events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, serde::Raw, @@ -18,8 +18,8 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// Sets some account data for the sender user. pub async fn set_global_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -40,7 +40,7 @@ pub async fn set_global_account_data_route( db.flush()?; - Ok(set_global_account_data::Response {}) + Ok(set_global_account_data::v3::Response {}) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -48,8 +48,8 @@ pub async fn set_global_account_data_route( /// Sets some room account data for the sender user. 
pub async fn set_room_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -70,7 +70,7 @@ pub async fn set_room_account_data_route( db.flush()?; - Ok(set_room_account_data::Response {}) + Ok(set_room_account_data::v3::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` @@ -78,8 +78,8 @@ pub async fn set_room_account_data_route( /// Gets some account data for the sender user. pub async fn get_global_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -91,7 +91,7 @@ pub async fn get_global_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_global_account_data::Response { account_data }) + Ok(get_global_account_data::v3::Response { account_data }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -99,8 +99,8 @@ pub async fn get_global_account_data_route( /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -116,7 +116,7 @@ pub async fn get_room_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_room_account_data::Response { account_data }) + Ok(get_room_account_data::v3::Response { account_data }) } #[derive(Deserialize)] diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 167d0cc..2f6a2ea 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,9 +1,6 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{context::get_context, filter::LazyLoadOptions}, - }, + api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::EventType, }; use std::{collections::HashSet, convert::TryFrom}; @@ -17,8 +14,8 @@ use tracing::error; /// joined, depending on history_visibility) pub async fn get_context_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -178,7 +175,7 @@ pub async fn get_context_route( } } - let resp = get_context::Response { + let resp = get_context::v3::Response { start: start_token, end: end_token, events_before, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 76172d2..09c9406 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,10 +1,8 @@ use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::api::client::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, - r0::{ - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }; use super::SESSION_ID_LENGTH; @@ -14,8 +12,8 @@ use super::SESSION_ID_LENGTH; /// Get metadata on all devices of the sender user. 
pub async fn get_devices_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices: Vec = db @@ -24,7 +22,7 @@ pub async fn get_devices_route( .filter_map(|r| r.ok()) // Filter out buggy devices .collect(); - Ok(get_devices::Response { devices }) + Ok(get_devices::v3::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` @@ -32,8 +30,8 @@ pub async fn get_devices_route( /// Get metadata on a single device of the sender user. pub async fn get_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = db @@ -41,7 +39,7 @@ pub async fn get_device_route( .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::Response { device }) + Ok(get_device::v3::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -49,8 +47,8 @@ pub async fn get_device_route( /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = db @@ -65,7 +63,7 @@ pub async fn update_device_route( db.flush()?; - Ok(update_device::Response {}) + Ok(update_device::v3::Response {}) } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -79,8 +77,8 @@ pub async fn update_device_route( /// - Triggers device list updates pub async fn delete_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -121,7 +119,7 @@ pub async fn delete_device_route( db.flush()?; - Ok(delete_device::Response {}) + Ok(delete_device::v3::Response {}) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -137,8 +135,8 @@ pub async fn delete_device_route( /// - Triggers device list updates pub async fn delete_devices_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -181,5 +179,5 @@ pub async fn delete_devices_route( db.flush()?; - Ok(delete_devices::Response {}) + Ok(delete_devices::v3::Response {}) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 62bf566..ad88254 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -2,14 +2,12 @@ use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use ruma::{ api::{ client::{ - error::ErrorKind, - r0::{ - directory::{ - get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, - }, - room, + directory::{ + get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, }, + error::ErrorKind, + room, }, federation, }, @@ -36,8 +34,8 @@ use tracing::{info, warn}; /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -56,8 +54,8 @@ pub async 
fn get_public_rooms_filtered_route( /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let response = get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -68,7 +66,7 @@ pub async fn get_public_rooms_route( ) .await?; - Ok(get_public_rooms::Response { + Ok(get_public_rooms::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, @@ -83,8 +81,8 @@ pub async fn get_public_rooms_route( /// - TODO: Access control checks pub async fn set_room_visibility_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { @@ -103,7 +101,7 @@ pub async fn set_room_visibility_route( db.flush()?; - Ok(set_room_visibility::Response {}) + Ok(set_room_visibility::v3::Response {}) } /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` @@ -111,9 +109,9 @@ pub async fn set_room_visibility_route( /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { - Ok(get_room_visibility::Response { + body: Ruma>, +) -> Result { + Ok(get_room_visibility::v3::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { @@ -129,7 +127,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, -) -> Result { +) -> Result { if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) { let response = db @@ -148,7 +146,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( ) .await?; - return Ok(get_public_rooms_filtered::Response { + return Ok(get_public_rooms_filtered::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, @@ -189,7 +187,6 @@ pub(crate) async fn get_public_rooms_filtered_helper( let room_id = room_id?; let chunk = PublicRoomsChunk { - aliases: Vec::new(), canonical_alias: db .rooms .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? @@ -328,7 +325,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Some(format!("n{}", num_since + limit)) }; - Ok(get_public_rooms_filtered::Response { + Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index a606aeb..379950f 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, - r0::filter::{create_filter, get_filter}, + filter::{create_filter, get_filter}, }; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` @@ -11,15 +11,15 @@ use ruma::api::client::{ /// - A user can only access their own filters pub async fn get_filter_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? 
{ Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; - Ok(get_filter::Response::new(filter)) + Ok(get_filter::v3::Response::new(filter)) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` @@ -27,10 +27,10 @@ pub async fn get_filter_route( /// Creates a new filter to be used by other endpoints. pub async fn create_filter_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::Response::new( + Ok(create_filter::v3::Response::new( db.users.create_filter(sender_user, &body.filter)?, )) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 2ea62a8..525c779 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -5,13 +5,11 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::{ - keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, }, @@ -29,8 +27,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) pub async fn upload_keys_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -59,7 +57,7 @@ pub async fn upload_keys_route( db.flush()?; - Ok(upload_keys::Response { + Ok(upload_keys::v3::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, }) } @@ -73,8 +71,8 @@ pub async fn upload_keys_route( /// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = get_keys_helper( @@ -93,8 +91,8 @@ pub async fn get_keys_route( /// Claims one-time keys pub async fn claim_keys_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let response = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush()?; @@ -109,8 +107,8 @@ pub async fn claim_keys_route( /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -160,7 +158,7 @@ pub async fn upload_signing_keys_route( db.flush()?; - Ok(upload_signing_keys::Response {}) + Ok(upload_signing_keys::v3::Response {}) } /// # `POST /_matrix/client/r0/keys/signatures/upload` @@ -168,12 +166,14 @@ pub async fn upload_signing_keys_route( /// Uploads end-to-end key signatures from the sender user. 
pub async fn upload_signatures_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { for (key_id, signed_key) in signed_keys { + let signed_key = serde_json::to_value(signed_key).unwrap(); + for signature in signed_key .get("signatures") .ok_or(Error::BadRequest( @@ -219,7 +219,9 @@ pub async fn upload_signatures_route( db.flush()?; - Ok(upload_signatures::Response {}) + Ok(upload_signatures::v3::Response { + failures: BTreeMap::new(), // TODO: integrate + }) } /// # `POST /_matrix/client/r0/keys/changes` @@ -229,8 +231,8 @@ pub async fn upload_signatures_route( /// - TODO: left users pub async fn get_key_changes_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); @@ -266,7 +268,7 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); } - Ok(get_key_changes::Response { + Ok(get_key_changes::v3::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO }) @@ -277,7 +279,7 @@ pub(crate) async fn get_keys_helper bool>( device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, db: &Database, -) -> Result { +) -> Result { let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); let mut user_signing_keys = BTreeMap::new(); @@ -386,7 +388,7 @@ pub(crate) async fn get_keys_helper bool>( } } - Ok(get_keys::Response { + Ok(get_keys::v3::Response { master_keys, self_signing_keys, user_signing_keys, @@ -397,7 +399,7 @@ pub(crate) async fn get_keys_helper bool>( fn add_unsigned_device_display_name( keys: &mut Raw, - metadata: ruma::api::client::r0::device::Device, + metadata: ruma::api::client::device::Device, ) -> serde_json::Result<()> { if let Some(display_name) = metadata.display_name { let mut object = keys.deserialize_as::>()?; @@ -416,7 +418,7 @@ fn add_unsigned_device_display_name( pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, -) -> Result { +) -> Result { let mut one_time_keys = BTreeMap::new(); let mut get_over_federation = BTreeMap::new(); @@ -468,7 +470,7 @@ pub(crate) async fn claim_keys_helper( } } - Ok(claim_keys::Response { + Ok(claim_keys::v3::Response { failures, one_time_keys, }) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index dcdea05..71dbed6 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -4,7 +4,7 @@ use crate::{ }; use ruma::api::client::{ error::ErrorKind, - r0::media::{ + media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, }, @@ -17,9 +17,9 @@ const MXC_LENGTH: usize = 32; /// Returns max upload size. 
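// Hypothetical helper, sketching the structure that upload_signatures_route above
// iterates over once a signed key is converted to a serde_json::Value: the nested
// `signatures` object maps user IDs to key-ID/signature pairs. Names and the
// sample payload are illustrative only.
use serde_json::{json, Value};

fn signature_ids(signed_key: &Value) -> Vec<(String, String)> {
    let mut out = Vec::new();
    if let Some(users) = signed_key.get("signatures").and_then(Value::as_object) {
        for (user_id, sigs) in users {
            if let Some(sigs) = sigs.as_object() {
                for (key_id, _signature) in sigs {
                    out.push((user_id.clone(), key_id.clone()));
                }
            }
        }
    }
    out
}

fn main() {
    let key = json!({
        "signatures": { "@alice:example.org": { "ed25519:DEVICEID": "base64sig" } }
    });
    println!("{:?}", signature_ids(&key));
}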
pub async fn get_media_config_route( db: DatabaseGuard, - _body: Ruma, -) -> Result { - Ok(get_media_config::Response { + _body: Ruma, +) -> Result { + Ok(get_media_config::v3::Response { upload_size: db.globals.max_request_size().into(), }) } @@ -32,8 +32,8 @@ pub async fn get_media_config_route( /// - Media will be saved in the media/ directory pub async fn create_content_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!( "mxc://{}/{}", db.globals.server_name(), @@ -56,7 +56,7 @@ pub async fn create_content_route( db.flush()?; - Ok(create_content::Response { + Ok(create_content::v3::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, }) @@ -67,13 +67,13 @@ pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, media_id: &str, -) -> Result { +) -> Result { let content_response = db .sending .send_federation_request( &db.globals, server_name, - get_content::Request { + get_content::v3::Request { allow_remote: false, server_name, media_id, @@ -101,8 +101,8 @@ pub async fn get_remote_content( /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -111,7 +111,7 @@ pub async fn get_content_route( file, }) = db.media.get(&db.globals, &mxc).await? { - Ok(get_content::Response { + Ok(get_content::v3::Response { file, content_type, content_disposition, @@ -132,8 +132,8 @@ pub async fn get_content_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -142,7 +142,7 @@ pub async fn get_content_as_filename_route( file, }) = db.media.get(&db.globals, &mxc).await? { - Ok(get_content_as_filename::Response { + Ok(get_content_as_filename::v3::Response { file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), @@ -151,7 +151,7 @@ pub async fn get_content_as_filename_route( let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; - Ok(get_content_as_filename::Response { + Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, @@ -168,8 +168,8 @@ pub async fn get_content_as_filename_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -188,14 +188,14 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::Response { file, content_type }) + Ok(get_content_thumbnail::v3::Response { file, content_type }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db .sending .send_federation_request( &db.globals, &body.server_name, - get_content_thumbnail::Request { + get_content_thumbnail::v3::Request { allow_remote: false, height: body.height, width: body.width, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 447f829..0f5e7c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -8,7 +8,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::membership::{ + membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, unban_user, IncomingThirdPartySigned, @@ -44,8 +44,8 @@ use tracing::{debug, error, warn}; /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers: HashSet<_> = db @@ -84,8 +84,8 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -124,7 +124,7 @@ pub async fn join_room_by_id_or_alias_route( db.flush()?; - Ok(join_room_by_id_or_alias::Response { + Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id, }) } @@ -136,15 +136,15 @@ pub async fn join_room_by_id_or_alias_route( /// - This should always work if the user is currently joined. pub async fn leave_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush()?; - Ok(leave_room::Response::new()) + Ok(leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -152,14 +152,14 @@ pub async fn leave_room_route( /// Tries to send an invite event into the room. pub async fn invite_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush()?; - Ok(invite_user::Response {}) + Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -170,8 +170,8 @@ pub async fn invite_user_route( /// Tries to send a kick event into the room. 
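// The media routes above key every lookup on the `mxc://<server_name>/<media_id>`
// form built with format!. A hypothetical round-trip helper (not from the patch)
// showing that construction and the inverse split:
fn make_mxc(server_name: &str, media_id: &str) -> String {
    format!("mxc://{}/{}", server_name, media_id)
}

fn split_mxc(mxc: &str) -> Option<(&str, &str)> {
    mxc.strip_prefix("mxc://")?.split_once('/')
}

fn main() {
    let mxc = make_mxc("example.org", "AbCdEf012345");
    assert_eq!(split_mxc(&mxc), Some(("example.org", "AbCdEf012345")));
    println!("{}", mxc);
}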
pub async fn kick_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -221,7 +221,7 @@ pub async fn kick_user_route( db.flush()?; - Ok(kick_user::Response::new()) + Ok(kick_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` @@ -229,8 +229,8 @@ pub async fn kick_user_route( /// Tries to send a ban event into the room. pub async fn ban_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -291,7 +291,7 @@ pub async fn ban_user_route( db.flush()?; - Ok(ban_user::Response::new()) + Ok(ban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` @@ -299,8 +299,8 @@ pub async fn ban_user_route( /// Tries to send an unban event into the room. pub async fn unban_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -349,7 +349,7 @@ pub async fn unban_user_route( db.flush()?; - Ok(unban_user::Response::new()) + Ok(unban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` @@ -362,15 +362,15 @@ pub async fn unban_user_route( /// be called from every device pub async fn forget_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, sender_user)?; db.flush()?; - Ok(forget_room::Response::new()) + Ok(forget_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/joined_rooms` @@ -378,11 +378,11 @@ pub async fn forget_room_route( /// Lists all rooms the user has joined. pub async fn joined_rooms_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(joined_rooms::Response { + Ok(joined_rooms::v3::Response { joined_rooms: db .rooms .rooms_joined(sender_user) @@ -398,8 +398,8 @@ pub async fn joined_rooms_route( /// - Only works if the user is currently joined pub async fn get_member_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? @@ -410,7 +410,7 @@ pub async fn get_member_events_route( )); } - Ok(get_member_events::Response { + Ok(get_member_events::v3::Response { chunk: db .rooms .room_state_full(&body.room_id)? @@ -429,8 +429,8 @@ pub async fn get_member_events_route( /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -447,14 +447,14 @@ pub async fn joined_members_route( joined.insert( user_id, - joined_members::RoomMember { + joined_members::v3::RoomMember { display_name, avatar_url, }, ); } - Ok(joined_members::Response { joined }) + Ok(joined_members::v3::Response { joined }) } #[tracing::instrument(skip(db))] @@ -464,7 +464,7 @@ async fn join_room_by_id_helper( room_id: &RoomId, servers: &HashSet>, _third_party_signed: Option<&IncomingThirdPartySigned>, -) -> Result { +) -> Result { let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( @@ -489,7 +489,7 @@ async fn join_room_by_id_helper( .send_federation_request( &db.globals, remote_server, - federation::membership::create_join_event_template::v1::Request { + federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, ver: &[RoomVersionId::V5, RoomVersionId::V6], @@ -720,7 +720,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.to_owned())) + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } fn validate_and_add_event_id( diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 93d5b3b..b5c4149 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -2,7 +2,7 @@ use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma use ruma::{ api::client::{ error::ErrorKind, - r0::message::{get_message_events, send_message_event}, + message::{get_message_events, send_message_event}, }, events::EventType, }; @@ -20,8 +20,8 @@ use std::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -61,7 +61,7 @@ pub async fn send_message_event_route( .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? .try_into() .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::Response { event_id }); + return Ok(send_message_event::v3::Response { event_id }); } let mut unsigned = BTreeMap::new(); @@ -93,7 +93,9 @@ pub async fn send_message_event_route( db.flush()?; - Ok(send_message_event::Response::new((*event_id).to_owned())) + Ok(send_message_event::v3::Response::new( + (*event_id).to_owned(), + )) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` @@ -104,8 +106,8 @@ pub async fn send_message_event_route( /// joined, depending on history_visibility) pub async fn get_message_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -132,12 +134,12 @@ pub async fn get_message_events_route( let next_token; - let mut resp = get_message_events::Response::new(); + let mut resp = get_message_events::v3::Response::new(); let mut lazy_loaded = HashSet::new(); match body.dir { - get_message_events::Direction::Forward => { + get_message_events::v3::Direction::Forward => { let events_after: Vec<_> = db .rooms .pdus_after(sender_user, &body.room_id, from)? 
@@ -174,7 +176,7 @@ pub async fn get_message_events_route( resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } - get_message_events::Direction::Backward => { + get_message_events::v3::Direction::Backward => { let events_before: Vec<_> = db .rooms .pdus_until(sender_user, &body.room_id, from)? diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 7549b1a..9e6ce0b 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,5 +1,5 @@ use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use ruma::api::client::r0::presence::{get_presence, set_presence}; +use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` @@ -7,8 +7,8 @@ use std::time::Duration; /// Sets the presence state of the sender user. pub async fn set_presence_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for room_id in db.rooms.rooms_joined(sender_user) { @@ -38,7 +38,7 @@ pub async fn set_presence_route( db.flush()?; - Ok(set_presence::Response {}) + Ok(set_presence::v3::Response {}) } /// # `GET /_matrix/client/r0/presence/{userId}/status` @@ -48,8 +48,8 @@ pub async fn set_presence_route( /// - Only works if you share a room with the user pub async fn get_presence_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; @@ -71,7 +71,7 @@ pub async fn get_presence_route( } if let Some(presence) = presence_event { - Ok(get_presence::Response { + Ok(get_presence::v3::Response { // TODO: Should ruma just use the presenceeventcontent type here? 
status_msg: presence.content.status_msg, currently_active: presence.content.currently_active, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 33bfbb5..3000027 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -3,7 +3,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::profile::{ + profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, }, @@ -21,8 +21,8 @@ use std::sync::Arc; /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -108,7 +108,7 @@ pub async fn set_displayname_route( db.flush()?; - Ok(set_display_name::Response {}) + Ok(set_display_name::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -118,8 +118,8 @@ pub async fn set_displayname_route( /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -133,12 +133,12 @@ pub async fn get_displayname_route( ) .await?; - return Ok(get_display_name::Response { + return Ok(get_display_name::v3::Response { displayname: response.displayname, }); } - Ok(get_display_name::Response { + Ok(get_display_name::v3::Response { displayname: db.users.displayname(&body.user_id)?, }) } @@ -150,8 +150,8 @@ pub async fn get_displayname_route( /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -239,7 +239,7 @@ pub async fn set_avatar_url_route( db.flush()?; - Ok(set_avatar_url::Response {}) + Ok(set_avatar_url::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` @@ -249,8 +249,8 @@ pub async fn set_avatar_url_route( /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -264,13 +264,13 @@ pub async fn get_avatar_url_route( ) .await?; - return Ok(get_avatar_url::Response { + return Ok(get_avatar_url::v3::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, }); } - Ok(get_avatar_url::Response { + Ok(get_avatar_url::v3::Response { avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, }) @@ -283,8 +283,8 @@ pub async fn get_avatar_url_route( /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -298,7 +298,7 @@ pub async fn get_profile_route( ) .await?; - return Ok(get_profile::Response { + return Ok(get_profile::v3::Response { displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, @@ -313,7 +313,7 @@ pub async fn get_profile_route( )); } - Ok(get_profile::Response { + Ok(get_profile::v3::Response { 
avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 67b70d2..90f4e02 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -2,7 +2,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{ + push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, @@ -17,8 +17,8 @@ use ruma::{ /// Retrieves the push rules event for this user. pub async fn get_pushrules_all_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -29,7 +29,7 @@ pub async fn get_pushrules_all_route( "PushRules event not found.", ))?; - Ok(get_pushrules_all::Response { + Ok(get_pushrules_all::v3::Response { global: event.content.global, }) } @@ -39,8 +39,8 @@ pub async fn get_pushrules_all_route( /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -77,7 +77,7 @@ pub async fn get_pushrule_route( }; if let Some(rule) = rule { - Ok(get_pushrule::Response { rule }) + Ok(get_pushrule::v3::Response { rule }) } else { Err(Error::BadRequest( ErrorKind::NotFound, @@ -91,8 +91,8 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -179,7 +179,7 @@ pub async fn set_pushrule_route( db.flush()?; - Ok(set_pushrule::Response {}) + Ok(set_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -187,8 +187,8 @@ pub async fn set_pushrule_route( /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -233,7 +233,7 @@ pub async fn get_pushrule_actions_route( db.flush()?; - Ok(get_pushrule_actions::Response { + Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), }) } @@ -243,8 +243,8 @@ pub async fn get_pushrule_actions_route( /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -302,7 +302,7 @@ pub async fn set_pushrule_actions_route( db.flush()?; - Ok(set_pushrule_actions::Response {}) + Ok(set_pushrule_actions::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -310,8 +310,8 @@ pub async fn set_pushrule_actions_route( /// Gets the enabled status of a single specified push rule for this user. 
pub async fn get_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -361,7 +361,7 @@ pub async fn get_pushrule_enabled_route( db.flush()?; - Ok(get_pushrule_enabled::Response { enabled }) + Ok(get_pushrule_enabled::v3::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -369,8 +369,8 @@ pub async fn get_pushrule_enabled_route( /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -433,7 +433,7 @@ pub async fn set_pushrule_enabled_route( db.flush()?; - Ok(set_pushrule_enabled::Response {}) + Ok(set_pushrule_enabled::v3::Response {}) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -441,8 +441,8 @@ pub async fn set_pushrule_enabled_route( /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -495,7 +495,7 @@ pub async fn delete_pushrule_route( db.flush()?; - Ok(delete_pushrule::Response {}) + Ok(delete_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushers` @@ -503,11 +503,11 @@ pub async fn delete_pushrule_route( /// Gets all currently active pushers for the sender user. pub async fn get_pushers_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_pushers::Response { + Ok(get_pushers::v3::Response { pushers: db.pusher.get_pushers(sender_user)?, }) } @@ -519,8 +519,8 @@ pub async fn get_pushers_route( /// - TODO: Handle `append` pub async fn set_pushers_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); @@ -528,5 +528,5 @@ pub async fn set_pushers_route( db.flush()?; - Ok(set_pusher::Response::default()) + Ok(set_pusher::v3::Response::default()) } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index cc6928d..9422f21 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,9 +1,6 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{read_marker::set_read_marker, receipt::create_receipt}, - }, + api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::EventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, @@ -18,8 +15,8 @@ use std::collections::BTreeMap; /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { @@ -75,7 +72,7 @@ pub async fn set_read_marker_route( db.flush()?; - Ok(set_read_marker::Response {}) + Ok(set_read_marker::v3::Response {}) } /// # `POST 
/_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` @@ -83,8 +80,8 @@ pub async fn set_read_marker_route( /// Sets private read marker and public read receipt EDU. pub async fn create_receipt_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.edus.private_read_set( @@ -126,5 +123,5 @@ pub async fn create_receipt_route( db.flush()?; - Ok(create_receipt::Response {}) + Ok(create_receipt::v3::Response {}) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 1e05bfe..4843993 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ - api::client::r0::redact::redact_event, + api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, EventType}, }; @@ -15,8 +15,8 @@ use serde_json::value::to_raw_value; /// - TODO: Handle txn id pub async fn redact_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -52,5 +52,5 @@ pub async fn redact_event_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(redact_event::Response { event_id }) + Ok(redact_event::v3::Response { event_id }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 6274172..1e47792 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::room::report_content}, + api::client::{error::ErrorKind, room::report_content}, events::room::message, int, }; @@ -11,8 +11,8 @@ use ruma::{ /// pub async fn report_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pdu = match db.rooms.get_pdu(&body.event_id)? 
{ @@ -68,5 +68,5 @@ pub async fn report_event_route( db.flush()?; - Ok(report_content::Response {}) + Ok(report_content::v3::Response {}) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 54559e2..99838ce 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -4,7 +4,7 @@ use crate::{ use ruma::{ api::client::{ error::ErrorKind, - r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, + room::{self, aliases, create_room, get_room_event, upgrade_room}, }, events::{ room::{ @@ -47,8 +47,10 @@ use tracing::{info, warn}; /// - Send invite events pub async fn create_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { + use create_room::v3::RoomPreset; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -207,15 +209,15 @@ pub async fn create_room_route( .preset .clone() .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - _ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom + room::Visibility::Private => RoomPreset::PrivateChat, + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom }); let mut users = BTreeMap::new(); users.insert(sender_user.clone(), int!(100)); - if preset == create_room::RoomPreset::TrustedPrivateChat { + if preset == RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { users.insert(invite_.clone(), int!(100)); } @@ -281,7 +283,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { - create_room::RoomPreset::PublicChat => JoinRule::Public, + RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default _ => JoinRule::Invite, })) @@ -319,7 +321,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - create_room::RoomPreset::PublicChat => GuestAccess::Forbidden, + RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, })) .expect("event is valid, we just created it"), @@ -408,7 +410,7 @@ pub async fn create_room_route( db.flush()?; - Ok(create_room::Response::new(room_id)) + Ok(create_room::v3::Response::new(room_id)) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` @@ -418,8 +420,8 @@ pub async fn create_room_route( /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -429,7 +431,7 @@ pub async fn get_room_event_route( )); } - Ok(get_room_event::Response { + Ok(get_room_event::v3::Response { event: db .rooms .get_pdu(&body.event_id)? 
@@ -445,8 +447,8 @@ pub async fn get_room_event_route( /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -456,7 +458,7 @@ pub async fn get_room_aliases_route( )); } - Ok(aliases::Response { + Ok(aliases::v3::Response { aliases: db .rooms .room_aliases(&body.room_id) @@ -477,8 +479,8 @@ pub async fn get_room_aliases_route( /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { @@ -702,5 +704,5 @@ pub async fn upgrade_room_route( db.flush()?; // Return the replacement room id - Ok(upgrade_room::Response { replacement_room }) + Ok(upgrade_room::v3::Response { replacement_room }) } diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 5860484..c83ff2c 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,7 +1,12 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; -use ruma::api::client::{error::ErrorKind, r0::search::search_events}; +use ruma::api::client::{ + error::ErrorKind, + search::search_events::v3::{ + self as search_events_v3, EventContextResult, ResultCategories, ResultRoomEvents, + SearchResult, + }, +}; -use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; /// # `POST /_matrix/client/r0/search` @@ -11,8 +16,8 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -97,7 +102,7 @@ pub async fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events::Response::new(ResultCategories { + Ok(search_events_v3::Response::new(ResultCategories { room_events: ResultRoomEvents { count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it groups: BTreeMap::new(), // TODO diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c2259c2..2e1ed54 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -3,10 +3,8 @@ use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - session::{get_login_types, login, logout, logout_all}, - uiaa::IncomingUserIdentifier, - }, + session::{get_login_types, login, logout, logout_all}, + uiaa::IncomingUserIdentifier, }, UserId, }; @@ -24,10 +22,10 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. 
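// A small model of the preset defaulting used in create_room_route earlier in this
// patch: when no preset is supplied, public visibility maps to PublicChat and
// anything else to PrivateChat. Local stand-in enums, not the real ruma types.
#[derive(Debug, PartialEq)]
enum Visibility { Public, Private }

#[derive(Debug, PartialEq)]
enum RoomPreset { PublicChat, PrivateChat }

fn default_preset(preset: Option<RoomPreset>, visibility: &Visibility) -> RoomPreset {
    preset.unwrap_or(match visibility {
        Visibility::Public => RoomPreset::PublicChat,
        _ => RoomPreset::PrivateChat,
    })
}

fn main() {
    assert_eq!(default_preset(None, &Visibility::Public), RoomPreset::PublicChat);
    assert_eq!(default_preset(None, &Visibility::Private), RoomPreset::PrivateChat);
}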
pub async fn get_login_types_route( - _body: Ruma, -) -> Result { - Ok(get_login_types::Response::new(vec![ - get_login_types::LoginType::Password(Default::default()), + _body: Ruma, +) -> Result { + Ok(get_login_types::v3::Response::new(vec![ + get_login_types::v3::LoginType::Password(Default::default()), ])) } @@ -44,12 +42,12 @@ pub async fn get_login_types_route( /// supported login types. pub async fn login_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password(login::IncomingPassword { + login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword { identifier, password, }) => { @@ -86,7 +84,7 @@ pub async fn login_route( user_id } - login::IncomingLoginInfo::Token(login::IncomingToken { token }) => { + login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, @@ -144,7 +142,7 @@ pub async fn login_route( db.flush()?; - Ok(login::Response { + Ok(login::v3::Response { user_id, access_token: token, home_server: Some(db.globals.server_name().to_owned()), @@ -163,8 +161,8 @@ pub async fn login_route( /// - Triggers device list updates pub async fn logout_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -172,7 +170,7 @@ pub async fn logout_route( db.flush()?; - Ok(logout::Response::new()) + Ok(logout::v3::Response::new()) } /// # `POST /_matrix/client/r0/logout/all` @@ -188,8 +186,8 @@ pub async fn logout_route( /// from each device of this user. 
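// A sketch of the token login handled above, assuming an HS256 shared secret; the
// claim names (`sub`, `exp`), the secret, and the expiry value are illustrative.
// The real route decodes with whatever key `db.globals.jwt_decoding_key()` holds.
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Claims {
    sub: String, // localpart of the user logging in
    exp: usize,  // expiry as a unix timestamp, checked by the default validation
}

fn main() -> Result<(), jsonwebtoken::errors::Error> {
    let secret = b"example-shared-secret";
    let claims = Claims { sub: "alice".to_owned(), exp: 4_102_444_800 };
    let token = encode(&Header::default(), &claims, &EncodingKey::from_secret(secret))?;

    let decoded = decode::<Claims>(
        &token,
        &DecodingKey::from_secret(secret),
        &Validation::new(Algorithm::HS256),
    )?;
    println!("token is for localpart {}", decoded.claims.sub);
    Ok(())
}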
pub async fn logout_all_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for device_id in db.users.all_device_ids(sender_user).flatten() { @@ -198,5 +196,5 @@ pub async fn logout_all_route( db.flush()?; - Ok(logout_all::Response::new()) + Ok(logout_all::v3::Response::new()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e334e7d..a97b187 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -6,7 +6,7 @@ use crate::{ use ruma::{ api::client::{ error::ErrorKind, - r0::state::{get_state_events, get_state_events_for_key, send_state_event}, + state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ room::{ @@ -28,8 +28,8 @@ use ruma::{ /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( @@ -45,7 +45,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }) + Ok(send_state_event::v3::Response { event_id }) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -57,8 +57,8 @@ pub async fn send_state_event_for_key_route( /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled @@ -82,7 +82,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }.into()) + Ok(send_state_event::v3::Response { event_id }.into()) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state` @@ -92,8 +92,8 @@ pub async fn send_state_event_for_empty_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -121,7 +121,7 @@ pub async fn get_state_events_route( )); } - Ok(get_state_events::Response { + Ok(get_state_events::v3::Response { room_state: db .rooms .room_state_full(&body.room_id)? 
@@ -138,8 +138,8 @@ pub async fn get_state_events_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -175,7 +175,7 @@ pub async fn get_state_events_for_key_route( "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, }) @@ -188,8 +188,8 @@ pub async fn get_state_events_for_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -225,7 +225,7 @@ pub async fn get_state_events_for_empty_key_route( "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index eef65da..eec4cf6 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ - api::client::r0::{ + api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, sync::sync_events, uiaa::UiaaResponse, @@ -56,8 +56,8 @@ use tracing::error; /// `since` will be cached pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; @@ -130,8 +130,8 @@ async fn sync_helper_wrapper( db: Arc, sender_user: Box, sender_device: Box, - body: sync_events::IncomingRequest, - tx: Sender>>, + body: sync_events::v3::IncomingRequest, + tx: Sender>>, ) { let since = body.since.clone(); @@ -172,9 +172,15 @@ async fn sync_helper( db: Arc, sender_user: Box, sender_device: Box, - body: sync_events::IncomingRequest, + body: sync_events::v3::IncomingRequest, // bool = caching allowed -) -> Result<(sync_events::Response, bool), Error> { +) -> Result<(sync_events::v3::Response, bool), Error> { + use sync_events::v3::{ + DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, + JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, + ToDevice, UnreadNotificationsCount, + }; + // TODO: match body.set_presence { db.rooms.edus.ping_presence(&sender_user)?; @@ -187,8 +193,8 @@ async fn sync_helper( // Load filter let filter = match body.filter { None => IncomingFilterDefinition::default(), - Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter, - Some(sync_events::IncomingFilter::FilterId(filter_id)) => db + Some(IncomingFilter::FilterDefinition(filter)) => filter, + Some(IncomingFilter::FilterId(filter_id)) => db .users .get_filter(&sender_user, &filter_id)? 
.unwrap_or_default(), @@ -666,8 +672,8 @@ async fn sync_helper( db.rooms .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; - let joined_room = sync_events::JoinedRoom { - account_data: sync_events::RoomAccountData { + let joined_room = JoinedRoom { + account_data: RoomAccountData { events: db .account_data .changes_since(Some(&room_id), &sender_user, since)? @@ -679,27 +685,27 @@ async fn sync_helper( }) .collect(), }, - summary: sync_events::RoomSummary { + summary: RoomSummary { heroes, joined_member_count: joined_member_count.map(|n| (n as u32).into()), invited_member_count: invited_member_count.map(|n| (n as u32).into()), }, - unread_notifications: sync_events::UnreadNotificationsCount { + unread_notifications: UnreadNotificationsCount { highlight_count, notification_count, }, - timeline: sync_events::Timeline { + timeline: Timeline { limited: limited || joined_since_last_sync, prev_batch, events: room_events, }, - state: sync_events::State { + state: State { events: state_events .iter() .map(|pdu| pdu.to_sync_state_event()) .collect(), }, - ephemeral: sync_events::Ephemeral { events: edus }, + ephemeral: Ephemeral { events: edus }, }; if !joined_room.is_empty() { @@ -767,14 +773,14 @@ async fn sync_helper( left_rooms.insert( room_id.clone(), - sync_events::LeftRoom { - account_data: sync_events::RoomAccountData { events: Vec::new() }, - timeline: sync_events::Timeline { + LeftRoom { + account_data: RoomAccountData { events: Vec::new() }, + timeline: Timeline { limited: false, prev_batch: Some(next_batch_string.clone()), events: Vec::new(), }, - state: sync_events::State { + state: State { events: left_state_events, }, }, @@ -807,8 +813,8 @@ async fn sync_helper( invited_rooms.insert( room_id.clone(), - sync_events::InvitedRoom { - invite_state: sync_events::InviteState { + InvitedRoom { + invite_state: InviteState { events: invite_state_events, }, }, @@ -840,21 +846,21 @@ async fn sync_helper( db.users .remove_to_device_events(&sender_user, &sender_device, since)?; - let response = sync_events::Response { + let response = sync_events::v3::Response { next_batch: next_batch_string, - rooms: sync_events::Rooms { + rooms: Rooms { leave: left_rooms, join: joined_rooms, invite: invited_rooms, knock: BTreeMap::new(), // TODO }, - presence: sync_events::Presence { + presence: Presence { events: presence_updates .into_iter() .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully")) .collect(), }, - account_data: sync_events::GlobalAccountData { + account_data: GlobalAccountData { events: db .account_data .changes_since(None, &sender_user, since)? 
@@ -866,12 +872,12 @@ async fn sync_helper( }) .collect(), }, - device_lists: sync_events::DeviceLists { + device_lists: DeviceLists { changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, - to_device: sync_events::ToDevice { + to_device: ToDevice { events: db .users .get_to_device_events(&sender_user, &sender_device)?, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 29bd9a0..21cff0b 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ - api::client::r0::tag::{create_tag, delete_tag, get_tags}, + api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, EventType, @@ -15,8 +15,8 @@ use std::collections::BTreeMap; /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -42,7 +42,7 @@ pub async fn update_tag_route( db.flush()?; - Ok(create_tag::Response {}) + Ok(create_tag::v3::Response {}) } /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` @@ -52,8 +52,8 @@ pub async fn update_tag_route( /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -76,7 +76,7 @@ pub async fn delete_tag_route( db.flush()?; - Ok(delete_tag::Response {}) + Ok(delete_tag::v3::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` @@ -86,11 +86,11 @@ pub async fn delete_tag_route( /// - Gets the tag event of the room account data. pub async fn get_tags_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_tags::Response { + Ok(get_tags::v3::Response { tags: db .account_data .get(Some(&body.room_id), sender_user, EventType::Tag)? diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 524f3ba..c2c1adf 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,5 +1,5 @@ use crate::{Result, Ruma}; -use ruma::api::client::r0::thirdparty::get_protocols; +use ruma::api::client::thirdparty::get_protocols; use std::collections::BTreeMap; @@ -7,10 +7,10 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. 
pub async fn get_protocols_route( - _body: Ruma, -) -> Result { + _body: Ruma, +) -> Result { // TODO - Ok(get_protocols::Response { + Ok(get_protocols::v3::Response { protocols: BTreeMap::new(), }) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e57998f..6d4fc0c 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::{ - client::{error::ErrorKind, r0::to_device::send_event_to_device}, + client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, events::EventType, @@ -15,8 +15,8 @@ use ruma::{ /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -28,7 +28,7 @@ pub async fn send_event_to_device_route( .existing_txnid(sender_user, sender_device, &body.txn_id)? .is_some() { - return Ok(send_event_to_device::Response.into()); + return Ok(send_event_to_device::v3::Response.into()); } */ @@ -93,5 +93,5 @@ pub async fn send_event_to_device_route( db.flush()?; - Ok(send_event_to_device::Response {}) + Ok(send_event_to_device::v3::Response {}) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index bbc852d..9d4ba6f 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,14 +1,15 @@ use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use create_typing_event::Typing; -use ruma::api::client::r0::typing::create_typing_event; +use ruma::api::client::typing::create_typing_event; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. pub async fn create_typing_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { + use create_typing_event::v3::Typing; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Typing::Yes(duration) = body.state { @@ -24,5 +25,5 @@ pub async fn create_typing_event_route( .typing_remove(sender_user, &body.room_id, &db.globals)?; } - Ok(create_typing_event::Response {}) + Ok(create_typing_event::v3::Response {}) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 168f172..84ac355 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, iter::FromIterator}; use crate::{Result, Ruma}; -use ruma::api::client::unversioned::get_supported_versions; +use ruma::api::client::discover::get_supported_versions; /// # `GET /_matrix/client/versions` /// diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index cecba7f..d641848 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,5 +1,5 @@ use crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::api::client::r0::user_directory::search_users; +use ruma::api::client::user_directory::search_users; /// # `POST /_matrix/client/r0/user_directory/search` /// @@ -8,15 +8,15 @@ use ruma::api::client::r0::user_directory::search_users; /// - TODO: Hide users that are not in any public rooms? 
pub async fn search_users_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; - let user = search_users::User { + let user = search_users::v3::User { user_id: user_id.clone(), display_name: db.users.displayname(&user_id).ok()?, avatar_url: db.users.avatar_url(&user_id).ok()?, @@ -47,5 +47,5 @@ pub async fn search_users_route( let results = users.by_ref().take(limit).collect(); let limited = users.next().is_some(); - Ok(search_users::Response { results, limited }) + Ok(search_users::v3::Response { results, limited }) } diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index e9a553a..6281744 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; -use ruma::{api::client::r0::voip::get_turn_server_info, SecondsSinceUnixEpoch}; +use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; @@ -11,8 +11,8 @@ type HmacSha1 = Hmac; /// TODO: Returns information about the recommended turn server. pub async fn turn_server_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let turn_secret = db.globals.turn_secret(); @@ -39,7 +39,7 @@ pub async fn turn_server_route( ) }; - Ok(get_turn_server_info::Response { + Ok(get_turn_server_info::v3::Response { username, password, uris: db.globals.turn_uris().to_vec(), diff --git a/src/database/globals.rs b/src/database/globals.rs index c5b2b77..7bc300d 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,7 @@ use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ - client::r0::sync::sync_events, + client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, @@ -27,8 +27,8 @@ type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( - Option, // since - Receiver>>, // rx + Option, // since + Receiver>>, // rx ); pub struct Globals { diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 2eefe48..10443f6 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -1,8 +1,8 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, serde::Raw, RoomId, UserId, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index bc7017b..36f8454 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -2,7 +2,7 @@ use crate::{Database, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ - client::r0::push::{get_pushers, set_pusher, PusherKind}, + client::push::{get_pushers, set_pusher, PusherKind}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, @@ -30,7 +30,7 @@ pub struct PushData { impl PushData { #[tracing::instrument(skip(self, sender, 
pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { + pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -53,7 +53,7 @@ impl PushData { } #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? .map(|push| { @@ -64,7 +64,7 @@ impl PushData { } #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { + pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -171,7 +171,7 @@ where pub async fn send_push_notice( user: &UserId, unread: UInt, - pusher: &get_pushers::Pusher, + pusher: &get_pushers::v3::Pusher, ruleset: Ruleset, pdu: &PduEvent, db: &Database, @@ -251,7 +251,7 @@ pub fn get_actions<'a>( #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] async fn send_notice( unread: UInt, - pusher: &get_pushers::Pusher, + pusher: &get_pushers::v3::Pusher, tweaks: Vec, event: &PduEvent, db: &Database, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c751167..3a71a3b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2606,7 +2606,7 @@ impl Rooms { .send_federation_request( &db.globals, &remote_server, - federation::membership::get_leave_event::v1::Request { room_id, user_id }, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, ) .await; diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index b2244b5..6b15d72 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -7,7 +7,7 @@ use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::uiaa::{ + uiaa::{ AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, }, diff --git a/src/database/users.rs b/src/database/users.rs index 681ee28..a66fa93 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,9 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{device::Device, filter::IncomingFilterDefinition}, - }, + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, diff --git a/src/error.rs b/src/error.rs index a16a3ab..206a055 100644 --- a/src/error.rs +++ b/src/error.rs @@ -4,7 +4,7 @@ use http::StatusCode; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, - r0::uiaa::{UiaaInfo, UiaaResponse}, + uiaa::{UiaaInfo, UiaaResponse}, }, ServerName, }; diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index ee89cc2..119c3ea 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::client::r0::uiaa::UiaaResponse, + api::client::uiaa::UiaaResponse, identifiers::{DeviceId, UserId}, signatures::CanonicalJsonValue, Outgoing, ServerName, diff --git a/src/server_server.rs b/src/server_server.rs index a4442f0..9dc2617 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -26,7 +26,7 @@ use ruma::{ membership::{ create_invite, create_join_event::{self, RoomState}, - create_join_event_template, + prepare_join_event, }, query::{get_profile_information, 
get_room_information}, transactions::{ @@ -49,7 +49,7 @@ use ruma::{ }, int, receipt::ReceiptType, - serde::{Base64, JsonObject}, + serde::{Base64, JsonObject, Raw}, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, @@ -532,7 +532,7 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result Result>() .unwrap() @@ -1981,24 +1982,23 @@ pub(crate) async fn fetch_signing_keys( debug!("Fetching signing keys for {} over federation", origin); - if let Ok(get_keys_response) = db + if let Some(server_key) = db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) { - db.globals - .add_signing_key(origin, get_keys_response.server_key.clone())?; + db.globals.add_signing_key(origin, server_key.clone())?; result.extend( - get_keys_response - .server_key + server_key .verify_keys .into_iter() .map(|(k, v)| (k.to_string(), v.key)), ); result.extend( - get_keys_response - .server_key + server_key .old_verify_keys .into_iter() .map(|(k, v)| (k.to_string(), v.key)), @@ -2011,7 +2011,7 @@ pub(crate) async fn fetch_signing_keys( for server in db.globals.trusted_servers() { debug!("Asking {} for {}'s signing key", server, origin); - if let Ok(keys) = db + if let Some(server_keys) = db .sending .send_federation_request( &db.globals, @@ -2027,9 +2027,16 @@ pub(crate) async fn fetch_signing_keys( ), ) .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) { - trace!("Got signing keys: {:?}", keys); - for k in keys.server_keys { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { db.globals.add_signing_key(origin, k.clone())?; result.extend( k.verify_keys @@ -2538,8 +2545,8 @@ pub async fn get_room_state_ids_route( /// Creates a join template. pub async fn create_join_event_template_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2701,7 +2708,7 @@ pub async fn create_join_event_template_route( CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), ); - Ok(create_join_event_template::v1::Response { + Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), }) @@ -3293,6 +3300,8 @@ pub(crate) async fn fetch_join_signing_keys( .write() .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for k in keys.server_keys { + let k = k.deserialize().unwrap(); + // TODO: Check signature from trusted server? servers.remove(&k.server_name); @@ -3332,7 +3341,7 @@ pub(crate) async fn fetch_join_signing_keys( if let (Ok(get_keys_response), origin) = result { let result: BTreeMap<_, _> = db .globals - .add_signing_key(&origin, get_keys_response.server_key.clone())? + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? 
.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); From 557d119bee75d4d506067249b996cad54c00a66c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Feb 2022 19:54:26 +0100 Subject: [PATCH 1043/1727] change search_events_v3 to search_events::v3 --- src/client_server/search.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index c83ff2c..753669a 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,9 +1,9 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, - search::search_events::v3::{ - self as search_events_v3, EventContextResult, ResultCategories, ResultRoomEvents, - SearchResult, + search::search_events::{ + self, + v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, }; @@ -16,8 +16,8 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -102,7 +102,7 @@ pub async fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events_v3::Response::new(ResultCategories { + Ok(search_events::v3::Response::new(ResultCategories { room_events: ResultRoomEvents { count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it groups: BTreeMap::new(), // TODO From 8f063c99d51deef5d35296555b7b1028a610fc1c Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Feb 2022 22:29:55 +0100 Subject: [PATCH 1044/1727] chore(ci): Split up tests --- .gitlab-ci.yml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bd4ce79..8660f4e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -238,7 +238,7 @@ docker:tags:dockerhub: # Run tests # # --------------------------------------------------------------------- # -test:cargo: +.test-shared-settings: stage: "test" needs: [] image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" @@ -246,13 +246,15 @@ test:cargo: variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true + +test:cargo: + extends: .test-shared-settings before_script: - - rustup component add clippy rustfmt + - rustup component add clippy # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - - cargo fmt --all -- --check - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: @@ -261,6 +263,13 @@ test:cargo: junit: report.xml codequality: gl-code-quality-report.json +test:format: + extends: .test-shared-settings + before_script: + - rustup component add rustfmt + script: + - cargo fmt --all -- --check + test:sytest: stage: "test" allow_failure: true 
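The split jobs above run the same checks a contributor can reproduce locally before pushing. A rough local equivalent (a sketch only; the sccache and gitlab-report plumbing from the CI file is omitted, and a working Rust toolchain is assumed):

```bash
# Components the split jobs rely on
rustup component add clippy rustfmt

# test:format — formatting check
cargo fmt --all -- --check

# test:cargo — test suite and lints
cargo test --workspace --locked --no-fail-fast
cargo clippy --workspace
```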
From ad6eb92bbd38889c196d02a5af15313679e7d7cb Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Feb 2022 22:30:02 +0100 Subject: [PATCH 1045/1727] feat(ci): Add dependency audit to CI tests --- .gitlab-ci.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8660f4e..3d321b4 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -270,6 +270,17 @@ test:format: script: - cargo fmt --all -- --check +test:audit: + extends: .test-shared-settings + allow_failure: true + script: + - cargo audit --color always || true + - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json + artifacts: + when: always + reports: + sast: gl-sast-report.json + test:sytest: stage: "test" allow_failure: true From 94573a3a610556bf3871689fb0fd749521071580 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 19 Feb 2022 17:06:06 +0100 Subject: [PATCH 1046/1727] improve docker documentation some --- docker/README.md | 37 +++++-- ...fik.yml => docker-compose.for-traefik.yml} | 0 ...raefik.yml => docker-compose.override.yml} | 0 docker/docker-compose.with-traefik.yml | 97 +++++++++++++++++++ 4 files changed, 124 insertions(+), 10 deletions(-) rename docker/{docker-compose.traefik.yml => docker-compose.for-traefik.yml} (100%) rename docker/{docker-compose.override.traefik.yml => docker-compose.override.yml} (100%) create mode 100644 docker/docker-compose.with-traefik.yml diff --git a/docker/README.md b/docker/README.md index d886738..0a5981d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -38,16 +38,28 @@ or you can skip the build step and pull the image from one of the following regi [gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). -You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need -to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` +config file, an example can be found [here](../conduit-example.toml). You can pass in different env +vars to change config values on the fly. You can even configure Conduit completely by using env +vars, but for that you need to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of +possible values, please take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. ## Docker-compose -If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying -Conduit can be found [here](../DEPLOY.md). 
+If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. + +Depending on your proxy setup, you can use one of the following files; +- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) +- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) +- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml) + +When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and +rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want +for your server. + +Additional info about deploying Conduit can be found [here](../DEPLOY.md). ### Build @@ -71,11 +83,16 @@ docker-compose up -d ### Use Traefik as Proxy -As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making containerized app and services available through the web. With the -two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is -equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is -the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports -`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. +As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making +containerized app and services available through the web. With the two provided files, +[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or +[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and +[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy +and use Conduit, with a little caveat. If you already took a look at the files, then you should have +seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and +loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to +either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and +`.well-known/matrix/server`. With the service `well-known` we use a single `nginx` container that will serve those two files. diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.for-traefik.yml similarity index 100% rename from docker/docker-compose.traefik.yml rename to docker/docker-compose.for-traefik.yml diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.yml similarity index 100% rename from docker/docker-compose.override.traefik.yml rename to docker/docker-compose.override.yml diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml new file mode 100644 index 0000000..6d46827 --- /dev/null +++ b/docker/docker-compose.with-traefik.yml @@ -0,0 +1,97 @@ +# Conduit - Behind Traefik Reverse Proxy +version: '3' + +services: + homeserver: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. 
+ image: matrixconduit/matrix-conduit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master + restart: unless-stopped + volumes: + - db:/srv/conduit/.local/share/conduit + ### Uncomment if you want to use conduit.toml to configure Conduit + ### Note: Set env vars will override conduit.toml values + # - ./conduit.toml:/srv/conduit/conduit.toml + networks: + - proxy + environment: + CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_ALLOW_REGISTRATION : 'true' + ### Uncomment and change values as desired + # CONDUIT_ADDRESS: 0.0.0.0 + # CONDUIT_PORT: 6167 + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' + # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" + # CONDUIT_ALLOW_JAEGER: 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'false' + # CONDUIT_ALLOW_FEDERATION: 'false' + # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit + # CONDUIT_WORKERS: 10 + # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + + # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. + well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files + + ### Uncomment if you want to use your own Element-Web App. 
+ ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and Conduit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/element-web:latest + # restart: unless-stopped + # volumes: + # - ./element_config.json:/app/config.json + # networks: + # - proxy + # depends_on: + # - homeserver + + traefik: + image: "traefik:latest" + container_name: "traefik" + restart: "unless-stopped" + ports: + - "80:80" + - "443:443" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + # - "./traefik_config:/etc/traefik" + - "acme:/etc/traefik/acme" + labels: + - "traefik.enable=true" + + # middleware redirect + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + # global redirect to https + - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" + - "traefik.http.routers.redirs.entrypoints=http" + - "traefik.http.routers.redirs.middlewares=redirect-to-https" + + networks: + - proxy + +volumes: + db: + acme: + +networks: + proxy: \ No newline at end of file From cc1472788815d9daadb53b085dd969bcd7e39741 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sun, 20 Feb 2022 10:55:17 +0100 Subject: [PATCH 1047/1727] revert reflow --- docker/README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docker/README.md b/docker/README.md index 0a5981d..1c9a03d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -38,11 +38,9 @@ or you can skip the build step and pull the image from one of the following regi [gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` -config file, an example can be found [here](../conduit-example.toml). You can pass in different env -vars to change config values on the fly. You can even configure Conduit completely by using env -vars, but for that you need to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of -possible values, please take a look at the `docker-compose.yml` file. +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). +You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need +to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. From 5a80507006eb27403a0dcf9d42607d9fa781a8fc Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 20 Feb 2022 11:12:49 +0100 Subject: [PATCH 1048/1727] chore(docs): Remove the now obsolete cross readme --- cross/README.md | 37 ------------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 cross/README.md diff --git a/cross/README.md b/cross/README.md deleted file mode 100644 index 2829d23..0000000 --- a/cross/README.md +++ /dev/null @@ -1,37 +0,0 @@ -## Cross compilation - -The `cross` folder contains a set of convenience scripts (`build.sh` and `test.sh`) for cross-compiling Conduit. 
- -Currently supported targets are - -- aarch64-unknown-linux-musl -- arm-unknown-linux-musleabihf -- armv7-unknown-linux-musleabihf -- x86\_64-unknown-linux-musl - -### Install prerequisites -#### Docker -[Installation guide](https://docs.docker.com/get-docker/). -```sh -$ sudo apt install docker -$ sudo systemctl start docker -$ sudo usermod -aG docker $USER -$ newgrp docker -``` - -#### Cross -[Installation guide](https://github.com/rust-embedded/cross/#installation). -```sh -$ cargo install cross -``` - -### Buiding Conduit -```sh -$ TARGET=armv7-unknown-linux-musleabihf ./cross/build.sh --release -``` -The cross-compiled binary is at `target/armv7-unknown-linux-musleabihf/release/conduit` - -### Testing Conduit -```sh -$ TARGET=armv7-unknown-linux-musleabihf ./cross/test.sh --release -``` From 196c83939c38cce47bba054e533b8ebee0ac6310 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 6 Feb 2022 20:23:22 +0100 Subject: [PATCH 1049/1727] Add show-config admin room command --- src/config.rs | 92 +++++++++++++++++++++++++++++++++++++++++ src/database/admin.rs | 7 ++++ src/database/globals.rs | 2 +- 3 files changed, 100 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 155704b..a6ab63e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,6 @@ use std::{ collections::BTreeMap, + fmt, net::{IpAddr, Ipv4Addr}, }; @@ -97,6 +98,97 @@ impl Config { } } +impl fmt::Display for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Prepare a list of config values to show + let lines = [ + ("Server name", self.server_name.host()), + ("Database backend", &self.database_backend), + ("Database path", &self.database_path), + ( + "Database cache capacity (MB)", + &self.db_cache_capacity_mb.to_string(), + ), + ( + "Cache capacity modifier", + &self.conduit_cache_capacity_modifier.to_string(), + ), + #[cfg(feature = "rocksdb")] + ( + "Maximum open files for RocksDB", + &self.rocksdb_max_open_files.to_string(), + ), + ("PDU cache capacity", &self.pdu_cache_capacity.to_string()), + ( + "Cleanup interval in seconds", + &self.cleanup_second_interval.to_string(), + ), + ("Maximum request size", &self.max_request_size.to_string()), + ( + "Maximum concurrent requests", + &self.max_concurrent_requests.to_string(), + ), + ("Allow registration", &self.allow_registration.to_string()), + ("Allow encryption", &self.allow_encryption.to_string()), + ("Allow federation", &self.allow_federation.to_string()), + ("Allow room creation", &self.allow_room_creation.to_string()), + ( + "JWT secret", + match self.jwt_secret { + Some(_) => "set", + None => "not set", + }, + ), + ("Trusted servers", { + let mut lst = vec![]; + for server in &self.trusted_servers { + lst.push(server.host()); + } + &lst.join(", ") + }), + ( + "TURN username", + if self.turn_username.is_empty() { + "not set" + } else { + &self.turn_username + }, + ), + ("TURN password", { + if self.turn_password.is_empty() { + "not set" + } else { + "set" + } + }), + ("TURN secret", { + if self.turn_secret.is_empty() { + "not set" + } else { + "set" + } + }), + ("Turn TTL", &self.turn_ttl.to_string()), + ("Turn URIs", { + let mut lst = vec![]; + for item in self.turn_uris.to_vec().into_iter().enumerate() { + let (_, uri): (usize, String) = item; + lst.push(uri); + } + &lst.join(", ") + }), + ]; + + let mut msg: String = "Active config values:\n\n".to_string(); + + for line in lines.into_iter().enumerate() { + msg += &format!("{}: {}\n", line.1 .0, line.1 .1); + } + + write!(f, "{}", msg) + } +} + fn false_fn() 
-> bool { false } diff --git a/src/database/admin.rs b/src/database/admin.rs index f9d4f42..f2e66e4 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -259,6 +259,9 @@ enum AdminCommand { /// Print database memory usage statistics DatabaseMemoryUsage, + + /// Show configuration values + ShowConfig, } fn process_admin_command( @@ -428,6 +431,10 @@ fn process_admin_command( e )), }, + AdminCommand::ShowConfig => { + // Construct and send the response + RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index 7bc300d..9a9163b 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -35,7 +35,7 @@ pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, pub(super) globals: Arc, - config: Config, + pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, From 949f2523f900604e688129563d952f03fc6e0bb3 Mon Sep 17 00:00:00 2001 From: TomZ Date: Mon, 21 Feb 2022 22:35:08 +0100 Subject: [PATCH 1050/1727] Fix permissions The text just sets the ownership and ignores that defaults on unix are to have newly created dirs be readable by everyone. This closes the database to unauthorized users on multi-user systems. --- DEPLOY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEPLOY.md b/DEPLOY.md index a28218d..3d1b780 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -150,6 +150,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/matrix-conduit/ sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ +sudo chmod 700 /var/lib/matrix-conduit/ ``` ## Setting up the Reverse Proxy From 3b2b35aab738875fe07b155d9d97a6fbefca6757 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 00:26:53 +0100 Subject: [PATCH 1051/1727] Log caught Ctrl+C or SIGTERM for operator feedback --- src/main.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6aa0870..c49c5ea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -37,6 +37,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; +use tracing::warn; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -346,11 +347,14 @@ async fn shutdown_signal(handle: ServerHandle) { #[cfg(not(unix))] let terminate = std::future::pending::<()>(); + let sig: &str; + tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {}, + _ = ctrl_c => { sig = "Ctrl+C"; }, + _ = terminate => { sig = "SIGTERM"; }, } + warn!("Received {}, shutting down...", sig); handle.graceful_shutdown(Some(Duration::from_secs(30))); } From a5bb6786c8688134c5f6df3ed3c02ef85eb9f14e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 22 Feb 2022 15:22:53 +0000 Subject: [PATCH 1052/1727] fix(docker): Make conduit own default db path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a user mounts a volume into the default volume path, it uses the permissions and ownership from the host volume. In most cases, this is 1000:1000, which it also uses on the inside. If you don't mount a volume though (e.g., for testing), conduit cries: “The database couldn't be loaded or created.” This fix chowns the default db dir to remedy this. 
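In other words, a bind-mounted database directory keeps whatever ownership the host path has (usually 1000:1000, which the container also uses), while the unmounted default path could previously end up unwritable for the conduit user. A minimal sketch of the idea, assuming the default path variable and user name used elsewhere in this series (this is not the literal patch content, which follows below):

```bash
# Inside the image, the fix amounts to pre-creating the default database
# directory and handing it to the unprivileged conduit user:
mkdir -p "$DEFAULT_DB_PATH"
chown -R conduit:conduit "$DEFAULT_DB_PATH"

# On the host, a bind mount keeps the host directory's ownership, which can
# be inspected before starting the container (example path):
ls -ldn ./conduit-db
```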
--- docker/ci-binaries-packaging.Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index ee1ca4c..6defc3d 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -48,8 +48,6 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ org.opencontainers.image.ref.name="" -# Created directory for the database and media files -RUN mkdir -p ${DEFAULT_DB_PATH} # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh @@ -67,7 +65,9 @@ RUN set -x ; \ # Change ownership of Conduit files to conduit user and group RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} # Change user to conduit USER conduit From 65fa4b2ca4c2524cad8c11bbc9a33b193e267c57 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 22:31:34 +0100 Subject: [PATCH 1053/1727] Fix proxy config examples in config/proxy.rs --- src/config/proxy.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/config/proxy.rs b/src/config/proxy.rs index fb0387c..dcf304e 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -10,13 +10,13 @@ use crate::Result; /// ``` /// - Global proxy /// ```toml -/// [proxy] +/// [global.proxy] /// global = { url = "socks5h://localhost:9050" } /// ``` /// - Proxy some domains /// ```toml -/// [proxy] -/// [[proxy.by_domain]] +/// [global.proxy] +/// [[global.proxy.by_domain]] /// url = "socks5h://localhost:9050" /// include = ["*.onion", "matrix.myspecial.onion"] /// exclude = ["*.myspecial.onion"] From 5c6c6f272cdb805c2253a6957a142e6dcaa1fa56 Mon Sep 17 00:00:00 2001 From: TomZ Date: Mon, 21 Feb 2022 22:28:13 +0100 Subject: [PATCH 1054/1727] Fix security issue. The docs state that you need to make the config file _readable_ and then proceeds to make the file writable. This changes it to make the file to be owned by root and readable by anyone. This is the default for unix / linux and suggested practice for files in /etc. --- DEPLOY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index a28218d..6063482 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -142,7 +142,8 @@ As we are using a Conduit specific user we need to allow it to read the config. 
Debian: ```bash -sudo chown -R conduit:nogroup /etc/matrix-conduit +sudo chown -R root:root /etc/matrix-conduit +sudo chmod 755 /etc/matrix-conduit ``` If you use the default database path you also need to run this: From 9f059ad4c3a0ec0edcc6a086b5a755a8df6826a1 Mon Sep 17 00:00:00 2001 From: reti4 Date: Tue, 1 Mar 2022 21:03:55 +0000 Subject: [PATCH 1055/1727] make username login case insensitive --- src/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 2e1ed54..c4a7107 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -57,7 +57,7 @@ pub async fn login_route( return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = - UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_lowercase().to_owned(), db.globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; From 9385ea0e7c7b7e87ae49ddb17cd75d84080e91ca Mon Sep 17 00:00:00 2001 From: reti4 Date: Tue, 1 Mar 2022 21:23:34 +0000 Subject: [PATCH 1056/1727] fmt fix --- src/client_server/session.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c4a7107..0bbae14 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -56,11 +56,11 @@ pub async fn login_route( } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = - UserId::parse_with_server_name(username.to_lowercase().to_owned(), db.globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + let user_id = UserId::parse_with_server_name( + username.to_lowercase().to_owned(), + db.globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", From 8bafdc4623edeb72496511c2e561ab213f46db80 Mon Sep 17 00:00:00 2001 From: reti4 Date: Wed, 2 Mar 2022 02:25:15 +0000 Subject: [PATCH 1057/1727] fixed location of lowercase fn --- src/client_server/session.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 0bbae14..c78f600 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -52,15 +52,15 @@ pub async fn login_route( password, }) => { let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { - matrix_id + matrix_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = UserId::parse_with_server_name( - username.to_lowercase().to_owned(), - db.globals.server_name(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", From 5695121f38af06f7af7acb84299705dc9a2f1c43 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Wed, 2 Mar 2022 23:48:01 +0800 Subject: [PATCH 1058/1727] Fix wrong 
associated type in OutgoingKind::Appservice --- src/database/sending.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 2d64be1..b7d62c1 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -38,7 +38,7 @@ use super::abstraction::Tree; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { - Appservice(Box), + Appservice(String), Push(Vec, Vec), // user and pushkey Normal(Box), } @@ -505,7 +505,7 @@ impl Sending { let db = db.read().await; match &kind { - OutgoingKind::Appservice(server) => { + OutgoingKind::Appservice(id) => { let mut pdu_jsons = Vec::new(); for event in &events { @@ -535,7 +535,7 @@ impl Sending { let response = appservice_server::send_request( &db.globals, db.appservice - .get_registration(server.as_str()) + .get_registration(&id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -756,9 +756,7 @@ impl Sending { })?; ( - OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), + OutgoingKind::Appservice(server), if value.is_empty() { SendingEventType::Pdu(event.to_vec()) } else { From 5a9462c9ab5a9d7ffe48644bb17689be4df56020 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 8 Mar 2022 21:31:54 +0000 Subject: [PATCH 1059/1727] fix(ci): Fix musl builds This pins the image to use for cross to a working image's sha256 --- .gitlab-ci.yml | 2 +- Cross.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3d321b4..bf68e25 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,7 +26,7 @@ variables: - if: "$CI_COMMIT_TAG" - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. 
interruptible: true - image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093" tags: ["docker"] services: ["docker:dind"] variables: diff --git a/Cross.toml b/Cross.toml index a1387b4..5d99a35 100644 --- a/Cross.toml +++ b/Cross.toml @@ -20,4 +20,4 @@ image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-lin image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb" From 194a85d4c5b4872e412c1bd4d93c9d4a85053bc5 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sat, 12 Mar 2022 15:42:18 +0100 Subject: [PATCH 1060/1727] Use native root CA certificates for reqwest --- Cargo.lock | 78 +++++++++++++++++++++++++++++++++++++++++++++++------- Cargo.toml | 2 +- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 997cedc..c45fa26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -449,6 +449,22 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" version = "0.2.1" @@ -1577,6 +1593,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + [[package]] name = "opentelemetry" version = "0.16.0" @@ -2028,6 +2050,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls", + "rustls-native-certs", "rustls-pemfile", "serde", "serde_json", @@ -2039,7 +2062,6 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", "winreg 0.7.0", ] @@ -2385,6 +2407,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -2400,6 +2434,16 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -2416,6 +2460,29 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "serde" version = "1.0.134" @@ -3238,15 +3305,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "webpki-roots" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" -dependencies = [ - "webpki", -] - [[package]] name = "weezl" version = "0.1.5" diff --git a/Cargo.toml b/Cargo.toml index b9affa7..c24c7cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } +reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images From b5b81818516555f23ea77a00efb6cc0b7e5f3b81 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 00:02:01 +0100 Subject: [PATCH 1061/1727] Notify admin room for user registrations, deactivations and password changes --- src/client_server/account.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 1ff0fa0..32488f2 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -17,6 +17,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, + room::message::RoomMessageEventContent, EventType, }, push, UserId, @@ -230,7 +231,12 @@ pub async fn register_route( body.initial_device_display_name.clone(), )?; - info!("{} registered on this server", user_id); + info!("New user {} registered on this server.", user_id); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "New user {} registered on this server.", + user_id + ))); // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first @@ -319,6 +325,13 @@ pub async fn change_password_route( db.flush()?; + info!("User {} changed their password.", sender_user); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} changed their password.", + sender_user + ))); + Ok(change_password::v3::Response {}) } @@ -436,7 +449,12 @@ pub async fn deactivate_route( // Remove devices and mark account as deactivated db.users.deactivate_account(sender_user)?; - info!("{} deactivated their account", sender_user); + info!("User {} deactivated their 
account.", sender_user); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} deactivated their account.", + sender_user + ))); db.flush()?; From 61277452af96aa2c9a50bbd0ea206d1856b53918 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Mar 2022 18:05:16 +0100 Subject: [PATCH 1062/1727] chore(docker): Bump alpine (base image) version --- .gitlab-ci.yml | 3 ++- docker/ci-binaries-packaging.Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bf68e25..380332b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -316,9 +316,10 @@ test:sytest: test:dockerlint: stage: "test" needs: [] - image: "ghcr.io/hadolint/hadolint:latest-alpine" + image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine interruptible: true script: + - hadolint --version # First pass: Print for CI log: - > hadolint diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 6defc3d..1a31871 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,7 +7,7 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.0 AS runner +FROM docker.io/alpine:3.15.1 AS runner # Standard port on which Conduit launches. From 1ebf417c1191df984850c2208d4baa871b82f5cb Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 30 Mar 2022 20:23:04 +0000 Subject: [PATCH 1063/1727] chore: Bump alpine version for CI generated docker --- docker/ci-binaries-packaging.Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 1a31871..6964a02 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,7 +7,8 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.1 AS runner +FROM docker.io/alpine@sha256:b66bccf2e0cca8e5fb79f7d3c573dd76c4787d1d883f5afe6c9d136a260bba07 AS runner +# = alpine:3.15.3 # Standard port on which Conduit launches. From 4a12a7cbc882375fc66df49e046b503f047573b9 Mon Sep 17 00:00:00 2001 From: LordMZTE Date: Thu, 31 Mar 2022 20:59:59 +0200 Subject: [PATCH 1064/1727] Fix crash when a bad user ID is in the database To my understanding, a bad user ID can sometimes make it into the database, which led to a panic prior to this change.
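For illustration of the defensive pattern this patch applies, here is a minimal sketch (not the exact Conduit code): it assumes ruma's UserId::parse returning Result<Box<UserId>, _>, as used in the diff below, and a hypothetical collect_lazy_loaded helper standing in for the loop inside sync_helper.

    use std::collections::HashSet;
    use ruma::UserId;

    // Collect lazy-loaded members, skipping any malformed user IDs that may
    // have ended up in the database instead of panicking on them.
    fn collect_lazy_loaded(state_keys: &[String]) -> HashSet<Box<UserId>> {
        let mut lazy_loaded = HashSet::new();
        for state_key in state_keys {
            // Before this patch the parse result was unwrapped with .expect(),
            // so a single bad entry aborted the whole sync response.
            if let Ok(uid) = UserId::parse(state_key.as_str()) {
                lazy_loaded.insert(uid);
            }
        }
        lazy_loaded
    }

The design choice is simply to treat an unparsable state_key as absent rather than fatal, which keeps /sync responsive even with corrupt rows.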
--- src/client_server/sync.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index eec4cf6..5f34fa6 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -405,10 +405,11 @@ async fn sync_helper( continue; } }; - lazy_loaded.insert( - UserId::parse(state_key.as_ref()) - .expect("they are in timeline_users, so they should be correct"), - ); + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(state_key.as_ref()) { + lazy_loaded.insert(uid); + } state_events.push(pdu); } } From db0659cb3db588f3ef08aad866e9f0e631bf8dcb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 31 Mar 2022 22:50:17 +0200 Subject: [PATCH 1065/1727] Upgrade axum to 0.5 --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- src/ruma_wrapper/axum.rs | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c45fa26..1a60e65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -124,9 +124,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "axum" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310a147401c66e79fc78636e4db63ac68cd6acb9ece056de806ea173a15bce32" +checksum = "5611d4977882c5af1c0f7a34d51b5d87f784f86912bb543986b014ea4995ef93" dependencies = [ "async-trait", "axum-core", @@ -137,6 +137,7 @@ dependencies = [ "http", "http-body", "hyper", + "itoa 1.0.1", "matchit", "memchr", "mime", @@ -147,7 +148,6 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-util", "tower", "tower-http", "tower-layer", @@ -156,9 +156,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca6c0b218388a7ed6a8d25e94f7dea5498daaa4fd8c711fb3ff166041b06fda" +checksum = "95cd109b3e93c9541dcce5b0219dcf89169dcc58c1bebed65082808324258afb" dependencies = [ "async-trait", "bytes", @@ -1427,9 +1427,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matchit" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b6f41fdfbec185dd3dff58b51e323f5bc61692c0de38419a957b0dcfccca3c" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "memchr" diff --git a/Cargo.toml b/Cargo.toml index c24c7cc..1a1bb66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.4.4", features = ["headers"], optional = true } +axum = { version = "0.5.0", features = ["headers"], optional = true } axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index d8e7f51..c779e33 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -154,6 +154,7 @@ where TypedHeaderRejectionReason::Error(_) => { "Invalid X-Matrix signatures." 
} + _ => "Unknown header-related error", }; Error::BadRequest(ErrorKind::Forbidden, msg) @@ -247,8 +248,7 @@ where }; let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); - *http_request.headers_mut().unwrap() = - req.headers().expect("Headers already extracted").clone(); + *http_request.headers_mut().unwrap() = req.headers().clone(); if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { From 3933bd9a8e73b408bcba44579ba60e3ee35dae8b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 31 Mar 2022 22:52:16 +0200 Subject: [PATCH 1066/1727] Update axum feature set used --- Cargo.lock | 1 - Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a60e65..66daf5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -145,7 +145,6 @@ dependencies = [ "pin-project-lite", "serde", "serde_json", - "serde_urlencoded", "sync_wrapper", "tokio", "tower", diff --git a/Cargo.toml b/Cargo.toml index 1a1bb66..627829f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.5.0", features = ["headers"], optional = true } +axum = { version = "0.5.0", default-features = false, features = ["headers", "http1", "http2", "json", "matched-path"], optional = true } axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } From a5465dfd3eb57c50e5707d90351cf73120986eff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 1 Apr 2022 16:00:04 +0200 Subject: [PATCH 1067/1727] fix: allow trailing slashes for /state// again --- src/main.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/main.rs b/src/main.rs index 6aa0870..beb2436 100644 --- a/src/main.rs +++ b/src/main.rs @@ -273,6 +273,22 @@ fn routes() -> Router { get(client_server::get_state_events_for_empty_key_route) .put(client_server::send_state_event_for_empty_key_route), ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + // These two endpoints allow trailing slashes + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) From 9046223e7f649314671ab6f18cf606d5442f36f1 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 20 Mar 2022 15:21:03 +0100 Subject: [PATCH 1068/1727] Send PDU to appservice if state_key is their user ID Fixes #110. --- src/database/rooms.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3a71a3b..ef54474 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1970,6 +1970,30 @@ impl Rooms { continue; } + // If the RoomMember event has a non-empty state_key, it is targeted at someone. 
+ // If it is our appservice user, we send this PDU to it. + if pdu.kind == EventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + if let Some(appservice_uid) = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }) + { + if state_key_uid == &appservice_uid { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + } + } + } + if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") From 1712e63e069de10a8997b46a3547562d9c33b2b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 18:58:45 +0200 Subject: [PATCH 1069/1727] fix: fix kick and ban events over federation Fix the scenario where a MembershipState change event was not sent to the server of a user kicked/banned from a room on a Conduit instance if there were not any other users from that server in the room. --- src/database/rooms.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3a71a3b..5b86b2f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1957,12 +1957,24 @@ impl Rooms { // where events in the current room state do not exist self.set_room_state(room_id, statehashid)?; - let servers = self + let mut servers: HashSet> = self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != db.globals.server_name()) + .collect(); - db.sending.send_pdu(servers, &pdu_id)?; + // In case we are kicking or banning a user, we need to inform their server of the change + if pdu.kind == EventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(Box::from(state_key_uid.server_name())); + } + } + + db.sending.send_pdu(servers.into_iter(), &pdu_id)?; for appservice in db.appservice.all()? { if self.appservice_in_room(room_id, &appservice, db)? { From 414c7c40c4f215cae20917310ae69a4df66152b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 19:19:57 +0200 Subject: [PATCH 1070/1727] fix: remove our server from the list of servers to send the event PDU to --- src/database/rooms.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5b86b2f..bf34cdc 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1957,11 +1957,8 @@ impl Rooms { // where events in the current room state do not exist self.set_room_state(room_id, statehashid)?; - let mut servers: HashSet> = self - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()) - .collect(); + let mut servers: HashSet> = + self.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == EventType::RoomMember { @@ -1974,6 +1971,8 @@ impl Rooms { } } + servers.remove(db.globals.server_name()); + db.sending.send_pdu(servers.into_iter(), &pdu_id)?; for appservice in db.appservice.all()? 
{ From a08c667230ff5bb6f93b15743230111045273f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 19:27:48 +0200 Subject: [PATCH 1071/1727] docs: add comments for clarification of recent changes --- src/database/rooms.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index bf34cdc..5cbe56a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1971,6 +1971,7 @@ impl Rooms { } } + // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above servers.remove(db.globals.server_name()); db.sending.send_pdu(servers.into_iter(), &pdu_id)?; From 21bc099ccf08197a72878ca6e1a7dd5db1c71346 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Fri, 4 Mar 2022 08:08:32 +0800 Subject: [PATCH 1072/1727] Update ruma --- Cargo.lock | 64 +++++++++++++++++++----------------- Cargo.toml | 2 +- src/client_server/session.rs | 4 +-- src/database/uiaa.rs | 4 +-- 4 files changed, 38 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66daf5e..c48fdea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "assign", "js_int", @@ -2122,8 +2122,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "bytes", "http", @@ -2139,8 +2139,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2151,7 +2151,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "ruma-api", "ruma-common", @@ -2164,8 +2164,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.13.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "assign", "bytes", @@ -2184,8 +2184,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" 
dependencies = [ "indexmap", "js_int", @@ -2199,8 +2199,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.26.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "indoc", "js_int", @@ -2216,8 +2216,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.26.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2228,7 +2228,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2242,8 +2242,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.22.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2252,13 +2252,14 @@ dependencies = [ "ruma-serde", "ruma-serde-macros", "serde", + "url", "uuid", ] [[package]] name = "ruma-identifiers-macros" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.22.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2267,16 +2268,17 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "thiserror", + "url", ] [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2289,7 +2291,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2303,8 +2305,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.5.0" -source = 
"git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "base64 0.13.0", "bytes", @@ -2318,8 +2320,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2329,8 +2331,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.10.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2346,8 +2348,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 627829f..67c0553 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "95fdb303c82e257eee18f5064b87ed4e2ed01ac0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c78f600..c0fcb37 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -51,8 +51,8 @@ pub async fn login_route( identifier, password, }) => { - let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { - matrix_id.to_lowercase() + let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier { + 
user_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 6b15d72..2c61064 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -8,7 +8,7 @@ use ruma::{ api::client::{ error::ErrorKind, uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, + AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, }, }, @@ -74,7 +74,7 @@ impl Uiaa { .. }) => { let username = match identifier { - MatrixId(username) => username, + UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, From ee96a03d60f39fa24bba2643d4c363972fd1df81 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Sat, 5 Mar 2022 10:16:21 +0800 Subject: [PATCH 1073/1727] Update ruma --- Cargo.lock | 128 ++++++++++----------------------- Cargo.toml | 2 +- src/client_server/account.rs | 4 ++ src/client_server/backup.rs | 75 +++++++++---------- src/client_server/config.rs | 4 +- src/client_server/directory.rs | 25 ++++++- src/client_server/report.rs | 26 +++---- src/server_server.rs | 24 ++++++- 8 files changed, 144 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c48fdea..8dba0bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2101,12 +2101,11 @@ dependencies = [ [[package]] name = "ruma" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "assign", "js_int", - "ruma-api", "ruma-appservice-api", "ruma-client-api", "ruma-common", @@ -2120,40 +2119,11 @@ dependencies = [ "ruma-state-res", ] -[[package]] -name = "ruma-api" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "bytes", - "http", - "percent-encoding", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "ruma-api-macros" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-appservice-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2165,7 +2135,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "assign", "bytes", @@ -2173,7 +2143,6 @@ dependencies = [ "js_int", "maplit", "percent-encoding", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2185,14 +2154,19 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ + "bytes", + "http", "indexmap", "js_int", + "percent-encoding", "ruma-identifiers", + "ruma-macros", "ruma-serde", "serde", "serde_json", + "thiserror", "tracing", "wildmatch", ] @@ -2200,13 +2174,13 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "indoc", "js_int", "ruma-common", - "ruma-events-macros", "ruma-identifiers", + "ruma-macros", "ruma-serde", "serde", "serde_json", @@ -2214,24 +2188,12 @@ dependencies = [ "wildmatch", ] -[[package]] -name = "ruma-events-macros" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-federation-api" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2243,33 +2205,22 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "percent-encoding", "rand 0.8.4", - "ruma-identifiers-macros", "ruma-identifiers-validation", + "ruma-macros", "ruma-serde", - "ruma-serde-macros", "serde", "url", "uuid", ] -[[package]] -name = "ruma-identifiers-macros" -version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "quote", - "ruma-identifiers-validation", - "syn", -] - [[package]] name = "ruma-identifiers-validation" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "thiserror", "url", @@ -2277,24 +2228,34 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-identifiers", "ruma-serde", "serde", ] +[[package]] +name = "ruma-macros" +version = "0.1.0" +source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "ruma-identifiers-validation", + "syn", +] + [[package]] name = "ruma-push-gateway-api" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2306,33 +2267,22 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "base64 0.13.0", "bytes", "form_urlencoded", "itoa 1.0.1", "js_int", - "ruma-serde-macros", + "ruma-macros", "serde", "serde_json", ] -[[package]] -name = "ruma-serde-macros" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-signatures" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2348,8 +2298,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 67c0553..17f158d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "95fdb303c82e257eee18f5064b87ed4e2ed01ac0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "588fe9c006eb140264160e68f4a21ea1fb28af18", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = 
"../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 32488f2..4c2dff9 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -342,8 +342,12 @@ pub async fn change_password_route( /// Note: Also works for Application Services pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let device_id = body.sender_device.as_ref().cloned(); + let is_guest = device_id.is_none(); Ok(whoami::v3::Response { user_id: sender_user.clone(), + device_id, + is_guest, }) } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 808d886..2e449d1 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,10 +1,11 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ backup::{ - add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, - delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, - get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, - get_latest_backup, update_backup, + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, + create_backup_version, delete_backup_keys, delete_backup_keys_for_room, + delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, + get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, + update_backup_version, }, error::ErrorKind, }; @@ -14,8 +15,8 @@ use ruma::api::client::{ /// Creates a new backup. pub async fn create_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -23,7 +24,7 @@ pub async fn create_backup_route( db.flush()?; - Ok(create_backup::v3::Response { version }) + Ok(create_backup_version::v3::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -31,15 +32,15 @@ pub async fn create_backup_route( /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::v3::Response {}) + Ok(update_backup_version::v3::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -47,8 +48,8 @@ pub async fn update_backup_route( /// Get information about the latest backup version. pub async fn get_latest_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -59,7 +60,7 @@ pub async fn get_latest_backup_route( "Key backup does not exist.", ))?; - Ok(get_latest_backup::v3::Response { + Ok(get_latest_backup_info::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, @@ -72,8 +73,8 @@ pub async fn get_latest_backup_route( /// Get information about an existing backup. 
pub async fn get_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -83,7 +84,7 @@ pub async fn get_backup_route( "Key backup does not exist.", ))?; - Ok(get_backup::v3::Response { + Ok(get_backup_info::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -98,15 +99,15 @@ pub async fn get_backup_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::v3::Response {}) + Ok(delete_backup_version::v3::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -164,8 +165,8 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -193,7 +194,7 @@ pub async fn add_backup_key_sessions_route( db.flush()?; - Ok(add_backup_key_sessions::v3::Response { + Ok(add_backup_keys_for_room::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -208,8 +209,8 @@ pub async fn add_backup_key_sessions_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -235,7 +236,7 @@ pub async fn add_backup_key_session_route( db.flush()?; - Ok(add_backup_key_session::v3::Response { + Ok(add_backup_keys_for_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -260,15 +261,15 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::v3::Response { sessions }) + Ok(get_backup_keys_for_room::v3::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -276,8 +277,8 @@ pub async fn get_backup_key_sessions_route( /// Retrieves a key from the backup. 
pub async fn get_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -288,7 +289,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::v3::Response { key_data }) + Ok(get_backup_keys_for_session::v3::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -315,8 +316,8 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -324,7 +325,7 @@ pub async fn delete_backup_key_sessions_route( db.flush()?; - Ok(delete_backup_key_sessions::v3::Response { + Ok(delete_backup_keys_for_room::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -335,8 +336,8 @@ pub async fn delete_backup_key_sessions_route( /// Delete a key from the backup. pub async fn delete_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -344,7 +345,7 @@ pub async fn delete_backup_key_session_route( db.flush()?; - Ok(delete_backup_key_session::v3::Response { + Ok(delete_backup_keys_for_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index a9a2fb1..d39f8b6 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -22,7 +22,7 @@ pub async fn set_global_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -52,7 +52,7 @@ pub async fn set_room_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index ad88254..f26df87 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -11,13 +11,17 @@ use ruma::{ }, federation, }, - directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, + directory::{ + Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk, + RoomNetwork, + }, events::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, name::RoomNameEventContent, 
topic::RoomTopicEventContent, }, @@ -265,6 +269,25 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), + join_rule: db + .rooms + .room_state_get(&room_id, &EventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => Some(PublicRoomJoinRule::Public), + JoinRule::Knock => Some(PublicRoomJoinRule::Knock), + _ => None, + }) + .map_err(|_| { + Error::bad_database("Invalid room join rule event in database.") + }) + }) + .transpose()? + .flatten() + .ok_or(Error::bad_database( + "Invalid room join rule event in database.", + ))?, room_id, }; Ok(chunk) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 1e47792..8c51e9c 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -25,14 +25,14 @@ pub async fn report_event_route( } }; - if body.score > int!(0) || body.score < int!(-100) { + if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); }; - if body.reason.chars().count() > 250 { + if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 250 characters or fewer", @@ -43,26 +43,26 @@ pub async fn report_event_route( .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ - Event ID: {}\n\ - Room ID: {}\n\ - Sent By: {}\n\n\ - Report Score: {}\n\ - Report Reason: {}", + Event ID: {:?}\n\ + Room ID: {:?}\n\ + Sent By: {:?}\n\n\ + Report Score: {:?}\n\ + Report Reason: {:?}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason ), format!( - "
              Report received from: {0}\ -
              • Event Info
                • Event ID: {1}\ - 🔗
                • Room ID: {2}\ -
                • Sent By: {3}
              • \ - Report Info
                • Report Score: {4}
                • Report Reason: {5}
              • \ + "
                Report received from: {0:?}\ +
                • Event Info
                  • Event ID: {1:?}\ + 🔗
                  • Room ID: {2:?}\ +
                  • Sent By: {3:?}
                • \ + Report Info
                  • Report Score: {4:?}
                  • Report Reason: {5}
                • \
                ", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, - HtmlEscape(&body.reason) + HtmlEscape(&body.reason.clone().unwrap_or(String::new())) ), )); diff --git a/src/server_server.rs b/src/server_server.rs index 9dc2617..56f5b9d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -30,7 +30,7 @@ use ruma::{ }, query::{get_profile_information, get_room_information}, transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu}, + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, send_transaction_message, }, }, @@ -840,6 +840,22 @@ pub async fn send_transaction_message_route( db.transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if let Some(master_key) = master_key { + db.users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + &db.rooms, + &db.globals, + )?; + } + } Edu::_Custom(_) => {} } } @@ -2998,6 +3014,12 @@ pub async fn get_devices_route( }) }) .collect(), + master_key: db + .users + .get_master_key(&body.user_id, |u| u == &body.user_id)?, + self_signing_key: db + .users + .get_self_signing_key(&body.user_id, |u| u == &body.user_id)?, }) } From 566833111c2b18055320989845d17c72c3420675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 18:49:46 +0200 Subject: [PATCH 1074/1727] refactor: small improvements --- src/client_server/account.rs | 9 ++++++--- src/client_server/report.rs | 2 +- src/database/uiaa.rs | 4 ++-- src/server_server.rs | 9 +++++++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4c2dff9..fcdf551 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -340,14 +340,17 @@ pub async fn change_password_route( /// Get user_id of the sender user. 
/// /// Note: Also works for Application Services -pub async fn whoami_route(body: Ruma) -> Result { +pub async fn whoami_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device_id = body.sender_device.as_ref().cloned(); - let is_guest = device_id.is_none(); + Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest, + is_guest: db.users.is_deactivated(&sender_user)?, }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 8c51e9c..e60da69 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -62,7 +62,7 @@ pub async fn report_event_route( pdu.room_id, pdu.sender, body.score, - HtmlEscape(&body.reason.clone().unwrap_or(String::new())) + HtmlEscape(body.reason.as_deref().unwrap_or("")) ), )); diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 2c61064..1237313 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -8,8 +8,8 @@ use ruma::{ api::client::{ error::ErrorKind, uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::UserIdOrLocalpart, - UiaaInfo, + AuthType, IncomingAuthData, IncomingPassword, + IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, }, }, signatures::CanonicalJsonValue, diff --git a/src/server_server.rs b/src/server_server.rs index 56f5b9d..d68ded8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2991,6 +2991,11 @@ pub async fn get_devices_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + Ok(get_devices::v1::Response { user_id: body.user_id.clone(), stream_id: db @@ -3016,10 +3021,10 @@ pub async fn get_devices_route( .collect(), master_key: db .users - .get_master_key(&body.user_id, |u| u == &body.user_id)?, + .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, self_signing_key: db .users - .get_self_signing_key(&body.user_id, |u| u == &body.user_id)?, + .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, }) } From b8411ae2fd359e890b3805116b3b32f9aed16e74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 19:01:16 +0200 Subject: [PATCH 1075/1727] refactor: rename endpoints to match ruma --- src/client_server/backup.rs | 22 +++++++++++----------- src/main.rs | 22 +++++++++++----------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 2e449d1..b48343f 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -13,7 +13,7 @@ use ruma::api::client::{ /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -pub async fn create_backup_route( +pub async fn create_backup_version_route( db: DatabaseGuard, body: Ruma, ) -> Result { @@ -30,7 +30,7 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -pub async fn update_backup_route( +pub async fn update_backup_version_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -46,7 +46,7 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. 
-pub async fn get_latest_backup_route( +pub async fn get_latest_backup_info_route( db: DatabaseGuard, body: Ruma, ) -> Result { @@ -71,7 +71,7 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -pub async fn get_backup_route( +pub async fn get_backup_info_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -97,7 +97,7 @@ pub async fn get_backup_route( /// Delete an existing key backup. /// /// - Deletes both information about the backup, as well as all key data related to the backup -pub async fn delete_backup_route( +pub async fn delete_backup_version_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -163,7 +163,7 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_key_sessions_route( +pub async fn add_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -207,7 +207,7 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_key_session_route( +pub async fn add_backup_keys_for_session_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -259,7 +259,7 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -pub async fn get_backup_key_sessions_route( +pub async fn get_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -275,7 +275,7 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -pub async fn get_backup_key_session_route( +pub async fn get_backup_keys_for_session_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -314,7 +314,7 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. -pub async fn delete_backup_key_sessions_route( +pub async fn delete_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -334,7 +334,7 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. 
-pub async fn delete_backup_key_session_route(
+pub async fn delete_backup_keys_for_session_route(
     db: DatabaseGuard,
     body: Ruma>,
 ) -> Result {
diff --git a/src/main.rs b/src/main.rs
index fae3380..a9047ec 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -224,18 +224,18 @@ fn routes() -> Router {
         .ruma_route(client_server::upload_keys_route)
         .ruma_route(client_server::get_keys_route)
         .ruma_route(client_server::claim_keys_route)
-        .ruma_route(client_server::create_backup_route)
-        .ruma_route(client_server::update_backup_route)
-        .ruma_route(client_server::delete_backup_route)
-        .ruma_route(client_server::get_latest_backup_route)
-        .ruma_route(client_server::get_backup_route)
-        .ruma_route(client_server::add_backup_key_sessions_route)
-        .ruma_route(client_server::add_backup_keys_route)
-        .ruma_route(client_server::delete_backup_key_session_route)
-        .ruma_route(client_server::delete_backup_key_sessions_route)
+        .ruma_route(client_server::create_backup_version_route)
+        .ruma_route(client_server::update_backup_version_route)
+        .ruma_route(client_server::delete_backup_version_route)
+        .ruma_route(client_server::get_latest_backup_info_route)
+        .ruma_route(client_server::get_backup_info_route)
+        .ruma_route(client_server::add_backup_keys_for_room_route)
+        .ruma_route(client_server::add_backup_keys_for_session_route)
+        .ruma_route(client_server::delete_backup_keys_for_room_route)
+        .ruma_route(client_server::delete_backup_keys_for_session_route)
         .ruma_route(client_server::delete_backup_keys_route)
-        .ruma_route(client_server::get_backup_key_session_route)
-        .ruma_route(client_server::get_backup_key_sessions_route)
+        .ruma_route(client_server::get_backup_keys_for_room_route)
+        .ruma_route(client_server::get_backup_keys_for_session_route)
         .ruma_route(client_server::get_backup_keys_route)
         .ruma_route(client_server::set_read_marker_route)
         .ruma_route(client_server::create_receipt_route)

From 17ad5f0595c3b91683ef620aa8d3a400479136da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Wed, 6 Apr 2022 19:08:23 +0200
Subject: [PATCH 1076/1727] fix: checks for incoming cross signing changes

---
 src/server_server.rs | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/server_server.rs b/src/server_server.rs
index d68ded8..371f297 100644
--- a/src/server_server.rs
+++ b/src/server_server.rs
@@ -639,6 +639,11 @@ pub async fn send_transaction_message_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
     let mut resolved_map = BTreeMap::new();
 
     let pub_key_map = RwLock::new(BTreeMap::new());
@@ -674,7 +679,7 @@ pub async fn send_transaction_message_route(
             }
         };
 
-        acl_check(&body.origin, &room_id, &db)?;
+        acl_check(&sender_servername, &room_id, &db)?;
 
         let mutex = Arc::clone(
             db.globals
@@ -689,7 +694,7 @@
         resolved_map.insert(
             event_id.clone(),
             handle_incoming_pdu(
-                &body.origin,
+                &sender_servername,
                 &event_id,
                 &room_id,
                 value,
@@ -845,6 +850,9 @@
                     master_key,
                     self_signing_key,
                 }) => {
+                    if user_id.server_name() != sender_servername {
+                        continue;
+                    }
                     if let Some(master_key) = master_key {
                         db.users.add_cross_signing_keys(
                             &user_id,

From 2808dd2000f331c9ef90d152afefe7c04e3b1e92 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Wed, 6 Apr 2022 21:31:29 +0200
Subject: [PATCH 1077/1727] Ruma upgrade

---
 Cargo.lock | 102 +++++----------------
Cargo.toml | 2 +- src/client_server/account.rs | 14 ++-- src/client_server/alias.rs | 6 +- src/client_server/backup.rs | 24 +++--- src/client_server/capabilities.rs | 6 +- src/client_server/config.rs | 8 +- src/client_server/context.rs | 6 +- src/client_server/device.rs | 8 +- src/client_server/directory.rs | 24 +++--- src/client_server/filter.rs | 4 +- src/client_server/keys.rs | 6 +- src/client_server/media.rs | 8 +- src/client_server/membership.rs | 63 +++++++------- src/client_server/message.rs | 33 +++++--- src/client_server/presence.rs | 4 +- src/client_server/profile.rs | 20 ++--- src/client_server/push.rs | 102 ++++++++++++++++------ src/client_server/read_marker.rs | 8 +- src/client_server/redact.rs | 6 +- src/client_server/report.rs | 2 +- src/client_server/room.rs | 63 +++++++------- src/client_server/search.rs | 2 +- src/client_server/session.rs | 4 +- src/client_server/state.rs | 28 +++---- src/client_server/sync.rs | 28 +++---- src/client_server/tag.rs | 30 +++++-- src/client_server/thirdparty.rs | 2 +- src/client_server/to_device.rs | 9 +- src/client_server/typing.rs | 2 +- src/client_server/unversioned.rs | 5 +- src/client_server/user_directory.rs | 2 +- src/client_server/voip.rs | 2 +- src/database/account_data.rs | 16 ++-- src/database/admin.rs | 32 +++---- src/database/pusher.rs | 10 +-- src/database/rooms.rs | 126 ++++++++++++++++------------ src/database/sending.rs | 11 ++- src/database/transaction_ids.rs | 2 +- src/database/users.rs | 9 +- src/main.rs | 12 +-- src/pdu.rs | 18 ++-- src/ruma_wrapper.rs | 13 ++- src/ruma_wrapper/axum.rs | 14 ++-- src/server_server.rs | 106 ++++++++++++++--------- 45 files changed, 528 insertions(+), 474 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8dba0bf..cd51825 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2102,19 +2102,16 @@ dependencies = [ [[package]] name = "ruma" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "assign", "js_int", "ruma-appservice-api", "ruma-client-api", "ruma-common", - "ruma-events", "ruma-federation-api", - "ruma-identifiers", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-serde", "ruma-signatures", "ruma-state-res", ] @@ -2122,12 +2119,9 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] @@ -2135,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "assign", "bytes", @@ -2144,9 +2138,6 @@ dependencies = [ "maplit", "percent-encoding", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] @@ -2154,73 +2145,44 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.8.0" -source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ + "base64 0.13.0", "bytes", + "form_urlencoded", "http", "indexmap", + "indoc", + "itoa 1.0.1", "js_int", "percent-encoding", - "ruma-identifiers", + "rand 0.8.4", + "ruma-identifiers-validation", "ruma-macros", - "ruma-serde", "serde", "serde_json", "thiserror", "tracing", - "wildmatch", -] - -[[package]] -name = "ruma-events" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "indoc", - "js_int", - "ruma-common", - "ruma-identifiers", - "ruma-macros", - "ruma-serde", - "serde", - "serde_json", - "thiserror", + "url", + "uuid", "wildmatch", ] [[package]] name = "ruma-federation-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] -[[package]] -name = "ruma-identifiers" -version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "percent-encoding", - "rand 0.8.4", - "ruma-identifiers-validation", - "ruma-macros", - "ruma-serde", - "serde", - "url", - "uuid", -] - [[package]] name = "ruma-identifiers-validation" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "thiserror", "url", @@ -2229,19 +2191,17 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-identifiers", - "ruma-serde", "serde", ] [[package]] name = "ruma-macros" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2253,28 +2213,10 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-serde" -version = "0.6.0" -source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "base64 0.13.0", - "bytes", - "form_urlencoded", - "itoa 1.0.1", - "js_int", - "ruma-macros", "serde", "serde_json", ] @@ -2282,14 +2224,13 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "base64 0.13.0", "ed25519-dalek", "pkcs8", "rand 0.7.3", - "ruma-identifiers", - "ruma-serde", + "ruma-common", "serde_json", "sha2", "thiserror", @@ -2299,14 +2240,11 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "itertools", "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index 17f158d..64b7a23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "588fe9c006eb140264160e68f4a21ea1fb28af18", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index fcdf551..be14b92 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -18,7 +18,7 @@ use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, room::message::RoomMessageEventContent, - EventType, + GlobalAccountDataEventType, RoomAccountDataEventType, RoomEventType, }, push, UserId, }; @@ -41,7 +41,7 @@ const GUEST_NAME_LENGTH: usize = 10; /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { // Validate user id let user_id = @@ -84,7 +84,7 @@ pub async fn 
get_register_available_route( /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( @@ -194,7 +194,7 @@ pub async fn register_route( db.account_data.update( None, &user_id, - EventType::PushRules, + GlobalAccountDataEventType::PushRules.to_string().into(), &ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), @@ -271,7 +271,7 @@ pub async fn register_route( /// - Triggers device list updates pub async fn change_password_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -366,7 +366,7 @@ pub async fn whoami_route( /// - Removes ability to log in again pub async fn deactivate_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -440,7 +440,7 @@ pub async fn deactivate_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 75cf85e..90e9d2c 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -17,7 +17,7 @@ use ruma::{ /// Creates a new room alias on this server. pub async fn create_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( @@ -46,7 +46,7 @@ pub async fn create_alias_route( /// - TODO: Update canonical alias event pub async fn delete_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( @@ -71,7 +71,7 @@ pub async fn delete_alias_route( /// - TODO: Suggest more servers to join via pub async fn get_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { get_alias_helper(&db, &body.room_alias).await } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index b48343f..067f20c 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -32,7 +32,7 @@ pub async fn create_backup_version_route( /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_version_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -73,7 +73,7 @@ pub async fn get_latest_backup_info_route( /// Get information about an existing backup. 
pub async fn get_backup_info_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db @@ -99,7 +99,7 @@ pub async fn get_backup_info_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -119,7 +119,7 @@ pub async fn delete_backup_version_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -165,7 +165,7 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -209,7 +209,7 @@ pub async fn add_backup_keys_for_room_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -247,7 +247,7 @@ pub async fn add_backup_keys_for_session_route( /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -261,7 +261,7 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -277,7 +277,7 @@ pub async fn get_backup_keys_for_room_route( /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -297,7 +297,7 @@ pub async fn get_backup_keys_for_session_route( /// Delete the keys from the backup. pub async fn delete_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -316,7 +316,7 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -336,7 +336,7 @@ pub async fn delete_backup_keys_for_room_route( /// Delete a key from the backup. 
pub async fn delete_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index ac2e59f..952db58 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,7 +1,7 @@ use crate::{Result, Ruma}; use ruma::{ - api::client::capabilities::{ - get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, + api::client::discovery::get_capabilities::{ + self, Capabilities, RoomVersionStability, RoomVersionsCapability, }, RoomVersionId, }; @@ -11,7 +11,7 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - _body: Ruma, + _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); diff --git a/src/client_server/config.rs b/src/client_server/config.rs index d39f8b6..6184e0b 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -18,7 +18,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// Sets some account data for the sender user. pub async fn set_global_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -48,7 +48,7 @@ pub async fn set_global_account_data_route( /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -78,7 +78,7 @@ pub async fn set_room_account_data_route( /// Gets some account data for the sender user. pub async fn get_global_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -99,7 +99,7 @@ pub async fn get_global_account_data_route( /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 2f6a2ea..8ecd6ec 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, - events::EventType, + events::{EventType, StateEventType}, }; use std::{collections::HashSet, convert::TryFrom}; use tracing::error; @@ -14,7 +14,7 @@ use tracing::error; /// joined, depending on history_visibility) pub async fn get_context_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -154,7 +154,7 @@ pub async fn get_context_route( for (shortstatekey, id) in state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; - if event_type != EventType::RoomMember { + if event_type != StateEventType::RoomMember { let pdu = match db.rooms.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 09c9406..b100bf2 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -30,7 +30,7 @@ pub async fn get_devices_route( /// Get metadata on a single device of the sender user. pub async fn get_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,7 +47,7 @@ pub async fn get_device_route( /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -77,7 +77,7 @@ pub async fn update_device_route( /// - Triggers device list updates pub async fn delete_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -135,7 +135,7 @@ pub async fn delete_device_route( /// - Triggers device list updates pub async fn delete_devices_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index f26df87..4e4a322 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -25,7 +25,7 @@ use ruma::{ name::RoomNameEventContent, topic::RoomTopicEventContent, }, - EventType, + StateEventType, }, ServerName, UInt, }; @@ -38,7 +38,7 @@ use tracing::{info, warn}; /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { get_public_rooms_filtered_helper( &db, @@ -58,7 +58,7 @@ pub async fn get_public_rooms_filtered_route( /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let response = get_public_rooms_filtered_helper( &db, @@ -85,7 +85,7 @@ pub async fn get_public_rooms_route( /// - TODO: Access control checks pub async fn set_room_visibility_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -113,7 +113,7 @@ pub async fn set_room_visibility_route( /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { @@ -193,7 +193,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let chunk = PublicRoomsChunk { canonical_alias: db .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) @@ -203,7 +203,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, name: db .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? + .room_state_get(&room_id, &StateEventType::RoomName, "")? 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomNameEventContent| c.name) @@ -222,7 +222,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .expect("user count should not be that big"), topic: db .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) @@ -232,7 +232,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, world_readable: db .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { @@ -246,7 +246,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, guest_can_join: db .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomGuestAccessEventContent| { @@ -258,7 +258,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, avatar_url: db .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) @@ -271,7 +271,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .flatten(), join_rule: db .rooms - .room_state_get(&room_id, &EventType::RoomJoinRules, "")? + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| match c.join_rule { diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 379950f..6522c90 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -11,7 +11,7 @@ use ruma::api::client::{ /// - A user can only access their own filters pub async fn get_filter_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? { @@ -27,7 +27,7 @@ pub async fn get_filter_route( /// Creates a new filter to be used by other endpoints. 
pub async fn create_filter_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 525c779..c4f91cb 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -71,7 +71,7 @@ pub async fn upload_keys_route( /// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -107,7 +107,7 @@ pub async fn claim_keys_route( /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -231,7 +231,7 @@ pub async fn upload_signatures_route( /// - TODO: left users pub async fn get_key_changes_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 71dbed6..a9a6d6c 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -32,7 +32,7 @@ pub async fn get_media_config_route( /// - Media will be saved in the media/ directory pub async fn create_content_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", @@ -101,7 +101,7 @@ pub async fn get_remote_content( /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -132,7 +132,7 @@ pub async fn get_content_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -168,7 +168,7 @@ pub async fn get_content_as_filename_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0f5e7c2..8fb2fec 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, - EventType, + RoomEventType, StateEventType, }, serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion}, @@ -44,7 +44,7 @@ use tracing::{debug, error, warn}; /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -84,7 +84,7 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let 
sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -136,7 +136,7 @@ pub async fn join_room_by_id_or_alias_route( /// - This should always work if the user is currently joined. pub async fn leave_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -152,7 +152,7 @@ pub async fn leave_room_route( /// Tries to send an invite event into the room. pub async fn invite_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -170,7 +170,7 @@ pub async fn invite_user_route( /// Tries to send a kick event into the room. pub async fn kick_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -178,7 +178,7 @@ pub async fn kick_user_route( db.rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? .ok_or(Error::BadRequest( @@ -205,7 +205,7 @@ pub async fn kick_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -229,7 +229,7 @@ pub async fn kick_user_route( /// Tries to send a ban event into the room. pub async fn ban_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -239,7 +239,7 @@ pub async fn ban_user_route( .rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? .map_or( @@ -275,7 +275,7 @@ pub async fn ban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -299,7 +299,7 @@ pub async fn ban_user_route( /// Tries to send an unban event into the room. pub async fn unban_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -307,7 +307,7 @@ pub async fn unban_user_route( db.rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? 
.ok_or(Error::BadRequest( @@ -333,7 +333,7 @@ pub async fn unban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -362,7 +362,7 @@ pub async fn unban_user_route( /// be called from every device pub async fn forget_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -398,7 +398,7 @@ pub async fn joined_rooms_route( /// - Only works if the user is currently joined pub async fn get_member_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -415,8 +415,8 @@ pub async fn get_member_events_route( .rooms .room_state_full(&body.room_id)? .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event()) + .filter(|(key, _)| key.0 == StateEventType::RoomMember) + .map(|(_, pdu)| pdu.to_member_event().into()) .collect(), }) } @@ -429,7 +429,7 @@ pub async fn get_member_events_route( /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -625,15 +625,17 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = - db.rooms - .get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?; + let shortstatekey = db.rooms.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + state_key, + &db.globals, + )?; state.insert(shortstatekey, pdu.event_id.clone()); } } let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( - &parsed_pdu.kind, + &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key .as_ref() @@ -645,7 +647,7 @@ async fn join_room_by_id_helper( let create_shortstatekey = db .rooms - .get_shortstatekey(&EventType::RoomCreate, "")? + .get_shortstatekey(&StateEventType::RoomCreate, "")? 
.expect("Room exists"); if state.get(&create_shortstatekey).is_none() { @@ -703,7 +705,7 @@ async fn join_room_by_id_helper( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), @@ -814,7 +816,7 @@ pub(crate) async fn invite_helper<'a>( let create_event = db .rooms - .room_state_get(room_id, &EventType::RoomCreate, "")?; + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -853,11 +855,11 @@ pub(crate) async fn invite_helper<'a>( .expect("member event is valid value"); let state_key = user_id.to_string(); - let kind = EventType::RoomMember; + let kind = StateEventType::RoomMember; let auth_events = db.rooms.get_auth_events( room_id, - &kind, + &kind.to_string().into(), sender_user, Some(&state_key), &content, @@ -888,7 +890,7 @@ pub(crate) async fn invite_helper<'a>( origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), - kind, + kind: kind.to_string().into(), content, state_key: Some(state_key), prev_events, @@ -912,7 +914,6 @@ pub(crate) async fn invite_helper<'a>( let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -1051,7 +1052,7 @@ pub(crate) async fn invite_helper<'a>( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: db.users.displayname(user_id)?, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index b5c4149..1348132 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, message::{get_message_events, send_message_event}, }, - events::EventType, + events::{RoomEventType, StateEventType}, }; use std::{ collections::{BTreeMap, HashSet}, @@ -20,7 +20,7 @@ use std::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -36,7 +36,9 @@ pub async fn send_message_event_route( let state_lock = mutex_state.lock().await; // Forbid m.room.encrypted if encryption is disabled - if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() { + if RoomEventType::RoomEncrypted == body.event_type.to_string().into() + && !db.globals.allow_encryption() + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -69,7 +71,7 @@ pub async fn send_message_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::from(&*body.event_type), + event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, unsigned: Some(unsigned), @@ -106,7 +108,7 @@ pub async fn send_message_event_route( /// joined, depending on history_visibility) pub async fn get_message_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -118,11 +120,16 @@ pub async fn get_message_events_route( )); } - let from = body - .from - .clone() - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + let from = match body.from.clone() { + Some(from) => from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, + + None => match body.dir { + get_message_events::v3::Direction::Forward => 0, + get_message_events::v3::Direction::Backward => u64::MAX, + }, + }; let to = body.to.as_ref().map(|t| t.parse()); @@ -172,7 +179,7 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = body.from.to_owned(); + resp.start = from.to_string(); resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } @@ -209,7 +216,7 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = body.from.to_owned(); + resp.start = from.to_string(); resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_before; } @@ -219,7 +226,7 @@ pub async fn get_message_events_route( for ll_id in &lazy_loaded { if let Some(member_event) = db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? + .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? { resp.state.push(member_event.to_state_event()); } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 9e6ce0b..773fef4 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -7,7 +7,7 @@ use std::time::Duration; /// Sets the presence state of the sender user. pub async fn set_presence_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -48,7 +48,7 @@ pub async fn set_presence_route( /// - Only works if you share a room with the user pub async fn get_presence_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 3000027..acea19f 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -9,7 +9,7 @@ use ruma::{ }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, EventType}, + events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType}, }; use serde_json::value::to_raw_value; use std::sync::Arc; @@ -21,7 +21,7 @@ use std::sync::Arc; /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -36,14 +36,14 @@ pub async fn set_displayname_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( db.rooms .room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? 
.ok_or_else(|| { @@ -118,7 +118,7 @@ pub async fn set_displayname_route( /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db @@ -150,7 +150,7 @@ pub async fn get_displayname_route( /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -167,14 +167,14 @@ pub async fn set_avatar_url_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( db.rooms .room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? .ok_or_else(|| { @@ -249,7 +249,7 @@ pub async fn set_avatar_url_route( /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db @@ -283,7 +283,7 @@ pub async fn get_avatar_url_route( /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 90f4e02..5169b8b 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -8,7 +8,9 @@ use ruma::{ set_pushrule_enabled, RuleKind, }, }, - events::{push_rules::PushRulesEvent, EventType}, + events::{ + push_rules::PushRulesEvent, EventType, GlobalAccountDataEventType, RoomAccountDataEventType, + }, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; @@ -23,7 +25,11 @@ pub async fn get_pushrules_all_route( let event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -39,13 +45,17 @@ pub async fn get_pushrules_all_route( /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -91,7 +101,7 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -105,7 +115,11 @@ pub async fn set_pushrule_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? 
+ .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -174,8 +188,13 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -187,7 +206,7 @@ pub async fn set_pushrule_route( /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -200,7 +219,11 @@ pub async fn get_pushrule_actions_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -243,7 +266,7 @@ pub async fn get_pushrule_actions_route( /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -256,7 +279,11 @@ pub async fn set_pushrule_actions_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -297,8 +324,13 @@ pub async fn set_pushrule_actions_route( _ => {} }; - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -310,7 +342,7 @@ pub async fn set_pushrule_actions_route( /// Gets the enabled status of a single specified push rule for this user. pub async fn get_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -323,7 +355,11 @@ pub async fn get_pushrule_enabled_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -369,7 +405,7 @@ pub async fn get_pushrule_enabled_route( /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -382,7 +418,11 @@ pub async fn set_pushrule_enabled_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -428,8 +468,13 @@ pub async fn set_pushrule_enabled_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -441,7 +486,7 @@ pub async fn set_pushrule_enabled_route( /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -454,7 +499,11 @@ pub async fn delete_pushrule_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -490,8 +539,13 @@ pub async fn delete_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 9422f21..91988a4 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::EventType, + events::RoomAccountDataEventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, }; @@ -15,7 +15,7 @@ use std::collections::BTreeMap; /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -27,7 +27,7 @@ pub async fn set_read_marker_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::FullyRead, + RoomAccountDataEventType::FullyRead, &fully_read_event, &db.globals, )?; @@ -80,7 +80,7 @@ pub async fn set_read_marker_route( /// Sets private read marker and public read receipt EDU. 
pub async fn create_receipt_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 4843993..059e0f5 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ api::client::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, EventType}, + events::{room::redaction::RoomRedactionEventContent, RoomEventType}, }; use serde_json::value::to_raw_value; @@ -15,7 +15,7 @@ use serde_json::value::to_raw_value; /// - TODO: Handle txn id pub async fn redact_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -32,7 +32,7 @@ pub async fn redact_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomRedaction, + event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { reason: body.reason.clone(), }) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index e60da69..14768e1 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -11,7 +11,7 @@ use ruma::{ /// pub async fn report_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 99838ce..1b3b840 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -19,7 +19,7 @@ use ruma::{ tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - EventType, + RoomEventType, StateEventType, }, int, serde::{CanonicalJsonObject, JsonObject}, @@ -47,7 +47,7 @@ use tracing::{info, warn}; /// - Send invite events pub async fn create_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { use create_room::v3::RoomPreset; @@ -165,7 +165,7 @@ pub async fn create_room_route( // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -180,7 +180,7 @@ pub async fn create_room_route( // 2. 
Let the room creator join db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, @@ -242,7 +242,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, @@ -259,7 +259,7 @@ pub async fn create_room_route( if let Some(room_alias_id) = &alias { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], @@ -281,7 +281,7 @@ pub async fn create_room_route( // 5.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default @@ -301,7 +301,7 @@ pub async fn create_room_route( // 5.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -319,7 +319,7 @@ pub async fn create_room_route( // 5.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, @@ -346,7 +346,8 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { + if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption() + { continue; } @@ -358,7 +359,7 @@ pub async fn create_room_route( if let Some(name) = &body.name { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, @@ -375,7 +376,7 @@ pub async fn create_room_route( if let Some(topic) = &body.topic { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) @@ -420,7 +421,7 @@ pub async fn create_room_route( /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -447,7 +448,7 @@ pub async fn get_room_event_route( /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user 
= body.sender_user.as_ref().expect("user is authenticated"); @@ -479,7 +480,7 @@ pub async fn get_room_aliases_route( /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -509,7 +510,7 @@ pub async fn upgrade_room_route( // Fail if the sender does not have the required permissions let tombstone_event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTombstone, + event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), @@ -540,7 +541,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( db.rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")? + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content .get(), @@ -589,7 +590,7 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -605,7 +606,7 @@ pub async fn upgrade_room_route( // Join the new room db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, @@ -629,15 +630,15 @@ pub async fn upgrade_room_route( // Recommended transferable state events list from the specs let transferable_state_events = vec![ - EventType::RoomServerAcl, - EventType::RoomEncryption, - EventType::RoomName, - EventType::RoomAvatar, - EventType::RoomTopic, - EventType::RoomGuestAccess, - EventType::RoomHistoryVisibility, - EventType::RoomJoinRules, - EventType::RoomPowerLevels, + StateEventType::RoomServerAcl, + StateEventType::RoomEncryption, + StateEventType::RoomName, + StateEventType::RoomAvatar, + StateEventType::RoomTopic, + StateEventType::RoomGuestAccess, + StateEventType::RoomHistoryVisibility, + StateEventType::RoomJoinRules, + StateEventType::RoomPowerLevels, ]; // Replicate transferable state events to the new room @@ -649,7 +650,7 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: event_content, unsigned: None, state_key: Some("".to_owned()), @@ -671,7 +672,7 @@ pub async fn upgrade_room_route( // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( db.rooms - .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content .get(), @@ -686,7 +687,7 @@ pub async fn upgrade_room_route( // Modify the power levels in the old room to prevent sending of events and inviting new users let _ = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 753669a..686e3b5 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c0fcb37..c31636d 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -22,7 +22,7 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. pub async fn get_login_types_route( - _body: Ruma, + _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(Default::default()), @@ -42,7 +42,7 @@ pub async fn get_login_types_route( /// supported login types. pub async fn login_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { // Validate login method // TODO: Other login methods diff --git a/src/client_server/state.rs b/src/client_server/state.rs index a97b187..c0fbf73 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -13,7 +13,7 @@ use ruma::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, - AnyStateEventContent, EventType, + AnyStateEventContent, EventType, RoomEventType, StateEventType, }, serde::Raw, EventId, RoomId, UserId, @@ -28,7 +28,7 @@ use ruma::{ /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -36,7 +36,7 @@ pub async fn send_state_event_for_key_route( &db, sender_user, &body.room_id, - EventType::from(&*body.event_type), + &body.event_type, &body.body.body, // Yes, I hate it too body.state_key.to_owned(), ) @@ -57,12 +57,12 @@ pub async fn send_state_event_for_key_route( /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -73,7 +73,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user, &body.room_id, - EventType::from(&*body.event_type), + &body.event_type.to_string().into(), &body.body.body, body.state_key.to_owned(), ) @@ -92,7 +92,7 @@ pub async fn send_state_event_for_empty_key_route( /// - If not 
joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -102,7 +102,7 @@ pub async fn get_state_events_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -138,7 +138,7 @@ pub async fn get_state_events_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -148,7 +148,7 @@ pub async fn get_state_events_for_key_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -188,7 +188,7 @@ pub async fn get_state_events_for_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -198,7 +198,7 @@ pub async fn get_state_events_for_empty_key_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
.map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -236,7 +236,7 @@ async fn send_state_event_for_key_helper( db: &Database, sender: &UserId, room_id: &RoomId, - event_type: EventType, + event_type: &StateEventType, json: &Raw, state_key: String, ) -> Result> { @@ -282,7 +282,7 @@ async fn send_state_event_for_key_helper( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), unsigned: None, state_key: Some(state_key), diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 5f34fa6..de6a45a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - EventType, + EventType, RoomEventType, StateEventType, }, serde::Raw, DeviceId, RoomId, UserId, @@ -56,7 +56,7 @@ use tracing::error; /// `since` will be cached pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); @@ -74,7 +74,7 @@ pub async fn sync_events_route( Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(None); - v.insert((body.since.clone(), rx.clone())); + v.insert((body.since.to_owned(), rx.clone())); tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), @@ -319,7 +319,7 @@ async fn sync_helper( .rooms .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()).map_err(|_| { @@ -385,7 +385,7 @@ async fn sync_helper( for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; - if event_type != EventType::RoomMember { + if event_type != StateEventType::RoomMember { let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { @@ -446,7 +446,7 @@ async fn sync_helper( .rooms .state_get( since_shortstatehash, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? .and_then(|pdu| { @@ -475,7 +475,7 @@ async fn sync_helper( } }; - if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { match UserId::parse( pdu.state_key .as_ref() @@ -508,7 +508,7 @@ async fn sync_helper( { if let Some(member_event) = db.rooms.room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, event.sender.as_str(), )? { lazy_loaded.insert(event.sender.clone()); @@ -527,23 +527,23 @@ async fn sync_helper( let encrypted_room = db .rooms - .state_get(current_shortstatehash, &EventType::RoomEncryption, "")? + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
.is_some(); let since_encryption = db.rooms - .state_get(since_shortstatehash, &EventType::RoomEncryption, "")?; + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: let new_encrypted_room = encrypted_room && since_encryption.is_none(); let send_member_count = state_events .iter() - .any(|event| event.kind == EventType::RoomMember); + .any(|event| event.kind == RoomEventType::RoomMember); if encrypted_room { for state_event in &state_events { - if state_event.kind != EventType::RoomMember { + if state_event.kind != RoomEventType::RoomMember { continue; } @@ -830,7 +830,7 @@ async fn sync_helper( .filter_map(|other_room_id| { Some( db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) @@ -923,7 +923,7 @@ fn share_encrypted_room( .filter_map(|other_room_id| { Some( db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 21cff0b..0340886 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, - EventType, + EventType, RoomAccountDataEventType, }, }; use std::collections::BTreeMap; @@ -15,13 +15,17 @@ use std::collections::BTreeMap; /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? .unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -35,7 +39,7 @@ pub async fn update_tag_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::Tag, + RoomAccountDataEventType::Tag, &tags_event, &db.globals, )?; @@ -52,13 +56,17 @@ pub async fn update_tag_route( /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? .unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -69,7 +77,7 @@ pub async fn delete_tag_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::Tag, + RoomAccountDataEventType::Tag, &tags_event, &db.globals, )?; @@ -86,14 +94,18 @@ pub async fn delete_tag_route( /// - Gets the tag event of the room account data. pub async fn get_tags_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::v3::Response { tags: db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? 
.unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index c2c1adf..5665ad6 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -7,7 +7,7 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. pub async fn get_protocols_route( - _body: Ruma, + _body: Ruma, ) -> Result { // TODO Ok(get_protocols::v3::Response { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 6d4fc0c..42364f5 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,3 +1,4 @@ +use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; use crate::{database::DatabaseGuard, Error, Result, Ruma}; @@ -15,7 +16,7 @@ use ruma::{ /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -45,8 +46,8 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: EventType::from(&*body.event_type), - message_id: body.txn_id.clone(), + ev_type: ToDeviceEventType::from(&*body.event_type), + message_id: body.txn_id.to_owned(), messages, }, )) @@ -61,7 +62,7 @@ pub async fn send_event_to_device_route( DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( sender_user, target_user_id, - target_device_id, + &target_device_id, &body.event_type, event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 9d4ba6f..60fc1cc 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -6,7 +6,7 @@ use ruma::api::client::typing::create_typing_event; /// Sets the typing state of the sender user. pub async fn create_typing_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { use create_typing_event::v3::Typing; diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 84ac355..294c753 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,7 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; +use ruma::api::client::discovery::get_supported_versions; + use crate::{Result, Ruma}; -use ruma::api::client::discover::get_supported_versions; /// # `GET /_matrix/client/versions` /// @@ -14,7 +15,7 @@ use ruma::api::client::discover::get_supported_versions; /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases pub async fn get_supported_versions_route( - _body: Ruma, + _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index d641848..7c0bcc1 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -8,7 +8,7 @@ use ruma::api::client::user_directory::search_users; /// - TODO: Hide users that are not in any public rooms? 
pub async fn search_users_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let limit = u64::from(body.limit) as usize; diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 6281744..7e9de31 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -11,7 +11,7 @@ type HmacSha1 = Hmac; /// TODO: Returns information about the recommended turn server. pub async fn turn_server_route( db: DatabaseGuard, - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index ec9d09e..d85918f 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, EventType}, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, RoomId, UserId, }; @@ -22,7 +22,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - event_type: EventType, + event_type: RoomAccountDataEventType, data: &T, globals: &super::globals::Globals, ) -> Result<()> { @@ -38,10 +38,10 @@ impl AccountData { let mut roomuserdataid = prefix.clone(); roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.as_bytes()); + roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); let mut key = prefix; - key.extend_from_slice(event_type.as_bytes()); + key.extend_from_slice(event_type.to_string().as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling if json.get("type").is_none() || json.get("content").is_none() { @@ -75,7 +75,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: EventType, + kind: RoomAccountDataEventType, ) -> Result> { let mut key = room_id .map(|r| r.to_string()) @@ -85,7 +85,7 @@ impl AccountData { key.push(0xff); key.extend_from_slice(user_id.as_bytes()); key.push(0xff); - key.extend_from_slice(kind.as_ref().as_bytes()); + key.extend_from_slice(kind.to_string().as_bytes()); self.roomusertype_roomuserdataid .get(&key)? @@ -109,7 +109,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -131,7 +131,7 @@ impl AccountData { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - EventType::try_from( + RoomAccountDataEventType::try_from( utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( || Error::bad_database("RoomUserData ID in db is invalid."), )?) 
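The account_data hunks above re-key the per-user data tree on the new RoomAccountDataEventType enum instead of the old catch-all EventType; because the new enum no longer exposes as_bytes()/as_ref(), the key is built from its string form. A minimal runnable sketch of that key layout, assuming the ruma version this series targets (the helper name is illustrative, not part of Conduit):

    use ruma::events::RoomAccountDataEventType;

    // Build the roomusertype key the same way the hunk above does:
    // room_id (or empty) + 0xff + user_id + 0xff + event type string.
    fn account_data_key(
        room_id: Option<&str>,
        user_id: &str,
        kind: &RoomAccountDataEventType,
    ) -> Vec<u8> {
        let mut key = room_id.unwrap_or("").as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());
        key.push(0xff);
        // The enum has no as_bytes(); go through its Display form, e.g. "m.tag".
        key.extend_from_slice(kind.to_string().as_bytes());
        key
    }

    fn main() {
        let key = account_data_key(
            Some("!admins:example.org"),
            "@alice:example.org",
            &RoomAccountDataEventType::Tag,
        );
        assert!(key.ends_with(b"m.tag"));
    }
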
diff --git a/src/database/admin.rs b/src/database/admin.rs index f2e66e4..4238c5f 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -28,9 +28,9 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - EventType, + RoomEventType, }, - identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, + EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; @@ -81,7 +81,7 @@ impl Admin { .rooms .build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) .expect("event is valid, we just created it"), unsigned: None, @@ -553,7 +553,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -568,7 +568,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 2. Make conduit bot join db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: None, @@ -596,7 +596,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -615,7 +615,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) .expect("event is valid, we just created it"), unsigned: None, @@ -631,7 +631,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -649,7 +649,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) .expect("event is valid, we just created it"), unsigned: None, @@ -667,7 +667,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { .expect("Room name is valid"); db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) .expect("event is valid, we just created it"), unsigned: None, @@ -682,7 +682,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: 
format!("Manage {}", db.globals.server_name()), }) @@ -704,7 +704,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), @@ -758,7 +758,7 @@ pub(crate) async fn make_user_admin( // Invite and join the real user db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: None, @@ -781,7 +781,7 @@ pub(crate) async fn make_user_admin( )?; db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: Some(displayname), @@ -810,7 +810,7 @@ pub(crate) async fn make_user_admin( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -829,7 +829,7 @@ pub(crate) async fn make_user_admin( // Send welcome message db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), format!("

                Thank you for trying out Conduit!\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\nHelpful links:\n\nWebsite: https://conduit.rs Git and Documentation: https://gitlab.com/famedly/conduit Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: @conduit:{}: --help\nHere are some rooms you can join (by typing the command):\nConduit room (Ask questions and get notified on updates): /join #conduit:fachschaften.org\nConduit lounge (Off-topic, only Conduit users are allowed to join) /join #conduit-lounge:conduit.rs
                \n", db.globals.server_name()).to_owned(), diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 36f8454..410300e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -11,7 +11,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, EventType, + AnySyncRoomEvent, EventType, RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, @@ -181,7 +181,7 @@ pub async fn send_push_notice( let power_levels: RoomPowerLevelsEventContent = db .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -293,7 +293,7 @@ async fn send_notice( // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); - if event.kind == EventType::RoomEncrypted + if event.kind == RoomEventType::RoomEncrypted || tweaks .iter() .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) @@ -314,7 +314,7 @@ async fn send_notice( let content = serde_json::value::to_raw_value(&event.content).ok(); notifi.content = content.as_deref(); - if event.kind == EventType::RoomMember { + if event.kind == RoomEventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } @@ -323,7 +323,7 @@ async fn send_notice( let room_name = if let Some(room_name_pdu) = db.rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) .map_err(|_| Error::bad_database("Invalid room name event in database."))? diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a0729..44f3344 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -21,7 +21,8 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, }, tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, EventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Ruleset, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, @@ -111,8 +112,8 @@ pub struct Rooms { pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, + pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) lazy_load_waiting: @@ -151,7 +152,7 @@ impl Rooms { pub fn state_full( &self, shortstatehash: u64, - ) -> Result>> { + ) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -166,7 +167,7 @@ impl Rooms { .map(|pdu| { Ok::<_, Error>(( ( - pdu.kind.clone(), + pdu.kind.to_string().into(), pdu.state_key .as_ref() .ok_or_else(|| Error::bad_database("State event has no state key."))? @@ -184,7 +185,7 @@ impl Rooms { pub fn state_get_id( &self, shortstatehash: u64, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { let shortstatekey = match self.get_shortstatekey(event_type, state_key)? 
{ @@ -211,7 +212,7 @@ impl Rooms { pub fn state_get( &self, shortstatehash: u64, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? @@ -254,7 +255,7 @@ impl Rooms { pub fn get_auth_events( &self, room_id: &RoomId, - kind: &EventType, + kind: &RoomEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, @@ -272,7 +273,7 @@ impl Rooms { let mut sauthevents = auth_events .into_iter() .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type, &state_key) + self.get_shortstatekey(&event_type.to_string().into(), &state_key) .ok() .flatten() .map(|s| (s, (event_type, state_key))) @@ -764,7 +765,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn get_shortstatekey( &self, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result> { if let Some(short) = self @@ -776,7 +777,7 @@ impl Rooms { return Ok(Some(*short)); } - let mut statekey = event_type.as_ref().as_bytes().to_vec(); + let mut statekey = event_type.to_string().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(state_key.as_bytes()); @@ -820,7 +821,7 @@ impl Rooms { #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortstatekey( &self, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, globals: &super::globals::Globals, ) -> Result { @@ -833,7 +834,7 @@ impl Rooms { return Ok(*short); } - let mut statekey = event_type.as_ref().as_bytes().to_vec(); + let mut statekey = event_type.to_string().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(state_key.as_bytes()); @@ -888,7 +889,7 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> { + pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { if let Some(id) = self .shortstatekey_cache .lock() @@ -910,7 +911,7 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; let event_type = - EventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") })?) .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; @@ -934,7 +935,7 @@ impl Rooms { pub fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_full(current_shortstatehash) } else { @@ -947,7 +948,7 @@ impl Rooms { pub fn room_state_get_id( &self, room_id: &RoomId, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { @@ -962,7 +963,7 @@ impl Rooms { pub fn room_state_get( &self, room_id: &RoomId, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ @@ -1281,7 +1282,7 @@ impl Rooms { { if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind, state_key) + .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { unsigned.insert( @@ -1346,7 +1347,7 @@ impl Rooms { // See if the event matches any known pushers let power_levels: RoomPowerLevelsEventContent = db .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -1367,7 +1368,11 @@ impl Rooms { let rules_for_user = db .account_data - .get(None, user, EventType::PushRules)? + .get( + None, + user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| Ruleset::server_default(user)); @@ -1416,12 +1421,12 @@ impl Rooms { .increment_batch(&mut highlights.into_iter())?; match pdu.kind { - EventType::RoomRedaction => { + RoomEventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { self.redact_pdu(redact_id, pdu)?; } } - EventType::RoomMember => { + RoomEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { #[derive(Deserialize)] struct ExtractMembership { @@ -1456,7 +1461,7 @@ impl Rooms { )?; } } - EventType::RoomMessage => { + RoomEventType::RoomMessage => { #[derive(Deserialize)] struct ExtractBody<'a> { #[serde(borrow)] @@ -1663,8 +1668,11 @@ impl Rooms { let states_parents = previous_shortstatehash .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - let shortstatekey = - self.get_or_create_shortstatekey(&new_pdu.kind, state_key, globals)?; + let shortstatekey = self.get_or_create_shortstatekey( + &new_pdu.kind.to_string().into(), + state_key, + globals, + )?; let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; @@ -1713,28 +1721,36 @@ impl Rooms { ) -> Result>> { let mut state = Vec::new(); // Add recommended events - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomCreate, "")? { - state.push(e.to_stripped_state_event()); - } if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomJoinRules, "")? + self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomCanonicalAlias, "")? + self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomAvatar, "")? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomName, "")? { - state.push(e.to_stripped_state_event()); - } if let Some(e) = self.room_state_get( &invite_event.room_id, - &EventType::RoomMember, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? 
+ { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomMember, invite_event.sender.as_str(), )? { state.push(e.to_stripped_state_event()); @@ -1807,7 +1823,7 @@ impl Rooms { .take(20) .collect::>(); - let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?; + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -1845,7 +1861,9 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self.room_state_get(room_id, &event_type, state_key)? { + if let Some(prev_pdu) = + self.room_state_get(room_id, &event_type.to_string().into(), state_key)? + { unsigned.insert( "prev_content".to_owned(), serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), @@ -1888,7 +1906,6 @@ impl Rooms { let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2031,7 +2048,7 @@ impl Rooms { let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember + || pdu.kind == RoomEventType::RoomMember && pdu .state_key .as_ref() @@ -2231,7 +2248,7 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self - .room_state_get(room_id, &EventType::RoomCreate, "")? + .room_state_get(room_id, &StateEventType::RoomCreate, "")? .and_then(|create| serde_json::from_str(create.content.get()).ok()) .and_then(|content: RoomCreateEventContent| content.predecessor) { @@ -2264,13 +2281,13 @@ impl Rooms { if let Some(tag_event) = db.account_data.get::( Some(&predecessor.room_id), user_id, - EventType::Tag, + RoomAccountDataEventType::Tag, )? { db.account_data .update( Some(room_id), user_id, - EventType::Tag, + RoomAccountDataEventType::Tag, &tag_event, &db.globals, ) @@ -2278,10 +2295,11 @@ impl Rooms { }; // Copy direct chat flag - if let Some(mut direct_event) = - db.account_data - .get::(None, user_id, EventType::Direct)? - { + if let Some(mut direct_event) = db.account_data.get::( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + )? { let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -2295,7 +2313,7 @@ impl Rooms { db.account_data.update( None, user_id, - EventType::Direct, + GlobalAccountDataEventType::Direct.to_string().into(), &direct_event, &db.globals, )?; @@ -2322,7 +2340,9 @@ impl Rooms { .get::( None, // Ignored users are in global account data user_id, // Receiver - EventType::IgnoredUserList, + GlobalAccountDataEventType::IgnoredUserList + .to_string() + .into(), )? .map_or(false, |ignored| { ignored @@ -2578,7 +2598,7 @@ impl Rooms { let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &EventType::RoomMember, user_id.as_str())? + self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -2592,7 +2612,7 @@ impl Rooms { self.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(user_id.to_string()), diff --git a/src/database/sending.rs b/src/database/sending.rs index b7d62c1..1ff2cdf 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -23,7 +23,10 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType}, + events::{ + push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType, + GlobalAccountDataEventType, + }, push, receipt::ReceiptType, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, @@ -635,7 +638,11 @@ impl Sending { let rules_for_user = db .account_data - .get(None, &userid, EventType::PushRules) + .get( + None, + &userid, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) .unwrap_or_default() .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 12b838b..ed0970d 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{identifiers::TransactionId, DeviceId, UserId}; +use ruma::{DeviceId, TransactionId, UserId}; use super::abstraction::Tree; diff --git a/src/database/users.rs b/src/database/users.rs index a66fa93..7c15f1d 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -2,11 +2,10 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, EventType}, - identifiers::MxcUri, + events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, - UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + UInt, UserId, }; use std::{collections::BTreeMap, mem, sync::Arc}; use tracing::warn; @@ -754,7 +753,7 @@ impl Users { for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms if rooms - .room_state_get(&room_id, &EventType::RoomEncryption, "")? + .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() { continue; diff --git a/src/main.rs b/src/main.rs index a9047ec..d20ee75 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,10 +26,7 @@ use http::{ Method, Uri, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{ - api::{client::error::ErrorKind, IncomingRequest}, - Outgoing, -}; +use ruma::api::{client::error::ErrorKind, IncomingRequest}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -408,16 +405,15 @@ macro_rules! 
impl_ruma_handler { #[allow(non_snake_case)] impl RumaHandler<($($ty,)* Ruma,)> for F where - Req: Outgoing + 'static, - Req::Incoming: IncomingRequest + Send, + Req: IncomingRequest + Send + 'static, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse, E>> + Fut: Future> + Send, E: IntoResponse, $( $ty: FromRequest + Send + 'static, )* { fn add_to_router(self, mut router: Router) -> Router { - let meta = Req::Incoming::METADATA; + let meta = Req::METADATA; let method_filter = method_to_filter(meta.method); for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { diff --git a/src/pdu.rs b/src/pdu.rs index ec6c961..aed2575 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,7 +2,7 @@ use crate::Error; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, @@ -29,7 +29,7 @@ pub struct PduEvent { pub sender: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: EventType, + pub kind: RoomEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, @@ -51,10 +51,10 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - EventType::RoomMember => &["membership"], - EventType::RoomCreate => &["creator"], - EventType::RoomJoinRules => &["join_rule"], - EventType::RoomPowerLevels => &[ + RoomEventType::RoomMember => &["membership"], + RoomEventType::RoomCreate => &["creator"], + RoomEventType::RoomJoinRules => &["join_rule"], + RoomEventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -64,7 +64,7 @@ impl PduEvent { "users", "users_default", ], - EventType::RoomHistoryVisibility => &["history_visibility"], + RoomEventType::RoomHistoryVisibility => &["history_visibility"], _ => &[], }; @@ -279,7 +279,7 @@ impl state_res::Event for PduEvent { &self.sender } - fn event_type(&self) -> &EventType { + fn event_type(&self) -> &RoomEventType { &self.kind } @@ -354,7 +354,7 @@ pub(crate) fn gen_event_id_canonical_json( #[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] - pub event_type: EventType, + pub event_type: RoomEventType, pub content: Box, pub unsigned: Option>, pub state_key: Option, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 119c3ea..15360e5 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,9 +1,6 @@ use crate::Error; use ruma::{ - api::client::uiaa::UiaaResponse, - identifiers::{DeviceId, UserId}, - signatures::CanonicalJsonValue, - Outgoing, ServerName, + api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId, }; use std::ops::Deref; @@ -11,8 +8,8 @@ use std::ops::Deref; mod axum; /// Extractor for Ruma request structs -pub struct Ruma { - pub body: T::Incoming, +pub struct Ruma { + pub body: T, pub sender_user: Option>, pub sender_device: Option>, pub sender_servername: Option>, @@ -21,8 +18,8 @@ pub struct Ruma { pub from_appservice: bool, } -impl Deref for Ruma { - type Target = T::Incoming; +impl Deref for Ruma { + type Target = T; fn deref(&self) -> &Self::Target { &self.body diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index 
c779e33..fdb140f 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -18,7 +18,7 @@ use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, signatures::CanonicalJsonValue, - DeviceId, Outgoing, ServerName, UserId, + DeviceId, ServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -29,8 +29,7 @@ use crate::{database::DatabaseGuard, server_server, Error, Result}; #[async_trait] impl FromRequest for Ruma where - T: Outgoing, - T::Incoming: IncomingRequest, + T: IncomingRequest, B: HttpBody + Send, B::Data: Send, B::Error: Into, @@ -44,7 +43,7 @@ where user_id: Option, } - let metadata = T::Incoming::METADATA; + let metadata = T::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; let path_params = Path::>::from_request(req).await?; @@ -284,7 +283,7 @@ where debug!("{:?}", http_request); - let body = T::Incoming::try_from_http_request(http_request, &path_params).map_err(|e| { + let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { warn!("{:?}", e); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; @@ -358,10 +357,7 @@ impl Credentials for XMatrix { } } -impl IntoResponse for RumaResponse -where - T: OutgoingResponse, -{ +impl IntoResponse for RumaResponse { fn into_response(self) -> Response { match self.0.try_into_http_response::() { Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), diff --git a/src/server_server.rs b/src/server_server.rs index 371f297..e9977f9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -45,7 +45,7 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, - EventType, + RoomEventType, StateEventType, }, int, receipt::ReceiptType, @@ -575,7 +575,7 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -604,7 +604,7 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -633,7 +633,7 @@ pub async fn get_public_rooms_route( /// Push EDUs and PDUs to this server. pub async fn send_transaction_message_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -924,7 +924,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( let create_event = db .rooms - .room_state_get(room_id, &EventType::RoomCreate, "") + .room_state_get(room_id, &StateEventType::RoomCreate, "") .map_err(|_| "Failed to ask database for event.".to_owned())? 
.ok_or_else(|| "Failed to find create event in db.".to_owned())?; @@ -1174,7 +1174,7 @@ fn handle_outlier_pdu<'a>( }; match auth_events.entry(( - auth_event.kind.clone(), + auth_event.kind.to_string().into(), auth_event .state_key .clone() @@ -1194,7 +1194,7 @@ fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if auth_events - .get(&(EventType::RoomCreate, "".to_owned())) + .get(&(StateEventType::RoomCreate, "".to_owned())) .map(|a| a.as_ref()) != Some(create_event) { @@ -1216,9 +1216,8 @@ fn handle_outlier_pdu<'a>( if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, // TODO: third party invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) .map_err(|_e| "Auth check failed".to_owned())? { @@ -1297,7 +1296,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &prev_pdu.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state.insert(shortstatekey, Arc::from(prev_event)); @@ -1342,7 +1345,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &prev_event.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu @@ -1352,8 +1359,10 @@ async fn upgrade_outlier_to_timeline_pdu( let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok(k) = db.rooms.get_statekey_from_short(k) { - state.insert(k, id.clone()); + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); } else { warn!("Failed to get_statekey_from_short."); } @@ -1387,7 +1396,11 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|((event_type, state_key), event_id)| { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; Ok((shortstatekey, event_id)) }) @@ -1441,7 +1454,11 @@ async fn upgrade_outlier_to_timeline_pdu( let shortstatekey = db .rooms - .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals) + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; match state.entry(shortstatekey) { @@ -1458,7 +1475,7 @@ async fn upgrade_outlier_to_timeline_pdu( // The original create event must still be in the state let create_shortstatekey = db .rooms - .get_shortstatekey(&EventType::RoomCreate, "") + .get_shortstatekey(&StateEventType::RoomCreate, "") .map_err(|_| "Failed to talk to db.")? 
.expect("Room exists"); @@ -1496,11 +1513,10 @@ async fn upgrade_outlier_to_timeline_pdu( let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, // TODO: third party invite |k, s| { db.rooms - .get_shortstatekey(k, s) + .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) @@ -1580,7 +1596,6 @@ async fn upgrade_outlier_to_timeline_pdu( let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -1655,7 +1670,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); @@ -1701,7 +1720,9 @@ async fn upgrade_outlier_to_timeline_pdu( .filter_map(|(k, id)| { db.rooms .get_statekey_from_short(k) - .map(|k| (k, id)) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) .ok() }) @@ -1732,7 +1753,11 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|((event_type, state_key), event_id)| { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; db.rooms .compress_state_event(shortstatekey, &event_id, &db.globals) @@ -2151,7 +2176,7 @@ fn append_incoming_pdu<'a>( let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember + || pdu.kind == RoomEventType::RoomMember && pdu .state_key .as_ref() @@ -2298,7 +2323,7 @@ fn get_auth_chain_inner( /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2341,7 +2366,7 @@ pub async fn get_event_route( /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2414,7 +2439,7 @@ pub async fn get_missing_events_route( /// - This does not include the event itself pub async fn get_event_authorization_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2462,7 +2487,7 @@ pub async fn get_event_authorization_route( /// Retrieves the current state of the room. pub async fn get_room_state_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2521,7 +2546,7 @@ pub async fn get_room_state_route( /// Retrieves the current state of the room. 
pub async fn get_room_state_ids_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2569,7 +2594,7 @@ pub async fn get_room_state_ids_route( /// Creates a join template. pub async fn create_join_event_template_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2598,7 +2623,7 @@ pub async fn create_join_event_template_route( let create_event = db .rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")?; + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -2645,11 +2670,11 @@ pub async fn create_join_event_template_route( .expect("member event is valid value"); let state_key = body.user_id.to_string(); - let kind = EventType::RoomMember; + let kind = StateEventType::RoomMember; let auth_events = db.rooms.get_auth_events( &body.room_id, - &kind, + &kind.to_string().into(), &body.user_id, Some(&state_key), &content, @@ -2680,7 +2705,7 @@ pub async fn create_join_event_template_route( origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), - kind, + kind: kind.to_string().into(), content, state_key: Some(state_key), prev_events, @@ -2704,7 +2729,6 @@ pub async fn create_join_event_template_route( let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2849,7 +2873,7 @@ async fn create_join_event( /// Submits a signed join event. pub async fn create_join_event_v1_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -2866,7 +2890,7 @@ pub async fn create_join_event_v1_route( /// Submits a signed join event. pub async fn create_join_event_v2_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -2883,7 +2907,7 @@ pub async fn create_join_event_v2_route( /// Invites a remote user to a room. pub async fn create_invite_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2993,7 +3017,7 @@ pub async fn create_invite_route( /// Gets information on all devices of the user. pub async fn get_devices_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3041,7 +3065,7 @@ pub async fn get_devices_route( /// Resolve a room alias to a room id. pub async fn get_room_information_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3066,7 +3090,7 @@ pub async fn get_room_information_route( /// Gets information on a profile. pub async fn get_profile_information_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3395,7 +3419,7 @@ pub(crate) async fn fetch_join_signing_keys( fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { let acl_event = match db .rooms - .room_state_get(room_id, &EventType::RoomServerAcl, "")? 
+ .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? { Some(acl) => acl, None => return Ok(()), From df4c38cb610c2ca8c3dcf09595b20c271ca26d70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 7 Apr 2022 13:22:10 +0200 Subject: [PATCH 1078/1727] fix: remove warnings --- src/client_server/account.rs | 2 +- src/client_server/context.rs | 2 +- src/client_server/membership.rs | 8 -------- src/client_server/push.rs | 4 +--- src/client_server/state.rs | 2 +- src/client_server/sync.rs | 2 +- src/client_server/tag.rs | 2 +- src/client_server/to_device.rs | 1 - src/database/abstraction/sqlite.rs | 8 ++++---- src/database/pusher.rs | 2 +- src/database/rooms.rs | 12 ++--------- src/database/sending.rs | 5 +---- src/server_server.rs | 32 ------------------------------ 13 files changed, 14 insertions(+), 68 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index be14b92..820e4f1 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -18,7 +18,7 @@ use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, room::message::RoomMessageEventContent, - GlobalAccountDataEventType, RoomAccountDataEventType, RoomEventType, + GlobalAccountDataEventType, RoomEventType, }, push, UserId, }; diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 8ecd6ec..de7aae9 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, - events::{EventType, StateEventType}, + events::StateEventType, }; use std::{collections::HashSet, convert::TryFrom}; use tracing::error; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 8fb2fec..ac0715a 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -828,14 +828,6 @@ pub(crate) async fn invite_helper<'a>( }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content .map_or(RoomVersionId::V6, |create_event| create_event.room_version); diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 5169b8b..dc45ea0 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -8,9 +8,7 @@ use ruma::{ set_pushrule_enabled, RuleKind, }, }, - events::{ - push_rules::PushRulesEvent, EventType, GlobalAccountDataEventType, RoomAccountDataEventType, - }, + events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index c0fbf73..50fe9b4 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -13,7 +13,7 @@ use ruma::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, - AnyStateEventContent, EventType, RoomEventType, StateEventType, + AnyStateEventContent, StateEventType, }, serde::Raw, EventId, RoomId, UserId, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index de6a45a..d61e689 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ 
}, events::{ room::member::{MembershipState, RoomMemberEventContent}, - EventType, RoomEventType, StateEventType, + RoomEventType, StateEventType, }, serde::Raw, DeviceId, RoomId, UserId, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 0340886..98d895c 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, - EventType, RoomAccountDataEventType, + RoomAccountDataEventType, }, }; use std::collections::BTreeMap; diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 42364f5..5f4ac58 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -7,7 +7,6 @@ use ruma::{ client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, - events::EventType, to_device::DeviceIdOrAllDevices, }; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 730c1bc..7cfa81a 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -19,7 +19,7 @@ thread_local! { struct PreparedStatementIterator<'a> { pub iterator: Box + 'a>, - pub statement_ref: NonAliasingBox>, + pub _statement_ref: NonAliasingBox>, } impl Iterator for PreparedStatementIterator<'_> { @@ -184,7 +184,7 @@ impl SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } @@ -283,7 +283,7 @@ impl Tree for SqliteTable { ); Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } else { let statement = Box::leak(Box::new( @@ -309,7 +309,7 @@ impl Tree for SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 410300e..6b906c2 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -11,7 +11,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, EventType, RoomEventType, StateEventType, + AnySyncRoomEvent, RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 44f3344..b9d0a87 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1835,14 +1835,6 @@ impl Rooms { }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content .map_or(RoomVersionId::V6, |create_event| create_event.room_version); @@ -1978,7 +1970,7 @@ impl Rooms { self.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -2001,7 +1993,7 @@ impl Rooms { // If the RoomMember event has a non-empty state_key, it is targeted at someone. // If it is our appservice user, we send this PDU to it. 
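Most of this commit only drops unused imports; the sqlite.rs hunk above is the exception, and it merely renames a field whose whole job is to keep the leaked prepared statement alive as long as the iterator borrowing from it. A tiny, self-contained illustration of why the leading underscore silences the dead_code warning (the names here are invented for the example and are not Conduit's):

    // The field is never read; it only has to outlive whatever refers to it.
    // Without the underscore rustc warns "field is never read"; with it, it stays quiet.
    struct Holder {
        _keep_alive: Box<String>,
    }

    fn main() {
        let h = Holder {
            _keep_alive: Box::new("SELECT 1".to_owned()),
        };
        drop(h); // the kept-alive value is released here, exactly as before the rename
    }
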
- if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() diff --git a/src/database/sending.rs b/src/database/sending.rs index 1ff2cdf..4c830d6 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -23,10 +23,7 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{ - push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType, - GlobalAccountDataEventType, - }, + events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, push, receipt::ReceiptType, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, diff --git a/src/server_server.rs b/src/server_server.rs index e9977f9..d574c4e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1201,18 +1201,6 @@ fn handle_outlier_pdu<'a>( return Err("Incoming event refers to wrong create event.".to_owned()); } - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? - .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1498,18 +1486,6 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event.expect("we always set this to some above"); // 11. Check the auth of the event passes based on the state of the event - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? 
- .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -2635,14 +2611,6 @@ pub async fn create_join_event_template_route( }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); From ada07de2048950feb106b9bfa9220ed0f85787a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Thu, 7 Apr 2022 12:11:55 +0000 Subject: [PATCH 1079/1727] feat: support user password resets --- src/config.rs | 2 ++ src/database.rs | 52 ++++++++++++++++++++++++++++++++++++++++- src/database/admin.rs | 47 ++++++++++++++++++++++++++++++++++++- src/database/globals.rs | 4 ++++ src/database/rooms.rs | 6 ++++- 5 files changed, 108 insertions(+), 3 deletions(-) diff --git a/src/config.rs b/src/config.rs index a6ab63e..4a3a054 100644 --- a/src/config.rs +++ b/src/config.rs @@ -68,6 +68,8 @@ pub struct Config { #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, + pub emergency_password: Option, + #[serde(flatten)] pub catchall: BTreeMap, } diff --git a/src/database.rs b/src/database.rs index e0745c5..69cf3fc 100644 --- a/src/database.rs +++ b/src/database.rs @@ -19,7 +19,14 @@ use abstraction::DatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; -use ruma::{DeviceId, EventId, RoomId, UserId}; +use ruma::{ + events::{ + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, EventType, + GlobalAccountDataEvent, + }, + push::Ruleset, + DeviceId, EventId, RoomId, UserId, +}; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -747,6 +754,23 @@ impl Database { guard.rooms.edus.presenceid_presence.clear()?; guard.admin.start_handler(Arc::clone(&db), admin_receiver); + + // Set emergency access for the conduit user + match set_emergency_access(&guard) { + Ok(pwd_set) => { + if pwd_set { + warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"); + guard.admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! 
Please unset it as soon as you finish admin account recovery!")); + } + } + Err(e) => { + error!( + "Could not set the configured emergency password for the conduit user: {}", + e + ) + } + }; + guard .sending .start_handler(Arc::clone(&db), sending_receiver); @@ -928,6 +952,32 @@ impl Database { } } +/// Sets the emergency password and push rules for the @conduit account in case emergency password is set +fn set_emergency_access(db: &Database) -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is a valid UserId"); + + db.users + .set_password(&conduit_user, db.globals.emergency_password().as_deref())?; + + let (ruleset, res) = match db.globals.emergency_password() { + Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), + None => (Ruleset::new(), Ok(false)), + }; + + db.account_data.update( + None, + &conduit_user, + EventType::PushRules, + &GlobalAccountDataEvent { + content: PushRulesEventContent { global: ruleset }, + }, + &db.globals, + )?; + + res +} + pub struct DatabaseGuard(OwnedRwLockReadGuard); impl Deref for DatabaseGuard { diff --git a/src/database/admin.rs b/src/database/admin.rs index f2e66e4..f5f3ba6 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ error::{Error, Result}, pdu::PduBuilder, - server_server, + server_server, utils, utils::HtmlEscape, Database, PduEvent, }; @@ -262,6 +262,12 @@ enum AdminCommand { /// Show configuration values ShowConfig, + + /// Reset user password + ResetPassword { + /// Username of the user for whom the password should be reset + username: String, + }, } fn process_admin_command( @@ -435,6 +441,45 @@ fn process_admin_command( // Construct and send the response RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) } + AdminCommand::ResetPassword { username } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + db.globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + + // Check if the specified user is valid + if !db.users.exists(&user_id)? + || db.users.is_deactivated(&user_id)? + || user_id + == UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "The specified user does not exist or is deactivated!", + )); + } + + let new_password = utils::random_string(20); + + match db.users.set_password(&user_id, Some(new_password.as_str())) { + Ok(()) => RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {}: {}", + user_id, new_password + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {}: {}", + user_id, e + )), + } + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index 9a9163b..ee7db53 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -264,6 +264,10 @@ impl Globals { &self.config.turn_secret } + pub fn emergency_password(&self) -> &Option { + &self.config.emergency_password + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
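With the pieces above in place, account recovery becomes a config option rather than manual database surgery. A sketch of the intended flow, reusing only names this commit introduces; the [global] section name is assumed from Conduit's example config and is not part of this patch:

    // Assumed config entry (written as a comment here because the real file is TOML):
    //
    //     [global]
    //     emergency_password = "recovery-only"
    //
    // On startup, set_emergency_access() pushes that value into the user table.
    // Some(_) sets the @conduit password and installs the default push rules,
    // while None removes the password again, which is why the warning above asks
    // the admin to unset the option once recovery is finished.
    let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name())
        .expect("@conduit:server_name is a valid UserId");

    db.users
        .set_password(&conduit_user, db.globals.emergency_password().as_deref())?;

From the admin room, the new command is then reachable as something like "@conduit:example.com: reset-password alice"; the exact spelling of reset-password is assumed from clap's default kebab-case renaming of the ResetPassword variant, not stated in the patch.
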
/// diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a0729..7939edc 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1491,7 +1491,11 @@ impl Rooms { let server_user = format!("@conduit:{}", db.globals.server_name()); let to_conduit = body.starts_with(&format!("{}: ", server_user)); - let from_conduit = pdu.sender == server_user; + + // This will evaluate to false if the emergency password is set up so that + // the administrator can execute commands as conduit + let from_conduit = + pdu.sender == server_user && db.globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { db.admin.process_message(body.to_string()); From d81216cad7cbb9cb7d0a91bdccbe9910555b4a8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 18 Jan 2022 16:53:25 +0100 Subject: [PATCH 1080/1727] improvement: preparing for room version 9 --- src/database.rs | 6 ++--- src/pdu.rs | 2 +- src/server_server.rs | 55 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 4 deletions(-) diff --git a/src/database.rs b/src/database.rs index 69cf3fc..4a03f18 100644 --- a/src/database.rs +++ b/src/database.rs @@ -21,8 +21,8 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; use ruma::{ events::{ - push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, EventType, - GlobalAccountDataEvent, + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, + GlobalAccountDataEvent, GlobalAccountDataEventType, }, push::Ruleset, DeviceId, EventId, RoomId, UserId, @@ -968,7 +968,7 @@ fn set_emergency_access(db: &Database) -> Result { db.account_data.update( None, &conduit_user, - EventType::PushRules, + GlobalAccountDataEventType::PushRules.to_string().into(), &GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, }, diff --git a/src/pdu.rs b/src/pdu.rs index aed2575..3b90533 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -51,7 +51,7 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - RoomEventType::RoomMember => &["membership"], + RoomEventType::RoomMember => &["join_authorised_via_users_server", "membership"], RoomEventType::RoomCreate => &["creator"], RoomEventType::RoomJoinRules => &["join_rule"], RoomEventType::RoomPowerLevels => &[ diff --git a/src/server_server.rs b/src/server_server.rs index d574c4e..596a54e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,6 +42,7 @@ use ruma::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, @@ -2590,6 +2591,33 @@ pub async fn create_join_event_template_route( acl_check(sender_servername, &body.room_id, &db)?; + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. 
} + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(&body.room_id)? @@ -2749,6 +2777,33 @@ async fn create_join_event( acl_check(sender_servername, room_id, db)?; + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db .rooms From 714873694db0f96a56c50064779db6b48972dca5 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 01:58:26 +0000 Subject: [PATCH 1081/1727] Refactor room version support, add default room version config --- src/client_server/capabilities.rs | 20 +++++++++++---- src/client_server/membership.rs | 15 ++++++----- src/client_server/room.rs | 8 +++--- src/config.rs | 11 ++++++++- src/database/globals.rs | 41 ++++++++++++++++++++++++++++++- src/database/rooms.rs | 18 ++++++++++---- src/server_server.rs | 11 ++++++--- 7 files changed, 96 insertions(+), 28 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 952db58..915cd7d 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,9 +1,8 @@ -use crate::{Result, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }, - RoomVersionId, }; use std::collections::BTreeMap; @@ -11,15 +10,26 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. 
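The restricted-room guard added above is identical in create_join_event_template_route and create_join_event. Purely as a sketch of how that duplication could be factored out, a hypothetical helper assembled from the calls those two hunks already make; no such helper exists in this patch:

    // Hypothetical helper: reject joins to rooms using the (unimplemented) restricted
    // join rule. Uses the same imports as server_server.rs.
    fn reject_restricted_rooms(db: &Database, room_id: &RoomId) -> Result<()> {
        let join_rules_event =
            db.rooms
                .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?;

        let content: Option<RoomJoinRulesEventContent> = join_rules_event
            .as_ref()
            .map(|event| {
                serde_json::from_str(event.content.get())
                    .map_err(|_| Error::bad_database("Invalid join rules event in db."))
            })
            .transpose()?;

        if let Some(content) = content {
            if matches!(content.join_rule, JoinRule::Restricted { .. }) {
                return Err(Error::BadRequest(
                    ErrorKind::Unknown,
                    "Conduit does not support restricted rooms yet.",
                ));
            }
        }

        Ok(())
    }
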
pub async fn get_capabilities_route( + db: DatabaseGuard, _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::V5, RoomVersionStability::Stable); - available.insert(RoomVersionId::V6, RoomVersionStability::Stable); + if db.globals.allow_unstable_room_versions() { + for room_version in &db.globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Stable); + } + } else { + for room_version in &db.globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Unstable); + } + } + for room_version in &db.globals.stable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Stable); + } let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: RoomVersionId::V6, + default: db.globals.default_room_version(), available, }; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ac0715a..0f440f4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -492,7 +492,7 @@ async fn join_room_by_id_helper( federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::V5, RoomVersionId::V6], + ver: &db.globals.supported_room_versions(), }, ) .await; @@ -507,11 +507,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) - if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 => - { - room_version - } + Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -828,9 +824,12 @@ pub(crate) async fn invite_helper<'a>( }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now + // If there was no create event yet, assume we are creating a room with the default + // version right now let room_version_id = create_event_content - .map_or(RoomVersionId::V6, |create_event| create_event.room_version); + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 1b3b840..a5b7970 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -23,7 +23,7 @@ use ruma::{ }, int, serde::{CanonicalJsonObject, JsonObject}, - RoomAliasId, RoomId, RoomVersionId, + RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -100,7 +100,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 { + if db.rooms.is_supported_version(&db, &room_version) { room_version } else { return Err(Error::BadRequest( @@ -109,7 +109,7 @@ pub async fn create_room_route( )); } } - None => RoomVersionId::V6, + None => db.globals.default_room_version(), }; let content = match &body.creation_content { @@ -484,7 +484,7 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { + if 
!db.rooms.is_supported_version(&db, &body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/config.rs b/src/config.rs index 4a3a054..29af883 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::ServerName; +use ruma::{RoomVersionId, ServerName}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; @@ -46,6 +46,10 @@ pub struct Config { pub allow_federation: bool, #[serde(default = "true_fn")] pub allow_room_creation: bool, + #[serde(default = "true_fn")] + pub allow_unstable_room_versions: bool, + #[serde(default = "default_default_room_version")] + pub default_room_version: RoomVersionId, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] @@ -246,3 +250,8 @@ fn default_log() -> String { fn default_turn_ttl() -> u64 { 60 * 60 * 24 } + +// I know, it's a great name +fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V6 +} diff --git a/src/database/globals.rs b/src/database/globals.rs index ee7db53..a12f462 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,7 +4,8 @@ use ruma::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, + DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + ServerSigningKeyId, UserId, }; use std::{ collections::{BTreeMap, HashMap}, @@ -41,6 +42,8 @@ pub struct Globals { jwt_decoding_key: Option>, federation_client: reqwest::Client, default_client: reqwest::Client, + pub stable_room_versions: Vec, + pub unstable_room_versions: Vec, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -145,6 +148,11 @@ impl Globals { }) .build()?; + // Supported and stable room versions + let stable_room_versions = vec![RoomVersionId::V6]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V5]; + let s = Self { globals, config, @@ -162,6 +170,8 @@ impl Globals { default_client, server_signingkeys, jwt_decoding_key, + stable_room_versions, + unstable_room_versions, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -232,6 +242,22 @@ impl Globals { self.config.allow_room_creation } + pub fn allow_unstable_room_versions(&self) -> bool { + self.config.allow_unstable_room_versions + } + + pub fn default_room_version(&self) -> RoomVersionId { + if self + .supported_room_versions() + .contains(&self.config.default_room_version.clone()) + { + self.config.default_room_version.clone() + } else { + error!("Room version in config isn't supported, falling back to Version 6"); + RoomVersionId::V6 + } + } + pub fn trusted_servers(&self) -> &[Box] { &self.config.trusted_servers } @@ -268,6 +294,19 @@ impl Globals { &self.config.emergency_password } + pub fn supported_room_versions(&self) -> Vec { + let mut room_versions: Vec = vec![]; + self.stable_room_versions + .iter() + .for_each(|room_version| room_versions.push(room_version.clone())); + if self.allow_unstable_room_versions() { + self.unstable_room_versions + .iter() + .for_each(|room_version| room_versions.push(room_version.clone())); + }; + room_versions + } + /// 
TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. /// diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 07772e7..6616305 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -133,6 +133,12 @@ pub struct Rooms { } impl Rooms { + /// Returns true if a given room version is supported + #[tracing::instrument(skip(self, db))] + pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { + db.globals.supported_room_versions().contains(room_version) + } + /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] @@ -1839,9 +1845,13 @@ impl Rooms { }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now + + // If there was no create event yet, assume we are creating a room with the default + // version right now let room_version_id = create_event_content - .map_or(RoomVersionId::V6, |create_event| create_event.room_version); + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = @@ -2672,9 +2682,7 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => { - version - } + Some(version) if self.is_supported_version(&db, &version) => version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; diff --git a/src/server_server.rs b/src/server_server.rs index 596a54e..19c9583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2639,9 +2639,12 @@ pub async fn create_join_event_template_route( }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = - create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); + // If there was no create event yet, assume we are creating a room with the default version + // right now + let room_version_id = create_event_content + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); if !body.ver.contains(&room_version_id) { @@ -2943,7 +2946,7 @@ pub async fn create_invite_route( acl_check(sender_servername, &body.room_id, &db)?; - if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { + if !db.rooms.is_supported_version(&db, &body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), From d8a3b257f2b052d199c3e38d9e1d48d6d6c0b6bf Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 02:22:32 +0000 Subject: [PATCH 1082/1727] Enable room version 4 --- src/client_server/capabilities.rs | 6 ++---- src/database/globals.rs | 2 +- src/database/rooms.rs | 1 - src/server_server.rs | 6 +++--- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 915cd7d..417ad29 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,8 +1,6 @@ use 
crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::{ - api::client::discovery::get_capabilities::{ - self, Capabilities, RoomVersionStability, RoomVersionsCapability, - }, +use ruma::api::client::discovery::get_capabilities::{ + self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; use std::collections::BTreeMap; diff --git a/src/database/globals.rs b/src/database/globals.rs index a12f462..c2ce8a5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -151,7 +151,7 @@ impl Globals { // Supported and stable room versions let stable_room_versions = vec![RoomVersionId::V6]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V5]; + let unstable_room_versions = vec![RoomVersionId::V4, RoomVersionId::V5]; let s = Self { globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6616305..0bccc84 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1845,7 +1845,6 @@ impl Rooms { }) .transpose()?; - // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content diff --git a/src/server_server.rs b/src/server_server.rs index 19c9583..6d58947 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2592,9 +2592,9 @@ pub async fn create_join_event_template_route( acl_check(sender_servername, &body.room_id, &db)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = + db.rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event .as_ref() From 4b28146ee7837451511a660cfb83130373ca38d3 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 08:57:27 +0000 Subject: [PATCH 1083/1727] Support room version 3 --- src/client_server/membership.rs | 3 ++- src/database/globals.rs | 2 +- src/database/rooms.rs | 23 +++++++++++++++++++++++ src/pdu.rs | 16 ++++++++++++---- src/server_server.rs | 6 +++--- 5 files changed, 41 insertions(+), 9 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0f440f4..65107a3 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -975,7 +975,8 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db) + { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json diff --git a/src/database/globals.rs b/src/database/globals.rs index c2ce8a5..b1afd96 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -151,7 +151,7 @@ impl Globals { // Supported and stable room versions let stable_room_versions = vec![RoomVersionId::V6]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V4, RoomVersionId::V5]; + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let s = Self { globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0bccc84..3133365 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3437,4 +3437,27 
@@ impl Rooms { Ok(()) } + + /// Returns the room's version. + #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> RoomVersionId { + let create_event = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .unwrap(); + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose() + .unwrap(); + + create_event_content + .map(|create_event| create_event.room_version) + .expect("Invalid room version") + } } diff --git a/src/pdu.rs b/src/pdu.rs index 3b90533..6e2bf5a 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,11 +1,11 @@ -use crate::Error; +use crate::{Database, Error}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, + state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -332,16 +332,24 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, + db: &Database, ) -> crate::Result<(Box, CanonicalJsonObject)> { - let value = serde_json::from_str(pdu.get()).map_err(|e| { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; + let room_id = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .expect("Invalid room id in event"); + + let room_version_id = db.rooms.get_room_version(&room_id); + let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::V6) + ruma::signatures::reference_hash(&value, &room_version_id) .expect("ruma can calculate reference hashes") ) .try_into() diff --git a/src/server_server.rs b/src/server_server.rs index 6d58947..e95c4c0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -659,7 +659,7 @@ pub async fn send_transaction_message_route( for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1859,7 +1859,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( Ok(res) => { warn!("Got {} over federation", next_id); let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu) { + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -2820,7 +2820,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { Ok(t) => t, Err(_) => { // Event 
could not be converted to canonical json From d655f4c1bee05e69765f0d4c76c0f605244ed17d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 5 Nov 2021 20:47:11 +0000 Subject: [PATCH 1084/1727] Cleanup rooms.rs, globals.rs, and pdu.rs --- src/database/globals.rs | 28 ++++++++++++---------------- src/database/rooms.rs | 15 ++++++--------- src/pdu.rs | 2 +- 3 files changed, 19 insertions(+), 26 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index b1afd96..9909ebd 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -153,7 +153,7 @@ impl Globals { // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - let s = Self { + let mut s = Self { globals, config, keypair: Arc::new(keypair), @@ -184,6 +184,14 @@ impl Globals { fs::create_dir_all(s.get_media_folder())?; + if !s + .supported_room_versions() + .contains(&s.config.default_room_version) + { + error!("Room version in config isn't supported, falling back to Version 6"); + s.config.default_room_version = RoomVersionId::V6; + }; + Ok(s) } @@ -247,15 +255,7 @@ impl Globals { } pub fn default_room_version(&self) -> RoomVersionId { - if self - .supported_room_versions() - .contains(&self.config.default_room_version.clone()) - { - self.config.default_room_version.clone() - } else { - error!("Room version in config isn't supported, falling back to Version 6"); - RoomVersionId::V6 - } + self.config.default_room_version.clone() } pub fn trusted_servers(&self) -> &[Box] { @@ -296,13 +296,9 @@ impl Globals { pub fn supported_room_versions(&self) -> Vec { let mut room_versions: Vec = vec![]; - self.stable_room_versions - .iter() - .for_each(|room_version| room_versions.push(room_version.clone())); + room_versions.extend(self.stable_room_versions.clone()); if self.allow_unstable_room_versions() { - self.unstable_room_versions - .iter() - .for_each(|room_version| room_versions.push(room_version.clone())); + room_versions.extend(self.unstable_room_versions.clone()); }; room_versions } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3133365..25337b3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3440,10 +3440,8 @@ impl Rooms { /// Returns the room's version. #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> RoomVersionId { - let create_event = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .unwrap(); + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -3453,11 +3451,10 @@ impl Rooms { Error::bad_database("Invalid create event in db.") }) }) - .transpose() - .unwrap(); - - create_event_content + .transpose()?; + let room_version = create_event_content .map(|create_event| create_event.room_version) - .expect("Invalid room version") + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) } } diff --git a/src/pdu.rs b/src/pdu.rs index 6e2bf5a..e26739f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -349,7 +349,7 @@ pub(crate) fn gen_event_id_canonical_json( let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &room_version_id) + ruma::signatures::reference_hash(&value, &room_version_id?) 
.expect("ruma can calculate reference hashes") ) .try_into() From 686319e2e311970c821e555b383629e1b6cbebe1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 18 Jan 2022 17:24:34 +0100 Subject: [PATCH 1085/1727] fix: error handling --- src/pdu.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pdu.rs b/src/pdu.rs index e26739f..20ec01e 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -342,7 +342,7 @@ pub(crate) fn gen_event_id_canonical_json( let room_id = value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .expect("Invalid room id in event"); + .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; let room_version_id = db.rooms.get_room_version(&room_id); From 0ae39807a478370a769217d01fa33514299a2b35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 18 Feb 2022 13:39:15 +0100 Subject: [PATCH 1086/1727] Add V9 to list of allowed versions --- src/database/globals.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index 9909ebd..797e5b1 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,7 @@ impl Globals { .build()?; // Supported and stable room versions - let stable_room_versions = vec![RoomVersionId::V6]; + let stable_room_versions = vec![RoomVersionId::V6, RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9]; // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; From e4600ccfef51a03029161790d0271c174958942d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 18 Feb 2022 13:41:37 +0100 Subject: [PATCH 1087/1727] bump ruma --- Cargo.lock | 500 +++++++++++++++++++++------------------- src/database/globals.rs | 7 +- 2 files changed, 269 insertions(+), 238 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd51825..3a251b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,7 +20,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", "once_cell", "version_check", ] @@ -84,9 +84,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" +checksum = "f2bf394cfbbe876f0ac67b13b6ca819f9c9f2fb9ec67223cceb1555fbab1c31a" dependencies = [ "brotli", "flate2", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", @@ -118,15 +118,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5611d4977882c5af1c0f7a34d51b5d87f784f86912bb543986b014ea4995ef93" +checksum = "47594e438a243791dba58124b6669561f5baa14cb12046641d8008bf035e5a25" dependencies = [ "async-trait", "axum-core", @@ -137,7 +137,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.1", + "itoa", "matchit", "memchr", "mime", @@ -155,9 +155,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95cd109b3e93c9541dcce5b0219dcf89169dcc58c1bebed65082808324258afb" +checksum = "9a671c9ae99531afdd5d3ee8340b8da547779430689947144c140fc74a740244" dependencies = [ "async-trait", "bytes", @@ -254,10 +254,19 @@ dependencies = [ ] [[package]] -name = "brotli" -version = "3.3.3" +name = "block-buffer" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +dependencies = [ + "generic-array", +] + +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -282,9 +291,9 @@ checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "bytemuck" -version = "1.7.3" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" +checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" [[package]] name = "byteorder" @@ -300,9 +309,9 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" dependencies = [ "jobserver", ] @@ -343,9 +352,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" dependencies = [ "glob", "libc", @@ -354,9 +363,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.0.10" +version = "3.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a30c3bf9ff12dfe5dae53f0a96e0febcd18420d1c0e7fad77796d9d5c4b5375" +checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" dependencies = [ "bitflags", "clap_derive", @@ -368,11 +377,11 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.0.6" +version = "3.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "517358c28fcef6607bf6f76108e02afad7e82297d132a6b846dcc1fc3efcd153" +checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -409,7 +418,7 @@ dependencies = [ "opentelemetry-jaeger", "parking_lot", "persy", - "rand 0.8.4", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -420,7 +429,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "sha-1", + "sha-1 
0.9.8", "sled", "thiserror", "thread_local", @@ -466,9 +475,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] @@ -490,9 +499,9 @@ checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] @@ -507,18 +516,18 @@ dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.3", - "crossbeam-utils 0.8.6", + "crossbeam-queue 0.3.5", + "crossbeam-utils 0.8.8", ] [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -529,17 +538,18 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] name = "crossbeam-epoch" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ + "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", "lazy_static", "memoffset", "scopeguard", @@ -556,12 +566,12 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b979d76c9fcb84dffc80a73f7290da0f83e4c95773494674cb44b76d13a7a110" +checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -576,14 +586,24 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ "cfg-if 1.0.0", "lazy_static", ] +[[package]] +name = "crypto-common" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +dependencies = [ + "generic-array", + "typenum", +] + [[package]] name = "crypto-mac" version = "0.11.1" @@ -596,12 +616,12 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = 
"90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -641,6 +661,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +dependencies = [ + "block-buffer 0.10.2", + "crypto-common", +] + [[package]] name = "directories" version = "3.0.2" @@ -652,9 +682,9 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", @@ -663,9 +693,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" +checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" dependencies = [ "signature", ] @@ -692,20 +722,20 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enum-as-inner" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ - "heck 0.3.3", + "heck", "proc-macro2", "quote", "syn", @@ -783,9 +813,9 @@ checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "futures" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", @@ -798,9 +828,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -808,15 +838,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-executor" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" dependencies = [ 
"futures-core", "futures-task", @@ -825,15 +855,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ "proc-macro2", "quote", @@ -842,21 +872,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-channel", "futures-core", @@ -902,9 +932,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ "cfg-if 1.0.0", "libc", @@ -929,9 +959,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -966,9 +996,9 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" +checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" dependencies = [ "base64 0.13.0", "bitflags", @@ -977,7 +1007,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1", + "sha-1 0.10.0", ] [[package]] @@ -989,15 +1019,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -1056,7 +1077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", - "digest", + "digest 0.9.0", ] [[package]] @@ -1078,7 +1099,7 @@ 
checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -1100,9 +1121,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" [[package]] name = "httpdate" @@ -1112,9 +1133,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes", "futures-channel", @@ -1125,9 +1146,9 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", - "socket2 0.4.3", + "socket2 0.4.4", "tokio", "tower-service", "tracing", @@ -1177,9 +1198,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", "hashbrown", @@ -1188,9 +1209,9 @@ dependencies = [ [[package]] name = "indoc" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a75aeaaef0ce18b58056d306c27b07436fbb34b8816c53094b76dd81803136" +checksum = "e7906a9fababaeacb774f72410e497a1d18de916322e33797bb2cd29baa23c9e" dependencies = [ "unindent", ] @@ -1230,9 +1251,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" [[package]] name = "itertools" @@ -1243,12 +1264,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.1" @@ -1281,9 +1296,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" +checksum = "d937f95470b270ce8b8950207715d71aa8e153c0d44c6684d59397ed4949160a" dependencies = [ "serde", ] @@ -1316,9 +1331,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.113" +version = "0.2.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eef78b64d87775463c549fbd80e19249ef436ea3bf1de2a1eb7e717ec7fab1e9" +checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" [[package]] name = "libloading" @@ -1361,9 +1376,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lmdb-rkv-sys" -version = "0.11.0" +version = "0.11.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" dependencies = [ "cc", "libc", @@ -1372,18 +1387,19 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" dependencies = [ "cfg-if 1.0.0", ] @@ -1478,14 +1494,15 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" dependencies = [ "libc", "log", "miow", "ntapi", + "wasi 0.11.0+wasi-snapshot-preview1", "winapi", ] @@ -1500,20 +1517,19 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", - "version_check", ] [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] @@ -1582,9 +1598,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" [[package]] name = "opaque-debug" @@ -1611,7 +1627,7 @@ dependencies = [ "lazy_static", "percent-encoding", "pin-project", - "rand 0.8.4", + "rand 0.8.5", "thiserror", "tokio", "tokio-stream", @@ -1696,9 +1712,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" +checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" [[package]] name = "pear" @@ -1748,15 +1764,15 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" -version = "1.2.1" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71907e1dfa6844b657f5ca59e9a076e7d6281efb4885526ba9e235a18e7e3b3" +checksum = "5af61053f1daed3ff0265fad7f924e43ce07642a336c79304f8e5aec205460fb" dependencies = [ "crc", "data-encoding", "fs2", "linked-hash-map", - "rand 0.8.4", + "rand 0.8.5", "thiserror", "unsigned-varint", "zigzag", @@ -1807,9 +1823,9 @@ dependencies = [ [[package]] name = 
"pkg-config" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" @@ -1831,9 +1847,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.1.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ "thiserror", "toml", @@ -1865,9 +1881,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] @@ -1893,9 +1909,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" +checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" dependencies = [ "proc-macro2", ] @@ -1910,19 +1926,18 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.3", - "rand_hc 0.3.1", ] [[package]] @@ -1960,7 +1975,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", ] [[package]] @@ -1972,39 +1987,31 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", "redox_syscall", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = 
"1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -2153,10 +2160,10 @@ dependencies = [ "http", "indexmap", "indoc", - "itoa 1.0.1", + "itoa", "js_int", "percent-encoding", - "rand 0.8.4", + "rand 0.8.5", "ruma-identifiers-validation", "ruma-macros", "serde", @@ -2275,7 +2282,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -2286,9 +2293,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ "log", "ring", @@ -2374,18 +2381,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.134" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b3c34c1690edf8174f5b289a336ab03f568a4460d8c6df75f2f3a692b3bc6a" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.134" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784ed1fbfa13fe191077537b0d70ec8ad1e903cfe04831da608aa36457cb653d" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -2394,11 +2401,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c059c05b48c5c0067d4b4b2b4f0732dd65feb52daf7e0ea09cd87e7dadc1af79" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2410,7 +2417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2433,23 +2440,34 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.3", +] + [[package]] name = "sha2" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -2496,9 +2514,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "sled" @@ -2508,7 +2526,7 @@ 
checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", "fs2", "fxhash", "libc", @@ -2536,9 +2554,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f82496b90c36d70af5fcd482edaa2e0bd16fade569de1330405fecbbdac736b" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", @@ -2567,9 +2585,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -2605,9 +2623,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" @@ -2631,9 +2649,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] @@ -2673,9 +2691,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.2+5.2.1-patched.2" +version = "0.4.3+5.2.1-patched.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +checksum = "a1792ccb507d955b46af42c123ea8863668fae24d03721e40cad6a41773dbb49" dependencies = [ "cc", "fs_extra", @@ -2684,9 +2702,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c14a5a604eb8715bc5785018a37d00739b180bcf609916ddf4393d33d49ccdf" +checksum = "a5b7bcecfafe4998587d636f9ae9d55eb9d0499877b88757767c346875067098" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -2719,9 +2737,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes", "libc", @@ -2731,6 +2749,7 @@ dependencies = [ "once_cell", "pin-project-lite", "signal-hook-registry", + "socket2 0.4.4", "tokio-macros", "winapi", ] @@ -2748,9 +2767,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" dependencies = [ "rustls", "tokio", @@ -2782,16 +2801,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.9" 
+version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] @@ -2805,9 +2824,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ "futures-core", "futures-util", @@ -2822,9 +2841,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03650267ad175b51c47d02ed9547fc7d4ba2c7e5cb76df0bed67edd1825ae297" +checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" dependencies = [ "async-compression", "bitflags", @@ -2857,9 +2876,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" dependencies = [ "cfg-if 1.0.0", "log", @@ -2870,9 +2889,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" dependencies = [ "proc-macro2", "quote", @@ -2881,11 +2900,12 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "90442985ee2f57c9e1b548ee72ae842f4a9a20e3f417cc38dbc5dc684d9bb4ee" dependencies = [ "lazy_static", + "valuable", ] [[package]] @@ -2912,9 +2932,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", @@ -2944,9 +2964,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.3" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -2959,7 +2979,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "thiserror", "tinyvec", @@ -2969,9 +2989,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.3" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = 
"ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -3023,12 +3043,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" - [[package]] name = "unicode-xid" version = "0.2.2" @@ -3037,9 +3051,9 @@ checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "unindent" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8" [[package]] name = "unsigned-varint" @@ -3071,9 +3085,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3108,6 +3128,12 @@ version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.79" @@ -3263,24 +3289,24 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.5.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc222aec311c323c717f56060324f32b82da1ce1dd81d9a09aa6a9030bfe08db" +checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2", "quote", diff --git a/src/database/globals.rs b/src/database/globals.rs index 797e5b1..d363e93 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,12 @@ impl Globals { .build()?; // Supported and stable room versions - let stable_room_versions = vec![RoomVersionId::V6, RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9]; + let stable_room_versions = vec![ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + ]; // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; From 3573d40027926f77617fcce63c762c793e785551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: 
Sat, 19 Feb 2022 12:53:11 +0100 Subject: [PATCH 1088/1727] fix warnings --- src/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c31636d..c2a79ca 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -14,7 +14,7 @@ use tracing::info; #[derive(Debug, Deserialize)] struct Claims { sub: String, - exp: usize, + //exp: usize, } /// # `GET /_matrix/client/r0/login` From b6b27b66c83c322a9887b7b207788f7e4b2c348f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Mar 2022 11:05:41 +0100 Subject: [PATCH 1089/1727] fix: don't allow unjoined users to send typing notifications --- src/client_server/typing.rs | 11 +++++++++-- src/server_server.rs | 26 +++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 60fc1cc..413c540 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,5 +1,5 @@ -use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use ruma::api::client::typing::create_typing_event; +use crate::{database::DatabaseGuard, Error, utils, Result, Ruma}; +use ruma::api::client::{typing::create_typing_event, error::ErrorKind}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -12,6 +12,13 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if !db.rooms.is_joined(sender_user, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are not in this room.", + )); + } + if let Typing::Yes(duration) = body.state { db.rooms.edus.typing_add( sender_user, diff --git a/src/server_server.rs b/src/server_server.rs index e95c4c0..67ad369 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -770,17 +770,21 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; + if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms.edus.typing_remove( + &typing.user_id, + &typing.room_id, + &db.globals, + )?; + } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. 
}) => { From 00b362b43ba61d0d5a2b43a944e47556730e42c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 7 Apr 2022 17:09:07 +0200 Subject: [PATCH 1090/1727] fix: cors warning --- src/client_server/typing.rs | 4 ++-- src/main.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 413c540..cac5a5f 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,5 +1,5 @@ -use crate::{database::DatabaseGuard, Error, utils, Result, Ruma}; -use ruma::api::client::{typing::create_typing_event, error::ErrorKind}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// diff --git a/src/main.rs b/src/main.rs index d20ee75..67ec82e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -141,7 +141,7 @@ async fn run_server(config: &Config, db: Arc>) -> io::Result<() .compression() .layer( CorsLayer::new() - .allow_origin(cors::any()) + .allow_origin(cors::Any) .allow_methods([ Method::GET, Method::POST, From 07a3a6fa9a9f8c00788fb262dd19139bd2c22192 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Fri, 8 Apr 2022 22:05:13 +0200 Subject: [PATCH 1091/1727] Return an error when signing an event fails Prevents the server from crashing/become unresponsive when overly long messages are sent --- src/database/rooms.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 25337b3..955489b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1938,13 +1938,25 @@ impl Rooms { CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), ); - ruma::signatures::hash_and_sign_event( + match ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, &room_version_id, - ) - .expect("event is valid, we just created it"); + ) { + Ok(_) => {} + Err(e) => { + return match e { + ruma::signatures::Error::PduSize => { + Err(Error::BadRequest(ErrorKind::TooLarge, "Message is to long")) + } + _ => Err(Error::BadRequest( + ErrorKind::Unknown, + "Signing event failed", + )), + } + } + } // Generate event id pdu.event_id = EventId::parse_arc(format!( From b10dbc747bed15b7eeffb22c3478869de697d060 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sat, 9 Apr 2022 15:13:01 +0200 Subject: [PATCH 1092/1727] Remove the address override in deb generated config This override was accidentally introduced by commit de6c331. The Debian postinst script will ask for and generate a config with the address set. This should not be overriden by what is set in the default config and is thus a deviation from the standard docs. --- debian/postinst | 3 --- 1 file changed, 3 deletions(-) diff --git a/debian/postinst b/debian/postinst index 378f99e..aab2480 100644 --- a/debian/postinst +++ b/debian/postinst @@ -78,9 +78,6 @@ trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" - -address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy -#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. 
EOF fi ;; From 729d66aa11180b56b8802191852529fda9354c36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 10 Apr 2022 14:56:43 +0200 Subject: [PATCH 1093/1727] feat: register missing add_backup_keys route --- src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.rs b/src/main.rs index 67ec82e..9a0928a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -226,6 +226,7 @@ fn routes() -> Router { .ruma_route(client_server::delete_backup_version_route) .ruma_route(client_server::get_latest_backup_info_route) .ruma_route(client_server::get_backup_info_route) + .ruma_route(client_server::add_backup_keys_route) .ruma_route(client_server::add_backup_keys_for_room_route) .ruma_route(client_server::add_backup_keys_for_session_route) .ruma_route(client_server::delete_backup_keys_for_room_route) From 090d0fe68420777c71b6c88124c2e04311a2e0be Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Wed, 13 Apr 2022 00:08:55 +0200 Subject: [PATCH 1094/1727] Fix typo --- src/database/rooms.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 955489b..c885c96 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1947,9 +1947,10 @@ impl Rooms { Ok(_) => {} Err(e) => { return match e { - ruma::signatures::Error::PduSize => { - Err(Error::BadRequest(ErrorKind::TooLarge, "Message is to long")) - } + ruma::signatures::Error::PduSize => Err(Error::BadRequest( + ErrorKind::TooLarge, + "Message is too long", + )), _ => Err(Error::BadRequest( ErrorKind::Unknown, "Signing event failed", From efe9d5000e1f85e21477e3a6345d2c756ced6c3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Thu, 14 Apr 2022 16:42:08 +0200 Subject: [PATCH 1095/1727] enable FedDest doc-test Doc rendering is exactly as before, but it now actually tests the code --- src/server_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index 67ad369..a227f57 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -79,12 +79,16 @@ use tracing::{debug, error, info, trace, warn}; /// was no port specified to construct a SocketAddr with. /// /// # Examples: -/// ```rust,ignore +/// ```rust +/// # use conduit::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { /// FedDest::Literal("198.51.100.3:8448".parse()?); /// FedDest::Literal("[2001:db8::4:5]:443".parse()?); /// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); /// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); /// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// # Ok(()) +/// # } /// ``` #[derive(Clone, Debug, PartialEq)] pub enum FedDest { From 23f29d1bda537f8c67d9e6c90a1650c0b8f1d13a Mon Sep 17 00:00:00 2001 From: rmsthebest Date: Sun, 17 Apr 2022 23:08:17 +0000 Subject: [PATCH 1096/1727] Added Caddy to the web proxy examples --- DEPLOY.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index a711cc9..99ba641 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -156,7 +156,7 @@ sudo chmod 700 /var/lib/matrix-conduit/ ## Setting up the Reverse Proxy -This depends on whether you use Apache, Nginx or another web server. +This depends on whether you use Apache, Caddy, Nginx or another web server. 
### Apache @@ -182,6 +182,19 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ $ sudo systemctl reload apache2 ``` +### Caddy +Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name). +```caddy +your.server.name, your.server.name:8448 { + reverse_proxy /_matrix/* 127.0.0.1:6167 +} +``` +That's it! Just start or enable the service and you're set. +```bash +$ sudo systemctl enable caddy +``` + + ### Nginx If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` @@ -216,6 +229,8 @@ $ sudo systemctl reload nginx ## SSL Certificate +If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. + The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: ```bash From bb033fe02a64da716f82cf64fc84e0c0b7622322 Mon Sep 17 00:00:00 2001 From: = <11652273+rmsthebest@users.noreply.github.com> Date: Sun, 1 May 2022 17:49:02 +0200 Subject: [PATCH 1097/1727] added a command to the admin bot to create a new user, even with registration disabled --- src/client_server/mod.rs | 1 + src/database/admin.rs | 65 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index a7241b0..65b7a10 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -65,3 +65,4 @@ pub use voip::*; pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; +pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; diff --git a/src/database/admin.rs b/src/database/admin.rs index 432bc3a..dcf09eb 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -6,6 +6,7 @@ use std::{ }; use crate::{ + client_server::AUTO_GEN_PASSWORD_LENGTH, error::{Error, Result}, pdu::PduBuilder, server_server, utils, @@ -268,6 +269,13 @@ enum AdminCommand { /// Username of the user for whom the password should be reset username: String, }, + /// Create a new user + CreateUser { + /// Username of the new user + username: String, + /// Password of the new user, if unspecified one is generated + password: Option, + }, } fn process_admin_command( @@ -480,6 +488,63 @@ fn process_admin_command( )), } } + AdminCommand::CreateUser { username, password } => { + let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + db.globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} is not allowed due to historical" + ))); + } + if db.users.exists(&user_id)? 
{ + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} already exists" + ))); + } + // Create user + db.users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let displayname = format!("{} ⚡️", user_id.localpart()); + db.users + .set_displayname(&user_id, Some(displayname.clone()))?; + + // Initial account data + db.account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }, + &db.globals, + )?; + + // we dont add a device since we're not the user, just the creator + + db.flush()?; + + // Inhibit login does not work for guests + RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: {password}" + )) + } }; Ok(reply_message_content) From 8392809eb1ce86b715ab48444cee9104288bb204 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 10 May 2022 07:26:19 +0000 Subject: [PATCH 1098/1727] Adjust some files to the AUR patches --- conduit-example.toml | 7 +++++++ debian/matrix-conduit.service | 1 + 2 files changed, 8 insertions(+) diff --git a/conduit-example.toml b/conduit-example.toml index 23c1844..362f7e7 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -1,3 +1,10 @@ +# ============================================================================= +# This is the official example config for Conduit. +# If you use it for your server, you will need to adjust it to your own needs. +# At the very least, change the server_name field! +# ============================================================================= + + [global] # The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. 
Examples: matrix.org, conduit.rs diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 7c12d1a..299f268 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -3,6 +3,7 @@ Description=Conduit Matrix homeserver After=network.target [Service] +DynamicUser=yes User=_matrix-conduit Group=_matrix-conduit Type=simple From d9782c508a6984b808c80abdbdb3579de4cda181 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:03:30 +0200 Subject: [PATCH 1099/1727] rust-analyzer-extension moved to rust-lang The recommended extension id could not be found as rust-analyzer now has the id `rust-lang.rust-analyzer` --- .vscode/extensions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 7963e9d..037f20d 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 +1,6 @@ { "recommendations": [ - "matklad.rust-analyzer", + "rust-lang.rust-analyzer", "bungcip.better-toml", "ms-azuretools.vscode-docker", "eamodio.gitlens", From ae8e143fe90b95179ed54668e171d07530b1b162 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:08:18 +0200 Subject: [PATCH 1100/1727] Add a section to Ports and forwarding --- DEPLOY.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/DEPLOY.md b/DEPLOY.md index a711cc9..b77dd29 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -58,6 +58,12 @@ In Debian you can use this command to create a Conduit user: sudo adduser --system conduit --no-create-home ``` +## Forwarding Ports in the firewall or the router + +Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. + +If conduit runs behind a router or in a container and has a different public IP address than the host system these public Ports need to be forwarded directly or indirectly to the port mentioned in the config. + ## Setting up a systemd service Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your From 58d784aa29b1602a872b1a287eab8c3e59375dce Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:23:08 +0200 Subject: [PATCH 1101/1727] Adding a hint to closed ports in the testing section --- DEPLOY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEPLOY.md b/DEPLOY.md index b77dd29..21fcadf 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -254,6 +254,7 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` - To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) + If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. # What's next? From bd3f9e0dbeca8c739741d5d8060401b75dfc1560 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:45:12 +0200 Subject: [PATCH 1102/1727] Fix spelling. --- DEPLOY.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 21fcadf..7dc25db 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -58,11 +58,11 @@ In Debian you can use this command to create a Conduit user: sudo adduser --system conduit --no-create-home ``` -## Forwarding Ports in the firewall or the router +## Forwarding ports in the firewall or the router Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. 
-If conduit runs behind a router or in a container and has a different public IP address than the host system these public Ports need to be forwarded directly or indirectly to the port mentioned in the config. +If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. ## Setting up a systemd service @@ -253,7 +253,7 @@ $ curl https://your.server.name/_matrix/client/versions $ curl https://your.server.name:8448/_matrix/client/versions ``` -- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/). If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. # What's next? From 8a63a2cc6848c8c8b27c7f914d84c4cf37eb918b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Dobo=C5=A1?= Date: Wed, 15 Jun 2022 13:07:07 +0000 Subject: [PATCH 1103/1727] Fix FluffyChat Compatibility --- src/client_server/unversioned.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 294c753..fd0277c 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -18,7 +18,7 @@ pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; From b862283ed9549b1664c4f606cac9f869c5e884e5 Mon Sep 17 00:00:00 2001 From: Miepee Date: Thu, 16 Jun 2022 13:23:45 +0000 Subject: [PATCH 1104/1727] Mention different databse backends in DEPLOY.md --- DEPLOY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 7dc25db..930a558 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -95,7 +95,8 @@ $ sudo systemctl daemon-reload ## Creating the Conduit configuration file Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment -to read it. You need to change at least the server name.** +to read it. You need to change at least the server name.** +You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. 
```toml [global] From c3924b566b4b67dd2755a5e5877ab47f7d6041dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 18 Jun 2022 11:04:16 +0000 Subject: [PATCH 1105/1727] feat: if txn id exists in the db, skip the event --- src/client_server/to_device.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 5f4ac58..51441dd 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -20,17 +20,14 @@ pub async fn send_event_to_device_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - // TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved // Check if this is a new transaction id - /* if db .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? .is_some() { - return Ok(send_event_to_device::v3::Response.into()); + return Ok(send_event_to_device::v3::Response {}); } - */ for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { From e03a2b86364a913e0d016b5962a3312f412e7597 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 18 Jun 2022 11:05:42 +0000 Subject: [PATCH 1106/1727] chore(docker): Bump base image to alpine 3.16.0 --- docker/ci-binaries-packaging.Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 6964a02..4c1199e 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,8 +7,7 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine@sha256:b66bccf2e0cca8e5fb79f7d3c573dd76c4787d1d883f5afe6c9d136a260bba07 AS runner -# = alpine:3.15.3 +FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner # Standard port on which Conduit launches. 
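The to-device change above (patch 1105) makes retried `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` requests idempotent: if the transaction ID has already been recorded for that user and device, the handler returns early instead of delivering the event a second time. Below is a minimal sketch of that idempotency pattern using only an in-memory set rather than Conduit's persistent `transaction_ids` tree; the type and method names are illustrative, not Conduit's actual API.

```rust
use std::collections::HashSet;

/// Remembers (user, device, txn_id) triples so a retried request can be
/// detected and skipped. Conduit keeps this in its database; a HashSet is
/// enough to show the control flow. (Illustrative sketch, not Conduit code.)
#[derive(Default)]
struct TxnIdCache {
    seen: HashSet<(String, String, String)>,
}

impl TxnIdCache {
    /// Returns `true` if this transaction was already handled for the given
    /// user and device, recording it otherwise.
    fn already_handled(&mut self, user: &str, device: &str, txn_id: &str) -> bool {
        !self
            .seen
            .insert((user.to_owned(), device.to_owned(), txn_id.to_owned()))
    }
}

fn main() {
    let mut cache = TxnIdCache::default();

    // First delivery: not seen yet, so the event would be processed.
    assert!(!cache.already_handled("@alice:example.org", "ABCDEFG", "txn-1"));

    // A client retry with the same transaction ID is recognised and skipped,
    // which is what the early `return Ok(...)` in the patch above achieves.
    assert!(cache.already_handled("@alice:example.org", "ABCDEFG", "txn-1"));
}
```

Conduit records the triple in its database, so deduplication also survives restarts; the sketch only illustrates the early-return control flow.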
From 84ec057f6e673e822bcc6e6693b59831602f41de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20St=C4=99pie=C5=84?= Date: Sat, 18 Jun 2022 11:13:37 +0000 Subject: [PATCH 1107/1727] Allow registration without username --- src/client_server/account.rs | 77 +++++++++++++++++------------------- 1 file changed, 36 insertions(+), 41 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 820e4f1..984b1ba 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -16,8 +16,10 @@ use ruma::{ uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, - room::message::RoomMessageEventContent, + room::{ + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + }, GlobalAccountDataEventType, RoomEventType, }, push, UserId, @@ -27,7 +29,7 @@ use tracing::{info, warn}; use register::RegistrationKind; -const GUEST_NAME_LENGTH: usize = 10; +const RANDOM_USER_ID_LENGTH: usize = 10; /// # `GET /_matrix/client/r0/register/available` /// @@ -95,38 +97,38 @@ pub async fn register_route( let is_guest = body.kind == RegistrationKind::Guest; - let mut missing_username = false; - - // Validate user id - let user_id = UserId::parse_with_server_name( - if is_guest { - utils::random_string(GUEST_NAME_LENGTH) - } else { - body.username.clone().unwrap_or_else(|| { - // If the user didn't send a username field, that means the client is just trying - // the get an UIAA error to see available flows - missing_username = true; - // Just give the user a random name. He won't be able to register with it anyway. - utils::random_string(GUEST_NAME_LENGTH) - }) + let user_id = match (&body.username, is_guest) { + (Some(username), false) => { + let proposed_user_id = + UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() + && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + if db.users.exists(&proposed_user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + proposed_user_id } - .to_lowercase(), - db.globals.server_name(), - ) - .ok() - .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } + _ => loop { + let proposed_user_id = UserId::parse_with_server_name( + utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), + db.globals.server_name(), + ) + .unwrap(); + if !db.users.exists(&proposed_user_id)? 
{ + break proposed_user_id; + } + }, + }; // UIAA let mut uiaainfo = UiaaInfo { @@ -169,13 +171,6 @@ pub async fn register_route( } } - if missing_username { - return Err(Error::BadRequest( - ErrorKind::MissingParam, - "Missing username field.", - )); - } - let password = if is_guest { None } else { From 7239243163362ede719b05e60aa5a37ccba1766b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20St=C4=99pie=C5=84?= Date: Sat, 18 Jun 2022 11:17:09 +0000 Subject: [PATCH 1108/1727] Hide users from user directory if they are only in private rooms and they don't share a room --- src/client_server/user_directory.rs | 46 +++++++++++++++++++++++++++-- tests/sytest/sytest-whitelist | 3 ++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 7c0bcc1..349c139 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,15 +1,23 @@ use crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::api::client::user_directory::search_users; +use ruma::{ + api::client::user_directory::search_users, + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, + }, +}; /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. /// -/// - TODO: Hide users that are not in any public rooms? +/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) +/// and don't share a room with the sender pub async fn search_users_route( db: DatabaseGuard, body: Ruma, ) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { @@ -41,7 +49,39 @@ pub async fn search_users_route( return None; } - Some(user) + let user_is_in_public_rooms = + db.rooms + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .any(|room| { + db.rooms + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| { + r.join_rule == JoinRule::Public + }) + }) + }) + }); + + if user_is_in_public_rooms { + return Some(user); + } + + let user_is_in_shared_rooms = db + .rooms + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .ok()? 
+ .next() + .is_some(); + + if user_is_in_shared_rooms { + return Some(user); + } + + None }); let results = users.by_ref().take(limit).collect(); diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index 5afc3fd..1c969db 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -445,6 +445,9 @@ Typing notifications don't leak Uninvited users cannot join the room Unprivileged users can set m.room.topic if it only needs level 0 User appears in user directory +User in private room doesn't appear in user directory +User joining then leaving public room appears and dissappears from directory +User in shared private room does appear in user directory until leave User can create and send/receive messages in a room with version 1 User can create and send/receive messages in a room with version 2 User can create and send/receive messages in a room with version 3 From 722e553c6edea297bd44e7a5e715a30496d34faa Mon Sep 17 00:00:00 2001 From: Jim Date: Sat, 18 Jun 2022 14:47:32 +0000 Subject: [PATCH 1109/1727] Remove rust version requirement from deploy.md --- DEPLOY.md | 1 - 1 file changed, 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index ddf0aac..f0990dc 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -43,7 +43,6 @@ $ sudo apt install libclang-dev build-essential $ cargo build --release ``` -Note that this currently requires Rust 1.50. If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). From 9b898248c7cd5c060fd806db98068c6298f6aac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 30 May 2022 12:58:43 +0200 Subject: [PATCH 1110/1727] feat: more admin commands, better logging --- Cargo.toml | 6 +- src/client_server/unversioned.rs | 7 +- src/database.rs | 2 + src/database/admin.rs | 56 +++++++++++++++ src/database/globals.rs | 4 ++ src/database/rooms.rs | 18 +++++ src/ruma_wrapper/axum.rs | 2 +- src/server_server.rs | 119 ++++++++++++++++++++++--------- 8 files changed, 177 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 64b7a23..10be750 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.2" , optional = true, features=["background_ops"] } +persy = { version = "1.2" , optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -64,7 +64,7 @@ regex = "1.5.4" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements -tracing = { version = "0.1.26", features = ["release_max_level_warn"] } +tracing = { version = "0.1.26", features = [] } tracing-subscriber = "0.2.20" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = 
["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index fd0277c..8a5c3d2 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -18,7 +18,12 @@ pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned()], + versions: vec![ + "r0.5.0".to_owned(), + "r0.6.0".to_owned(), + "v1.1".to_owned(), + "v1.2".to_owned(), + ], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; diff --git a/src/database.rs b/src/database.rs index 4a03f18..a0937c2 100644 --- a/src/database.rs +++ b/src/database.rs @@ -213,6 +213,8 @@ impl Database { userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + disabledroomids: builder.open_tree("disabledroomids")?, + lazyloadedids: builder.open_tree("lazyloadedids")?, userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, diff --git a/src/database/admin.rs b/src/database/admin.rs index dcf09eb..c6ef9a6 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -231,9 +231,15 @@ enum AdminCommand { /// List all the currently registered appservices ListAppservices, + /// List all rooms the server knows about + ListRooms, + /// List users in the database ListLocalUsers, + /// List all rooms we are currently handling an incoming pdu from + IncomingFederation, + /// Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) @@ -269,6 +275,7 @@ enum AdminCommand { /// Username of the user for whom the password should be reset username: String, }, + /// Create a new user CreateUser { /// Username of the new user @@ -276,6 +283,11 @@ enum AdminCommand { /// Password of the new user, if unspecified one is generated password: Option, }, + + /// Disables incoming federation handling for a room. + DisableRoom { room_id: Box }, + /// Enables incoming federation handling for a room again. 
+ EnableRoom { room_id: Box }, } fn process_admin_command( @@ -336,6 +348,26 @@ fn process_admin_command( RoomMessageEventContent::text_plain("Failed to get appservices.") } } + AdminCommand::ListRooms => { + let room_ids = db.rooms.iter_ids(); + let output = format!( + "Rooms:\n{}", + room_ids + .filter_map(|r| r.ok()) + .map(|id| id.to_string() + + "\tMembers: " + + &db + .rooms + .room_joined_count(&id) + .ok() + .flatten() + .unwrap_or(0) + .to_string()) + .collect::>() + .join("\n") + ); + RoomMessageEventContent::text_plain(output) + } AdminCommand::ListLocalUsers => match db.users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); @@ -344,6 +376,22 @@ fn process_admin_command( } Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, + AdminCommand::IncomingFederation => { + let map = db.globals.roomid_federationhandletime.read().unwrap(); + let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + msg += &format!( + "{} {}: {}m{}s\n", + r, + e, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + RoomMessageEventContent::text_plain(&msg) + } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); if let Some(event) = db.rooms.get_pdu_json(&event_id)? { @@ -545,6 +593,14 @@ fn process_admin_command( "Created user with user_id: {user_id} and password: {password}" )) } + AdminCommand::DisableRoom { room_id } => { + db.rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + RoomMessageEventContent::text_plain("Room disabled.") + } + AdminCommand::EnableRoom { room_id } => { + db.rooms.disabledroomids.remove(room_id.as_bytes())?; + RoomMessageEventContent::text_plain("Room enabled.") + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index d363e93..7e09128 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -52,6 +52,8 @@ pub struct Globals { pub roomid_mutex_insert: RwLock, Arc>>>, pub roomid_mutex_state: RwLock, Arc>>>, pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock, (Box, Instant)>>, + pub stateres_mutex: Arc>, pub rotate: RotationHandler, } @@ -183,6 +185,8 @@ impl Globals { roomid_mutex_state: RwLock::new(HashMap::new()), roomid_mutex_insert: RwLock::new(HashMap::new()), roomid_mutex_federation: RwLock::new(HashMap::new()), + roomid_federationhandletime: RwLock::new(HashMap::new()), + stateres_mutex: Arc::new(Mutex::new(())), sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c885c96..2c1b8f4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -76,6 +76,8 @@ pub struct Rooms { pub(super) userroomid_leftstate: Arc, pub(super) roomuserid_leftcount: Arc, + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 @@ -2858,6 +2860,18 @@ impl Rooms { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } + #[tracing::instrument(skip(self))] + pub fn iter_ids(&self) -> impl Iterator>> + '_ { + self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is 
invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + }) + } + #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { @@ -3140,6 +3154,10 @@ impl Rooms { .transpose() } + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined<'a>( diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index fdb140f..45e9d9a 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -338,7 +338,7 @@ impl Credentials for XMatrix { "origin" => origin = Some(value.try_into().ok()?), "key" => key = Some(value.to_owned()), "sig" => sig = Some(value.to_owned()), - _ => warn!( + _ => debug!( "Unexpected field `{}` in X-Matrix Authorization header", name ), diff --git a/src/server_server.rs b/src/server_server.rs index a227f57..7b08cf9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -768,7 +768,7 @@ pub async fn send_transaction_message_route( )?; } else { // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); + info!("No known event ids in read receipt: {:?}", user_updates); } } } @@ -926,6 +926,13 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err("Federation of this room is currently disabled on this server.".to_owned()); + } + } + // 1. Skip the PDU if we already have it as a timeline event if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { return Ok(Some(pdu_id.to_vec())); @@ -1038,6 +1045,15 @@ pub(crate) async fn handle_incoming_pdu<'a>( let mut errors = 0; for prev_id in dbg!(sorted) { + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err( + "Federation of this room is currently disabled on this server.".to_owned(), + ); + } + } + if errors >= 5 { break; } @@ -1047,6 +1063,11 @@ pub(crate) async fn handle_incoming_pdu<'a>( } let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); let event_id = pdu.event_id.clone(); if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, @@ -1063,6 +1084,11 @@ pub(crate) async fn handle_incoming_pdu<'a>( warn!("Prev event {} failed: {}", event_id, e); } let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); warn!( "Handling prev event {} took {}m{}s", event_id, @@ -1072,7 +1098,13 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } - upgrade_outlier_to_timeline_pdu( + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( incoming_pdu, val, &create_event, @@ -1081,10 +1113,17 @@ pub(crate) async fn handle_incoming_pdu<'a>( room_id, pub_key_map, ) - .await + .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r } -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip(create_event, value, db, pub_key_map))] fn handle_outlier_pdu<'a>( origin: &'a ServerName, create_event: &'a PduEvent, @@ -1166,7 +1205,7 @@ fn handle_outlier_pdu<'a>( .await; // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!( + info!( "Auth check for {} based on auth events", incoming_pdu.event_id ); @@ -1221,19 +1260,19 @@ fn handle_outlier_pdu<'a>( return Err("Event has failed auth check with auth events.".to_owned()); } - debug!("Validation successful."); + info!("Validation successful."); // 7. Persist the event as an outlier. db.rooms .add_pdu_outlier(&incoming_pdu.event_id, &val) .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; - debug!("Added pdu as outlier."); + info!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) }) } -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] async fn upgrade_outlier_to_timeline_pdu( incoming_pdu: Arc, val: BTreeMap, @@ -1255,6 +1294,8 @@ async fn upgrade_outlier_to_timeline_pdu( return Err("Event has been soft failed".into()); } + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { warn!("Invalid create event: {}", e); @@ -1270,7 +1311,7 @@ async fn upgrade_outlier_to_timeline_pdu( // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event."); + info!("Requesting state at event"); let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { @@ -1284,7 +1325,7 @@ async fn upgrade_outlier_to_timeline_pdu( prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); if let Some(Ok(mut state)) = state { - warn!("Using cached state"); + info!("Using cached state"); let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { "Could not find prev event, but we know the state.".to_owned() @@ -1307,7 +1348,7 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event = Some(state); } } else { - warn!("Calculating state at event using state res"); + info!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::new(); let mut okay = true; @@ -1375,18 +1416,18 @@ async fn upgrade_outlier_to_timeline_pdu( fork_states.push(state); } - state_at_incoming_event = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { Ok(new_state) => Some( new_state .into_iter() @@ -1407,12 +1448,12 @@ async fn upgrade_outlier_to_timeline_pdu( warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); None } - }; + } } } if state_at_incoming_event.is_none() { - warn!("Calling /state_ids"); + info!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match db @@ -1428,7 +1469,7 @@ async fn upgrade_outlier_to_timeline_pdu( .await { Ok(res) => { - warn!("Fetching state events at event."); + info!("Fetching state events at event."); let state_vec = fetch_and_handle_outliers( db, origin, @@ -1513,7 +1554,7 @@ async fn upgrade_outlier_to_timeline_pdu( if !check_result { return Err("Event has failed auth check with state at the event.".into()); } - debug!("Auth check succeeded."); + info!("Auth check succeeded."); // We start looking at current room state now, so lets lock the room @@ -1576,7 +1617,7 @@ async fn upgrade_outlier_to_timeline_pdu( .collect::>()?; // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - debug!("starting soft fail auth check"); + info!("Starting soft fail auth check"); let soft_fail = !state_res::event_auth::auth_check( &room_version, @@ -1610,8 +1651,10 @@ async fn upgrade_outlier_to_timeline_pdu( } if incoming_pdu.state_key.is_some() { + info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); + info!("Loading extremities"); for id in dbg!(&extremities) { match db .rooms @@ -1671,6 +1714,7 @@ async fn upgrade_outlier_to_timeline_pdu( let new_room_state = if fork_states.is_empty() { return Err("State is empty.".to_owned()); } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); // There was only one state, so it has to be the room's current state (because that is // always included) fork_states[0] @@ -1682,6 +1726,7 @@ async fn upgrade_outlier_to_timeline_pdu( }) .collect::>()? } else { + info!("Loading auth chains"); // We do need to force an update to this room's state update_state = true; @@ -1698,6 +1743,8 @@ async fn upgrade_outlier_to_timeline_pdu( ); } + info!("Loading fork states"); + let fork_states: Vec<_> = fork_states .into_iter() .map(|map| { @@ -1715,6 +1762,9 @@ async fn upgrade_outlier_to_timeline_pdu( }) .collect(); + info!("Resolving state"); + + let lock = db.globals.stateres_mutex.lock(); let state = match state_res::resolve( room_version_id, &fork_states, @@ -1733,6 +1783,10 @@ async fn upgrade_outlier_to_timeline_pdu( } }; + drop(lock); + + info!("State resolution done. Compressing state"); + state .into_iter() .map(|((event_type, state_key), event_id)| { @@ -1753,13 +1807,14 @@ async fn upgrade_outlier_to_timeline_pdu( // Set the new room state to the resolved state if update_state { + info!("Forcing new room state"); db.rooms .force_state(room_id, new_room_state, db) .map_err(|_| "Failed to set new room state.".to_owned())?; } - debug!("Updated resolved state"); } + info!("Appending pdu to timeline"); extremities.insert(incoming_pdu.event_id.clone()); // Now that the event has passed all auth it is added into the timeline. 
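A note on the locking pattern used at both state-resolution sites above (an editor's sketch, not part of the patch): `stateres_mutex` is a shared `Arc<Mutex<()>>` guard that is taken immediately before the synchronous `state_res::resolve` call and dropped explicitly right after it, so at most one CPU-heavy resolution runs at a time and the guard is never held across an `.await` point. A minimal standalone illustration of that shape, assuming a plain `std::sync::Mutex` and a tokio runtime (the function name and the dummy workload are placeholders):

use std::sync::{Arc, Mutex};

/// Serializes a CPU-heavy, synchronous computation behind a shared mutex and
/// releases the guard before the task awaits again (a std MutexGuard should
/// not be held across an .await point).
async fn resolve_serialized(stateres_mutex: Arc<Mutex<()>>) -> u64 {
    let lock = stateres_mutex.lock().unwrap(); // at most one resolution at a time
    let result: u64 = (0..1_000_000u64).sum(); // stand-in for state_res::resolve
    drop(lock);                                // explicit drop, as in the patch
    tokio::task::yield_now().await;            // only await again after the guard is gone
    result
}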
@@ -1780,7 +1835,7 @@ async fn upgrade_outlier_to_timeline_pdu( "Failed to add pdu to db.".to_owned() })?; - debug!("Appended incoming pdu."); + info!("Appended incoming pdu"); // Event has passed all auth/stateres checks drop(state_lock); @@ -1854,7 +1909,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( continue; } - warn!("Fetching {} over federation.", next_id); + info!("Fetching {} over federation.", next_id); match db .sending .send_federation_request( @@ -1865,7 +1920,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok(res) => { - warn!("Got {} over federation", next_id); + info!("Got {} over federation", next_id); let (calculated_event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { Ok(t) => t, From 0bc03e90a11de7d68d4d17676c9122bc2c6953ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 18 Jun 2022 16:38:41 +0200 Subject: [PATCH 1111/1727] improvement: make more things async --- src/client_server/context.rs | 2 +- src/client_server/membership.rs | 66 ++++++++------- src/client_server/state.rs | 3 +- src/client_server/sync.rs | 102 +++++++++++++---------- src/database/admin.rs | 12 +-- src/database/rooms.rs | 75 ++++++++--------- src/server_server.rs | 139 +++++++++++++++++++++++--------- 7 files changed, 244 insertions(+), 155 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index de7aae9..e93f5a5 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -137,7 +137,7 @@ pub async fn get_context_route( .expect("All rooms have state"), }; - let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; let end_token = events_after .last() diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 65107a3..a1b616b 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -29,7 +29,7 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -48,19 +48,20 @@ pub async fn join_room_by_id_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, &body.room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); + let mut servers = Vec::new(); // There is no body.server_name for /roomId/join + servers.extend( + db.rooms + .invite_state(sender_user, &body.room_id)? 
+ .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); - servers.insert(body.room_id.server_name().to_owned()); + servers.push(body.room_id.server_name().to_owned()); let ret = join_room_by_id_helper( &db, @@ -91,19 +92,20 @@ pub async fn join_room_by_id_or_alias_route( let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, &room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); + let mut servers = body.server_name.clone(); + servers.extend( + db.rooms + .invite_state(sender_user, &room_id)? + .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); - servers.insert(room_id.server_name().to_owned()); + servers.push(room_id.server_name().to_owned()); (servers, room_id) } Err(room_alias) => { @@ -413,7 +415,8 @@ pub async fn get_member_events_route( Ok(get_member_events::v3::Response { chunk: db .rooms - .room_state_full(&body.room_id)? + .room_state_full(&body.room_id) + .await? .iter() .filter(|(key, _)| key.0 == StateEventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event().into()) @@ -462,7 +465,7 @@ async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, - servers: &HashSet>, + servers: &[Box], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -478,7 +481,7 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + if !db.rooms.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); @@ -1032,6 +1035,13 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } + if !db.rooms.is_joined(sender_user, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + let mutex_state = Arc::clone( db.globals .roomid_mutex_state diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 50fe9b4..4df953c 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -124,7 +124,8 @@ pub async fn get_state_events_route( Ok(get_state_events::v3::Response { room_state: db .rooms - .room_state_full(&body.room_id)? + .room_state_full(&body.room_id) + .await? 
.values() .map(|pdu| pdu.to_state_event()) .collect(), diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d61e689..0c294b7 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -230,18 +230,20 @@ async fn sync_helper( for room_id in all_joined_rooms { let room_id = room_id?; - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + db.globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let timeline_pdus; let limited; @@ -296,10 +298,12 @@ async fn sync_helper( // Database queries: - let current_shortstatehash = db - .rooms - .current_shortstatehash(&room_id)? - .expect("All rooms have state"); + let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; @@ -377,11 +381,12 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); + let mut i = 0; for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; @@ -394,6 +399,11 @@ async fn sync_helper( } }; state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } else if !lazy_load_enabled || body.full_state || timeline_users.contains(&state_key) @@ -411,6 +421,11 @@ async fn sync_helper( lazy_loaded.insert(uid); } state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } } @@ -462,8 +477,8 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; + let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { @@ -490,6 +505,7 @@ async fn sync_helper( } state_events.push(pdu); + tokio::task::yield_now().await; } } } @@ -753,17 +769,19 @@ async fn sync_helper( for result in all_left_rooms { let (room_id, left_state_events) = result?; - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + db.globals + 
.roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; @@ -793,17 +811,19 @@ async fn sync_helper( for result in all_invited_rooms { let (room_id, invite_state_events) = result?; - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + db.globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; diff --git a/src/database/admin.rs b/src/database/admin.rs index c6ef9a6..3ed1a8a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -116,7 +116,7 @@ impl Admin { send_message(content, guard, &state_lock); } AdminRoomEvent::ProcessMessage(room_message) => { - let reply_message = process_admin_message(&*guard, room_message); + let reply_message = process_admin_message(&*guard, room_message).await; send_message(reply_message, guard, &state_lock); } @@ -143,7 +143,7 @@ impl Admin { } // Parse and process a message from the admin room -fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -161,7 +161,7 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven } }; - match process_admin_command(db, admin_command, body) { + match process_admin_command(db, admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -290,7 +290,7 @@ enum AdminCommand { EnableRoom { room_id: Box }, } -fn process_admin_command( +async fn process_admin_command( db: &Database, command: AdminCommand, body: Vec<&str>, @@ -404,7 +404,9 @@ fn process_admin_command( Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let count = server_server::get_auth_chain(room_id, vec![event_id], db) + .await? + .count(); let elapsed = start.elapsed(); RoomMessageEventContent::text_plain(format!( "Loaded auth chain with length {} in {:?}", diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2c1b8f4..7b3b750 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -144,20 +144,28 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") .1; - full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .collect() + let mut result = BTreeMap::new(); + let mut i = 0; + for compressed in full_state.into_iter() { + let parsed = self.parse_compressed_state_event(compressed)?; + result.insert(parsed.0, parsed.1); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + Ok(result) } #[tracing::instrument(skip(self))] - pub fn state_full( + pub async fn state_full( &self, shortstatehash: u64, ) -> Result>> { @@ -166,14 +174,13 @@ impl Rooms { .pop() .expect("there is always one layer") .1; - Ok(full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .filter_map(|r| r.ok()) - .map(|(_, eventid)| self.get_pdu(&eventid)) - .filter_map(|r| r.ok().flatten()) - .map(|pdu| { - Ok::<_, Error>(( + + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state { + let (_, eventid) = self.parse_compressed_state_event(compressed)?; + if let Some(pdu) = self.get_pdu(&eventid)? { + result.insert( ( pdu.kind.to_string().into(), pdu.state_key @@ -182,10 +189,16 @@ impl Rooms { .clone(), ), pdu, - )) - }) - .filter_map(|r| r.ok()) - .collect()) + ); + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + Ok(result) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -228,7 +241,6 @@ impl Rooms { } /// Returns the state hash for this pdu. - #[tracing::instrument(skip(self))] pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.eventid_shorteventid .get(event_id.as_bytes())? @@ -531,7 +543,6 @@ impl Rooms { } } - #[tracing::instrument(skip(self, globals))] pub fn compress_state_event( &self, shortstatekey: u64, @@ -548,7 +559,6 @@ impl Rooms { } /// Returns shortstatekey, event id - #[tracing::instrument(skip(self, compressed_event))] pub fn parse_compressed_state_event( &self, compressed_event: CompressedStateEvent, @@ -707,7 +717,6 @@ impl Rooms { } /// Returns (shortstatehash, already_existed) - #[tracing::instrument(skip(self, globals))] fn get_or_create_shortstatehash( &self, state_hash: &StateHashId, @@ -728,7 +737,6 @@ impl Rooms { }) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -759,7 +767,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self))] pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(room_id.as_bytes())? @@ -770,7 +777,6 @@ impl Rooms { .transpose() } - #[tracing::instrument(skip(self))] pub fn get_shortstatekey( &self, event_type: &StateEventType, @@ -808,7 +814,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortroomid( &self, room_id: &RoomId, @@ -826,7 +831,6 @@ impl Rooms { }) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortstatekey( &self, event_type: &StateEventType, @@ -867,7 +871,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self))] pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { if let Some(id) = self .shorteventid_cache @@ -896,7 +899,6 @@ impl Rooms { Ok(event_id) } - #[tracing::instrument(skip(self))] pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { if let Some(id) = self .shortstatekey_cache @@ -940,12 +942,12 @@ impl Rooms { /// Returns the full room state. 
#[tracing::instrument(skip(self))] - pub fn room_state_full( + pub async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash) + self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) } @@ -982,14 +984,12 @@ impl Rooms { } /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1018,7 +1018,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1037,7 +1036,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? @@ -1048,7 +1046,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu_json( &self, event_id: &EventId, @@ -1068,7 +1065,6 @@ impl Rooms { } /// Returns the pdu's id. - #[tracing::instrument(skip(self))] pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid.get(event_id.as_bytes()) } @@ -1076,7 +1072,6 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1095,7 +1090,6 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] pub fn get_pdu(&self, event_id: &EventId) -> Result>> { if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { return Ok(Some(Arc::clone(p))); @@ -1132,7 +1126,6 @@ impl Rooms { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - #[tracing::instrument(skip(self))] pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1143,7 +1136,6 @@ impl Rooms { } /// Returns the pdu as a `BTreeMap`. - #[tracing::instrument(skip(self))] pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1232,7 +1224,6 @@ impl Rooms { } /// Returns the pdu from the outlier tree. - #[tracing::instrument(skip(self))] pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
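The rooms.rs changes above follow one recurring pattern: formerly synchronous iterator pipelines such as `state_full_ids` and `state_full` become async functions that loop manually and call `tokio::task::yield_now().await` every 100 iterations, so loading the state of a very large room no longer starves other tasks on the runtime. A self-contained sketch of that pattern, assuming tokio (which this codebase already uses); the `load_many` name, key type, and dummy lookup are placeholders for this illustration only:

use std::collections::BTreeMap;

/// Walks a large set of keys, yielding back to the tokio scheduler every 100
/// iterations so other tasks keep making progress during the long load.
async fn load_many(keys: Vec<u64>) -> BTreeMap<u64, String> {
    let mut result = BTreeMap::new();
    let mut i: u32 = 0;
    for key in keys {
        result.insert(key, format!("value-{}", key)); // stand-in for the real lookup
        i += 1;
        if i % 100 == 0 {
            tokio::task::yield_now().await; // cooperative scheduling point
        }
    }
    result
}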
diff --git a/src/server_server.rs b/src/server_server.rs index 7b08cf9..6fa83e4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -691,7 +691,7 @@ pub async fn send_transaction_message_route( .roomid_mutex_federation .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let mutex_lock = mutex.lock().await; @@ -1054,6 +1054,25 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + if errors >= 5 { break; } @@ -1068,7 +1087,6 @@ pub(crate) async fn handle_incoming_pdu<'a>( .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - let event_id = pdu.event_id.clone(); if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, json, @@ -1081,7 +1099,21 @@ pub(crate) async fn handle_incoming_pdu<'a>( .await { errors += 1; - warn!("Prev event {} failed: {}", event_id, e); + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } } let elapsed = start_time.elapsed(); db.globals @@ -1091,7 +1123,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .remove(&room_id.to_owned()); warn!( "Handling prev event {} took {}m{}s", - event_id, + prev_id, elapsed.as_secs() / 60, elapsed.as_secs() % 60 ); @@ -1321,8 +1353,11 @@ async fn upgrade_outlier_to_timeline_pdu( .pdu_shortstatehash(prev_event) .map_err(|_| "Failed talking to db".to_owned())?; - let state = - prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) + } else { + None + }; if let Some(Ok(mut state)) = state { info!("Using cached state"); @@ -1378,6 +1413,7 @@ async fn upgrade_outlier_to_timeline_pdu( let mut leaf_state: BTreeMap<_, _> = db .rooms .state_full_ids(sstatehash) + .await .map_err(|_| "Failed to ask db for room state.".to_owned())?; if let Some(state_key) = &prev_event.state_key { @@ -1409,6 +1445,7 @@ async fn upgrade_outlier_to_timeline_pdu( auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) + .await .map_err(|_| "Failed to load auth chain.".to_owned())? .collect(), ); @@ -1535,6 +1572,7 @@ async fn upgrade_outlier_to_timeline_pdu( let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + info!("Starting auth check"); // 11. 
Check the auth of the event passes based on the state of the event let check_result = state_res::event_auth::auth_check( &room_version, @@ -1554,7 +1592,7 @@ async fn upgrade_outlier_to_timeline_pdu( if !check_result { return Err("Event has failed auth check with state at the event.".into()); } - info!("Auth check succeeded."); + info!("Auth check succeeded"); // We start looking at current room state now, so lets lock the room @@ -1570,6 +1608,7 @@ async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); let mut extremities = db .rooms .get_pdu_leaves(room_id) @@ -1585,28 +1624,7 @@ async fn upgrade_outlier_to_timeline_pdu( // Only keep those extremities were not referenced yet extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? - .expect("every room has state"); - - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .map_err(|_| "Failed to load room state.")?; - - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - + info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event .iter() .map(|(shortstatekey, id)| { @@ -1619,6 +1637,17 @@ async fn upgrade_outlier_to_timeline_pdu( // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); + let auth_events = db + .rooms + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1651,6 +1680,19 @@ async fn upgrade_outlier_to_timeline_pdu( } if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? + .expect("every room has state"); + + let current_state_ids = db + .rooms + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); @@ -1738,6 +1780,7 @@ async fn upgrade_outlier_to_timeline_pdu( state.iter().map(|(_, id)| id.clone()).collect(), db, ) + .await .map_err(|_| "Failed to load auth chain.".to_owned())? 
.collect(), ); @@ -1899,11 +1942,17 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); let mut events_all = HashSet::new(); + let mut i = 0; while let Some(next_id) = todo_auth_events.pop() { if events_all.contains(&next_id) { continue; } + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; @@ -2242,7 +2291,7 @@ fn append_incoming_pdu<'a>( } #[tracing::instrument(skip(starting_events, db))] -pub(crate) fn get_auth_chain<'a>( +pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, starting_events: Vec>, db: &'a Database, @@ -2251,10 +2300,15 @@ pub(crate) fn get_auth_chain<'a>( let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + let mut i = 0; for id in starting_events { let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } let mut full_auth_chain = HashSet::new(); @@ -2277,6 +2331,7 @@ pub(crate) fn get_auth_chain<'a>( let mut chunk_cache = HashSet::new(); let mut hits2 = 0; let mut misses2 = 0; + let mut i = 0; for (sevent_id, event_id) in chunk { if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { hits2 += 1; @@ -2292,6 +2347,11 @@ pub(crate) fn get_auth_chain<'a>( auth_chain.len() ); chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } }; } println!( @@ -2512,7 +2572,7 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2557,7 +2617,8 @@ pub async fn get_room_state_route( let pdus = db .rooms - .state_full_ids(shortstatehash)? + .state_full_ids(shortstatehash) + .await? .into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( @@ -2566,7 +2627,8 @@ pub async fn get_room_state_route( }) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -2616,12 +2678,14 @@ pub async fn get_room_state_ids_route( let pdu_ids = db .rooms - .state_full_ids(shortstatehash)? + .state_full_ids(shortstatehash) + .await? 
.into_iter() .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -2927,12 +2991,13 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), db, - )?; + ) + .await?; let servers = db .rooms From 7bee9c1c69e8fb3d80039dd7532d4fdd5cfc5d66 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Sun, 19 Jun 2022 06:56:51 +0200 Subject: [PATCH 1112/1727] Length of passwords consistently use the constant --- src/database/admin.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 3ed1a8a..5a0c28a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -525,7 +525,7 @@ async fn process_admin_command( )); } - let new_password = utils::random_string(20); + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); match db.users.set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( From 0c8e51e1b70fcd8bc6b541614a3a0ff555817ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Jun 2022 15:38:21 +0200 Subject: [PATCH 1113/1727] Upgrade dependencies --- Cargo.lock | 479 +++++++++++++++++++++++++++++------------------------ Cargo.toml | 28 ++-- 2 files changed, 273 insertions(+), 234 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a251b6..d8d791f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,7 +20,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "once_cell", "version_check", ] @@ -84,9 +84,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2bf394cfbbe876f0ac67b13b6ca819f9c9f2fb9ec67223cceb1555fbab1c31a" +checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695" dependencies = [ "brotli", "flate2", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ "proc-macro2", "quote", @@ -124,9 +124,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.1" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47594e438a243791dba58124b6669561f5baa14cb12046641d8008bf035e5a25" +checksum = "8b4d4f9a5ca8b1ab8de59e663e68c6207059239373ca72980f5be7ab81231f74" dependencies = [ "async-trait", "axum-core", @@ -145,6 +145,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper", "tokio", "tower", @@ -155,9 +156,9 @@ dependencies 
= [ [[package]] name = "axum-core" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a671c9ae99531afdd5d3ee8340b8da547779430689947144c140fc74a740244" +checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" dependencies = [ "async-trait", "bytes", @@ -169,9 +170,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cfd9dbe28ebde5c0460067ea27c6f3b1d514b699c4e0a5aab0fb63e452a8a8" +checksum = "abf18303ef7e23b045301555bf8a0dfbc1444ea1a37b3c81757a32680ace4d7d" dependencies = [ "arc-swap", "bytes", @@ -181,7 +182,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.0", "tokio", "tokio-rustls", "tower-service", @@ -285,9 +286,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" [[package]] name = "bytemuck" @@ -352,9 +353,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" dependencies = [ "glob", "libc", @@ -363,23 +364,23 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.8" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" dependencies = [ "bitflags", "clap_derive", + "clap_lex", "indexmap", - "lazy_static", - "os_str_bytes", + "once_cell", "textwrap", ] [[package]] name = "clap_derive" -version = "3.1.7" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1" +checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" dependencies = [ "heck", "proc-macro-error", @@ -388,6 +389,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -517,17 +527,17 @@ dependencies = [ "crossbeam-deque", "crossbeam-epoch", "crossbeam-queue 0.3.5", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -538,20 +548,20 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] name = "crossbeam-epoch" -version = "0.9.8" 
+version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", - "lazy_static", + "crossbeam-utils 0.8.9", "memoffset", + "once_cell", "scopeguard", ] @@ -571,7 +581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -586,12 +596,12 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" dependencies = [ "cfg-if 1.0.0", - "lazy_static", + "once_cell", ] [[package]] @@ -673,9 +683,9 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.2" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ "dirs-sys", ] @@ -693,9 +703,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -769,14 +779,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", - "miniz_oxide 0.4.4", + "miniz_oxide 0.5.3", ] [[package]] @@ -932,13 +940,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -985,13 +993,19 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "hashlink" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -1093,9 +1107,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" 
dependencies = [ "bytes", "fnv", @@ -1104,9 +1118,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -1121,9 +1135,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -1133,9 +1147,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -1198,23 +1212,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.1", "serde", ] [[package]] name = "indoc" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7906a9fababaeacb774f72410e497a1d18de916322e33797bb2cd29baa23c9e" -dependencies = [ - "unindent", -] +checksum = "05a0bd019339e5d968b37855180087b7b9d512c5046fbd244cf8c95687927d6e" [[package]] name = "inlinable_string" @@ -1251,9 +1262,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" +checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" @@ -1266,9 +1277,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "jobserver" @@ -1287,9 +1298,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] @@ -1331,9 +1342,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.122" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" +checksum = 
"349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libloading" @@ -1397,9 +1408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if 1.0.0", ] @@ -1448,9 +1459,9 @@ checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1484,35 +1495,23 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -1525,15 +1524,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "ntapi" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1547,9 +1537,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -1557,9 +1547,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -1579,9 +1569,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] @@ -1598,9 +1588,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "opaque-debug" @@ -1668,12 +1658,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] +checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" [[package]] name = "page_size" @@ -1800,9 +1787,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1881,11 +1868,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1909,9 +1896,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.17" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d" dependencies = [ "proc-macro2", ] @@ -1975,7 +1962,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -2002,16 +1989,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ "aho-corasick", "memchr", @@ -2029,9 +2016,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "reqwest" @@ -2057,7 +2044,7 @@ dependencies = [ "pin-project-lite", "rustls", "rustls-native-certs", - "rustls-pemfile", + "rustls-pemfile 0.2.1", "serde", "serde_json", "serde_urlencoded", @@ -2282,7 +2269,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -2293,9 +2280,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.4" +version = "0.20.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", @@ -2305,12 +2292,12 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.0", "schannel", "security-framework", ] @@ -2325,19 +2312,28 @@ dependencies = [ ] [[package]] -name = "ryu" -version = "1.0.9" +name = "rustls-pemfile" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +dependencies = [ + "base64 0.13.0", +] + +[[package]] +name = "ryu" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] [[package]] @@ -2381,18 +2377,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", @@ -2401,9 +2397,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", @@ -2424,9 +2420,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" dependencies = [ "indexmap", "ryu", @@ -2526,7 +2522,7 @@ checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", "fs2", "fxhash", "libc", @@ -2585,13 +2581,13 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.91" +version = "1.0.98" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2629,18 +2625,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -2712,19 +2708,20 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2737,9 +2734,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.17.0" +version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", @@ -2756,9 +2753,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2767,9 +2764,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.3" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -2790,9 +2787,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite", @@ -2801,9 +2798,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", @@ -2815,25 +2812,24 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "pin-project", "pin-project-lite", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -2841,9 +2837,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.2.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" dependencies = [ "async-compression", "bitflags", @@ -2870,15 +2866,15 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.32" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if 1.0.0", "log", @@ -2889,9 +2885,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", @@ -2900,11 +2896,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90442985ee2f57c9e1b548ee72ae842f4a9a20e3f417cc38dbc5dc684d9bb4ee" +checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -2921,9 +2917,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", @@ -3021,18 +3017,24 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "uncased" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" +checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" @@ -3045,15 +3047,9 @@ dependencies = [ [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "unindent" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "unsigned-varint" @@ -3085,7 +3081,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -3124,9 +3120,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -3136,9 +3132,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3146,9 +3142,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -3161,9 +3157,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3173,9 +3169,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = 
"c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3183,9 +3179,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", @@ -3196,15 +3192,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" dependencies = [ "js-sys", "wasm-bindgen", @@ -3222,9 +3218,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e" +checksum = "9c97e489d8f836838d497091de568cf16b117486d529ec5579233521065bd5e4" [[package]] name = "widestring" @@ -3260,6 +3256,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.6.2" diff --git a/Cargo.toml b/Cargo.toml index 10be750..f150c4e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,10 +14,10 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.5.0", default-features = false, features = ["headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.3.3", features = ["tls-rustls"] } -tower = { 
version = "0.4.11", features = ["util"] } -tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } +axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum-server = { version = "0.4.0", features = ["tls-rustls"] } +tower = { version = "0.4.8", features = ["util"] } +tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -28,19 +28,19 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318c # Async runtime and utilities tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently -sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } +sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.2" , optional = true, features = ["background_ops"] } +persy = { version = "1.0.0", optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" http = "0.2.4" # Used to find data directory for default db path -directories = "3.0.2" +directories = "4.0.0" # Used for ruma wrapper -serde_json = { version = "1.0.70", features = ["raw_value"] } +serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.20" +serde_yaml = "0.8.21" # Used for pdu definition serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers @@ -50,7 +50,7 @@ rust-argon2 = "0.8.3" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type -thiserror = "1.0.28" +thiserror = "1.0.29" # Used to generate thumbnails for images image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key @@ -64,8 +64,8 @@ regex = "1.5.4" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements -tracing = { version = "0.1.26", features = [] } -tracing-subscriber = "0.2.20" +tracing = { version = "0.1.27", features = [] } +tracing-subscriber = "0.2.22" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } @@ -83,8 +83,8 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing -clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } -futures-util = { version = "0.3.19", default-features = false } +clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] } +futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.6", 
features = ["env", "toml"] } From f6183e457d3c7743d916cb51ab1441c1e9005643 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Sat, 2 Apr 2022 14:00:19 +0200 Subject: [PATCH 1114/1727] Implement command to deactivate user from admin channel Use `leave_room` in `leave_all_rooms` WIP: Add command to delete a list of users also implements a flag to prevent the user from being removed from their joined rooms. Report user deactivation failure reason Don't send leave events by default when mass deactivating user accounts Don't stop leaving rooms if an error was encountered WIP: Rename command, make flags consistent, don't deactivate admin accounts. Accounts should be deactivated as fast as possible and removing users from joined groups is completed afterwards. Fix admin safety logic, improve command output Continue leaving rooms if a room_id is invalid Ignore errors from leave_room Add notice to the list-local-users command Output form list-local-users can be used directly without modification with the deactivate-all command Only get mutex lock for admin room when sending message --- src/client_server/account.rs | 53 +------------ src/database/admin.rs | 142 ++++++++++++++++++++++++++++++++--- src/database/rooms.rs | 21 ++++++ 3 files changed, 156 insertions(+), 60 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 984b1ba..dc0782d 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -4,7 +4,7 @@ use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ database::{admin::make_user_admin, DatabaseGuard}, pdu::PduBuilder, - utils, Error, Result, Ruma, + utils, Database, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -398,55 +398,8 @@ pub async fn deactivate_route( return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - // Leave all joined rooms and reject all invitations - // TODO: work over federation invites - let all_rooms = db - .rooms - .rooms_joined(sender_user) - .chain( - db.rooms - .rooms_invited(sender_user) - .map(|t| t.map(|(r, _)| r)), - ) - .collect::>(); - - for room_id in all_rooms { - let room_id = room_id?; - let event = RoomMemberEventContent { - membership: MembershipState::Leave, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &room_id, - &db, - &state_lock, - )?; - } + // Make the user leave all rooms before deactivation + db.rooms.leave_all_rooms(&sender_user, &db).await?; // Remove devices and mark account as deactivated db.users.deactivate_account(sender_user)?; diff --git a/src/database/admin.rs b/src/database/admin.rs index 5a0c28a..328c99c 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -101,6 +101,12 @@ impl Admin { tokio::select! 
{ Some(event) = receiver.recv() => { let guard = db.read().await; + + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(&*guard, room_message).await + }; + let mutex_state = Arc::clone( guard.globals .roomid_mutex_state @@ -109,18 +115,10 @@ impl Admin { .entry(conduit_room.clone()) .or_default(), ); + let state_lock = mutex_state.lock().await; - match event { - AdminRoomEvent::SendMessage(content) => { - send_message(content, guard, &state_lock); - } - AdminRoomEvent::ProcessMessage(room_message) => { - let reply_message = process_admin_message(&*guard, room_message).await; - - send_message(reply_message, guard, &state_lock); - } - } + send_message(message_content, guard, &state_lock); drop(state_lock); } @@ -240,6 +238,39 @@ enum AdminCommand { /// List all rooms we are currently handling an incoming pdu from IncomingFederation, + /// Deactivate a user + /// + /// User will be removed from all rooms by default. + /// This behaviour can be overridden with the --no-leave-rooms flag. + DeactivateUser { + #[clap(short, long)] + leave_rooms: bool, + user_id: Box, + }, + + #[clap(verbatim_doc_comment)] + /// Deactivate a list of users + /// + /// Recommended to use in conjunction with list-local-users. + /// + /// Users will not be removed from joined rooms by default. + /// Can be overridden with --leave-rooms flag. + /// Removing a mass amount of users from a room may cause a significant amount of leave events. + /// The time to leave rooms may depend significantly on joined rooms and servers. + /// + /// [commandbody] + /// # ``` + /// # User list here + /// # ``` + DeactivateAll { + #[clap(short, long)] + /// Remove users from their joined rooms + leave_rooms: bool, + #[clap(short, long)] + /// Also deactivate admin accounts + force: bool, + }, + /// Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) @@ -603,6 +634,97 @@ async fn process_admin_command( db.rooms.disabledroomids.remove(room_id.as_bytes())?; RoomMessageEventContent::text_plain("Room enabled.") } + AdminCommand::DeactivateUser { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + if db.users.exists(&user_id)? 
{ + RoomMessageEventContent::text_plain(format!( + "Making {} leave all rooms before deactivation...", + user_id + )); + + db.users.deactivate_account(&user_id)?; + + if leave_rooms { + db.rooms.leave_all_rooms(&user_id, &db).await?; + } + + RoomMessageEventContent::text_plain(format!( + "User {} has been deactivated", + user_id + )) + } else { + RoomMessageEventContent::text_plain(format!( + "User {} doesn't exist on this server", + user_id + )) + } + } + AdminCommand::DeactivateAll { leave_rooms, force } => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(_) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{} is not a valid username", + username + ))) + } + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| { + match db.users.is_admin(user_id, &db.rooms, &db.globals) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, + } + }) + } + + for &user_id in &user_ids { + match db.users.deactivate_account(user_id) { + Ok(_) => deactivation_count += 1, + Err(_) => {} + } + } + + if leave_rooms { + for &user_id in &user_ids { + let _ = db.rooms.leave_all_rooms(user_id, &db).await; + } + } + + if admins.is_empty() { + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.", + deactivation_count + )) + } else { + RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } }; Ok(reply_message_content) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7b3b750..4ad815e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2569,6 +2569,27 @@ impl Rooms { } } + // Make a user leave all their joined rooms + #[tracing::instrument(skip(self, db))] + pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { + let all_rooms = db + .rooms + .rooms_joined(user_id) + .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; + + let _ = self.leave_room(user_id, &room_id, db).await; + } + + Ok(()) + } + #[tracing::instrument(skip(self, db))] pub async fn leave_room( &self, From 1c31f7905f4781be5dd99951a78508c9d3473636 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Sun, 19 Jun 2022 18:53:12 +0200 Subject: [PATCH 1115/1727] Update command comment to coincide with the default action --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 328c99c..6f418ea 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -240,8 +240,8 @@ enum AdminCommand { /// Deactivate a user /// - /// User will be removed from all rooms by default. - /// This behaviour can be overridden with the --no-leave-rooms flag. + /// User will not be removed from all rooms by default. 
+ /// Use --leave-rooms to force the user to leave all rooms DeactivateUser { #[clap(short, long)] leave_rooms: bool, From 40eebbd9d86f3ac8c0f36924c1c38248cbbfb3af Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 22 Jun 2022 22:14:53 +0000 Subject: [PATCH 1116/1727] feat(ci): Split clippy into own fallible job For some reason, the clippy build does not work. This change allows the cargo:test job to still succeed and the pipeline to pass --- .gitlab-ci.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 380332b..eb7a96f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -250,17 +250,30 @@ docker:tags:dockerhub: test:cargo: extends: .test-shared-settings before_script: - - rustup component add clippy # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: junit: report.xml + + +test:clippy: + extends: .test-shared-settings + allow_failure: true + before_script: + - rustup component add clippy + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + script: + - rustc --version && cargo --version # Print version info for debugging + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: codequality: gl-code-quality-report.json test:format: From 49bd75b8563db181517eb97679ca8349177eb102 Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 23 Jun 2022 06:58:34 +0000 Subject: [PATCH 1117/1727] Lightning bolt optional --- conduit-example.toml | 3 +++ src/client_server/account.rs | 22 +++++++++------------- src/config.rs | 6 ++++++ src/database/admin.rs | 8 +++++++- src/database/globals.rs | 4 ++++ 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 362f7e7..5eed070 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -40,6 +40,9 @@ allow_registration = true allow_federation = true +# Enable the display name lightning bolt on registration. 
+enable_lightning_bolt = true + trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time diff --git a/src/client_server/account.rs b/src/client_server/account.rs index dc0782d..1484bf6 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,10 +1,7 @@ -use std::sync::Arc; - use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ database::{admin::make_user_admin, DatabaseGuard}, - pdu::PduBuilder, - utils, Database, Error, Result, Ruma, + utils, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -15,16 +12,9 @@ use ruma::{ error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - events::{ - room::{ - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - }, - GlobalAccountDataEventType, RoomEventType, - }, + events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, push, UserId, }; -use serde_json::value::to_raw_value; use tracing::{info, warn}; use register::RegistrationKind; @@ -181,7 +171,13 @@ pub async fn register_route( db.users.create(&user_id, password)?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if db.globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + db.users .set_displayname(&user_id, Some(displayname.clone()))?; diff --git a/src/config.rs b/src/config.rs index 29af883..7d81d0f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -26,6 +26,8 @@ pub struct Config { pub database_path: String, #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, + #[serde(default = "true_fn")] + pub enable_lightning_bolt: bool, #[serde(default = "default_conduit_cache_capacity_modifier")] pub conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] @@ -135,6 +137,10 @@ impl fmt::Display for Config { &self.max_concurrent_requests.to_string(), ), ("Allow registration", &self.allow_registration.to_string()), + ( + "Enabled lightning bolt", + &self.enable_lightning_bolt.to_string(), + ), ("Allow encryption", &self.allow_encryption.to_string()), ("Allow federation", &self.allow_federation.to_string()), ("Allow room creation", &self.allow_room_creation.to_string()), diff --git a/src/database/admin.rs b/src/database/admin.rs index 6f418ea..edc7691 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -598,7 +598,13 @@ async fn process_admin_command( db.users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if db.globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + db.users .set_displayname(&user_id, Some(displayname.clone()))?; diff --git a/src/database/globals.rs b/src/database/globals.rs index 7e09128..7d7b7fd 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -267,6 +267,10 @@ impl Globals { self.config.default_room_version.clone() } + pub fn enable_lightning_bolt(&self) -> bool { + self.config.enable_lightning_bolt + } + pub fn trusted_servers(&self) -> &[Box] { &self.config.trusted_servers } From 35fd732b047f6c7a848e2065c7e8310ca23041db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: 
Thu, 23 Jun 2022 09:04:19 +0200 Subject: [PATCH 1118/1727] Bump version to 0.4 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d8d791f..c4700ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -406,7 +406,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.3.0-next" +version = "0.4.0" dependencies = [ "axum", "axum-server", diff --git a/Cargo.toml b/Cargo.toml index f150c4e..a8556a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.3.0-next" +version = "0.4.0" rust-version = "1.56" edition = "2021" From 4dc14e15803035001a045db289fd847aaf63a0e2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:34 +0200 Subject: [PATCH 1119/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/directory.rs --- src/{database/rooms.rs => service/rooms/directory.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/directory.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/directory.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/directory.rs From 27e2f0d5458d886a86324af267628dae05bd288e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:34 +0200 Subject: [PATCH 1120/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/alias.rs --- src/{database/rooms.rs => service/rooms/alias.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/alias.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/alias.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/alias.rs From bd7b49b098caf1aaf42649f69a3f57120ea57834 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 1121/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/mod.rs --- src/{database/rooms.rs => service/rooms/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/mod.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/mod.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/mod.rs From baa8224cceab6944b1f2c44cd3f13d8dcd40e71e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 1122/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/metadata.rs --- src/{database/rooms.rs => service/rooms/metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/metadata.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/metadata.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/metadata.rs From 249440115bfc81dd37aac4ea066f805bd451c4d6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 1123/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/lazy_loading.rs --- src/{database/rooms.rs => service/rooms/lazy_loading.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => 
service/rooms/lazy_loading.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/lazy_loading.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/lazy_loading.rs From 2dbfbd45a25f59ae61b114e26af6059c388d9e5e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 1124/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/short.rs --- src/{database/rooms.rs => service/rooms/short.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/short.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/short.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/short.rs From 8dffdadfd3cde3aa928a3d658a50386f7d707cc0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 1125/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/search.rs --- src/{database/rooms.rs => service/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/search.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/search.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/search.rs From 8ed79a00fd23a295794fc7fbc89501829c3a482f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 1126/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/pdu_metadata.rs --- src/{database/rooms.rs => service/rooms/pdu_metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/pdu_metadata.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/pdu_metadata.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/pdu_metadata.rs From 54bf91b76e9095f8d9b416c39d7ad8236a5f9e6f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 1127/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/outlier.rs --- src/{database/rooms.rs => service/rooms/outlier.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/outlier.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/outlier.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/outlier.rs From d05b84d0f595da41330918f2ba3030dbc2f402b7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 1128/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state_compressor.rs --- src/{database/rooms.rs => service/rooms/state_compressor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/state_compressor.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state_compressor.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/state_compressor.rs From 751be39376c1c147cb8e6f7a0facf67814397908 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 1129/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state_cache.rs --- src/{database/rooms.rs => service/rooms/state_cache.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 
src/{database/rooms.rs => service/rooms/state_cache.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state_cache.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/state_cache.rs From 64a022a4d2527170a675be3dd0f2964c6da65aec Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 1130/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state.rs --- src/{database/rooms.rs => service/rooms/state.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/state.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/state.rs From e22f5fef1f6623842ab99617c1231276b52a1633 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:38 +0200 Subject: [PATCH 1131/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/user.rs --- src/{database/rooms.rs => service/rooms/user.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/user.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/user.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/user.rs From 7989c7cdda148bc804238522085489333e2c849e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:38 +0200 Subject: [PATCH 1132/1727] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/timeline.rs --- src/{database/rooms.rs => service/rooms/timeline.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/timeline.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/timeline.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/timeline.rs From 025b64befc5872aa7ffcc0ba348005e326d347d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Jun 2022 22:56:14 +0200 Subject: [PATCH 1133/1727] refactor: renames and split room.rs --- src/{ => api}/appservice_server.rs | 0 src/{ => api}/client_server/account.rs | 0 src/{ => api}/client_server/alias.rs | 0 src/{ => api}/client_server/backup.rs | 0 src/{ => api}/client_server/capabilities.rs | 0 src/{ => api}/client_server/config.rs | 0 src/{ => api}/client_server/context.rs | 0 src/{ => api}/client_server/device.rs | 0 src/{ => api}/client_server/directory.rs | 0 src/{ => api}/client_server/filter.rs | 0 src/{ => api}/client_server/keys.rs | 0 src/{ => api}/client_server/media.rs | 0 src/{ => api}/client_server/membership.rs | 206 + src/{ => api}/client_server/message.rs | 0 src/{ => api}/client_server/mod.rs | 0 src/{ => api}/client_server/presence.rs | 0 src/{ => api}/client_server/profile.rs | 0 src/{ => api}/client_server/push.rs | 0 src/{ => api}/client_server/read_marker.rs | 0 src/{ => api}/client_server/redact.rs | 0 src/{ => api}/client_server/report.rs | 0 src/{ => api}/client_server/room.rs | 21 + src/{ => api}/client_server/search.rs | 0 src/{ => api}/client_server/session.rs | 0 src/{ => api}/client_server/state.rs | 0 src/{ => api}/client_server/sync.rs | 0 src/{ => api}/client_server/tag.rs | 0 src/{ => api}/client_server/thirdparty.rs | 0 src/{ => api}/client_server/to_device.rs | 0 src/{ => api}/client_server/typing.rs | 0 src/{ => api}/client_server/unversioned.rs | 0 src/{ => api}/client_server/user_directory.rs | 0 
src/{ => api}/client_server/voip.rs | 0 src/{ => api}/ruma_wrapper/axum.rs | 0 .../ruma_wrapper/mod.rs} | 0 src/{ => api}/server_server.rs | 0 src/{config.rs => config/mod.rs} | 0 src/{database.rs => database/mod.rs} | 0 src/{database => service}/account_data.rs | 0 src/{database => service}/admin.rs | 0 src/{database => service}/appservice.rs | 0 src/{database => service}/globals.rs | 0 src/{database => service}/key_backups.rs | 0 src/{database => service}/media.rs | 0 src/{ => service}/pdu.rs | 0 src/{database => service}/pusher.rs | 0 src/service/rooms/alias.rs | 3437 ---------------- src/service/rooms/directory.rs | 3474 ---------------- src/{database => service}/rooms/edus.rs | 0 src/service/rooms/lazy_loading.rs | 3412 ---------------- src/service/rooms/metadata.rs | 3459 ---------------- src/service/rooms/mod.rs | 3287 ---------------- src/service/rooms/outlier.rs | 3483 ----------------- src/service/rooms/pdu_metadata.rs | 3472 ---------------- src/service/rooms/search.rs | 3453 ---------------- src/service/rooms/short.rs | 3331 +--------------- src/service/rooms/state.rs | 2994 -------------- src/service/rooms/state_cache.rs | 2786 ------------- src/service/rooms/state_compressor.rs | 3202 --------------- src/service/rooms/timeline.rs | 2673 +------------ src/service/rooms/user.rs | 3389 ---------------- src/{database => service}/sending.rs | 0 src/{database => service}/transaction_ids.rs | 0 src/{database => service}/uiaa.rs | 0 src/{database => service}/users.rs | 0 src/{ => utils}/error.rs | 0 src/{ => utils}/utils.rs | 0 67 files changed, 278 insertions(+), 45801 deletions(-) rename src/{ => api}/appservice_server.rs (100%) rename src/{ => api}/client_server/account.rs (100%) rename src/{ => api}/client_server/alias.rs (100%) rename src/{ => api}/client_server/backup.rs (100%) rename src/{ => api}/client_server/capabilities.rs (100%) rename src/{ => api}/client_server/config.rs (100%) rename src/{ => api}/client_server/context.rs (100%) rename src/{ => api}/client_server/device.rs (100%) rename src/{ => api}/client_server/directory.rs (100%) rename src/{ => api}/client_server/filter.rs (100%) rename src/{ => api}/client_server/keys.rs (100%) rename src/{ => api}/client_server/media.rs (100%) rename src/{ => api}/client_server/membership.rs (83%) rename src/{ => api}/client_server/message.rs (100%) rename src/{ => api}/client_server/mod.rs (100%) rename src/{ => api}/client_server/presence.rs (100%) rename src/{ => api}/client_server/profile.rs (100%) rename src/{ => api}/client_server/push.rs (100%) rename src/{ => api}/client_server/read_marker.rs (100%) rename src/{ => api}/client_server/redact.rs (100%) rename src/{ => api}/client_server/report.rs (100%) rename src/{ => api}/client_server/room.rs (96%) rename src/{ => api}/client_server/search.rs (100%) rename src/{ => api}/client_server/session.rs (100%) rename src/{ => api}/client_server/state.rs (100%) rename src/{ => api}/client_server/sync.rs (100%) rename src/{ => api}/client_server/tag.rs (100%) rename src/{ => api}/client_server/thirdparty.rs (100%) rename src/{ => api}/client_server/to_device.rs (100%) rename src/{ => api}/client_server/typing.rs (100%) rename src/{ => api}/client_server/unversioned.rs (100%) rename src/{ => api}/client_server/user_directory.rs (100%) rename src/{ => api}/client_server/voip.rs (100%) rename src/{ => api}/ruma_wrapper/axum.rs (100%) rename src/{ruma_wrapper.rs => api/ruma_wrapper/mod.rs} (100%) rename src/{ => api}/server_server.rs (100%) rename src/{config.rs => config/mod.rs} 
(100%) rename src/{database.rs => database/mod.rs} (100%) rename src/{database => service}/account_data.rs (100%) rename src/{database => service}/admin.rs (100%) rename src/{database => service}/appservice.rs (100%) rename src/{database => service}/globals.rs (100%) rename src/{database => service}/key_backups.rs (100%) rename src/{database => service}/media.rs (100%) rename src/{ => service}/pdu.rs (100%) rename src/{database => service}/pusher.rs (100%) rename src/{database => service}/rooms/edus.rs (100%) rename src/{database => service}/sending.rs (100%) rename src/{database => service}/transaction_ids.rs (100%) rename src/{database => service}/uiaa.rs (100%) rename src/{database => service}/users.rs (100%) rename src/{ => utils}/error.rs (100%) rename src/{ => utils}/utils.rs (100%) diff --git a/src/appservice_server.rs b/src/api/appservice_server.rs similarity index 100% rename from src/appservice_server.rs rename to src/api/appservice_server.rs diff --git a/src/client_server/account.rs b/src/api/client_server/account.rs similarity index 100% rename from src/client_server/account.rs rename to src/api/client_server/account.rs diff --git a/src/client_server/alias.rs b/src/api/client_server/alias.rs similarity index 100% rename from src/client_server/alias.rs rename to src/api/client_server/alias.rs diff --git a/src/client_server/backup.rs b/src/api/client_server/backup.rs similarity index 100% rename from src/client_server/backup.rs rename to src/api/client_server/backup.rs diff --git a/src/client_server/capabilities.rs b/src/api/client_server/capabilities.rs similarity index 100% rename from src/client_server/capabilities.rs rename to src/api/client_server/capabilities.rs diff --git a/src/client_server/config.rs b/src/api/client_server/config.rs similarity index 100% rename from src/client_server/config.rs rename to src/api/client_server/config.rs diff --git a/src/client_server/context.rs b/src/api/client_server/context.rs similarity index 100% rename from src/client_server/context.rs rename to src/api/client_server/context.rs diff --git a/src/client_server/device.rs b/src/api/client_server/device.rs similarity index 100% rename from src/client_server/device.rs rename to src/api/client_server/device.rs diff --git a/src/client_server/directory.rs b/src/api/client_server/directory.rs similarity index 100% rename from src/client_server/directory.rs rename to src/api/client_server/directory.rs diff --git a/src/client_server/filter.rs b/src/api/client_server/filter.rs similarity index 100% rename from src/client_server/filter.rs rename to src/api/client_server/filter.rs diff --git a/src/client_server/keys.rs b/src/api/client_server/keys.rs similarity index 100% rename from src/client_server/keys.rs rename to src/api/client_server/keys.rs diff --git a/src/client_server/media.rs b/src/api/client_server/media.rs similarity index 100% rename from src/client_server/media.rs rename to src/api/client_server/media.rs diff --git a/src/client_server/membership.rs b/src/api/client_server/membership.rs similarity index 83% rename from src/client_server/membership.rs rename to src/api/client_server/membership.rs index a1b616b..4dda11a 100644 --- a/src/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1080,3 +1080,209 @@ pub(crate) async fn invite_helper<'a>( Ok(()) } + + // Make a user leave all their joined rooms + #[tracing::instrument(skip(self, db))] + pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { + let all_rooms = db + .rooms + 
.rooms_joined(user_id) + .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::<Vec<_>>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; + + let _ = self.leave_room(user_id, &room_id, db).await; + } + + Ok(()) + } + + #[tracing::instrument(skip(self, db))] + pub async fn leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + // Ask a remote server if we don't have this room + if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = self + .invite_state(user_id, room_id)? + .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + self.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + db, + true, + )?; + } else { + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let mut event: RoomMemberEventContent = serde_json::from_str( + self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + + self.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + db, + &state_lock, + )?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self, db))] + async fn remote_leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = db + .rooms + .invite_state(user_id, room_id)?
+ .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers: HashSet<_> = invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(); + + for remote_server in servers { + let make_leave_response = db + .sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version_id = match make_leave_response.room_version { + Some(version) if self.is_supported_version(&db, &version) => version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = + serde_json::from_str::<CanonicalJsonObject>(make_leave_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_leave event json received from server."), + )?; + + // TODO: Is origin needed? + leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + db.sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) + } + diff --git a/src/client_server/message.rs b/src/api/client_server/message.rs similarity index 100% rename from src/client_server/message.rs rename to src/api/client_server/message.rs diff --git a/src/client_server/mod.rs b/src/api/client_server/mod.rs similarity index 100% rename from src/client_server/mod.rs rename to src/api/client_server/mod.rs diff --git a/src/client_server/presence.rs b/src/api/client_server/presence.rs similarity index 100% rename from src/client_server/presence.rs rename to src/api/client_server/presence.rs diff --git a/src/client_server/profile.rs b/src/api/client_server/profile.rs similarity index 100% rename from
src/client_server/profile.rs rename to src/api/client_server/profile.rs diff --git a/src/client_server/push.rs b/src/api/client_server/push.rs similarity index 100% rename from src/client_server/push.rs rename to src/api/client_server/push.rs diff --git a/src/client_server/read_marker.rs b/src/api/client_server/read_marker.rs similarity index 100% rename from src/client_server/read_marker.rs rename to src/api/client_server/read_marker.rs diff --git a/src/client_server/redact.rs b/src/api/client_server/redact.rs similarity index 100% rename from src/client_server/redact.rs rename to src/api/client_server/redact.rs diff --git a/src/client_server/report.rs b/src/api/client_server/report.rs similarity index 100% rename from src/client_server/report.rs rename to src/api/client_server/report.rs diff --git a/src/client_server/room.rs b/src/api/client_server/room.rs similarity index 96% rename from src/client_server/room.rs rename to src/api/client_server/room.rs index a5b7970..5ae7224 100644 --- a/src/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -707,3 +707,24 @@ pub async fn upgrade_room_route( // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } + + /// Returns the room's version. + #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result<RoomVersionId> { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option<RoomCreateEventContent> = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) + } + diff --git a/src/client_server/search.rs b/src/api/client_server/search.rs similarity index 100% rename from src/client_server/search.rs rename to src/api/client_server/search.rs diff --git a/src/client_server/session.rs b/src/api/client_server/session.rs similarity index 100% rename from src/client_server/session.rs rename to src/api/client_server/session.rs diff --git a/src/client_server/state.rs b/src/api/client_server/state.rs similarity index 100% rename from src/client_server/state.rs rename to src/api/client_server/state.rs diff --git a/src/client_server/sync.rs b/src/api/client_server/sync.rs similarity index 100% rename from src/client_server/sync.rs rename to src/api/client_server/sync.rs diff --git a/src/client_server/tag.rs b/src/api/client_server/tag.rs similarity index 100% rename from src/client_server/tag.rs rename to src/api/client_server/tag.rs diff --git a/src/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs similarity index 100% rename from src/client_server/thirdparty.rs rename to src/api/client_server/thirdparty.rs diff --git a/src/client_server/to_device.rs b/src/api/client_server/to_device.rs similarity index 100% rename from src/client_server/to_device.rs rename to src/api/client_server/to_device.rs diff --git a/src/client_server/typing.rs b/src/api/client_server/typing.rs similarity index 100% rename from src/client_server/typing.rs rename to src/api/client_server/typing.rs diff --git a/src/client_server/unversioned.rs b/src/api/client_server/unversioned.rs similarity index 100% rename from src/client_server/unversioned.rs rename to src/api/client_server/unversioned.rs diff --git
a/src/client_server/user_directory.rs b/src/api/client_server/user_directory.rs similarity index 100% rename from src/client_server/user_directory.rs rename to src/api/client_server/user_directory.rs diff --git a/src/client_server/voip.rs b/src/api/client_server/voip.rs similarity index 100% rename from src/client_server/voip.rs rename to src/api/client_server/voip.rs diff --git a/src/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs similarity index 100% rename from src/ruma_wrapper/axum.rs rename to src/api/ruma_wrapper/axum.rs diff --git a/src/ruma_wrapper.rs b/src/api/ruma_wrapper/mod.rs similarity index 100% rename from src/ruma_wrapper.rs rename to src/api/ruma_wrapper/mod.rs diff --git a/src/server_server.rs b/src/api/server_server.rs similarity index 100% rename from src/server_server.rs rename to src/api/server_server.rs diff --git a/src/config.rs b/src/config/mod.rs similarity index 100% rename from src/config.rs rename to src/config/mod.rs diff --git a/src/database.rs b/src/database/mod.rs similarity index 100% rename from src/database.rs rename to src/database/mod.rs diff --git a/src/database/account_data.rs b/src/service/account_data.rs similarity index 100% rename from src/database/account_data.rs rename to src/service/account_data.rs diff --git a/src/database/admin.rs b/src/service/admin.rs similarity index 100% rename from src/database/admin.rs rename to src/service/admin.rs diff --git a/src/database/appservice.rs b/src/service/appservice.rs similarity index 100% rename from src/database/appservice.rs rename to src/service/appservice.rs diff --git a/src/database/globals.rs b/src/service/globals.rs similarity index 100% rename from src/database/globals.rs rename to src/service/globals.rs diff --git a/src/database/key_backups.rs b/src/service/key_backups.rs similarity index 100% rename from src/database/key_backups.rs rename to src/service/key_backups.rs diff --git a/src/database/media.rs b/src/service/media.rs similarity index 100% rename from src/database/media.rs rename to src/service/media.rs diff --git a/src/pdu.rs b/src/service/pdu.rs similarity index 100% rename from src/pdu.rs rename to src/service/pdu.rs diff --git a/src/database/pusher.rs b/src/service/pusher.rs similarity index 100% rename from src/database/pusher.rs rename to src/service/pusher.rs diff --git a/src/service/rooms/alias.rs b/src/service/rooms/alias.rs index 4ad815e..393ad67 100644 --- a/src/service/rooms/alias.rs +++ b/src/service/rooms/alias.rs @@ -1,2795 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, 
- mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. - pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. 
- pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. 
- pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. 
- /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
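// Sketch of the diff step performed when a new state snapshot is stored (as in
// `set_event_state` above): the snapshot is saved as (added, removed) relative to
// the parent layer's full state. Compressed state events are simplified to u64 here;
// the real code uses fixed-size byte arrays.
use std::collections::HashSet;

fn state_diff(parent: &HashSet<u64>, new_state: &HashSet<u64>) -> (HashSet<u64>, HashSet<u64>) {
    let added: HashSet<u64> = new_state.difference(parent).copied().collect();
    let removed: HashSet<u64> = parent.difference(new_state).copied().collect();
    (added, removed)
}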
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
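// Sketch of the depth rule applied in `build_and_append_pdu` above: a new event's
// depth is one more than the largest depth among its prev_events (so the first
// event in a room gets depth 1). `next_depth` is an illustrative helper only.
fn next_depth(prev_event_depths: &[u64]) -> u64 {
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}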
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
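// Sketch of the appservice namespace check used above, assuming the registration is
// a serde_yaml::Value with `namespaces.users[*].regex` entries as in the code;
// `interested_in_user` is an illustrative helper, not part of the codebase.
use regex::Regex;
use serde_yaml::Value;

fn interested_in_user(namespaces: &Value, user_id: &str) -> bool {
    namespaces
        .get("users")
        .and_then(|users| users.as_sequence())
        .map_or(false, |users| {
            users
                .iter()
                .filter_map(|u| Regex::new(u.get("regex")?.as_str()?).ok())
                .any(|r| r.is_match(user_id))
        })
}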
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } #[tracing::instrument(skip(self, globals))] pub fn set_alias( @@ -2856,648 +64,3 @@ impl Rooms { }) } - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). 
- #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? 
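// Sketch of the key decoding used by the member/server iterators in this file:
// keys have the form `prefix ++ 0xff ++ component`, so the component is recovered
// by taking the bytes after the last 0xff separator. `last_component` is an
// illustrative helper only.
fn last_component(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element")
}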
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. 
- #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
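// Sketch of the on-disk encoding used by the auth-chain cache above: a chain of
// shorteventids is stored as the concatenation of their big-endian u64 bytes and
// decoded again with `chunks_exact`. Helper names are illustrative only.
use std::convert::TryInto;
use std::mem::size_of;

fn encode_chain(chain: &[u64]) -> Vec<u8> {
    chain.iter().flat_map(|id| id.to_be_bytes()).collect()
}

fn decode_chain(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}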
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/directory.rs b/src/service/rooms/directory.rs index 4ad815e..8be7bd5 100644 --- a/src/service/rooms/directory.rs +++ b/src/service/rooms/directory.rs @@ -1,2860 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
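// Sketch of the compressed state event layout assumed by `state_get_id` above and
// suggested by the `CompressedStateEvent` alias: 16 bytes, the big-endian
// shortstatekey followed by the big-endian shorteventid. Helper names are
// illustrative only; the real parse step also resolves the shorteventid to an EventId.
use std::convert::TryInto;

fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn split_compressed(compressed: [u8; 16]) -> (u64, u64) {
    let key = u64::from_be_bytes(compressed[..8].try_into().expect("8 bytes"));
    let event = u64::from_be_bytes(compressed[8..].try_into().expect("8 bytes"));
    (key, event)
}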
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
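
A minimal, standalone sketch (not conduit code) of the hashing scheme `calculate_hash` above implements: the supplied byte slices are joined with a single 0xff separator and digested with SHA-256 through `ring`'s digest API, the same `digest::digest(&digest::SHA256, …)` call used above. The `state_hash` helper name is illustrative only.

    use ring::digest;

    /// Joins the given byte slices with a single 0xff separator and returns the
    /// SHA-256 digest; identical inputs always yield the same StateHashId.
    fn state_hash(entries: &[&[u8]]) -> Vec<u8> {
        let joined = entries.join(&0xff);
        digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
    }

    fn main() {
        let entries: [&[u8]; 2] = [b"$event_a:example.org", b"$event_b:example.org"];
        println!("{:02x?}", state_hash(&entries));
    }

Because the hash depends only on the supplied entries, `force_state` below can detect that nothing actually changed by comparing the new shortstatehash against the previous one and returning early.
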
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
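
The trees handled by `replace_pdu_leaves` below (and most per-room trees in this file) key their entries as `room_id ++ 0xff ++ suffix`, so listing a room's leaves is a prefix scan and replacing them means removing everything under the prefix before inserting the new set. A standalone sketch with a `BTreeMap` standing in for the `roomid_pduleaves` tree; the helper names are illustrative:

    use std::collections::BTreeMap;

    /// Builds `room_id ++ 0xff ++ event_id`. 0xff can never occur inside a UTF-8
    /// identifier, so it is a safe separator between the two parts.
    fn leaf_key(room_id: &str, event_id: &str) -> Vec<u8> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(event_id.as_bytes());
        key
    }

    fn main() {
        // Stand-in for the `roomid_pduleaves` tree.
        let mut tree: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
        tree.insert(leaf_key("!room:example.org", "$old:example.org"), Vec::new());
        tree.insert(leaf_key("!other:example.org", "$x:example.org"), Vec::new());

        // "scan_prefix": start at the prefix, stop once a key no longer matches it.
        let mut prefix = b"!room:example.org".to_vec();
        prefix.push(0xff);
        let leaves: Vec<Vec<u8>> = tree
            .range(prefix.clone()..)
            .take_while(|(k, _)| k.starts_with(&prefix))
            .map(|(k, _)| k.clone())
            .collect();
        assert_eq!(leaves.len(), 1);

        // Replacing the leaves = remove everything under the prefix, insert the new set.
        for key in leaves {
            tree.remove(&key);
        }
        tree.insert(leaf_key("!room:example.org", "$new:example.org"), Vec::new());
    }
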
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
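
Before the `append_to_state`/`append_pdu` calls just below, a note on the pdu id they produce: it is 8 bytes of big-endian shortroomid followed by 8 bytes of a big-endian global counter, so byte-wise key order in `pduid_pdu` matches insertion order within a room. That is the property `pdu_count`, `pdus_since` and `pdus_until` rely on. A standalone sketch; the function names are illustrative, not conduit API:

    use std::mem::size_of;

    /// 16-byte pdu id: big-endian shortroomid, then big-endian counter.
    fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut id = shortroomid.to_be_bytes().to_vec();
        id.extend_from_slice(&count.to_be_bytes());
        id
    }

    /// Reads the counter back out of the last 8 bytes, like `pdu_count` above.
    fn read_count(pdu_id: &[u8]) -> u64 {
        let mut tail = [0u8; size_of::<u64>()];
        tail.copy_from_slice(&pdu_id[pdu_id.len() - size_of::<u64>()..]);
        u64::from_be_bytes(tail)
    }

    fn main() {
        let earlier = pdu_id(7, 41);
        let later = pdu_id(7, 42);
        assert!(earlier < later); // byte order == chronological order within a room
        assert_eq!(read_count(&later), 42);
    }
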
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
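
Given that key layout, `pdus_since` and `pdus_until` below translate a numeric token into a concrete start key: forward iteration begins at `prefix ++ (since + 1)` so the event exactly at `since` is skipped, and backward iteration begins at `prefix ++ (until - 1)`. A standalone sketch of the forward case, with a `BTreeMap` standing in for `pduid_pdu`; names are illustrative:

    use std::collections::BTreeMap;

    fn key(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut k = shortroomid.to_be_bytes().to_vec();
        k.extend_from_slice(&count.to_be_bytes());
        k
    }

    fn main() {
        let mut pduid_pdu: BTreeMap<Vec<u8>, &str> = BTreeMap::new();
        pduid_pdu.insert(key(7, 10), "already synced");
        pduid_pdu.insert(key(7, 11), "new event");
        pduid_pdu.insert(key(8, 1), "different room");

        let prefix = 7u64.to_be_bytes().to_vec();
        let since = 10u64;

        // Start just after `since`, stop when the keys leave this room's prefix.
        let mut first_pdu_id = prefix.clone();
        first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());

        let new_events: Vec<&str> = pduid_pdu
            .range(first_pdu_id..)
            .take_while(|(k, _)| k.starts_with(&prefix))
            .map(|(_, v)| *v)
            .collect();

        assert_eq!(new_events, vec!["new event"]);
    }
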
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { @@ -2872,18 +15,6 @@ impl Rooms { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { @@ -2896,608 +27,3 @@ impl Rooms { }) } - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
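// The member/server iterators above all share one composite-key layout: two identifiers
// joined by a single 0xff byte, split back apart with `rsplit(|&b| b == 0xff)`.
// A minimal std-only sketch of that encode/decode round trip, assuming Matrix identifiers
// never contain a 0xff byte (they are ASCII-range strings); the IDs below are placeholders.
fn make_key(first: &str, second: &str) -> Vec<u8> {
    let mut key = first.as_bytes().to_vec();
    key.push(0xff); // separator byte, never part of an identifier
    key.extend_from_slice(second.as_bytes());
    key
}

fn split_key(key: &[u8]) -> Option<(&[u8], &[u8])> {
    let sep = key.iter().position(|&b| b == 0xff)?;
    Some((&key[..sep], &key[sep + 1..]))
}

#[test]
fn composite_key_roundtrip() {
    let key = make_key("!room:example.org", "@user:example.org");
    let (room, user) = split_key(&key).unwrap();
    assert_eq!(room, b"!room:example.org");
    assert_eq!(user, b"@user:example.org");
}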
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
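// The invite/left counters above are written with `to_be_bytes()`. Storing the global
// counter big-endian keeps byte-wise key ordering identical to numeric ordering, which is
// what the prefix scans and `iter_from(..., backwards)` calls depend on. A small std-only
// illustration; the concrete numbers are arbitrary.
#[test]
fn big_endian_counts_sort_numerically() {
    let counts: [u64; 3] = [1, 255, 256];
    let encoded: Vec<[u8; 8]> = counts.iter().map(|c| c.to_be_bytes()).collect();
    // Byte-wise (lexicographic) order matches numeric order...
    assert!(encoded.windows(2).all(|w| w[0] < w[1]));
    // ...which would not hold for little-endian: 255 < 256 numerically, but
    // [255, 0, ...] sorts after [0, 1, ...] byte-wise.
    assert!(255u64.to_le_bytes() > 256u64.to_le_bytes());
}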
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
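// The lazy-loading helpers above form a two-phase commit: `lazy_load_mark_sent` parks the
// member IDs included in a sync response under that response's count, and only
// `lazy_load_confirm_delivery` (called when the client returns with that token as `since`)
// persists them. A toy std-only model of the flow, with HashMap/HashSet standing in for the
// database trees and a single u64 token standing in for (user, device, room, count).
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct LazyLoadModel {
    waiting: HashMap<u64, HashSet<String>>, // pending, keyed by sync token
    sent: HashSet<String>,                  // stands in for the lazyloadedids tree
}

impl LazyLoadModel {
    fn mark_sent(&mut self, token: u64, users: HashSet<String>) {
        self.waiting.insert(token, users);
    }
    fn confirm_delivery(&mut self, token: u64) {
        if let Some(users) = self.waiting.remove(&token) {
            self.sent.extend(users);
        }
    }
}

#[test]
fn lazy_load_two_phase() {
    let mut ll = LazyLoadModel::default();
    ll.mark_sent(7, HashSet::from(["@alice:example.org".to_owned()]));
    assert!(!ll.sent.contains("@alice:example.org")); // not persisted yet
    ll.confirm_delivery(7); // client synced again with token 7
    assert!(ll.sent.contains("@alice:example.org"));
}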
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/database/rooms/edus.rs b/src/service/rooms/edus.rs similarity index 100% rename from src/database/rooms/edus.rs rename to src/service/rooms/edus.rs diff --git a/src/service/rooms/lazy_loading.rs b/src/service/rooms/lazy_loading.rs index 4ad815e..a402702 100644 --- a/src/service/rooms/lazy_loading.rs +++ b/src/service/rooms/lazy_loading.rs @@ -1,3395 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
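// `force_state` below, like the rest of the state handling, works on `CompressedStateEvent`s:
// 16-byte values that are simply `shortstatekey.to_be_bytes()` followed by
// `shorteventid.to_be_bytes()`, as `compress_state_event` / `parse_compressed_state_event`
// further down show. A minimal std-only sketch of that layout; the u64 values are made up.
use std::convert::TryInto;

type Compressed = [u8; 16];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(c: Compressed) -> (u64, u64) {
    (
        u64::from_be_bytes(c[..8].try_into().unwrap()),
        u64::from_be_bytes(c[8..].try_into().unwrap()),
    )
}

#[test]
fn compressed_state_event_roundtrip() {
    let c = compress(42, 7);
    assert_eq!(parse(c), (42, 7));
    // Because the shortstatekey comes first, `starts_with(&shortstatekey.to_be_bytes())`
    // can pick the entry for one (event type, state key) pair out of a full-state set,
    // which is exactly what `state_get_id` above relies on.
}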
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
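A small sketch of what the "prev_content" bookkeeping at the start of `append_pdu` above produces, using serde_json directly instead of CanonicalJsonValue; the event values are made up for illustration.

use serde_json::{json, Value};

fn add_prev_content(pdu: &mut Value, prev_state_content: Value) {
    // Ensure an "unsigned" object exists, then record the content of the state
    // event this one replaces so clients can render e.g. membership changes.
    let unsigned = pdu
        .as_object_mut()
        .expect("pdu is an object")
        .entry("unsigned")
        .or_insert_with(|| json!({}));
    if let Some(map) = unsigned.as_object_mut() {
        map.insert("prev_content".to_owned(), prev_state_content);
    }
}

fn main() {
    let mut pdu = json!({"type": "m.room.member", "content": {"membership": "leave"}});
    add_prev_content(&mut pdu, json!({"membership": "join"}));
    assert_eq!(pdu["unsigned"]["prev_content"]["membership"], "join");
}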
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
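A rough sketch of the per-room insert lock used above: the global counter is bumped and the pdu written while holding the room's mutex, so readers never observe a count whose pdu is missing. HashMap, Mutex and String stand in for the real database handles and event type; all names here are assumptions.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Timeline {
    counter: Mutex<u64>,
    pdus: Mutex<HashMap<u64, String>>,
    room_locks: Mutex<HashMap<String, Arc<Mutex<()>>>>,
}

impl Timeline {
    fn append(&self, room_id: &str, pdu: String) -> u64 {
        let room_lock = Arc::clone(
            self.room_locks
                .lock()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        );
        let _guard = room_lock.lock().unwrap(); // serialize appends per room

        let count = {
            let mut counter = self.counter.lock().unwrap();
            *counter += 1;
            *counter
        };
        self.pdus.lock().unwrap().insert(count, pdu);
        count
    }
}

fn main() {
    let t = Timeline {
        counter: Mutex::new(0),
        pdus: Mutex::new(HashMap::new()),
        room_locks: Mutex::new(HashMap::new()),
    };
    assert_eq!(t.append("!room:example.org", "{...}".to_owned()), 1);
}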
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
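A sketch of the tokenization the message-indexing branch above performs before writing to the `tokenids` tree: split the body on non-alphanumeric characters, drop empty and overlong tokens, lowercase, and key each word under the room's short id. The key layout shown is illustrative.

fn tokenize(body: &str) -> Vec<String> {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
        .collect()
}

fn token_key(shortroomid: u64, word: &str, pdu_id: &[u8]) -> Vec<u8> {
    // shortroomid ++ word ++ 0xff ++ pdu id, so a prefix scan per word finds all matches.
    let mut key = shortroomid.to_be_bytes().to_vec();
    key.extend_from_slice(word.as_bytes());
    key.push(0xff);
    key.extend_from_slice(pdu_id);
    key
}

fn main() {
    let words = tokenize("Hello, Matrix world!");
    assert_eq!(words, vec!["hello", "matrix", "world"]);
    let _key = token_key(1, &words[0], b"\x00\x01");
}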
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
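A minimal sketch of the state-diff computation in `set_event_state` above: the new state layer is stored as (added, removed) relative to its parent layer. Plain u64s stand in for the compressed state events.

use std::collections::HashSet;

fn state_diff(parent: &HashSet<u64>, current: &HashSet<u64>) -> (HashSet<u64>, HashSet<u64>) {
    // Everything in the new state but not the parent was added; the reverse was removed.
    let added: HashSet<u64> = current.difference(parent).copied().collect();
    let removed: HashSet<u64> = parent.difference(current).copied().collect();
    (added, removed)
}

fn main() {
    let parent: HashSet<u64> = vec![1, 2, 3].into_iter().collect();
    let current: HashSet<u64> = vec![2, 3, 4].into_iter().collect();
    let (added, removed) = state_diff(&parent, &current);
    assert!(added.contains(&4) && removed.contains(&1));
}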
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
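A sketch of the depth rule used when building a new PDU above: the new event's depth is one more than the maximum depth of its prev_events, starting from zero when there are none.

fn next_depth(prev_event_depths: &[u64]) -> u64 {
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}

fn main() {
    assert_eq!(next_depth(&[]), 1);
    assert_eq!(next_depth(&[3, 7, 5]), 8);
}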
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
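An illustrative sketch of the appservice namespace check above: a PDU is forwarded if its sender (or, for membership events, its state_key) matches a "users" regex, if any room alias matches an "aliases" regex, or if the room id is listed explicitly. It assumes the `regex` crate, which the code above already depends on; the function shape is made up for illustration.

use regex::Regex;

fn interested(
    user_regexes: &[Regex],
    alias_regexes: &[Regex],
    room_ids: &[String],
    sender: &str,
    member_state_key: Option<&str>,
    room_aliases: &[String],
    room_id: &str,
) -> bool {
    let user_match = user_regexes
        .iter()
        .any(|r| r.is_match(sender) || member_state_key.map_or(false, |k| r.is_match(k)));
    let alias_match = alias_regexes
        .iter()
        .any(|r| room_aliases.iter().any(|a| r.is_match(a)));
    user_match || alias_match || room_ids.iter().any(|id| id == room_id)
}

fn main() {
    let users = vec![Regex::new(r"^@_bridge_.*:example\.org$").unwrap()];
    assert!(interested(
        &users, &[], &[], "@_bridge_alice:example.org", None, &[], "!room:example.org"
    ));
}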
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
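A sketch of the pagination pattern in `pdus_since`/`pdus_after` above: start iterating at `shortroomid ++ (since + 1)` and stop as soon as a key no longer carries the room prefix. An ordered BTreeMap stands in for the `pduid_pdu` tree; that substitution is an assumption.

use std::collections::BTreeMap;
use std::ops::Bound;

fn pdus_after(tree: &BTreeMap<Vec<u8>, String>, shortroomid: u64, since: u64) -> Vec<String> {
    let prefix = shortroomid.to_be_bytes().to_vec();
    let mut first = prefix.clone();
    first.extend_from_slice(&(since + 1).to_be_bytes()); // skip the event at `since` itself

    tree.range((Bound::Included(first), Bound::Unbounded))
        .take_while(|(k, _)| k.starts_with(&prefix)) // stay inside this room
        .map(|(_, v)| v.clone())
        .collect()
}

fn main() {
    let mut tree = BTreeMap::new();
    for count in 1..=3u64 {
        let mut key = 7u64.to_be_bytes().to_vec();
        key.extend_from_slice(&count.to_be_bytes());
        tree.insert(key, format!("pdu {}", count));
    }
    assert_eq!(pdus_after(&tree, 7, 1), vec!["pdu 2", "pdu 3"]);
}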
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
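A sketch of the m.direct migration done above when a user joins a room that has a predecessor: every direct-chat list that contains the old room also gets the new one. Plain strings and a HashMap stand in for ruma's typed ids and the account data event.

use std::collections::HashMap;

fn migrate_direct(direct: &mut HashMap<String, Vec<String>>, old_room: &str, new_room: &str) -> bool {
    let mut updated = false;
    for room_ids in direct.values_mut() {
        if room_ids.iter().any(|r| r == old_room) {
            room_ids.push(new_room.to_owned());
            updated = true; // only write the event back if something changed
        }
    }
    updated
}

fn main() {
    let mut direct = HashMap::new();
    direct.insert("@friend:example.org".to_owned(), vec!["!old:example.org".to_owned()]);
    assert!(migrate_direct(&mut direct, "!old:example.org", "!new:example.org"));
}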
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
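A rough sketch of how `remote_leave_room` above chooses servers to ask: take the senders of the stripped invite-state events and collect their server names. Plain string handling stands in for ruma's UserId and ServerName types, and the simple colon split ignores details like ports; this is an assumption-laden illustration only.

use std::collections::HashSet;

fn candidate_servers(senders: &[&str]) -> HashSet<String> {
    senders
        .iter()
        .filter_map(|s| s.splitn(2, ':').nth(1)) // "@user:server" -> "server"
        .map(str::to_owned)
        .collect()
}

fn main() {
    let servers = candidate_servers(&["@alice:remote.org", "@bob:remote.org"]);
    assert!(servers.contains("remote.org"));
}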
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
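A sketch of the two-way alias bookkeeping in `set_alias` above: one map resolves an alias to its room, the other records aliases under a `room ++ 0xff ++ counter` key so all aliases of a room can be found with a prefix scan. BTreeMaps and a plain counter stand in for the real trees and `globals.next_count()`.

use std::collections::BTreeMap;

struct Aliases {
    alias_roomid: BTreeMap<Vec<u8>, Vec<u8>>,
    aliasid_alias: BTreeMap<Vec<u8>, Vec<u8>>,
    counter: u64,
}

impl Aliases {
    fn set(&mut self, alias: &str, room_id: &str) {
        // Forward direction: alias -> room id.
        self.alias_roomid
            .insert(alias.as_bytes().to_vec(), room_id.as_bytes().to_vec());
        // Reverse direction: room id ++ 0xff ++ counter -> alias.
        self.counter += 1;
        let mut aliasid = room_id.as_bytes().to_vec();
        aliasid.push(0xff);
        aliasid.extend_from_slice(&self.counter.to_be_bytes());
        self.aliasid_alias.insert(aliasid, alias.as_bytes().to_vec());
    }

    fn room_for(&self, alias: &str) -> Option<&[u8]> {
        self.alias_roomid.get(alias.as_bytes()).map(|v| v.as_slice())
    }
}

fn main() {
    let mut a = Aliases { alias_roomid: BTreeMap::new(), aliasid_alias: BTreeMap::new(), counter: 0 };
    a.set("#room:example.org", "!abc:example.org");
    assert_eq!(a.room_for("#room:example.org"), Some(&b"!abc:example.org"[..]));
}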
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
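A sketch of how the iterators above (room_servers, room_members and friends) recover the second half of a composite key: keys are laid out as `first ++ 0xff ++ second`, and `rsplit` at 0xff yields the trailing component first.

fn second_part(key: &[u8]) -> Option<&[u8]> {
    // rsplit walks from the end, so the first item is everything after the last 0xff.
    key.rsplit(|&b| b == 0xff).next()
}

fn main() {
    let mut key = b"!room:example.org".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");
    assert_eq!(second_part(&key), Some(&b"@alice:example.org"[..]));
}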
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( @@ -3481,23 +89,3 @@ impl Rooms { Ok(()) } - /// Returns the room's version. - #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/metadata.rs b/src/service/rooms/metadata.rs index 4ad815e..5d70345 100644 --- a/src/service/rooms/metadata.rs +++ b/src/service/rooms/metadata.rs @@ -1,331 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, 
pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. - pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. 
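[Editor's note, not part of the patch: CompressedStateEvent above is declared as [u8; 2 * size_of::<u64>()], i.e. 16 bytes: the big-endian shortstatekey followed by the big-endian shorteventid. A standalone sketch of the pack/unpack roundtrip mirroring compress_state_event / parse_compressed_state_event.]

use std::convert::TryInto;

const U64_LEN: usize = std::mem::size_of::<u64>();
type Compressed = [u8; 2 * U64_LEN];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 2 * U64_LEN];
    out[..U64_LEN].copy_from_slice(&shortstatekey.to_be_bytes());
    out[U64_LEN..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(c: Compressed) -> (u64, u64) {
    (
        u64::from_be_bytes(c[..U64_LEN].try_into().unwrap()),
        u64::from_be_bytes(c[U64_LEN..].try_into().unwrap()),
    )
}

fn main() {
    let c = compress(7, 99);
    assert_eq!(parse(c), (7, 99));
}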
- pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. 
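[Editor's note, not part of the patch: state_full_ids and state_full above call tokio::task::yield_now() every 100 entries so that loading a very large state snapshot does not monopolize the async executor. A minimal sketch of that pattern, assuming the tokio runtime the crate already depends on; the function name is hypothetical.]

async fn process_all(entries: Vec<u64>) {
    for (i, entry) in entries.into_iter().enumerate() {
        let _ = entry; // expensive per-entry work would go here
        if (i + 1) % 100 == 0 {
            // hand control back to the scheduler periodically
            tokio::task::yield_now().await;
        }
    }
}

#[tokio::main]
async fn main() {
    process_all((0..1000).collect()).await;
}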
- pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { @@ -343,430 +15,6 @@ impl Rooms { .is_some()) } - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
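[Editor's note, not part of the patch: calculate_hash above derives a StateHashId by joining the compressed state entries with 0xff and hashing the result with SHA-256 via the ring crate the project already uses. A standalone sketch of that computation.]

use ring::digest;

fn state_hash(entries: &[&[u8]]) -> Vec<u8> {
    // join the entries with a 0xff separator, then SHA-256 the whole thing
    let joined = entries.join(&0xff_u8);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}

fn main() {
    let a = 1u64.to_be_bytes();
    let b = 2u64.to_be_bytes();
    println!("{:02x?}", state_hash(&[&a[..], &b[..]]));
}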
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(room_id.as_bytes())? @@ -777,43 +25,6 @@ impl Rooms { .transpose() } - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? 
- .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - pub fn get_or_create_shortroomid( &self, room_id: &RoomId, @@ -831,2673 +42,3 @@ impl Rooms { }) } - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. 
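[Editor's note, not part of the patch: the short-id helpers above (get_or_create_shorteventid, get_or_create_shortstatekey, get_eventid_from_short, ...) intern long IDs to u64s by handing out the next global counter on first sight and keeping both directions of the mapping. A standalone sketch of that interning pattern, with a HashMap standing in for the persistent trees and LRU caches; all names are hypothetical.]

use std::collections::HashMap;

struct Interner {
    next: u64,
    forward: HashMap<String, u64>,
    backward: HashMap<u64, String>,
}

impl Interner {
    fn get_or_create_short(&mut self, id: &str) -> u64 {
        if let Some(&short) = self.forward.get(id) {
            return short; // already interned
        }
        self.next += 1; // plays the role of globals.next_count()
        let short = self.next;
        self.forward.insert(id.to_owned(), short);
        self.backward.insert(short, id.to_owned());
        short
    }
}

fn main() {
    let mut i = Interner { next: 0, forward: HashMap::new(), backward: HashMap::new() };
    let a = i.get_or_create_short("$event_a");
    assert_eq!(a, i.get_or_create_short("$event_a")); // stable on repeat lookups
    assert_eq!(i.backward[&a], "$event_a");
}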
- #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
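[Editor's note, not part of the patch: pdu_count above relies on the PduId layout being the 8-byte big-endian shortroomid followed by the 8-byte big-endian count, so the count is simply the trailing 8 bytes. A standalone sketch of building and decoding such an id; the helper names are hypothetical.]

use std::convert::TryInto;
use std::mem::size_of;

fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    // read the last 8 bytes as a big-endian u64
    let tail = pdu_id.get(pdu_id.len().checked_sub(size_of::<u64>())?..)?;
    Some(u64::from_be_bytes(tail.try_into().ok()?))
}

fn main() {
    let id = make_pdu_id(3, 1234);
    assert_eq!(pdu_count(&id), Some(1234));
}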
- pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
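[Editor's note, not part of the patch: get_pdu above is a read-through cache -- check the in-memory LRU first, fall back to the store, then populate the cache with an Arc so repeated lookups share one allocation. A standalone sketch of that pattern, with a HashMap standing in for the Mutex<LruCache<..>> and the persistent tree; all names are hypothetical.]

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Store {
    cache: Mutex<HashMap<String, Arc<String>>>,
    db: HashMap<String, String>,
}

impl Store {
    fn get(&self, key: &str) -> Option<Arc<String>> {
        if let Some(hit) = self.cache.lock().unwrap().get(key) {
            return Some(Arc::clone(hit)); // fast path: already cached
        }
        let value = Arc::new(self.db.get(key)?.clone()); // slow path: hit the db
        self.cache
            .lock()
            .unwrap()
            .insert(key.to_owned(), Arc::clone(&value));
        Some(value)
    }
}

fn main() {
    let store = Store {
        cache: Mutex::new(HashMap::new()),
        db: HashMap::from([("$event".to_owned(), "{...}".to_owned())]),
    };
    assert_eq!(*store.get("$event").unwrap(), "{...}");
    assert!(store.get("$missing").is_none());
}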
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
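[Editor's note, not part of the patch: set_event_state above (like force_state earlier) stores a new snapshot as a diff against its parent layer: entries in the new set but not the parent are "added", entries in the parent but not the new set are "removed". A standalone sketch of that diff computation; the function name is hypothetical.]

use std::collections::HashSet;

fn diff(new_state: &HashSet<u64>, parent: &HashSet<u64>) -> (HashSet<u64>, HashSet<u64>) {
    let added = new_state.difference(parent).copied().collect();
    let removed = parent.difference(new_state).copied().collect();
    (added, removed)
}

fn main() {
    let parent: HashSet<u64> = [1, 2, 3].into();
    let new_state: HashSet<u64> = [2, 3, 4].into();
    let (added, removed) = diff(&new_state, &parent);
    assert_eq!(added, HashSet::from([4]));
    assert_eq!(removed, HashSet::from([1]));
}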
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
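// A minimal sketch (helper name is illustrative, not from this patch) of the pdu_id
// key layout that all_pdus/pdus_since/pdus_until below rely on: shortroomid and count
// are encoded as big-endian u64s, so lexicographic key order matches chronological
// order and a prefix scan starting at `prefix ++ (since + 1)` walks newer PDUs in sequence.
fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec(); // 8-byte room prefix
    pdu_id.extend_from_slice(&count.to_be_bytes());      // 8-byte monotonic count
    pdu_id
}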
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
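// A minimal sketch (function name is illustrative) of the pattern shared by the
// iterators above: keys are `prefix ++ 0xff ++ value`, and the trailing component is
// recovered by splitting from the right on the 0xff separator byte.
fn last_component(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element")
}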
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
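// A minimal sketch (assumed helper, not part of this patch) of the 0xff-delimited key
// built by the lazy-loading helpers above: user ++ 0xff ++ device ++ 0xff ++ room ++
// 0xff ++ lazily-loaded user, so the presence of one flat key records "already sent".
fn lazy_load_key(user_id: &str, device_id: &str, room_id: &str, ll_user: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    for part in [device_id, room_id, ll_user] {
        key.push(0xff);
        key.extend_from_slice(part.as_bytes());
    }
    key
}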
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 4ad815e..89598af 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -141,135 +141,6 @@ impl Rooms { db.globals.supported_room_versions().contains(room_version) } - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - /// This fetches auth events from the current state. #[tracing::instrument(skip(self))] pub fn get_auth_events( @@ -326,2552 +197,6 @@ impl Rooms { hash.as_ref().into() } - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? 
- .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) 
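All of the get_or_create_* helpers above share one pattern: consult a process-local cache, then the forward tree, and otherwise take the next value of the global counter and write both the forward and the reverse mapping so lookups work in either direction. A rough, self-contained sketch of that pattern over plain maps; the struct and field names are placeholders, not the real tree or cache types.

    use std::collections::HashMap;

    struct ShortIds {
        counter: u64,                   // stands in for globals.next_count()
        forward: HashMap<Vec<u8>, u64>, // e.g. eventid_shorteventid / statekey_shortstatekey
        reverse: HashMap<u64, Vec<u8>>, // e.g. shorteventid_eventid / shortstatekey_statekey
    }

    impl ShortIds {
        fn get_or_create(&mut self, key: &[u8]) -> u64 {
            if let Some(&short) = self.forward.get(key) {
                return short; // already allocated (the real code also checks an LRU cache first)
            }
            self.counter += 1;
            let short = self.counter;
            self.forward.insert(key.to_vec(), short);
            self.reverse.insert(short, key.to_vec());
            short
        }
    }

get_shortstatekey and get_shortroomid are the read-only halves of the same idea; they return None instead of allocating.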
- .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
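The helpers above all lean on one key shape: a pdu id is the room's shortroomid followed by the global counter value, both big-endian, so ids within one room sort in insertion order and the newest one can be found by scanning backwards from the largest possible key, which is what latest_pdu_count does. A small sketch; the function names are made up for illustration.

    fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut id = shortroomid.to_be_bytes().to_vec(); // room prefix
        id.extend_from_slice(&count.to_be_bytes());      // per-event counter
        id
    }

    // Read the counter back from the trailing eight bytes, as pdu_count does.
    fn count_of(pdu_id: &[u8]) -> u64 {
        let mut tail = [0u8; 8];
        tail.copy_from_slice(&pdu_id[pdu_id.len() - 8..]);
        u64::from_be_bytes(tail)
    }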
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
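The indexing loop in the RoomMessage branch above writes one tokenids entry per word of the message body, so a later search can prefix-scan on the room and the word. A minimal sketch of that key, with the layout copied from the loop; the helper name is invented.

    // tokenids key: shortroomid ++ lowercased word ++ 0xff ++ pdu_id.
    fn token_key(shortroomid: u64, word: &str, pdu_id: &[u8]) -> Vec<u8> {
        let mut key = shortroomid.to_be_bytes().to_vec();
        key.extend_from_slice(word.to_lowercase().as_bytes());
        key.push(0xff);
        key.extend_from_slice(pdu_id);
        key
    }

Words longer than 50 bytes are filtered out before indexing, which keeps pathological bodies from bloating the index.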
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
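In append_to_state above, the event being replaced is found by comparing only the first eight bytes of each parent-state entry with the new event's shortstatekey; since a compressed entry is shortstatekey ++ shorteventid, that prefix identifies the (event type, state key) pair. A small illustration over plain arrays; the function name and slice types are for this sketch only.

    // Find the parent-state entry whose shortstatekey prefix matches the new event's.
    fn find_replaced(parent_state: &[[u8; 16]], shortstatekey: u64) -> Option<&[u8; 16]> {
        parent_state
            .iter()
            .find(|entry| entry.starts_with(&shortstatekey.to_be_bytes()))
    }

If the entry found this way already equals the new compressed event, append_to_state keeps the previous shortstatehash and skips writing a diff entirely.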
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
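The same guarantee is completed a few lines further down, where the room's current-state pointer is only moved after the pdu has been written, so the full ordering is:

    // 1. append_to_state  - persist the state snapshot belonging to this event
    // 2. append_pdu       - persist the event itself
    // 3. set_room_state   - only then point the room at the new shortstatehash
    //
    // A stored pdu therefore always has its state, and the current room state
    // never references an event that has not been written yet.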
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
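All three iterators above derive their starting key the same way: the room prefix plus a big-endian count, nudged by one so that the event at the token itself is excluded. A tiny sketch; the helper names are invented.

    // Forward scan (pdus_since / pdus_after): start just past the given count.
    fn first_key_after(shortroomid: u64, from: u64) -> Vec<u8> {
        let mut key = shortroomid.to_be_bytes().to_vec();
        key.extend_from_slice(&(from + 1).to_be_bytes());
        key
    }

    // Reverse scan (pdus_until): start just below the given count.
    fn first_key_before(shortroomid: u64, until: u64) -> Vec<u8> {
        let mut key = shortroomid.to_be_bytes().to_vec();
        key.extend_from_slice(&until.saturating_sub(1).to_be_bytes());
        key
    }

Both directions then stop as soon as a key no longer starts with the room prefix.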
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
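remote_leave_room above never guesses which servers can help: it takes the senders of the stripped invite-state events, parses each as a user id and keeps only the server part, then tries those servers one by one until a make_leave template comes back. As a made-up example, an invite state whose events were sent by @alice:matrix.org and @bot:bridge.example yields the candidate set {matrix.org, bridge.example}. A rough approximation of that extraction; the real code goes through UserId::parse and server_name().

    use std::collections::HashSet;

    // "@alice:matrix.org" -> "matrix.org"; anything after the first ':' stays attached.
    fn candidate_servers(senders: &[&str]) -> HashSet<String> {
        senders
            .iter()
            .filter_map(|s| s.split_once(':'))
            .map(|(_, server)| server.to_owned())
            .collect()
    }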
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - #[tracing::instrument(skip(self))] pub fn iter_ids(&self) -> impl Iterator>> + '_ { self.roomid_shortroomid.iter().map(|(bytes, _)| { @@ -2884,620 +209,8 @@ impl Rooms { }) } - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
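search_pdus above is the read side of the tokenids layout written in append_pdu: for each search word it scans the range belonging to (room, word) from the newest entry downwards, keeps only the trailing pdu-id bytes, and intersects the per-word streams with utils::common_elements. A sketch of just the per-word range bounds; the helper name is invented and the tree iteration itself is not reproduced.

    // Range for one word: all keys starting with shortroomid ++ word ++ 0xff,
    // walked backwards from the largest key so newer pdus come first.
    fn word_range(shortroomid: u64, word: &str) -> (Vec<u8>, Vec<u8>) {
        let mut prefix = shortroomid.to_be_bytes().to_vec();
        prefix.extend_from_slice(word.to_lowercase().as_bytes());
        prefix.push(0xff);

        let mut last_possible = prefix.clone();
        last_possible.extend_from_slice(&u64::MAX.to_be_bytes());

        (prefix, last_possible)
    }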
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - pub fn is_disabled(&self, room_id: &RoomId) -> Result { Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) } - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
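The iterators above and below (room_servers, server_rooms, room_members, rooms_joined and friends) all recover the trailing identifier of a composite key the same way: split on the 0xff separator from the right and take the first piece. A minimal sketch of that step, under an illustrative name:

    // Returns the bytes after the last 0xff separator (or the whole key if there is none).
    fn last_segment(key: &[u8]) -> &[u8] {
        key.rsplit(|&b| b == 0xff)
            .next()
            .expect("rsplit always returns at least one element")
    }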
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
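cache_auth_chain and get_auth_chain_from_cache above persist a single event's auth chain as a flat run of big-endian u64 short event ids, so no framing is needed to decode it. A sketch of that round trip; both helper names are illustrative:

    use std::mem::size_of;

    fn encode_chain(chain: &[u64]) -> Vec<u8> {
        chain.iter().flat_map(|s| s.to_be_bytes()).collect()
    }

    fn decode_chain(bytes: &[u8]) -> Vec<u64> {
        bytes
            .chunks_exact(size_of::<u64>())
            .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
            .collect()
    }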
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } } diff --git a/src/service/rooms/outlier.rs b/src/service/rooms/outlier.rs index 4ad815e..afb0a14 100644 --- a/src/service/rooms/outlier.rs +++ b/src/service/rooms/outlier.rs @@ -1,1228 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
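state_full_ids and state_full above interleave tokio::task::yield_now() into a long walk over a state snapshot so that one very large room cannot monopolise the async executor. A minimal sketch of the pattern; the function and its parameters are illustrative:

    async fn for_each_yielding<T>(items: Vec<T>, mut f: impl FnMut(T)) {
        for (i, item) in items.into_iter().enumerate() {
            f(item);
            // Hand control back to the scheduler every 100 items.
            if i % 100 == 0 {
                tokio::task::yield_now().await;
            }
        }
    }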
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
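calculate_hash above derives a StateHashId by joining the given byte slices with 0xff and hashing the result with SHA-256 via ring; force_state and set_event_state reuse it over the compressed state events. A free-standing sketch of the same computation, under an illustrative name:

    use ring::digest;

    fn state_hash(bytes_list: &[&[u8]]) -> Vec<u8> {
        let joined = bytes_list.join(&0xff_u8);
        digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
    }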
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
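save_state_from_diff above serialises a layer into shortstatehash_statediff as: the parent shortstatehash (0 meaning "this layer is a full state"), the added 16-byte compressed state events (shortstatekey followed by shorteventid, both big-endian), and, only when something was removed, a 0_u64 separator followed by the removed events. A sketch of that encoding; the helper is illustrative:

    fn encode_statediff(parent: u64, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
        let mut value = parent.to_be_bytes().to_vec(); // 0 = no parent layer
        for new in added {
            value.extend_from_slice(new);
        }
        if !removed.is_empty() {
            value.extend_from_slice(&0_u64.to_be_bytes()); // separator before the removals
            for gone in removed {
                value.extend_from_slice(gone);
            }
        }
        value
    }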
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -1243,2261 +18,3 @@ impl Rooms { ) } - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
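As append_pdu above explains, state events get the content of the event they replace copied into unsigned.prev_content so clients can render changes such as membership transitions without an extra lookup. An illustrative shape of the resulting unsigned object; the values are invented for the example:

    use serde_json::{json, Value};

    fn example_unsigned() -> Value {
        json!({
            "prev_content": {
                "membership": "invite",
                "displayname": "Alice"
            }
        })
    }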
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
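force_state and set_event_state above both express a new snapshot as a diff against the newest parent layer: whatever is in the new state but not the parent is added, whatever is in the parent but not the new state is removed. A sketch of that computation over compressed state events; the function name is illustrative:

    use std::collections::HashSet;

    fn state_diff(
        new_state: &HashSet<[u8; 16]>,
        parent: &HashSet<[u8; 16]>,
    ) -> (HashSet<[u8; 16]>, HashSet<[u8; 16]>) {
        let added = new_state.difference(parent).copied().collect();
        let removed = parent.difference(new_state).copied().collect();
        (added, removed)
    }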
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
-    #[tracing::instrument(skip(self))]
-    pub fn all_pdus<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        self.pdus_since(user_id, room_id, 0)
-    }
-
-    /// Returns an iterator over all events in a room that happened after the event with id `since`
-    /// in chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_since<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        // Skip the first pdu if it's exactly at since, because we sent that last time
-        let mut first_pdu_id = prefix.clone();
-        first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(&first_pdu_id, false)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Returns an iterator over all events and their tokens in a room that happened before the
-    /// event with id `until` in reverse-chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_until<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        until: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        // Create the first part of the full pdu id
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`
-
-        let current: &[u8] = &current;
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(current, true)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Returns an iterator over all events and their token in a room that happened after the event
-    /// with id `from` in chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_after<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        from: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        // Create the first part of the full pdu id
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event
-
-        let current: &[u8] = &current;
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(current, false)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Replace a PDU with the redacted form.
-    #[tracing::instrument(skip(self, reason))]
-    pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
-        if let Some(pdu_id) = self.get_pdu_id(event_id)? {
-            let mut pdu = self
-                .get_pdu_from_id(&pdu_id)?
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/pdu_metadata.rs b/src/service/rooms/pdu_metadata.rs index 4ad815e..f8ffcee 100644 --- a/src/service/rooms/pdu_metadata.rs +++ b/src/service/rooms/pdu_metadata.rs @@ -1,1183 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
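// The state helpers below (force_state, load_shortstatehash_info, save_state_from_diff)
// all operate on CompressedStateEvent values: a state entry packed into 16 bytes, the
// big-endian shortstatekey followed by the big-endian shorteventid. A minimal sketch of
// that packing, with illustrative names rather than the exact helpers defined in this file:
//
//     fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
//         let mut out = [0u8; 16];
//         out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
//         out[8..].copy_from_slice(&shorteventid.to_be_bytes());
//         out
//     }
//
// On disk, a statediff record (see shortstatehash_statediff above) is the parent
// shortstatehash (0 for a full snapshot), followed by every added compressed event, an
// optional 0_u64 separator, and every removed compressed event:
//
//     fn encode_statediff(parent: u64, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
//         let mut v = parent.to_be_bytes().to_vec();
//         for a in added {
//             v.extend_from_slice(a);
//         }
//         if !removed.is_empty() {
//             v.extend_from_slice(&0_u64.to_be_bytes());
//             for r in removed {
//                 v.extend_from_slice(r);
//             }
//         }
//         v
//     }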
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { @@ -1190,32 +10,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
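// The leaves tree holds the room's current forward extremities: events that have no
// children yet and therefore become the prev_events of the next PDU. Keys are the room id,
// a 0xff separator, and the event id; the value is the event id again. A sketch of the key
// layout (illustrative helper, not part of this file):
//
//     fn pduleaves_key(room_id: &RoomId, event_id: &EventId) -> Vec<u8> {
//         let mut key = room_id.as_bytes().to_vec();
//         key.push(0xff); // separator used by most composite keys in these trees
//         key.extend_from_slice(event_id.as_bytes());
//         key
//     }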
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - #[tracing::instrument(skip(self))] pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { let mut key = room_id.as_bytes().to_vec(); @@ -1223,26 +17,6 @@ impl Rooms { Ok(self.referencedevents.get(&key)?.is_some()) } - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - #[tracing::instrument(skip(self))] pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.softfailedeventids.insert(event_id.as_bytes(), &[]) @@ -1255,2249 +29,3 @@ impl Rooms { .map(|o| o.is_some()) } - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
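// Marking prev_events as referenced (keyed by room id + event id in `referencedevents`) is
// what lets is_event_referenced, earlier in this diff, tell that those events are no longer
// forward extremities; the replace_pdu_leaves call right after then swaps in the leaves the
// caller supplied (for locally built events, just the new PDU itself).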
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
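// The ordering here is: 1) append_to_state persists the new state snapshot, 2) append_pdu
// writes the event into the timeline, 3) set_room_state below advances the room's current
// shortstatehash. Doing it in this order means readers never observe a current room state
// that points at events which have not been stored yet.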
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
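// all_pdus, pdus_since, pdus_until and pdus_after below all seek by the 16-byte pdu id used
// as the pduid_pdu key: the room's shortroomid followed by a monotonically increasing count,
// both big-endian, so a lexicographic scan of one room's prefix is also chronological. A
// sketch of the key (illustrative, matching how append_pdu builds it above):
//
//     fn pdu_id_for(shortroomid: u64, count: u64) -> Vec<u8> {
//         let mut id = shortroomid.to_be_bytes().to_vec();
//         id.extend_from_slice(&count.to_be_bytes());
//         id
//     }
//
// pdus_since then starts iterating at pdu_id_for(shortroomid, since + 1), and pdus_until
// iterates backwards from pdu_id_for(shortroomid, until - 1) (saturating at zero).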
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
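`appservice_in_room` above compiles the registration's user-namespace regexes and asks whether the bridge bot or any matching member is joined, memoizing the answer per room. A hedged sketch of the core check, with illustrative names:

use regex::Regex;

// Returns true if any joined member matches one of the appservice's
// user-namespace regexes (the bridge bot itself is checked separately).
fn appservice_covers_room<'a>(
    user_regexes: &[Regex],
    mut joined_members: impl Iterator<Item = &'a str>,
) -> bool {
    joined_members.any(|user_id| user_regexes.iter().any(|r| r.is_match(user_id)))
}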
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
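`remote_leave_room` has no local room state to consult, so it derives the servers worth asking from the senders of the stripped invite-state events. A simplified sketch of that extraction (plain strings instead of ruma types, helper name assumed):

use std::collections::HashSet;

// A Matrix user ID looks like `@localpart:server.name`; everything after the
// first colon is the server to try for `make_leave`.
fn candidate_servers<'a>(senders: impl Iterator<Item = &'a str>) -> HashSet<String> {
    senders
        .filter_map(|sender| sender.split_once(':').map(|(_, server)| server.to_owned()))
        .collect()
}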
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
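`set_alias` above keeps two indexes per alias: a forward map for alias lookups and a counter-suffixed reverse key under the room ID, so removing an alias (or listing a room's aliases) becomes a single prefix scan. A sketch of the key layout, with stand-in types:

// alias_roomid:  "#alias:server"                   -> room_id  (forward lookup)
// aliasid_alias: room_id ++ 0xff ++ count (be u64) -> alias    (prefix scan per room)
fn alias_keys(alias: &str, room_id: &str, count: u64) -> (Vec<u8>, Vec<u8>) {
    let forward_key = alias.as_bytes().to_vec();
    let mut reverse_key = room_id.as_bytes().to_vec();
    reverse_key.push(0xff);
    reverse_key.extend_from_slice(&count.to_be_bytes());
    (forward_key, reverse_key)
}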
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
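`search_pdus` above queries the `tokenids` index newest-first and intersects one iterator per search word; the query-side tokenizer is reproduced here as a self-contained version:

// Split on any non-alphanumeric character and lowercase, so lookups are
// case-insensitive whole-word matches.
fn tokenize(text: &str) -> Vec<String> {
    text.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .map(str::to_lowercase)
        .collect()
}

fn main() {
    assert_eq!(tokenize("Hello, Matrix world!"), vec!["hello", "matrix", "world"]);
}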
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
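The iterator helpers above all recover the trailing identifier from a composite key by splitting from the right on the 0xff separator. A compact illustration (hypothetical helper):

// Return everything after the last 0xff separator in a composite key.
fn last_segment(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element")
}

fn main() {
    let mut key = b"!room:server".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@user:server");
    assert_eq!(last_segment(&key), &b"@user:server"[..]);
}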
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
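The lazy-loading helpers above are deliberately two-phased: member IDs sent with a sync response are parked under that response's `count`, and only persisted as "already sent" once the client comes back with that token as `since`, which proves delivery. A hedged in-memory sketch of the same flow, with simplified stand-in types:

use std::collections::{HashMap, HashSet};

struct LazyLoadTracker {
    // (user, device, room, sync count) -> members included in that response
    pending: HashMap<(String, String, String, u64), HashSet<String>>,
    // (user, device, room, member) the client has confirmed receiving
    sent: HashSet<(String, String, String, String)>,
}

impl LazyLoadTracker {
    fn mark_sent(&mut self, key: (String, String, String, u64), members: HashSet<String>) {
        self.pending.insert(key, members);
    }

    fn confirm_delivery(&mut self, user: &str, device: &str, room: &str, since: u64) {
        let key = (user.to_owned(), device.to_owned(), room.to_owned(), since);
        if let Some(members) = self.pending.remove(&key) {
            for member in members {
                self.sent
                    .insert((user.to_owned(), device.to_owned(), room.to_owned(), member));
            }
        }
    }
}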
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/search.rs b/src/service/rooms/search.rs index 4ad815e..ce05505 100644 --- a/src/service/rooms/search.rs +++ b/src/service/rooms/search.rs @@ -1,2900 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
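The compressed state entries used by most of the state fields above are fixed 16-byte values: the big-endian shortstatekey followed by the big-endian shorteventid (see `compress_state_event`/`parse_compressed_state_event` further down). A standalone sketch of that layout:

use std::convert::TryInto;

type Compressed = [u8; 16]; // shortstatekey (8 bytes BE) ++ shorteventid (8 bytes BE)

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn decompress(c: &Compressed) -> (u64, u64) {
    (
        u64::from_be_bytes(c[..8].try_into().expect("8 bytes")),
        u64::from_be_bytes(c[8..].try_into().expect("8 bytes")),
    )
}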
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
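`calculate_hash` above derives the `StateHashId` by joining the given byte strings (event IDs or compressed state entries) with 0xff and hashing the result with SHA-256 from `ring`. The same computation as a standalone function:

use ring::digest;

// SHA-256 over all entries joined with a 0xff separator; the result names a
// state snapshot, so identical states map to the same shortstatehash.
fn state_hash(entries: &[&[u8]]) -> Vec<u8> {
    let joined = entries.join(&0xff_u8);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}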
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
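The parser above reads a statediff value as: the parent shortstatehash (0 means "this is a full state"), the added 16-byte entries, then an all-zero u64 marker followed by the removed entries. The matching encoder, mirroring what `save_state_from_diff` writes below, would look roughly like this:

// Encode one statediff value; the 0_u64 marker (and the removed list) is only
// written when something was actually removed relative to the parent.
fn encode_statediff(parent: u64, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for entry in added {
        value.extend_from_slice(entry);
    }
    if !removed.is_empty() {
        value.extend_from_slice(&0_u64.to_be_bytes());
        for entry in removed {
            value.extend_from_slice(entry);
        }
    }
    value
}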
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
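The `get_or_create_short*` helpers above all follow one interning pattern: consult an in-memory LRU, then the forward tree, and only then allocate the next global counter value and record both directions. A hedged sketch of that pattern, with plain in-memory maps standing in for the caches and trees:

use std::collections::HashMap;

struct Interner {
    forward: HashMap<String, u64>, // e.g. eventid_shorteventid
    reverse: HashMap<u64, String>, // e.g. shorteventid_eventid
    next: u64,                     // stands in for globals.next_count()
}

impl Interner {
    fn get_or_create(&mut self, id: &str) -> u64 {
        if let Some(&short) = self.forward.get(id) {
            return short;
        }
        self.next += 1;
        let short = self.next;
        self.forward.insert(id.to_owned(), short);
        self.reverse.insert(short, id.to_owned());
        short
    }
}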
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
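`get_pdu_json` above (and `get_pdu` just below) resolve an event ID through the timeline first and fall back to the outlier table only when the event was never appended to a room. A hedged in-memory sketch of that lookup order, with stand-in types:

use std::collections::HashMap;

struct PduStore {
    eventid_pduid: HashMap<String, Vec<u8>>,      // event ID -> timeline PDU ID
    pduid_pdu: HashMap<Vec<u8>, Vec<u8>>,         // timeline PDU ID -> PDU JSON
    eventid_outlierpdu: HashMap<String, Vec<u8>>, // outlier PDUs, keyed by event ID
}

impl PduStore {
    fn get_pdu_bytes(&self, event_id: &str) -> Option<&Vec<u8>> {
        match self.eventid_pduid.get(event_id) {
            Some(pdu_id) => self.pduid_pdu.get(pdu_id),
            None => self.eventid_outlierpdu.get(event_id),
        }
    }
}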
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
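// Sketch for illustration (helper names below are not from the Conduit source): the trees
// touched above (roomid_pduleaves, referencedevents) use the composite-key convention that
// is visible in the removed code: the room id, a 0xff separator byte, then the second
// component. 0xff never occurs inside a valid UTF-8 identifier, so scanning the prefix
// `room_id ++ 0xff` returns exactly the entries that belong to that room.

fn composite_key(room_id: &str, event_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // separator byte, never part of a valid identifier
    key.extend_from_slice(event_id.as_bytes());
    key
}

fn room_prefix(room_id: &str) -> Vec<u8> {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    prefix // everything stored under this prefix belongs to `room_id`
}

fn main() {
    let key = composite_key("!room:example.org", "$someevent");
    assert!(key.starts_with(&room_prefix("!room:example.org")));
}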
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
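// Sketch for illustration (helper name is not from the source): the block above enriches
// `unsigned` with `prev_content` for state events so clients can render e.g. membership
// changes without an extra lookup. The same shape, standalone, with serde_json:

use serde_json::{json, Value};

fn with_prev_content(mut pdu_json: Value, prev_state_content: Value) -> Value {
    let unsigned = pdu_json
        .as_object_mut()
        .expect("pdu is a JSON object")
        .entry("unsigned")
        .or_insert_with(|| json!({}));
    if let Value::Object(map) = unsigned {
        map.insert("prev_content".to_owned(), prev_state_content);
    }
    pdu_json
}

fn main() {
    let pdu = json!({ "type": "m.room.member", "content": { "membership": "join" } });
    let out = with_prev_content(pdu, json!({ "membership": "invite" }));
    assert_eq!(out["unsigned"]["prev_content"]["membership"], "invite");
}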
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
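// Sketch for illustration: the sequence above is the heart of event creation: sign the
// canonical JSON first, then derive the event id from its reference hash, so the id is
// content-addressed rather than chosen by the sender. Condensed into one helper; the ruma
// calls are the ones used above, but the helper name, the error handling, and the import
// paths (which vary between ruma versions) are illustrative only.

use std::sync::Arc;

use ruma::{signatures::Ed25519KeyPair, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomVersionId};

fn sign_and_derive_event_id(
    server_name: &str,
    keypair: &Ed25519KeyPair,
    pdu_json: &mut CanonicalJsonObject,
    room_version: &RoomVersionId,
) -> Result<Arc<EventId>, Box<dyn std::error::Error>> {
    // 1. Hash and sign the event while it has no event_id field.
    pdu_json.remove("event_id");
    ruma::signatures::hash_and_sign_event(server_name, keypair, pdu_json, room_version)?;

    // 2. The event id is "$" followed by the reference hash of the signed canonical JSON.
    let event_id = EventId::parse_arc(format!(
        "${}",
        ruma::signatures::reference_hash(pdu_json, room_version)?
    ))?;

    // 3. Put the id back so the stored JSON is complete.
    pdu_json.insert(
        "event_id".to_owned(),
        CanonicalJsonValue::String(event_id.as_str().to_owned()),
    );

    Ok(event_id)
}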
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
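// Sketch for illustration (function name is not from the source): the iterators further
// down (pdus_since, pdus_until, pdus_after) only work because append_pdu built every pdu
// id above as 8 big-endian bytes of the room's short id followed by 8 big-endian bytes of
// a globally increasing count. Lexicographic key order therefore matches chronological
// order within a room, and "events after `since`" is a plain range scan starting at count
// `since + 1`.

fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

fn main() {
    let earlier = pdu_id(7, 100);
    let later = pdu_id(7, 101);
    // Big-endian encoding keeps numeric order and byte order in sync.
    assert!(earlier < later);
    // The first pdu strictly after `since` is where the forward scan starts.
    let since = 100;
    assert_eq!(pdu_id(7, since + 1), later);
}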
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( @@ -2945,559 +48,3 @@ impl Rooms { })) } - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. 
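// Sketch for illustration: get_shared_rooms above relies on utils::common_elements, which
// intersects several iterators that are already sorted (each user's userroomid_joined
// prefix scan yields room ids in key order). That helper is not part of this diff; a
// minimal equivalent for sorted, deduplicated inputs could look like the following (the
// real one additionally takes a comparator).

fn common_elements<I, T>(iters: Vec<I>) -> Vec<T>
where
    I: Iterator<Item = T>,
    T: Ord,
{
    let mut iters: Vec<_> = iters.into_iter().map(|i| i.peekable()).collect();
    let first = match iters.pop() {
        Some(first) => first,
        None => return Vec::new(),
    };

    let mut result = Vec::new();
    'outer: for candidate in first {
        for it in iters.iter_mut() {
            // Skip everything smaller than the candidate in this (sorted) iterator.
            while matches!(it.peek(), Some(x) if *x < candidate) {
                it.next();
            }
            match it.peek() {
                Some(x) if *x == candidate => {} // present here as well, check the rest
                _ => continue 'outer,            // missing: not shared by everyone
            }
        }
        result.push(candidate);
    }
    result
}

fn main() {
    let a = vec![1, 3, 5, 7].into_iter();
    let b = vec![3, 4, 5, 9].into_iter();
    assert_eq!(common_elements(vec![a, b]), vec![3, 5]);
}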
- #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. 
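// Sketch for illustration (function name is not from the source): the member and server
// iterators around here all decode composite keys the same way: take everything after the
// last 0xff separator and parse it back into an identifier, as in
// `key.rsplit(|&b| b == 0xff).next()` above.

fn last_component(key: &[u8]) -> Option<&[u8]> {
    // rsplit yields segments starting from the end, so the first segment is everything
    // after the last 0xff separator.
    key.rsplit(|&b| b == 0xff).next()
}

fn main() {
    let mut key = b"!room:example.org".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");
    assert_eq!(last_component(&key), Some(&b"@alice:example.org"[..]));
}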
- #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? 
- .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
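// Editor's sketch (not part of the original patch): the byte layout used by
// `cache_auth_chain` / `get_auth_chain_from_cache` above when persisting an auth chain in
// `shorteventid_authchain`: the shorteventids are concatenated as big-endian u64s and read
// back with chunks_exact.
use std::mem::size_of;

fn main() {
    let chain: Vec<u64> = vec![1, 42, 7];

    // Persisted form: concatenated big-endian u64s.
    let bytes: Vec<u8> = chain.iter().flat_map(|s| s.to_be_bytes()).collect();

    // Read back the same way the DB-cache branch above does.
    let restored: Vec<u64> = bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect();
    assert_eq!(chain, restored);
}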
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/short.rs b/src/service/rooms/short.rs index 4ad815e..63e8b71 100644 --- a/src/service/rooms/short.rs +++ b/src/service/rooms/short.rs @@ -1,741 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
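// Editor's sketch (not part of the original patch): how `get_room_version` above pulls the
// version out of the m.room.create content. `ExtractVersion` is a hypothetical stand-in for
// ruma's RoomCreateEventContent; serde and serde_json are already dependencies of this file.
use serde::Deserialize;

#[derive(Deserialize)]
struct ExtractVersion {
    room_version: String,
}

fn main() {
    let create_content = r#"{"creator":"@alice:example.org","room_version":"6"}"#;
    let parsed: ExtractVersion =
        serde_json::from_str(create_content).expect("valid m.room.create content");
    assert_eq!(parsed.room_version, "6");
}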
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
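// Editor's sketch (not part of the original patch): a standalone version of the
// `calculate_hash` helper above. It joins the event-ID byte slices with 0xff and hashes the
// result with SHA-256 via the `ring` crate, which this module already imports.
use ring::digest;

fn calculate_hash(bytes_list: &[&[u8]]) -> Vec<u8> {
    let bytes = bytes_list.join(&0xff);
    digest::digest(&digest::SHA256, &bytes).as_ref().to_vec()
}

fn main() {
    let ids = [b"$event_a".as_slice(), b"$event_b".as_slice()];
    // Any change to the joined event IDs changes the resulting StateHashId.
    assert_ne!(calculate_hash(&ids), calculate_hash(&ids[..1]));
}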
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } pub fn get_or_create_shorteventid( &self, @@ -767,16 +29,6 @@ impl Rooms { Ok(short) } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - pub fn get_shortstatekey( &self, event_type: &StateEventType, @@ -814,23 +66,6 @@ impl Rooms { Ok(short) } - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - pub fn get_or_create_shortstatekey( &self, event_type: &StateEventType, @@ -940,2564 +175,24 @@ impl Rooms { Ok(result) } - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. 
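// Editor's sketch (not part of the original patch): the statediff value layout written by
// `save_state_from_diff` above and parsed back by `load_shortstatehash_info`: parent
// shortstatehash (0 = no parent), the added shortstatekey+shorteventid pairs, a 0_u64
// marker, then the removed pairs. 16-byte arrays stand in for CompressedStateEvent.
use std::mem::size_of;

type Compressed = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut v = [0u8; 16];
    v[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    v[8..].copy_from_slice(&shorteventid.to_be_bytes());
    v
}

fn encode(parent: u64, added: &[Compressed], removed: &[Compressed]) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for new in added {
        value.extend_from_slice(new);
    }
    if !removed.is_empty() {
        value.extend_from_slice(&0_u64.to_be_bytes()); // marker between added and removed
        for r in removed {
            value.extend_from_slice(r);
        }
    }
    value
}

fn decode(value: &[u8]) -> (u64, Vec<Compressed>, Vec<Compressed>) {
    let parent = u64::from_be_bytes(value[..8].try_into().expect("8 bytes"));
    let (mut added, mut removed) = (Vec::new(), Vec::new());
    let mut add_mode = true;
    let mut i = size_of::<u64>();
    while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
        if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
            add_mode = false;
            i += size_of::<u64>();
            continue;
        }
        let chunk: Compressed = v.try_into().expect("16 bytes");
        if add_mode { added.push(chunk) } else { removed.push(chunk) };
        i += 2 * size_of::<u64>();
    }
    (parent, added, removed)
}

fn main() {
    let added = vec![compress(1, 100)];
    let removed = vec![compress(2, 50)];
    let value = encode(7, &added, &removed);
    assert_eq!(decode(&value), (7, added, removed));
}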
- pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? 
- { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. 
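// Editor's sketch (not part of the original patch): the cache-aside pattern used by
// `get_pdu` above. A Mutex<HashMap> stands in for the Mutex<lru_cache::LruCache> field and
// a format! call stands in for the real database lookup.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Store {
    cache: Mutex<HashMap<String, Arc<String>>>,
}

impl Store {
    fn get(&self, event_id: &str) -> Arc<String> {
        // Check the RAM cache first.
        if let Some(hit) = self.cache.lock().unwrap().get(event_id) {
            return Arc::clone(hit);
        }
        // Otherwise "load" the value and remember it for the next caller.
        let loaded = Arc::new(format!("pdu body for {}", event_id));
        self.cache
            .lock()
            .unwrap()
            .insert(event_id.to_owned(), Arc::clone(&loaded));
        loaded
    }
}

fn main() {
    let store = Store { cache: Mutex::new(HashMap::new()) };
    let a = store.get("$event");
    let b = store.get("$event");
    assert!(Arc::ptr_eq(&a, &b)); // second call is served from the cache
}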
- #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. 
This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? { - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - 
.map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
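// Editor's sketch (not part of the original patch): the token extraction used for the
// `tokenids` search index in `append_pdu` above: split the body on non-alphanumeric
// characters, drop empty and over-long words, and lowercase what remains.
fn tokenize(body: &str) -> Vec<String> {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
        .collect()
}

fn main() {
    assert_eq!(tokenize("Hello, Matrix world!"), vec!["hello", "matrix", "world"]);
}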
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, + state_hash: &StateHashId, globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut 
state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid 
event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, + ) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
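Editor's note: the leave stub handling around here relies on the fact that, in room versions 3 and later, the event ID is not a field carried inside the event but is derived from the event's reference hash, which is why event_id is stripped before hashing/signing and recomputed afterwards. A small sketch of that derivation, mirroring the calls used in this file, follows; the helper name and the stringly-typed return value are assumptions made for brevity.

use ruma::{serde::CanonicalJsonObject, signatures, RoomVersionId};

/// Illustrative sketch of the ID derivation used above: take the reference
/// hash of the already hashed-and-signed canonical event JSON and prefix it
/// with '$'. Error handling is collapsed into Option for brevity.
fn derive_event_id(
    signed_event: &CanonicalJsonObject,
    room_version: &RoomVersionId,
) -> Option<String> {
    let hash = signatures::reference_hash(signed_event, room_version).ok()?;
    Some(format!("${}", hash))
}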
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) }) } - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
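Editor's note: the membership iterators in this area all share one key layout: the key is the room ID, a 0xff separator byte, then the member (user ID or server name). Scanning by the `room_id ++ 0xff` prefix lists the members, and splitting on 0xff from the right recovers the suffix. The dependency-free sketch below shows that round trip; the helper names are illustrative, not part of the Tree API.

/// Illustrative helpers: compose and decompose the roomuserid-style keys
/// used by the prefix scans above.
fn make_roomuser_key(room_id: &str, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // separator byte; 0xff can never appear in valid UTF-8 identifiers
    key.extend_from_slice(user_id.as_bytes());
    key
}

fn user_from_roomuser_key(key: &[u8]) -> Option<&str> {
    // Everything after the last 0xff is the user ID.
    let suffix = key.rsplit(|&b| b == 0xff).next()?;
    std::str::from_utf8(suffix).ok()
}

#[test]
fn roomuser_key_roundtrip() {
    let key = make_roomuser_key("!room:example.org", "@alice:example.org");
    assert_eq!(user_from_roomuser_key(&key), Some("@alice:example.org"));
}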
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
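Editor's note: the auth-chain cache persisted just above stores, for a single event, a flat concatenation of 8-byte big-endian shorteventids. The following dependency-free sketch shows that encode/decode round trip; the function names are illustrative.

use std::collections::HashSet;
use std::mem::size_of;

/// Illustrative sketch of the shorteventid_authchain value format:
/// a flat concatenation of 8-byte big-endian shorteventids.
fn encode_auth_chain(chain: &HashSet<u64>) -> Vec<u8> {
    chain.iter().flat_map(|id| id.to_be_bytes()).collect()
}

fn decode_auth_chain(bytes: &[u8]) -> HashSet<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}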
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state.rs b/src/service/rooms/state.rs index 4ad815e..4c75467 100644 --- a/src/service/rooms/state.rs +++ b/src/service/rooms/state.rs @@ -1,145 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -270,101 +128,6 @@ impl Rooms { }) } - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. @@ -474,472 +237,6 @@ impl Rooms { Ok(()) } - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - /// Returns the full room state. #[tracing::instrument(skip(self))] pub async fn room_state_full( @@ -983,185 +280,6 @@ impl Rooms { } } - /// Returns the `count` of this pdu's id. 
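// Editor's note: a small sketch of the composite-key layout used for state
// keys throughout this file (see get_or_create_shortstatekey and
// get_statekey_from_short above): the event type and the state key are
// concatenated with a 0xff separator, which is safe because 0xff can never
// occur in valid UTF-8. The function names here are invented for illustration.

fn encode_statekey(event_type: &str, state_key: &str) -> Vec<u8> {
    let mut key = event_type.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(state_key.as_bytes());
    key
}

fn decode_statekey(bytes: &[u8]) -> Option<(String, String)> {
    let mut parts = bytes.splitn(2, |&b| b == 0xff);
    let event_type = std::str::from_utf8(parts.next()?).ok()?;
    let state_key = std::str::from_utf8(parts.next()?).ok()?;
    Some((event_type.to_owned(), state_key.to_owned()))
}

fn main() {
    let key = encode_statekey("m.room.member", "@alice:example.org");
    assert_eq!(
        decode_statekey(&key),
        Some(("m.room.member".to_owned(), "@alice:example.org".to_owned()))
    );
}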
- pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? 
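// Editor's note: a sketch of the pdu id layout that pdu_count() above relies
// on. A pdu id is the 8-byte big-endian short room id followed by the 8-byte
// big-endian per-server counter, so the count can be recovered from the last
// size_of::<u64>() bytes. Big-endian matters because it makes lexicographic
// key order equal numeric order inside one room. Names are illustrative.

use std::convert::TryInto;

fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    // The count is the trailing 8 (size_of::<u64>()) bytes, big-endian.
    let start = pdu_id.len().checked_sub(8)?;
    let tail: [u8; 8] = pdu_id[start..].try_into().ok()?;
    Some(u64::from_be_bytes(tail))
}

fn main() {
    let id = make_pdu_id(42, 1000);
    assert_eq!(id.len(), 16);
    assert_eq!(pdu_count(&id), Some(1000));
    // Lexicographic order of the keys follows the counter within a room:
    assert!(make_pdu_id(42, 1000) < make_pdu_id(42, 1001));
}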
- { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - /// Returns the leaf pdus of a room. #[tracing::instrument(skip(self))] pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { @@ -1179,17 +297,6 @@ impl Rooms { .collect() } - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - /// Replace the leaves of a room. /// /// The provided `event_ids` become the new leaves, this allows a room to have multiple @@ -1216,376 +323,6 @@ impl Rooms { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. 
- /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? 
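// Editor's note: a sketch (using serde_json::Value as a stand-in for the
// canonical JSON types, so it needs the serde_json crate) of what the "make
// unsigned fields correct" step in append_pdu does: before a state event is
// appended, the content of the state it replaces is copied into
// unsigned.prev_content so clients can render e.g. membership changes without
// an extra lookup. Function and variable names are illustrative.

use serde_json::{json, Value};

fn inject_prev_content(pdu_json: &mut Value, prev_state_content: Value) {
    if let Value::Object(obj) = pdu_json {
        let unsigned = obj
            .entry("unsigned".to_owned())
            .or_insert_with(|| Value::Object(Default::default()));
        if let Value::Object(unsigned) = unsigned {
            unsigned.insert("prev_content".to_owned(), prev_state_content);
        }
    }
}

fn main() {
    let mut pdu = json!({
        "type": "m.room.member",
        "state_key": "@alice:example.org",
        "content": { "membership": "leave" },
    });
    // Content of the membership event this one replaces:
    let previous = json!({ "membership": "join" });

    inject_prev_content(&mut pdu, previous);

    assert_eq!(pdu["unsigned"]["prev_content"]["membership"], "join");
}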
- .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? { - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - 
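// Editor's note: a sketch of how message bodies are turned into search-index
// keys (the tokenids batch above): the body is split on non-alphanumeric
// characters, empty and over-long tokens are dropped, everything is
// lowercased, and each word becomes a key of the form
//   shortroomid (8 bytes BE) | word | 0xff | pdu_id
// Names are illustrative; the 50-character cutoff mirrors the code above.

fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn token_keys(shortroomid: u64, pdu_id: &[u8], body: &str) -> Vec<Vec<u8>> {
    tokenize(body)
        .map(|word| {
            let mut key = shortroomid.to_be_bytes().to_vec();
            key.extend_from_slice(word.as_bytes());
            key.push(0xff);
            key.extend_from_slice(pdu_id);
            key
        })
        .collect()
}

fn main() {
    let words: Vec<_> = tokenize("Hello, Matrix world!").collect();
    assert_eq!(words, ["hello", "matrix", "world"]);

    let keys = token_keys(7, b"\x00\x01", "Hello world");
    assert_eq!(keys.len(), 2);
    assert!(keys[0].starts_with(&7u64.to_be_bytes()));
    assert!(keys[0].ends_with(b"\x00\x01"));
}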
#[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) @@ -1770,1734 +507,3 @@ impl Rooms { Ok(()) } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. 
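// Editor's note: a sketch of the per-user, per-room counter layout used by
// notification_count / highlight_count above. The key is
//   user_id | 0xff | room_id
// and the value is a big-endian u64 that reset_notification_counts writes
// back as zero. The HashMap stands in for the database tree; names are
// illustrative.

use std::collections::HashMap;
use std::convert::TryInto;

fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn notification_count(tree: &HashMap<Vec<u8>, Vec<u8>>, user_id: &str, room_id: &str) -> u64 {
    tree.get(&userroom_key(user_id, room_id))
        .and_then(|bytes| bytes.as_slice().try_into().ok())
        .map(u64::from_be_bytes)
        .unwrap_or(0) // a missing entry counts as zero, like the code above
}

fn main() {
    let mut tree = HashMap::new();
    tree.insert(
        userroom_key("@alice:example.org", "!room:example.org"),
        3u64.to_be_bytes().to_vec(),
    );
    assert_eq!(notification_count(&tree, "@alice:example.org", "!room:example.org"), 3);
    assert_eq!(notification_count(&tree, "@bob:example.org", "!room:example.org"), 0);
}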
- #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - 
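// Editor's note: a sketch of the depth calculation in build_and_append_pdu
// above: a new event's depth is one more than the maximum depth of the
// prev_events it points at, or 1 for the very first event in a room, whose
// prev_events list is empty. Plain u64s stand in for the js_int UInt used by
// the real code.

fn next_depth(prev_event_depths: &[u64]) -> u64 {
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}

fn main() {
    assert_eq!(next_depth(&[]), 1);        // e.g. m.room.create
    assert_eq!(next_depth(&[4, 7, 7]), 8); // several forward extremities
}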
ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
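// Editor's note: a sketch of how the recipient set for federation sending is
// built after the pdu is appended (see the servers HashSet above): start from
// every server currently in the room, make sure the target user's server is
// included when the event is a membership change (their server may no longer
// appear in the room's server list), and drop our own server name. The types
// and names here are simplified stand-ins.

use std::collections::HashSet;

fn sending_targets(
    servers_in_room: impl IntoIterator<Item = String>,
    own_server: &str,
    membership_target_server: Option<&str>,
) -> HashSet<String> {
    let mut servers: HashSet<String> = servers_in_room.into_iter().collect();
    if let Some(target) = membership_target_server {
        // Inform the kicked/banned/invited user's server even if it no longer
        // appears in the room's server list.
        servers.insert(target.to_owned());
    }
    // We never send to ourselves.
    servers.remove(own_server);
    servers
}

fn main() {
    let targets = sending_targets(
        vec!["conduit.example".to_owned(), "matrix.org".to_owned()],
        "conduit.example",
        Some("banned.example"),
    );
    assert!(targets.contains("matrix.org"));
    assert!(targets.contains("banned.example"));
    assert!(!targets.contains("conduit.example"));
}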
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
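// Editor's note: a sketch of the appservice namespace check used above when
// deciding whether to forward a pdu to an appservice: the registration lists
// regexes for user ids and room aliases plus explicit room ids, and the pdu is
// forwarded if its sender (or, for membership events, its state_key) matches a
// user regex, if the room is listed explicitly, or if one of the room's
// aliases matches. Requires the regex crate; the struct below is a simplified
// stand-in for the parsed registration YAML, not Conduit's actual types.

use regex::Regex;

struct Namespaces {
    users: Vec<Regex>,
    aliases: Vec<Regex>,
    rooms: Vec<String>,
}

fn appservice_interested(
    ns: &Namespaces,
    sender: &str,
    state_key: Option<&str>,
    room_id: &str,
    room_aliases: &[String],
) -> bool {
    let matching_user =
        |re: &Regex| re.is_match(sender) || state_key.map_or(false, |sk| re.is_match(sk));
    ns.users.iter().any(matching_user)
        || ns.rooms.iter().any(|r| r == room_id)
        || ns
            .aliases
            .iter()
            .any(|re| room_aliases.iter().any(|a| re.is_match(a)))
}

fn main() {
    let ns = Namespaces {
        users: vec![Regex::new(r"^@_bridge_.*:example\.org$").unwrap()],
        aliases: vec![Regex::new(r"^#bridged_.*:example\.org$").unwrap()],
        rooms: vec![],
    };
    assert!(appservice_interested(
        &ns,
        "@_bridge_alice:example.org",
        None,
        "!abc:example.org",
        &[],
    ));
    assert!(!appservice_interested(
        &ns,
        "@carol:example.org",
        None,
        "!abc:example.org",
        &["#general:example.org".to_owned()],
    ));
}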
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
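// Editor's note: a sketch of the pagination key arithmetic behind
// pdus_until / pdus_after above. Timeline keys are
//   shortroomid (BE) | count (BE)
// so "everything before token `until`" starts the reverse scan at count
// `until - 1`, and "everything after token `from`" starts the forward scan at
// `from + 1` (both ends exclusive, which is why the token itself is skipped).
// A BTreeMap stands in for the ordered key-value tree; names are illustrative.

use std::collections::BTreeMap;
use std::ops::Bound;

fn key(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut k = shortroomid.to_be_bytes().to_vec();
    k.extend_from_slice(&count.to_be_bytes());
    k
}

fn pdus_until(tree: &BTreeMap<Vec<u8>, String>, room: u64, until: u64) -> Vec<String> {
    let start = key(room, until.saturating_sub(1)); // -1: skip the event at `until`
    let prefix = room.to_be_bytes();
    tree.range((Bound::Unbounded, Bound::Included(start)))
        .rev()
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| v.clone())
        .collect()
}

fn pdus_after(tree: &BTreeMap<Vec<u8>, String>, room: u64, from: u64) -> Vec<String> {
    let start = key(room, from + 1); // +1: skip the event at `from`
    let prefix = room.to_be_bytes();
    tree.range((Bound::Included(start), Bound::Unbounded))
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| v.clone())
        .collect()
}

fn main() {
    let mut tree = BTreeMap::new();
    for count in 1..=5u64 {
        tree.insert(key(1, count), format!("event {}", count));
    }
    assert_eq!(pdus_until(&tree, 1, 4), ["event 3", "event 2", "event 1"]);
    assert_eq!(pdus_after(&tree, 1, 3), ["event 4", "event 5"]);
}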
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
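// Editor's note: a sketch of the intersection step behind search_pdus above.
// Every word yields a list of matching pdu ids, newest first (descending key
// order, hence the reversed `b.cmp(a)` comparator), and only ids present in
// every per-word list survive. This is a simplified stand-in for
// utils::common_elements, written for descending u64 ids instead of raw key
// bytes; names are illustrative.

fn common_elements_desc(mut lists: Vec<Vec<u64>>) -> Vec<u64> {
    let base = match lists.pop() {
        Some(list) => list,
        None => return Vec::new(),
    };
    base.into_iter()
        .filter(|id| {
            lists
                .iter()
                // The comparison is reversed because the lists are sorted descending.
                .all(|other| other.binary_search_by(|x| id.cmp(x)).is_ok())
        })
        .collect()
}

fn main() {
    // pdu ids matching "hello" and "world" respectively, newest first:
    let hello = vec![90, 70, 40, 10];
    let world = vec![95, 70, 40, 20];
    assert_eq!(common_elements_desc(vec![hello, world]), [70u64, 40]);
}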
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
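// Editor's note: a sketch of the key-decoding idiom repeated by room_servers,
// room_members, rooms_joined and friends above: composite keys end with
//   ... | 0xff | <id>
// and the id is recovered by taking the segment after the last 0xff with
// rsplit. Names are illustrative.

fn last_segment(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element")
}

fn main() {
    let mut key = b"!room:example.org".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");

    assert_eq!(last_segment(&key), &b"@alice:example.org"[..]);
    // A key without a separator yields the whole key:
    assert_eq!(last_segment(b"plain"), &b"plain"[..]);
}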
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
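get_auth_chain_from_cache and cache_auth_chain above keep a two-tier cache: an in-memory map in front of the shorteventid_authchain tree, where a whole auth chain is persisted as concatenated big-endian u64 bytes. Assuming the chain is a set of u64 short event IDs, consistent with the chunks_exact decoding above, a sketch of the byte round trip:

use std::{collections::HashSet, convert::TryInto, mem::size_of};

/// Flatten a chain of short event IDs into the layout used by
/// `shorteventid_authchain`: each u64 as 8 big-endian bytes, concatenated.
fn chain_to_bytes(chain: &HashSet<u64>) -> Vec<u8> {
    chain.iter().flat_map(|s| s.to_be_bytes()).collect()
}

/// Parse the stored bytes back into the chain.
fn chain_from_bytes(bytes: &[u8]) -> HashSet<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}

fn main() {
    let chain: HashSet<u64> = vec![1, 5, 9].into_iter().collect();
    let bytes = chain_to_bytes(&chain);
    assert_eq!(bytes.len(), 3 * size_of::<u64>());
    assert_eq!(chain_from_bytes(&bytes), chain);
}

The in-memory layer in the real code is an LruCache behind a Mutex; a plain map would work the same way, only without eviction.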
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache.rs index 4ad815e..e7f457e 100644 --- a/src/service/rooms/state_cache.rs +++ b/src/service/rooms/state_cache.rs @@ -1,2220 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
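get_room_version above deserializes the content of the m.room.create state event only to read its room_version field. A hedged, dependency-light sketch of that extraction; the local CreateContent struct and the String version are stand-ins for ruma's RoomCreateEventContent and RoomVersionId, and the default of "1" follows the Matrix spec for an absent field:

use serde::Deserialize;

/// Minimal stand-in for the create-event content: we only care about `room_version`.
#[derive(Deserialize)]
struct CreateContent {
    #[serde(default = "default_room_version")]
    room_version: String,
}

fn default_room_version() -> String {
    "1".to_owned()
}

fn room_version_from_create_content(content_json: &str) -> Result<String, serde_json::Error> {
    let content: CreateContent = serde_json::from_str(content_json)?;
    Ok(content.room_version)
}

fn main() {
    let content = r#"{"creator":"@alice:example.org","room_version":"6"}"#;
    assert_eq!(room_version_from_create_content(content).unwrap(), "6");
}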
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
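calculate_hash above derives a StateHashId by joining the byte representations of all state entries with 0xff and hashing the result with SHA-256. A sketch of the same computation using the ring crate already imported by this module:

use ring::digest;

/// Hash a list of byte strings joined by 0xff, as `calculate_hash` does for state hashes.
fn calculate_hash(bytes_list: &[&[u8]]) -> Vec<u8> {
    let joined = bytes_list.join(&0xff);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}

fn main() {
    let ids: [&[u8]; 2] = [b"$event1:example.org", b"$event2:example.org"];
    let a = calculate_hash(&ids);
    let b = calculate_hash(&ids);
    assert_eq!(a, b); // deterministic: identical state sets hash to the same StateHashId
}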
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
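replace_pdu_leaves above swaps a room's forward extremities by deleting every key under the room_id ++ 0xff prefix and then writing one key per new leaf. A sketch of the same replace-by-prefix logic over an ordered map standing in for the roomid_pduleaves tree:

use std::collections::BTreeMap;

/// A stand-in for one ordered key/value tree (the role of roomid_pduleaves).
type Tree = BTreeMap<Vec<u8>, Vec<u8>>;

/// Replace a room's forward extremities: drop every existing
/// `room_id ++ 0xff ++ event_id` key, then insert one key per new leaf.
fn replace_pdu_leaves(tree: &mut Tree, room_id: &[u8], new_leaves: &[&[u8]]) {
    let mut prefix = room_id.to_vec();
    prefix.push(0xff);

    // Collect the old leaf keys first so we do not mutate while iterating.
    let old_keys: Vec<Vec<u8>> = tree
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(k, _)| k.clone())
        .collect();
    for key in old_keys {
        tree.remove(&key);
    }

    for event_id in new_leaves {
        let mut key = prefix.clone();
        key.extend_from_slice(event_id);
        tree.insert(key, event_id.to_vec());
    }
}

fn main() {
    let mut tree = Tree::new();
    let first: [&[u8]; 2] = [b"$a:example.org", b"$b:example.org"];
    let second: [&[u8]; 1] = [b"$c:example.org"];
    replace_pdu_leaves(&mut tree, b"!room:example.org", &first);
    replace_pdu_leaves(&mut tree, b"!room:example.org", &second);
    assert_eq!(tree.len(), 1); // only the newest leaf remains
}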
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
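The appservice loop above forwards a PDU when the sender, or the state_key of an m.room.member event, matches one of the appservice's user-namespace regexes. A rough sketch of that predicate using the regex crate; the function name and input shape are illustrative, not the crate's API:

use regex::Regex;

// Decide whether an appservice with the given user-namespace regexes should
// receive an event, mirroring the matching done in build_and_append_pdu.
fn appservice_interested(
    user_regexes: &[Regex],
    sender: &str,
    member_state_key: Option<&str>,
) -> bool {
    user_regexes.iter().any(|re| {
        re.is_match(sender)
            || member_state_key.map_or(false, |state_key| re.is_match(state_key))
    })
}

fn main() {
    let ns = vec![Regex::new(r"^@irc_.*:example\.org$").unwrap()];
    assert!(appservice_interested(&ns, "@irc_alice:example.org", None));
    assert!(!appservice_interested(&ns, "@bob:example.org", None));
}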
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
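pdus_since, pdus_until and pdus_after all scan the same key shape: an 8-byte shortroomid prefix followed by an 8-byte count. The sketch below emulates the forward scan of pdus_since, with a BTreeMap standing in for the pduid_pdu tree; the helper name and the in-memory map are assumptions for illustration only:

use std::collections::BTreeMap;

// PduId = 8-byte shortroomid ++ 8-byte count; start one past `since`
// and stop as soon as the room prefix no longer matches.
fn pdu_ids_since(db: &BTreeMap<Vec<u8>, String>, shortroomid: u64, since: u64) -> Vec<String> {
    let prefix = shortroomid.to_be_bytes().to_vec();
    let mut first = prefix.clone();
    first.extend_from_slice(&(since + 1).to_be_bytes());

    db.range(first..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| v.clone())
        .collect()
}

fn main() {
    let mut db = BTreeMap::new();
    for count in 1u64..=3 {
        let mut key = 9u64.to_be_bytes().to_vec();
        key.extend_from_slice(&count.to_be_bytes());
        db.insert(key, format!("pdu {}", count));
    }
    // Skips the event exactly at `since`, just like the real iterator.
    assert_eq!(pdu_ids_since(&db, 9, 1), vec!["pdu 2", "pdu 3"]);
}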
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } /// Update current membership data. #[tracing::instrument(skip(self, last_state, db))] @@ -2569,211 +352,6 @@ impl Rooms { } } - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? - leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - /// Makes a user forget a room. 
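remote_leave_room picks candidate servers by taking the server part of every sender found in the stripped invite state. A simplified std-only sketch of that selection, using plain string splitting instead of ruma's UserId parsing; the input shape is illustrative:

use std::collections::HashSet;

// Candidate servers to assist with a remote leave: the server part of
// every sender found in the stripped invite state.
fn candidate_servers(invite_state_senders: &[&str]) -> HashSet<String> {
    invite_state_senders
        .iter()
        // A Matrix user id looks like "@localpart:server.name".
        .filter_map(|sender| sender.split_once(':').map(|(_, server)| server.to_owned()))
        .collect()
}

fn main() {
    let servers = candidate_servers(&["@alice:remote.example", "@bob:remote.example"]);
    assert_eq!(servers.len(), 1);
    assert!(servers.contains("remote.example"));
}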
#[tracing::instrument(skip(self))] pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { @@ -2791,198 +369,6 @@ impl Rooms { Ok(()) } - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
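set_alias stores each alias under a key of the room id bytes, a 0xff separator, and a counter, which is why deleting an alias mapping is a prefix scan. A small sketch of the key layout and the prefix removal, with a BTreeMap standing in for aliasid_alias and hypothetical helper names:

use std::collections::BTreeMap;

// aliasid_alias keys: room id bytes ++ 0xff ++ monotonically increasing count.
fn alias_key(room_id: &str, count: u64) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key
}

// Removing all aliases for a room is a prefix scan, as in set_alias.
fn remove_room_aliases(tree: &mut BTreeMap<Vec<u8>, Vec<u8>>, room_id: &str) {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    let doomed: Vec<_> = tree
        .keys()
        .filter(|k| k.starts_with(&prefix))
        .cloned()
        .collect();
    for key in doomed {
        tree.remove(&key);
    }
}

fn main() {
    let mut tree = BTreeMap::new();
    tree.insert(alias_key("!room:server", 1), b"#alias:server".to_vec());
    remove_room_aliases(&mut tree, "!room:server");
    assert!(tree.is_empty());
}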
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - /// Returns an iterator of all servers participating in this room. #[tracing::instrument(skip(self))] pub fn room_servers<'a>( @@ -3166,10 +552,6 @@ impl Rooms { .transpose() } - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined<'a>( @@ -3333,171 +715,3 @@ impl Rooms { Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor.rs index 4ad815e..a56c0f5 100644 --- a/src/service/rooms/state_compressor.rs +++ b/src/service/rooms/state_compressor.rs @@ -1,478 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
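The CompressedStateEvent alias above is two u64s back to back, the big-endian shortstatekey followed by the big-endian shorteventid, assuming the elided type parameter of size_of is u64, which matches the 8 + 8 byte layout used throughout this file. A sketch of decoding one such entry:

use std::mem::size_of;

// 2 * size_of::<u64>() = 16 bytes: shortstatekey ++ shorteventid (big-endian).
type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

fn parse_compressed(compressed: &CompressedStateEvent) -> (u64, u64) {
    let shortstatekey = u64::from_be_bytes(compressed[..8].try_into().expect("8 bytes"));
    let shorteventid = u64::from_be_bytes(compressed[8..].try_into().expect("8 bytes"));
    (shortstatekey, shorteventid)
}

fn main() {
    let mut compressed: CompressedStateEvent = [0; 16];
    compressed[..8].copy_from_slice(&42u64.to_be_bytes());
    compressed[8..].copy_from_slice(&7u64.to_be_bytes());
    assert_eq!(parse_compressed(&compressed), (42, 7));
}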
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
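The shortstatehash_statediff comment above describes the stored value as: the parent shortstatehash (or 0), the added compressed events, a 0_u64 separator, then the removed ones. A sketch of an encoder for exactly that layout; encode_statediff is a hypothetical helper, not the crate's function:

// Value layout of shortstatehash_statediff, as described in the field comment:
// parent (or 0) ++ added compressed events ++ 0_u64 ++ removed compressed events.
fn encode_statediff(parent: u64, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for entry in added {
        value.extend_from_slice(entry);
    }
    value.extend_from_slice(&0_u64.to_be_bytes()); // separator between added and removed
    for entry in removed {
        value.extend_from_slice(entry);
    }
    value
}

fn main() {
    let diff = encode_statediff(3, &[[1; 16]], &[[2; 16]]);
    assert_eq!(diff.len(), 8 + 16 + 8 + 16);
}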
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
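state_full_ids and state_full yield back to the Tokio scheduler every 100 entries so that loading the state of a very large room cannot starve other tasks. A minimal sketch of that pattern, assuming a tokio dependency with the rt and macros features enabled; the function and data are placeholders:

// Cooperative yielding while walking a long collection.
async fn process_all(items: Vec<u64>) -> u64 {
    let mut sum = 0;
    for (i, item) in items.into_iter().enumerate() {
        sum += item; // stand-in for parsing one compressed state event
        if (i + 1) % 100 == 0 {
            tokio::task::yield_now().await; // let other tasks run
        }
    }
    sum
}

#[tokio::main]
async fn main() {
    let total = process_all((1..=250).collect()).await;
    assert_eq!(total, 250 * 251 / 2);
}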
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
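calculate_hash joins the state's event-id bytes with a 0xff separator and hashes the result with SHA-256; that digest, not the event list itself, becomes the StateHashId. A small sketch using the same ring call, with state_hash as an illustrative name:

use ring::digest;

// Join all event-id bytes with 0xff, then hash the result.
fn state_hash(event_ids: &[&[u8]]) -> Vec<u8> {
    let joined = event_ids.join(&0xff);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}

fn main() {
    let state_a: [&[u8]; 2] = [b"$event_a", b"$event_b"];
    let state_b: [&[u8]; 2] = [b"$event_a", b"$event_c"];
    let (a, b) = (state_hash(&state_a), state_hash(&state_b));
    assert_eq!(a.len(), 32); // SHA-256 output length
    assert_ne!(a, b); // different state, different StateHashId
}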
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] @@ -716,2623 +241,6 @@ impl Rooms { Ok(()) } - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? 
{ - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? 
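Because a PduId is the shortroomid followed by the count, pdu_count only has to decode the trailing eight bytes of the key. A sketch of that suffix decoding, written as an Option-returning helper for illustration; the real method returns Result and reports a database error instead:

use std::mem::size_of;

// Decode the count from the trailing 8 bytes of a PduId.
fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    let suffix = pdu_id.get(pdu_id.len().checked_sub(size_of::<u64>())?..)?;
    Some(u64::from_be_bytes(suffix.try_into().ok()?))
}

fn main() {
    let mut pdu_id = 9u64.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&1234u64.to_be_bytes());
    assert_eq!(pdu_count(&pdu_id), Some(1234));
}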
- .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. 
- #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. 
This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? { - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - 
.map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
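Illustration (a standalone sketch, not part of the original code): the counters and search tokens above all use the same key scheme, raw byte components joined by a 0xff separator, with counts serialized as big-endian u64 so they order and parse consistently. The identifiers and values below are made-up examples.

// Sketch of the 0xff-separated key layout used above; names are illustrative only.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff); // separator; valid UTF-8 never contains a lone 0xff byte
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn token_key(shortroomid: u64, word: &str, pdu_id: &[u8]) -> Vec<u8> {
    let mut key = shortroomid.to_be_bytes().to_vec(); // big-endian keeps keys grouped by room
    key.extend_from_slice(word.as_bytes());
    key.push(0xff);
    key.extend_from_slice(pdu_id); // pdu_id itself is shortroomid ++ count, also big-endian
    key
}

fn main() {
    let key = userroom_key("@alice:example.org", "!room:example.org");
    assert!(key.contains(&0xff));

    // Counter values round-trip through big-endian bytes.
    let count: u64 = 7;
    assert_eq!(u64::from_be_bytes(count.to_be_bytes()), 7);

    let _token = token_key(1, "hello", &key);
}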
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid 
event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
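Illustration (a standalone sketch, not part of the original code): `remote_leave_room` above asks a remote server for a leave template (`prepare_leave_event`), fills in `origin_server_ts`, drops any `event_id`, signs the stub, derives the event id from its reference hash, and hands the finished event back via `create_leave_event`. The candidate servers it tries are simply the home servers of the senders found in the stripped invite state; the snippet below sketches that extraction with made-up user IDs.

use std::collections::HashSet;

/// Sketch: candidate servers for the leave handshake, taken from the senders
/// of the stripped invite-state events.
fn candidate_servers(invite_state_senders: &[&str]) -> HashSet<String> {
    invite_state_senders
        .iter()
        // A Matrix user ID looks like "@localpart:server.name";
        // everything after the first ':' is the server name.
        .filter_map(|sender| sender.split_once(':').map(|(_, server)| server.to_owned()))
        .collect()
}

fn main() {
    let servers = candidate_servers(&["@alice:remote.example", "@bob:other.example"]);
    assert!(servers.contains("remote.example"));
    assert_eq!(servers.len(), 2);
}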
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
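Illustration (a standalone sketch, not part of the original code): `room_servers`, `server_rooms` and `room_members` above all recover the trailing key component the same way, by splitting the composite key on the 0xff separator from the right. The key below is a made-up example of the room ++ 0xff ++ user layout built in `update_membership`.

// Sketch of the "scan_prefix + rsplit on 0xff" extraction used by the iterators above.
fn last_component(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element")
}

fn main() {
    let mut key = b"!room:example.org".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");

    assert_eq!(last_component(&key), b"@alice:example.org");
}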
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
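Illustration (a standalone sketch, not part of the original code): the membership trees above store an empty value, so the `is_*` checks reduce to key existence, while invite/left counts are global counter values stored as big-endian u64. A BTreeMap stands in for the ordered database tree; the keys are made up.

use std::collections::BTreeMap;

fn main() {
    let mut userroomid_joined: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    let mut roomuserid_invitecount: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();

    // Made-up composite keys: user ++ 0xff ++ room and room ++ 0xff ++ user.
    let userroom_id = b"@alice:example.org\xff!room:example.org".to_vec();
    let roomuser_id = b"!room:example.org\xff@alice:example.org".to_vec();

    // Joined/invited/left membership is "the key exists"; the value stays empty.
    userroomid_joined.insert(userroom_id.clone(), Vec::new());
    assert!(userroomid_joined.get(&userroom_id).is_some());

    // Counts are counter values serialized as big-endian u64.
    let next_count: u64 = 42;
    roomuserid_invitecount.insert(roomuser_id.clone(), next_count.to_be_bytes().to_vec());

    let parsed = roomuserid_invitecount.get(&roomuser_id).map(|bytes| {
        let mut buf = [0u8; 8];
        buf.copy_from_slice(bytes);
        u64::from_be_bytes(buf)
    });
    assert_eq!(parsed, Some(42));
}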
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - #[tracing::instrument(skip(self))] pub fn get_auth_chain_from_cache<'a>( &'a self, @@ -3391,113 +299,3 @@ impl Rooms { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
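Illustration (a standalone sketch, not part of the original code): the lazy-loading helpers just above are a two-phase scheme. `lazy_load_mark_sent` parks the member IDs that went out with a sync response in an in-memory map keyed by (user, device, room, token), and `lazy_load_confirm_delivery` only persists them once the client syncs again with that token as its `since`, so a lost response is never counted as delivered. The sketch below uses simplified String/u64 stand-ins for the real ID types and the database tree.

use std::collections::{HashMap, HashSet};

struct LazyLoad {
    waiting: HashMap<(String, u64), HashSet<String>>, // (device key, sync token) -> members sent
    delivered: HashSet<(String, String)>,             // (device key, member) confirmed delivered
}

impl LazyLoad {
    fn mark_sent(&mut self, device: &str, token: u64, members: HashSet<String>) {
        self.waiting.insert((device.to_owned(), token), members);
    }

    fn confirm_delivery(&mut self, device: &str, since: u64) {
        // Only when the client comes back with the token we handed out do we
        // treat the previous response as delivered.
        if let Some(members) = self.waiting.remove(&(device.to_owned(), since)) {
            for member in members {
                self.delivered.insert((device.to_owned(), member));
            }
        }
    }
}

fn main() {
    let mut ll = LazyLoad { waiting: HashMap::new(), delivered: HashSet::new() };
    ll.mark_sent("@alice:example.org/DEVICE", 10, ["@bob:example.org".to_owned()].into());

    // Nothing is persisted until the client acknowledges token 10.
    assert!(ll.delivered.is_empty());

    ll.confirm_delivery("@alice:example.org/DEVICE", 10);
    assert!(ll
        .delivered
        .contains(&("@alice:example.org/DEVICE".to_owned(), "@bob:example.org".to_owned())));
}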
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/timeline.rs b/src/service/rooms/timeline.rs index 4ad815e..fd93344 100644 --- a/src/service/rooms/timeline.rs +++ b/src/service/rooms/timeline.rs @@ -1,347 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
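Illustration (a standalone sketch, not part of the original code): `CompressedStateEvent` above is a fixed-size byte array of two 8-byte halves (the extraction dropped the generic argument; it is presumably `2 * size_of::<u64>()`, matching the big-endian shortkeys used throughout): the shortstatekey followed by the shorteventid. That is why `append_to_state` earlier can find a replaced state event with `starts_with(&shortstatekey.to_be_bytes())`.

use std::mem::size_of;

// Assumed layout: big-endian shortstatekey (8 bytes) ++ big-endian shorteventid (8 bytes).
type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> CompressedStateEvent {
    let mut out = [0u8; 2 * size_of::<u64>()];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: &CompressedStateEvent) -> (u64, u64) {
    let mut key = [0u8; 8];
    let mut event = [0u8; 8];
    key.copy_from_slice(&compressed[..8]);
    event.copy_from_slice(&compressed[8..]);
    (u64::from_be_bytes(key), u64::from_be_bytes(event))
}

fn main() {
    let c = compress(3, 99);
    // State lookups only need to compare the leading shortstatekey bytes.
    assert!(c.starts_with(&3u64.to_be_bytes()));
    assert_eq!(parse(&c), (3, 99));
}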
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } /// Checks if a room exists. #[tracing::instrument(skip(self))] @@ -365,638 +21,37 @@ impl Rooms { .transpose() } - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
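calculate_hash above derives a state hash by joining the event-id bytes with a 0xff separator and hashing the result with SHA-256 via the ring crate this module already uses. A minimal standalone sketch of the same construction; the helper name is illustrative:

    use ring::digest;

    fn state_hash(event_id_bytes: &[&[u8]]) -> Vec<u8> {
        let joined = event_id_bytes.join(&0xff_u8);
        digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
    }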
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache .lock() .unwrap() - .get_mut(&shortstatehash) + .entry(room_id.to_owned()) { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
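The byte format parsed by load_shortstatehash_info above (and written by save_state_from_diff below) is: an 8-byte big-endian parent shortstatehash (0 meaning "no parent"), the added compressed events, and, only when something was removed, an 8-byte zero sentinel followed by the removed compressed events. A sketch of the decoder that mirrors the loop above; the helper and the 16-byte Compressed alias are illustrative:

    type Compressed = [u8; 16]; // shortstatekey (8 bytes) + shorteventid (8 bytes)

    fn parse_statediff(value: &[u8]) -> (u64, Vec<Compressed>, Vec<Compressed>) {
        let parent = u64::from_be_bytes(value[0..8].try_into().unwrap());

        let (mut added, mut removed) = (Vec::new(), Vec::new());
        let mut add_mode = true;
        let mut i = 8;
        while let Some(chunk) = value.get(i..i + 16) {
            if add_mode && chunk.starts_with(&0u64.to_be_bytes()) {
                // The all-zero u64 marks the switch from "added" to "removed".
                add_mode = false;
                i += 8;
                continue;
            }
            let entry: Compressed = chunk.try_into().unwrap();
            if add_mode { added.push(entry) } else { removed.push(entry) }
            i += 16;
        }
        (parent, added, removed)
    }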
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
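compress_state_event and parse_compressed_state_event above encode a state entry as a fixed 16-byte value: the big-endian shortstatekey followed by the big-endian shorteventid. Because the shortstatekey forms the prefix, state_get_id can locate the event for a given (event type, state key) by scanning a full state for entries starting with those 8 bytes. A standalone sketch of the round trip, with illustrative helper names:

    fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
        let mut out = [0u8; 16];
        out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
        out[8..].copy_from_slice(&shorteventid.to_be_bytes());
        out
    }

    fn decompress(compressed: [u8; 16]) -> (u64, u64) {
        (
            u64::from_be_bytes(compressed[..8].try_into().unwrap()),
            u64::from_be_bytes(compressed[8..].try_into().unwrap()),
        )
    }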
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(&sender_user, &room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() + { + Ok(*v.insert(last_count?)) + } else { + Ok(0) } } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? 
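The branch above decides whether a new state diff is stacked as its own layer or folded into the parent layer. As a worked example of the inequality (values chosen only for illustration): with diff_to_sibling = 2, the average passed by force_state above, and a parent diff of 50 entries, any change of 15 or more entries rewrites the parent layer, since 15 * 15 = 225 >= 2 * 2 * 50 = 200, while a 14-entry change (14 * 14 = 196) stays below the threshold and is appended on top instead.

    // Illustrative predicate mirroring the condition above.
    fn replaces_parent_layer(diffsum: usize, diff_to_sibling: usize, parent_diff: usize) -> bool {
        diffsum * diffsum >= 2 * diff_to_sibling * parent_diff
    }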
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) + hash_map::Entry::Occupied(o) => Ok(*o.get()), } } - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - + // TODO Is this the same as the function above? #[tracing::instrument(skip(self))] pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { let prefix = self @@ -1017,6 +72,16 @@ impl Rooms { .map(|op| op.unwrap_or_default()) } + + + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pdu_id| self.pdu_count(&pdu_id)) + .transpose() + } + /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -1145,6 +210,12 @@ impl Rooms { }) } + /// Returns the `count` of this pdu's id. + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + } + /// Removes a pdu and creates a new one with the same id. 
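pdu_count above relies on every pdu id ending in the 8-byte big-endian count, whatever fixed prefix (short room id, or short room id plus search token) comes before it. A standalone sketch of that extraction; the helper name is illustrative:

    fn count_from_pdu_id(pdu_id: &[u8]) -> Option<u64> {
        let tail = pdu_id.get(pdu_id.len().checked_sub(8)?..)?;
        Some(u64::from_be_bytes(tail.try_into().ok()?))
    }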
#[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { @@ -1162,99 +233,6 @@ impl Rooms { } } - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -1512,296 +490,6 @@ impl Rooms { Ok(pdu_id) } - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - /// Creates a new persisted data unit and adds it to a room. #[tracing::instrument(skip(self, db, _mutex_lock))] pub fn build_and_append_pdu( @@ -2216,1288 +904,3 @@ impl Rooms { Ok(()) } - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. 
- // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let 
mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - 
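update_joined_count above keeps the two directional indexes (room -> servers and server -> rooms) in sync by comparing the servers seen while counting members against the currently stored set: stored servers not seen again are deleted from both trees, and whatever remains in the freshly built set is new and gets inserted. A self-contained sketch of that reconciliation step, using plain strings instead of ServerName; the helper is illustrative:

    use std::collections::HashSet;

    /// Returns (servers to delete, servers to insert).
    fn reconcile(stored: &HashSet<String>, mut current: HashSet<String>) -> (Vec<String>, Vec<String>) {
        let mut stale = Vec::new();
        for old in stored {
            if !current.remove(old) {
                stale.push(old.clone());
            }
        }
        // Everything still in `current` was not stored before.
        (stale, current.into_iter().collect())
    }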
- self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? - leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. 
- #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
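search_pdus above splits the query into lowercased alphanumeric terms and, for each term, prefix-scans the tokenids tree (whose keys, per the tokenids comment in the struct above, are shortroomid + token + pdu count) in reverse so newer messages come first; the per-term iterators are then intersected. A sketch of the term extraction, which the indexing side would have to mirror for lookups to match:

    fn tokenize(body: &str) -> Vec<String> {
        body.split_terminator(|c: char| !c.is_alphanumeric())
            .filter(|s| !s.is_empty())
            .map(str::to_lowercase)
            .collect()
    }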
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
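The invite/left/joined counters read in the getters above are plain u64 values serialized as big-endian bytes. A small sketch of the round trip, reusing the same utils helper the surrounding code already calls (the function names here are assumptions):

    fn encode_count(count: u64) -> [u8; 8] {
        count.to_be_bytes() // big-endian, so raw byte order matches numeric order
    }

    fn decode_count(bytes: &[u8]) -> Result<u64> {
        utils::u64_from_bytes(bytes).map_err(|_| Error::bad_database("Invalid count in db."))
    }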
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
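Taken together, the lazy-loading helpers above implement a two-phase handshake: member events sent in one sync response are only persisted as delivered once the client returns with that response's batch token. A hypothetical caller sketch (the function names and the exact set type are assumptions of this sketch, not taken from the patch):

    fn after_sync_response(rooms: &Rooms, user: &UserId, device: &DeviceId, room: &RoomId,
                           sent_members: HashSet<Box<UserId>>, next_batch: u64) {
        // Remember which member events went out, keyed by the count the client
        // will echo back as `since` in its next /sync request.
        rooms.lazy_load_mark_sent(user, device, room, sent_members, next_batch);
    }

    fn on_next_sync(rooms: &Rooms, user: &UserId, device: &DeviceId, room: &RoomId,
                    since: u64) -> Result<()> {
        // The client proved it saw the previous response; persist the set so those
        // members are not lazy-loaded again for this device.
        rooms.lazy_load_confirm_delivery(user, device, room, since)
    }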
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/user.rs b/src/service/rooms/user.rs index 4ad815e..976ab5b 100644 --- a/src/service/rooms/user.rs +++ b/src/service/rooms/user.rs @@ -1,1546 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
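force_state below fingerprints its snapshot the same way calculate_hash above does: the compressed state events are joined with 0xff and SHA-256 hashed, and that hash is what get_or_create_shortstatehash deduplicates on. A compact sketch of that step (hypothetical helper name, assuming the shortstatekey+shorteventid byte layout of CompressedStateEvent):

    fn state_fingerprint(compressed_state: &[CompressedStateEvent]) -> StateHashId {
        // Collect the fixed-size compressed events as byte slices and hash them
        // joined by the usual 0xff separator, mirroring calculate_hash.
        let bytes: Vec<&[u8]> = compressed_state.iter().map(|c| &c[..]).collect();
        digest::digest(&digest::SHA256, &bytes.join(&0xff)).as_ref().to_vec()
    }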
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
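get_pdu above layers an in-memory LRU cache over a two-step lookup. Stripped of the cache, the order it checks is the following (a free-standing sketch with a hypothetical name, ignoring field visibility and the pdu_cache):

    fn lookup_pdu_bytes(rooms: &Rooms, event_id: &EventId) -> Result<Option<Vec<u8>>> {
        if let Some(pdu_id) = rooms.eventid_pduid.get(event_id.as_bytes())? {
            // The event made it into the timeline: pdu_id = shortroomid ++ count.
            return rooms.pduid_pdu.get(&pdu_id);
        }
        // Otherwise it may only exist as an outlier (accepted but not yet timelined).
        rooms.eventid_outlierpdu.get(event_id.as_bytes())
    }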
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } #[tracing::instrument(skip(self))] pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -1586,191 +43,6 @@ impl Rooms { .unwrap_or(Ok(0)) } - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
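set_event_state above (like force_state earlier) reduces a full snapshot to a diff against the newest parent layer before persisting it; the core of that computation is just two set differences. A sketch with a hypothetical helper name:

    fn diff_against_parent(
        new_state: &HashSet<CompressedStateEvent>,
        parent_state: &HashSet<CompressedStateEvent>,
    ) -> (HashSet<CompressedStateEvent>, HashSet<CompressedStateEvent>) {
        // Entries only in the new snapshot were added; entries only in the parent were removed.
        let added = new_state.difference(parent_state).copied().collect();
        let removed = parent_state.difference(new_state).copied().collect();
        (added, removed)
    }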
- #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - pub fn associate_token_shortstatehash( &self, room_id: &RoomId, @@ -1802,1149 +74,6 @@ impl Rooms { .transpose() } - /// Creates a new persisted data unit and adds it to a room. 
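build_and_append_pdu below consumes a PduBuilder carrying exactly the five fields destructured at the top of that function. A hypothetical caller constructing one for a plain text message might look like this (the ruma content helper and import paths are assumptions of this sketch, not taken from the patch):

    use ruma::events::{room::message::RoomMessageEventContent, RoomEventType};
    use serde_json::value::to_raw_value;

    fn text_message_builder(body: &str) -> PduBuilder {
        PduBuilder {
            event_type: RoomEventType::RoomMessage,
            // Content is passed as raw JSON; it is only interpreted during auth checks.
            content: to_raw_value(&RoomMessageEventContent::text_plain(body))
                .expect("event content serializes"),
            unsigned: None,
            state_key: None, // message events carry no state key
            redacts: None,
        }
    }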
- #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - 
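// PduSize is the one signing error that gets a dedicated client-facing
// message: ruma returns it when the signed event would exceed the federation
// PDU size limit (65536 bytes for the complete event, per the spec); every
// other signing failure is reported generically.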
ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
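// The appservice's own user id is rebuilt below from the `sender_localpart`
// field of its registration plus our server name, so this comparison can only
// ever match an appservice registered on this homeserver.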
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
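// (Same reasoning as in build_and_append_pdu above: `origin` is added because
// Synapse expects it and the spec requires it.)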
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, @@ -2983,521 +112,3 @@ impl Rooms { })) } - /// Returns an iterator of all servers participating in this room. 
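// Nearly every table scanned below keys its entries as
// "first id bytes, a 0xff separator, second id bytes", and the iterators
// recover the second id by splitting on the last 0xff (which can never occur
// inside the ids themselves, since they are valid UTF-8). A small,
// self-contained sketch of that convention, with plain strings standing in
// for RoomId / UserId / ServerName:
fn composite_key(first: &str, second: &str) -> Vec<u8> {
    let mut key = first.as_bytes().to_vec();
    key.push(0xff); // separator byte, never valid inside UTF-8 text
    key.extend_from_slice(second.as_bytes());
    key
}

fn second_id(key: &[u8]) -> Option<&[u8]> {
    // Mirrors the `key.rsplit(|&b| b == 0xff).next()` calls in the iterators below.
    key.rsplit(|&b| b == 0xff).next()
}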
- #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. 
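// Invited members are enumerated from roomuserid_invitecount rather than a
// dedicated membership table: update_membership above stores the global
// counter value under that key when the invite happens, and get_invite_count
// below reads the same value back.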
- #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? 
- .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
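// A room's version lives in the content of its m.room.create event.
// build_and_append_pdu above falls back to globals.default_room_version()
// when no create event exists yet; here a missing or unreadable create event
// is treated as a database error instead.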
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/database/sending.rs b/src/service/sending.rs similarity index 100% rename from src/database/sending.rs rename to src/service/sending.rs diff --git a/src/database/transaction_ids.rs b/src/service/transaction_ids.rs similarity index 100% rename from src/database/transaction_ids.rs rename to src/service/transaction_ids.rs diff --git a/src/database/uiaa.rs b/src/service/uiaa.rs similarity index 100% rename from src/database/uiaa.rs rename to src/service/uiaa.rs diff --git a/src/database/users.rs b/src/service/users.rs similarity index 100% rename from src/database/users.rs rename to src/service/users.rs diff --git a/src/error.rs b/src/utils/error.rs similarity index 100% rename from src/error.rs rename to src/utils/error.rs diff --git a/src/utils.rs b/src/utils/utils.rs similarity index 100% rename from src/utils.rs rename to src/utils/utils.rs From d0cbe46ff079998cf419a7f657da56f47432ffba Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:07:33 +0200 Subject: [PATCH 1134/1727] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state/mod.rs --- src/service/rooms/{state.rs => state/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state/mod.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state/mod.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state/mod.rs From 9efd9f06c6c274ad56bc70c9e176e7f59f74272c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:07:33 +0200 Subject: [PATCH 1135/1727] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state/data.rs --- src/service/rooms/{state.rs => state/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state/data.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state/data.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state/data.rs From 7d2b22f58de3059e5244bf0cc657d7703ff6f245 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:08:33 +0200 Subject: [PATCH 1136/1727] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state_accessor/mod.rs --- src/service/rooms/{state.rs => state_accessor/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state_accessor/mod.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state_accessor/mod.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state_accessor/mod.rs From e1d8c03e474883c6ffce09a1713241f5d5cb828f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:09:15 +0200 Subject: [PATCH 1137/1727] refactor: prepare splitting src/api/server_server.rs to 
src/service/rooms/event_handler/mod.rs --- src/{api/server_server.rs => service/rooms/event_handler/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{api/server_server.rs => service/rooms/event_handler/mod.rs} (100%) diff --git a/src/api/server_server.rs b/src/service/rooms/event_handler/mod.rs similarity index 100% rename from src/api/server_server.rs rename to src/service/rooms/event_handler/mod.rs From 57c92f80445ce908a47f6cc9745f7048b1f020cb Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:09:22 +0200 Subject: [PATCH 1138/1727] refactor: restore src/api/server_server.rs --- src/api/server_server.rs | 3644 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 3644 insertions(+) create mode 100644 src/api/server_server.rs diff --git a/src/api/server_server.rs b/src/api/server_server.rs new file mode 100644 index 0000000..6fa83e4 --- /dev/null +++ b/src/api/server_server.rs @@ -0,0 +1,3644 @@ +use crate::{ + client_server::{self, claim_keys_helper, get_keys_helper}, + database::{rooms::CompressedStateEvent, DatabaseGuard}, + pdu::EventHash, + utils, Database, Error, PduEvent, Result, Ruma, +}; +use axum::{response::IntoResponse, Json}; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use get_profile_information::v1::ProfileField; +use http::header::{HeaderValue, AUTHORIZATION}; +use regex::Regex; +use ruma::{ + api::{ + client::error::{Error as RumaError, ErrorKind}, + federation::{ + authorization::get_event_authorization, + device::get_devices::{self, v1::UserDevice}, + directory::{get_public_rooms, get_public_rooms_filtered}, + discovery::{ + get_remote_server_keys, get_remote_server_keys_batch, + get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, + get_server_version, ServerSigningKeys, VerifyKey, + }, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + keys::{claim_keys, get_keys}, + membership::{ + create_invite, + create_join_event::{self, RoomState}, + prepare_join_event, + }, + query::{get_profile_information, get_room_information}, + transactions::{ + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, + send_transaction_message, + }, + }, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, + SendAccessToken, + }, + directory::{IncomingFilter, IncomingRoomNetwork}, + events::{ + receipt::{ReceiptEvent, ReceiptEventContent}, + room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, + }, + RoomEventType, StateEventType, + }, + int, + receipt::ReceiptType, + serde::{Base64, JsonObject, Raw}, + signatures::{CanonicalJsonObject, CanonicalJsonValue}, + state_res::{self, RoomVersion, StateMap}, + to_device::DeviceIdOrAllDevices, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + ServerSigningKeyId, +}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use std::{ + collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + fmt::Debug, + future::Future, + mem, + net::{IpAddr, SocketAddr}, + ops::Deref, + pin::Pin, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant, SystemTime}, +}; +use tokio::sync::{MutexGuard, Semaphore}; +use tracing::{debug, error, info, trace, warn}; + +/// Wraps either an literal IP address plus port, or a hostname plus complement +/// (colon-plus-port if it was specified). 
+/// +/// Note: A `FedDest::Named` might contain an IP address in string form if there +/// was no port specified to construct a SocketAddr with. +/// +/// # Examples: +/// ```rust +/// # use conduit::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { +/// FedDest::Literal("198.51.100.3:8448".parse()?); +/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); +/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); +/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); +/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Debug, PartialEq)] +pub enum FedDest { + Literal(SocketAddr), + Named(String, String), +} + +impl FedDest { + fn into_https_string(self) -> String { + match self { + Self::Literal(addr) => format!("https://{}", addr), + Self::Named(host, port) => format!("https://{}{}", host, port), + } + } + + fn into_uri_string(self) -> String { + match self { + Self::Literal(addr) => addr.to_string(), + Self::Named(host, ref port) => host + port, + } + } + + fn hostname(&self) -> String { + match &self { + Self::Literal(addr) => addr.ip().to_string(), + Self::Named(host, _) => host.clone(), + } + } + + fn port(&self) -> Option { + match &self { + Self::Literal(addr) => Some(addr.port()), + Self::Named(_, port) => port[1..].parse().ok(), + } + } +} + +#[tracing::instrument(skip(globals, request))] +pub(crate) async fn send_request( + globals: &crate::database::globals::Globals, + destination: &ServerName, + request: T, +) -> Result +where + T: Debug, +{ + if !globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut write_destination_to_cache = false; + + let cached_result = globals + .actual_destination_cache + .read() + .unwrap() + .get(destination) + .cloned(); + + let (actual_destination, host) = if let Some(result) = cached_result { + result + } else { + write_destination_to_cache = true; + + let result = find_actual_destination(globals, destination).await; + + (result.0, result.1.into_uri_string()) + }; + + let actual_destination_str = actual_destination.clone().into_https_string(); + + let mut http_request = request + .try_into_http_request::>( + &actual_destination_str, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!( + "Failed to find destination {}: {}", + actual_destination_str, e + ); + Error::BadServerResponse("Invalid destination") + })?; + + let mut request_map = serde_json::Map::new(); + + if !http_request.body().is_empty() { + request_map.insert( + "content".to_owned(), + serde_json::from_slice(http_request.body()) + .expect("body is valid json, we just created it"), + ); + }; + + request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); + request_map.insert("destination".to_owned(), destination.as_str().into()); + + let mut request_json = + serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + + ruma::signatures::sign_json( + globals.server_name().as_str(), + globals.keypair(), + &mut request_json, + ) + .expect("our request json is what ruma expects"); + + let request_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + + let 
signatures = request_json["signatures"] + .as_object() + .unwrap() + .values() + .map(|v| { + v.as_object() + .unwrap() + .iter() + .map(|(k, v)| (k, v.as_str().unwrap())) + }); + + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + globals.server_name(), + s.0, + s.1 + )) + .unwrap(), + ); + } + } + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + let url = reqwest_request.url().clone(); + + let response = globals.federation_client().execute(reqwest_request).await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + warn!( + "{} {}: {}", + url, + status, + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") + ); + } + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status == 200 { + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + globals.actual_destination_cache.write().unwrap().insert( + Box::::from(destination), + (actual_destination, host), + ); + } + + response.map_err(|e| { + warn!( + "Invalid 200 response from {} on: {} {}", + &destination, url, e + ); + Error::BadServerResponse("Server returned bad 200 response.") + }) + } else { + Err(Error::FederationError( + destination.to_owned(), + RumaError::try_from_http_response(http_response).map_err(|e| { + warn!( + "Invalid {} response from {} on: {} {}", + status, &destination, url, e + ); + Error::BadServerResponse("Server returned bad error response.") + })?, + )) + } + } + Err(e) => Err(e.into()), + } +} + +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FedDest::Literal(destination)) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: &str) -> FedDest { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FedDest::Named(host.to_owned(), port.to_owned()) +} + +/// Returns: actual_destination, host header +/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Numbers in comments below refer to bullet points in linked section of specification +#[tracing::instrument(skip(globals))] +async fn find_actual_destination( + globals: &crate::database::globals::Globals, + destination: &'_ ServerName, +) -> (FedDest, FedDest) { + let destination_str = destination.as_str().to_owned(); + let mut hostname = destination_str.clone(); + let actual_destination = match get_ip_with_port(&destination_str) { + Some(host_port) => { + // 1: IP literal with provided or default port + host_port + } + None => { + if let Some(pos) = 
destination_str.find(':') { + // 2: Hostname with included port + let (host, port) = destination_str.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + match request_well_known(globals, destination.as_str()).await { + // 3: A .well-known file is available + Some(delegated_hostname) => { + hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); + match get_ip_with_port(&delegated_hostname) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if let Some(pos) = delegated_hostname.find(':') { + // 3.2: Hostname with port in .well-known file + let (host, port) = delegated_hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + // Delegated hostname has no port in this branch + if let Some(hostname_override) = + query_srv_record(globals, &delegated_hostname).await + { + // 3.3: SRV lookup successful + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(delegated_hostname, format!(":{}", port)) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { + // 3.4: No SRV records, just use the hostname from .well-known + add_port_to_hostname(&delegated_hostname) + } + } + } + } + } + // 4: No .well-known or an error occured + None => { + match query_srv_record(globals, &destination_str).await { + // 4: SRV record found + Some(hostname_override) => { + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + hostname.clone(), + (override_ip.iter().collect(), force_port.unwrap_or(8448)), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(hostname.clone(), format!(":{}", port)) + } else { + add_port_to_hostname(&hostname) + } + } + // 5: No SRV record found + None => add_port_to_hostname(&destination_str), + } + } + } + } + } + }; + + // Can't use get_ip_with_port here because we don't want to add a port + // to an IP address if it wasn't specified + let hostname = if let Ok(addr) = hostname.parse::() { + FedDest::Literal(addr) + } else if let Ok(addr) = hostname.parse::() { + FedDest::Named(addr.to_string(), ":8448".to_owned()) + } else if let Some(pos) = hostname.find(':') { + let (host, port) = hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + FedDest::Named(hostname, ":8448".to_owned()) + }; + (actual_destination, hostname) +} + +#[tracing::instrument(skip(globals))] +async fn query_srv_record( + globals: &crate::database::globals::Globals, + hostname: &'_ str, +) -> Option { + if let Ok(Some(host_port)) = globals + .dns_resolver() + .srv_lookup(format!("_matrix._tcp.{}", hostname)) + .await + .map(|srv| { + srv.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()), + ) + }) + }) + { + Some(host_port) + } else { + None + } +} + +#[tracing::instrument(skip(globals))] +async fn request_well_known( + globals: 
&crate::database::globals::Globals, + destination: &str, +) -> Option { + let body: serde_json::Value = serde_json::from_str( + &globals + .default_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? + .text() + .await + .ok()?, + ) + .ok()?; + Some(body.get("m.server")?.as_str()?.to_owned()) +} + +/// # `GET /_matrix/federation/v1/version` +/// +/// Get version information on this server. +pub async fn get_server_version_route( + db: DatabaseGuard, + _body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { + name: Some("Conduit".to_owned()), + version: Some(env!("CARGO_PKG_VERSION").to_owned()), + }), + }) +} + +/// # `GET /_matrix/key/v2/server` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +// Response type for this endpoint is Json because we need to calculate a signature for the response +pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); + verify_keys.insert( + format!("ed25519:{}", db.globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(db.globals.keypair().public_key().to_vec()), + }, + ); + let mut response = serde_json::from_slice( + get_server_keys::v2::Response { + server_key: Raw::new(&ServerSigningKeys { + server_name: db.globals.server_name().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(86400 * 7), + ) + .expect("time is valid"), + }) + .expect("static conversion, no errors"), + } + .try_into_http_response::>() + .unwrap() + .body(), + ) + .unwrap(); + + ruma::signatures::sign_json( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut response, + ) + .unwrap(); + + Ok(Json(response)) +} + +/// # `GET /_matrix/key/v2/server/{keyId}` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { + get_server_keys_route(db).await +} + +/// # `POST /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. +pub async fn get_public_rooms_filtered_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + &db, + None, + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, + ) + .await?; + + Ok(get_public_rooms_filtered::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `GET /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. 
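+///
+/// This unfiltered variant goes through the same
+/// `client_server::get_public_rooms_filtered_helper` as the filtered endpoint
+/// above, with a default filter and the plain Matrix room network.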
+pub async fn get_public_rooms_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + &db, + None, + body.limit, + body.since.as_deref(), + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, + ) + .await?; + + Ok(get_public_rooms::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `PUT /_matrix/federation/v1/send/{txnId}` +/// +/// Push EDUs and PDUs to this server. +pub async fn send_transaction_message_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let mut resolved_map = BTreeMap::new(); + + let pub_key_map = RwLock::new(BTreeMap::new()); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + // let mut auth_cache = EventMap::new(); + + for pdu in &body.pdus { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + continue; + } + }; + + // 0. Check the server is in the room + let room_id = match value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + { + Some(id) => id, + None => { + // Event is invalid + resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); + continue; + } + }; + + acl_check(&sender_servername, &room_id, &db)?; + + let mutex = Arc::clone( + db.globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let start_time = Instant::now(); + resolved_map.insert( + event_id.clone(), + handle_incoming_pdu( + &sender_servername, + &event_id, + &room_id, + value, + true, + &db, + &pub_key_map, + ) + .await + .map(|_| ()), + ); + drop(mutex_lock); + + let elapsed = start_time.elapsed(); + warn!( + "Handling transaction of event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if e != "Room is unknown to this server." 
{ + warn!("Incoming PDU failed {:?}", pdu); + } + } + } + + for edu in body + .edus + .iter() + .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) + { + match edu { + Edu::Presence(_) => {} + Edu::Receipt(receipt) => { + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; + db.rooms.edus.readreceipt_update( + &user_id, + &room_id, + event, + &db.globals, + )?; + } else { + // TODO fetch missing events + info!("No known event ids in read receipt: {:?}", user_updates); + } + } + } + } + Edu::Typing(typing) => { + if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms.edus.typing_remove( + &typing.user_id, + &typing.room_id, + &db.globals, + )?; + } + } + } + Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { + db.users + .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + } + Edu::DirectToDevice(DirectDeviceContent { + sender, + ev_type, + message_id, + messages, + }) => { + // Check if this is a new transaction id + if db + .transaction_ids + .existing_txnid(&sender, None, &message_id)? + .is_some() + { + continue; + } + + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + db.users.add_to_device_event( + &sender, + target_user_id, + target_device_id, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )? + } + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in db.users.all_device_ids(target_user_id) { + db.users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )?; + } + } + } + } + } + + // Save transaction id with empty data + db.transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + } + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if user_id.server_name() != sender_servername { + continue; + } + if let Some(master_key) = master_key { + db.users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + &db.rooms, + &db.globals, + )?; + } + } + Edu::_Custom(_) => {} + } + } + + db.flush()?; + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }) +} + +/// An async function that can recursively call itself. +type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; + +/// When receiving an event one needs to: +/// 0. Check the server is in the room +/// 1. Skip the PDU if we already know about it +/// 2. 
Check signatures, otherwise drop
+/// 3. Check content hash, redact if doesn't match
+/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
+///    timeline events
+/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are
+///    also rejected "due to auth events"
+/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
+/// 7. Persist this event as an outlier
+/// 8. If not timeline event: stop
+/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline
+///    events
+/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
+///     doing all the checks in this list starting at 1. These are not timeline events
+/// 11. Check the auth of the event passes based on the state of the event
+/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by
+///     doing state res where one of the inputs was a previously trusted set of state, don't just
+///     trust a set of state we got from a remote)
+/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail"
+///     it
+/// 14. Use state resolution to find new room state
+// We use some AsyncRecursiveType hacks here so we can call this async function recursively
+#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))]
+pub(crate) async fn handle_incoming_pdu<'a>(
+    origin: &'a ServerName,
+    event_id: &'a EventId,
+    room_id: &'a RoomId,
+    value: BTreeMap<String, CanonicalJsonValue>,
+    is_timeline_event: bool,
+    db: &'a Database,
+    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+) -> Result<Option<Vec<u8>>, String> {
+    match db.rooms.exists(room_id) {
+        Ok(true) => {}
+        _ => {
+            return Err("Room is unknown to this server.".to_owned());
+        }
+    }
+
+    match db.rooms.is_disabled(room_id) {
+        Ok(false) => {}
+        _ => {
+            return Err("Federation of this room is currently disabled on this server.".to_owned());
+        }
+    }
+
+    // 1. Skip the PDU if we already have it as a timeline event
+    if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) {
+        return Ok(Some(pdu_id.to_vec()));
+    }
+
+    let create_event = db
+        .rooms
+        .room_state_get(room_id, &StateEventType::RoomCreate, "")
+        .map_err(|_| "Failed to ask database for event.".to_owned())?
+        .ok_or_else(|| "Failed to find create event in db.".to_owned())?;
+
+    let first_pdu_in_room = db
+        .rooms
+        .first_pdu_in_room(room_id)
+        .map_err(|_| "Error loading first room event.".to_owned())?
+        .expect("Room exists");
+
+    let (incoming_pdu, val) = handle_outlier_pdu(
+        origin,
+        &create_event,
+        event_id,
+        room_id,
+        value,
+        db,
+        pub_key_map,
+    )
+    .await?;
+
+    // 8. if not timeline event: stop
+    if !is_timeline_event {
+        return Ok(None);
+    }
+
+    if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
+        return Ok(None);
+    }
+
+    // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() + { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + let mut errors = 0; + for prev_id in dbg!(sorted) { + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err( + "Federation of this room is currently disabled on this server.".to_owned(), + ); + } + } + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + + if errors >= 5 { + break; + } + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + if let Err(e) = upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await + { + errors += 1; + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + } + let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + warn!( + "Handling prev event {} took {}m{}s", 
+ prev_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + } + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r +} + +#[tracing::instrument(skip(create_event, value, db, pub_key_map))] +fn handle_outlier_pdu<'a>( + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + db: &'a Database, + pub_key_map: &'a RwLock>>, +) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + fetch_required_signing_keys(&value, pub_key_map, db) + .await + .map_err(|e| e.to_string())?; + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + room_version_id, + ) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e); + return Err("Signature verification failed".to_owned()); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + match ruma::signatures::redact(&value, room_version_id) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_owned()), + } + } + Ok(ruma::signatures::Verified::All) => value, + }; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU.".to_owned())?; + + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // EDIT: Step 5 is not applied anymore because it failed too often + warn!("Fetching auth events for {}", incoming_pdu.event_id); + fetch_and_handle_outliers( + db, + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events + info!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); + + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; + } + }; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + } + hash_map::Entry::Occupied(_) => { + return Err( + "Auth event's type and state_key combination exists multiple times." + .to_owned(), + ) + } + } + } + + // The original create event must be in the auth events + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|_e| "Auth check failed".to_owned())? + { + return Err("Event has failed auth check with auth events.".to_owned()); + } + + info!("Validation successful."); + + // 7. Persist the event as an outlier. + db.rooms + .add_pdu_outlier(&incoming_pdu.event_id, &val) + .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + info!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) + }) +} + +#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] +async fn upgrade_outlier_to_timeline_pdu( + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + db: &Database, + room_id: &RoomId, + pub_key_map: &RwLock>>, +) -> Result>, String> { + if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } + + if db + .rooms + .is_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to ask db for soft fail".to_owned())? + { + return Err("Event has been soft failed".into()); + } + + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. 
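+    //
+    // Before falling back to a `/state_ids` request, the code below first tries
+    // to derive the state at the incoming event locally. Illustrative sketch
+    // (comments only, not executed; `p`, `s` and `state_at_event` are
+    // placeholder names): with exactly one prev event `p` whose state snapshot
+    // `s` is already stored, the state at the incoming event is `s` plus `p`
+    // itself whenever `p` is a state event:
+    //
+    //     let mut state_at_event = s;                      // state before `p`
+    //     if let Some(state_key) = &p.state_key {
+    //         state_at_event.insert((p.kind, state_key), p.event_id);
+    //     }                                                // state after `p`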
+ + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + + info!("Requesting state at event"); + let mut state_at_incoming_event = None; + + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())?; + + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) + } else { + None + }; + + if let Some(Ok(mut state)) = state { + info!("Using cached state"); + let prev_pdu = + db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu + } + + state_at_incoming_event = Some(state); + } + } else { + info!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; + + let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if okay { + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = db + .rooms + .state_full_ids(sstatehash) + .await + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + get_auth_chain(room_id, starting_events, db) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + + fork_states.push(state); + } + + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } + } + } + } + + if state_at_incoming_event.is_none() { + info!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + info!("Fetching state events at event."); + let state_vec = fetch_and_handle_outliers( + db, + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + } + btree_map::Entry::Occupied(_) => return Err( + "State event's type and state_key combination exists multiple times." + .to_owned(), + ), + } + } + + // The original create event must still be in the state + let create_shortstatekey = db + .rooms + .get_shortstatekey(&StateEventType::RoomCreate, "") + .map_err(|_| "Failed to talk to db.")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(|id| id.as_ref()) + != Some(&create_event.event_id) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + state_at_incoming_event = Some(state); + } + Err(e) => { + warn!("Fetching state for event failed: {}", e); + return Err("Fetching state for event failed".into()); + } + }; + } + + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); + + info!("Starting auth check"); + // 11. 
Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| { + db.rooms + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + }, + ) + .map_err(|_e| "Auth check failed.".to_owned())?; + + if !check_result { + return Err("Event has failed auth check with state at the event.".into()); + } + info!("Auth check succeeded"); + + // We start looking at current room state now, so lets lock the room + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); + let mut extremities = db + .rooms + .get_pdu_leaves(room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } + } + + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); + + let auth_events = db + .rooms + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| "Auth check failed.".to_owned())?; + + if soft_fail { + append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; + return Err("Event has been soft failed".into()); + } + + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? 
+ .expect("every room has state"); + + let current_state_ids = db + .rooms + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match db + .rooms + .get_pdu(id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + let mut fork_states = Vec::new(); + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + fork_states.push(state_after); + + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + db.rooms + .compress_state_event(*k, id, &db.globals) + .map_err(|_| "Failed to compress_state_event.".to_owned()) + }) + .collect::>()? + } else { + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + } + + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = db.globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } + }; + + drop(lock); + + info!("State resolution done. Compressing state"); + + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + db.rooms + .compress_state_event(shortstatekey, &event_id, &db.globals) + .map_err(|_| "Failed to compress state event".to_owned()) + }) + .collect::>()? + }; + + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + db.rooms + .force_state(room_id, new_room_state, db) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + } + + info!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + + let pdu_id = append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; + + info!("Appended incoming pdu"); + + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) +} + +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. +/// +/// Returns pdu and if we fetched it over federation the raw json. +/// +/// a. Look in the main timeline (pduid_pdu tree) +/// b. Look at outlier pdu tree +/// c. Ask origin server over federation +/// d. TODO: Ask other servers over federation? 
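+///
+/// Events that previously failed to fetch or validate are retried with a
+/// quadratic backoff (the same formula is used in `handle_incoming_pdu` for
+/// prev events). A minimal, self-contained sketch of that schedule:
+///
+/// ```rust
+/// use std::time::Duration;
+///
+/// /// Wait `5 min * tries * tries`, capped at 24 hours.
+/// fn backoff(tries: u32) -> Duration {
+///     let d = Duration::from_secs(5 * 60) * tries * tries;
+///     d.min(Duration::from_secs(60 * 60 * 24))
+/// }
+///
+/// assert_eq!(backoff(1), Duration::from_secs(300));
+/// assert_eq!(backoff(2), Duration::from_secs(1200));
+/// assert_eq!(backoff(20), Duration::from_secs(86400)); // capped at one day
+/// ```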
+#[tracing::instrument(skip_all)] +pub(crate) fn fetch_and_handle_outliers<'a>( + db: &'a Database, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, +) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + Box::pin(async move { + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + let mut pdus = vec![]; + for id in events { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } + } + + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + trace!("Found {} in db", id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &next_id }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()); + } + } + } + + for (next_id, value) in events_in_reverse_order.iter().rev() { + match handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } + } + } + } + pdus + }) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +#[tracing::instrument(skip_all)] +pub(crate) async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, + signature_ids: Vec, +) -> Result> { + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = db + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = db.globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match db + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = db + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + trace!("Loading signing keys for {}", origin); + + let mut result: BTreeMap<_, _> = db + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + return Ok(result); + } + + debug!("Fetching signing keys for {} over federation", origin); + + if let Some(server_key) = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + db.globals.add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in db.globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Some(server_keys) = db + .sending + .send_federation_request( + &db.globals, + server, + get_remote_server_keys::v2::Request::new( + origin, + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { + db.globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + drop(permit); + + back_off(signature_ids); + + warn!("Failed to find public key for server: {}", origin); + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +#[tracing::instrument(skip_all)] +fn append_incoming_pdu<'a>( + db: &Database, + pdu: &PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: impl IntoIterator + Clone + Debug, + state_ids_compressed: HashSet, + soft_fail: bool, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex +) -> Result>> { + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + db.rooms.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + &db.globals, + )?; + + if soft_fail { + db.rooms + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + return Ok(None); + } + + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + + for appservice in db.appservice.all()? { + if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let aliases = namespaces + .get("aliases") + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + + let matching_users = |users: &Regex| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == RoomEventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: &Regex| { + db.rooms + .room_aliases(&pdu.room_id) + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }; + + if aliases.iter().any(matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) + || users.iter().any(matching_users) + { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } + } + + Ok(Some(pdu_id)) +} + +#[tracing::instrument(skip(starting_events, db))] +pub(crate) async fn get_auth_chain<'a>( + room_id: &RoomId, + starting_events: Vec>, + db: &'a Database, +) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { + hits += 1; + full_auth_chain.extend(cached.iter().copied()); + continue; + } + misses += 1; + + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; + let mut i = 0; + for (sevent_id, event_id) in chunk { + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ + hits2 += 1; + chunk_cache.extend(cached.iter().copied()); + } else { + misses2 += 1; + let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); + db.rooms + .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() + ); + chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + }; + } + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 + ); + let chunk_cache = Arc::new(chunk_cache); + db.rooms + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); + } + + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses + ); + + Ok(full_auth_chain + .into_iter() + .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) +} + +#[tracing::instrument(skip(event_id, db))] +fn get_auth_chain_inner( + room_id: &RoomId, + event_id: &EventId, + db: &Database, +) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match db.rooms.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + } + for auth_event in &pdu.auth_events { + let sauthevent = db + .rooms + .get_or_create_shorteventid(auth_event, &db.globals)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } + } + } + Ok(None) => { + warn!("Could not find pdu mentioned in auth events: {}", event_id); + } + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); + } + } + } + + Ok(found) +} + +/// # `GET /_matrix/federation/v1/event/{eventId}` +/// +/// Retrieves a single event from the server. +/// +/// - Only works if a user of this server is currently invited or joined the room +pub async fn get_event_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !db.rooms.server_in_room(sender_servername, room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + Ok(get_event::v1::Response { + origin: db.globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdu: PduEvent::convert_to_outgoing_federation_event(event), + }) +} + +/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` +/// +/// Retrieves events that the sender is missing. 
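+///
+/// The handler below walks backwards from `latest_events` through each event's
+/// `prev_events`, skips anything listed in `earliest_events`, and stops once
+/// `limit` events have been collected. A rough, self-contained sketch of that
+/// traversal (a toy `HashMap` graph and the `missing_events` helper stand in
+/// for the real room DAG and handler):
+///
+/// ```rust
+/// use std::collections::{HashMap, HashSet};
+///
+/// fn missing_events<'a>(
+///     prev: &HashMap<&'a str, Vec<&'a str>>,
+///     latest: &[&'a str],
+///     earliest: &HashSet<&'a str>,
+///     limit: usize,
+/// ) -> Vec<&'a str> {
+///     let mut queue: Vec<&'a str> = latest.to_vec();
+///     let mut found = Vec::new();
+///     let mut i = 0;
+///     while i < queue.len() && found.len() < limit {
+///         let id = queue[i];
+///         i += 1;
+///         if earliest.contains(id) {
+///             continue;
+///         }
+///         if let Some(parents) = prev.get(id) {
+///             // Known event: return it and keep walking through its parents.
+///             queue.extend(parents);
+///             found.push(id);
+///         }
+///     }
+///     found
+/// }
+///
+/// let prev = HashMap::from([("C", vec!["B"]), ("B", vec!["A"]), ("A", vec![])]);
+/// let earliest = HashSet::from(["A"]);
+/// assert_eq!(missing_events(&prev, &["C"], &earliest, 10), vec!["C", "B"]);
+/// ```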
+pub async fn get_missing_events_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let mut queued_events = body.latest_events.clone(); + let mut events = Vec::new(); + + let mut i = 0; + while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { + if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { + let room_id_str = pdu + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let event_room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if event_room_id != body.room_id { + warn!( + "Evil event detected: Event {} found while searching in room {}", + queued_events[i], body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Evil event detected", + )); + } + + if body.earliest_events.contains(&queued_events[i]) { + i += 1; + continue; + } + queued_events.extend_from_slice( + &serde_json::from_value::>>( + serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no prev_events field.") + })?) + .expect("canonical json is valid json value"), + ) + .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + ); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); + } + i += 1; + } + + Ok(get_missing_events::v1::Response { events }) +} + +/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` +/// +/// Retrieves the auth chain for a given event. +/// +/// - This does not include the event itself +pub async fn get_event_authorization_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_event_authorization::v1::Response { + auth_chain: auth_chain_ids + .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/state/{roomId}` +/// +/// Retrieves the current state of the room. 
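+///
+/// The snapshot is resolved from the state hash stored for the `event_id` given
+/// in the request, and the auth chain of that event is returned alongside the
+/// state PDUs.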
+pub async fn get_room_state_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdus = db + .rooms + .state_full_ids(shortstatehash) + .await? + .into_iter() + .map(|(_, id)| { + PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&id).unwrap().unwrap(), + ) + }) + .collect(); + + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_room_state::v1::Response { + auth_chain: auth_chain_ids + .map(|id| { + db.rooms.get_pdu_json(&id).map(|maybe_json| { + PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) + }) + }) + .filter_map(|r| r.ok()) + .collect(), + pdus, + }) +} + +/// # `GET /_matrix/federation/v1/state_ids/{roomId}` +/// +/// Retrieves the current state of the room. +pub async fn get_room_state_ids_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdu_ids = db + .rooms + .state_full_ids(shortstatehash) + .await? + .into_iter() + .map(|(_, id)| (*id).to_owned()) + .collect(); + + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), + pdu_ids, + }) +} + +/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` +/// +/// Creates a join template. +pub async fn create_join_event_template_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !db.rooms.exists(&body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = + db.rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + + let prev_events: Vec<_> = db + .rooms + .get_pdu_leaves(&body.room_id)? + .into_iter() + .take(20) + .collect(); + + let create_event = db + .rooms + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + // If there was no create event yet, assume we are creating a room with the default version + // right now + let room_version_id = create_event_content + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: None, + blurhash: None, + displayname: None, + is_direct: None, + membership: MembershipState::Join, + third_party_invite: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("member event is valid value"); + + let state_key = body.user_id.to_string(); + let kind = StateEventType::RoomMember; + + let auth_events = db.rooms.get_auth_events( + &body.room_id, + &kind.to_string().into(), + &body.user_id, + Some(&state_key), + &content, + )?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = BTreeMap::new(); + + if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
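// The depth rule used just above, stated on its own: a hedged illustration with plain
// u64 depths instead of ruma's UInt.
fn next_depth(prev_event_depths: &[u64]) -> u64 {
    // An event's depth is one greater than the largest depth among its prev_events
    // (or 1 when no prev_event depth is known).
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}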
{ + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert( + "prev_sender".to_owned(), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), + ); + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: body.room_id.clone(), + sender: body.user_id.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: kind.to_string().into(), + content, + state_key: Some(state_key), + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts: None, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { + sha256: "aaa".to_owned(), + }, + signatures: None, + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + ); + + Ok(prepare_join_event::v1::Response { + room_version: Some(room_version_id), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} + +async fn create_join_event( + db: &DatabaseGuard, + sender_servername: &ServerName, + room_id: &RoomId, + pdu: &RawJsonValue, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !db.rooms.exists(room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + acl_check(sender_servername, room_id, db)?; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + + // We need to return the state prior to joining, let's keep a reference to that here + let shortstatehash = db + .rooms + .current_shortstatehash(room_id)? 
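// A small sketch of the JSON massaging done above before the join template is returned:
// the event_id is stripped and an origin field is added. serde_json's Map is used here
// only as a stand-in for ruma's CanonicalJsonObject.
fn prepare_outgoing_pdu(
    mut pdu: serde_json::Map<String, serde_json::Value>,
    origin: &str,
) -> serde_json::Map<String, serde_json::Value> {
    // The joining server signs the template and the final event id is derived afterwards,
    // so no event_id is sent.
    pdu.remove("event_id");
    // Add origin because synapse likes that (and it's required in the spec).
    pdu.insert(
        "origin".to_owned(),
        serde_json::Value::String(origin.to_owned()),
    );
    pdu
}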
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + // let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + let origin: Box = serde_json::from_value( + serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ))?) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let mutex = Arc::clone( + db.globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) + .await + .map_err(|e| { + warn!("Error while handling incoming send join PDU: {}", e); + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); + + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let auth_chain_ids = get_auth_chain( + room_id, + state_ids.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .await?; + + let servers = db + .rooms + .room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != db.globals.server_name()); + + db.sending.send_pdu(servers, &pdu_id)?; + + db.flush()?; + + Ok(RoomState { + auth_chain: auth_chain_ids + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + state: state_ids + .iter() + .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v1_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v1::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v2_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v2::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` +/// +/// Invites a remote user to a room. 
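// A sketch of the per-room lock pattern used above (roomid_mutex_federation): one async
// mutex per room id, created on demand, so incoming send_join PDUs for the same room are
// handled one at a time. Assumes tokio; the struct here is hypothetical.
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use tokio::sync::Mutex;

#[derive(Default)]
struct RoomFederationLocks {
    map: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomFederationLocks {
    fn get(&self, room_id: &str) -> Arc<Mutex<()>> {
        // Clone the Arc out so the map lock is released before the mutex is awaited.
        Arc::clone(
            self.map
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        )
    }
}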
+pub async fn create_invite_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + + if !db.rooms.is_supported_version(&db, &body.room_version) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + let mut signed_event = utils::to_canonical_object(&body.event) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.into()), + ); + + let sender: Box<_> = serde_json::from_value( + signed_event + .get("sender") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + + let invited_user: Box<_> = serde_json::from_value( + signed_event + .get("state_key") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + let mut invite_state = body.invite_room_state.clone(); + + let mut event: JsonObject = serde_json::from_str(body.event.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$dummy".into()); + + let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If the room already exists, the remote server will notify us about the join via /send + if !db.rooms.exists(&pdu.room_id)? { + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db, + true, + )?; + } + + db.flush()?; + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + }) +} + +/// # `GET /_matrix/federation/v1/user/devices/{userId}` +/// +/// Gets information on all devices of the user. +pub async fn get_devices_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + Ok(get_devices::v1::Response { + user_id: body.user_id.clone(), + stream_id: db + .users + .get_devicelist_version(&body.user_id)? 
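// How the invite's event id is formed above, shown in isolation: recent room versions
// derive the id from the event's reference hash, prefixed with '$'. The hash string is
// an assumed input here; computing it is ruma's job (ruma::signatures::reference_hash).
fn event_id_from_reference_hash(reference_hash: &str) -> String {
    format!("${}", reference_hash)
}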
+ .unwrap_or(0) + .try_into() + .expect("version will not grow that large"), + devices: db + .users + .all_devices_metadata(&body.user_id) + .filter_map(|r| r.ok()) + .filter_map(|metadata| { + Some(UserDevice { + keys: db + .users + .get_device_keys(&body.user_id, &metadata.device_id) + .ok()??, + device_id: metadata.device_id, + device_display_name: metadata.display_name, + }) + }) + .collect(), + master_key: db + .users + .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, + self_signing_key: db + .users + .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, + }) +} + +/// # `GET /_matrix/federation/v1/query/directory` +/// +/// Resolve a room alias to a room id. +pub async fn get_room_information_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let room_id = db + .rooms + .id_from_alias(&body.room_alias)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room alias not found.", + ))?; + + Ok(get_room_information::v1::Response { + room_id, + servers: vec![db.globals.server_name().to_owned()], + }) +} + +/// # `GET /_matrix/federation/v1/query/profile` +/// +/// Gets information on a profile. +pub async fn get_profile_information_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut displayname = None; + let mut avatar_url = None; + let mut blurhash = None; + + match &body.field { + Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::AvatarUrl) => { + avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)? + } + // TODO: what to do with custom + Some(_) => {} + None => { + displayname = db.users.displayname(&body.user_id)?; + avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)?; + } + } + + Ok(get_profile_information::v1::Response { + blurhash, + displayname, + avatar_url, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/query` +/// +/// Gets devices and identity keys for the given users. +pub async fn get_keys_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = get_keys_helper( + None, + &body.device_keys, + |u| Some(u.server_name()) == body.sender_servername.as_deref(), + &db, + ) + .await?; + + db.flush()?; + + Ok(get_keys::v1::Response { + device_keys: result.device_keys, + master_keys: result.master_keys, + self_signing_keys: result.self_signing_keys, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/claim` +/// +/// Claims one-time keys. +pub async fn claim_keys_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = claim_keys_helper(&body.one_time_keys, &db).await?; + + db.flush()?; + + Ok(claim_keys::v1::Response { + one_time_keys: result.one_time_keys, + }) +} + +#[tracing::instrument(skip_all)] +pub(crate) async fn fetch_required_signing_keys( + event: &BTreeMap, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? 
+ .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let fetch_res = fetch_signing_keys( + db, + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await; + + let keys = match fetch_res { + Ok(keys) => keys, + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; + } + }; + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); + } + + Ok(()) +} + +// Gets a list of servers for which we don't have the signing key yet. We go over +// the PDUs and either cache the key or add it to the list that needs to be retrieved. +fn get_server_keys_from_cache( + pdu: &RawJsonValue, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + db: &Database, +) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = db + .globals + .signing_keys_for(origin)? 
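// The retry backoff used above for events that previously failed, written out on its own.
// The 30-second base matches this call site; other call sites in this file use 5 minutes.
use std::time::{Duration, Instant};

fn still_backing_off(last_failure: Instant, tries: u32) -> bool {
    // Quadratic growth in the number of failed attempts, capped at one day.
    let window =
        (Duration::from_secs(30) * tries * tries).min(Duration::from_secs(60 * 60 * 24));
    last_failure.elapsed() < window
}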
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) +} + +pub(crate) async fn fetch_join_signing_keys( + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = + BTreeMap::new(); + + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + } + for pdu in &event.room_state.auth_chain { + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + } + + drop(pkm); + } + + if servers.is_empty() { + // We had all keys locally + return Ok(()); + } + + for server in db.globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + for k in keys.server_keys { + let k = k.deserialize().unwrap(); + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = db + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + } + + if servers.is_empty() { + return Ok(()); + } + } + + let mut futures: FuturesUnordered<_> = servers + .into_iter() + .map(|(server, _)| async move { + ( + db.sending + .send_federation_request( + &db.globals, + &server, + get_server_keys::v2::Request::new(), + ) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { + let result: BTreeMap<_, _> = db + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } + + Ok(()) +} + +/// Returns Ok if the acl allows the server +fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { + let acl_event = match db + .rooms + .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
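// A reduced sketch of the fallback above: when the trusted servers did not supply every
// missing key, the remaining servers are queried concurrently and responses are handled
// as they arrive. Assumes the futures-util crate; fetch_server_keys is a hypothetical
// stand-in for the real federation request.
use futures_util::stream::{FuturesUnordered, StreamExt};

async fn fetch_server_keys(server: String) -> (String, Result<Vec<String>, ()>) {
    // Placeholder for send_federation_request(get_server_keys::v2::Request::new()).
    (server, Ok(Vec::new()))
}

async fn fetch_remaining_keys(servers: Vec<String>) -> Vec<(String, Vec<String>)> {
    let mut futures: FuturesUnordered<_> =
        servers.into_iter().map(fetch_server_keys).collect();

    let mut fetched = Vec::new();
    while let Some((server, result)) = futures.next().await {
        if let Ok(keys) = result {
            fetched.push((server, keys));
        }
    }
    fetched
}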
+ { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) + } +} + +#[cfg(test)] +mod tests { + use super::{add_port_to_hostname, get_ip_with_port, FedDest}; + + #[test] + fn ips_get_default_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1"), + Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("dead:beef::"), + Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) + ); + } + + #[test] + fn ips_keep_custom_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1:1234"), + Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("[dead::beef]:8933"), + Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) + ); + } + + #[test] + fn hostnames_get_default_ports() { + assert_eq!( + add_port_to_hostname("example.com"), + FedDest::Named(String::from("example.com"), String::from(":8448")) + ) + } + + #[test] + fn hostnames_keep_custom_ports() { + assert_eq!( + add_port_to_hostname("example.com:1337"), + FedDest::Named(String::from("example.com"), String::from(":1337")) + ) + } +} From dcdbcc08519643b7501b1e5695a9f211703f8b41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Jun 2022 11:31:27 +0200 Subject: [PATCH 1139/1727] refactor: event handling code --- src/api/client_server/membership.rs | 123 +- src/api/server_server.rs | 1356 +-------- src/service/rooms/{alias.rs => alias/mod.rs} | 0 .../rooms/{directory.rs => directory/mod.rs} | 0 src/service/rooms/{edus.rs => edus/mod.rs} | 0 src/service/rooms/event_handler/mod.rs | 2680 +---------------- .../{lazy_loading.rs => lazy_loading/mod.rs} | 0 .../rooms/{metadata.rs => metadata/mod.rs} | 0 .../rooms/{outlier.rs => outlier/mod.rs} | 11 +- .../{pdu_metadata.rs => pdu_metadata/mod.rs} | 0 .../rooms/{search.rs => search/mod.rs} | 0 src/service/rooms/{short.rs => short/mod.rs} | 0 src/service/rooms/state/data.rs | 499 +-- src/service/rooms/state/mod.rs | 179 +- src/service/rooms/state_accessor/mod.rs | 349 --- .../{state_cache.rs => state_cache/mod.rs} | 0 .../mod.rs} | 0 .../rooms/{timeline.rs => timeline/mod.rs} | 474 +-- src/service/rooms/{user.rs => user/mod.rs} | 0 19 files changed, 373 insertions(+), 5298 deletions(-) rename src/service/rooms/{alias.rs => alias/mod.rs} (100%) rename src/service/rooms/{directory.rs => directory/mod.rs} (100%) rename src/service/rooms/{edus.rs => edus/mod.rs} (100%) rename src/service/rooms/{lazy_loading.rs => lazy_loading/mod.rs} (100%) rename src/service/rooms/{metadata.rs => metadata/mod.rs} (100%) rename src/service/rooms/{outlier.rs => outlier/mod.rs} (65%) rename src/service/rooms/{pdu_metadata.rs => pdu_metadata/mod.rs} (100%) rename src/service/rooms/{search.rs => search/mod.rs} (100%) rename src/service/rooms/{short.rs => short/mod.rs} (100%) rename src/service/rooms/{state_cache.rs => state_cache/mod.rs} (100%) rename src/service/rooms/{state_compressor.rs => state_compressor/mod.rs} (100%) rename src/service/rooms/{timeline.rs => timeline/mod.rs} (93%) rename src/service/rooms/{user.rs => user/mod.rs} (100%) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 4dda11a..ecd26d1 100644 --- 
a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -806,36 +806,6 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, displayname: None, @@ -851,98 +821,7 @@ pub(crate) async fn invite_helper<'a>( let state_key = user_id.to_string(); let kind = StateEventType::RoomMember; - let auth_events = db.rooms.get_auth_events( - room_id, - &kind.to_string().into(), - sender_user, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) - .expect("event is valid, we just created it"); + let (pdu, pdu_json) = 
create_hash_and_sign_event(); let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 6fa83e4..f60f735 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -882,1163 +882,6 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } -/// An async function that can recursively call itself. -type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; - -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not -/// timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are -/// also rejected "due to auth events" -/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline -/// events -/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities -/// doing all the checks in this list starting at 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by -/// doing state res where one of the inputs was a previously trusted set of state, don't just -/// trust a set of state we got from a remote) -/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" -/// it -/// 14. Use state resolution to find new room state -// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively -#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub(crate) async fn handle_incoming_pdu<'a>( - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - is_timeline_event: bool, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> Result>, String> { - match db.rooms.exists(room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_owned()); - } - } - - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err("Federation of this room is currently disabled on this server.".to_owned()); - } - } - - // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { - return Ok(Some(pdu_id.to_vec())); - } - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? - .ok_or_else(|| "Failed to find create event in db.".to_owned())?; - - let first_pdu_in_room = db - .rooms - .first_pdu_in_room(room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists"); - - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } - - // 9. 
Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } - - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - - let mut errors = 0; - for prev_id in dbg!(sorted) { - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err( - "Federation of this room is currently disabled on this server.".to_owned(), - ); - } - } - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*prev_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", prev_id); - continue; - } - } - - if errors >= 5 { - break; - } - if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - continue; - } - - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await - { - errors += 1; - warn!("Prev event {} failed: {}", prev_id, e); - match db - .globals - .bad_event_ratelimiter - .write() - .unwrap() - .entry((*prev_id).to_owned()) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) - } - } - } - let elapsed = start_time.elapsed(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - 
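// A much-simplified stand-in for the ordering step above: the removed code builds a graph
// of prev events and runs state_res::lexicographical_topological_sort, passing power level
// 0 for every event, so the effective sort key is the origin_server_ts with the event id
// as a final tie-break. This sketch shows only that key, not the topological part.
fn order_prev_events(mut events: Vec<(String, u64)>) -> Vec<String> {
    // (event_id, origin_server_ts): handle older events first, ids break ties.
    events.sort_by(|a, b| a.1.cmp(&b.1).then_with(|| a.0.cmp(&b.0)));
    events.into_iter().map(|(id, _)| id).collect()
}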
.remove(&room_id.to_owned()); - warn!( - "Handling prev event {} took {}m{}s", - prev_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - } - - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await; - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); - - r -} - -#[tracing::instrument(skip(create_event, value, db, pub_key_map))] -fn handle_outlier_pdu<'a>( - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { - Box::pin(async move { - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - fetch_required_signing_keys(&value, pub_key_map, db) - .await - .map_err(|e| e.to_string())?; - - // 2. Check signatures, otherwise drop - // 3. check content hash, redact if doesn't match - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, - &value, - room_version_id, - ) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; - - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type - val.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; - - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // EDIT: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - info!( - "Auth check for {} based on auth events", - incoming_pdu.event_id - ); - - // Build map of auth events - let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } - }; - - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - } - hash_map::Entry::Occupied(_) => { - return Err( - "Auth event's type and state_key combination exists multiple times." - .to_owned(), - ) - } - } - } - - // The original create event must be in the auth events - if auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed".to_owned())? - { - return Err("Event has failed auth check with auth events.".to_owned()); - } - - info!("Validation successful."); - - // 7. Persist the event as an outlier. - db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val) - .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; - info!("Added pdu as outlier."); - - Ok((Arc::new(incoming_pdu), val)) - }) -} - -#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] -async fn upgrade_outlier_to_timeline_pdu( - incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - db: &Database, - room_id: &RoomId, - pub_key_map: &RwLock>>, -) -> Result>, String> { - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { - return Ok(Some(pduid)); - } - - if db - .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? - { - return Err("Event has been soft failed".into()); - } - - info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. 
- - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - - info!("Requesting state at event"); - let mut state_at_incoming_event = None; - - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db - .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; - - let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) - } else { - None - }; - - if let Some(Ok(mut state)) = state { - info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; - - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu - } - - state_at_incoming_event = Some(state); - } - } else { - info!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::new(); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; - - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db - .rooms - .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - - for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } else { - warn!("Failed to get_statekey_from_short."); - } - starting_events.push(id); - } - - auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); - - fork_states.push(state); - } - - let lock = db.globals.stateres_mutex.lock(); - - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }); - drop(lock); - - state_at_incoming_event = match result { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, event_id)) - }) - .collect::>()?, - ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); - None - } - } - } - } - - if state_at_incoming_event.is_none() { - info!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - let mut state: BTreeMap<_, Arc> = BTreeMap::new(); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; - - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); - } - btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), - ), - } - } - - // The original create event must still be in the state - let create_shortstatekey = db - .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? - .expect("Room exists"); - - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - state_at_incoming_event = Some(state); - } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); - } - }; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - - info!("Starting auth check"); - // 11. 
Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| { - db.rooms - .get_shortstatekey(&k.to_string().into(), s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) - }, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if !check_result { - return Err("Event has failed auth check with state at the event.".into()); - } - info!("Auth check succeeded"); - - // We start looking at current room state now, so lets lock the room - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) - info!("Calculating extremities"); - let mut extremities = db - .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; - - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); - } - } - - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - - info!("Compressing state at event"); - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; - - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - info!("Starting soft fail auth check"); - - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if soft_fail { - append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); - } - - if incoming_pdu.state_key.is_some() { - info!("Loading current room state ids"); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? 
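// A compact sketch of the forward-extremity bookkeeping above: leaves referenced by the
// incoming event's prev_events stop being extremities, and (later, once the event passes
// all checks) the event itself becomes one. HashSet<String> stands in for the real types.
use std::collections::HashSet;

fn update_extremities(
    mut extremities: HashSet<String>,
    prev_events: &[String],
    accepted_event_id: String,
) -> HashSet<String> {
    for prev in prev_events {
        extremities.remove(prev);
    }
    extremities.insert(accepted_event_id);
    extremities
}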
- .expect("every room has state"); - - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; - - info!("Preparing for stateres to derive new room state"); - let mut extremity_sstatehashes = HashMap::new(); - - info!("Loading extremities"); - for id in dbg!(&extremities) { - match db - .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? - { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - let mut fork_states = Vec::new(); - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - info!("State resolution trivial"); - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) - .map_err(|_| "Failed to compress_state_event.".to_owned()) - }) - .collect::>()? - } else { - info!("Loading auth chains"); - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); - } - - info!("Loading fork states"); - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) - .ok() - }) - .collect::>() - }) - .collect(); - - info!("Resolving state"); - - let lock = db.globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); - } - }; - - drop(lock); - - info!("State resolution done. Compressing state"); - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) - .map_err(|_| "Failed to compress state event".to_owned()) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - } - - info!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - - let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - info!("Appended incoming pdu"); - - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) -} - -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? 
-#[tracing::instrument(skip_all)] -pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, - origin: &'a ServerName, - events: &'a [Arc], - create_event: &'a PduEvent, - room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { - Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - let mut pdus = vec![]; - for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } - - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - continue; - } - - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if events_all.contains(&next_id) { - continue; - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { - trace!("Found {} in db", id); - continue; - } - - info!("Fetching {} over federation.", next_id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &next_id }, - ) - .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()); - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()); - } - } - } - - for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); - } - } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); - } - } - } - } - pdus - }) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. #[tracing::instrument(skip_all)] @@ -2204,92 +1047,6 @@ pub(crate) async fn fetch_signing_keys( )) } -/// Append the incoming event setting the state snapshot to the state from the -/// server that sent the event. -#[tracing::instrument(skip_all)] -fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - for appservice in db.appservice.all()? { - if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - db.rooms - .room_aliases(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(Some(pdu_id)) -} - #[tracing::instrument(skip(starting_events, db))] pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, @@ -2745,35 +1502,6 @@ pub async fn create_join_event_template_route( } } - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(&body.room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default version - // right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -2798,89 +1526,7 @@ pub async fn create_join_event_template_route( let state_key = body.user_id.to_string(); let kind = StateEventType::RoomMember; - let auth_events = db.rooms.get_auth_events( - &body.room_id, - &kind.to_string().into(), - &body.user_id, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: body.room_id.clone(), - sender: body.user_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); + let (pdu, pdu_json) = create_hash_and_sign_event(); Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), diff --git a/src/service/rooms/alias.rs b/src/service/rooms/alias/mod.rs similarity index 100% rename from src/service/rooms/alias.rs rename to src/service/rooms/alias/mod.rs diff --git a/src/service/rooms/directory.rs b/src/service/rooms/directory/mod.rs similarity index 100% rename from src/service/rooms/directory.rs rename to src/service/rooms/directory/mod.rs diff --git a/src/service/rooms/edus.rs b/src/service/rooms/edus/mod.rs similarity index 100% rename from src/service/rooms/edus.rs rename to src/service/rooms/edus/mod.rs diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 6fa83e4..e59219b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,886 +1,3 @@ -use crate::{ - client_server::{self, claim_keys_helper, get_keys_helper}, - database::{rooms::CompressedStateEvent, DatabaseGuard}, - pdu::EventHash, - utils, Database, Error, PduEvent, Result, Ruma, -}; -use axum::{response::IntoResponse, Json}; -use futures_util::{stream::FuturesUnordered, StreamExt}; -use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION}; -use regex::Regex; -use ruma::{ - api::{ - client::error::{Error as RumaError, ErrorKind}, - federation::{ - authorization::get_event_authorization, - device::get_devices::{self, v1::UserDevice}, - directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{ - get_remote_server_keys, get_remote_server_keys_batch, - get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, - get_server_version, ServerSigningKeys, VerifyKey, - }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, - keys::{claim_keys, get_keys}, - membership::{ - create_invite, - create_join_event::{self, RoomState}, - prepare_join_event, - }, - 
query::{get_profile_information, get_room_information}, - transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, - send_transaction_message, - }, - }, - EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, - SendAccessToken, - }, - directory::{IncomingFilter, IncomingRoomNetwork}, - events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, - room::{ - create::RoomCreateEventContent, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - server_acl::RoomServerAclEventContent, - }, - RoomEventType, StateEventType, - }, - int, - receipt::ReceiptType, - serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion, StateMap}, - to_device::DeviceIdOrAllDevices, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, -}; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use std::{ - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - fmt::Debug, - future::Future, - mem, - net::{IpAddr, SocketAddr}, - ops::Deref, - pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, - time::{Duration, Instant, SystemTime}, -}; -use tokio::sync::{MutexGuard, Semaphore}; -use tracing::{debug, error, info, trace, warn}; - -/// Wraps either an literal IP address plus port, or a hostname plus complement -/// (colon-plus-port if it was specified). -/// -/// Note: A `FedDest::Named` might contain an IP address in string form if there -/// was no port specified to construct a SocketAddr with. -/// -/// # Examples: -/// ```rust -/// # use conduit::server_server::FedDest; -/// # fn main() -> Result<(), std::net::AddrParseError> { -/// FedDest::Literal("198.51.100.3:8448".parse()?); -/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); -/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); -/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); -/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); -/// # Ok(()) -/// # } -/// ``` -#[derive(Clone, Debug, PartialEq)] -pub enum FedDest { - Literal(SocketAddr), - Named(String, String), -} - -impl FedDest { - fn into_https_string(self) -> String { - match self { - Self::Literal(addr) => format!("https://{}", addr), - Self::Named(host, port) => format!("https://{}{}", host, port), - } - } - - fn into_uri_string(self) -> String { - match self { - Self::Literal(addr) => addr.to_string(), - Self::Named(host, ref port) => host + port, - } - } - - fn hostname(&self) -> String { - match &self { - Self::Literal(addr) => addr.ip().to_string(), - Self::Named(host, _) => host.clone(), - } - } - - fn port(&self) -> Option { - match &self { - Self::Literal(addr) => Some(addr.port()), - Self::Named(_, port) => port[1..].parse().ok(), - } - } -} - -#[tracing::instrument(skip(globals, request))] -pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, - destination: &ServerName, - request: T, -) -> Result -where - T: Debug, -{ - if !globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut write_destination_to_cache = false; - - let cached_result = globals - .actual_destination_cache - .read() - .unwrap() - .get(destination) - .cloned(); - - let (actual_destination, host) = if let Some(result) = cached_result { - result - } else { - write_destination_to_cache = true; - - let result = 
find_actual_destination(globals, destination).await; - - (result.0, result.1.into_uri_string()) - }; - - let actual_destination_str = actual_destination.clone().into_https_string(); - - let mut http_request = request - .try_into_http_request::>( - &actual_destination_str, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!( - "Failed to find destination {}: {}", - actual_destination_str, e - ); - Error::BadServerResponse("Invalid destination") - })?; - - let mut request_map = serde_json::Map::new(); - - if !http_request.body().is_empty() { - request_map.insert( - "content".to_owned(), - serde_json::from_slice(http_request.body()) - .expect("body is valid json, we just created it"), - ); - }; - - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert( - "uri".to_owned(), - http_request - .uri() - .path_and_query() - .expect("all requests have a path") - .to_string() - .into(), - ); - request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); - request_map.insert("destination".to_owned(), destination.as_str().into()); - - let mut request_json = - serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); - - ruma::signatures::sign_json( - globals.server_name().as_str(), - globals.keypair(), - &mut request_json, - ) - .expect("our request json is what ruma expects"); - - let request_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); - - let signatures = request_json["signatures"] - .as_object() - .unwrap() - .values() - .map(|v| { - v.as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())) - }); - - for signature_server in signatures { - for s in signature_server { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); - } - } - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - let url = reqwest_request.url().clone(); - - let response = globals.federation_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "{} {}: {}", - url, - status, - String::from_utf8_lossy(&body) - .lines() - .collect::>() - .join(" ") - ); - } - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - if status == 200 { - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - globals.actual_destination_cache.write().unwrap().insert( - Box::::from(destination), - (actual_destination, host), - ); - } - - response.map_err(|e| { - warn!( - "Invalid 200 response from {} on: {} {}", - &destination, url, e - ); - Error::BadServerResponse("Server returned bad 200 response.") - }) - } else { - Err(Error::FederationError( - destination.to_owned(), - 
RumaError::try_from_http_response(http_response).map_err(|e| { - warn!( - "Invalid {} response from {} on: {} {}", - status, &destination, url, e - ); - Error::BadServerResponse("Server returned bad error response.") - })?, - )) - } - } - Err(e) => Err(e.into()), - } -} - -fn get_ip_with_port(destination_str: &str) -> Option { - if let Ok(destination) = destination_str.parse::() { - Some(FedDest::Literal(destination)) - } else if let Ok(ip_addr) = destination_str.parse::() { - Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) - } else { - None - } -} - -fn add_port_to_hostname(destination_str: &str) -> FedDest { - let (host, port) = match destination_str.find(':') { - None => (destination_str, ":8448"), - Some(pos) => destination_str.split_at(pos), - }; - FedDest::Named(host.to_owned(), port.to_owned()) -} - -/// Returns: actual_destination, host header -/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names -/// Numbers in comments below refer to bullet points in linked section of specification -#[tracing::instrument(skip(globals))] -async fn find_actual_destination( - globals: &crate::database::globals::Globals, - destination: &'_ ServerName, -) -> (FedDest, FedDest) { - let destination_str = destination.as_str().to_owned(); - let mut hostname = destination_str.clone(); - let actual_destination = match get_ip_with_port(&destination_str) { - Some(host_port) => { - // 1: IP literal with provided or default port - host_port - } - None => { - if let Some(pos) = destination_str.find(':') { - // 2: Hostname with included port - let (host, port) = destination_str.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - match request_well_known(globals, destination.as_str()).await { - // 3: A .well-known file is available - Some(delegated_hostname) => { - hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = delegated_hostname.find(':') { - // 3.2: Hostname with port in .well-known file - let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - // Delegated hostname has no port in this branch - if let Some(hostname_override) = - query_srv_record(globals, &delegated_hostname).await - { - // 3.3: SRV lookup successful - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{}", port)) - } else { - add_port_to_hostname(&delegated_hostname) - } - } else { - // 3.4: No SRV records, just use the hostname from .well-known - add_port_to_hostname(&delegated_hostname) - } - } - } - } - } - // 4: No .well-known or an error occured - None => { - match query_srv_record(globals, &destination_str).await { - // 4: SRV record found - Some(hostname_override) => { - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - 
hostname.clone(), - (override_ip.iter().collect(), force_port.unwrap_or(8448)), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{}", port)) - } else { - add_port_to_hostname(&hostname) - } - } - // 5: No SRV record found - None => add_port_to_hostname(&destination_str), - } - } - } - } - } - }; - - // Can't use get_ip_with_port here because we don't want to add a port - // to an IP address if it wasn't specified - let hostname = if let Ok(addr) = hostname.parse::() { - FedDest::Literal(addr) - } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_owned()) - } else if let Some(pos) = hostname.find(':') { - let (host, port) = hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - FedDest::Named(hostname, ":8448".to_owned()) - }; - (actual_destination, hostname) -} - -#[tracing::instrument(skip(globals))] -async fn query_srv_record( - globals: &crate::database::globals::Globals, - hostname: &'_ str, -) -> Option { - if let Ok(Some(host_port)) = globals - .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{}", hostname)) - .await - .map(|srv| { - srv.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()), - ) - }) - }) - { - Some(host_port) - } else { - None - } -} - -#[tracing::instrument(skip(globals))] -async fn request_well_known( - globals: &crate::database::globals::Globals, - destination: &str, -) -> Option { - let body: serde_json::Value = serde_json::from_str( - &globals - .default_client() - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) - .send() - .await - .ok()? - .text() - .await - .ok()?, - ) - .ok()?; - Some(body.get("m.server")?.as_str()?.to_owned()) -} - -/// # `GET /_matrix/federation/v1/version` -/// -/// Get version information on this server. -pub async fn get_server_version_route( - db: DatabaseGuard, - _body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - Ok(get_server_version::v1::Response { - server: Some(get_server_version::v1::Server { - name: Some("Conduit".to_owned()), - version: Some(env!("CARGO_PKG_VERSION").to_owned()), - }), - }) -} - -/// # `GET /_matrix/key/v2/server` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. 
-// Response type for this endpoint is Json because we need to calculate a signature for the response -pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); - verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(db.globals.keypair().public_key().to_vec()), - }, - ); - let mut response = serde_json::from_slice( - get_server_keys::v2::Response { - server_key: Raw::new(&ServerSigningKeys { - server_name: db.globals.server_name().to_owned(), - verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(86400 * 7), - ) - .expect("time is valid"), - }) - .expect("static conversion, no errors"), - } - .try_into_http_response::>() - .unwrap() - .body(), - ) - .unwrap(); - - ruma::signatures::sign_json( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut response, - ) - .unwrap(); - - Ok(Json(response)) -} - -/// # `GET /_matrix/key/v2/server/{keyId}` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { - get_server_keys_route(db).await -} - -/// # `POST /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &body.filter, - &body.room_network, - ) - .await?; - - Ok(get_public_rooms_filtered::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `GET /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -pub async fn get_public_rooms_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &IncomingFilter::default(), - &IncomingRoomNetwork::Matrix, - ) - .await?; - - Ok(get_public_rooms::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `PUT /_matrix/federation/v1/send/{txnId}` -/// -/// Push EDUs and PDUs to this server. 
-pub async fn send_transaction_message_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let mut resolved_map = BTreeMap::new(); - - let pub_key_map = RwLock::new(BTreeMap::new()); - - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) - // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. - // let mut auth_cache = EventMap::new(); - - for pdu in &body.pdus { - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - continue; - } - }; - - // 0. Check the server is in the room - let room_id = match value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - { - Some(id) => id, - None => { - // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); - continue; - } - }; - - acl_check(&sender_servername, &room_id, &db)?; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let start_time = Instant::now(); - resolved_map.insert( - event_id.clone(), - handle_incoming_pdu( - &sender_servername, - &event_id, - &room_id, - value, - true, - &db, - &pub_key_map, - ) - .await - .map(|_| ()), - ); - drop(mutex_lock); - - let elapsed = start_time.elapsed(); - warn!( - "Handling transaction of event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - - for pdu in &resolved_map { - if let Err(e) = pdu.1 { - if e != "Room is unknown to this server." { - warn!("Incoming PDU failed {:?}", pdu); - } - } - } - - for edu in body - .edus - .iter() - .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) - { - match edu { - Edu::Presence(_) => {} - Edu::Receipt(receipt) => { - for (room_id, room_updates) in receipt.receipts { - for (user_id, user_updates) in room_updates.read { - if let Some((event_id, _)) = user_updates - .event_ids - .iter() - .filter_map(|id| { - db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) - }) - .max_by_key(|(_, count)| *count) - { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); - - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - db.rooms.edus.readreceipt_update( - &user_id, - &room_id, - event, - &db.globals, - )?; - } else { - // TODO fetch missing events - info!("No known event ids in read receipt: {:?}", user_updates); - } - } - } - } - Edu::Typing(typing) => { - if db.rooms.is_joined(&typing.user_id, &typing.room_id)? 
{ - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms.edus.typing_remove( - &typing.user_id, - &typing.room_id, - &db.globals, - )?; - } - } - } - Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; - } - Edu::DirectToDevice(DirectDeviceContent { - sender, - ev_type, - message_id, - messages, - }) => { - // Check if this is a new transaction id - if db - .transaction_ids - .existing_txnid(&sender, None, &message_id)? - .is_some() - { - continue; - } - - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )? - } - - DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( - &sender, - target_user_id, - &target_device_id?, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )?; - } - } - } - } - } - - // Save transaction id with empty data - db.transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; - } - Edu::SigningKeyUpdate(SigningKeyUpdateContent { - user_id, - master_key, - self_signing_key, - }) => { - if user_id.server_name() != sender_servername { - continue; - } - if let Some(master_key) = master_key { - db.users.add_cross_signing_keys( - &user_id, - &master_key, - &self_signing_key, - &None, - &db.rooms, - &db.globals, - )?; - } - } - Edu::_Custom(_) => {} - } - } - - db.flush()?; - - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) -} /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; @@ -918,37 +35,25 @@ pub(crate) async fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> Result>, String> { - match db.rooms.exists(room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_owned()); - } - } - - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err("Federation of this room is currently disabled on this server.".to_owned()); - } - } +) -> Result>> { + db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; + // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { - return Ok(Some(pdu_id.to_vec())); + if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { + return Some(pdu_id.to_vec()); } let create_event = db .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? - .ok_or_else(|| "Failed to find create event in db.".to_owned())?; + .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
+ .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = db .rooms - .first_pdu_in_room(room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists"); + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; let (incoming_pdu, val) = handle_outlier_pdu( origin, @@ -966,93 +71,19 @@ pub(crate) async fn handle_incoming_pdu<'a>( return Ok(None); } + // Skip old events if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { return Ok(None); } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } - - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. 
- println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; + let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); let mut errors = 0; for prev_id in dbg!(sorted) { - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err( - "Federation of this room is currently disabled on this server.".to_owned(), - ); - } - } + // Check for disabled again because it might have changed + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of + this room is currently disabled on this server."))?; if let Some((time, tries)) = db .globals @@ -1076,7 +107,9 @@ pub(crate) async fn handle_incoming_pdu<'a>( if errors >= 5 { break; } + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + // Skip old events if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { continue; } @@ -1087,6 +120,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, json, @@ -1130,6 +164,8 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + // Done with prev events, now handling the incoming event + let start_time = Instant::now(); db.globals .roomid_federationhandletime @@ -1171,16 +207,14 @@ fn handle_outlier_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys fetch_required_signing_keys(&value, pub_key_map, db) - .await - .map_err(|e| e.to_string())?; + .await?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") })?; let room_version_id = &create_event_content.room_version; @@ -1220,7 +254,7 @@ fn handle_outlier_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // EDIT: Step 5 is not applied anymore because it failed too often + // NOTE: Step 5 is not applied anymore because it failed too often warn!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_outliers( db, @@ -1245,7 +279,7 @@ fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { + let auth_event = match db.rooms.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -1264,10 +298,9 @@ fn handle_outlier_pdu<'a>( v.insert(auth_event); } hash_map::Entry::Occupied(_) => { - return Err( + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth event's type and state_key combination exists multiple times." 
- .to_owned(), - ) + )); } } } @@ -1278,7 +311,7 @@ fn handle_outlier_pdu<'a>( .map(|a| a.as_ref()) != Some(create_event) { - return Err("Incoming event refers to wrong create event.".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); } if !state_res::event_auth::auth_check( @@ -1287,17 +320,17 @@ fn handle_outlier_pdu<'a>( None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) - .map_err(|_e| "Auth check failed".to_owned())? + .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? { - return Err("Event has failed auth check with auth events.".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); } info!("Validation successful."); // 7. Persist the event as an outlier. db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val) - .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + info!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) @@ -1314,6 +347,7 @@ async fn upgrade_outlier_to_timeline_pdu( room_id: &RoomId, pub_key_map: &RwLock>>, ) -> Result>, String> { + // Skip the PDU if we already have it as a timeline event if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } @@ -1331,7 +365,7 @@ async fn upgrade_outlier_to_timeline_pdu( let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() + Error::BadDatabase("Invalid create event in db") })?; let room_version_id = &create_event_content.room_version; @@ -2039,1606 +1073,80 @@ pub(crate) fn fetch_and_handle_outliers<'a>( }) } -/// Search the DB for the signing keys of the given server, if we don't have them -/// fetch them from the server and save to our DB. 
-#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_signing_keys( - db: &Database, - origin: &ServerName, - signature_ids: Vec, -) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - let permit = db - .globals - .servername_ratelimiter - .read() - .unwrap() - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - let permit = match permit { - Some(p) => p, - None => { - let mut write = db.globals.servername_ratelimiter.write().unwrap(); - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); +fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; - s.acquire_owned() - } - } - .await; + let mut amount = 0; - let back_off = |id| match db - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - if let Some((time, tries)) = db - .globals - .bad_signature_ratelimiter - .read() - .unwrap() - .get(&signature_ids) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); - } - } - - trace!("Loading signing keys for {}", origin); - - let mut result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if contains_all_ids(&result) { - return Ok(result); - } - - debug!("Fetching signing keys for {} over federation", origin); - - if let Some(server_key) = db - .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) - .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - db.globals.add_signing_key(origin, server_key.clone())?; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in db.globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys::v2::Request::new( - origin, - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) - { - trace!("Got signing keys: {:?}", server_keys); - for k in server_keys { - db.globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - - drop(permit); - - back_off(signature_ids); - - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) -} - -/// Append the incoming event setting the state snapshot to the state from the -/// server that sent the event. -#[tracing::instrument(skip_all)] -fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - for appservice in db.appservice.all()? { - if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - db.rooms - .room_aliases(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(Some(pdu_id)) -} - -#[tracing::instrument(skip(starting_events, db))] -pub(crate) async fn get_auth_chain<'a>( - room_id: &RoomId, - starting_events: Vec>, - db: &'a Database, -) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - let mut i = 0; - for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id.clone())); - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - let mut i = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); - db.rooms - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() - ); - chunk_cache.extend(auth_chain.iter()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - }; - } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 - ); - let chunk_cache = Arc::new(chunk_cache); - db.rooms - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) -} - -#[tracing::instrument(skip(event_id, db))] -fn get_auth_chain_inner( - room_id: &RoomId, - event_id: &EventId, - db: &Database, -) -> Result> { - let mut todo = vec![Arc::from(event_id)]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match db.rooms.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); - } - for auth_event in &pdu.auth_events { - let sauthevent = db - .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; - - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); - } - Err(e) => { - warn!("Could not load event in auth chain: {} {}", event_id, e); - } - } - } - - Ok(found) -} - -/// # `GET /_matrix/federation/v1/event/{eventId}` -/// -/// Retrieves a single event from the server. -/// -/// - Only works if a user of this server is currently invited or joined the room -pub async fn get_event_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if !db.rooms.server_in_room(sender_servername, room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room", - )); - } - - Ok(get_event::v1::Response { - origin: db.globals.server_name().to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: PduEvent::convert_to_outgoing_federation_event(event), - }) -} - -/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` -/// -/// Retrieves events that the sender is missing. 
-pub async fn get_missing_events_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let mut queued_events = body.latest_events.clone(); - let mut events = Vec::new(); - - let mut i = 0; - while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if event_room_id != body.room_id { - warn!( - "Evil event detected: Event {} found while searching in room {}", - queued_events[i], body.room_id - ); - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Evil event detected", - )); - } - - if body.earliest_events.contains(&queued_events[i]) { - i += 1; - continue; - } - queued_events.extend_from_slice( - &serde_json::from_value::>>( - serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no prev_events field.") - })?) - .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, - ); - events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); - } - i += 1; - } - - Ok(get_missing_events::v1::Response { events }) -} - -/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` -/// -/// Retrieves the auth chain for a given event. -/// -/// - This does not include the event itself -pub async fn get_event_authorization_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_event_authorization::v1::Response { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }) -} - -/// # `GET /_matrix/federation/v1/state/{roomId}` -/// -/// Retrieves the current state of the room. 
-pub async fn get_room_state_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdus = db - .rooms - .state_full_ids(shortstatehash) - .await? - .into_iter() - .map(|(_, id)| { - PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id).unwrap().unwrap(), - ) - }) - .collect(); - - let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_room_state::v1::Response { - auth_chain: auth_chain_ids - .map(|id| { - db.rooms.get_pdu_json(&id).map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) - }) - .filter_map(|r| r.ok()) - .collect(), - pdus, - }) -} - -/// # `GET /_matrix/federation/v1/state_ids/{roomId}` -/// -/// Retrieves the current state of the room. -pub async fn get_room_state_ids_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdu_ids = db - .rooms - .state_full_ids(shortstatehash) - .await? - .into_iter() - .map(|(_, id)| (*id).to_owned()) - .collect(); - - let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), - pdu_ids, - }) -} - -/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` -/// -/// Creates a join template. -pub async fn create_join_event_template_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if !db.rooms.exists(&body.room_id)? 
{ - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server.", - )); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - acl_check(sender_servername, &body.room_id, &db)?; - - // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = - db.rooms - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - if let Some(join_rules_event_content) = join_rules_event_content { - if matches!( - join_rules_event_content.join_rule, - JoinRule::Restricted { .. } - ) { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Conduit does not support restricted rooms yet.", - )); - } - } - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(&body.room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default version - // right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, - "Room version not supported.", - )); - } - - let content = to_raw_value(&RoomMemberEventContent { - avatar_url: None, - blurhash: None, - displayname: None, - is_direct: None, - membership: MembershipState::Join, - third_party_invite: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("member event is valid value"); - - let state_key = body.user_id.to_string(); - let kind = StateEventType::RoomMember; - - let auth_events = db.rooms.get_auth_events( - &body.room_id, - &kind.to_string().into(), - &body.user_id, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: body.room_id.clone(), - sender: body.user_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - Ok(prepare_join_event::v1::Response { - room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) -} - -async fn create_join_event( - db: &DatabaseGuard, - sender_servername: &ServerName, - room_id: &RoomId, - pdu: &RawJsonValue, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if !db.rooms.exists(room_id)? { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server.", - )); - } - - acl_check(sender_servername, room_id, db)?; - - // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - if let Some(join_rules_event_content) = join_rules_event_content { - if matches!( - join_rules_event_content.join_rule, - JoinRule::Restricted { .. } - ) { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Conduit does not support restricted rooms yet.", - )); - } - } - - // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = db - .rooms - .current_shortstatehash(room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pub_key_map = RwLock::new(BTreeMap::new()); - // let mut auth_cache = EventMap::new(); - - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - let origin: Box = serde_json::from_value( - serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event needs an origin field.", - ))?) - .expect("CanonicalJson is valid json value"), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; - drop(mutex_lock); - - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; - let auth_chain_ids = get_auth_chain( - room_id, - state_ids.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await?; - - let servers = db - .rooms - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); - - db.sending.send_pdu(servers, &pdu_id)?; - - db.flush()?; - - Ok(RoomState { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - state: state_ids - .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }) -} - -/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub async fn create_join_event_v1_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v1::Response { room_state }) -} - -/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub async fn create_join_event_v2_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v2::Response { room_state }) -} - -/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` -/// -/// Invites a remote user to a room. 
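`create_join_event` above serializes processing per room by taking an entry out of `roomid_mutex_federation` and holding the lock while the incoming PDU is handled. A rough sketch of that pattern follows; the `RoomLocks` type and `for_room` helper are stand-ins for illustration, not Conduit's real globals.

use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use tokio::sync::Mutex;

// One lazily created async mutex per room id: locking it serializes state
// changes for that room without blocking work in other rooms.
#[derive(Default)]
struct RoomLocks {
    inner: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomLocks {
    fn for_room(&self, room_id: &str) -> Arc<Mutex<()>> {
        Arc::clone(
            self.inner
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        )
    }
}

// Usage inside an async fn:
// let mutex = locks.for_room(room_id);
// let _guard = mutex.lock().await;
// ... handle the incoming PDU ...
// // guard drops here, releasing the room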
-pub async fn create_invite_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - acl_check(sender_servername, &body.room_id, &db)?; - - if !db.rooms.is_supported_version(&db, &body.room_version) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: body.room_version.clone(), - }, - "Server does not support this room version.", - )); - } - - let mut signed_event = utils::to_canonical_object(&body.event) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut signed_event, - &body.room_version, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - signed_event.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), - ); - - let sender: Box<_> = serde_json::from_value( - signed_event - .get("sender") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no sender field.", - ))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; - - let invited_user: Box<_> = serde_json::from_value( - signed_event - .get("state_key") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no state_key field.", - ))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; - - let mut invite_state = body.invite_room_state.clone(); - - let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; - - event.insert("event_id".to_owned(), "$dummy".into()); - - let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })?; - - invite_state.push(pdu.to_stripped_state_event()); - - // If the room already exists, the remote server will notify us about the join via /send - if !db.rooms.exists(&pdu.room_id)? { - db.rooms.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - &db, - true, - )?; - } - - db.flush()?; - - Ok(create_invite::v2::Response { - event: PduEvent::convert_to_outgoing_federation_event(signed_event), - }) -} - -/// # `GET /_matrix/federation/v1/user/devices/{userId}` -/// -/// Gets information on all devices of the user. -pub async fn get_devices_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - Ok(get_devices::v1::Response { - user_id: body.user_id.clone(), - stream_id: db - .users - .get_devicelist_version(&body.user_id)? 
- .unwrap_or(0) - .try_into() - .expect("version will not grow that large"), - devices: db - .users - .all_devices_metadata(&body.user_id) - .filter_map(|r| r.ok()) - .filter_map(|metadata| { - Some(UserDevice { - keys: db - .users - .get_device_keys(&body.user_id, &metadata.device_id) - .ok()??, - device_id: metadata.device_id, - device_display_name: metadata.display_name, - }) - }) - .collect(), - master_key: db - .users - .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, - self_signing_key: db - .users - .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, - }) -} - -/// # `GET /_matrix/federation/v1/query/directory` -/// -/// Resolve a room alias to a room id. -pub async fn get_room_information_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let room_id = db - .rooms - .id_from_alias(&body.room_alias)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room alias not found.", - ))?; - - Ok(get_room_information::v1::Response { - room_id, - servers: vec![db.globals.server_name().to_owned()], - }) -} - -/// # `GET /_matrix/federation/v1/query/profile` -/// -/// Gets information on a profile. -pub async fn get_profile_information_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut displayname = None; - let mut avatar_url = None; - let mut blurhash = None; - - match &body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => { - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)? - } - // TODO: what to do with custom - Some(_) => {} - None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)?; - } - } - - Ok(get_profile_information::v1::Response { - blurhash, - displayname, - avatar_url, - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/query` -/// -/// Gets devices and identity keys for the given users. -pub async fn get_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = get_keys_helper( - None, - &body.device_keys, - |u| Some(u.server_name()) == body.sender_servername.as_deref(), - &db, - ) - .await?; - - db.flush()?; - - Ok(get_keys::v1::Response { - device_keys: result.device_keys, - master_keys: result.master_keys, - self_signing_keys: result.self_signing_keys, - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/claim` -/// -/// Claims one-time keys. -pub async fn claim_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; - - Ok(claim_keys::v1::Response { - one_time_keys: result.one_time_keys, - }) -} - -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_required_signing_keys( - event: &BTreeMap, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let signatures = event - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? 
- .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let fetch_res = fetch_signing_keys( + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( db, - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, ) - .await; - - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); + .await + .pop() + { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); continue; } - }; - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(signature_server.clone(), keys); - } + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } - Ok(()) -} + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } -// Gets a list of servers for which we don't have the signing key yet. We go over -// the PDUs and either cache the key or add it to the list that needs to be retrieved. -fn get_server_keys_from_cache( - pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - db: &Database, -) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } - } - - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? 
- .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.to_owned(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) -} - -pub(crate) async fn fetch_join_signing_keys( - event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); - - { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - - // Try to fetch keys, failure is okay - // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - - drop(pkm); - } - - if servers.is_empty() { - // We had all keys locally - return Ok(()); - } - - for server in db.globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - }, - ) - .await - { - trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for k in keys.server_keys { - let k = k.deserialize().unwrap(); - - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = db - .globals - .add_signing_key(&k.server_name, k.clone())? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); } - } - - if servers.is_empty() { - return Ok(()); + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); } } - let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { - ( - db.sending - .send_federation_request( - &db.globals, - &server, - get_server_keys::v2::Request::new(), - ) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = db - .globals - .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); - } - } - - Ok(()) -} - -/// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { - let acl_event = match db - .rooms - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? - { - Some(acl) => acl, - None => return Ok(()), - }; - - let acl_event_content: RoomServerAclEventContent = - match serde_json::from_str(acl_event.content.get()) { - Ok(content) => content, - Err(_) => { - warn!("Invalid ACL event"); - return Ok(()); - } - }; - - if acl_event_content.is_allowed(server_name) { - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server was denied by ACL", + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), )) - } -} - -#[cfg(test)] -mod tests { - use super::{add_port_to_hostname, get_ip_with_port, FedDest}; - - #[test] - fn ips_get_default_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1"), - Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("dead:beef::"), - Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) - ); - } - - #[test] - fn ips_keep_custom_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1:1234"), - Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("[dead::beef]:8933"), - Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) - ); - } - - #[test] - fn hostnames_get_default_ports() { - assert_eq!( - add_port_to_hostname("example.com"), - FedDest::Named(String::from("example.com"), String::from(":8448")) - ) - } - - #[test] - fn hostnames_keep_custom_ports() { - assert_eq!( - add_port_to_hostname("example.com:1337"), - FedDest::Named(String::from("example.com"), String::from(":1337")) - ) - } + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + sorted } diff --git a/src/service/rooms/lazy_loading.rs b/src/service/rooms/lazy_loading/mod.rs similarity index 100% rename from src/service/rooms/lazy_loading.rs rename to src/service/rooms/lazy_loading/mod.rs diff --git a/src/service/rooms/metadata.rs b/src/service/rooms/metadata/mod.rs similarity index 100% rename from src/service/rooms/metadata.rs rename to src/service/rooms/metadata/mod.rs diff --git a/src/service/rooms/outlier.rs b/src/service/rooms/outlier/mod.rs similarity index 65% rename from src/service/rooms/outlier.rs rename to src/service/rooms/outlier/mod.rs index afb0a14..340e93e 100644 --- a/src/service/rooms/outlier.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,3 +1,12 @@ + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -8,8 +17,6 @@ } /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. 
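The new `get_outlier_pdu_json` reads canonical JSON bytes back out of the `eventid_outlierpdu` tree. A toy round-trip of that idea, with a `BTreeMap` standing in for the persistent tree and `serde_json::Value` standing in for `CanonicalJsonObject` (both simplifications, not the real types):

use std::collections::BTreeMap;
use serde_json::Value;

// Stand-in for the `eventid_outlierpdu` tree: event id bytes -> JSON bytes.
struct OutlierStore {
    eventid_outlierpdu: BTreeMap<Vec<u8>, Vec<u8>>,
}

impl OutlierStore {
    fn add_pdu_outlier(&mut self, event_id: &str, pdu: &Value) {
        self.eventid_outlierpdu.insert(
            event_id.as_bytes().to_vec(),
            serde_json::to_vec(pdu).expect("JSON value serializes"),
        );
    }

    fn get_outlier_pdu_json(&self, event_id: &str) -> Option<Value> {
        self.eventid_outlierpdu
            .get(event_id.as_bytes())
            .and_then(|bytes| serde_json::from_slice(bytes).ok())
    }
}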
#[tracing::instrument(skip(self, pdu))] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( diff --git a/src/service/rooms/pdu_metadata.rs b/src/service/rooms/pdu_metadata/mod.rs similarity index 100% rename from src/service/rooms/pdu_metadata.rs rename to src/service/rooms/pdu_metadata/mod.rs diff --git a/src/service/rooms/search.rs b/src/service/rooms/search/mod.rs similarity index 100% rename from src/service/rooms/search.rs rename to src/service/rooms/search/mod.rs diff --git a/src/service/rooms/short.rs b/src/service/rooms/short/mod.rs similarity index 100% rename from src/service/rooms/short.rs rename to src/service/rooms/short/mod.rs diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 4c75467..4b42ca8 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,120 +1,6 @@ - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } +pub trait Data { + fn get_room_shortstatehash(room_id: &RoomId); +} /// Returns the last state hash key added to the db for the given room. #[tracing::instrument(skip(self))] @@ -128,382 +14,3 @@ }) } - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
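`force_state` and `set_event_state` both persist a state snapshot as a diff against the parent layer: whatever is new relative to the parent becomes `statediffnew`, whatever disappeared becomes `statediffremoved`. A generic sketch of that computation (the element type is left generic rather than assuming the compressed byte layout):

use std::collections::HashSet;
use std::hash::Hash;

// Diff a new state snapshot against its parent layer. Only `added` and
// `removed` are stored; the full snapshot is recovered by replaying diffs.
fn state_diff<T: Eq + Hash + Copy>(
    parent: &HashSet<T>,
    new: &HashSet<T>,
) -> (HashSet<T>, HashSet<T>) {
    let added: HashSet<T> = new.difference(parent).copied().collect();
    let removed: HashSet<T> = parent.difference(new).copied().collect();
    (added, removed)
}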
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 4c75467..eddfe9e 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,133 +1,8 @@ +pub struct Service { + db: D, +} - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - +impl Service { /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. @@ -138,7 +13,7 @@ new_state_ids_compressed: HashSet, db: &Database, ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; + let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; let state_hash = self.calculate_hash( &new_state_ids_compressed @@ -237,49 +112,6 @@ Ok(()) } - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - /// Returns the leaf pdus of a room. 
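`get_pdu_leaves` and `replace_pdu_leaves` below manage the room's forward extremities under keys of the form `room_id ++ 0xff ++ event_id`; the new leaves become the `prev_events` of the next PDU built in that room. A sketch of the replace operation against a `BTreeMap` stand-in for the tree (the map and the helper are assumptions for illustration):

use std::collections::BTreeMap;

// Clear every leaf under the room's prefix, then write the new extremities.
fn replace_pdu_leaves(
    tree: &mut BTreeMap<Vec<u8>, Vec<u8>>,
    room_id: &str,
    event_ids: &[String],
) {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);

    // Remove all old leaves for this room (prefix scan).
    tree.retain(|key, _| !key.starts_with(&prefix));

    // Insert the new leaves.
    for event_id in event_ids {
        let mut key = prefix.clone();
        key.extend_from_slice(event_id.as_bytes());
        tree.insert(key, event_id.as_bytes().to_vec());
    }
}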
#[tracing::instrument(skip(self))] pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { @@ -507,3 +339,4 @@ Ok(()) } +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 4c75467..ae26a7c 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,4 +1,3 @@ - /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] @@ -116,127 +115,6 @@ }) } - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - /// Returns the full room state. #[tracing::instrument(skip(self))] pub async fn room_state_full( @@ -280,230 +158,3 @@ } } - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
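`append_to_state` compares compressed state events with `starts_with(&shortstatekey.to_be_bytes())`, which suggests the compressed form is the big-endian shortstatekey followed by the big-endian shorteventid. That layout is an inference from this patch, not something it states; the sketch below is written under that assumption only.

use std::convert::TryInto;

// Assumed 16-byte layout: shortstatekey (8 bytes BE) ++ shorteventid (8 bytes BE).
fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: [u8; 16]) -> (u64, u64) {
    let shortstatekey = u64::from_be_bytes(compressed[..8].try_into().unwrap());
    let shorteventid = u64::from_be_bytes(compressed[8..].try_into().unwrap());
    (shortstatekey, shorteventid)
}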
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache/mod.rs similarity index 100% rename from src/service/rooms/state_cache.rs rename to src/service/rooms/state_cache/mod.rs diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor/mod.rs similarity index 100% rename from src/service/rooms/state_compressor.rs rename to src/service/rooms/state_compressor/mod.rs diff --git a/src/service/rooms/timeline.rs b/src/service/rooms/timeline/mod.rs similarity index 93% rename from src/service/rooms/timeline.rs rename to src/service/rooms/timeline/mod.rs index fd93344..6299b16 100644 --- a/src/service/rooms/timeline.rs +++ b/src/service/rooms/timeline/mod.rs @@ -100,16 +100,6 @@ .transpose() } - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - /// Returns the json of a pdu. pub fn get_non_outlier_pdu_json( &self, @@ -487,211 +477,6 @@ _ => {} } - Ok(pdu_id) - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid 
event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - for appservice in db.appservice.all()? { if self.appservice_in_room(room_id, &appservice, db)? { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; @@ -768,9 +553,268 @@ } } + + Ok(pdu_id) + } + + pub fn create_hash_and_sign_event( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> (PduEvent, CanonicalJsonObj) { + let PduBuilder { + event_type, + content, + unsigned, + state_key, + redacts, + } = pdu_builder; + + let prev_events: Vec<_> = db + .rooms + .get_pdu_leaves(room_id)? + .into_iter() + .take(20) + .collect(); + + let create_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + // If there was no create event yet, assume we are creating a room with the default + // version right now + let room_version_id = create_event_content + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); + let room_version = + RoomVersion::new(&room_version_id).expect("room version is supported"); + + let auth_events = + self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = unsigned.unwrap_or_default(); + + if let Some(state_key) = &state_key { + if let Some(prev_pdu) = + self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
+ { + unsigned.insert( + "prev_content".to_owned(), + serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), + ); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + ); + } + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: room_id.to_owned(), + sender: sender_user.to_owned(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key, + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { + sha256: "aaa".to_owned(), + }, + signatures: None, + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + match ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut pdu_json, + &room_version_id, + ) { + Ok(_) => {} + Err(e) => { + return match e { + ruma::signatures::Error::PduSize => Err(Error::BadRequest( + ErrorKind::TooLarge, + "Message is too long", + )), + _ => Err(Error::BadRequest( + ErrorKind::Unknown, + "Signing event failed", + )), + } + } + } + + // Generate event id + pdu.event_id = EventId::parse_arc(format!( + "${}", + ruma::signatures::reference_hash(&pdu_json, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + pdu_json.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), + ); + + // Generate short event id + let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; + } + + /// Creates a new persisted data unit and adds it to a room. This function takes a + /// roomid_mutex_state, meaning that only this function is able to mutate the room state. + #[tracing::instrument(skip(self, db, _mutex_lock))] + pub fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { + + let (pdu, pdu_json) = create_hash_and_sign_event()?; + + + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
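        // Editorial aside -- an illustrative sketch, not part of this patch: the unused
        // `_mutex_lock` parameter above exists because callers are expected to hold the
        // per-room `roomid_mutex_state` lock for the whole call, so only one task at a
        // time can mutate a room's state. Assuming a `PduBuilder`, `sender: &UserId`,
        // `room_id: &RoomId`, and `db: &Database` in scope, and assuming
        // `db.globals.roomid_mutex_state` is a map of per-room `Arc<tokio::sync::Mutex<()>>`
        // behind an `RwLock` (names and types here are assumptions for illustration, not
        // taken from this patch), a caller would look roughly like:
        //
        //     let mutex_state = Arc::clone(
        //         db.globals
        //             .roomid_mutex_state
        //             .write()
        //             .unwrap()
        //             .entry(room_id.to_owned())
        //             .or_default(),
        //     );
        //     let state_lock = mutex_state.lock().await;
        //     let event_id =
        //         db.rooms
        //             .build_and_append_pdu(pdu_builder, sender, room_id, db, &state_lock)?;
        //     drop(state_lock);
        //
        // Holding that guard across the call is what makes the append-state-then-append-pdu
        // ordering below safe against concurrent writers.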
+ let statehashid = self.append_to_state(&pdu, &db.globals)?; + + let pdu_id = self.append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + iter::once(&*pdu.event_id), + db, + )?; + + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + self.set_room_state(room_id, statehashid)?; + + let mut servers: HashSet> = + self.room_servers(room_id).filter_map(|r| r.ok()).collect(); + + // In case we are kicking or banning a user, we need to inform their server of the change + if pdu.kind == RoomEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(Box::from(state_key_uid.server_name())); + } + } + + // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above + servers.remove(db.globals.server_name()); + + db.sending.send_pdu(servers.into_iter(), &pdu_id)?; + Ok(pdu.event_id) } + /// Append the incoming event setting the state snapshot to the state from the + /// server that sent the event. + #[tracing::instrument(skip_all)] + fn append_incoming_pdu<'a>( + db: &Database, + pdu: &PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: impl IntoIterator + Clone + Debug, + state_ids_compressed: HashSet, + soft_fail: bool, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result>> { + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + db.rooms.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + &db.globals, + )?; + + if soft_fail { + db.rooms + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + return Ok(None); + } + + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + + Ok(Some(pdu_id)) + } + /// Returns an iterator over all PDUs in a room. #[tracing::instrument(skip(self))] pub fn all_pdus<'a>( diff --git a/src/service/rooms/user.rs b/src/service/rooms/user/mod.rs similarity index 100% rename from src/service/rooms/user.rs rename to src/service/rooms/user/mod.rs From cc801528899dd37afcf7669ae5ebfeb050fc1eb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Jun 2022 12:08:58 +0200 Subject: [PATCH 1140/1727] refactor: split up force_state --- src/service/rooms/state/mod.rs | 54 ++------------------- src/service/rooms/state_compressor/mod.rs | 59 ++++++++++++++++++++++- 2 files changed, 62 insertions(+), 51 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index eddfe9e..da03ad4 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,62 +3,16 @@ pub struct Service { } impl Service { - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. + /// Set the room to the given statehash and update caches. 
#[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, - new_state_ids_compressed: HashSet, + shortstatehash: u64, + statediffnew :HashSet, + statediffremoved :HashSet, db: &Database, ) -> Result<()> { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; for event_id in statediffnew.into_iter().filter_map(|new| { self.parse_compressed_state_event(new) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index a56c0f5..197ce84 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -241,6 +241,64 @@ Ok(()) } + /// Returns the new shortstatehash + pub fn save_state( + room_id: &RoomId, + new_state_ids_compressed: HashSet, + ) -> Result<(u64, + HashSet, // added + HashSet)> // removed + { + let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; + + let state_hash = self.calculate_hash( + &new_state_ids_compressed + .iter() + .map(|bytes| &bytes[..]) + .collect::>(), + ); + + let (new_shortstatehash, already_existed) = + self.get_or_create_shortstatehash(&state_hash, &db.globals)?; + + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok(()); + } + + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() + { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (new_state_ids_compressed, HashSet::new()) + }; + + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + statediffremoved, + 2, // every state change is 2 event changes on average + states_parents, + )?; + }; + + Ok((new_shortstatehash, statediffnew, statediffremoved)) + } + #[tracing::instrument(skip(self))] pub fn get_auth_chain_from_cache<'a>( &'a self, @@ -298,4 +356,3 @@ Ok(()) } - From 28644f236e7e8871b365696a3c03e43adbd19aaa Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:50 +0200 Subject: [PATCH 1141/1727] refactor: prepare src/database/key_value.rs from 
src/service/rooms/state/data.rs --- src/{service/rooms/state/data.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state/data.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/state/data.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/state/data.rs rename to src/database/key_value.rs From 1442c64420345b7e6dff7e19cb628348d3d96c6a Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:50 +0200 Subject: [PATCH 1142/1727] refactor: restore src/service/rooms/state/data.rs --- src/service/rooms/state/data.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 src/service/rooms/state/data.rs diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs new file mode 100644 index 0000000..4b42ca8 --- /dev/null +++ b/src/service/rooms/state/data.rs @@ -0,0 +1,16 @@ +pub trait Data { + fn get_room_shortstatehash(room_id: &RoomId); +} + + /// Returns the last state hash key added to the db for the given room. + #[tracing::instrument(skip(self))] + pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) + } + From 33c0e0f430663e48c012dbb71d328dce5a2a14a8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 1143/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/alias/mod.rs --- src/{service/rooms/alias/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/alias/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/alias/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/alias/mod.rs rename to src/database/key_value.rs From a2a327af7caf309a5ef7d95e2010e25d0e75d019 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 1144/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/state/mod.rs --- src/{service/rooms/state/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/state/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/state/mod.rs rename to src/database/key_value.rs From 05487c7c158a7346c631efbb93b88bd3203ef3bc Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 1145/1727] refactor: restore src/service/rooms/alias/mod.rs --- src/service/rooms/alias/mod.rs | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 src/service/rooms/alias/mod.rs diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs new file mode 100644 index 0000000..393ad67 --- /dev/null +++ b/src/service/rooms/alias/mod.rs @@ -0,0 +1,66 @@ + + #[tracing::instrument(skip(self, globals))] + pub fn set_alias( + &self, + alias: &RoomAliasId, + room_id: Option<&RoomId>, + globals: &super::globals::Globals, + ) -> Result<()> { + if let Some(room_id) = room_id { + // New alias + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = 
room_id.as_bytes().to_vec(); + aliasid.push(0xff); + aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; + } else { + // room_id=None means remove alias + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + )); + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { + self.alias_roomid + .get(alias.alias().as_bytes())? + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + pub fn room_aliases<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator>> + 'a { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) + }) + } + From adafb335ffbfae4097d008685af78c2b15fa0f0d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 1146/1727] refactor: restore src/service/rooms/state/mod.rs --- src/service/rooms/state/mod.rs | 296 +++++++++++++++++++++++++++++++++ 1 file changed, 296 insertions(+) create mode 100644 src/service/rooms/state/mod.rs diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs new file mode 100644 index 0000000..da03ad4 --- /dev/null +++ b/src/service/rooms/state/mod.rs @@ -0,0 +1,296 @@ +pub struct Service { + db: D, +} + +impl Service { + /// Set the room to the given statehash and update caches. + #[tracing::instrument(skip(self, new_state_ids_compressed, db))] + pub fn force_state( + &self, + room_id: &RoomId, + shortstatehash: u64, + statediffnew :HashSet, + statediffremoved :HashSet, + db: &Database, + ) -> Result<()> { + + for event_id in statediffnew.into_iter().filter_map(|new| { + self.parse_compressed_state_event(new) + .ok() + .map(|(_, id)| id) + }) { + let pdu = match self.get_pdu_json(&event_id)? 
{ + Some(pdu) => pdu, + None => continue, + }; + + if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { + continue; + } + + let pdu: PduEvent = match serde_json::from_str( + &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + ) { + Ok(pdu) => pdu, + Err(_) => continue, + }; + + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let membership = match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::parse(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + } + + self.update_joined_count(room_id, db)?; + + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + + Ok(()) + } + + /// Returns the leaf pdus of a room. + #[tracing::instrument(skip(self))] + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.roomid_pduleaves + .scan_prefix(prefix) + .map(|(_, bytes)| { + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + }) + .collect() + } + + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this allows a room to have multiple + /// `prev_events`. + #[tracing::instrument(skip(self))] + pub fn replace_pdu_leaves<'a>( + &self, + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { + self.roomid_pduleaves.remove(&key)?; + } + + for event_id in event_ids { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
+ #[tracing::instrument(skip(self, state_ids_compressed, globals))] + pub fn set_event_state( + &self, + event_id: &EventId, + room_id: &RoomId, + state_ids_compressed: HashSet, + globals: &super::globals::Globals, + ) -> Result<()> { + let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; + + let previous_shortstatehash = self.current_shortstatehash(room_id)?; + + let state_hash = self.calculate_hash( + &state_ids_compressed + .iter() + .map(|s| &s[..]) + .collect::>(), + ); + + let (shortstatehash, already_existed) = + self.get_or_create_shortstatehash(&state_hash, globals)?; + + if !already_existed { + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (state_ids_compressed, HashSet::new()) + }; + self.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 1_000_000, // high number because no state will be based on this one + states_parents, + )?; + } + + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + + Ok(()) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + #[tracing::instrument(skip(self, new_pdu, globals))] + pub fn append_to_state( + &self, + new_pdu: &PduEvent, + globals: &super::globals::Globals, + ) -> Result { + let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; + + let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; + + if let Some(p) = previous_shortstatehash { + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; + } + + if let Some(state_key) = &new_pdu.state_key { + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let shortstatekey = self.get_or_create_shortstatekey( + &new_pdu.kind.to_string().into(), + state_key, + globals, + )?; + + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + + let replaces = states_parents + .last() + .map(|info| { + info.1 + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); + + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } + + // TODO: statehash with deterministic inputs + let shortstatehash = globals.next_count()?; + + let mut statediffnew = HashSet::new(); + statediffnew.insert(new); + + let mut statediffremoved = HashSet::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } + + self.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 2, + states_parents, + )?; + + Ok(shortstatehash) + } else { + Ok(previous_shortstatehash.expect("first event in room must be a state event")) + } + } + + #[tracing::instrument(skip(self, invite_event))] + pub fn calculate_invite_state( + &self, + invite_event: &PduEvent, + ) -> Result>> { + let mut state = Vec::new(); + // 
Add recommended events + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomMember, + invite_event.sender.as_str(), + )? { + state.push(e.to_stripped_state_event()); + } + + state.push(invite_event.to_stripped_state_event()); + Ok(state) + } + + #[tracing::instrument(skip(self))] + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; + + Ok(()) + } +} From 9e1ab74bb438c60a8ccb90af98af36f1da4fb3df Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 1147/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/directory/mod.rs --- src/{service/rooms/directory/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/directory/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/directory/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/directory/mod.rs rename to src/database/key_value.rs From a563b1ba9a9e60ab0d4c6c7b787b3855e048647e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 1148/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/edus/mod.rs --- src/{service/rooms/edus/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/edus/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/database/key_value.rs From 0071a9cbf4bef95b7125d11d7950dfee991cd625 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 1149/1727] refactor: restore src/service/rooms/directory/mod.rs --- src/service/rooms/directory/mod.rs | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 src/service/rooms/directory/mod.rs diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs new file mode 100644 index 0000000..8be7bd5 --- /dev/null +++ b/src/service/rooms/directory/mod.rs @@ -0,0 +1,29 @@ + + #[tracing::instrument(skip(self))] + pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { + if public { + self.publicroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.publicroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn is_public_room(&self, room_id: &RoomId) -> Result { + Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn 
public_rooms(&self) -> impl Iterator>> + '_ { + self.publicroomids.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) + }) + } + From 85e571baddb6e5ac7e3bc81ac94bce48a0d14981 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 1150/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/lazy_loading/mod.rs --- src/{service/rooms/lazy_loading/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/lazy_loading/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/lazy_loading/mod.rs rename to src/database/key_value.rs From 931c8ece4a8c984effc8ff87b9228b237611a7dc Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 1151/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/metadata/mod.rs --- src/{service/rooms/metadata/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/metadata/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/metadata/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/metadata/mod.rs rename to src/database/key_value.rs From 06bfddf0daed72d1c0a408faa565091ad6208ffb Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 1152/1727] refactor: restore src/service/rooms/lazy_loading/mod.rs --- src/service/rooms/lazy_loading/mod.rs | 91 +++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 src/service/rooms/lazy_loading/mod.rs diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs new file mode 100644 index 0000000..a402702 --- /dev/null +++ b/src/service/rooms/lazy_loading/mod.rs @@ -0,0 +1,91 @@ + + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_mark_sent( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: HashSet>, + count: u64, + ) { + self.lazy_load_waiting.lock().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + count, + ), + lazy_load, + ); + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for 
ll_id in user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } + From 42fe118cbe608e0b39a589b9192c47f77b2a313f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 1153/1727] refactor: restore src/service/rooms/edus/mod.rs --- src/service/rooms/edus/mod.rs | 550 ++++++++++++++++++++++++++++++++++ 1 file changed, 550 insertions(+) create mode 100644 src/service/rooms/edus/mod.rs diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs new file mode 100644 index 0000000..118efd4 --- /dev/null +++ b/src/service/rooms/edus/mod.rs @@ -0,0 +1,550 @@ +use crate::{database::abstraction::Tree, utils, Error, Result}; +use ruma::{ + events::{ + presence::{PresenceEvent, PresenceEventContent}, + receipt::ReceiptEvent, + SyncEphemeralRoomEvent, + }, + presence::PresenceState, + serde::Raw, + signatures::CanonicalJsonObject, + RoomId, UInt, UserId, +}; +use std::{ + collections::{HashMap, HashSet}, + mem, + sync::Arc, +}; + +pub struct RoomEdus { + pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count +} + +impl RoomEdus { + /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). 
+ pub fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + // Remove old entry + if let Some((old, _)) = self + .readreceiptid_readreceipt + .iter_from(&last_possible_key, true) + .take_while(|(key, _)| key.starts_with(&prefix)) + .find(|(key, _)| { + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element") + == user_id.as_bytes() + }) + { + // This is the old room_latest + self.readreceiptid_readreceipt.remove(&old)?; + } + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xff); + room_latest_id.extend_from_slice(user_id.as_bytes()); + + self.readreceiptid_readreceipt.insert( + &room_latest_id, + &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), + )?; + + Ok(()) + } + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[tracing::instrument(skip(self))] + pub fn readreceipts_since<'a>( + &'a self, + room_id: &RoomId, + since: u64, + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + let prefix2 = prefix.clone(); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = + utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + + let mut json = serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") + })?; + json.remove("room_id"); + + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json).expect("json is valid raw value"), + ), + )) + }) + } + + /// Sets a private read marker at `count`. + #[tracing::instrument(skip(self, globals))] + pub fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .insert(&key, &count.to_be_bytes())?; + + self.roomuserid_lastprivatereadupdate + .insert(&key, &globals.next_count()?.to_be_bytes())?; + + Ok(()) + } + + /// Returns the private read marker. + #[tracing::instrument(skip(self))] + pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .get(&key)? 
+ .map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) + } + + /// Returns the count of the last typing update in this room. + pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastprivatereadupdate + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } + + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. + pub fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let count = globals.next_count()?.to_be_bytes(); + + let mut room_typing_id = prefix; + room_typing_id.extend_from_slice(&timeout.to_be_bytes()); + room_typing_id.push(0xff); + room_typing_id.extend_from_slice(&count); + + self.typingid_userid + .insert(&room_typing_id, &*user_id.as_bytes())?; + + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &count)?; + + Ok(()) + } + + /// Removes a user from typing before the timeout is reached. + pub fn typing_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let user_id = user_id.to_string(); + + let mut found_outdated = false; + + // Maybe there are multiple ones from calling roomtyping_add multiple times + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .filter(|(_, v)| &**v == user_id.as_bytes()) + { + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + + /// Makes sure that typing events with old timestamps get removed. + fn typings_maintain( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let current_timestamp = utils::millis_since_unix_epoch(); + + let mut found_outdated = false; + + // Find all outdated edus before inserting a new one + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .map(|(key, _)| { + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes( + &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") + })?[0..mem::size_of::()], + ) + .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, + )) + }) + .filter_map(|r| r.ok()) + .take_while(|&(_, timestamp)| timestamp < current_timestamp) + { + // This is an outdated edu (time > timestamp) + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + + /// Returns the count of the last typing update in this room. 
+ #[tracing::instrument(skip(self, globals))] + pub fn last_typing_update( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result { + self.typings_maintain(room_id, globals)?; + + Ok(self + .roomid_lasttypingupdate + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } + + pub fn typings_all( + &self, + room_id: &RoomId, + ) -> Result> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut user_ids = HashSet::new(); + + for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { + let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + + user_ids.insert(user_id); + } + + Ok(SyncEphemeralRoomEvent { + content: ruma::events::typing::TypingEventContent { + user_ids: user_ids.into_iter().collect(), + }, + }) + } + + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + pub fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + // TODO: Remove old entry? Or maybe just wipe completely from time to time? + + let count = globals.next_count()?.to_be_bytes(); + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(presence.sender.as_bytes()); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), + )?; + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Resets the presence timeout, so the user will stay in their current presence state. + #[tracing::instrument(skip(self))] + pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.userid_lastpresenceupdate + .get(user_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + }) + .transpose() + } + + pub fn get_last_presence_event( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result> { + let last_update = match self.last_presence_update(user_id)? { + Some(last) => last, + None => return Ok(None), + }; + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&last_update.to_be_bytes()); + presence_id.push(0xff); + presence_id.extend_from_slice(user_id.as_bytes()); + + self.presenceid_presence + .get(&presence_id)? 
+ .map(|value| { + let mut presence: PresenceEvent = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + Ok(presence) + }) + .transpose() + } + + /// Sets all users to offline who have been quiet for too long. + fn _presence_maintain( + &self, + rooms: &super::Rooms, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let current_timestamp = utils::millis_since_unix_epoch(); + + for (user_id_bytes, last_timestamp) in self + .userid_lastpresenceupdate + .iter() + .filter_map(|(k, bytes)| { + Some(( + k, + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + .ok()?, + )) + }) + .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) + // 5 Minutes + { + // Send new presence events to set the user offline + let count = globals.next_count()?.to_be_bytes(); + let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) + .map_err(|_| { + Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") + })? + .try_into() + .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; + for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(&user_id_bytes); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&PresenceEvent { + content: PresenceEventContent { + avatar_url: None, + currently_active: None, + displayname: None, + last_active_ago: Some( + last_timestamp.try_into().expect("time is valid"), + ), + presence: PresenceState::Offline, + status_msg: None, + }, + sender: user_id.to_owned(), + }) + .expect("PresenceEvent can be serialized"), + )?; + } + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + } + + Ok(()) + } + + /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
+ #[tracing::instrument(skip(self, since, _rooms, _globals))] + pub fn presence_since( + &self, + room_id: &RoomId, + since: u64, + _rooms: &super::Rooms, + _globals: &super::super::globals::Globals, + ) -> Result, PresenceEvent>> { + //self.presence_maintain(rooms, globals)?; + + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + let mut hashmap = HashMap::new(); + + for (key, value) in self + .presenceid_presence + .iter_from(&*first_possible_edu, false) + .take_while(|(key, _)| key.starts_with(&prefix)) + { + let user_id = UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, + ) + .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; + + let mut presence: PresenceEvent = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + hashmap.insert(user_id, presence); + } + + Ok(hashmap) + } +} From 715b30a2b5ea9f2511e157ac23ced07183f25a85 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 1154/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/outlier/mod.rs --- src/{service/rooms/outlier/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/outlier/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/outlier/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/outlier/mod.rs rename to src/database/key_value.rs From daa969508fe17a7d87bdfff5c8163aae469545af Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 1155/1727] refactor: restore src/service/rooms/outlier/mod.rs --- src/service/rooms/outlier/mod.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 src/service/rooms/outlier/mod.rs diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs new file mode 100644 index 0000000..340e93e --- /dev/null +++ b/src/service/rooms/outlier/mod.rs @@ -0,0 +1,27 @@ + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + /// Append the PDU as an outlier. 
+ #[tracing::instrument(skip(self, pdu))] + pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.eventid_outlierpdu.insert( + event_id.as_bytes(), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), + ) + } + From 0ce4446b1ab264378fb96029d420391aefdfdb91 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 1156/1727] refactor: restore src/service/rooms/metadata/mod.rs --- src/service/rooms/metadata/mod.rs | 44 +++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 src/service/rooms/metadata/mod.rs diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs new file mode 100644 index 0000000..5d70345 --- /dev/null +++ b/src/service/rooms/metadata/mod.rs @@ -0,0 +1,44 @@ + /// Checks if a room exists. + #[tracing::instrument(skip(self))] + pub fn exists(&self, room_id: &RoomId) -> Result { + let prefix = match self.get_shortroomid(room_id)? { + Some(b) => b.to_be_bytes().to_vec(), + None => return Ok(false), + }; + + // Look for PDUs in that room. + Ok(self + .pduid_pdu + .iter_from(&prefix, false) + .next() + .filter(|(k, _)| k.starts_with(&prefix)) + .is_some()) + } + + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + pub fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } + From 1ccc226c6b195d84c14e1fdf5e0ade5feefb6872 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:01 +0200 Subject: [PATCH 1157/1727] refactor: prepare src/database/key_value.rs from src/service/rooms/pdu_metadata/mod.rs --- src/{service/rooms/pdu_metadata/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/pdu_metadata/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/pdu_metadata/mod.rs rename to src/database/key_value.rs From 81ac01c2f56669521fa55409efbeba6785230239 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:01 +0200 Subject: [PATCH 1158/1727] refactor: restore src/service/rooms/pdu_metadata/mod.rs --- src/service/rooms/pdu_metadata/mod.rs | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 src/service/rooms/pdu_metadata/mod.rs diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs new file mode 100644 index 0000000..f8ffcee --- /dev/null +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -0,0 +1,31 @@ + + #[tracing::instrument(skip(self, room_id, event_ids))] + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + for prev in event_ids { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; + } + + Ok(()) + } + + 
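Editor's note: mark_as_referenced above builds its keys by concatenating the room id and the referenced event id directly, without the 0xff separator most other trees use, and stores an empty value, so is_event_referenced below only has to check key existence. A small sketch of that key construction; the string parameters stand in for the real id types:

/// Illustrative: key layout of the `referencedevents` tree, room id followed
/// immediately by the event id, value left empty.
fn referenced_key(room_id: &str, event_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.extend_from_slice(event_id.as_bytes());
    key
}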
#[tracing::instrument(skip(self))] + pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(event_id.as_bytes()); + Ok(self.referencedevents.get(&key)?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.softfailedeventids.insert(event_id.as_bytes(), &[]) + } + + #[tracing::instrument(skip(self))] + pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.softfailedeventids + .get(event_id.as_bytes()) + .map(|o| o.is_some()) + } + From 865e35df17f7e9e3dba21765871285f0d1bdeeeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 25 Jun 2022 16:12:23 +0200 Subject: [PATCH 1159/1727] Work on rooms/state, database, alias, directory, edus services, event_handler, lazy_loading, metadata, outlier, and pdu_metadata --- src/database/abstraction.rs | 4 +- src/database/key_value.rs | 740 ++------- src/database/mod.rs | 48 +- src/main.rs | 49 +- src/service/rooms/alias/data.rs | 22 + src/service/rooms/alias/mod.rs | 72 +- src/service/rooms/directory/data.rs | 13 + src/service/rooms/directory/mod.rs | 37 +- src/service/rooms/edus/data.rs | 91 ++ src/service/rooms/edus/mod.rs | 350 +--- src/service/rooms/event_handler/mod.rs | 2052 ++++++++++++------------ src/service/rooms/lazy_loading/data.rs | 24 + src/service/rooms/lazy_loading/mod.rs | 55 +- src/service/rooms/metadata/data.rs | 3 + src/service/rooms/metadata/mod.rs | 52 +- src/service/rooms/outlier/data.rs | 5 + src/service/rooms/outlier/mod.rs | 29 +- src/service/rooms/pdu_metadata/data.rs | 6 + src/service/rooms/pdu_metadata/mod.rs | 29 +- src/service/rooms/short/mod.rs | 27 + src/service/rooms/state/data.rs | 32 +- src/service/rooms/state/mod.rs | 86 +- 22 files changed, 1544 insertions(+), 2282 deletions(-) create mode 100644 src/service/rooms/alias/data.rs create mode 100644 src/service/rooms/directory/data.rs create mode 100644 src/service/rooms/edus/data.rs create mode 100644 src/service/rooms/lazy_loading/data.rs create mode 100644 src/service/rooms/metadata/data.rs create mode 100644 src/service/rooms/outlier/data.rs create mode 100644 src/service/rooms/pdu_metadata/data.rs diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 74f3a45..29325bd 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -26,7 +26,7 @@ pub mod persy; ))] pub mod watchers; -pub trait DatabaseEngine: Send + Sync { +pub trait KeyValueDatabaseEngine: Send + Sync { fn open(config: &Config) -> Result where Self: Sized; @@ -40,7 +40,7 @@ pub trait DatabaseEngine: Send + Sync { } } -pub trait Tree: Send + Sync { +pub trait KeyValueTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; diff --git a/src/database/key_value.rs b/src/database/key_value.rs index 1a793f3..34916e4 100644 --- a/src/database/key_value.rs +++ b/src/database/key_value.rs @@ -1,10 +1,7 @@ -pub trait Data { - fn get_room_shortstatehash(room_id: &RoomId); -} +use crate::service; - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { +impl service::room::state::Data for KeyValueDatabase { + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
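// Decode the stored big-endian shortstatehash; a missing key simply means the room has no state yet.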
.map_or(Ok(None), |bytes| { @@ -14,77 +11,21 @@ pub trait Data { }) } -pub struct Service { - db: D, -} - -impl Service { - /// Set the room to the given statehash and update caches. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - shortstatehash: u64, - statediffnew :HashSet, - statediffremoved :HashSet, - db: &Database, - ) -> Result<()> { - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) } - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + fn set_event_state(&self) -> Result<()> { + db.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + Ok(()) + } + + fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -99,15 +40,11 @@ impl Service { .collect() } - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( + fn set_forward_extremities( &self, room_id: &RoomId, event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -125,230 +62,48 @@ impl Service { Ok(()) } - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
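Editor's note: set_forward_extremities above is a wholesale replace, every key under the room_id + 0xff prefix is deleted and one key per new leaf event is written with an empty value. A sketch of the same replace over a BTreeMap stand-in; the helper name and map type are illustrative only:

use std::collections::BTreeMap;

/// Illustrative: replace all forward extremities of a room in one pass.
fn replace_leaves(tree: &mut BTreeMap<Vec<u8>, Vec<u8>>, room_id: &str, event_ids: &[&str]) {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);

    // Drop every existing leaf for this room ...
    tree.retain(|key, _| !key.starts_with(&prefix));

    // ... then insert the new ones, value left empty as in the real tree.
    for event_id in event_ids {
        let mut key = prefix.clone();
        key.extend_from_slice(event_id.as_bytes());
        tree.insert(key, Vec::new());
    }
}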
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } } - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( +impl service::room::alias::Data for KeyValueDatabase { + fn set_alias( &self, alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, + room_id: Option<&RoomId> ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); + aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; Ok(()) } - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { + fn remove_alias( + &self, + alias: &RoomAliasId, + ) -> Result<()> { + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + )); + } + Ok(()) + } + + fn resolve_local_alias( + &self, + alias: &RoomAliasId + ) -> Result<()> { self.alias_roomid .get(alias.alias().as_bytes())? 
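// The stored value is the raw room id bytes; parse them back into a RoomId, treating bad UTF-8 or a malformed id as database corruption.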
.map(|bytes| { @@ -360,11 +115,10 @@ impl Service { .transpose() } - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, + fn local_aliases_for_room( + &self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -375,26 +129,22 @@ impl Service { .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) }) } +} - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) +impl service::room::directory::Data for KeyValueDatabase { + fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.insert(room_id.as_bytes(), &[])?; } - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { + fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.remove(room_id.as_bytes())?; + } + + fn is_public_room(&self, room_id: &RoomId) -> Result { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { + fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { @@ -404,43 +154,14 @@ impl Service { .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) }) } - -use crate::{database::abstraction::Tree, utils, Error, Result}; -use ruma::{ - events::{ - presence::{PresenceEvent, PresenceEventContent}, - receipt::ReceiptEvent, - SyncEphemeralRoomEvent, - }, - presence::PresenceState, - serde::Raw, - signatures::CanonicalJsonObject, - RoomId, UInt, UserId, -}; -use std::{ - collections::{HashMap, HashSet}, - mem, - sync::Arc, -}; - -pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count } -impl RoomEdus { - /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). - pub fn readreceipt_update( +impl service::room::edus::Data for KeyValueDatabase { + fn readreceipt_update( &self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -477,8 +198,6 @@ impl RoomEdus { Ok(()) } - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] pub fn readreceipts_since<'a>( &'a self, room_id: &RoomId, @@ -527,14 +246,11 @@ impl RoomEdus { }) } - /// Sets a private read marker at `count`. 
- #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( + fn private_read_set( &self, room_id: &RoomId, user_id: &UserId, count: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); @@ -545,13 +261,9 @@ impl RoomEdus { self.roomuserid_lastprivatereadupdate .insert(&key, &globals.next_count()?.to_be_bytes())?; - - Ok(()) } - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); @@ -565,8 +277,7 @@ impl RoomEdus { }) } - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); @@ -583,9 +294,7 @@ impl RoomEdus { .unwrap_or(0)) } - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( + fn typing_add( &self, user_id: &UserId, room_id: &RoomId, @@ -611,12 +320,10 @@ impl RoomEdus { Ok(()) } - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( + fn typing_remove( &self, user_id: &UserId, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -643,59 +350,10 @@ impl RoomEdus { Ok(()) } - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( + fn last_typing_update( &self, room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result { - self.typings_maintain(room_id, globals)?; - Ok(self .roomid_lasttypingupdate .get(room_id.as_bytes())? 
@@ -708,10 +366,10 @@ impl RoomEdus { .unwrap_or(0)) } - pub fn typings_all( + fn typings_all( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -726,23 +384,14 @@ impl RoomEdus { user_ids.insert(user_id); } - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) + Ok(user_ids) } - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( + fn update_presence( &self, user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { // TODO: Remove old entry? Or maybe just wipe completely from time to time? @@ -767,8 +416,6 @@ impl RoomEdus { Ok(()) } - /// Resets the presence timeout, so the user will stay in their current presence state. - #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( user_id.as_bytes(), @@ -778,8 +425,7 @@ impl RoomEdus { Ok(()) } - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_lastpresenceupdate .get(user_id.as_bytes())? .map(|bytes| { @@ -790,125 +436,29 @@ impl RoomEdus { .transpose() } - pub fn get_last_presence_event( + fn get_presence_event( &self, user_id: &UserId, room_id: &RoomId, + count: u64, ) -> Result> { - let last_update = match self.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); - presence_id.extend_from_slice(&last_update.to_be_bytes()); + presence_id.extend_from_slice(&count.to_be_bytes()); presence_id.push(0xff); presence_id.extend_from_slice(user_id.as_bytes()); self.presenceid_presence .get(&presence_id)? - .map(|value| { - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) - }) + .map(|value| parse_presence_event(&value)) .transpose() } - /// Sets all users to offline who have been quiet for too long. 
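Editor's note: get_presence_event above now takes the update count explicitly and rebuilds the same key that update_presence wrote, room id, 0xff, big-endian count, 0xff, user id. A small sketch of that key layout with plain string and u64 parameters, purely illustrative:

/// Illustrative: key layout of the `presenceid_presence` tree.
fn presence_key(room_id: &str, count: u64, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}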
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - } - - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
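Editor's note: the removed per-call conversion and the shared parse_presence_event helper introduced below apply the same rule, an online user gets last_active_ago = None and everyone else gets the stored absolute timestamp converted into an age relative to now. A sketch of just that rule with plain integers; the real code works on UInt inside a PresenceEvent:

/// Illustrative: timestamp-to-age conversion applied to presence events.
fn last_active_ago(now_ms: u64, stored_ts_ms: Option<u64>, online: bool) -> Option<u64> {
    if online {
        // Online users report no last_active_ago at all.
        None
    } else {
        // The tree stores an absolute timestamp; clients expect an age, so subtract from now
        // (saturating here for safety; the original subtracts directly).
        stored_ts_ms.map(|ts| now_ms.saturating_sub(ts))
    }
}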
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( + fn presence_since( &self, room_id: &RoomId, since: u64, - _rooms: &super::Rooms, - _globals: &super::super::globals::Globals, ) -> Result, PresenceEvent>> { - //self.presence_maintain(rooms, globals)?; - let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -931,23 +481,7 @@ impl RoomEdus { ) .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } + let presence = parse_presence_event(&value)?; hashmap.insert(user_id, presence); } @@ -956,8 +490,28 @@ impl RoomEdus { } } - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( +fn parse_presence_event(bytes: &[u8]) -> Result { + let mut presence: PresenceEvent = serde_json::from_slice(bytes) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } +} + +impl service::room::lazy_load::Data for KeyValueDatabase { + fn lazy_load_was_sent_before( &self, user_id: &UserId, device_id: &DeviceId, @@ -974,28 +528,7 @@ impl RoomEdus { Ok(self.lazyloadedids.get(&key)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( + fn lazy_load_confirm_delivery( &self, user_id: &UserId, device_id: &DeviceId, @@ -1025,8 +558,7 @@ impl RoomEdus { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( + fn lazy_load_reset( &self, user_id: &UserId, device_id: &DeviceId, @@ -1045,10 +577,10 @@ impl RoomEdus { Ok(()) } +} - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { +impl service::room::metadata::Data for KeyValueDatabase { + fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), None => return Ok(false), @@ -1062,36 +594,10 @@ impl RoomEdus { .filter(|(k, _)| k.starts_with(&prefix)) .is_some()) } +} - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - /// Returns the pdu from the outlier tree. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { +impl service::room::outlier::Data for KeyValueDatabase { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { @@ -1099,8 +605,7 @@ impl RoomEdus { }) } - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { @@ -1108,18 +613,16 @@ impl RoomEdus { }) } - /// Append the PDU as an outlier. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( event_id.as_bytes(), &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), ) } +} - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { +impl service::room::pdu_metadata::Data for KeyValueDatabase { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1129,22 +632,19 @@ impl RoomEdus { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(event_id.as_bytes()); Ok(self.referencedevents.get(&key)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.softfailedeventids.insert(event_id.as_bytes(), &[]) } - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + fn is_event_soft_failed(&self, event_id: &EventId) -> Result { self.softfailedeventids .get(event_id.as_bytes()) .map(|o| o.is_some()) } - +} diff --git a/src/database/mod.rs b/src/database/mod.rs index a0937c2..a35228a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -15,7 +15,7 @@ pub mod users; use self::admin::create_admin_room; use crate::{utils, Config, Error, Result}; -use abstraction::DatabaseEngine; +use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; @@ -39,8 +39,8 @@ use std::{ use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -pub struct Database { - _db: Arc, +pub struct 
KeyValueDatabase { + _db: Arc, pub globals: globals::Globals, pub users: users::Users, pub uiaa: uiaa::Uiaa, @@ -55,7 +55,7 @@ pub struct Database { pub pusher: pusher::PushData, } -impl Database { +impl KeyValueDatabase { /// Tries to remove the old database but ignores all errors. pub fn try_remove(server_name: &str) -> Result<()> { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") @@ -124,7 +124,7 @@ impl Database { .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } - let builder: Arc = match &*config.database_backend { + let builder: Arc = match &*config.database_backend { "sqlite" => { #[cfg(not(feature = "sqlite"))] return Err(Error::BadConfig("Database backend not found.")); @@ -955,7 +955,7 @@ impl Database { } /// Sets the emergency password and push rules for the @conduit account in case emergency password is set -fn set_emergency_access(db: &Database) -> Result { +fn set_emergency_access(db: &KeyValueDatabase) -> Result { let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) .expect("@conduit:server_name is a valid UserId"); @@ -979,39 +979,3 @@ fn set_emergency_access(db: &Database) -> Result { res } - -pub struct DatabaseGuard(OwnedRwLockReadGuard); - -impl Deref for DatabaseGuard { - type Target = OwnedRwLockReadGuard; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(feature = "conduit_bin")] -#[axum::async_trait] -impl axum::extract::FromRequest for DatabaseGuard -where - B: Send, -{ - type Rejection = axum::extract::rejection::ExtensionRejection; - - async fn from_request( - req: &mut axum::extract::RequestParts, - ) -> Result { - use axum::extract::Extension; - - let Extension(db): Extension>> = - Extension::from_request(req).await?; - - Ok(DatabaseGuard(db.read_owned().await)) - } -} - -impl From> for DatabaseGuard { - fn from(val: OwnedRwLockReadGuard) -> Self { - Self(val) - } -} diff --git a/src/main.rs b/src/main.rs index 9a0928a..a1af976 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,27 +46,26 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -#[tokio::main] -async fn main() { - let raw_config = - Figment::new() - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); +lazy_static! { + static ref DB: Database = { + let raw_config = + Figment::new() + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); - let config = match raw_config.extract::() { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); - std::process::exit(1); - } - }; + let config = match raw_config.extract::() { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. 
The following error occured while parsing it: {}", e); + std::process::exit(1); + } + }; - let start = async { config.warn_deprecated(); let db = match Database::load_or_create(&config).await { @@ -79,8 +78,15 @@ async fn main() { std::process::exit(1); } }; + }; +} - run_server(&config, db).await.unwrap(); +#[tokio::main] +async fn main() { + lazy_static::initialize(&DB); + + let start = async { + run_server(&config).await.unwrap(); }; if config.allow_jaeger { @@ -120,7 +126,8 @@ async fn main() { } } -async fn run_server(config: &Config, db: Arc>) -> io::Result<()> { +async fn run_server() -> io::Result<()> { + let config = DB.globals.config; let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs new file mode 100644 index 0000000..9dbfc7b --- /dev/null +++ b/src/service/rooms/alias/data.rs @@ -0,0 +1,22 @@ +pub trait Data { + /// Creates or updates the alias to the given room id. + pub fn set_alias( + alias: &RoomAliasId, + room_id: &RoomId + ) -> Result<()>; + + /// Forgets about an alias. Returns an error if the alias did not exist. + pub fn remove_alias( + alias: &RoomAliasId, + ) -> Result<()>; + + /// Looks up the roomid for the given alias. + pub fn resolve_local_alias( + alias: &RoomAliasId, + ) -> Result<()>; + + /// Returns all local aliases that point to the given room + pub fn local_aliases_for_room( + alias: &RoomAliasId, + ) -> Result<()>; +} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 393ad67..cfe0539 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,66 +1,40 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self, globals))] pub fn set_alias( &self, alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, + room_id: &RoomId, ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); + self.db.set_alias(alias, room_id) + } - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) + #[tracing::instrument(skip(self, globals))] + pub fn remove_alias( + &self, + alias: &RoomAliasId, + ) -> Result<()> { + self.db.remove_alias(alias) } #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + self.db.resolve_local_alias(alias: &RoomAliasId) } #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( + pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) + self.db.local_aliases_for_room(room_id) } - +} diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs new file mode 100644 index 0000000..83d7885 --- /dev/null +++ b/src/service/rooms/directory/data.rs @@ -0,0 +1,13 @@ +pub trait Data { + /// Adds the room to the public room directory + fn set_public(room_id: &RoomId) -> Result<()>; + + /// Removes the room from the public room directory. + fn set_not_public(room_id: &RoomId) -> Result<()>; + + /// Returns true if the room is in the public room directory. + fn is_public_room(room_id: &RoomId) -> Result; + + /// Returns the unsorted public room directory + fn public_rooms() -> impl Iterator>> + '_; +} diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 8be7bd5..b92933f 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,29 +1,30 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + #[tracing::instrument(skip(self))] + pub fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_public(&self, room_id) + } #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) + pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_not_public(&self, room_id) } #[tracing::instrument(skip(self))] pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + self.db.is_public_room(&self, room_id) } #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) + self.db.public_rooms(&self, room_id) } - +} diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/data.rs new file mode 100644 index 0000000..16c14cf --- /dev/null +++ b/src/service/rooms/edus/data.rs @@ -0,0 +1,91 @@ +pub trait Data { + /// Replaces the previous read receipt. + fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + ) -> Result<()>; + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + fn readreceipts_since( + &self, + room_id: &RoomId, + since: u64, + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >; + + /// Sets a private read marker at `count`. 
+ fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result<()>; + + /// Returns the private read marker. + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns the count of the last typing update in this room. + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. + fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + ) -> Result<()>; + + /// Removes a user from typing before the timeout is reached. + fn typing_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()>; + + /// Returns the count of the last typing update in this room. + fn last_typing_update( + &self, + room_id: &RoomId, + ) -> Result; + + /// Returns all user ids currently typing. + fn typings_all( + &self, + room_id: &RoomId, + ) -> Result>; + + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + ) -> Result<()>; + + /// Resets the presence timeout, so the user will stay in their current presence state. + fn ping_presence(&self, user_id: &UserId) -> Result<()>; + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + fn last_presence_update(&self, user_id: &UserId) -> Result>; + + /// Returns the presence event with correct last_active_ago. + fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; + + /// Returns the most recent presence updates that happened after the event with id `since`. + fn presence_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result, PresenceEvent>>; +} diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 118efd4..06adf57 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,73 +1,21 @@ -use crate::{database::abstraction::Tree, utils, Error, Result}; -use ruma::{ - events::{ - presence::{PresenceEvent, PresenceEventContent}, - receipt::ReceiptEvent, - SyncEphemeralRoomEvent, - }, - presence::PresenceState, - serde::Raw, - signatures::CanonicalJsonObject, - RoomId, UInt, UserId, -}; -use std::{ - collections::{HashMap, HashSet}, - mem, - sync::Arc, -}; +mod data; +pub use data::Data; -pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count +use crate::service::*; + +pub struct Service { + db: D, } -impl RoomEdus { - /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). +impl Service<_> { + /// Replaces the previous read receipt. 
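Editor's note: reads such as readreceipts_since below and presence_since page through these trees by seeking to prefix ++ (since + 1) in big-endian form, so the entry written at since itself is excluded and everything newer in the same room is collected by a starts_with(prefix) take-while. A sketch of building that lower bound; the plain string parameter is illustrative:

/// Illustrative: first possible key strictly after `since` within one room's prefix.
fn first_key_after(room_id: &str, since: u64) -> Vec<u8> {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);

    let mut first_possible = prefix.clone();
    // +1 so the event at `since` is not sent again.
    first_possible.extend_from_slice(&(since + 1).to_be_bytes());
    first_possible
}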
pub fn readreceipt_update( &self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) + self.db.readreceipt_update(user_id, room_id, event); } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. @@ -83,41 +31,7 @@ impl RoomEdus { Raw, )>, > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) + self.db.readreceipts_since(room_id, since) } /// Sets a private read marker at `count`. @@ -127,53 +41,19 @@ impl RoomEdus { room_id: &RoomId, user_id: &UserId, count: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - - Ok(()) + self.db.private_read_set(room_id, user_id, count) } /// Returns the private read marker. #[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) + self.db.private_read_get(room_id, user_id) } /// Returns the count of the last typing update in this room. pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) + self.db.last_privateread_update(user_id, room_id) } /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is @@ -183,25 +63,8 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, timeout: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) + self.db.typing_add(user_id, room_id, timeout) } /// Removes a user from typing before the timeout is reached. @@ -209,33 +72,11 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) + self.db.typing_remove(user_id, room_id) } + /* TODO: Do this in background thread? /// Makes sure that typing events with old timestamps get removed. fn typings_maintain( &self, @@ -279,45 +120,23 @@ impl RoomEdus { Ok(()) } + */ /// Returns the count of the last typing update in this room. #[tracing::instrument(skip(self, globals))] pub fn last_typing_update( &self, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result { - self.typings_maintain(room_id, globals)?; - - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) + self.db.last_typing_update(room_id) } + /// Returns a new typing EDU. pub fn typings_all( &self, room_id: &RoomId, ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } + let user_ids = self.db.typings_all(room_id)?; Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { @@ -335,52 +154,13 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) + self.db.update_presence(user_id, room_id, presence) } /// Resets the presence timeout, so the user will stay in their current presence state. - #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - pub fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() + self.db.ping_presence(user_id) } pub fn get_last_presence_event( @@ -388,42 +168,15 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, ) -> Result> { - let last_update = match self.last_presence_update(user_id)? { + let last_update = match self.db.last_presence_update(user_id)? { Some(last) => last, None => return Ok(None), }; - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&last_update.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| { - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) - }) - .transpose() + self.db.get_presence_event(room_id, user_id, last_update) } + /* TODO /// Sets all users to offline who have been quiet for too long. fn _presence_maintain( &self, @@ -489,62 +242,15 @@ impl RoomEdus { } Ok(()) - } + }*/ - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. + /// Returns the most recent presence updates that happened after the event with id `since`. 
#[tracing::instrument(skip(self, since, _rooms, _globals))] pub fn presence_since( &self, room_id: &RoomId, since: u64, - _rooms: &super::Rooms, - _globals: &super::super::globals::Globals, ) -> Result, PresenceEvent>> { - //self.presence_maintain(rooms, globals)?; - - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) + self.db.presence_since(room_id, since) } } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e59219b..5b77586 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -2,696 +2,908 @@ /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not -/// timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are -/// also rejected "due to auth events" -/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline -/// events -/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities -/// doing all the checks in this list starting at 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by -/// doing state res where one of the inputs was a previously trusted set of state, don't just -/// trust a set of state we got from a remote) -/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" -/// it -/// 14. 
Use state resolution to find new room state -// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively -#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub(crate) async fn handle_incoming_pdu<'a>( - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - is_timeline_event: bool, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> Result>> { - db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; +use crate::service::*; - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; - - // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { - return Some(pdu_id.to_vec()); - } +pub struct Service; - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; +impl Service { + /// When receiving an event one needs to: + /// 0. Check the server is in the room + /// 1. Skip the PDU if we already know about it + /// 2. Check signatures, otherwise drop + /// 3. Check content hash, redact if doesn't match + /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not + /// timeline events + /// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are + /// also rejected "due to auth events" + /// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + /// 7. Persist this event as an outlier + /// 8. If not timeline event: stop + /// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline + /// events + /// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + /// doing all the checks in this list starting at 1. These are not timeline events + /// 11. Check the auth of the event passes based on the state of the event + /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by + /// doing state res where one of the inputs was a previously trusted set of state, don't just + /// trust a set of state we got from a remote) + /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" + /// it + /// 14. Use state resolution to find new room state + // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively + #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] + pub(crate) async fn handle_incoming_pdu<'a>( + origin: &'a ServerName, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + is_timeline_event: bool, + db: &'a Database, + pub_key_map: &'a RwLock>>, + ) -> Result>> { + db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; - let first_pdu_in_room = db - .rooms - .first_pdu_in_room(room_id)? - .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; - - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - // Skip old events - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } - - // 9. 
Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); - - let mut errors = 0; - for prev_id in dbg!(sorted) { - // Check for disabled again because it might have changed - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of - this room is currently disabled on this server."))?; - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*prev_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", prev_id); - continue; - } + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; + + // 1. Skip the PDU if we already have it as a timeline event + if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { + return Some(pdu_id.to_vec()); } - if errors >= 5 { - break; + let create_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + + let first_pdu_in_room = db + .rooms + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + + let (incoming_pdu, val) = handle_outlier_pdu( + origin, + &create_event, + event_id, + room_id, + value, + db, + pub_key_map, + ) + .await?; + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(None); } - if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { - // Skip old events - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - continue; - } + // Skip old events + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); + } - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() + // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); + + let mut errors = 0; + for prev_id in dbg!(sorted) { + // Check for disabled again because it might have changed + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of + this room is currently disabled on this server."))?; + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() .unwrap() - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + + if errors >= 5 { + break; + } + + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + // Skip old events + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + + if let Err(e) = upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await + { + errors += 1; + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + } + let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + warn!( + "Handling prev event {} took {}m{}s", + prev_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + } + + // Done with prev events, now handling the incoming event + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r + } + + #[tracing::instrument(skip(create_event, value, db, pub_key_map))] + fn handle_outlier_pdu<'a>( + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + db: &'a Database, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + fetch_required_signing_keys(&value, pub_key_map, db) + .await?; + + // 2. Check signatures, otherwise drop + // 3. 
check content hash, redact if doesn't match + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + room_version_id, + ) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e); + return Err("Signature verification failed".to_owned()); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + match ruma::signatures::redact(&value, room_version_id) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_owned()), + } + } + Ok(ruma::signatures::Verified::All) => value, + }; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU.".to_owned())?; + + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + warn!("Fetching auth events for {}", incoming_pdu.event_id); + fetch_and_handle_outliers( db, + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, room_id, pub_key_map, ) - .await - { - errors += 1; - warn!("Prev event {} failed: {}", prev_id, e); - match db - .globals - .bad_event_ratelimiter - .write() - .unwrap() - .entry((*prev_id).to_owned()) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); + .await; + + // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + info!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); + + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let auth_event = match db.rooms.get_pdu(id)? { + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; } - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) + }; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + } + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest(ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times." 
+ )); } } } - let elapsed = start_time.elapsed(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); - warn!( - "Handling prev event {} took {}m{}s", - prev_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } + + // The original create event must be in the auth events + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { + return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); + } + + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? + { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + } + + info!("Validation successful."); + + // 7. Persist the event as an outlier. + db.rooms + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + + info!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) + }) } - // Done with prev events, now handling the incoming event + #[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] + async fn upgrade_outlier_to_timeline_pdu( + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + db: &Database, + room_id: &RoomId, + pub_key_map: &RwLock>>, + ) -> Result>, String> { + // Skip the PDU if we already have it as a timeline event + if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await; - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); + if db + .rooms + .is_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to ask db for soft fail".to_owned())? + { + return Err("Event has been soft failed".into()); + } - r -} + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); -#[tracing::instrument(skip(create_event, value, db, pub_key_map))] -fn handle_outlier_pdu<'a>( - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { - Box::pin(async move { - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - fetch_required_signing_keys(&value, pub_key_map, db) - .await?; - - // 2. Check signatures, otherwise drop - // 3. 
check content hash, redact if doesn't match let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { - error!("Invalid create event: {}", e); + warn!("Invalid create event: {}", e); Error::BadDatabase("Invalid create event in db") })?; let room_version_id = &create_event_content.room_version; let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, - &value, - room_version_id, - ) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type - val.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; + info!("Requesting state at event"); + let mut state_at_incoming_event = None; - // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events - info!( - "Auth check for {} based on auth events", - incoming_pdu.event_id - ); + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())?; - // Build map of auth events - let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id)? 
{ - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) + } else { + None }; - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); + if let Some(Ok(mut state)) = state { + info!("Using cached state"); + let prev_pdu = + db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu } - hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times." - )); + + state_at_incoming_event = Some(state); + } + } else { + info!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; + + let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if okay { + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = db + .rooms + .state_full_ids(sstatehash) + .await + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + get_auth_chain(room_id, starting_events, db) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + + fork_states.push(state); + } + + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } } } } - // The original create event must be in the auth events - if auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { - return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); + if state_at_incoming_event.is_none() { + info!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + info!("Fetching state events at event."); + let state_vec = fetch_and_handle_outliers( + db, + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + } + btree_map::Entry::Occupied(_) => return Err( + "State event's type and state_key combination exists multiple times." + .to_owned(), + ), + } + } + + // The original create event must still be in the state + let create_shortstatekey = db + .rooms + .get_shortstatekey(&StateEventType::RoomCreate, "") + .map_err(|_| "Failed to talk to db.")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(|id| id.as_ref()) + != Some(&create_event.event_id) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + state_at_incoming_event = Some(state); + } + Err(e) => { + warn!("Fetching state for event failed: {}", e); + return Err("Fetching state for event failed".into()); + } + }; } - if !state_res::event_auth::auth_check( + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); + + info!("Starting auth check"); + // 11. 
Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, None::, // TODO: third party invite - |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + |k, s| { + db.rooms + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + }, ) - .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? - { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + .map_err(|_e| "Auth check failed.".to_owned())?; + + if !check_result { + return Err("Event has failed auth check with state at the event.".into()); + } + info!("Auth check succeeded"); + + // We start looking at current room state now, so lets lock the room + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); + let mut extremities = db + .rooms + .get_pdu_leaves(room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } } - info!("Validation successful."); + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - // 7. Persist the event as an outlier. - db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; - info!("Added pdu as outlier."); + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); - Ok((Arc::new(incoming_pdu), val)) - }) -} - -#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] -async fn upgrade_outlier_to_timeline_pdu( - incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - db: &Database, - room_id: &RoomId, - pub_key_map: &RwLock>>, -) -> Result>, String> { - // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { - return Ok(Some(pduid)); - } - - if db - .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? - { - return Err("Event has been soft failed".into()); - } - - info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - // 10. 
Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. - - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - - info!("Requesting state at event"); - let mut state_at_incoming_event = None; - - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db + let auth_events = db .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; - let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) - } else { - None - }; + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| "Auth check failed.".to_owned())?; - if let Some(Ok(mut state)) = state { - info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; + if soft_fail { + append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; - if let Some(state_key) = &prev_pdu.state_key { + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; + return Err("Event has been soft failed".into()); + } + + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? + .expect("every room has state"); + + let current_state_ids = db + .rooms + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match db + .rooms + .get_pdu(id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + let mut fork_states = Vec::new(); + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). 
+ + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = db .rooms .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), + &incoming_pdu.kind.to_string().into(), state_key, &db.globals, ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } + fork_states.push(state_after); - state_at_incoming_event = Some(state); - } - } else { - info!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::new(); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { - pdu + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + db.rooms + .compress_state_event(*k, id, &db.globals) + .map_err(|_| "Failed to compress_state_event.".to_owned()) + }) + .collect::>()? } else { - okay = false; - break; - }; + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db - .rooms - .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - &db.globals, + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - - for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } else { - warn!("Failed to get_statekey_from_short."); - } - starting_events.push(id); - } - - auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) .await .map_err(|_| "Failed to load auth chain.".to_owned())? 
.collect(), - ); - - fork_states.push(state); - } - - let lock = db.globals.stateres_mutex.lock(); - - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); + ); } - res.ok().flatten() - }); - drop(lock); - state_at_incoming_event = match result { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, event_id)) - }) - .collect::>()?, - ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); - None - } - } - } - } + info!("Loading fork states"); - if state_at_incoming_event.is_none() { - info!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() + }) + .collect(); - let mut state: BTreeMap<_, Arc> = BTreeMap::new(); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + info!("Resolving state"); - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); + let lock = db.globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); } - btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), - ), + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); } - } + }; - // The original create event must still be in the state - let create_shortstatekey = db - .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? 
- .expect("Room exists"); + drop(lock); - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } + info!("State resolution done. Compressing state"); - state_at_incoming_event = Some(state); + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + db.rooms + .compress_state_event(shortstatekey, &event_id, &db.globals) + .map_err(|_| "Failed to compress state event".to_owned()) + }) + .collect::>()? + }; + + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + db.rooms + .force_state(room_id, new_room_state, db) + .map_err(|_| "Failed to set new room state.".to_owned())?; } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); - } - }; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - - info!("Starting auth check"); - // 11. Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| { - db.rooms - .get_shortstatekey(&k.to_string().into(), s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) - }, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if !check_result { - return Err("Event has failed auth check with state at the event.".into()); - } - info!("Auth check succeeded"); - - // We start looking at current room state now, so lets lock the room - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) - info!("Calculating extremities"); - let mut extremities = db - .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; - - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); } - } - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + info!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); - info!("Compressing state at event"); - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. - // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - info!("Starting soft fail auth check"); - - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if soft_fail { - append_incoming_pdu( + let pdu_id = append_incoming_pdu( db, &incoming_pdu, val, @@ -705,448 +917,242 @@ async fn upgrade_outlier_to_timeline_pdu( "Failed to add pdu to db.".to_owned() })?; - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); + info!("Appended incoming pdu"); + + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) } - if incoming_pdu.state_key.is_some() { - info!("Loading current room state ids"); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? - .expect("every room has state"); - - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; - - info!("Preparing for stateres to derive new room state"); - let mut extremity_sstatehashes = HashMap::new(); - - info!("Loading extremities"); - for id in dbg!(&extremities) { - match db - .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? - { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - let mut fork_states = Vec::new(); - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. 
Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - info!("State resolution trivial"); - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) - .map_err(|_| "Failed to compress_state_event.".to_owned()) - }) - .collect::>()? - } else { - info!("Loading auth chains"); - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), - ); - } - - info!("Loading fork states"); - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) - .ok() - }) - .collect::>() - }) - .collect(); - - info!("Resolving state"); - - let lock = db.globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); + /// Find the event and auth it. Once the event is validated (steps 1 - 8) + /// it is appended to the outliers Tree. + /// + /// Returns pdu and if we fetched it over federation the raw json. + /// + /// a. Look in the main timeline (pduid_pdu tree) + /// b. Look at outlier pdu tree + /// c. Ask origin server over federation + /// d. TODO: Ask other servers over federation? + #[tracing::instrument(skip_all)] + pub(crate) fn fetch_and_handle_outliers<'a>( + db: &'a Database, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + Box::pin(async move { + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - drop(lock); + let mut pdus = vec![]; + for id in events { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } - info!("State resolution done. 
Compressing state"); - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) - .map_err(|_| "Failed to compress state event".to_owned()) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - } - - info!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - - let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - info!("Appended incoming pdu"); - - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) -} - -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? -#[tracing::instrument(skip_all)] -pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, - origin: &'a ServerName, - events: &'a [Arc], - create_event: &'a PduEvent, - room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { - Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - let mut pdus = vec![]; - for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } } - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } - - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - continue; - } - - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. 
- let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if events_all.contains(&next_id) { - continue; - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { trace!("Found {} in db", id); + pdus.push((local_pdu, None)); continue; } - info!("Fetching {} over federation.", next_id); - match db - .sending - .send_federation_request( - &db.globals, + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + trace!("Found {} in db", id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &next_id }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()); + } + } + } + + for (next_id, value) in events_in_reverse_order.iter().rev() { + match handle_outlier_pdu( origin, - get_event::v1::Request { event_id: &next_id }, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, ) .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()); - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); } - } else { - warn!("Auth event list invalid"); } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()); + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } } } } - - for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); - } - } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); - } - } - } - } - pdus - }) -} - - - -fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = initial_set; - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } + pdus + }) } - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. 
- println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - sorted + + fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() + { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + sorted + } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs new file mode 100644 index 0000000..9cf2d8b --- /dev/null +++ b/src/service/rooms/lazy_loading/data.rs @@ -0,0 +1,24 @@ +pub trait Data { + fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result; + + fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()>; + + fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()>; +} diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a402702..cf00174 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, @@ -7,14 +16,7 @@ room_id: &RoomId, ll_user: &UserId, ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) + self.db.lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } #[tracing::instrument(skip(self))] @@ -45,27 +47,7 @@ room_id: &RoomId, since: u64, ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) + self.db.lazy_load_confirm_delivery(user_d, device_id, room_id, since) } #[tracing::instrument(skip(self))] @@ -75,17 +57,6 @@ device_id: &DeviceId, room_id: &RoomId, ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) + self.db.lazy_load_reset(user_id, device_id, room_id); } - +} diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs new file mode 100644 index 0000000..58bd351 --- /dev/null +++ b/src/service/rooms/metadata/data.rs @@ -0,0 +1,3 @@ +pub trait Data { + fn exists(&self, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 5d70345..644cd18 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,44 +1,16 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Checks if a room 
exists. #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) + self.db.exists(room_id) } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - +} diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs new file mode 100644 index 0000000..6b534b9 --- /dev/null +++ b/src/service/rooms/outlier/data.rs @@ -0,0 +1,5 @@ +pub trait Data { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; +} diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 340e93e..c82cb62 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,27 +1,26 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Returns the pdu from the outlier tree. pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + self.db.get_outlier_pdu_json(event_id) } /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + self.db.get_outlier_pdu(event_id) } /// Append the PDU as an outlier. 
#[tracing::instrument(skip(self, pdu))] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) + self.db.add_pdu_outlier(event_id, pdu) } - +} diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs new file mode 100644 index 0000000..6778795 --- /dev/null +++ b/src/service/rooms/pdu_metadata/data.rs @@ -0,0 +1,6 @@ +pub trait Data { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; + fn is_event_soft_failed(&self, event_id: &EventId) -> Result; +} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index f8ffcee..6d6df22 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,31 +1,30 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) + self.db.mark_as_referenced(room_id, event_ids) } #[tracing::instrument(skip(self))] pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) + self.db.is_event_referenced(room_id, event_id) } #[tracing::instrument(skip(self))] pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) + self.db.mark_event_soft_failed(event_id) } #[tracing::instrument(skip(self))] pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) + self.db.is_event_soft_failed(event_id) } - +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 63e8b71..c44d357 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -196,3 +196,30 @@ }) } + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + pub fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } + diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 4b42ca8..8aa7638 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,16 +1,24 @@ pub trait Data { + /// Returns the last state hash key added to the db for the given room. 
fn get_room_shortstatehash(room_id: &RoomId); + + /// Update the current state of the room. + fn set_room_state(room_id: &RoomId, new_shortstatehash: u64 + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ); + + /// Associates a state with an event. + fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()> { + + /// Returns all events we would send as the prev_events of the next event. + fn get_forward_extremities(room_id: &RoomId) -> Result>>; + + /// Replace the forward extremities of the room. + fn set_forward_extremities( + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { } - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - +pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index da03ad4..b513ab5 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,25 +1,30 @@ +mod data; +pub use data::Data; + +use crate::service::*; + pub struct Service { db: D, } -impl Service { +impl Service<_> { /// Set the room to the given statehash and update caches. #[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, shortstatehash: u64, - statediffnew :HashSet, - statediffremoved :HashSet, + statediffnew: HashSet, + statediffremoved: HashSet, db: &Database, ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) + state_compressor::parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match self.get_pdu_json(&event_id)? { + let pdu = match timeline::get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -55,56 +60,12 @@ impl Service { Err(_) => continue, }; - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + room::state_cache::update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; } - self.update_joined_count(room_id, db)?; + room::state_cache::update_joined_count(room_id, db)?; - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } + db.set_room_state(room_id, new_shortstatehash); Ok(()) } @@ -121,11 +82,11 @@ impl Service { state_ids_compressed: HashSet, globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; + let shorteventid = short::get_or_create_shorteventid(event_id, globals)?; - let previous_shortstatehash = self.current_shortstatehash(room_id)?; + let previous_shortstatehash = db.get_room_shortstatehash(room_id)?; - let state_hash = self.calculate_hash( + let state_hash = super::calculate_hash( &state_ids_compressed .iter() .map(|s| &s[..]) @@ -133,11 +94,11 @@ impl Service { ); let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; + short::get_or_create_shortstatehash(&state_hash, globals)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| room::state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -156,7 +117,7 @@ impl Service { } else { (state_ids_compressed, HashSet::new()) }; - self.save_state_from_diff( + state_compressor::save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -165,8 +126,7 @@ impl Service { )?; } - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -183,7 +143,7 @@ impl Service { ) -> Result { let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; + let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; if let Some(p) = previous_shortstatehash { self.shorteventid_shortstatehash @@ -293,4 +253,8 @@ impl Service { Ok(()) } + + pub fn db(&self) -> D { + &self.db + } } From c21820083bf3285634e9f7098c19fd9527233029 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:34:24 +0200 Subject: [PATCH 1160/1727] refactor: prepare src/service/rooms/edus/read_receipt/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => read_receipt}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => read_receipt}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/read_receipt/mod.rs From bfccd4f136117c4177e34aba10862a7d228fa556 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:35:14 +0200 Subject: [PATCH 1161/1727] refactor: prepare src/service/rooms/edus/presence/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => presence}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 
src/service/rooms/edus/{ => presence}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/presence/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/presence/mod.rs From d410f086424481123ca7893579e1d95bc289e3d0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:36:08 +0200 Subject: [PATCH 1162/1727] refactor: prepare src/service/rooms/edus/typing/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => typing}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => typing}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/typing/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/typing/mod.rs From 73217f238c61792967b72bc8016dbe4bc3efd38e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:37:57 +0200 Subject: [PATCH 1163/1727] refactor: prepare service/rooms/edus/presence/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => presence}/data.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => presence}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/presence/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/presence/data.rs From ac4724e82c9e5719287d9a2e30da037f5eb66f8c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:38:23 +0200 Subject: [PATCH 1164/1727] refactor: prepare service/rooms/edus/read_receipt/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => read_receipt}/data.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => read_receipt}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/read_receipt/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/read_receipt/data.rs From c7e601eb0bb13ca374f657bb55934397174cd92d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:38:46 +0200 Subject: [PATCH 1165/1727] refactor: prepare service/rooms/edus/typing/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => typing}/data.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => typing}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/typing/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/typing/data.rs From 1869a38b8517b772456886627b9bbfa89224cab5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 10 Jul 2022 14:37:34 +0200 Subject: [PATCH 1166/1727] refactor(edus): split edus into separate modules --- src/database/key_value.rs | 14 +- src/service/rooms/edus/mod.rs | 259 +------------------- src/service/rooms/edus/presence/data.rs | 70 +----- src/service/rooms/edus/presence/mod.rs | 137 ----------- src/service/rooms/edus/read_receipt/data.rs | 62 +---- src/service/rooms/edus/read_receipt/mod.rs | 205 +--------------- src/service/rooms/edus/typing/data.rs | 85 +------ src/service/rooms/edus/typing/mod.rs | 175 +------------ 8 files changed, 27 insertions(+), 980 deletions(-) diff --git a/src/database/key_value.rs b/src/database/key_value.rs index 34916e4..0be1311 100644 --- a/src/database/key_value.rs +++ 
b/src/database/key_value.rs @@ -156,7 +156,7 @@ impl service::room::directory::Data for KeyValueDatabase { } } -impl service::room::edus::Data for KeyValueDatabase { +impl service::room::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, @@ -203,7 +203,7 @@ impl service::room::edus::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> impl Iterator< - Item = Result<( + Item=Result<( Box, u64, Raw, @@ -229,7 +229,7 @@ impl service::room::edus::Data for KeyValueDatabase { Error::bad_database("Invalid readreceiptid userid bytes in db.") })?, ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; let mut json = serde_json::from_slice::(&v).map_err(|_| { Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") @@ -293,7 +293,9 @@ impl service::room::edus::Data for KeyValueDatabase { .transpose()? .unwrap_or(0)) } +} +impl service::room::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, @@ -379,14 +381,16 @@ impl service::room::edus::Data for KeyValueDatabase { let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; user_ids.insert(user_id); } Ok(user_ids) } +} +impl service::room::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, @@ -416,7 +420,7 @@ impl service::room::edus::Data for KeyValueDatabase { Ok(()) } - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 06adf57..d8ce530 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,256 +1,3 @@ -mod data; -pub use data::Data; - -use crate::service::*; - -pub struct Service { - db: D, -} - -impl Service<_> { - /// Replaces the previous read receipt. - pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. 
- pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. - pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. 
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. - #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - self.db.presence_since(room_id, since) - } -} +pub mod presence; +pub mod read_receipt; +pub mod typing; diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 16c14cf..de72e21 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,67 +1,4 @@ pub trait Data { - /// Replaces the previous read receipt. - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()>; - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - >; - - /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; - - /// Returns the private read marker. - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; - - /// Returns the count of the last typing update in this room. - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; - - /// Removes a user from typing before the timeout is reached. 
- fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; - - /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; - - /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to @@ -80,7 +17,12 @@ pub trait Data { fn last_presence_update(&self, user_id: &UserId) -> Result>; /// Returns the presence event with correct last_active_ago. - fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; + fn get_presence_event( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. fn presence_since( diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 06adf57..5793a79 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -8,143 +8,6 @@ pub struct Service { } impl Service<_> { - /// Replaces the previous read receipt. - pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. 
- fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. - pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 16c14cf..4befcf2 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -21,71 +21,11 @@ pub trait Data { >; /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; /// Returns the private read marker. fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; /// Returns the count of the last typing update in this room. fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; - - /// Removes a user from typing before the timeout is reached. - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; - - /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; - - /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. 
- fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()>; - - /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; - - /// Returns the presence event with correct last_active_ago. - fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; - - /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>>; } diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 06adf57..9cd474f 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -36,12 +36,7 @@ impl Service<_> { /// Sets a private read marker at `count`. #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { self.db.private_read_set(room_id, user_id, count) } @@ -55,202 +50,4 @@ impl Service<_> { pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.last_privateread_update(user_id, room_id) } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. 
- pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. - fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. 
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - self.db.presence_since(room_id, since) - } } diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 16c14cf..83ff90e 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,91 +1,14 @@ pub trait Data { - /// Replaces the previous read receipt. - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()>; - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - >; - - /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; - - /// Returns the private read marker. - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; - - /// Returns the count of the last typing update in this room. - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; + fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; /// Removes a user from typing before the timeout is reached. - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; + fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()>; - - /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; - - /// Returns the presence event with correct last_active_ago. - fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; - - /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>>; + fn typings_all(&self, room_id: &RoomId) -> Result>; } diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 06adf57..b29c788 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -8,71 +8,14 @@ pub struct Service { } impl Service<_> { - /// Replaces the previous read receipt. 
- pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { + pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { self.db.typing_add(user_id, room_id, timeout) } /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { + pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.typing_remove(user_id, room_id) } @@ -124,10 +67,7 @@ impl Service<_> { /// Returns the count of the last typing update in this room. #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { + pub fn last_typing_update(&self, room_id: &RoomId) -> Result { self.db.last_typing_update(room_id) } @@ -144,113 +84,4 @@ impl Service<_> { }, }) } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. 
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. 
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - self.db.presence_since(room_id, since) - } } From 84630f90b77d9ff41551c1d4d1eda1f33419b039 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:37 +0200 Subject: [PATCH 1167/1727] refactor: prepare src/database/key_value/room/edus/read_receipt.rs from src/database/key_value.rs --- .../{key_value.rs => key_value/room/edus/read_receipt.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/edus/read_receipt.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/read_receipt.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/edus/read_receipt.rs From 8fa990330f0ef518a352e3d8a5977a11f49590a6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:37 +0200 Subject: [PATCH 1168/1727] refactor: prepare src/database/key_value/room/edus/presence.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/edus/presence.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/edus/presence.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/presence.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/edus/presence.rs From 0f77ae14e4a3498eede69055a0b2dc0459a5ad11 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:36 +0200 Subject: [PATCH 1169/1727] refactor: prepare src/database/key_value/room/directory.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/directory.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/directory.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/directory.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/directory.rs From 56cacf6f1ce2c0ee34fb760c5a00450bd17b31b8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:36 +0200 Subject: [PATCH 1170/1727] refactor: prepare src/database/key_value/room/alias.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/alias.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/alias.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/alias.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/alias.rs From 2950349adf5d64e874fe8b0856cc17038c16b78e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:38 +0200 Subject: [PATCH 1171/1727] refactor: prepare src/database/key_value/room/metadata.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/metadata.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/metadata.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/metadata.rs From cd3a16381629e20b072aadf090d0ca9c1d42b5f9 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:38 +0200 
Subject: [PATCH 1172/1727] refactor: prepare src/database/key_value/room/lazy_load.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/lazy_load.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/lazy_load.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/lazy_load.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/lazy_load.rs From 0213a32e6a61685319322e06b2d86c09a81ff225 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:37 +0200 Subject: [PATCH 1173/1727] refactor: prepare src/database/key_value/room/edus/typing.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/edus/typing.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/edus/typing.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/typing.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/edus/typing.rs From 332e7c9dba5827a70959d3ad42e0e0a731474a83 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:39 +0200 Subject: [PATCH 1174/1727] refactor: prepare src/database/key_value/room/state.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/state.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/state.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/state.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/state.rs From ea2dcf4ff09108d9191ffe76ec52cf47a30bfd55 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:38 +0200 Subject: [PATCH 1175/1727] refactor: prepare src/database/key_value/room/pdu_metadata.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/pdu_metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/pdu_metadata.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/pdu_metadata.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/pdu_metadata.rs From 158de9ca08937781d2feee89ff460e59204dee4f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:10:38 +0200 Subject: [PATCH 1176/1727] refactor: prepare src/database/key_value/room/outlier.rs from src/database/key_value.rs --- src/database/{key_value.rs => key_value/room/outlier.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/database/{key_value.rs => key_value/room/outlier.rs} (100%) diff --git a/src/database/key_value.rs b/src/database/key_value/room/outlier.rs similarity index 100% rename from src/database/key_value.rs rename to src/database/key_value/room/outlier.rs From 03e6e43ecd00e739d85f99ebd1bfe289e2bbecb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jul 2022 16:03:40 +0200 Subject: [PATCH 1177/1727] refactor: split up database/key_value.rs --- src/database/key_value/mod.rs | 13 + src/database/key_value/room.rs | 1 + src/database/key_value/room/alias.rs | 588 ---------------- src/database/key_value/room/directory.rs | 630 ----------------- src/database/key_value/room/edus/presence.rs | 530 --------------- 
.../key_value/room/edus/read_receipt.rs | 516 -------------- src/database/key_value/room/edus/typing.rs | 560 --------------- src/database/key_value/room/lazy_load.rs | 586 ---------------- src/database/key_value/room/metadata.rs | 638 ------------------ src/database/key_value/room/mod.rs | 17 + src/database/key_value/room/outlier.rs | 630 ----------------- src/database/key_value/room/pdu_metadata.rs | 627 ----------------- src/database/key_value/room/state.rs | 592 ---------------- 13 files changed, 31 insertions(+), 5897 deletions(-) create mode 100644 src/database/key_value/mod.rs create mode 100644 src/database/key_value/room.rs create mode 100644 src/database/key_value/room/mod.rs diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs new file mode 100644 index 0000000..0c09c17 --- /dev/null +++ b/src/database/key_value/mod.rs @@ -0,0 +1,13 @@ +mod account_data; +mod admin; +mod appservice; +mod globals; +mod key_backups; +mod media; +mod pdu; +mod pusher; +mod rooms; +mod sending; +mod transaction_ids; +mod uiaa; +mod users; diff --git a/src/database/key_value/room.rs b/src/database/key_value/room.rs new file mode 100644 index 0000000..8bd6648 --- /dev/null +++ b/src/database/key_value/room.rs @@ -0,0 +1 @@ +asdf diff --git a/src/database/key_value/room/alias.rs b/src/database/key_value/room/alias.rs index 0be1311..b00eb3b 100644 --- a/src/database/key_value/room/alias.rs +++ b/src/database/key_value/room/alias.rs @@ -1,69 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
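// Patch 1177 above is the payoff of the preparation commits: src/database/key_value.rs was copied to a
// dozen per-service files, key_value/mod.rs (shown above) now only declares the submodules, and each
// copy is trimmed down to the single `impl service::...::Data for KeyValueDatabase` block it is named
// after. A minimal sketch of what one trimmed file ends up looking like; the `use` lines are
// assumptions added for illustration, only the impl body follows the hunks in this patch:
//
// src/database/key_value/room/directory.rs (sketch)
use crate::{database::KeyValueDatabase, service, Result};
use ruma::RoomId;

impl service::room::directory::Data for KeyValueDatabase {
    fn set_public(&self, room_id: &RoomId) -> Result<()> {
        // Presence of the key alone marks the room as public; the value is empty.
        self.publicroomids.insert(room_id.as_bytes(), &[])
    }

    fn set_not_public(&self, room_id: &RoomId) -> Result<()> {
        self.publicroomids.remove(room_id.as_bytes())
    }

    fn is_public_room(&self, room_id: &RoomId) -> Result<bool> {
        Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
    }
}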
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - impl service::room::alias::Data for KeyValueDatabase { fn set_alias( &self, @@ -130,525 +64,3 @@ impl service::room::alias::Data for KeyValueDatabase { }) } } - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + 
mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? 
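// The read-receipt, typing and presence tables in these hunks all share one key layout:
// `room_id ++ 0xff ++ big-endian count ++ 0xff ++ user_id`, with the event itself stored as JSON in the
// value. Because the counter is big-endian, lexicographic key order equals numeric order, so
// "everything since `since`" is a plain range scan starting at `prefix ++ (since + 1)` that stops as
// soon as the room prefix no longer matches, and readreceipts_since (above) can slice the count and the
// user id straight back out of the key. A small sketch of the construction side; the helper name is an
// assumption, the byte layout mirrors the code in these hunks:
use ruma::{RoomId, UserId};

fn edu_key(room_id: &RoomId, count: u64, user_id: &UserId) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);                              // separator after the room id
    key.extend_from_slice(&count.to_be_bytes()); // big-endian: byte order == numeric order
    key.push(0xff);                              // separator before the user id
    key.extend_from_slice(user_id.as_bytes());
    key
}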
- .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
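// parse_presence_event (in the hunk above) rewrites last_active_ago on the way out of the database: the
// table stores an absolute "last active" timestamp in milliseconds, but clients expect "how long ago",
// so for non-online users the value is re-derived against the current clock on every read. As printed
// here the function body also ends without returning the event; the tail below is a sketch of the
// intended shape inferred from the surrounding code, not text taken from the patch:

    if presence.content.presence == PresenceState::Online {
        // Don't report idle time for users who are currently online.
        presence.content.last_active_ago = None;
    } else {
        // Convert the stored absolute timestamp into a "milliseconds ago" duration.
        presence.content.last_active_ago = presence
            .content
            .last_active_ago
            .map(|timestamp| current_timestamp - timestamp);
    }

    Ok(presence)
}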
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/directory.rs b/src/database/key_value/room/directory.rs index 0be1311..f42de45 100644 --- a/src/database/key_value/room/directory.rs +++ b/src/database/key_value/room/directory.rs @@ -1,136 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - impl service::room::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[])?; @@ -155,500 +22,3 @@ impl service::room::directory::Data for KeyValueDatabase { }) } } - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
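// lazy_load_confirm_delivery (above) is the second half of a two-phase scheme: member IDs that were
// lazy-loaded into a sync response are first parked in the in-memory lazy_load_waiting map, keyed by
// (user, device, room, since), and are only persisted into the lazyloadedids tree once the client comes
// back with that same `since` token, i.e. once the earlier response is known to have been received.
// A rough sketch of the first half as it would be called from the sync path; the function name and the
// exact owned types are assumptions for illustration and do not appear in this patch:

fn lazy_load_mark_sent(
    &self,
    user_id: &UserId,
    device_id: &DeviceId,
    room_id: &RoomId,
    lazy_loaded: HashSet<OwnedUserId>,
    count: u64,
) {
    // Park the freshly sent member IDs until the client confirms receipt by
    // syncing again with `since == count`.
    self.lazy_load_waiting.lock().unwrap().insert(
        (
            user_id.to_owned(),
            device_id.to_owned(),
            room_id.to_owned(),
            count,
        ),
        lazy_loaded,
    );
}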
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/presence.rs b/src/database/key_value/room/edus/presence.rs index 0be1311..61bd9d6 100644 --- a/src/database/key_value/room/edus/presence.rs +++ b/src/database/key_value/room/edus/presence.rs @@ -1,395 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - impl service::room::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, @@ -514,141 +122,3 @@ fn parse_presence_event(bytes: &[u8]) -> Result { } } -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/read_receipt.rs b/src/database/key_value/room/edus/read_receipt.rs index 0be1311..556e697 100644 --- a/src/database/key_value/room/edus/read_receipt.rs +++ b/src/database/key_value/room/edus/read_receipt.rs @@ -1,161 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - impl service::room::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, @@ -294,361 +136,3 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { .unwrap_or(0)) } } - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/typing.rs b/src/database/key_value/room/edus/typing.rs index 0be1311..8cfb432 100644 --- a/src/database/key_value/room/edus/typing.rs +++ b/src/database/key_value/room/edus/typing.rs @@ -1,300 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - impl service::room::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, @@ -389,266 +92,3 @@ impl service::room::edus::typing::Data for KeyValueDatabase { Ok(user_ids) } } - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/lazy_load.rs b/src/database/key_value/room/lazy_load.rs index 0be1311..8abdce4 100644 --- a/src/database/key_value/room/lazy_load.rs +++ b/src/database/key_value/room/lazy_load.rs @@ -1,519 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - impl service::room::lazy_load::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, @@ -582,73 +66,3 @@ impl service::room::lazy_load::Data for KeyValueDatabase { Ok(()) } } - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/metadata.rs b/src/database/key_value/room/metadata.rs index 0be1311..37dd717 100644 --- a/src/database/key_value/room/metadata.rs +++ b/src/database/key_value/room/metadata.rs @@ -1,588 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - impl service::room::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { @@ -599,56 +14,3 @@ impl service::room::metadata::Data for KeyValueDatabase { .is_some()) } } - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| {
- serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
- })
- }
-
- fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> {
- self.eventid_outlierpdu.insert(
- event_id.as_bytes(),
- &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"),
- )
- }
-}
-
-impl service::room::pdu_metadata::Data for KeyValueDatabase {
- fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> {
- for prev in event_ids {
- let mut key = room_id.as_bytes().to_vec();
- key.extend_from_slice(prev.as_bytes());
- self.referencedevents.insert(&key, &[])?;
- }
-
- Ok(())
- }
-
- fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result {
- let mut key = room_id.as_bytes().to_vec();
- key.extend_from_slice(event_id.as_bytes());
- Ok(self.referencedevents.get(&key)?.is_some())
- }
-
- fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> {
- self.softfailedeventids.insert(event_id.as_bytes(), &[])
- }
-
- fn is_event_soft_failed(&self, event_id: &EventId) -> Result {
- self.softfailedeventids
- .get(event_id.as_bytes())
- .map(|o| o.is_some())
- }
-}
diff --git a/src/database/key_value/room/mod.rs b/src/database/key_value/room/mod.rs
new file mode 100644
index 0000000..2a3f81d
--- /dev/null
+++ b/src/database/key_value/room/mod.rs
@@ -0,0 +1,16 @@
+mod state;
+mod alias;
+mod directory;
+mod edus;
+mod event_handler;
+mod lazy_loading;
+mod metadata;
+mod outlier;
+mod pdu_metadata;
+mod search;
+mod short;
+mod state_accessor;
+mod state_cache;
+mod state_compressor;
+mod timeline;
+mod user;
diff --git a/src/database/key_value/room/outlier.rs b/src/database/key_value/room/outlier.rs
index 0be1311..c979d25 100644
--- a/src/database/key_value/room/outlier.rs
+++ b/src/database/key_value/room/outlier.rs
@@ -1,605 +1,3 @@
-use crate::service;
-
-impl service::room::state::Data for KeyValueDatabase {
- fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> {
- self.roomid_shortstatehash
- .get(room_id.as_bytes())?
- .map_or(Ok(None), |bytes| {
- Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
- Error::bad_database("Invalid shortstatehash in roomid_shortstatehash")
- })?))
- })
- }
-
- fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64
- _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex
- ) -> Result<()> {
- self.roomid_shortstatehash
- .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;
- Ok(())
- }
-
- fn set_event_state(&self) -> Result<()> {
- db.shorteventid_shortstatehash
- .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?;
- Ok(())
- }
-
- fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> {
- let mut prefix = room_id.as_bytes().to_vec();
- prefix.push(0xff);
-
- self.roomid_pduleaves
- .scan_prefix(prefix)
- .map(|(_, bytes)| {
- EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| {
- Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
- })?)
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
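Throughout these impls, u64 values such as the shortstatehash and the global counters are written with to_be_bytes(). Big-endian is what makes byte-lexicographic key order agree with numeric order, so prefix scans come back oldest-to-newest. A small illustration of why, again with a BTreeMap standing in for the database tree (an assumption for the example):

use std::collections::BTreeMap;

fn main() {
    // Keys of the form `prefix ++ count.to_be_bytes()` sort by count.
    let mut tree: BTreeMap<Vec<u8>, &str> = BTreeMap::new();
    let prefix = b"!r:example.org\xff".to_vec();

    for (count, label) in [(256u64, "c"), (2, "b"), (1, "a")] {
        let mut key = prefix.clone();
        key.extend_from_slice(&count.to_be_bytes());
        tree.insert(key, label);
    }

    let in_order: Vec<_> = tree
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| *v)
        .collect();
    // Little-endian bytes would sort 256 before 1 here; big-endian keeps numeric order.
    assert_eq!(in_order, ["a", "b", "c"]);
}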
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
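The read-receipt storage above keys every receipt as `room 0xff count 0xff user`, which is what lets readreceipts_since start at `prefix ++ (since + 1)` and walk forward, decoding the count and the user id straight out of the key. A rough sketch of that scan under the same std-only assumptions (BTreeMap for readreceiptid_readreceipt, the receipt payload elided):

use std::collections::BTreeMap;

fn receipt_key(room: &str, count: u64, user: &str) -> Vec<u8> {
    let mut key = room.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(user.as_bytes());
    key
}

// Decode `(count, user)` back out of a key, mirroring the slice arithmetic
// in readreceipts_since above.
fn decode(prefix_len: usize, key: &[u8]) -> (u64, String) {
    let count = u64::from_be_bytes(key[prefix_len..prefix_len + 8].try_into().unwrap());
    let user = String::from_utf8(key[prefix_len + 8 + 1..].to_vec()).unwrap();
    (count, user)
}

fn main() {
    let room = "!r:example.org";
    let mut prefix = room.as_bytes().to_vec();
    prefix.push(0xff);

    let mut tree: BTreeMap<Vec<u8>, ()> = BTreeMap::new();
    tree.insert(receipt_key(room, 5, "@alice:example.org"), ());
    tree.insert(receipt_key(room, 9, "@bob:example.org"), ());

    // "Everything after `since`": seek to prefix ++ (since + 1), stop when the
    // room prefix no longer matches.
    let since = 5u64;
    let mut start = prefix.clone();
    start.extend_from_slice(&(since + 1).to_be_bytes());

    let newer: Vec<_> = tree
        .range(start..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(k, _)| decode(prefix.len(), k))
        .collect();
    assert_eq!(newer, vec![(9u64, "@bob:example.org".to_string())]);
}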
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
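typing_remove above has to cope with typing_add having inserted several entries for the same user, so it prefix-scans the room and deletes every entry whose value equals the user id. A simplified std-only sketch of that value-filtered cleanup (the real key also embeds the timeout; strings and a BTreeMap are stand-ins):

use std::collections::BTreeMap;

fn main() {
    // Stand-in for typingid_userid. Simplified key: room 0xff count;
    // value: the typing user's id.
    let mut typingid_userid: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    let mut prefix = b"!r:example.org".to_vec();
    prefix.push(0xff);

    for count in 0u64..3 {
        let mut key = prefix.clone();
        key.extend_from_slice(&count.to_be_bytes());
        typingid_userid.insert(key, b"@alice:example.org".to_vec());
    }

    // Collect every outdated entry for this user, then remove them,
    // mirroring the loop in typing_remove above.
    let user: &[u8] = b"@alice:example.org";
    let outdated: Vec<Vec<u8>> = typingid_userid
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .filter(|(_, v)| v.as_slice() == user)
        .map(|(k, _)| k.clone())
        .collect();
    for key in outdated {
        typingid_userid.remove(&key);
    }
    assert!(typingid_userid.is_empty());
}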
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - impl service::room::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -624,31 +22,3 @@ impl service::room::outlier::Data for KeyValueDatabase { ) } } - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/pdu_metadata.rs b/src/database/key_value/room/pdu_metadata.rs index 0be1311..6b2171c 100644 --- a/src/database/key_value/room/pdu_metadata.rs +++ b/src/database/key_value/room/pdu_metadata.rs @@ -1,630 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
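The alias impl above keeps two indexes: alias_roomid maps an alias straight to its room for resolution, while aliasid_alias keys each alias under `room 0xff count` so that listing or deleting all aliases of a room is a single prefix scan. A std-only sketch of that double index; the counter and the BTreeMaps are stand-ins for globals.next_count() and the database trees:

use std::collections::BTreeMap;

fn main() {
    let mut alias_roomid: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    let mut aliasid_alias: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();

    let room = b"!r:example.org".to_vec();
    let alias = b"#general:example.org".to_vec();
    let count: u64 = 1; // stand-in for globals.next_count()

    // Forward index: alias -> room.
    alias_roomid.insert(alias.clone(), room.clone());
    // Reverse index: room 0xff count -> alias.
    let mut aliasid = room.clone();
    aliasid.push(0xff);
    aliasid.extend_from_slice(&count.to_be_bytes());
    aliasid_alias.insert(aliasid, alias.clone());

    // Resolution is a point lookup; listing a room's aliases is a prefix scan.
    assert_eq!(alias_roomid.get(&alias), Some(&room));
    let mut prefix = room.clone();
    prefix.push(0xff);
    let aliases: Vec<&Vec<u8>> = aliasid_alias
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(_, v)| v)
        .collect();
    assert_eq!(aliases, vec![&alias]);
}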
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - impl service::room::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { diff --git a/src/database/key_value/room/state.rs b/src/database/key_value/room/state.rs index 0be1311..5daf6c6 100644 --- a/src/database/key_value/room/state.rs +++ b/src/database/key_value/room/state.rs @@ -1,5 +1,3 @@ -use crate::service; - impl service::room::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash @@ -61,594 +59,4 @@ impl service::room::state::Data for KeyValueDatabase { Ok(()) } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? 
{ - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| 
k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
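parse_presence_event above rewrites last_active_ago on the way out of the database: the value is persisted as an absolute millisecond timestamp and is converted to an age relative to now, or dropped entirely while the user is online. A rough equivalent with plain integers instead of ruma's PresenceEvent and UInt; the struct and field names here are illustrative assumptions:

// Stand-in for the stored presence payload.
struct StoredPresence {
    online: bool,
    last_active_at_ms: Option<u64>,
}

// Online users report no age; everyone else gets "now minus stored timestamp".
fn last_active_ago(p: &StoredPresence, now_ms: u64) -> Option<u64> {
    if p.online {
        None
    } else {
        p.last_active_at_ms.map(|ts| now_ms.saturating_sub(ts))
    }
}

fn main() {
    let offline = StoredPresence { online: false, last_active_at_ms: Some(1_000) };
    assert_eq!(last_active_ago(&offline, 6_000), Some(5_000));

    let online = StoredPresence { online: true, last_active_at_ms: Some(1_000) };
    assert_eq!(last_active_ago(&online, 6_000), None);
}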
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } } From 877ee484803e0a3b3b36aa292bc08189ae078275 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:52:58 +0200 Subject: [PATCH 1178/1727] refactor: prepare database/key_value/rooms/search.rs from service/rooms/search/mod.rs --- .../rooms/search/mod.rs => database/key_value/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/search/mod.rs => database/key_value/rooms/search.rs} (100%) diff --git a/src/service/rooms/search/mod.rs b/src/database/key_value/rooms/search.rs similarity index 100% rename from src/service/rooms/search/mod.rs rename to src/database/key_value/rooms/search.rs From f6040ef2d7f766eb20dc1c0268d4e7b79f4da44d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:52:58 +0200 Subject: [PATCH 1179/1727] refactor: prepare database/key_value/rooms/search.rs from service/rooms/timeline/mod.rs --- .../rooms/timeline/mod.rs => database/key_value/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/timeline/mod.rs => database/key_value/rooms/search.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/database/key_value/rooms/search.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/database/key_value/rooms/search.rs From b0029c49b917ccecc06c475db709aeef4671256c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jul 2022 16:28:43 +0200 Subject: [PATCH 1180/1727] refactor: work on search --- src/database/key_value/room.rs | 1 - .../key_value/{room => rooms}/alias.rs | 0 .../key_value/{room => rooms}/directory.rs | 0 .../{room => rooms}/edus/presence.rs | 0 .../{room => rooms}/edus/read_receipt.rs | 0 .../key_value/{room => rooms}/edus/typing.rs | 0 .../key_value/{room => 
rooms}/lazy_load.rs | 0 .../key_value/{room => rooms}/metadata.rs | 0 src/database/key_value/{room => rooms}/mod.rs | 0 .../key_value/{room => rooms}/outlier.rs | 0 .../key_value/{room => rooms}/pdu_metadata.rs | 0 src/database/key_value/rooms/search.rs | 966 +----------------- .../key_value/{room => rooms}/state.rs | 0 src/service/rooms/search/data.rs | 9 + src/service/rooms/search/mod.rs | 53 +- src/service/rooms/timeline/mod.rs | 15 +- 16 files changed, 37 insertions(+), 1007 deletions(-) delete mode 100644 src/database/key_value/room.rs rename src/database/key_value/{room => rooms}/alias.rs (100%) rename src/database/key_value/{room => rooms}/directory.rs (100%) rename src/database/key_value/{room => rooms}/edus/presence.rs (100%) rename src/database/key_value/{room => rooms}/edus/read_receipt.rs (100%) rename src/database/key_value/{room => rooms}/edus/typing.rs (100%) rename src/database/key_value/{room => rooms}/lazy_load.rs (100%) rename src/database/key_value/{room => rooms}/metadata.rs (100%) rename src/database/key_value/{room => rooms}/mod.rs (100%) rename src/database/key_value/{room => rooms}/outlier.rs (100%) rename src/database/key_value/{room => rooms}/pdu_metadata.rs (100%) rename src/database/key_value/{room => rooms}/state.rs (100%) create mode 100644 src/service/rooms/search/data.rs diff --git a/src/database/key_value/room.rs b/src/database/key_value/room.rs deleted file mode 100644 index 8bd6648..0000000 --- a/src/database/key_value/room.rs +++ /dev/null @@ -1 +0,0 @@ -asdf diff --git a/src/database/key_value/room/alias.rs b/src/database/key_value/rooms/alias.rs similarity index 100% rename from src/database/key_value/room/alias.rs rename to src/database/key_value/rooms/alias.rs diff --git a/src/database/key_value/room/directory.rs b/src/database/key_value/rooms/directory.rs similarity index 100% rename from src/database/key_value/room/directory.rs rename to src/database/key_value/rooms/directory.rs diff --git a/src/database/key_value/room/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs similarity index 100% rename from src/database/key_value/room/edus/presence.rs rename to src/database/key_value/rooms/edus/presence.rs diff --git a/src/database/key_value/room/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs similarity index 100% rename from src/database/key_value/room/edus/read_receipt.rs rename to src/database/key_value/rooms/edus/read_receipt.rs diff --git a/src/database/key_value/room/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs similarity index 100% rename from src/database/key_value/room/edus/typing.rs rename to src/database/key_value/rooms/edus/typing.rs diff --git a/src/database/key_value/room/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs similarity index 100% rename from src/database/key_value/room/lazy_load.rs rename to src/database/key_value/rooms/lazy_load.rs diff --git a/src/database/key_value/room/metadata.rs b/src/database/key_value/rooms/metadata.rs similarity index 100% rename from src/database/key_value/room/metadata.rs rename to src/database/key_value/rooms/metadata.rs diff --git a/src/database/key_value/room/mod.rs b/src/database/key_value/rooms/mod.rs similarity index 100% rename from src/database/key_value/room/mod.rs rename to src/database/key_value/rooms/mod.rs diff --git a/src/database/key_value/room/outlier.rs b/src/database/key_value/rooms/outlier.rs similarity index 100% rename from src/database/key_value/room/outlier.rs rename to src/database/key_value/rooms/outlier.rs diff 
--git a/src/database/key_value/room/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs similarity index 100% rename from src/database/key_value/room/pdu_metadata.rs rename to src/database/key_value/rooms/pdu_metadata.rs diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 6a32e8b..1ffffe5 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,956 +1,23 @@ - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. 
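latest_pdu_count in the diff above finds the newest pdu of a room by seeking to `prefix ++ u64::MAX` and iterating backwards while the room prefix still matches; readreceipt_update uses the same reverse-scan trick to locate the entry it replaces. Roughly, with a BTreeMap standing in for pduid_pdu (an assumption for the example):

use std::collections::BTreeMap;

fn main() {
    // Stand-in for pduid_pdu: key = shortroomid prefix ++ count, big-endian.
    let mut pduid_pdu: BTreeMap<Vec<u8>, &str> = BTreeMap::new();
    let prefix = 42u64.to_be_bytes().to_vec();

    for count in [1u64, 7, 3] {
        let mut key = prefix.clone();
        key.extend_from_slice(&count.to_be_bytes());
        pduid_pdu.insert(key, "pdu json");
    }

    // "Latest entry under a prefix": start from prefix ++ u64::MAX and walk
    // backwards, like the reverse iter_from in latest_pdu_count.
    let mut last_possible_key = prefix.clone();
    last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());

    let latest_count = pduid_pdu
        .range(..=last_possible_key)
        .rev()
        .take_while(|(k, _)| k.starts_with(&prefix))
        .next()
        .map(|(k, _)| u64::from_be_bytes(k[prefix.len()..].try_into().unwrap()));
    assert_eq!(latest_count, Some(7));
}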
- /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
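append_pdu below allocates pdu ids as `shortroomid ++ count`, both big-endian, and pdu_count above recovers the count from the trailing eight bytes of such an id. A short sketch of that round trip, assuming nothing beyond the standard library:

use std::mem::size_of;

// Build a pdu id the way append_pdu does, so ids sort by room and then
// by insertion order.
fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

// Recover the count from the tail of a pdu id, mirroring pdu_count above.
fn pdu_count(pdu_id: &[u8]) -> u64 {
    let tail = &pdu_id[pdu_id.len() - size_of::<u64>()..];
    u64::from_be_bytes(tail.try_into().expect("pdu id ends in 8 count bytes"))
}

fn main() {
    let id = pdu_id(42, 1337);
    assert_eq!(pdu_count(&id), 1337);
}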
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
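// The search index built in the hunk above (and reintroduced as `index_pdu`
// in the new key_value/rooms/search.rs later in this patch) tokenizes the
// message body into lowercase words of at most 50 characters and stores one
// empty value per `shortroomid || word || 0xff || pdu_id` key, so a search is
// a prefix scan per word. A standalone sketch of that tokenization under the
// same assumptions (helper name is illustrative):

fn tokenize_for_index(shortroomid: u64, pdu_id: &[u8], body: &str) -> Vec<Vec<u8>> {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|word| !word.is_empty() && word.len() <= 50)
        .map(str::to_lowercase)
        .map(|word| {
            let mut key = shortroomid.to_be_bytes().to_vec();
            key.extend_from_slice(word.as_bytes());
            key.push(0xff);
            key.extend_from_slice(pdu_id);
            key
        })
        .collect()
}

// The m.room.member targeting check described just above follows below,
// together with the user/alias/room namespace matching for each appservice.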
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version +impl service::room::search::Data for KeyValueDatabase { + + fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { + let mut batch = body + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) + .map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(&pdu_id); + (key, Vec::new()) }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; + self.tokenids.insert_batch(&mut batch)?; } - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
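// Sequence in `create_hash_and_sign_event` above: the PDU is first built with
// a placeholder event id, auth-checked against its auth events, hashed and
// signed as canonical JSON, and only then given its real id, "$" followed by
// the reference hash of the signed JSON, so the id commits to exactly what
// was signed. A reduced sketch of that final step; the hash string is a
// stand-in for what ruma::signatures::reference_hash returns, and the exact
// base64 flavour depends on the room version:

fn event_id_from_reference_hash(reference_hash: &str) -> String {
    format!("${reference_hash}")
}

// With the id fixed, `build_and_append_pdu` below appends the state snapshot
// first and the PDU second, which is why the comment above insists that
// `append_pdu` must not be able to fail in between.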
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. 
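// `pdus_since` above and `pdus_until`/`pdus_after` below all seek the same
// way: build a start key of `shortroomid || count`, nudge the count by one so
// the boundary event itself is not re-sent, then walk forwards or backwards
// while the key still carries the room prefix. A standalone sketch of that
// start-key construction (helper name is illustrative):

fn pagination_start_key(shortroomid: u64, boundary: u64, backwards: bool) -> Vec<u8> {
    // Backwards pagination starts just before the boundary (`until - 1`),
    // forwards pagination just after it (`since + 1` / `from + 1`).
    let count = if backwards {
        boundary.saturating_sub(1)
    } else {
        boundary.saturating_add(1)
    };
    let mut key = shortroomid.to_be_bytes().to_vec();
    key.extend_from_slice(&count.to_be_bytes());
    key
}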
- #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( + fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, @@ -997,4 +64,3 @@ ) })) } - diff --git a/src/database/key_value/room/state.rs b/src/database/key_value/rooms/state.rs similarity index 100% rename from src/database/key_value/room/state.rs rename to src/database/key_value/rooms/state.rs diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs new file mode 100644 index 0000000..1601e0d --- /dev/null +++ b/src/service/rooms/search/data.rs @@ -0,0 +1,9 @@ +pub trait Data { + pub fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + + pub fn search_pdus<'a>( + &'a self, + room_id: &RoomId, + search_string: &str, + ) -> Result> + 'a, Vec)>>; +} diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index ce05505..5478273 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,50 +1,19 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) + self.db.search_pdus(room_id, search_string) } - +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 6299b16..5b423d2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -439,20 +439,7 @@ .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; + DB.rooms.search.index_pdu(room_id, pdu_id, body)?; let admin_room = self.id_from_alias( <&RoomAliasId>::try_from( From c8f64844ab81ede5b52409a1c640692fea43e5c1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 1181/1727] refactor: 
prepare service/rooms/auth_chain/mod.rs from service/rooms/state_compressor/mod.rs --- src/service/rooms/{state_compressor => auth_chain}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state_compressor => auth_chain}/mod.rs (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/auth_chain/mod.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/service/rooms/auth_chain/mod.rs From 691e69847fff26cc83869c103c125d6cefbb0b99 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 1182/1727] refactor: prepare database/key_value/rooms/auth_chain.rs from service/rooms/state_compressor/mod.rs --- .../mod.rs => database/key_value/rooms/auth_chain.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_compressor/mod.rs => database/key_value/rooms/auth_chain.rs} (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/database/key_value/rooms/auth_chain.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/database/key_value/rooms/auth_chain.rs From 8d0ed3ec51e077ad710dd435be69c17b8a505e5f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 1183/1727] refactor: prepare database/key_value/rooms/state_compressor.rs from service/rooms/state_compressor/mod.rs --- .../mod.rs => database/key_value/rooms/state_compressor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_compressor/mod.rs => database/key_value/rooms/state_compressor.rs} (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/database/key_value/rooms/state_compressor.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/database/key_value/rooms/state_compressor.rs From e045abe96182cf61b59853b056007927049fca93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jul 2022 17:23:26 +0200 Subject: [PATCH 1184/1727] refactor: work on auth chain and state compressor --- src/database/key_value/rooms/auth_chain.rs | 374 +----------------- .../key_value/rooms/state_compressor.rs | 338 +--------------- src/service/rooms/auth_chain/data.rs | 4 + src/service/rooms/auth_chain/mod.rs | 345 +--------------- src/service/rooms/state_compressor/data.rs | 10 + src/service/rooms/state_compressor/mod.rs | 121 +----- 6 files changed, 81 insertions(+), 1111 deletions(-) create mode 100644 src/service/rooms/auth_chain/data.rs create mode 100644 src/service/rooms/state_compressor/data.rs diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 197ce84..57dbb14 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,358 +1,24 @@ - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() +impl service::room::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain<'a>() -> Result> { + self.shorteventid_authchain + .get(&shorteventid.to_be_bytes())? 
+ .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) + .collect() + }) } - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) + fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result<()> { + shorteventid_authchain.insert( + &shorteventid.to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + ) } +} diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 197ce84..71a2f3a 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,26 +1,5 @@ - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - +impl service::room::state_compressor::Data for KeyValueDatabase { + fn get_statediff(shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff .get(&shortstatehash.to_be_bytes())? @@ -47,312 +26,23 @@ i += 2 * size_of::(); } - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } + StateDiff { parent, added, removed } } - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. 
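// On disk a state diff is one value in `shortstatehash_statediff`: an 8-byte
// big-endian parent shortstatehash (0 meaning "no parent, this layer is the
// full state"), the added compressed state events, and, only if anything was
// removed, an all-zero u64 marker followed by the removed events; each
// compressed event is a fixed-size shortstatekey || shorteventid pair. A
// standalone sketch of that encoding, matching what the new `get_statediff`
// parses and `save_statediff` writes (types simplified to byte arrays):

type CompressedStateEvent = [u8; 16];

fn encode_statediff(
    parent: u64,
    added: &[CompressedStateEvent],
    removed: &[CompressedStateEvent],
) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for new in added {
        value.extend_from_slice(new);
    }
    if !removed.is_empty() {
        // The parser treats a pair starting with eight zero bytes as the
        // separator, relying on shortstatekeys never being zero.
        value.extend_from_slice(&0_u64.to_be_bytes());
        for gone in removed {
            value.extend_from_slice(gone);
        }
    }
    value
}

// Layer 0 therefore always holds the full state; higher layers hold diffs and
// are folded into their parent by `save_state_from_diff` once they grow too
// large relative to their siblings.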
Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); + fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()> { + let mut value = diff.parent.to_be_bytes().to_vec(); + for new in &diff.new { + value.extend_from_slice(&new[..]); } - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in &diff.removed { + value.extend_from_slice(&removed[..]); } } - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value)?; } +} diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs new file mode 100644 index 0000000..d8fde95 --- /dev/null +++ b/src/service/rooms/auth_chain/data.rs @@ -0,0 +1,4 @@ +pub trait Data { + fn get_cached_eventid_authchain<'a>() -> Result>; + fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; +} diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 197ce84..dfc289f 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,327 +1,27 @@ +mod data; +pub use data::Data; - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( + pub fn get_cached_eventid_authchain<'a>( &'a self, key: &[u64], ) -> Result>>> { // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key.to_be_bytes()) { return Ok(Some(Arc::clone(result))); } - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) + // We only save auth chains for single events in the db + if key.len == 1 { + // Check DB cache + if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) { let chain = Arc::new(chain); @@ -339,20 +39,15 @@ } #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db + pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + // Only persist single events in db if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; + self.db.cache_auth_chain(key[0], auth_chain)?; } // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); + self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); Ok(()) } +} diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs new file mode 100644 index 0000000..8b855cd --- /dev/null +++ b/src/service/rooms/state_compressor/data.rs @@ -0,0 +1,10 @@ +struct StateDiff { + parent: Option, + added: Vec, + removed: Vec, +} + +pub trait Data { + fn get_statediff(shortstatehash: u64) -> Result; + fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()>; +} diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 197ce84..d6d88e2 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( @@ -21,31 +30,7 @@ return Ok(r.clone()); } - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } + self.db.get_statediff(shortstatehash)?; if parent != 0_u64 { let mut response = self.load_shortstatehash_info(parent)?; @@ -170,17 +155,7 @@ if parent_states.is_empty() { // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + self.db.save_statediff(shortstatehash, StateDiff { parent: 0, new: statediffnew, removed: statediffremoved })?; return Ok(()); }; @@ -222,20 +197,7 @@ )?; } else { // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + self.db.save_statediff(shortstatehash, StateDiff { parent: parent.0, new: statediffnew, removed: statediffremoved })?; } Ok(()) @@ -298,61 +260,4 @@ Ok((new_shortstatehash, statediffnew, statediffremoved)) } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } +} From 306ff5ee4e95e71284a2c3c2da17fe4ca0bc8162 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:11 +0200 Subject: [PATCH 1185/1727] refactor: prepare database/key_value/users.rs from service/users.rs --- src/{service => database/key_value}/users.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/users.rs (100%) diff --git a/src/service/users.rs b/src/database/key_value/users.rs similarity index 100% rename from src/service/users.rs rename to src/database/key_value/users.rs From e62b0904ea78d740a1c1f9bc7af264f17c458797 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:11 +0200 Subject: [PATCH 1186/1727] refactor: prepare database/key_value/pusher.rs from service/pusher.rs --- src/{service => database/key_value}/pusher.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/pusher.rs (100%) diff --git a/src/service/pusher.rs b/src/database/key_value/pusher.rs similarity index 100% rename from src/service/pusher.rs rename to src/database/key_value/pusher.rs From cb9458122cb49a7dc2e6994e587f0142e8ef3105 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 1187/1727] refactor: prepare service/pusher/data.rs from service/pusher.rs --- src/service/{pusher.rs => pusher/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{pusher.rs => pusher/data.rs} (100%) diff --git a/src/service/pusher.rs b/src/service/pusher/data.rs similarity index 100% rename from src/service/pusher.rs rename to src/service/pusher/data.rs From 70863260f65f2cffb24ce0ffdd686bce957a77f1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 1188/1727] refactor: prepare service/pusher/mod.rs from service/pusher.rs --- src/service/{pusher.rs => pusher/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{pusher.rs => pusher/mod.rs} (100%) diff --git a/src/service/pusher.rs b/src/service/pusher/mod.rs similarity index 100% rename from src/service/pusher.rs rename to src/service/pusher/mod.rs From 94ce06bb76df4b163041fe9811b7439f443fbad8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 1189/1727] refactor: prepare service/users/data.rs from service/users.rs --- src/service/{users.rs => users/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{users.rs => users/data.rs} (100%) diff --git a/src/service/users.rs b/src/service/users/data.rs similarity index 100% rename from src/service/users.rs rename to src/service/users/data.rs From dc7670f3a85210bd62c8f9138be1f1ec05524525 Mon Sep 17 
00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 1190/1727] refactor: prepare service/users/mod.rs from service/users.rs --- src/service/{users.rs => users/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{users.rs => users/mod.rs} (100%) diff --git a/src/service/users.rs b/src/service/users/mod.rs similarity index 100% rename from src/service/users.rs rename to src/service/users/mod.rs From e8b33e8c5a16abb8763f0d6e2ff6fcb3ff956865 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:13 +0200 Subject: [PATCH 1191/1727] refactor: prepare service/rooms/timeline/data.rs from service/rooms/timeline/mod.rs --- src/service/rooms/timeline/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/timeline/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/data.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/service/rooms/timeline/data.rs From bea5d1e0d8dc5c4395dcbae5c251915a91f6079d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:13 +0200 Subject: [PATCH 1192/1727] refactor: prepare database/key_value/rooms/timeline.rs from service/rooms/timeline/mod.rs --- .../timeline/mod.rs => database/key_value/rooms/timeline.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/timeline/mod.rs => database/key_value/rooms/timeline.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/database/key_value/rooms/timeline.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/database/key_value/rooms/timeline.rs From f56424bc8d8d582c52be91116ceb29d69791c563 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Aug 2022 19:42:22 +0200 Subject: [PATCH 1193/1727] Refactor appservices, pusher, timeline, transactionids, users --- .../key_value}/appservice.rs | 24 +- src/database/key_value/pusher.rs | 302 +----- src/database/key_value/rooms/timeline.rs | 663 +----------- .../key_value}/transaction_ids.rs | 13 +- src/database/key_value/users.rs | 148 +-- src/service/appservice/data.rs | 17 + src/service/appservice/mod.rs | 36 + src/service/globals.rs | 14 +- src/service/pusher.rs | 348 ------- src/service/pusher/data.rs | 346 +------ src/service/pusher/mod.rs | 575 +++++------ src/service/rooms/short/mod.rs | 11 +- src/service/rooms/timeline/data.rs | 901 +--------------- src/service/rooms/timeline/mod.rs | 232 +---- src/service/transaction_ids/data.rs | 16 + src/service/transaction_ids/mod.rs | 44 + src/service/users/data.rs | 961 +----------------- src/service/users/mod.rs | 845 +-------------- 18 files changed, 546 insertions(+), 4950 deletions(-) rename src/{service => database/key_value}/appservice.rs (77%) rename src/{service => database/key_value}/transaction_ids.rs (77%) create mode 100644 src/service/appservice/data.rs create mode 100644 src/service/appservice/mod.rs delete mode 100644 src/service/pusher.rs create mode 100644 src/service/transaction_ids/data.rs create mode 100644 src/service/transaction_ids/mod.rs diff --git a/src/service/appservice.rs b/src/database/key_value/appservice.rs similarity index 77% rename from src/service/appservice.rs rename to src/database/key_value/appservice.rs index edd5009..66a2a5c 100644 --- a/src/service/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,19 +1,5 @@ -use crate::{utils, Error, Result}; -use 
std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -use super::abstraction::Tree; - -pub struct Appservice { - pub(super) cached_registrations: Arc>>, - pub(super) id_appserviceregistrations: Arc, -} - -impl Appservice { +impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller - /// pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); @@ -34,7 +20,7 @@ impl Appservice { /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; self.cached_registrations @@ -44,7 +30,7 @@ impl Appservice { Ok(()) } - pub fn get_registration(&self, id: &str) -> Result> { + fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() .unwrap() @@ -66,14 +52,14 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> Result> + '_> { + fn iter_ids(&self) -> Result> + '_> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) })) } - pub fn all(&self) -> Result> { + fn all(&self) -> Result> { self.iter_ids()? .filter_map(|id| id.ok()) .map(move |id| { diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 6b906c2..94374ab 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,36 +1,5 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { +impl service::pusher::Data for KeyValueDatabase { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -52,8 +21,7 @@ impl PushData { Ok(()) } - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? 
.map(|push| { @@ -63,8 +31,7 @@ impl PushData { .transpose() } - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { + fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -77,8 +44,7 @@ impl PushData { .collect() } - #[tracing::instrument(skip(self, sender))] - pub fn get_pusher_senderkeys<'a>( + fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a { @@ -88,261 +54,3 @@ impl PushData { self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) } } - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? - .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? 
{ - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? - .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? 
- { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) -} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 5b423d2..58884ec 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,28 +1,5 @@ - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { +impl service::room::timeline::Data for KeyValueDatabase { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() @@ -51,31 +28,8 @@ } } - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pdu_id| self.pdu_count(&pdu_id)) @@ -207,7 +161,6 @@ } /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( @@ -223,598 +176,8 @@ } } - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
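// --- Aside (not part of the patch above; a minimal, std-only sketch): the
// notification and highlight counters incremented here are keyed by a
// "userroom_id", i.e. the user id bytes, a 0xff separator, then the room id
// bytes. The helper names below are illustrative, not Conduit's API.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff); // 0xff is used as the key separator throughout this codebase
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn split_userroom_key(key: &[u8]) -> Option<(&[u8], &[u8])> {
    // Split back into (user, room) at the first 0xff byte.
    let pos = key.iter().position(|&b| b == 0xff)?;
    Some((&key[..pos], &key[pos + 1..]))
}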
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - } - - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
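// --- Aside (not from the patch; stubbed so it compiles on its own): the
// ordering invariant stated in the comment above, spelled out step by step.
// The real calls are append_to_state, append_pdu and set_room_state on the
// rooms service; the names below are placeholders.
struct StateHashId(u64);
struct PduId(Vec<u8>);

fn compute_and_store_state() -> Result<StateHashId, &'static str> { Ok(StateHashId(1)) }
fn store_pdu() -> PduId { PduId(vec![0]) }
fn set_current_state(_state: StateHashId, _pdu: PduId) {}

fn append_with_state_sketch() -> Result<(), &'static str> {
    let state = compute_and_store_state()?; // 1. persist the state snapshot first (this step can fail)
    let pdu = store_pdu();                  // 2. then persist the event itself (treated as infallible)
    set_current_state(state, pdu);          // 3. only now advance the room's current-state pointer
    Ok(())
}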
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_since<'a>( &'a self, user_id: &UserId, @@ -849,7 +212,6 @@ /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_until<'a>( &'a self, user_id: &UserId, @@ -884,9 +246,6 @@ })) } - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_after<'a>( &'a self, user_id: &UserId, @@ -920,18 +279,4 @@ Ok((pdu_id, pdu)) })) } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - +} diff --git a/src/service/transaction_ids.rs b/src/database/key_value/transaction_ids.rs similarity index 77% rename from src/service/transaction_ids.rs rename to src/database/key_value/transaction_ids.rs index ed0970d..81c1197 100644 --- a/src/service/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,15 +1,4 @@ -use std::sync::Arc; - -use crate::Result; -use ruma::{DeviceId, TransactionId, UserId}; - -use super::abstraction::Tree; - -pub struct TransactionIds { - pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) -} - -impl TransactionIds { +impl service::pusher::Data for KeyValueDatabase { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 7c15f1d..5ef058f 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,49 +1,10 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; - -use super::abstraction::Tree; - -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count -} - -impl Users { +impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] pub fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password @@ -56,7 +17,6 @@ impl Users { } /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, @@ -71,20 +31,17 @@ impl Users { } /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.set_password(user_id, password)?; Ok(()) } /// Returns the number of users registered on this server. 
- #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? @@ -112,7 +69,6 @@ impl Users { } /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { @@ -125,7 +81,6 @@ impl Users { /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] pub fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password @@ -139,7 +94,6 @@ impl Users { /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.is_empty() { @@ -159,7 +113,6 @@ impl Users { } /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? @@ -171,7 +124,6 @@ impl Users { } /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(password) { @@ -191,7 +143,6 @@ impl Users { } /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? @@ -203,7 +154,6 @@ impl Users { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname @@ -216,7 +166,6 @@ impl Users { } /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] pub fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? @@ -230,7 +179,6 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl @@ -243,7 +191,6 @@ impl Users { } /// Get the blurhash of a user. - #[tracing::instrument(skip(self, user_id))] pub fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? @@ -257,7 +204,6 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, blurhash))] pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash @@ -270,7 +216,6 @@ impl Users { } /// Adds a new device to a user. 
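// --- Aside (not from the patch; std-only sketch): `find_from_token` above
// works because `token_userdeviceid` stores "user_id 0xff device_id" as its
// value, so resolving a token is one lookup plus a split at the separator.
// The map type and helper name below are illustrative.
use std::collections::HashMap;

fn find_from_token_sketch(
    token_userdeviceid: &HashMap<Vec<u8>, Vec<u8>>,
    token: &str,
) -> Option<(String, String)> {
    let value = token_userdeviceid.get(token.as_bytes())?;
    let mut parts = value.splitn(2, |&b| b == 0xff);
    let user = String::from_utf8(parts.next()?.to_vec()).ok()?;   // e.g. "@user:server"
    let device = String::from_utf8(parts.next()?.to_vec()).ok()?; // device id
    Some((user, device))
}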
- #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, @@ -305,7 +250,6 @@ impl Users { } /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -336,7 +280,6 @@ impl Users { } /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, @@ -359,7 +302,6 @@ impl Users { } /// Replaces the access token of one device. - #[tracing::instrument(skip(self, user_id, device_id, token))] pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -383,14 +325,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -427,7 +361,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(user_id.as_bytes())? @@ -439,7 +372,6 @@ impl Users { .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, @@ -479,7 +411,6 @@ impl Users { .transpose() } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, @@ -512,7 +443,6 @@ impl Users { Ok(counts) } - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, @@ -535,14 +465,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, @@ -658,7 +580,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, @@ -703,7 +624,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, @@ -742,7 +662,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, @@ -774,7 +693,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, @@ -791,7 +709,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, @@ -813,7 +730,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, @@ -835,7 +751,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? 
@@ -848,15 +763,6 @@ impl Users { }) } - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -884,7 +790,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, @@ -907,7 +812,6 @@ impl Users { Ok(events) } - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, @@ -942,7 +846,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, @@ -968,7 +871,6 @@ impl Users { } /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, @@ -987,7 +889,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? @@ -998,7 +899,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, @@ -1014,25 +914,7 @@ impl Users { }) } - /// Deactivate account - #[tracing::instrument(skip(self, user_id))] - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } - - // Set the password to "" to indicate a deactivated account. Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.userid_password.insert(user_id.as_bytes(), &[])?; - - // TODO: Unhook 3PID - Ok(()) - } - /// Creates a new sync filter. Returns the filter id. 
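// --- Aside (not part of the patch; illustrative): `get_devicelist_version`
// above reads a big-endian u64 counter that is bumped when devices are added
// or removed, so other parts of the server can notice device-list changes.
// The real code goes through the key-value tree's helpers; this is only a
// standalone sketch of the encoding.
use std::convert::TryFrom;

fn bump_devicelist_version(current: Option<&[u8]>) -> Result<[u8; 8], &'static str> {
    let next = match current {
        None => 1_u64,
        Some(bytes) => {
            let arr = <[u8; 8]>::try_from(bytes)
                .map_err(|_| "Invalid devicelistversion in db.")?;
            u64::from_be_bytes(arr) + 1
        }
    };
    Ok(next.to_be_bytes()) // stored back as big-endian bytes
}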
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, @@ -1052,7 +934,6 @@ impl Users { Ok(filter_id) } - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, @@ -1072,30 +953,3 @@ impl Users { } } } - -/// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, - user_id: &UserId, - allowed_signatures: F, -) -> Result<(), Error> { - if let Some(signatures) = cross_signing_key - .get_mut("signatures") - .and_then(|v| v.as_object_mut()) - { - // Don't allocate for the full size of the current signatures, but require - // at most one resize if nothing is dropped - let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let id = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if id == user_id || allowed_signatures(id) { - signatures.insert(user, signature); - } - } - } - - Ok(()) -} diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs new file mode 100644 index 0000000..fe57451 --- /dev/null +++ b/src/service/appservice/data.rs @@ -0,0 +1,17 @@ +pub trait Data { + /// Registers an appservice and returns the ID to the caller + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + pub fn unregister_appservice(&self, service_name: &str) -> Result<()>; + + pub fn get_registration(&self, id: &str) -> Result>; + + pub fn iter_ids(&self) -> Result> + '_>; + + pub fn all(&self) -> Result>; +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs new file mode 100644 index 0000000..ec4ffc5 --- /dev/null +++ b/src/service/appservice/mod.rs @@ -0,0 +1,36 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + /// Registers an appservice and returns the ID to the caller + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + self.db.register_appservice(yaml) + } + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.db.unregister_appservice(service_name) + } + + pub fn get_registration(&self, id: &str) -> Result> { + self.db.get_registration(id) + } + + pub fn iter_ids(&self) -> Result> + '_> { + self.db.iter_ids() + } + + pub fn all(&self) -> Result> { + self.db.all() + } +} diff --git a/src/service/globals.rs b/src/service/globals.rs index 7e09128..2b47e5b 100644 --- a/src/service/globals.rs +++ b/src/service/globals.rs @@ -1,3 +1,8 @@ +mod data; +pub use data::Data; + +use crate::service::*; + use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ @@ -32,10 +37,11 @@ type SyncHandle = ( Receiver>>, // rx ); -pub struct Globals { +pub struct Service { + db: D, + pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, - pub(super) globals: Arc, pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, @@ -44,7 +50,6 @@ pub struct Globals { default_client: reqwest::Client, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, - pub(super) 
server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, @@ -87,7 +92,8 @@ impl Default for RotationHandler { } } -impl Globals { + +impl Service<_> { pub fn load( globals: Arc, server_signingkeys: Arc, diff --git a/src/service/pusher.rs b/src/service/pusher.rs deleted file mode 100644 index 6b906c2..0000000 --- a/src/service/pusher.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) - } - - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? 
- .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? 
- .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) -} diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 6b906c2..468ad8b 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,348 +1,12 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; +pub trait Data { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; -use std::{fmt::Debug, mem, sync::Arc}; + pub fn get_pusher(&self, senderkey: &[u8]) -> Result>; -use super::abstraction::Tree; + pub fn get_pushers(&self, sender: &UserId) -> Result>; -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) - } - - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, sender))] pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? 
- .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? 
- .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) + ) -> impl Iterator> + 'a; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 6b906c2..342763e 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,348 +1,287 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; +mod data; +pub use data::Data; -use std::{fmt::Debug, mem, sync::Arc}; +use crate::service::*; -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, +pub struct Service { + db: D, } -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] +impl Service<_> { pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) + self.db.set_pusher(sender, pusher) } - #[tracing::instrument(skip(self, senderkey))] pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() + self.db.get_pusher(senderkey) } - #[tracing::instrument(skip(self, sender))] pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() + self.db.get_pushers(sender) } - #[tracing::instrument(skip(self, sender))] pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? 
- .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); + self.db.get_pusher_senderkeys(sender) } - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? 
- .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + #[tracing::instrument(skip(globals, destination, request))] + pub async fn send_request( + globals: &crate::database::globals::Globals, + destination: &str, + request: T, + ) -> Result + where + T: Debug, { - notifi.prio = NotificationPriority::High + let destination = destination.replace("/_matrix/push/v1/notify", ""); + + let http_request = request + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + })? + .map(|body| body.freeze()); + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + // TODO: we could keep this very short and let expo backoff do it's thing... 
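The commented-out `timeout_mut` line that follows hints at what the TODO above has in mind: cap each push request with a short deadline and leave retries to the sender's backoff loop. A minimal sketch of that idea, kept outside the patch; the five-second value and the helper name are assumptions taken from the commented-out code, not behaviour this commit enables.

    // Hypothetical helper illustrating the TODO above: give each push request
    // a short per-request timeout and let the sending loop's backoff retry.
    // The 5-second value mirrors the commented-out line in `send_request` and
    // is an assumption, not something introduced by this patch.
    use std::time::Duration;

    fn apply_push_timeout(request: &mut reqwest::Request) {
        // `reqwest::Request::timeout_mut` returns `&mut Option<Duration>`.
        *request.timeout_mut() = Some(Duration::from_secs(5));
    }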
+ //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + let response = globals.default_client().execute(reqwest_request).await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from_http_response( + http_response_builder + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + info!( + "Push gateway returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Push gateway returned bad response.") + }) + } + Err(e) => Err(e.into()), + } } - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); + #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] + pub async fn send_push_notice( + user: &UserId, + unread: UInt, + pusher: &get_pushers::v3::Pusher, + ruleset: Ruleset, + pdu: &PduEvent, + db: &Database, + ) -> Result<()> { + let mut notify = None; + let mut tweaks = Vec::new(); - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + let power_levels: RoomPowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + for action in get_actions( + user, + &ruleset, + &power_levels, + &pdu.to_sync_room_event(), + &pdu.room_id, + db, + )? { + let n = match action { + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, + Action::SetTweak(tweak) => { + tweaks.push(tweak.clone()); + continue; + } + }; + + if notify.is_some() { + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, + )); + } + + notify = Some(n); } - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); + if notify == Some(true) { + send_notice(unread, pusher, tweaks, pdu, db).await?; + } + // Else the event triggered no actions - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + Ok(()) } - // TODO: email + #[tracing::instrument(skip(user, ruleset, pdu, db))] + pub fn get_actions<'a>( + user: &UserId, + ruleset: &'a Ruleset, + power_levels: &RoomPowerLevelsEventContent, + pdu: &Raw, + room_id: &RoomId, + db: &Database, + ) -> Result<&'a [Action]> { + let ctx = PushConditionRoomCtx { + room_id: room_id.to_owned(), + member_count: 10_u32.into(), // TODO: get member count efficiently + user_display_name: db + .users + .displayname(user)? + .unwrap_or_else(|| user.localpart().to_owned()), + users_power_levels: power_levels.users.clone(), + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications.clone(), + }; - Ok(()) + Ok(ruleset.get_actions(pdu, &ctx)) + } + + #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] + async fn send_notice( + unread: UInt, + pusher: &get_pushers::v3::Pusher, + tweaks: Vec, + event: &PduEvent, + db: &Database, + ) -> Result<()> { + // TODO: email + if pusher.kind == PusherKind::Email { + return Ok(()); + } + + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. can pusher/devices have conflicting formats + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = &pusher.data.url { + url + } else { + error!("Http Pusher must have URL specified."); + return Ok(()); + }; + + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = data_minus_url; + + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == RoomEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + send_request( + &db.globals, + url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); + + if event.kind == RoomEventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + } + + let user_name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + + let room_name = if let Some(room_name_pdu) = + db.rooms + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
+ .name + } else { + None + }; + + notifi.room_name = room_name.as_deref(); + + send_request( + &db.globals, + url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } + + // TODO: email + + Ok(()) + } } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index c44d357..a8e87b9 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -222,4 +231,4 @@ } }) } - +} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 5b423d2..4e5c379 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,816 +1,41 @@ - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - +pub trait Data { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } + fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } + pub fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. 
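Every service touched by this patch follows the same shape: a backend-agnostic `Data` trait that describes the persistence calls, next to a `Service` struct that owns a `db` field and forwards each method to it. A minimal, self-contained sketch of that shape, using the appservice registration lookup as the example; the `D: Data` spelling of the generic bound and the crate-wide `Result` alias are assumptions filled in for the sketch.

    // Sketch of the Data-trait / Service-wrapper split, assuming a `D: Data`
    // generic bound and the crate-wide `Result` alias used elsewhere in Conduit.
    use crate::Result; // assumed alias: Result<T> = Result<T, Error>

    pub trait Data {
        fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>>;
    }

    pub struct Service<D: Data> {
        db: D,
    }

    impl<D: Data> Service<D> {
        /// The service layer only forwards to the persistence layer; all
        /// key and tree handling stays behind the `Data` trait.
        pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
            self.db.get_registration(id)
        }
    }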
pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } + pub fn get_pdu_id(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } + pub fn get_pdu(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } + pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Creates a new persisted data unit and adds it to a room. 
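The `pdu_count` accessor above relies on the pdu_id layout that `append_pdu` builds further down: eight big-endian bytes of `shortroomid` followed by eight big-endian bytes of the global count. A standalone sketch of that decoding step, assuming a well-formed pdu_id; the real implementation goes through `utils::u64_from_bytes` and the crate's error type rather than returning an `Option`.

    // Illustrative decoder for the pdu_id layout used in this patch:
    //   pdu_id = shortroomid (8 bytes, big-endian) ++ count (8 bytes, big-endian)
    // Returns None instead of the crate's Error type; purely a sketch.
    use std::convert::TryInto;

    fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
        let start = pdu_id.len().checked_sub(8)?; // 8 == size_of::<u64>()
        let tail: [u8; 8] = pdu_id[start..].try_into().ok()?;
        Some(u64::from_be_bytes(tail))
    }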
- /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? 
- .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? { - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - } - - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. @@ -820,32 +45,7 @@ user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } + ) -> Result, PduEvent)>> + 'a>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
@@ -855,83 +55,12 @@ user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); + ) -> Result, PduEvent)>> + 'a>; - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - + ) -> Result, PduEvent)>> + 'a>; +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5b423d2..c6393c6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,4 +1,14 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + /* /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { @@ -20,38 +30,15 @@ .next() .transpose() } + */ #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } + self.db.last_timeline_count(sender_user: &UserId, room_id: &RoomId) } // TODO Is this the same as the function above? + /* #[tracing::instrument(skip(self))] pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { let prefix = self @@ -71,33 +58,16 @@ .transpose() .map(|op| op.unwrap_or_default()) } - - + */ /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() + self.db.get_pdu_count(event_id) } /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_pdu_json(event_id) } /// Returns the json of a pdu. @@ -105,122 +75,49 @@ &self, event_id: &EventId, ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_non_outlier_pdu(event_id) } /// Returns the pdu's id. pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) + self.db.get_pdu_id(event_id) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_non_outlier_pdu(event_id) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } + self.db.get_pdu(event_id) } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. 
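The new `last_timeline_count` wrapper above is written with type ascriptions in the call (`self.db.last_timeline_count(sender_user: &UserId, room_id: &RoomId)`), which rustc rejects; the bindings presumably just need to be forwarded as-is. The `replace_pdu` wrapper a little further down has the same shape and additionally forwards to `self.db.pdu_count`, where `self.db.replace_pdu(pdu_id, pdu)` looks like the intended call. A corrected sketch of the first wrapper, assuming the `Data` trait gains a matching method and that the old in-memory count cache moves behind it:

    #[tracing::instrument(skip(self))]
    pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
        // Plain delegation; the lasttimelinecount_cache from the removed body is assumed
        // to live inside the Data implementation from here on.
        self.db.last_timeline_count(sender_user, room_id)
    }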
pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.db.get_pdu_from_id(pdu_id) } /// Returns the pdu as a `BTreeMap`. pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.db.get_pdu_json_from_id(pdu_id) } /// Returns the `count` of this pdu's id. pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + self.db.pdu_count(pdu_id) } /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } + self.db.pdu_count(pdu_id, pdu: &PduEvent) } /// Creates a new persisted data unit and adds it to a room. @@ -803,7 +700,6 @@ } /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] pub fn all_pdus<'a>( &'a self, user_id: &UserId, @@ -814,37 +710,13 @@ /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, since: u64, ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_since(user_id, room_id, since) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -856,32 +728,7 @@ room_id: &RoomId, until: u64, ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_until(user_id, room_id, until) } /// Returns an iterator over all events and their token in a room that happened after the event @@ -893,32 +740,7 @@ room_id: &RoomId, from: u64, ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_after(user_id, room_id, from) } /// Replace a PDU with the redacted form. diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs new file mode 100644 index 0000000..f1ff5f8 --- /dev/null +++ b/src/service/transaction_ids/data.rs @@ -0,0 +1,16 @@ +pub trait Data { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()>; + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>>; +} diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs new file mode 100644 index 0000000..d944847 --- /dev/null +++ b/src/service/transaction_ids/mod.rs @@ -0,0 +1,44 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + self.userdevicetxnid_response.insert(&key, data)?; + + Ok(()) + } + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + // If there's no entry, this is a new transaction + self.userdevicetxnid_response.get(&key) + } +} diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 7c15f1d..d99d032 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,396 +1,86 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, error::ErrorKind, 
filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; - -use super::abstraction::Tree; - -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count -} - -impl Users { +pub trait Data { /// Check if a user has an account on this homeserver. - #[tracing::instrument(skip(self, user_id))] - pub fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) - } + pub fn exists(&self, user_id: &UserId) -> Result; /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] - pub fn is_deactivated(&self, user_id: &UserId) -> Result { - Ok(self - .userid_password - .get(user_id.as_bytes())? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not exist.", - ))? - .is_empty()) - } + pub fn is_deactivated(&self, user_id: &UserId) -> Result; /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) - } + ) -> Result; /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) - } + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the number of users registered on this server. - #[tracing::instrument(skip(self))] - pub fn count(&self) -> Result { - Ok(self.userid_password.iter().count()) - } + pub fn count(&self) -> Result; /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] - pub fn find_from_token(&self, token: &str) -> Result, String)>> { - self.token_userdeviceid - .get(token.as_bytes())? 
- .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts.next().ok_or_else(|| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?; - let device_bytes = parts.next().ok_or_else(|| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?; - - Ok(Some(( - UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?, - utils::string_from_bytes(device_bytes).map_err(|_| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?, - ))) - }) - } + pub fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] - pub fn iter(&self) -> impl Iterator>> + '_ { - self.userid_password.iter().map(|(bytes, _)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) - } + pub fn iter(&self) -> impl Iterator>> + '_; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] - pub fn list_local_users(&self) -> Result> { - let users: Vec = self - .userid_password - .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) - .collect(); - Ok(users) - } + pub fn list_local_users(&self) -> Result>; /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } - } + fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] - pub fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") - })?)) - }) - } + pub fn password_hash(&self, user_id: &UserId) -> Result>; /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { - self.userid_password - .insert(user_id.as_bytes(), hash.as_bytes())?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.userid_password.insert(user_id.as_bytes(), b"")?; - Ok(()) - } - } + pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] - pub fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Displayname in db is invalid.") - })?)) - }) - } + pub fn displayname(&self, user_id: &UserId) -> Result>; /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - if let Some(displayname) = displayname { - self.userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes())?; - } else { - self.userid_displayname.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn avatar_url(&self, user_id: &UserId) -> Result>> { - self.userid_avatarurl - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - s.try_into() - .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) - }) - .transpose() - } + pub fn avatar_url(&self, user_id: &UserId) -> Result>>; /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { - if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; - } else { - self.userid_avatarurl.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; /// Get the blurhash of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn blurhash(&self, user_id: &UserId) -> Result> { - self.userid_blurhash - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - - Ok(s) - }) - .transpose() - } + pub fn blurhash(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. 
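For orientation while reading these deletions: the tree-backed bodies being removed from users/data.rs are expected to reappear in a key-value backend that implements the new `Data` trait, with the trait itself keeping only the signatures. A hypothetical sketch of one such method; `KeyValueUsers` and its `userid_displayname` field are illustrative names, not part of this patch:

    impl Data for KeyValueUsers {
        fn displayname(&self, user_id: &UserId) -> Result<Option<String>> {
            self.userid_displayname
                .get(user_id.as_bytes())?
                .map(|bytes| {
                    utils::string_from_bytes(&bytes)
                        .map_err(|_| Error::bad_database("Displayname in db is invalid."))
                })
                .transpose()
        }

        // ...the remaining trait methods follow the same get/insert/remove pattern
        // over the userid_* and userdeviceid_* trees shown in the removed code.
    }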
- #[tracing::instrument(skip(self, user_id, blurhash))] - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - if let Some(blurhash) = blurhash { - self.userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes())?; - } else { - self.userid_blurhash.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; /// Adds a new device to a user. - #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, - ) -> Result<()> { - // This method should never be called for nonexistent users. - assert!(self.exists(user_id)?); - - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: None, // TODO - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - )?; - - self.set_token(user_id, device_id, token)?; - - Ok(()) - } + ) -> Result<()>; /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Remove tokens - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.userdeviceid_token.remove(&userdeviceid)?; - self.token_userdeviceid.remove(&old_token)?; - } - - // Remove todevice events - let mut prefix = userdeviceid.clone(); - prefix.push(0xff); - - for (key, _) in self.todeviceid_events.scan_prefix(prefix) { - self.todeviceid_events.remove(&key)?; - } - - // TODO: Remove onetimekeys - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.remove(&userdeviceid)?; - - Ok(()) - } + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - // All devices have metadata - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - }) - } + ) -> impl Iterator>> + 'a; /// Replaces the access token of one device. 
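All of the device bookkeeping above (and the set_token body that follows) hangs off one composite key: the user id, a 0xff separator, then the device id. A small sketch of that key and the two-way token mapping it feeds, using the Tree layout from the removed code; the `userdeviceid_key` helper name is invented here for illustration:

    fn userdeviceid_key(user_id: &UserId, device_id: &DeviceId) -> Vec<u8> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(device_id.as_bytes());
        key
    }

    // Tokens are stored in both directions so lookups work from either side:
    //   userdeviceid_token:  userdeviceid -> token   (consulted when rotating a token)
    //   token_userdeviceid:  token -> userdeviceid   (consulted by find_from_token)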
- #[tracing::instrument(skip(self, user_id, device_id, token))] - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; - // All devices have metadata - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.token_userdeviceid.remove(&old_token)?; - // It will be removed from userdeviceid_token by the insert later - } - - // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; - - Ok(()) - } - - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -398,121 +88,24 @@ impl Users { one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + ) -> Result<()>; - // All devices have metadata - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&key)?.is_some()); + pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; - key.push(0xff); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - )?; - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id))] - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result, Raw)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); + ) -> Result, Raw)>>; - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - self.onetimekeyid_onetimekeys - .scan_prefix(prefix) - .next() - .map(|(key, value)| { - self.onetimekeyid_onetimekeys.remove(&key)?; - - Ok(( - serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) - .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, - )) - }) - .transpose() - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + ) -> Result>; - let mut counts = BTreeMap::new(); - - for algorithm in - self.onetimekeyid_onetimekeys - .scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::>( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("OneTimeKey ID in db is invalid.") - })?, - ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? - .algorithm(), - ) - }) - { - *counts.entry(algorithm?).or_default() += UInt::from(1_u32); - } - - Ok(counts) - } - - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, @@ -520,29 +113,8 @@ impl Users { device_keys: &Raw, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + ) -> Result<()>; - self.keyid_key.insert( - &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - )?; - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, @@ -551,114 +123,8 @@ impl Users { user_signing_key: &Option>, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - // TODO: Check signatures + ) -> Result<()>; - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - // Master key - let mut master_key_ids = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
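On the one-time-key methods being turned into signatures here: the storage key is user_id ++ 0xff ++ device_id ++ 0xff ++ the JSON-serialized DeviceKeyId (which is why the old take_one_time_key prefix had to push a bare quotation mark before the algorithm), and counting simply groups that suffix by algorithm. A compressed sketch of the counting step, assuming the same types as the removed body:

    // Sketch of count_one_time_keys: walk everything under the userdeviceid prefix
    // and bucket the trailing key id by algorithm.
    let mut counts = BTreeMap::<DeviceKeyAlgorithm, UInt>::new();
    for (key_bytes, _) in self.onetimekeyid_onetimekeys.scan_prefix(userdeviceid) {
        let key_id: Box<DeviceKeyId> = serde_json::from_slice(
            key_bytes
                .rsplit(|&b| b == 0xff)
                .next()
                .ok_or_else(|| Error::bad_database("OneTimeKey ID in db is invalid."))?,
        )
        .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))?;
        *counts.entry(key_id.algorithm()).or_default() += UInt::from(1_u32);
    }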
- .keys - .into_values(); - - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - - self.keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes())?; - - self.userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key)?; - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") - })? - .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &self_signing_key_key, - self_signing_key.json().get().as_bytes(), - )?; - - self.userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key)?; - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") - })? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained no key.", - ))?; - - if user_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained more than one key.", - )); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &user_signing_key_key, - user_signing_key.json().get().as_bytes(), - )?; - - self.userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key)?; - } - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, @@ -667,196 +133,42 @@ impl Users { sender_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = target_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(key_id.as_bytes()); + ) -> Result<()>; - let mut cross_signing_key: serde_json::Value = - serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to sign nonexistent key.", - ))?) - .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? - .as_object_mut() - .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? 
- .entry(sender_id.to_owned()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? - .insert(signature.0, signature.1.into()); - - self.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - )?; - - // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { - let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xff); + ) -> impl Iterator>> + 'a; - let mut start = prefix.clone(); - start.extend_from_slice(&(from + 1).to_be_bytes()); - - let to = to.unwrap_or(u64::MAX); - - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to - } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); - false - } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) - } - - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - // Don't send key updates to unencrypted rooms - if rooms - .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? - .is_none() - { - continue; - } + ) -> Result<()>; - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); + ) -> Result>>; - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("DeviceKeys in db are invalid.") - })?)) - }) - } - - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, - ) -> Result>> { - self.userid_masterkeyid - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + ) -> Result>>; - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) - } - - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, - ) -> Result>> { - self.userid_selfsigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + ) -> Result>>; - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) - } + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; - #[tracing::instrument(skip(self, user_id))] - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?)) - }) - }) - } - - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -865,237 +177,52 @@ impl Users { event_type: &str, content: serde_json::Value, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + ) -> Result<()>; - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); - - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.todeviceid_events.insert(&key, &value)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { - let mut events = Vec::new(); + ) -> Result>>; - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, - ); - } - - Ok(events) - } - - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + ) -> Result<()>; - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - for (key, _) in self - .todeviceid_events - .iter_from(&last, 
true) // this includes last - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) - .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, count)| count <= until) - { - self.todeviceid_events.remove(&key)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, device: &Device, - ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - )?; - - Ok(()) - } + ) -> Result<()>; /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + ) -> Result>; - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") - })?)) - }) - } + pub fn get_devicelist_version(&self, user_id: &UserId) -> Result>; - #[tracing::instrument(skip(self, user_id))] - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) - .map(Some) - }) - } - - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) - } - - /// Deactivate account - #[tracing::instrument(skip(self, user_id))] - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } - - // Set the password to "" to indicate a deactivated account. Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.userid_password.insert(user_id.as_bytes(), &[])?; - - // TODO: Unhook 3PID - Ok(()) - } + ) -> impl Iterator> + 'a; /// Creates a new sync filter. Returns the filter id. 
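Worth keeping in mind as these bodies move behind the trait: account deactivation is encoded purely as an empty password hash, and both is_deactivated and list_local_users lean on that invariant. A minimal sketch of the convention, against the same userid_password tree as the removed code:

    // Deactivate: calculate_hash never yields an empty string, so an empty value
    // doubles as the "deactivated" marker (see the removed deactivate_account above).
    self.userid_password.insert(user_id.as_bytes(), &[])?;

    // Deactivated check: present-but-empty means deactivated, absent means no such user.
    let deactivated = self
        .userid_password
        .get(user_id.as_bytes())?
        .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist."))?
        .is_empty();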
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, - ) -> Result { - let filter_id = utils::random_string(4); + ) -> Result; - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - self.userfilterid_filter.insert( - &key, - &serde_json::to_vec(&filter).expect("filter is valid json"), - )?; - - Ok(filter_id) - } - - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, filter_id: &str, - ) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - let raw = self.userfilterid_filter.get(&key)?; - - if let Some(raw) = raw { - serde_json::from_slice(&raw) - .map_err(|_| Error::bad_database("Invalid filter event in db.")) - } else { - Ok(None) - } - } -} - -/// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, - user_id: &UserId, - allowed_signatures: F, -) -> Result<(), Error> { - if let Some(signatures) = cross_signing_key - .get_mut("signatures") - .and_then(|v| v.as_object_mut()) - { - // Don't allocate for the full size of the current signatures, but require - // at most one resize if nothing is dropped - let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let id = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if id == user_id || allowed_signatures(id) { - signatures.insert(user, signature); - } - } - } - - Ok(()) + ) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 7c15f1d..93d6ea5 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,276 +1,107 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; +mod data; +pub use data::Data; -use super::abstraction::Tree; +use crate::service::*; -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count +pub struct Service { + db: D, } -impl Users { +impl Service<_> { /// Check if a user has an account on this homeserver. 
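As in the timeline module, the service shell here is spelled `impl Service<_>`, which rustc does not accept in an impl header; the delegating wrappers below only make sense with a type parameter bounded by the new Data trait. A sketch of that intended shape, offered as one plausible reading of the patch rather than a confirmed design:

    pub struct Service<D: Data> {
        db: D,
    }

    impl<D: Data> Service<D> {
        /// Check if a user has an account on this homeserver.
        pub fn exists(&self, user_id: &UserId) -> Result<bool> {
            self.db.exists(user_id)
        }

        // ...the remaining wrappers delegate to self.db the same way...
    }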
- #[tracing::instrument(skip(self, user_id))] pub fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) + self.db.exists(user_id) } /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] pub fn is_deactivated(&self, user_id: &UserId) -> Result { - Ok(self - .userid_password - .get(user_id.as_bytes())? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not exist.", - ))? - .is_empty()) + self.db.is_deactivated(user_id) } /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) + self.db.is_admin(user_id) } /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) + self.db.set_password(user_id, password) } /// Returns the number of users registered on this server. - #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { - Ok(self.userid_password.iter().count()) + self.db.count() } /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { - self.token_userdeviceid - .get(token.as_bytes())? - .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts.next().ok_or_else(|| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?; - let device_bytes = parts.next().ok_or_else(|| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?; - - Ok(Some(( - UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?, - utils::string_from_bytes(device_bytes).map_err(|_| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?, - ))) - }) + self.db.find_from_token(token) } /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { - self.userid_password.iter().map(|(bytes, _)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) + self.db.iter() } /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] pub fn list_local_users(&self) -> Result> { - let users: Vec = self - .userid_password - .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) - .collect(); - Ok(users) + self.db.list_local_users() } /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) 
returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } + self.db.get_username_with_valid_password(username, password) } /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") - })?)) - }) + self.db.password_hash(user_id) } /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { - self.userid_password - .insert(user_id.as_bytes(), hash.as_bytes())?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.userid_password.insert(user_id.as_bytes(), b"")?; - Ok(()) - } + self.db.set_password(user_id, password) } /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Displayname in db is invalid.") - })?)) - }) + self.db.displayname(user_id) } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - if let Some(displayname) = displayname { - self.userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes())?; - } else { - self.userid_displayname.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_displayname(user_id, displayname) } /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] pub fn avatar_url(&self, user_id: &UserId) -> Result>> { - self.userid_avatarurl - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - s.try_into() - .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) - }) - .transpose() + self.db.avatar_url(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { - if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; - } else { - self.userid_avatarurl.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_avatar_url(user_id, avatar_url) } /// Get the blurhash of a user. 
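One mismatch to note in the wrappers here: is_admin now takes only the user id and calls self.db.is_admin(user_id), while the Data trait earlier in this patch still declares is_admin with rooms and globals parameters, so the two sides do not line up yet; the key-management wrappers further down (add_one_time_key keeps a globals argument it never forwards, take_one_time_key and add_device_keys drop it entirely) show the same tension. A sketch of the variant that matches the trait as currently written; trimming the trait instead is equally plausible, and this patch does not settle which way it goes:

    pub fn is_admin(
        &self,
        user_id: &UserId,
        rooms: &super::rooms::Rooms,
        globals: &super::globals::Globals,
    ) -> Result<bool> {
        self.db.is_admin(user_id, rooms, globals)
    }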
- #[tracing::instrument(skip(self, user_id))] pub fn blurhash(&self, user_id: &UserId) -> Result> { - self.userid_blurhash - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - - Ok(s) - }) - .transpose() + self.db.blurhash(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, blurhash))] pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - if let Some(blurhash) = blurhash { - self.userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes())?; - } else { - self.userid_blurhash.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_blurhash(user_id, blurhash) } /// Adds a new device to a user. - #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, @@ -278,119 +109,27 @@ impl Users { token: &str, initial_device_display_name: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. - assert!(self.exists(user_id)?); - - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: None, // TODO - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - )?; - - self.set_token(user_id, device_id, token)?; - - Ok(()) + self.db.create_device(user_id, device_id, token, initial_device_display_name) } /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Remove tokens - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.userdeviceid_token.remove(&userdeviceid)?; - self.token_userdeviceid.remove(&old_token)?; - } - - // Remove todevice events - let mut prefix = userdeviceid.clone(); - prefix.push(0xff); - - for (key, _) in self.todeviceid_events.scan_prefix(prefix) { - self.todeviceid_events.remove(&key)?; - } - - // TODO: Remove onetimekeys - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.remove(&userdeviceid)?; - - Ok(()) + self.db.remove_device(user_id, device_id) } /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - // All devices have metadata - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - }) + self.db.all_device_ids(user_id) } /// Replaces the access token of one device. 
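A behavioural note on create_device above: the removed body asserted that the user already exists before writing any device rows, and the new wrapper delegates without that guard, so the check has to survive either in the Data implementation or at this layer. A hedged sketch of keeping it in the service wrapper:

    pub fn create_device(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        token: &str,
        initial_device_display_name: Option<String>,
    ) -> Result<()> {
        // The removed body used assert!(self.exists(user_id)?); keeping the check here
        // preserves the invariant if the backend does not reproduce it.
        debug_assert!(self.db.exists(user_id)?);
        self.db
            .create_device(user_id, device_id, token, initial_device_display_name)
    }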
- #[tracing::instrument(skip(self, user_id, device_id, token))] pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.token_userdeviceid.remove(&old_token)?; - // It will be removed from userdeviceid_token by the insert later - } - - // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; - - Ok(()) + self.db.set_token(user_id, device_id, token) } - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -399,464 +138,103 @@ impl Users { one_time_key_value: &Raw, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&key)?.is_some()); - - key.push(0xff); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - )?; - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - Ok(()) + self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } - #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .unwrap_or(Ok(0)) + self.db.last_one_time_keys_update(user_id) } - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - self.onetimekeyid_onetimekeys - .scan_prefix(prefix) - .next() - .map(|(key, value)| { - self.onetimekeyid_onetimekeys.remove(&key)?; - - Ok(( - serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) - .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, - )) - }) - .transpose() + self.db.take_one_time_key(user_id, device_id, key_algorithm) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - let mut counts = BTreeMap::new(); - - for algorithm in - self.onetimekeyid_onetimekeys - .scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::>( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("OneTimeKey ID in db is invalid.") - })?, - ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? - .algorithm(), - ) - }) - { - *counts.entry(algorithm?).or_default() += UInt::from(1_u32); - } - - Ok(counts) + self.db.count_one_time_keys(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.insert( - &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - )?; - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) + self.db.add_device_keys(user_id, device_id, device_keys) } - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - // TODO: Check signatures - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - // Master key - let mut master_key_ids = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
- .keys - .into_values(); - - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - - self.keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes())?; - - self.userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key)?; - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") - })? - .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &self_signing_key_key, - self_signing_key.json().get().as_bytes(), - )?; - - self.userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key)?; - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") - })? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained no key.", - ))?; - - if user_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained more than one key.", - )); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &user_signing_key_key, - user_signing_key.json().get().as_bytes(), - )?; - - self.userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key)?; - } - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) + self.db.add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) } - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(key_id.as_bytes()); - - let mut cross_signing_key: serde_json::Value = - serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to sign nonexistent key.", - ))?) - .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? - .as_object_mut() - .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? 
- .entry(sender_id.to_owned()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? - .insert(signature.0, signature.1.into()); - - self.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - )?; - - // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; - - Ok(()) + self.db.sign_key(target_id, key_id, signature, sender_id) } - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, ) -> impl Iterator>> + 'a { - let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut start = prefix.clone(); - start.extend_from_slice(&(from + 1).to_be_bytes()); - - let to = to.unwrap_or(u64::MAX); - - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to - } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); - false - } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) + self.db.keys_changed(user_or_room_id, from, to) } - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - // Don't send key updates to unencrypted rooms - if rooms - .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? - .is_none() - { - continue; - } - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - - Ok(()) + self.db.mark_device_key_update(user_id) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("DeviceKeys in db are invalid.") - })?)) - }) + self.db.get_device_keys(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.userid_masterkeyid - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) + self.db.get_master_key(user_id, allow_signatures) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.userid_selfsigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) + self.db.get_self_signing_key(user_id, allowed_signatures) } - #[tracing::instrument(skip(self, user_id))] pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?)) - }) - }) + self.db.get_user_signing_key(user_id) } - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -864,158 +242,57 @@ impl Users { target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); - - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); - - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.todeviceid_events.insert(&key, &value)?; - - Ok(()) + self.db.add_to_device_event(sender, target_user_id, target_device_id, event_type, content) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - let mut events = Vec::new(); - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, - ); - } - - Ok(events) + self.get_to_device_events(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - 
let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - for (key, _) in self - .todeviceid_events - .iter_from(&last, true) // this includes last - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) - .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, count)| count <= until) - { - self.todeviceid_events.remove(&key)?; - } - - Ok(()) + self.db.remove_to_device_events(user_id, device_id, until) } - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, device: &Device, ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - )?; - - Ok(()) + self.db.update_device_metadata(user_id, device_id, device) } /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") - })?)) - }) + self.get_device_metadata(user_id, device_id) } - #[tracing::instrument(skip(self, user_id))] pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) - .map(Some) - }) + self.db.devicelist_version(user_id) } - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) + self.db.all_devices_metadata(user_id) } /// Deactivate account - #[tracing::instrument(skip(self, user_id))] pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { // Remove all associated devices for device_id in self.all_device_ids(user_id) { @@ -1032,44 +309,20 @@ impl Users { } /// Creates a new sync filter. Returns the filter id. 
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, ) -> Result { - let filter_id = utils::random_string(4); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - self.userfilterid_filter.insert( - &key, - &serde_json::to_vec(&filter).expect("filter is valid json"), - )?; - - Ok(filter_id) + self.db.create_filter(user_id, filter) } - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, filter_id: &str, ) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - let raw = self.userfilterid_filter.get(&key)?; - - if let Some(raw) = raw { - serde_json::from_slice(&raw) - .map_err(|_| Error::bad_database("Invalid filter event in db.")) - } else { - Ok(None) - } + self.db.get_filter(user_id, filter_id) } } From fd0ea4bf717174726b895bbc03b9953845869733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 13 Aug 2022 23:29:01 +0200 Subject: [PATCH 1194/1727] feat(database/presence): add skeleton for presence maintenance --- src/database/key_value/rooms/edus/presence.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 61bd9d6..1978ce7 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -100,6 +100,23 @@ impl service::room::edus::presence::Data for KeyValueDatabase { Ok(hashmap) } + + fn presence_maintain(&self, db: Arc>) { + // TODO @M0dEx: move this to a timed tasks module + tokio::spawn(async move { + loop { + select! { + Some(user_id) = self.presence_timers.next() { + // TODO @M0dEx: would it be better to acquire the lock outside the loop? 
+ let guard = db.read().await; + + // TODO @M0dEx: add self.presence_timers + // TODO @M0dEx: maintain presence + } + } + } + }); + } } fn parse_presence_event(bytes: &[u8]) -> Result { @@ -121,4 +138,3 @@ fn parse_presence_event(bytes: &[u8]) -> Result { .map(|timestamp| current_timestamp - timestamp); } } - From 19743ae1950e152e40d88f804b14c32e0a0c2142 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 1195/1727] refactor: prepare service/rooms/user/data.rs from service/rooms/user/mod.rs --- src/service/rooms/user/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/user/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/data.rs similarity index 100% rename from src/service/rooms/user/mod.rs rename to src/service/rooms/user/data.rs From 7e0b8ec0acfb7756b7db28eaf2e9e0cc8bb73662 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 1196/1727] refactor: prepare database/key_value/rooms/user.rs from service/rooms/user/mod.rs --- .../rooms/user/mod.rs => database/key_value/rooms/user.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/user/mod.rs => database/key_value/rooms/user.rs} (100%) diff --git a/src/service/rooms/user/mod.rs b/src/database/key_value/rooms/user.rs similarity index 100% rename from src/service/rooms/user/mod.rs rename to src/database/key_value/rooms/user.rs From 6d981f37a2cfcb07a0fc479daebd92b1375d2c0c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 1197/1727] refactor: prepare database/key_value/rooms/state_accessor.rs from service/rooms/state_accessor/mod.rs --- .../mod.rs => database/key_value/rooms/state_accessor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_accessor/mod.rs => database/key_value/rooms/state_accessor.rs} (100%) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/database/key_value/rooms/state_accessor.rs similarity index 100% rename from src/service/rooms/state_accessor/mod.rs rename to src/database/key_value/rooms/state_accessor.rs From 61f6ac0d66458dba58b28fca264780878806b1f7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:31 +0200 Subject: [PATCH 1198/1727] refactor: prepare service/rooms/state_accessor/data.rs from service/rooms/state_accessor/mod.rs --- src/service/rooms/state_accessor/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/state_accessor/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/data.rs similarity index 100% rename from src/service/rooms/state_accessor/mod.rs rename to src/service/rooms/state_accessor/data.rs From 810a6baf344a0b974d0b2b6a430da982c26034f9 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:31 +0200 Subject: [PATCH 1199/1727] refactor: prepare service/uiaa/data.rs from service/uiaa/mod.rs --- src/service/{uiaa.rs => uiaa/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{uiaa.rs => uiaa/data.rs} (100%) diff --git a/src/service/uiaa.rs b/src/service/uiaa/data.rs similarity index 100% rename from src/service/uiaa.rs rename to src/service/uiaa/data.rs From 213579ee9deb288caa90a48139f976adcfac5f35 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 
13:19:31 +0200 Subject: [PATCH 1200/1727] refactor: prepare database/key_value/uiaa.rs from service/uiaa/mod.rs --- src/{service => database/key_value}/uiaa.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/uiaa.rs (100%) diff --git a/src/service/uiaa.rs b/src/database/key_value/uiaa.rs similarity index 100% rename from src/service/uiaa.rs rename to src/database/key_value/uiaa.rs From 82e7f57b389d011bc8d80f9142f723b3cd1e1ad2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 14 Aug 2022 13:38:21 +0200 Subject: [PATCH 1201/1727] refactor state accessor, state cache, user, uiaa --- .../key_value/rooms/state_accessor.rs | 27 +-- src/database/key_value/rooms/state_cache.rs | 8 + src/database/key_value/rooms/user.rs | 20 +- src/database/key_value/uiaa.rs | 149 +------------ src/service/rooms/state_accessor/data.rs | 141 ++---------- src/service/rooms/state_accessor/mod.rs | 115 ++-------- src/service/rooms/state_cache/data.rs | 3 + src/service/rooms/state_cache/mod.rs | 65 +----- src/service/rooms/user/data.rs | 113 +--------- src/service/rooms/user/mod.rs | 104 ++------- src/service/uiaa/data.rs | 208 +----------------- src/service/uiaa/mod.rs | 96 +------- 12 files changed, 116 insertions(+), 933 deletions(-) create mode 100644 src/database/key_value/rooms/state_cache.rs create mode 100644 src/service/rooms/state_cache/data.rs diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ae26a7c..db81967 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,7 +1,5 @@ - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { +impl service::room::state_accessor::Data for KeyValueDatabase { + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -21,8 +19,7 @@ Ok(result) } - #[tracing::instrument(skip(self))] - pub async fn state_full( + async fn state_full( &self, shortstatehash: u64, ) -> Result>> { @@ -59,8 +56,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( + fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, @@ -86,8 +82,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( + fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, @@ -98,7 +93,7 @@ } /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.eventid_shorteventid .get(event_id.as_bytes())? .map_or(Ok(None), |shorteventid| { @@ -116,8 +111,7 @@ } /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( + async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { @@ -129,8 +123,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- #[tracing::instrument(skip(self))] - pub fn room_state_get_id( + fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, @@ -144,8 +137,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( + fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, @@ -157,4 +149,3 @@ Ok(None) } } - diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs new file mode 100644 index 0000000..3781402 --- /dev/null +++ b/src/database/key_value/rooms/state_cache.rs @@ -0,0 +1,8 @@ +impl service::room::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + } +} diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 976ab5b..52145ce 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,5 @@ - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { +impl service::room::user::Data for KeyValueDatabase { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -13,8 +12,7 @@ Ok(()) } - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -28,8 +26,7 @@ .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -43,7 +40,7 @@ .unwrap_or(Ok(0)) } - pub fn associate_token_shortstatehash( + fn associate_token_shortstatehash( &self, room_id: &RoomId, token: u64, @@ -58,7 +55,7 @@ .insert(&key, &shortstatehash.to_be_bytes()) } - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); @@ -74,8 +71,7 @@ .transpose() } - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( + fn get_shared_rooms<'a>( &'a self, users: Vec>, ) -> Result>> + 'a> { @@ -111,4 +107,4 @@ .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) })) } - +} diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 1237313..4d1dac5 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,149 +1,4 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - 
IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; - -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, -} - -impl Uiaa { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) - } - - pub fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &IncomingAuthData, - uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { - identifier, - password, - .. - }) => { - let username = match identifier { - UserIdOrLocalpart(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } - }; - - let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; - - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - } - IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - } - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } - } - // We didn't break, so this flow succeeded! - completed = true; - } - - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } - - // UIAA was successful! 
Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) - } - +impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, user_id: &UserId, @@ -162,7 +17,7 @@ impl Uiaa { Ok(()) } - pub fn get_uiaa_request( + fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index ae26a7c..a2b76e4 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,160 +1,51 @@ +pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( + async fn state_full( &self, shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( + fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( + fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } + ) -> Result>>; /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( + async fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( + fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( + fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - + ) -> Result>>; +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index ae26a7c..28a49a9 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,24 +1,18 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) + self.db.state_full_ids(shortstatehash) } #[tracing::instrument(skip(self))] @@ -26,36 +20,7 @@ &self, shortstatehash: u64, ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) + self.db.state_full(shortstatehash) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -66,23 +31,7 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? 
{ - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) + self.db.state_get_id(shortstatehash, event_type, state_key) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -93,26 +42,12 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + self.db.pdu_state_get(event_id) } /// Returns the state hash for this pdu. pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) + self.db.pdu_shortstatehash(event_id) } /// Returns the full room state. @@ -121,11 +56,7 @@ &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } + self.db.room_state_full(room_id) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -136,11 +67,7 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + self.db.room_state_get_id(room_id, event_type, state_key) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -151,10 +78,6 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + self.db.room_state_get(room_id, event_type, state_key) } - +} diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs new file mode 100644 index 0000000..166d4f6 --- /dev/null +++ b/src/service/rooms/state_cache/data.rs @@ -0,0 +1,3 @@ +pub trait Data { + fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index e7f457e..778679d 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Update current membership data. #[tracing::instrument(skip(self, last_state, db))] pub fn update_membership( @@ -25,10 +34,6 @@ serverroom_id.push(0xff); serverroom_id.extend_from_slice(room_id.as_bytes()); - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.as_bytes()); @@ -38,7 +43,7 @@ // Check if the user never joined this room if !self.once_joined(user_id, room_id)? 
{ // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + self.db.mark_as_once_joined(user_id, room_id)?; // Check if the room has a predecessor if let Some(predecessor) = self @@ -116,10 +121,6 @@ } } - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -150,10 +151,6 @@ return Ok(()); } - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } self.userroomid_invitestate.insert( &userroom_id, &serde_json::to_vec(&last_state.unwrap_or_default()) @@ -167,16 +164,6 @@ self.roomuserid_leftcount.remove(&roomuser_id)?; } MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } self.userroomid_leftstate.insert( &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), @@ -231,36 +218,6 @@ .unwrap() .insert(room_id.to_owned(), Arc::new(real_users)); - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.appservice_in_room_cache .write() .unwrap() @@ -714,4 +671,4 @@ Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } - +} diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 976ab5b..47a44ee 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,114 +1,21 @@ +pub trait Data { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - pub fn associate_token_shortstatehash( + fn associate_token_shortstatehash( &self, room_id: &RoomId, token: u64, shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + ) -> Result<()>; - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( + fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - + ) -> Result>> + 'a>; +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 976ab5b..45fb355 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,46 +1,23 @@ +mod data; +pub use data::Data; - #[tracing::instrument(skip(self))] +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) + self.db.reset_notification_counts(user_id, room_id) } - #[tracing::instrument(skip(self))] pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) + self.db.notification_count(user_id, room_id) } - #[tracing::instrument(skip(self))] pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) + self.db.highlight_count(user_id, room_id) } pub fn associate_token_shortstatehash( @@ -49,66 +26,17 @@ token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) + self.db.associate_token_shortstatehash(user_id, room_id) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() + self.db.get_token_shortstatehash(room_id, token) } - #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, users: Vec>, ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) + self.db.get_shared_rooms(users) } - +} diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 1237313..40e69bd 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,179 +1,18 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; - -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, -} - -impl Uiaa { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) - } - - pub fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &IncomingAuthData, - uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { - identifier, - password, - .. - }) => { - let username = match identifier { - UserIdOrLocalpart(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } - }; - - let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; - - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! 
Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - } - IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - } - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } - } - // We didn't break, so this flow succeeded! - completed = true; - } - - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } - - // UIAA was successful! Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) - } - +pub trait Data { fn set_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); + ) -> Result<()>; - Ok(()) - } - - pub fn get_uiaa_request( + fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned()) - } + ) -> Option; fn update_uiaa_session( &self, @@ -181,47 +20,12 @@ impl Uiaa { device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } + ) -> Result<()>; fn get_uiaa_session( &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? 
- .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session does not exist.", - ))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) - } + ) -> Result; } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 1237313..593ea5f 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,31 +1,13 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; +mod data; +pub use data::Data; -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; +use crate::service::*; -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, +pub struct Service { + db: D, } -impl Uiaa { +impl Service<_> { /// Creates a new Uiaa session. Make sure the session token is unique. pub fn create( &self, @@ -144,35 +126,13 @@ impl Uiaa { Ok((true, uiaainfo)) } - fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); - - Ok(()) - } - pub fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned()) + self.db.get_uiaa_request(user_id, device_id, session) } fn update_uiaa_session( @@ -182,46 +142,6 @@ impl Uiaa { session: &str, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } - - fn get_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? - .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session does not exist.", - ))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) + self.db.update_uiaa_session(user_id, device_id, session, uiaainfo) } } From 057f8364cc317dc8646043abd6c8ff3ef759625f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 6 Sep 2022 23:15:09 +0200 Subject: [PATCH 1202/1727] fix: some compile time errors Only 174 errors left! 
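
As a rough illustration of the pattern this series is converging on (this is a sketch, not code from the patch): route handlers stop receiving a `db: DatabaseGuard` argument and instead call a process-wide `services()` accessor, while each service reaches storage through a small `Data` trait implemented by the key_value layer. The sketch below approximates only the accessor side, using `std::sync::OnceLock` and made-up names (`Services`, `Globals`, `SERVICES`, `init`); the real crate adds `lazy_static` as a dependency (see the Cargo.toml hunk below) and wires up far more services.

    // Minimal, self-contained sketch of the global-accessor pattern.
    // All names here are illustrative assumptions, not Conduit's actual API.
    use std::sync::OnceLock;

    // Stand-in for one per-domain service (globals, users, uiaa, ...).
    pub struct Globals {
        server_name: String,
    }

    impl Globals {
        pub fn server_name(&self) -> &str {
            &self.server_name
        }
    }

    // Aggregate that replaces the old `Database`/`DatabaseGuard` parameter.
    pub struct Services {
        pub globals: Globals,
    }

    static SERVICES: OnceLock<Services> = OnceLock::new();

    // Called once at startup, after the database backend has been opened.
    pub fn init(services: Services) {
        SERVICES
            .set(services)
            .unwrap_or_else(|_| panic!("services() initialized twice"));
    }

    // What handlers call instead of threading `db` through every signature,
    // e.g. `services().globals.server_name()`.
    pub fn services() -> &'static Services {
        SERVICES.get().expect("services() called before init()")
    }

    fn main() {
        init(Services {
            globals: Globals {
                server_name: "example.org".to_owned(),
            },
        });

        // Mirrors call sites converted in this patch.
        assert_eq!(services().globals.server_name(), "example.org");
    }

The payoff of the indirection is that handler signatures shrink to just the request body, and the storage backend can be swapped behind the `Data` traits without touching the handlers.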
--- Cargo.lock | 6 +- Cargo.toml | 2 + src/api/appservice_server.rs | 7 +- src/api/client_server/account.rs | 86 +-- src/api/client_server/alias.rs | 40 +- src/api/client_server/backup.rs | 107 ++- src/api/client_server/capabilities.rs | 13 +- src/api/client_server/config.rs | 20 +- src/api/client_server/context.rs | 37 +- src/api/client_server/device.rs | 37 +- src/api/client_server/directory.rs | 40 +- src/api/client_server/filter.rs | 8 +- src/api/client_server/keys.rs | 83 +-- src/api/client_server/media.rs | 47 +- src/api/client_server/membership.rs | 610 ++++++++---------- src/api/client_server/message.rs | 37 +- src/api/client_server/presence.rs | 19 +- src/api/client_server/profile.rs | 81 +-- src/api/client_server/push.rs | 58 +- src/api/client_server/read_marker.rs | 31 +- src/api/client_server/redact.rs | 10 +- src/api/client_server/report.rs | 9 +- src/api/client_server/room.rs | 146 ++--- src/api/client_server/search.rs | 11 +- src/api/client_server/session.rs | 33 +- src/api/client_server/state.rs | 43 +- src/api/client_server/sync.rs | 142 ++-- src/api/client_server/tag.rs | 21 +- src/api/client_server/to_device.rs | 23 +- src/api/client_server/typing.rs | 12 +- src/api/client_server/user_directory.rs | 15 +- src/api/client_server/voip.rs | 15 +- src/api/mod.rs | 4 + src/api/ruma_wrapper/axum.rs | 18 +- src/api/server_server.rs | 403 +++++------- src/database/abstraction.rs | 4 +- src/database/abstraction/rocksdb.rs | 8 +- src/database/abstraction/sqlite.rs | 8 +- src/database/key_value/appservice.rs | 4 +- src/database/key_value/mod.rs | 14 +- src/database/key_value/pusher.rs | 4 + src/database/key_value/rooms/alias.rs | 8 +- src/database/key_value/rooms/directory.rs | 10 +- src/database/key_value/rooms/edus/mod.rs | 3 + src/database/key_value/rooms/edus/presence.rs | 12 +- .../key_value/rooms/edus/read_receipt.rs | 14 +- src/database/key_value/rooms/edus/typing.rs | 13 +- src/database/key_value/rooms/lazy_load.rs | 6 +- src/database/key_value/rooms/metadata.rs | 6 +- src/database/key_value/rooms/mod.rs | 7 +- src/database/key_value/rooms/outlier.rs | 6 +- src/database/key_value/rooms/pdu_metadata.rs | 8 +- src/database/key_value/rooms/search.rs | 12 +- src/database/key_value/rooms/state.rs | 23 +- .../key_value/rooms/state_accessor.rs | 10 +- src/database/key_value/rooms/state_cache.rs | 10 +- .../key_value/rooms/state_compressor.rs | 19 +- src/database/key_value/rooms/timeline.rs | 31 +- src/database/key_value/rooms/user.rs | 6 +- src/database/key_value/transaction_ids.rs | 10 +- src/database/key_value/uiaa.rs | 6 + src/database/key_value/users.rs | 138 ++-- src/database/mod.rs | 221 ++++--- src/lib.rs | 29 +- src/main.rs | 71 +- src/service/account_data.rs | 12 +- src/service/admin.rs | 179 +++-- src/service/appservice/data.rs | 11 +- src/service/key_backups.rs | 21 +- src/service/media.rs | 1 - src/service/mod.rs | 28 + src/service/pdu.rs | 5 +- src/service/pusher/data.rs | 8 +- src/service/pusher/mod.rs | 64 +- src/service/rooms/alias/data.rs | 10 +- src/service/rooms/alias/mod.rs | 7 +- src/service/rooms/auth_chain/data.rs | 2 + src/service/rooms/auth_chain/mod.rs | 2 + src/service/rooms/directory/data.rs | 2 + src/service/rooms/directory/mod.rs | 9 +- src/service/rooms/edus/mod.rs | 6 + src/service/rooms/edus/presence/data.rs | 4 + src/service/rooms/edus/presence/mod.rs | 5 +- src/service/rooms/edus/read_receipt/data.rs | 2 + src/service/rooms/edus/read_receipt/mod.rs | 7 +- src/service/rooms/edus/typing/data.rs | 4 + src/service/rooms/edus/typing/mod.rs | 2 +- 
src/service/rooms/event_handler/mod.rs | 496 ++++++++------ src/service/rooms/lazy_loading/data.rs | 2 + src/service/rooms/lazy_loading/mod.rs | 7 +- src/service/rooms/metadata/data.rs | 2 + src/service/rooms/metadata/mod.rs | 1 + src/service/rooms/mod.rs | 249 +------ src/service/rooms/outlier/data.rs | 4 + src/service/rooms/outlier/mod.rs | 3 +- src/service/rooms/pdu_metadata/data.rs | 4 + src/service/rooms/pdu_metadata/mod.rs | 3 + src/service/rooms/search/data.rs | 8 +- src/service/rooms/search/mod.rs | 3 +- src/service/rooms/short/mod.rs | 8 +- src/service/rooms/state/data.rs | 12 +- src/service/rooms/state/mod.rs | 68 +- src/service/rooms/state_accessor/data.rs | 6 + src/service/rooms/state_accessor/mod.rs | 9 +- src/service/rooms/state_cache/data.rs | 2 + src/service/rooms/state_cache/mod.rs | 36 +- src/service/rooms/state_compressor/data.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 19 +- src/service/rooms/timeline/data.rs | 31 +- src/service/rooms/timeline/mod.rs | 55 +- src/service/rooms/user/mod.rs | 1 + src/service/transaction_ids/data.rs | 4 +- src/service/transaction_ids/mod.rs | 1 + src/service/uiaa/data.rs | 2 + src/service/uiaa/mod.rs | 10 +- src/service/users/data.rs | 108 ++-- src/service/users/mod.rs | 26 +- src/utils/{utils.rs => mod.rs} | 2 + 118 files changed, 2139 insertions(+), 2433 deletions(-) create mode 100644 src/api/mod.rs create mode 100644 src/database/key_value/rooms/edus/mod.rs create mode 100644 src/service/mod.rs rename src/utils/{utils.rs => mod.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index d8d791f..c074c76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -408,6 +408,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.3.0-next" dependencies = [ + "async-trait", "axum", "axum-server", "base64 0.13.0", @@ -422,6 +423,7 @@ dependencies = [ "http", "image", "jsonwebtoken", + "lazy_static", "lru-cache", "num_cpus", "opentelemetry", diff --git a/Cargo.toml b/Cargo.toml index f150c4e..b88674d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,8 @@ figment = { version = "0.10.6", features = ["env", "toml"] } tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } +lazy_static = "1.4.0" +async-trait = "0.1.57" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index ce122da..1f6e2c9 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,12 +1,11 @@ -use crate::{utils, Error, Result}; +use crate::{utils, Error, Result, services}; use bytes::BytesMut; use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; -#[tracing::instrument(skip(globals, request))] +#[tracing::instrument(skip(request))] pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, ) -> Result @@ -46,7 +45,7 @@ where 
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals.default_client().execute(reqwest_request).await?; + let mut response = services().globals.default_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index dc0782d..848bfaa 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -2,9 +2,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ - database::{admin::make_user_admin, DatabaseGuard}, - pdu::PduBuilder, - utils, Database, Error, Result, Ruma, + utils, Error, Result, Ruma, services, }; use ruma::{ api::client::{ @@ -42,15 +40,14 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( - db: DatabaseGuard, body: Ruma, ) -> Result { // Validate user id let user_id = - UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) + UserId::parse_with_server_name(body.username.to_lowercase(), services().globals.server_name()) .ok() .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() }) .ok_or(Error::BadRequest( ErrorKind::InvalidUsername, @@ -58,7 +55,7 @@ pub async fn get_register_available_route( ))?; // Check if username is creative enough - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -85,10 +82,9 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_registration() && !body.from_appservice { + if !services().globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", @@ -100,17 +96,17 @@ pub async fn register_route( let user_id = match (&body.username, is_guest) { (Some(username), false) => { let proposed_user_id = - UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_lowercase(), services().globals.server_name()) .ok() .filter(|user_id| { !user_id.is_historical() - && user_id.server_name() == db.globals.server_name() + && user_id.server_name() == services().globals.server_name() }) .ok_or(Error::BadRequest( ErrorKind::InvalidUsername, "Username is invalid.", ))?; - if db.users.exists(&proposed_user_id)? { + if services().users.exists(&proposed_user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -121,10 +117,10 @@ pub async fn register_route( _ => loop { let proposed_user_id = UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) .unwrap(); - if !db.users.exists(&proposed_user_id)? { + if !services().users.exists(&proposed_user_id)? 
{ break proposed_user_id; } }, @@ -143,14 +139,12 @@ pub async fn register_route( if !body.from_appservice { if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &UserId::parse_with_server_name("", db.globals.server_name()) + let (worked, uiaainfo) = services().uiaa.try_auth( + &UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid"), "".into(), auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -158,8 +152,8 @@ pub async fn register_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &UserId::parse_with_server_name("", db.globals.server_name()) + services().uiaa.create( + &UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid"), "".into(), &uiaainfo, @@ -178,15 +172,15 @@ pub async fn register_route( }; // Create user - db.users.create(&user_id, password)?; + services().users.create(&user_id, password)?; // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - db.users + services().users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data - db.account_data.update( + services().account_data.update( None, &user_id, GlobalAccountDataEventType::PushRules.to_string().into(), @@ -195,7 +189,6 @@ pub async fn register_route( global: push::Ruleset::server_default(&user_id), }, }, - &db.globals, )?; // Inhibit login does not work for guests @@ -219,7 +212,7 @@ pub async fn register_route( let token = utils::random_string(TOKEN_LENGTH); // Create device for this account - db.users.create_device( + services().users.create_device( &user_id, &device_id, &token, @@ -227,7 +220,7 @@ pub async fn register_route( )?; info!("New user {} registered on this server.", user_id); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "New user {} registered on this server.", user_id @@ -235,14 +228,12 @@ pub async fn register_route( // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first - if db.users.count()? == 2 { - make_user_admin(&db, &user_id, displayname).await?; + if services().users.count()? == 2 { + services().admin.make_user_admin(&user_id, displayname).await?; warn!("Granting {} admin privileges as the first user", user_id); } - db.flush()?; - Ok(register::v3::Response { access_token: Some(token), user_id, @@ -265,7 +256,6 @@ pub async fn register_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn change_password_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -282,13 +272,11 @@ pub async fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -296,32 +284,30 @@ pub async fn change_password_route( // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users + services().users .set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { // Logout all devices except the current one - for id in db + for id in services() .users .all_device_ids(sender_user) .filter_map(|id| id.ok()) .filter(|id| id != sender_device) { - db.users.remove_device(sender_user, &id)?; + services().users.remove_device(sender_user, &id)?; } } - db.flush()?; - info!("User {} changed their password.", sender_user); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} changed their password.", sender_user @@ -336,7 +322,6 @@ pub async fn change_password_route( /// /// Note: Also works for Application Services pub async fn whoami_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -345,7 +330,7 @@ pub async fn whoami_route( Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest: db.users.is_deactivated(&sender_user)?, + is_guest: services().users.is_deactivated(&sender_user)?, }) } @@ -360,7 +345,6 @@ pub async fn whoami_route( /// - Triggers device list updates /// - Removes ability to log in again pub async fn deactivate_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -377,13 +361,11 @@ pub async fn deactivate_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -391,7 +373,7 @@ pub async fn deactivate_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -399,20 +381,18 @@ pub async fn deactivate_route( } // Make the user leave all rooms before deactivation - db.rooms.leave_all_rooms(&sender_user, &db).await?; + services().rooms.leave_all_rooms(&sender_user).await?; // Remove devices and mark account as deactivated - db.users.deactivate_account(sender_user)?; + services().users.deactivate_account(sender_user)?; info!("User {} deactivated their account.", sender_user); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} deactivated their account.", sender_user ))); - db.flush()?; - Ok(deactivate::v3::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, }) diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 90e9d2c..7aa5fb2 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use regex::Regex; use ruma::{ api::{ @@ -16,24 +16,21 @@ use ruma::{ /// /// Creates a new room alias on this server. 
pub async fn create_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.room_alias.server_name() != db.globals.server_name() { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - if db.rooms.id_from_alias(&body.room_alias)?.is_some() { + if services().rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } - db.rooms - .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - - db.flush()?; + services().rooms + .set_alias(&body.room_alias, Some(&body.room_id))?; Ok(create_alias::v3::Response::new()) } @@ -45,22 +42,19 @@ pub async fn create_alias_route( /// - TODO: additional access control checks /// - TODO: Update canonical alias event pub async fn delete_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.room_alias.server_name() != db.globals.server_name() { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + services().rooms.set_alias(&body.room_alias, None)?; // TODO: update alt_aliases? - db.flush()?; - Ok(delete_alias::v3::Response::new()) } @@ -70,21 +64,18 @@ pub async fn delete_alias_route( /// /// - TODO: Suggest more servers to join via pub async fn get_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - get_alias_helper(&db, &body.room_alias).await + get_alias_helper(&body.room_alias).await } pub(crate) async fn get_alias_helper( - db: &Database, room_alias: &RoomAliasId, ) -> Result { - if room_alias.server_name() != db.globals.server_name() { - let response = db + if room_alias.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) @@ -97,10 +88,10 @@ pub(crate) async fn get_alias_helper( } let mut room_id = None; - match db.rooms.id_from_alias(room_alias)? { + match services().rooms.id_from_alias(room_alias)? { Some(r) => room_id = Some(r), None => { - for (_id, registration) in db.appservice.all()? { + for (_id, registration) in services().appservice.all()? { let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) @@ -115,17 +106,16 @@ pub(crate) async fn get_alias_helper( if aliases .iter() .any(|aliases| aliases.is_match(room_alias.as_str())) - && db + && services() .sending .send_appservice_request( - &db.globals, registration, appservice::query::query_room_alias::v1::Request { room_alias }, ) .await .is_ok() { - room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| { + room_id = Some(services().rooms.id_from_alias(room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. 
Room does not exist.") })?); break; @@ -146,6 +136,6 @@ pub(crate) async fn get_alias_helper( Ok(get_alias::v3::Response::new( room_id, - vec![db.globals.server_name().to_owned()], + vec![services().globals.server_name().to_owned()], )) } diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index 067f20c..e413893 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, @@ -14,15 +14,12 @@ use ruma::api::client::{ /// /// Creates a new backup. pub async fn create_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let version = db + let version = services() .key_backups - .create_backup(sender_user, &body.algorithm, &db.globals)?; - - db.flush()?; + .create_backup(sender_user, &body.algorithm)?; Ok(create_backup_version::v3::Response { version }) } @@ -31,14 +28,11 @@ pub async fn create_backup_version_route( /// /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups - .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; - - db.flush()?; + services().key_backups + .update_backup(sender_user, &body.version, &body.algorithm)?; Ok(update_backup_version::v3::Response {}) } @@ -47,13 +41,12 @@ pub async fn update_backup_version_route( /// /// Get information about the latest backup version. pub async fn get_latest_backup_info_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = - db.key_backups + services().key_backups .get_latest_backup(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -62,8 +55,8 @@ pub async fn get_latest_backup_info_route( Ok(get_latest_backup_info::v3::Response { algorithm, - count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &version)?, + count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &version)?, version, }) } @@ -72,11 +65,10 @@ pub async fn get_latest_backup_info_route( /// /// Get information about an existing backup. pub async fn get_backup_info_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let algorithm = db + let algorithm = services() .key_backups .get_backup(sender_user, &body.version)? .ok_or(Error::BadRequest( @@ -86,8 +78,8 @@ pub async fn get_backup_info_route( Ok(get_backup_info::v3::Response { algorithm, - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, version: body.version.to_owned(), }) } @@ -98,14 +90,11 @@ pub async fn get_backup_info_route( /// /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_backup(sender_user, &body.version)?; - - db.flush()?; + services().key_backups.delete_backup(sender_user, &body.version)?; Ok(delete_backup_version::v3::Response {}) } @@ -118,13 +107,12 @@ pub async fn delete_backup_version_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? .as_ref() @@ -137,22 +125,19 @@ pub async fn add_backup_keys_route( for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, room_id, session_id, key_data, - &db.globals, )? } } - db.flush()?; - Ok(add_backup_keys::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -164,13 +149,12 @@ pub async fn add_backup_keys_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? .as_ref() @@ -182,21 +166,18 @@ pub async fn add_backup_keys_for_room_route( } for (session_id, key_data) in &body.sessions { - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, &body.room_id, session_id, key_data, - &db.globals, )? } - db.flush()?; - Ok(add_backup_keys_for_room::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -208,13 +189,12 @@ pub async fn add_backup_keys_for_room_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? 
.as_ref() @@ -225,20 +205,17 @@ pub async fn add_backup_keys_for_session_route( )); } - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data, - &db.globals, )?; - db.flush()?; - Ok(add_backup_keys_for_session::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -246,12 +223,11 @@ pub async fn add_backup_keys_for_session_route( /// /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = db.key_backups.get_all(sender_user, &body.version)?; + let rooms = services().key_backups.get_all(sender_user, &body.version)?; Ok(get_backup_keys::v3::Response { rooms }) } @@ -260,12 +236,11 @@ pub async fn get_backup_keys_route( /// /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sessions = db + let sessions = services() .key_backups .get_room(sender_user, &body.version, &body.room_id)?; @@ -276,12 +251,11 @@ pub async fn get_backup_keys_for_room_route( /// /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let key_data = db + let key_data = services() .key_backups .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? .ok_or(Error::BadRequest( @@ -296,18 +270,15 @@ pub async fn get_backup_keys_for_session_route( /// /// Delete the keys from the backup. pub async fn delete_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_all_keys(sender_user, &body.version)?; - - db.flush()?; + services().key_backups.delete_all_keys(sender_user, &body.version)?; Ok(delete_backup_keys::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -315,19 +286,16 @@ pub async fn delete_backup_keys_route( /// /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups + services().key_backups .delete_room_keys(sender_user, &body.version, &body.room_id)?; - db.flush()?; - Ok(delete_backup_keys_for_room::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -335,18 +303,15 @@ pub async fn delete_backup_keys_for_room_route( /// /// Delete a key from the backup. pub async fn delete_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups + services().key_backups .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; - db.flush()?; - Ok(delete_backup_keys_for_session::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 417ad29..e4283b7 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; @@ -8,26 +8,25 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - if db.globals.allow_unstable_room_versions() { - for room_version in &db.globals.unstable_room_versions { + if services().globals.allow_unstable_room_versions() { + for room_version in &services().globals.unstable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); } } else { - for room_version in &db.globals.unstable_room_versions { + for room_version in &services().globals.unstable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Unstable); } } - for room_version in &db.globals.stable_room_versions { + for room_version in &services().globals.stable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); } let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: db.globals.default_room_version(), + default: services().globals.default_room_version(), available, }; diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 6184e0b..36f4fcb 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{ config::{ @@ -17,7 +17,6 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// /// Sets some account data for the sender user. 
pub async fn set_global_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -27,7 +26,7 @@ pub async fn set_global_account_data_route( let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( None, sender_user, event_type.clone().into(), @@ -35,11 +34,8 @@ pub async fn set_global_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - Ok(set_global_account_data::v3::Response {}) } @@ -47,7 +43,6 @@ pub async fn set_global_account_data_route( /// /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -57,7 +52,7 @@ pub async fn set_room_account_data_route( let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, event_type.clone().into(), @@ -65,11 +60,8 @@ pub async fn set_room_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - Ok(set_room_account_data::v3::Response {}) } @@ -77,12 +69,11 @@ pub async fn set_room_account_data_route( /// /// Gets some account data for the sender user. pub async fn get_global_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; @@ -98,12 +89,11 @@ pub async fn get_global_account_data_route( /// /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get( Some(&body.room_id), diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index e93f5a5..3551dcf 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, @@ -13,7 +13,6 @@ use tracing::error; /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) pub async fn get_context_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -28,7 +27,7 @@ pub async fn get_context_route( let mut lazy_loaded = HashSet::new(); - let base_pdu_id = db + let base_pdu_id = services() .rooms .get_pdu_id(&body.event_id)? .ok_or(Error::BadRequest( @@ -36,9 +35,9 @@ pub async fn get_context_route( "Base event id not found.", ))?; - let base_token = db.rooms.pdu_count(&base_pdu_id)?; + let base_token = services().rooms.pdu_count(&base_pdu_id)?; - let base_event = db + let base_event = services() .rooms .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( @@ -48,14 +47,14 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !db.rooms.is_joined(sender_user, &room_id)? 
{ + if !services().rooms.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -67,7 +66,7 @@ pub async fn get_context_route( let base_event = base_event.to_room_event(); - let events_before: Vec<_> = db + let events_before: Vec<_> = services() .rooms .pdus_until(sender_user, &room_id, base_token)? .take( @@ -80,7 +79,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_before { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -93,7 +92,7 @@ pub async fn get_context_route( let start_token = events_before .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_before: Vec<_> = events_before @@ -101,7 +100,7 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let events_after: Vec<_> = db + let events_after: Vec<_> = services() .rooms .pdus_after(sender_user, &room_id, base_token)? .take( @@ -114,7 +113,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_after { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -125,23 +124,23 @@ pub async fn get_context_route( } } - let shortstatehash = match db.rooms.pdu_shortstatehash( + let shortstatehash = match services().rooms.pdu_shortstatehash( events_after .last() .map_or(&*body.event_id, |(_, e)| &*e.event_id), )? { Some(s) => s, - None => db + None => services() .rooms .current_shortstatehash(&room_id)? .expect("All rooms have state"), }; - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_full_ids(shortstatehash).await?; let end_token = events_after .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_after: Vec<_> = events_after @@ -152,10 +151,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -164,7 +163,7 @@ pub async fn get_context_route( }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index b100bf2..2f55993 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -11,12 +11,11 @@ use super::SESSION_ID_LENGTH; /// /// Get metadata on all devices of the sender user. pub async fn get_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let devices: Vec = db + let devices: Vec = services() .users .all_devices_metadata(sender_user) .filter_map(|r| r.ok()) // Filter out buggy devices @@ -29,12 +28,11 @@ pub async fn get_devices_route( /// /// Get metadata on a single device of the sender user. pub async fn get_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device = db + let device = services() .users .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; @@ -46,23 +44,20 @@ pub async fn get_device_route( /// /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut device = db + let mut device = services() .users .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); - db.users + services().users .update_device_metadata(sender_user, &body.device_id, &device)?; - db.flush()?; - Ok(update_device::v3::Response {}) } @@ -76,7 +71,6 @@ pub async fn update_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -94,13 +88,11 @@ pub async fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -108,16 +100,14 @@ pub async fn delete_device_route( // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users.remove_device(sender_user, &body.device_id)?; - - db.flush()?; + services().users.remove_device(sender_user, &body.device_id)?; Ok(delete_device::v3::Response {}) } @@ -134,7 +124,6 @@ pub async fn delete_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -152,13 +141,11 @@ pub async fn delete_devices_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -166,7 +153,7 @@ pub async fn delete_devices_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -174,10 +161,8 @@ pub async fn delete_devices_route( } for device_id in &body.devices { - db.users.remove_device(sender_user, device_id)? + services().users.remove_device(sender_user, device_id)? } - db.flush()?; - Ok(delete_devices::v3::Response {}) } diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 4e4a322..87493fa 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::{ client::{ @@ -37,11 +37,9 @@ use tracing::{info, warn}; /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, body: Ruma, ) -> Result { get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), @@ -57,11 +55,9 @@ pub async fn get_public_rooms_filtered_route( /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let response = get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), @@ -84,17 +80,16 @@ pub async fn get_public_rooms_route( /// /// - TODO: Access control checks pub async fn set_room_visibility_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { room::Visibility::Public => { - db.rooms.set_public(&body.room_id, true)?; + services().rooms.set_public(&body.room_id, true)?; info!("{} made {} public", sender_user, body.room_id); } - room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, + room::Visibility::Private => services().rooms.set_public(&body.room_id, false)?, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -103,8 +98,6 @@ pub async fn set_room_visibility_route( } } - db.flush()?; - Ok(set_room_visibility::v3::Response {}) } @@ -112,11 +105,10 @@ pub async fn set_room_visibility_route( /// /// Gets the visibility of a given room in the room 
directory. pub async fn get_room_visibility_route( - db: DatabaseGuard, body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { - visibility: if db.rooms.is_public_room(&body.room_id)? { + visibility: if services().rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private @@ -125,19 +117,17 @@ pub async fn get_room_visibility_route( } pub(crate) async fn get_public_rooms_filtered_helper( - db: &Database, server: Option<&ServerName>, limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> Result { - if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) + if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) { - let response = db + let response = services() .sending .send_federation_request( - &db.globals, other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, @@ -184,14 +174,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms: Vec<_> = db + let mut all_rooms: Vec<_> = services() .rooms .public_rooms() .map(|room_id| { let room_id = room_id?; let chunk = PublicRoomsChunk { - canonical_alias: db + canonical_alias: services() .rooms .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { @@ -201,7 +191,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: db + name: services() .rooms .room_state_get(&room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { @@ -211,7 +201,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room name event in database.") }) })?, - num_joined_members: db + num_joined_members: services() .rooms .room_joined_count(&room_id)? .unwrap_or_else(|| { @@ -220,7 +210,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( }) .try_into() .expect("user count should not be that big"), - topic: db + topic: services() .rooms .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { @@ -230,7 +220,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room topic event in database.") }) })?, - world_readable: db + world_readable: services() .rooms .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { @@ -244,7 +234,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( ) }) })?, - guest_can_join: db + guest_can_join: services() .rooms .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { @@ -256,7 +246,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room guest access event in database.") }) })?, - avatar_url: db + avatar_url: services() .rooms .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { @@ -269,7 +259,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), - join_rule: db + join_rule: services() .rooms .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
.map(|s| { diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index 6522c90..e0c9506 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ error::ErrorKind, filter::{create_filter, get_filter}, @@ -10,11 +10,10 @@ use ruma::api::client::{ /// /// - A user can only access their own filters pub async fn get_filter_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let filter = match db.users.get_filter(sender_user, &body.filter_id)? { + let filter = match services().users.get_filter(sender_user, &body.filter_id)? { Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; @@ -26,11 +25,10 @@ pub async fn get_filter_route( /// /// Creates a new filter to be used by other endpoints. pub async fn create_filter_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( - db.users.create_filter(sender_user, &body.filter)?, + services().users.create_filter(sender_user, &body.filter)?, )) } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index c4f91cb..698bd1e 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -26,39 +26,34 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) pub async fn upload_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); for (key_key, key_value) in &body.one_time_keys { - db.users - .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?; + services().users + .add_one_time_key(sender_user, sender_device, key_key, key_value)?; } if let Some(device_keys) = &body.device_keys { // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept - if db + if services() .users .get_device_keys(sender_user, sender_device)? .is_none() { - db.users.add_device_keys( + services().users.add_device_keys( sender_user, sender_device, device_keys, - &db.rooms, - &db.globals, )?; } } - db.flush()?; - Ok(upload_keys::v3::Response { - one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, + one_time_key_counts: services().users.count_one_time_keys(sender_user, sender_device)?, }) } @@ -70,7 +65,6 @@ pub async fn upload_keys_route( /// - Gets master keys, self-signing keys, user signing keys and device keys. 
/// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -79,7 +73,6 @@ pub async fn get_keys_route( Some(sender_user), &body.device_keys, |u| u == sender_user, - &db, ) .await?; @@ -90,12 +83,9 @@ pub async fn get_keys_route( /// /// Claims one-time keys pub async fn claim_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - let response = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; + let response = claim_keys_helper(&body.one_time_keys).await?; Ok(response) } @@ -106,7 +96,6 @@ pub async fn claim_keys_route( /// /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -124,13 +113,11 @@ pub async fn upload_signing_keys_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -138,7 +125,7 @@ pub async fn upload_signing_keys_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -146,18 +133,14 @@ pub async fn upload_signing_keys_route( } if let Some(master_key) = &body.master_key { - db.users.add_cross_signing_keys( + services().users.add_cross_signing_keys( sender_user, master_key, &body.self_signing_key, &body.user_signing_key, - &db.rooms, - &db.globals, )?; } - db.flush()?; - Ok(upload_signing_keys::v3::Response {}) } @@ -165,7 +148,6 @@ pub async fn upload_signing_keys_route( /// /// Uploads end-to-end key signatures from the sender user. pub async fn upload_signatures_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -205,20 +187,16 @@ pub async fn upload_signatures_route( ))? 
.to_owned(), ); - db.users.sign_key( + services().users.sign_key( user_id, key_id, signature, sender_user, - &db.rooms, - &db.globals, )?; } } } - db.flush()?; - Ok(upload_signatures::v3::Response { failures: BTreeMap::new(), // TODO: integrate }) @@ -230,7 +208,6 @@ pub async fn upload_signatures_route( /// /// - TODO: left users pub async fn get_key_changes_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -238,7 +215,7 @@ pub async fn get_key_changes_route( let mut device_list_updates = HashSet::new(); device_list_updates.extend( - db.users + services().users .keys_changed( sender_user.as_str(), body.from @@ -253,9 +230,9 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services().rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { device_list_updates.extend( - db.users + services().users .keys_changed( &room_id.to_string(), body.from.parse().map_err(|_| { @@ -278,7 +255,6 @@ pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, - db: &Database, ) -> Result { let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); @@ -290,7 +266,7 @@ pub(crate) async fn get_keys_helper bool>( for (user_id, device_ids) in device_keys_input { let user_id: &UserId = &**user_id; - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -300,10 +276,10 @@ pub(crate) async fn get_keys_helper bool>( if device_ids.is_empty() { let mut container = BTreeMap::new(); - for device_id in db.users.all_device_ids(user_id) { + for device_id in services().users.all_device_ids(user_id) { let device_id = device_id?; - if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { - let metadata = db + if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? { + let metadata = services() .users .get_device_metadata(user_id, &device_id)? .ok_or_else(|| { @@ -319,8 +295,8 @@ pub(crate) async fn get_keys_helper bool>( } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? { - let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( + if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { + let metadata = services().users.get_device_metadata(user_id, device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", @@ -335,17 +311,17 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { + if let Some(master_key) = services().users.get_master_key(user_id, &allowed_signatures)? { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = db + if let Some(self_signing_key) = services() .users .get_self_signing_key(user_id, &allowed_signatures)? { self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { - if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { + if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? 
{ user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } @@ -362,9 +338,8 @@ pub(crate) async fn get_keys_helper bool>( } ( server, - db.sending + services().sending .send_federation_request( - &db.globals, server, federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, @@ -417,14 +392,13 @@ fn add_unsigned_device_display_name( pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, - db: &Database, ) -> Result { let mut one_time_keys = BTreeMap::new(); let mut get_over_federation = BTreeMap::new(); for (user_id, map) in one_time_keys_input { - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -434,8 +408,8 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = - db.users - .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? + services().users + .take_one_time_key(user_id, device_id, key_algorithm)? { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); @@ -453,10 +427,9 @@ pub(crate) async fn claim_keys_helper( one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); } // Ignore failures - if let Ok(keys) = db + if let Ok(keys) = services() .sending .send_federation_request( - &db.globals, server, federation::keys::claim_keys::v1::Request { one_time_keys: one_time_keys_input_fed, diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index a9a6d6c..f0da084 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,6 +1,5 @@ use crate::{ - database::{media::FileMeta, DatabaseGuard}, - utils, Error, Result, Ruma, + utils, Error, Result, Ruma, services, service::media::FileMeta, }; use ruma::api::client::{ error::ErrorKind, @@ -16,11 +15,10 @@ const MXC_LENGTH: usize = 32; /// /// Returns max upload size. 
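
// Both `get_keys_helper` and `claim_keys_helper` above start by splitting the requested users
// into those served locally and those whose keys must be fetched from their own homeservers,
// grouped by server name. A dependency-free sketch of that partitioning step, using plain
// strings where the server uses ruma's typed user, device and key types:

use std::collections::BTreeMap;

/// Split a `user -> devices` request map into "handle locally" and
/// "forward to this remote server" buckets.
fn partition_by_server(
    input: &BTreeMap<String, Vec<String>>, // user id -> device ids (illustrative types)
    own_server: &str,
) -> (
    BTreeMap<String, Vec<String>>,
    BTreeMap<String, BTreeMap<String, Vec<String>>>,
) {
    let mut local = BTreeMap::new();
    let mut over_federation: BTreeMap<String, BTreeMap<String, Vec<String>>> = BTreeMap::new();

    for (user_id, devices) in input {
        // A Matrix user id is `@localpart:server.name`; everything after the
        // first ':' is the server name.
        let server = user_id.split_once(':').map(|(_, s)| s).unwrap_or("");
        if server == own_server {
            local.insert(user_id.clone(), devices.clone());
        } else {
            over_federation
                .entry(server.to_owned())
                .or_insert_with(BTreeMap::new)
                .insert(user_id.clone(), devices.clone());
        }
    }

    (local, over_federation)
}

fn main() {
    let mut req = BTreeMap::new();
    req.insert("@alice:local.example".to_owned(), vec!["DEV1".to_owned()]);
    req.insert("@bob:remote.example".to_owned(), vec!["DEV2".to_owned()]);

    let (local, fed) = partition_by_server(&req, "local.example");
    assert_eq!(local.len(), 1);
    assert_eq!(fed["remote.example"].len(), 1);
    println!("local: {:?}, federation: {:?}", local, fed);
}
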
pub async fn get_media_config_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { Ok(get_media_config::v3::Response { - upload_size: db.globals.max_request_size().into(), + upload_size: services().globals.max_request_size().into(), }) } @@ -31,19 +29,17 @@ pub async fn get_media_config_route( /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory pub async fn create_content_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", - db.globals.server_name(), + services().globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media + services().media .create( mxc.clone(), - &db.globals, &body .filename .as_ref() @@ -54,8 +50,6 @@ pub async fn create_content_route( ) .await?; - db.flush()?; - Ok(create_content::v3::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, @@ -63,15 +57,13 @@ pub async fn create_content_route( } pub async fn get_remote_content( - db: &DatabaseGuard, mxc: &str, server_name: &ruma::ServerName, media_id: &str, ) -> Result { - let content_response = db + let content_response = services() .sending .send_federation_request( - &db.globals, server_name, get_content::v3::Request { allow_remote: false, @@ -81,10 +73,9 @@ pub async fn get_remote_content( ) .await?; - db.media + services().media .create( mxc.to_string(), - &db.globals, &content_response.content_disposition.as_deref(), &content_response.content_type.as_deref(), &content_response.file, @@ -100,7 +91,6 @@ pub async fn get_remote_content( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -109,16 +99,16 @@ pub async fn get_content_route( content_disposition, content_type, file, - }) = db.media.get(&db.globals, &mxc).await? + }) = services().media.get(&mxc).await? { Ok(get_content::v3::Response { file, content_type, content_disposition, }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -131,7 +121,6 @@ pub async fn get_content_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -140,16 +129,16 @@ pub async fn get_content_as_filename_route( content_disposition: _, content_type, file, - }) = db.media.get(&db.globals, &mxc).await? + }) = services().media.get(&mxc).await? 
{ Ok(get_content_as_filename::v3::Response { file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), @@ -167,18 +156,16 @@ pub async fn get_content_as_filename_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { content_type, file, .. - }) = db + }) = services() .media .get_thumbnail( &mxc, - &db.globals, body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, @@ -189,11 +176,10 @@ pub async fn get_content_thumbnail_route( .await? { Ok(get_content_thumbnail::v3::Response { file, content_type }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_thumbnail_response = db + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let get_thumbnail_response = services() .sending .send_federation_request( - &db.globals, &body.server_name, get_content_thumbnail::v3::Request { allow_remote: false, @@ -206,10 +192,9 @@ pub async fn get_content_thumbnail_route( ) .await?; - db.media + services().media .upload_thumbnail( mxc, - &db.globals, &None, &get_thumbnail_response.content_type, body.width.try_into().expect("all UInts are valid u32s"), diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index ecd26d1..b000ec1 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1,9 +1,3 @@ -use crate::{ - client_server, - database::DatabaseGuard, - pdu::{EventHash, PduBuilder, PduEvent}, - server_server, utils, Database, Error, Result, Ruma, -}; use ruma::{ api::{ client::{ @@ -29,13 +23,17 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; use tracing::{debug, error, warn}; +use crate::{services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server}, utils, Ruma}; + +use super::get_alias_helper; + /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// /// Tries to join the sender user into a room. @@ -43,14 +41,13 @@ use tracing::{debug, error, warn}; /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( - db.rooms + services().rooms .invite_state(sender_user, &body.room_id)? 
.unwrap_or_default() .iter() @@ -64,7 +61,6 @@ pub async fn join_room_by_id_route( servers.push(body.room_id.server_name().to_owned()); let ret = join_room_by_id_helper( - &db, body.sender_user.as_deref(), &body.room_id, &servers, @@ -72,8 +68,6 @@ pub async fn join_room_by_id_route( ) .await; - db.flush()?; - ret } @@ -84,7 +78,6 @@ pub async fn join_room_by_id_route( /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); @@ -94,7 +87,7 @@ pub async fn join_room_by_id_or_alias_route( Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( - db.rooms + services().rooms .invite_state(sender_user, &room_id)? .unwrap_or_default() .iter() @@ -109,14 +102,13 @@ pub async fn join_room_by_id_or_alias_route( (servers, room_id) } Err(room_alias) => { - let response = client_server::get_alias_helper(&db, &room_alias).await?; + let response = get_alias_helper(&room_alias).await?; (response.servers.into_iter().collect(), response.room_id) } }; let join_room_response = join_room_by_id_helper( - &db, Some(sender_user), &room_id, &servers, @@ -124,8 +116,6 @@ pub async fn join_room_by_id_or_alias_route( ) .await?; - db.flush()?; - Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id, }) @@ -137,14 +127,11 @@ pub async fn join_room_by_id_or_alias_route( /// /// - This should always work if the user is currently joined. pub async fn leave_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.leave_room(sender_user, &body.room_id, &db).await?; - - db.flush()?; + services().rooms.leave_room(sender_user, &body.room_id).await?; Ok(leave_room::v3::Response::new()) } @@ -153,14 +140,12 @@ pub async fn leave_room_route( /// /// Tries to send an invite event into the room. pub async fn invite_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; - db.flush()?; + invite_helper(sender_user, user_id, &body.room_id, false).await?; Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -171,13 +156,12 @@ pub async fn invite_user_route( /// /// Tries to send a kick event into the room. 
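
// `join_room_by_id_route` above assembles the list of servers that might be able to assist
// with a remote join: the servers of the senders found in our stored invite state, plus the
// server named in the room id itself. A rough sketch with plain strings in place of ruma's
// typed ids, using a set for deduplication where the route collects into a Vec:

use std::collections::BTreeSet;

/// Collect candidate servers for a remote join, deduplicated.
fn candidate_join_servers(invite_state_senders: &[&str], room_id: &str) -> Vec<String> {
    let mut servers = BTreeSet::new();

    for sender in invite_state_senders {
        // `@user:server.name` -> `server.name` (illustrative parsing).
        if let Some((_, server)) = sender.split_once(':') {
            servers.insert(server.to_owned());
        }
    }

    // `!opaque:server.name` -> `server.name`; the room's own server is always worth asking.
    if let Some((_, server)) = room_id.split_once(':') {
        servers.insert(server.to_owned());
    }

    servers.into_iter().collect()
}

fn main() {
    let servers = candidate_join_servers(
        &["@alice:other.example", "@bob:other.example", "@carol:third.example"],
        "!roomid:origin.example",
    );
    assert_eq!(
        servers,
        vec![
            "origin.example".to_owned(),
            "other.example".to_owned(),
            "third.example".to_owned(),
        ]
    );
    println!("{:?}", servers);
}
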
pub async fn kick_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -196,7 +180,7 @@ pub async fn kick_user_route( // TODO: reason let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -205,7 +189,7 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -215,14 +199,11 @@ pub async fn kick_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(kick_user::v3::Response::new()) } @@ -230,14 +211,13 @@ pub async fn kick_user_route( /// /// Tries to send a ban event into the room. pub async fn ban_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason - let event = db + let event = services() .rooms .room_state_get( &body.room_id, @@ -247,11 +227,11 @@ pub async fn ban_user_route( .map_or( Ok(RoomMemberEventContent { membership: MembershipState::Ban, - displayname: db.users.displayname(&body.user_id)?, - avatar_url: db.users.avatar_url(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, reason: None, join_authorized_via_users_server: None, }), @@ -266,7 +246,7 @@ pub async fn ban_user_route( )?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -275,7 +255,7 @@ pub async fn ban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -285,14 +265,11 @@ pub async fn ban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(ban_user::v3::Response::new()) } @@ -300,13 +277,12 @@ pub async fn ban_user_route( /// /// Tries to send an unban event into the room. 
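
// The kick and ban handlers above (like every handler that appends a state event) first take a
// per-room lock: they fetch or lazily create an `Arc<Mutex<..>>` under the room's entry in
// `roomid_mutex_state` and hold it while the PDU is built and appended. A minimal sketch of
// that lazily populated lock map; it uses std's blocking `Mutex` purely for brevity, whereas
// the server holds an async lock (`.lock().await`) across await points:

use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

/// One lock per room id, created on first use. Holding it while building and
/// appending a state event keeps concurrent state changes from interleaving.
#[derive(Default)]
struct RoomLocks {
    map: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomLocks {
    fn get(&self, room_id: &str) -> Arc<Mutex<()>> {
        Arc::clone(
            self.map
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        )
    }
}

fn main() {
    let locks = RoomLocks::default();

    let lock = locks.get("!room:example.org");
    {
        let _guard = lock.lock().unwrap();
        // ... build and append the membership PDU while the room is locked ...
    }

    // Asking again for the same room yields the same underlying mutex.
    assert!(Arc::ptr_eq(&lock, &locks.get("!room:example.org")));
    println!("per-room lock reused");
}
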
pub async fn unban_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -324,7 +300,7 @@ pub async fn unban_user_route( event.membership = MembershipState::Leave; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -333,7 +309,7 @@ pub async fn unban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -343,14 +319,11 @@ pub async fn unban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(unban_user::v3::Response::new()) } @@ -363,14 +336,11 @@ pub async fn unban_user_route( /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device pub async fn forget_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, sender_user)?; - - db.flush()?; + services().rooms.forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -379,13 +349,12 @@ pub async fn forget_room_route( /// /// Lists all rooms the user has joined. pub async fn joined_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(joined_rooms::v3::Response { - joined_rooms: db + joined_rooms: services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -399,13 +368,12 @@ pub async fn joined_rooms_route( /// /// - Only works if the user is currently joined pub async fn get_member_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -413,7 +381,7 @@ pub async fn get_member_events_route( } Ok(get_member_events::v3::Response { - chunk: db + chunk: services() .rooms .room_state_full(&body.room_id) .await? @@ -431,12 +399,11 @@ pub async fn get_member_events_route( /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -444,9 +411,9 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { - let display_name = db.users.displayname(&user_id)?; - let avatar_url = db.users.avatar_url(&user_id)?; + for user_id in services().rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + let display_name = services().users.displayname(&user_id)?; + let avatar_url = services().users.avatar_url(&user_id)?; joined.insert( user_id, @@ -460,9 +427,7 @@ pub async fn joined_members_route( Ok(joined_members::v3::Response { joined }) } -#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( - db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, servers: &[Box], @@ -471,7 +436,7 @@ async fn join_room_by_id_helper( let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -481,21 +446,20 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !db.rooms.exists(room_id)? { + if !services().rooms.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); for remote_server in servers { - let make_join_response = db + let make_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, - ver: &db.globals.supported_room_versions(), + ver: &services().globals.supported_room_versions(), }, ) .await; @@ -510,7 +474,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version, + Some(room_version) if services().rooms.is_supported_version(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -522,7 +486,7 @@ async fn join_room_by_id_helper( // TODO: Is origin needed? 
join_event_stub.insert( "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), ); join_event_stub.insert( "origin_server_ts".to_owned(), @@ -536,11 +500,11 @@ async fn join_room_by_id_helper( "content".to_owned(), to_canonical_value(RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -552,8 +516,8 @@ async fn join_room_by_id_helper( // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut join_event_stub, &room_version, ) @@ -577,10 +541,9 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let join_event = join_event_stub; - let send_join_response = db + let send_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, federation::membership::create_join_event::v2::Request { room_id, @@ -590,7 +553,7 @@ async fn join_room_by_id_helper( ) .await?; - db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(room_id, &services().globals)?; let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -602,7 +565,6 @@ async fn join_room_by_id_helper( &send_join_response, &room_version, &pub_key_map, - db, ) .await?; @@ -610,7 +572,7 @@ async fn join_room_by_id_helper( .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -622,29 +584,27 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = db.rooms.get_or_create_shortstatekey( + let shortstatekey = services().rooms.get_or_create_shortstatekey( &pdu.kind.to_string().into(), state_key, - &db.globals, )?; state.insert(shortstatekey, pdu.event_id.clone()); } } - let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( + let incoming_shortstatekey = services().rooms.get_or_create_shortstatekey( &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key .as_ref() .expect("Pdu is a membership state event"), - &db.globals, )?; state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); - let create_shortstatekey = db + let create_shortstatekey = services() .rooms .get_shortstatekey(&StateEventType::RoomCreate, "")? 
.expect("Room exists"); @@ -653,56 +613,54 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - db.rooms.force_state( + services().rooms.force_state( room_id, state .into_iter() - .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) + .map(|(k, id)| services().rooms.compress_state_event(k, &id)) .collect::>()?, - db, )?; for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, Err(_) => continue, }; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.add_pdu_outlier(&event_id, &value)?; } // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = db.rooms.append_to_state(&parsed_pdu, &db.globals)?; + let statehashid = services().rooms.append_to_state(&parsed_pdu)?; - db.rooms.append_pdu( + services().rooms.append_pdu( &parsed_pdu, join_event, iter::once(&*parsed_pdu.event_id), - db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - db.rooms.set_room_state(room_id, statehashid)?; + services().rooms.set_room_state(room_id, statehashid)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -712,15 +670,13 @@ async fn join_room_by_id_helper( }, sender_user, room_id, - db, + services(), &state_lock, )?; } drop(state_lock); - db.flush()?; - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } @@ -728,7 +684,6 @@ fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -741,14 +696,14 @@ fn validate_and_add_event_id( )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services().globals.bad_event_ratelimiter.write().unwrap().entry(id) { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -791,13 +746,12 @@ pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, - db: &Database, is_direct: bool, ) -> Result<()> { - if user_id.server_name() != db.globals.server_name() { - let (room_version_id, pdu_json, invite_room_state) = { + if 
user_id.server_name() != services().globals.server_name() { + let (pdu_json, invite_room_state) = { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -818,36 +772,38 @@ pub(crate) async fn invite_helper<'a>( }) .expect("member event is valid value"); - let state_key = user_id.to_string(); - let kind = StateEventType::RoomMember; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, sender_user, room_id, &state_lock); - let (pdu, pdu_json) = create_hash_and_sign_event(); - - let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; + let invite_room_state = services().rooms.calculate_invite_state(&pdu)?; drop(state_lock); - (room_version_id, pdu_json, invite_room_state) + (pdu_json, invite_room_state) }; // Generate event id let expected_event_id = format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) + ruma::signatures::reference_hash(&pdu_json, &services().rooms.state.get_room_version(&room_id)?) .expect("ruma can calculate reference hashes") ); let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); - let response = db + let response = services() .sending .send_federation_request( - &db.globals, user_id.server_name(), create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &room_version_id, + room_version: &services().state.get_room_version(&room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -857,7 +813,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db) + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { @@ -882,13 +838,12 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = server_server::handle_incoming_pdu( + let pdu_id = services().rooms.event_handler.handle_incoming_pdu( &origin, &event_id, room_id, value, true, - db, &pub_key_map, ) .await @@ -903,18 +858,18 @@ pub(crate) async fn invite_helper<'a>( "Could not accept incoming PDU as timeline event.", ))?; - let servers = db + let servers = services() .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != services().globals.server_name()); - db.sending.send_pdu(servers, &pdu_id)?; + services().sending.send_pdu(servers, &pdu_id)?; return Ok(()); } - if !db.rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.is_joined(sender_user, &room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -922,7 +877,7 @@ pub(crate) async fn invite_helper<'a>( } let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -931,16 +886,16 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, - displayname: db.users.displayname(user_id)?, - avatar_url: db.users.avatar_url(user_id)?, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, is_direct: Some(is_direct), third_party_invite: None, - blurhash: db.users.blurhash(user_id)?, + blurhash: services().users.blurhash(user_id)?, reason: None, join_authorized_via_users_server: None, }) @@ -951,7 +906,6 @@ pub(crate) async fn invite_helper<'a>( }, sender_user, room_id, - db, &state_lock, )?; @@ -960,208 +914,196 @@ pub(crate) async fn invite_helper<'a>( Ok(()) } - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); +// Make a user leave all their joined rooms +pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { + let all_rooms = services() + .rooms + .rooms_joined(user_id) + .chain(services().rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::>(); - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? 
- .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, }; - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) + let _ = leave_room(user_id, &room_id).await; } + Ok(()) +} + +pub async fn leave_room( + user_id: &UserId, + room_id: &RoomId, +) -> Result<()> { + // Ask a remote server if we don't have this room + if !services().rooms.metadata.exists(room_id)? && room_id.server_name() != services().globals.server_name() { + if let Err(e) = remote_leave_room(user_id, room_id).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = services().rooms.state_cache + .invite_state(user_id, room_id)? + .map_or_else(|| services().rooms.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + services().rooms.state_cache.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + true, + )?; + } else { + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let mut event: RoomMemberEventContent = serde_json::from_str( + services().rooms.state.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? 
+ .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + &state_lock, + )?; + } + + Ok(()) +} + +async fn remote_leave_room( + user_id: &UserId, + room_id: &RoomId, +) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = services() + .rooms + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers: HashSet<_> = invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(); + + for remote_server in servers { + let make_leave_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version_id = match make_leave_response.room_version { + Some(version) if services().rooms.is_supported_version(&version) => version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = + serde_json::from_str::(make_leave_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_leave event json received from server."), + )?; + + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + services().sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) +} diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 1348132..861f9c1 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,14 +19,13 @@ use std::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -37,7 +36,7 @@ pub async fn send_message_event_route( // Forbid m.room.encrypted if encryption is disabled if RoomEventType::RoomEncrypted == body.event_type.to_string().into() - && !db.globals.allow_encryption() + && !services().globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -47,7 +46,7 @@ pub async fn send_message_event_route( // Check if this is a new transaction id if let Some(response) = - db.transaction_ids + services().transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? 
{ // The client might have sent a txnid of the /sendToDevice endpoint @@ -69,7 +68,7 @@ pub async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) @@ -80,11 +79,10 @@ pub async fn send_message_event_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; - db.transaction_ids.add_txnid( + services().transaction_ids.add_txnid( sender_user, sender_device, &body.txn_id, @@ -93,8 +91,6 @@ pub async fn send_message_event_route( drop(state_lock); - db.flush()?; - Ok(send_message_event::v3::Response::new( (*event_id).to_owned(), )) @@ -107,13 +103,12 @@ pub async fn send_message_event_route( /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) pub async fn get_message_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -133,7 +128,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); - db.rooms + services().rooms .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 @@ -147,13 +142,13 @@ pub async fn get_message_events_route( match body.dir { get_message_events::v3::Direction::Forward => { - let events_after: Vec<_> = db + let events_after: Vec<_> = services() .rooms .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - db.rooms + services().rooms .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -162,7 +157,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -184,13 +179,13 @@ pub async fn get_message_events_route( resp.chunk = events_after; } get_message_events::v3::Direction::Backward => { - let events_before: Vec<_> = db + let events_before: Vec<_> = services() .rooms .pdus_until(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - db.rooms + services().rooms .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -199,7 +194,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -225,7 +220,7 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? 
{ resp.state.push(member_event.to_state_event()); @@ -233,7 +228,7 @@ pub async fn get_message_events_route( } if let Some(next_token) = next_token { - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( sender_user, sender_device, &body.room_id, diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 773fef4..bc220b8 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, Result, Ruma}; +use crate::{utils, Result, Ruma, services}; use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; @@ -6,22 +6,21 @@ use std::time::Duration; /// /// Sets the presence state of the sender user. pub async fn set_presence_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in db.rooms.rooms_joined(sender_user) { + for room_id in services().rooms.rooms_joined(sender_user) { let room_id = room_id?; - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -32,12 +31,9 @@ pub async fn set_presence_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_presence::v3::Response {}) } @@ -47,20 +43,19 @@ pub async fn set_presence_route( /// /// - Only works if you share a room with the user pub async fn get_presence_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; - for room_id in db + for room_id in services() .rooms .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; - if let Some(presence) = db + if let Some(presence) = services() .rooms .edus .get_last_presence_event(sender_user, &room_id)? 
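
// The `/messages` handler above only ships each sender's `m.room.member` event to a device
// once: it checks `lazy_load_was_sent_before`, collects the senders that still need their
// member event into `lazy_loaded`, and then calls `lazy_load_mark_sent` for the batch. A rough
// in-memory sketch of that bookkeeping, assuming plain string ids; the server keys this state
// by user, device and room rather than keeping one flat set.

use std::collections::HashSet;

/// Tracks which member events have already been lazy-loaded to a device.
#[derive(Default)]
struct LazyLoad {
    // (user_id, device_id, room_id, member_user_id)
    sent: HashSet<(String, String, String, String)>,
}

impl LazyLoad {
    fn was_sent_before(&self, user: &str, device: &str, room: &str, member: &str) -> bool {
        self.sent.contains(&(
            user.to_owned(),
            device.to_owned(),
            room.to_owned(),
            member.to_owned(),
        ))
    }

    fn mark_sent(&mut self, user: &str, device: &str, room: &str, members: &[&str]) {
        for member in members {
            self.sent.insert((
                user.to_owned(),
                device.to_owned(),
                room.to_owned(),
                (*member).to_owned(),
            ));
        }
    }
}

fn main() {
    let mut ll = LazyLoad::default();
    let (user, device, room) = ("@me:example.org", "DEVICE", "!room:example.org");

    // First /messages call: @alice's member event has not been sent yet,
    // so her sender id is collected and afterwards marked as sent.
    let mut state_senders = Vec::new();
    if !ll.was_sent_before(user, device, room, "@alice:example.org") {
        state_senders.push("@alice:example.org");
    }
    ll.mark_sent(user, device, room, &state_senders);

    // Second call: the member event is skipped.
    assert!(ll.was_sent_before(user, device, room, "@alice:example.org"));
    println!("lazy-loaded members: {:?}", state_senders);
}
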
diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index acea19f..7a87bcd 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::{ client::{ @@ -20,16 +20,15 @@ use std::sync::Arc; /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services().users .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - let all_rooms_joined: Vec<_> = db + let all_rooms_joined: Vec<_> = services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -40,7 +39,7 @@ pub async fn set_displayname_route( content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( - db.rooms + services().rooms .room_state_get( &room_id, &StateEventType::RoomMember, @@ -70,7 +69,7 @@ pub async fn set_displayname_route( for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -79,19 +78,19 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = db + let _ = services() .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -102,12 +101,9 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_display_name::v3::Response {}) } @@ -117,14 +113,12 @@ pub async fn set_displayname_route( /// /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -139,7 +133,7 @@ pub async fn get_displayname_route( } Ok(get_display_name::v3::Response { - displayname: db.users.displayname(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, }) } @@ -149,18 +143,17 @@ pub async fn get_displayname_route( /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services().users .set_avatar_url(sender_user, 
body.avatar_url.clone())?; - db.users.set_blurhash(sender_user, body.blurhash.clone())?; + services().users.set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms - let all_joined_rooms: Vec<_> = db + let all_joined_rooms: Vec<_> = services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -171,7 +164,7 @@ pub async fn set_avatar_url_route( content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( - db.rooms + services().rooms .room_state_get( &room_id, &StateEventType::RoomMember, @@ -201,7 +194,7 @@ pub async fn set_avatar_url_route( for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -210,19 +203,19 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = db + let _ = services() .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -233,12 +226,10 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - &db.globals, + &services().globals, )?; } - db.flush()?; - Ok(set_avatar_url::v3::Response {}) } @@ -248,14 +239,12 @@ pub async fn set_avatar_url_route( /// /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -271,8 +260,8 @@ pub async fn get_avatar_url_route( } Ok(get_avatar_url::v3::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, }) } @@ -282,14 +271,12 @@ pub async fn get_avatar_url_route( /// /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -305,7 +292,7 @@ pub async fn get_profile_route( }); } - if !db.users.exists(&body.user_id)? { + if !services().users.exists(&body.user_id)? 
{ // Return 404 if this user doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, @@ -314,8 +301,8 @@ pub async fn get_profile_route( } Ok(get_profile::v3::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, - displayname: db.users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, }) } diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index dc45ea0..112fa00 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{ error::ErrorKind, @@ -16,12 +16,11 @@ use ruma::{ /// /// Retrieves the push rules event for this user. pub async fn get_pushrules_all_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event: PushRulesEvent = services() .account_data .get( None, @@ -42,12 +41,11 @@ pub async fn get_pushrules_all_route( /// /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event: PushRulesEvent = services() .account_data .get( None, @@ -98,7 +96,6 @@ pub async fn get_pushrule_route( /// /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -111,7 +108,7 @@ pub async fn set_pushrule_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -186,16 +183,13 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data.update( + services().account_data.update( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &event, - &db.globals, )?; - db.flush()?; - Ok(set_pushrule::v3::Response {}) } @@ -203,7 +197,6 @@ pub async fn set_pushrule_route( /// /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -215,7 +208,7 @@ pub async fn get_pushrule_actions_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -252,8 +245,6 @@ pub async fn get_pushrule_actions_route( _ => None, }; - db.flush()?; - Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), }) @@ -263,7 +254,6 @@ pub async fn get_pushrule_actions_route( /// /// Sets the actions of a single specified push rule for this user. 
 pub async fn set_pushrule_actions_route(
-    db: DatabaseGuard,
     body: Ruma,
 ) -> Result {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -275,7 +265,7 @@ pub async fn set_pushrule_actions_route(
         ));
     }
 
-    let mut event: PushRulesEvent = db
+    let mut event: PushRulesEvent = services()
         .account_data
         .get(
             None,
@@ -322,16 +312,13 @@ pub async fn set_pushrule_actions_route(
         _ => {}
     };
 
-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
         &event,
-        &db.globals,
     )?;
 
-    db.flush()?;
-
     Ok(set_pushrule_actions::v3::Response {})
 }
 
@@ -339,7 +326,6 @@ pub async fn set_pushrule_actions_route(
 ///
 /// Gets the enabled status of a single specified push rule for this user.
 pub async fn get_pushrule_enabled_route(
-    db: DatabaseGuard,
     body: Ruma,
 ) -> Result {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -351,7 +337,7 @@ pub async fn get_pushrule_enabled_route(
         ));
     }
 
-    let mut event: PushRulesEvent = db
+    let mut event: PushRulesEvent = services()
         .account_data
         .get(
             None,
@@ -393,8 +379,6 @@ pub async fn get_pushrule_enabled_route(
         _ => false,
     };
 
-    db.flush()?;
-
     Ok(get_pushrule_enabled::v3::Response { enabled })
 }
 
@@ -402,7 +386,6 @@ pub async fn get_pushrule_enabled_route(
 ///
 /// Sets the enabled status of a single specified push rule for this user.
 pub async fn set_pushrule_enabled_route(
-    db: DatabaseGuard,
     body: Ruma,
 ) -> Result {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -414,7 +397,7 @@ pub async fn set_pushrule_enabled_route(
         ));
     }
 
-    let mut event: PushRulesEvent = db
+    let mut event: PushRulesEvent = services()
         .account_data
         .get(
             None,
@@ -466,16 +449,13 @@ pub async fn set_pushrule_enabled_route(
         _ => {}
     }
 
-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
         &event,
-        &db.globals,
     )?;
 
-    db.flush()?;
-
     Ok(set_pushrule_enabled::v3::Response {})
 }
 
@@ -483,7 +463,6 @@ pub async fn set_pushrule_enabled_route(
 ///
 /// Deletes a single specified push rule for this user.
 pub async fn delete_pushrule_route(
-    db: DatabaseGuard,
     body: Ruma,
 ) -> Result {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -495,7 +474,7 @@ pub async fn delete_pushrule_route(
         ));
     }
 
-    let mut event: PushRulesEvent = db
+    let mut event: PushRulesEvent = services()
         .account_data
         .get(
             None,
@@ -537,16 +516,13 @@ pub async fn delete_pushrule_route(
         _ => {}
     }
 
-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
         &event,
-        &db.globals,
     )?;
 
-    db.flush()?;
-
     Ok(delete_pushrule::v3::Response {})
 }
 
@@ -554,13 +530,12 @@ pub async fn delete_pushrule_route(
 ///
 /// Gets all currently active pushers for the sender user.
pub async fn get_pushers_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::v3::Response { - pushers: db.pusher.get_pushers(sender_user)?, + pushers: services().pusher.get_pushers(sender_user)?, }) } @@ -570,15 +545,12 @@ pub async fn get_pushers_route( /// /// - TODO: Handle `append` pub async fn set_pushers_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); - db.pusher.set_pusher(sender_user, pusher)?; - - db.flush()?; + services().pusher.set_pusher(sender_user, pusher)?; Ok(set_pusher::v3::Response::default()) } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 91988a4..284ae65 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::RoomAccountDataEventType, @@ -14,7 +14,6 @@ use std::collections::BTreeMap; /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -24,25 +23,23 @@ pub async fn set_read_marker_route( event_id: body.fully_read.clone(), }, }; - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, &fully_read_event, - &db.globals, )?; if let Some(event) = &body.read_receipt { - db.rooms.edus.private_read_set( + services().rooms.edus.private_read_set( &body.room_id, sender_user, - db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + services().rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, - &db.globals, )?; - db.rooms + services().rooms .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -59,19 +56,16 @@ pub async fn set_read_marker_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event.to_owned(), receipts); - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_read_marker::v3::Response {}) } @@ -79,23 +73,21 @@ pub async fn set_read_marker_route( /// /// Sets private read marker and public read receipt EDU. pub async fn create_receipt_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.edus.private_read_set( + services().rooms.edus.private_read_set( &body.room_id, sender_user, - db.rooms + services().rooms .get_pdu_count(&body.event_id)? 
.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, - &db.globals, )?; - db.rooms + services().rooms .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -111,17 +103,16 @@ pub async fn create_receipt_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(body.event_id.to_owned(), receipts); - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, - &db.globals, )?; - db.flush()?; + services().flush()?; Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 059e0f5..d6699bc 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; +use crate::{Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, RoomEventType}, @@ -14,14 +14,13 @@ use serde_json::value::to_raw_value; /// /// - TODO: Handle txn id pub async fn redact_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -30,7 +29,7 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { @@ -43,14 +42,11 @@ pub async fn redact_event_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(redact_event::v3::Response { event_id }) } diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index 14768e1..2c2a549 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; +use crate::{utils::HtmlEscape, Error, Result, Ruma, services}; use ruma::{ api::client::{error::ErrorKind, room::report_content}, events::room::message, @@ -10,12 +10,11 @@ use ruma::{ /// Reports an inappropriate event to homeserver admins /// pub async fn report_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match db.rooms.get_pdu(&body.event_id)? { + let pdu = match services().rooms.get_pdu(&body.event_id)? 
{ Some(pdu) => pdu, _ => { return Err(Error::BadRequest( @@ -39,7 +38,7 @@ pub async fn report_event_route( )); }; - db.admin + services().admin .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ @@ -66,7 +65,5 @@ pub async fn report_event_route( ), )); - db.flush()?; - Ok(report_content::v3::Response {}) } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 5ae7224..14affc6 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,5 +1,5 @@ use crate::{ - client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma, + Error, Result, Ruma, service::pdu::PduBuilder, services, api::client_server::invite_helper, }; use ruma::{ api::client::{ @@ -46,19 +46,18 @@ use tracing::{info, warn}; /// - Send events implied by `name` and `topic` /// - Send invite events pub async fn create_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { use create_room::v3::RoomPreset; let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let room_id = RoomId::new(db.globals.server_name()); + let room_id = RoomId::new(services().globals.server_name()); - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -67,9 +66,9 @@ pub async fn create_room_route( ); let state_lock = mutex_state.lock().await; - if !db.globals.allow_room_creation() + if !services().globals.allow_room_creation() && !body.from_appservice - && !db.users.is_admin(sender_user, &db.rooms, &db.globals)? + && !services().users.is_admin(sender_user)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -83,12 +82,12 @@ pub async fn create_room_route( .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length let alias = - RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name())) + RoomAliasId::parse(format!("#{}:{}", localpart, services().globals.server_name())) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") })?; - if db.rooms.id_from_alias(&alias)?.is_some() { + if services().rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", @@ -100,7 +99,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if db.rooms.is_supported_version(&db, &room_version) { + if services().rooms.is_supported_version(&services(), &room_version) { room_version } else { return Err(Error::BadRequest( @@ -109,7 +108,7 @@ pub async fn create_room_route( )); } } - None => db.globals.default_room_version(), + None => services().globals.default_room_version(), }; let content = match &body.creation_content { @@ -163,7 +162,7 @@ pub async fn create_room_route( } // 1. The room create event - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -173,21 +172,20 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 2. 
Let the room creator join - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -198,7 +196,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; @@ -240,7 +237,7 @@ pub async fn create_room_route( } } - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) @@ -251,13 +248,12 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 4. Canonical room alias if let Some(room_alias_id) = &alias { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -271,7 +267,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -279,7 +274,7 @@ pub async fn create_room_route( // 5. Events set by preset // 5.1 Join Rules - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { @@ -294,12 +289,11 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.2 History Visibility - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -312,12 +306,11 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.3 Guest Access - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { @@ -331,7 +324,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; @@ -346,18 +338,18 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption() + if pdu_builder.event_type == RoomEventType::RoomEncryption && !services().globals.allow_encryption() { continue; } - db.rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?; + services().rooms + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; } // 7. 
Events implied by name and topic if let Some(name) = &body.name { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) @@ -368,13 +360,12 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } if let Some(topic) = &body.topic { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -387,7 +378,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -395,22 +385,20 @@ pub async fn create_room_route( // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; + let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await; } // Homeserver specific stuff if let Some(alias) = alias { - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + services().rooms.set_alias(&alias, Some(&room_id))?; } if body.visibility == room::Visibility::Public { - db.rooms.set_public(&room_id, true)?; + services().rooms.set_public(&room_id, true)?; } info!("{} created a room", sender_user); - db.flush()?; - Ok(create_room::v3::Response::new(room_id)) } @@ -420,12 +408,11 @@ pub async fn create_room_route( /// /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -433,7 +420,7 @@ pub async fn get_room_event_route( } Ok(get_room_event::v3::Response { - event: db + event: services() .rooms .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? @@ -447,12 +434,11 @@ pub async fn get_room_event_route( /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -460,7 +446,7 @@ pub async fn get_room_aliases_route( } Ok(aliases::v3::Response { - aliases: db + aliases: services() .rooms .room_aliases(&body.room_id) .filter_map(|a| a.ok()) @@ -479,12 +465,11 @@ pub async fn get_room_aliases_route( /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_supported_version(&db, &body.new_version) { + if !services().rooms.is_supported_version(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -492,12 +477,12 @@ pub async fn upgrade_room_route( } // Create a replacement room - let replacement_room = RoomId::new(db.globals.server_name()); - db.rooms - .get_or_create_shortroomid(&replacement_room, &db.globals)?; + let replacement_room = RoomId::new(services().globals.server_name()); + services().rooms + .get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -508,7 +493,7 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = db.rooms.build_and_append_pdu( + let tombstone_event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { @@ -522,14 +507,13 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; // Change lock to replacement room drop(state_lock); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -540,7 +524,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content @@ -588,7 +572,7 @@ pub async fn upgrade_room_route( )); } - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) @@ -599,21 +583,20 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; // Join the new room - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -624,7 +607,6 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; @@ -643,12 +625,12 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { + let event_content = match services().rooms.room_state_get(&body.room_id, &event_type, "")? { Some(v) => v.content.clone(), None => continue, // Skipping missing events. }; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: event_content, @@ -658,20 +640,19 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; } // Moves any local aliases to the new room - for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { - db.rooms - .set_alias(&alias, Some(&replacement_room), &db.globals)?; + for alias in services().rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { + services().rooms + .set_alias(&alias, Some(&replacement_room))?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content @@ -685,7 +666,7 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = db.rooms.build_and_append_pdu( + let _ = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) @@ -696,35 +677,12 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } - diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 686e3b5..b7eecd5 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ error::ErrorKind, search::search_events::{ @@ -15,7 +15,6 @@ use std::collections::BTreeMap; /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -24,7 +23,7 @@ pub async fn search_events_route( let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { - db.rooms + services().rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() @@ -35,14 +34,14 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !db.rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if let Some(search) = db + if let Some(search) = services() .rooms .search_pdus(&room_id, &search_criteria.search_term)? { @@ -85,7 +84,7 @@ pub async fn search_events_route( start: None, }, rank: None, - result: db + result: services() .rooms .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index c2a79ca..7feeb66 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::{ api::client::{ error::ErrorKind, @@ -41,7 +41,6 @@ pub async fn get_login_types_route( /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. 
pub async fn login_route( - db: DatabaseGuard, body: Ruma, ) -> Result { // Validate login method @@ -57,11 +56,11 @@ pub async fn login_route( return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = - UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_owned(), services().globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( + let hash = services().users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", ))?; @@ -85,7 +84,7 @@ pub async fn login_route( user_id } login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { - if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { + if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, jwt_decoding_key, @@ -93,7 +92,7 @@ pub async fn login_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; let username = token.claims.sub; - UserId::parse_with_server_name(username, db.globals.server_name()).map_err( + UserId::parse_with_server_name(username, services().globals.server_name()).map_err( |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), )? } else { @@ -122,15 +121,15 @@ pub async fn login_route( // Determine if device_id was provided and exists in the db for this user let device_exists = body.device_id.as_ref().map_or(false, |device_id| { - db.users + services().users .all_device_ids(&user_id) .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { - db.users.set_token(&user_id, &device_id, &token)?; + services().users.set_token(&user_id, &device_id, &token)?; } else { - db.users.create_device( + services().users.create_device( &user_id, &device_id, &token, @@ -140,12 +139,10 @@ pub async fn login_route( info!("{} logged in", user_id); - db.flush()?; - Ok(login::v3::Response { user_id, access_token: token, - home_server: Some(db.globals.server_name().to_owned()), + home_server: Some(services().globals.server_name().to_owned()), device_id, well_known: None, }) @@ -160,15 +157,12 @@ pub async fn login_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn logout_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - db.users.remove_device(sender_user, sender_device)?; - - db.flush()?; + services().users.remove_device(sender_user, sender_device)?; Ok(logout::v3::Response::new()) } @@ -185,16 +179,13 @@ pub async fn logout_route( /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. 
pub async fn logout_all_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(sender_user).flatten() { - db.users.remove_device(sender_user, &device_id)?; + for device_id in services().users.all_device_ids(sender_user).flatten() { + services().users.remove_device(sender_user, &device_id)?; } - db.flush()?; - Ok(logout_all::v3::Response::new()) } diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 4df953c..4e8d594 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::{ - database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse, + Error, Result, Ruma, RumaResponse, services, service::pdu::PduBuilder, }; use ruma::{ api::client::{ @@ -27,13 +27,11 @@ use ruma::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, &body.event_type, @@ -42,8 +40,6 @@ pub async fn send_state_event_for_key_route( ) .await?; - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(send_state_event::v3::Response { event_id }) } @@ -56,13 +52,12 @@ pub async fn send_state_event_for_key_route( /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -70,7 +65,6 @@ pub async fn send_state_event_for_empty_key_route( } let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, &body.event_type.to_string().into(), @@ -79,8 +73,6 @@ pub async fn send_state_event_for_empty_key_route( ) .await?; - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(send_state_event::v3::Response { event_id }.into()) } @@ -91,7 +83,6 @@ pub async fn send_state_event_for_empty_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -99,9 +90,9 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
.map(|event| { serde_json::from_str(event.content.get()) @@ -122,7 +113,7 @@ pub async fn get_state_events_route( } Ok(get_state_events::v3::Response { - room_state: db + room_state: services() .rooms .room_state_full(&body.room_id) .await? @@ -138,7 +129,6 @@ pub async fn get_state_events_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -146,9 +136,9 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -168,7 +158,7 @@ pub async fn get_state_events_for_key_route( )); } - let event = db + let event = services() .rooms .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( @@ -188,7 +178,6 @@ pub async fn get_state_events_for_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -196,9 +185,9 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -218,7 +207,7 @@ pub async fn get_state_events_for_empty_key_route( )); } - let event = db + let event = services() .rooms .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( @@ -234,7 +223,6 @@ pub async fn get_state_events_for_empty_key_route( } async fn send_state_event_for_key_helper( - db: &Database, sender: &UserId, room_id: &RoomId, event_type: &StateEventType, @@ -255,8 +243,8 @@ async fn send_state_event_for_key_helper( } for alias in aliases { - if alias.server_name() != db.globals.server_name() - || db + if alias.server_name() != services().globals.server_name() + || services() .rooms .id_from_alias(&alias)? 
.filter(|room| room == room_id) // Make sure it's the right room @@ -272,7 +260,7 @@ async fn send_state_event_for_key_helper( } let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -281,7 +269,7 @@ async fn send_state_event_for_key_helper( ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), @@ -291,7 +279,6 @@ async fn send_state_event_for_key_helper( }, sender_user, room_id, - db, &state_lock, )?; diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 0c294b7..cc4ebf6 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; +use crate::{Error, Result, Ruma, RumaResponse, services}; use ruma::{ api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -55,16 +55,13 @@ use tracing::error; /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached pub async fn sync_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; - let arc_db = Arc::new(db); - - let mut rx = match arc_db + let mut rx = match services() .globals .sync_receivers .write() @@ -77,7 +74,6 @@ pub async fn sync_events_route( v.insert((body.since.to_owned(), rx.clone())); tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body, @@ -93,7 +89,6 @@ pub async fn sync_events_route( o.insert((body.since.clone(), rx.clone())); tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body, @@ -127,7 +122,6 @@ pub async fn sync_events_route( } async fn sync_helper_wrapper( - db: Arc, sender_user: Box, sender_device: Box, body: sync_events::v3::IncomingRequest, @@ -136,7 +130,6 @@ async fn sync_helper_wrapper( let since = body.since.clone(); let r = sync_helper( - Arc::clone(&db), sender_user.clone(), sender_device.clone(), body, @@ -145,7 +138,7 @@ async fn sync_helper_wrapper( if let Ok((_, caching_allowed)) = r { if !caching_allowed { - match db + match services() .globals .sync_receivers .write() @@ -163,13 +156,10 @@ async fn sync_helper_wrapper( } } - drop(db); - let _ = tx.send(Some(r.map(|(r, _)| r))); } async fn sync_helper( - db: Arc, sender_user: Box, sender_device: Box, body: sync_events::v3::IncomingRequest, @@ -182,19 +172,19 @@ async fn sync_helper( }; // TODO: match body.set_presence { - db.rooms.edus.ping_presence(&sender_user)?; + services().rooms.edus.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(&sender_user, &sender_device); + let watcher = services().watch(&sender_user, &sender_device); - let next_batch = db.globals.current_count()?; + let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); // Load filter let filter = match body.filter { None => IncomingFilterDefinition::default(), Some(IncomingFilter::FilterDefinition(filter)) => filter, - Some(IncomingFilter::FilterId(filter_id)) => db + Some(IncomingFilter::FilterId(filter_id)) => 
services() .users .get_filter(&sender_user, &filter_id)? .unwrap_or_default(), @@ -221,12 +211,12 @@ async fn sync_helper( // Look for device list updates of this account device_list_updates.extend( - db.users + services().users .keys_changed(&sender_user.to_string(), since, None) .filter_map(|r| r.ok()), ); - let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services().rooms.rooms_joined(&sender_user).collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -234,7 +224,7 @@ async fn sync_helper( // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -247,8 +237,8 @@ async fn sync_helper( let timeline_pdus; let limited; - if db.rooms.last_timeline_count(&sender_user, &room_id)? > since { - let mut non_timeline_pdus = db + if services().rooms.last_timeline_count(&sender_user, &room_id)? > since { + let mut non_timeline_pdus = services() .rooms .pdus_until(&sender_user, &room_id, u64::MAX)? .filter_map(|r| { @@ -259,7 +249,7 @@ async fn sync_helper( r.ok() }) .take_while(|(pduid, _)| { - db.rooms + services().rooms .pdu_count(pduid) .map_or(false, |count| count > since) }); @@ -282,7 +272,7 @@ async fn sync_helper( } let send_notification_counts = !timeline_pdus.is_empty() - || db + || services() .rooms .edus .last_privateread_update(&sender_user, &room_id)? @@ -293,24 +283,24 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - db.rooms + services().rooms .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; // Database queries: - let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? { + let current_shortstatehash = if let Some(s) = services().rooms.current_shortstatehash(&room_id)? { s } else { error!("Room {} has no state", room_id); continue; }; - let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services().rooms.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services().rooms.room_joined_count(&room_id)?.unwrap_or(0); + let invited_member_count = services().rooms.room_invited_count(&room_id)?.unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -319,7 +309,7 @@ async fn sync_helper( // Go through all PDUs and for each member event, check if the user is still joined or // invited until we have 5 or we reach the end - for hero in db + for hero in services() .rooms .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus @@ -339,8 +329,8 @@ async fn sync_helper( if matches!( content.membership, MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) + ) && (services().rooms.is_joined(&user_id, &room_id)? + || services().rooms.is_invited(&user_id, &room_id)?) 
{ Ok::<_, Error>(Some(state_key.clone())) } else { @@ -381,17 +371,17 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -408,7 +398,7 @@ async fn sync_helper( || body.full_state || timeline_users.contains(&state_key) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -430,12 +420,12 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - db.rooms + services().rooms .lazy_load_reset(&sender_user, &sender_device, &room_id)?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -457,7 +447,7 @@ async fn sync_helper( // Incremental /sync let since_shortstatehash = since_shortstatehash.unwrap(); - let since_sender_member: Option = db + let since_sender_member: Option = services() .rooms .state_get( since_shortstatehash, @@ -477,12 +467,12 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; + let since_state_ids = services().rooms.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -515,14 +505,14 @@ async fn sync_helper( continue; } - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &event.sender, )? || lazy_load_send_redundant { - if let Some(member_event) = db.rooms.room_state_get( + if let Some(member_event) = services().rooms.room_state_get( &room_id, &StateEventType::RoomMember, event.sender.as_str(), @@ -533,7 +523,7 @@ async fn sync_helper( } } - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -541,13 +531,13 @@ async fn sync_helper( next_batch, ); - let encrypted_room = db + let encrypted_room = services() .rooms .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
.is_some(); let since_encryption = - db.rooms + services().rooms .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: @@ -580,7 +570,7 @@ async fn sync_helper( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { + if !share_encrypted_room(&sender_user, &user_id, &room_id)? { device_list_updates.insert(user_id); } } @@ -597,7 +587,7 @@ async fn sync_helper( if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( - db.rooms + services().rooms .room_members(&room_id) .flatten() .filter(|user_id| { @@ -606,7 +596,7 @@ async fn sync_helper( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, &sender_user, user_id, &room_id) + !share_encrypted_room(&sender_user, user_id, &room_id) .unwrap_or(false) }), ); @@ -629,14 +619,14 @@ async fn sync_helper( // Look for device list updates in this room device_list_updates.extend( - db.users + services().users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()), ); let notification_count = if send_notification_counts { Some( - db.rooms + services().rooms .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -647,7 +637,7 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( - db.rooms + services().rooms .highlight_count(&sender_user, &room_id)? .try_into() .expect("highlight count can't go that high"), @@ -659,7 +649,7 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) + Ok(Some(services().rooms.pdu_count(pdu_id)?.to_string())) })?; let room_events: Vec<_> = timeline_pdus @@ -667,7 +657,7 @@ async fn sync_helper( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let mut edus: Vec<_> = db + let mut edus: Vec<_> = services() .rooms .edus .readreceipts_since(&room_id, since) @@ -675,10 +665,10 @@ async fn sync_helper( .map(|(_, _, v)| v) .collect(); - if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since { + if services().rooms.edus.last_typing_update(&room_id, &services().globals)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typings_all(&room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -686,12 +676,12 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - db.rooms + services().rooms .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; let joined_room = JoinedRoom { account_data: RoomAccountData { - events: db + events: services() .account_data .changes_since(Some(&room_id), &sender_user, since)? .into_iter() @@ -731,9 +721,9 @@ async fn sync_helper( // Take presence updates from this room for (user_id, presence) in - db.rooms + services().rooms .edus - .presence_since(&room_id, since, &db.rooms, &db.globals)? + .presence_since(&room_id, since)? 
{ match presence_updates.entry(user_id) { Entry::Vacant(v) => { @@ -765,14 +755,14 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services().rooms.rooms_left(&sender_user).collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -783,7 +773,7 @@ async fn sync_helper( drop(insert_lock); } - let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; + let left_count = services().rooms.get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -807,14 +797,14 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services().rooms.rooms_invited(&sender_user).collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -825,7 +815,7 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; + let invite_count = services().rooms.get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -843,13 +833,13 @@ async fn sync_helper( } for user_id in left_encrypted_users { - let still_share_encrypted_room = db + let still_share_encrypted_room = services() .rooms .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( - db.rooms + services().rooms .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), @@ -864,7 +854,7 @@ async fn sync_helper( } // Remove all to-device events the device received *last time* - db.users + services().users .remove_to_device_events(&sender_user, &sender_device, since)?; let response = sync_events::v3::Response { @@ -882,7 +872,7 @@ async fn sync_helper( .collect(), }, account_data: GlobalAccountData { - events: db + events: services() .account_data .changes_since(None, &sender_user, since)? .into_iter() @@ -897,9 +887,9 @@ async fn sync_helper( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, + device_one_time_keys_count: services().users.count_one_time_keys(&sender_user, &sender_device)?, to_device: ToDevice { - events: db + events: services() .users .get_to_device_events(&sender_user, &sender_device)?, }, @@ -928,21 +918,19 @@ async fn sync_helper( } } -#[tracing::instrument(skip(db))] fn share_encrypted_room( - db: &Database, sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId, ) -> Result { - Ok(db + Ok(services() .rooms .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( - db.rooms + services().rooms .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? 
.is_some(), diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index 98d895c..bbea2d5 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -14,12 +14,11 @@ use std::collections::BTreeMap; /// /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = db + let mut tags_event = services() .account_data .get( Some(&body.room_id), @@ -36,16 +35,13 @@ pub async fn update_tag_route( .tags .insert(body.tag.clone().into(), body.tag_info.clone()); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, &tags_event, - &db.globals, )?; - db.flush()?; - Ok(create_tag::v3::Response {}) } @@ -55,12 +51,11 @@ pub async fn update_tag_route( /// /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = db + let mut tags_event = services() .account_data .get( Some(&body.room_id), @@ -74,16 +69,13 @@ pub async fn delete_tag_route( }); tags_event.content.tags.remove(&body.tag.clone().into()); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, &tags_event, - &db.globals, )?; - db.flush()?; - Ok(delete_tag::v3::Response {}) } @@ -93,13 +85,12 @@ pub async fn delete_tag_route( /// /// - Gets the tag event of the room account data. pub async fn get_tags_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::v3::Response { - tags: db + tags: services() .account_data .get( Some(&body.room_id), diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 51441dd..3a2f6c0 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,7 +1,7 @@ use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, @@ -14,14 +14,13 @@ use ruma::{ /// /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); // Check if this is a new transaction id - if db + if services() .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? 
.is_some() @@ -31,13 +30,13 @@ pub async fn send_event_to_device_route( for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { - if target_user_id.server_name() != db.globals.server_name() { + if target_user_id.server_name() != services().globals.server_name() { let mut map = BTreeMap::new(); map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); messages.insert(target_user_id.clone(), map); - db.sending.send_reliable_edu( + services().sending.send_reliable_edu( target_user_id.server_name(), serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { @@ -48,14 +47,14 @@ pub async fn send_event_to_device_route( }, )) .expect("DirectToDevice EDU can be serialized"), - db.globals.next_count()?, + services().globals.next_count()?, )?; continue; } match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( + DeviceIdOrAllDevices::DeviceId(target_device_id) => services().users.add_to_device_event( sender_user, target_user_id, &target_device_id, @@ -63,12 +62,11 @@ pub async fn send_event_to_device_route( event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, - &db.globals, )?, DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( sender_user, target_user_id, &target_device_id?, @@ -76,7 +74,6 @@ pub async fn send_event_to_device_route( event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, - &db.globals, )?; } } @@ -85,10 +82,8 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - db.transaction_ids + services().transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; - db.flush()?; - Ok(send_event_to_device::v3::Response {}) } diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index cac5a5f..afd5d6b 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -1,18 +1,17 @@ -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. pub async fn create_typing_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { use create_typing_event::v3::Typing; let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -20,16 +19,15 @@ pub async fn create_typing_event_route( } if let Typing::Yes(duration) = body.state { - db.rooms.edus.typing_add( + services().rooms.edus.typing_add( sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), - &db.globals, )?; } else { - db.rooms + services().rooms .edus - .typing_remove(sender_user, &body.room_id, &db.globals)?; + .typing_remove(sender_user, &body.room_id)?; } Ok(create_typing_event::v3::Response {}) diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index 349c139..60b4e2f 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -14,20 +14,19 @@ use ruma::{ /// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) /// and don't share a room with the sender pub async fn search_users_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; - let mut users = db.users.iter().filter_map(|user_id| { + let mut users = services().users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; let user = search_users::v3::User { user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).ok()?, - avatar_url: db.users.avatar_url(&user_id).ok()?, + display_name: services().users.displayname(&user_id).ok()?, + avatar_url: services().users.avatar_url(&user_id).ok()?, }; let user_id_matches = user @@ -50,11 +49,11 @@ pub async fn search_users_route( } let user_is_in_public_rooms = - db.rooms + services().rooms .rooms_joined(&user_id) .filter_map(|r| r.ok()) .any(|room| { - db.rooms + services().rooms .room_state_get(&room, &StateEventType::RoomJoinRules, "") .map_or(false, |event| { event.map_or(false, |event| { @@ -70,7 +69,7 @@ pub async fn search_users_route( return Some(user); } - let user_is_in_shared_rooms = db + let user_is_in_shared_rooms = services() .rooms .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 7e9de31..2a804f9 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use hmac::{Hmac, Mac, NewMac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; @@ -10,16 +10,15 @@ type HmacSha1 = Hmac; /// /// TODO: Returns information about the recommended turn server. 
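// Editorial sketch (not part of the patch): the hunks above drop the per-request
// `db: DatabaseGuard` argument and reach shared state through a global `services()`
// accessor instead. One way to back such an accessor is a once-initialized static;
// the `Services` struct, `SERVICES` static and `init_services` below are illustrative
// names and a simplified mechanism, not necessarily what Conduit itself ends up using.
use once_cell::sync::OnceCell;

pub struct Services {
    // globals, rooms, users, account_data, sending, ... would live here
}

static SERVICES: OnceCell<Services> = OnceCell::new();

/// Install the registry exactly once at startup.
pub fn init_services(services: Services) {
    if SERVICES.set(services).is_err() {
        panic!("services() initialized twice");
    }
}

/// Borrow the registry; panics if called before initialization.
pub fn services() -> &'static Services {
    SERVICES.get().expect("services() called before initialization")
}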
pub async fn turn_server_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let turn_secret = db.globals.turn_secret(); + let turn_secret = services().globals.turn_secret(); let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), + SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()), ) .expect("time is valid"); @@ -34,15 +33,15 @@ pub async fn turn_server_route( (username, password) } else { ( - db.globals.turn_username().clone(), - db.globals.turn_password().clone(), + services().globals.turn_username().clone(), + services().globals.turn_password().clone(), ) }; Ok(get_turn_server_info::v3::Response { username, password, - uris: db.globals.turn_uris().to_vec(), - ttl: Duration::from_secs(db.globals.turn_ttl()), + uris: services().globals.turn_uris().to_vec(), + ttl: Duration::from_secs(services().globals.turn_ttl()), }) } diff --git a/src/api/mod.rs b/src/api/mod.rs new file mode 100644 index 0000000..68589be --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1,4 @@ +pub mod client_server; +pub mod server_server; +pub mod appservice_server; +pub mod ruma_wrapper; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 45e9d9a..babf2a7 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{database::DatabaseGuard, server_server, Error, Result}; +use crate::{Error, Result, api::server_server, services}; #[async_trait] impl FromRequest for Ruma @@ -44,7 +44,6 @@ where } let metadata = T::METADATA; - let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; let path_params = Path::>::from_request(req).await?; @@ -71,7 +70,7 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let appservices = db.appservice.all().unwrap(); + let appservices = services().appservice.all().unwrap(); let appservice_registration = appservices.iter().find(|(_id, registration)| { registration .get("as_token") @@ -91,14 +90,14 @@ where .unwrap() .as_str() .unwrap(), - db.globals.server_name(), + services().globals.server_name(), ) .unwrap() }, |s| UserId::parse(s).unwrap(), ); - if !db.users.exists(&user_id).unwrap() { + if !services().users.exists(&user_id).unwrap() { return Err(Error::BadRequest( ErrorKind::Forbidden, "User does not exist.", @@ -124,7 +123,7 @@ where } }; - match db.users.find_from_token(token).unwrap() { + match services().users.find_from_token(token).unwrap() { None => { return Err(Error::BadRequest( ErrorKind::UnknownToken { soft_logout: false }, @@ -185,7 +184,7 @@ where ( "destination".to_owned(), CanonicalJsonValue::String( - db.globals.server_name().as_str().to_owned(), + services().globals.server_name().as_str().to_owned(), ), ), ( @@ -199,7 +198,6 @@ where }; let keys_result = server_server::fetch_signing_keys( - &db, &x_matrix.origin, vec![x_matrix.key.to_owned()], ) @@ -251,7 +249,7 @@ where if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) + UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid") }); @@ -261,7 +259,7 @@ where .and_then(|auth| auth.get("session")) 
.and_then(|session| session.as_str()) .and_then(|session| { - db.uiaa.get_uiaa_request( + services().uiaa.get_uiaa_request( &user_id, &sender_device.clone().unwrap_or_else(|| "".into()), session, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f60f735..776777d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,8 +1,6 @@ use crate::{ - client_server::{self, claim_keys_helper, get_keys_helper}, - database::{rooms::CompressedStateEvent, DatabaseGuard}, - pdu::EventHash, - utils, Database, Error, PduEvent, Result, Ruma, + api::client_server::{self, claim_keys_helper, get_keys_helper}, + utils, Error, PduEvent, Result, Ruma, services, service::pdu::{gen_event_id_canonical_json, PduBuilder}, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -126,22 +124,21 @@ impl FedDest { } } -#[tracing::instrument(skip(globals, request))] +#[tracing::instrument(skip(request))] pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, destination: &ServerName, request: T, ) -> Result where T: Debug, { - if !globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let mut write_destination_to_cache = false; - let cached_result = globals + let cached_result = services().globals .actual_destination_cache .read() .unwrap() @@ -153,7 +150,7 @@ where } else { write_destination_to_cache = true; - let result = find_actual_destination(globals, destination).await; + let result = find_actual_destination(destination).await; (result.0, result.1.into_uri_string()) }; @@ -194,15 +191,15 @@ where .to_string() .into(), ); - request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); + request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); ruma::signatures::sign_json( - globals.server_name().as_str(), - globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut request_json, ) .expect("our request json is what ruma expects"); @@ -227,7 +224,7 @@ where AUTHORIZATION, HeaderValue::from_str(&format!( "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - globals.server_name(), + services().globals.server_name(), s.0, s.1 )) @@ -241,7 +238,7 @@ where let url = reqwest_request.url().clone(); - let response = globals.federation_client().execute(reqwest_request).await; + let response = services().globals.federation_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -281,7 +278,7 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { - globals.actual_destination_cache.write().unwrap().insert( + services().globals.actual_destination_cache.write().unwrap().insert( Box::::from(destination), (actual_destination, host), ); @@ -332,9 +329,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -#[tracing::instrument(skip(globals))] async fn find_actual_destination( - globals: 
&crate::database::globals::Globals, destination: &'_ ServerName, ) -> (FedDest, FedDest) { let destination_str = destination.as_str().to_owned(); @@ -350,7 +345,7 @@ async fn find_actual_destination( let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - match request_well_known(globals, destination.as_str()).await { + match request_well_known(destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); @@ -364,17 +359,17 @@ async fn find_actual_destination( } else { // Delegated hostname has no port in this branch if let Some(hostname_override) = - query_srv_record(globals, &delegated_hostname).await + query_srv_record(&delegated_hostname).await { // 3.3: SRV lookup successful let force_port = hostname_override.port(); - if let Ok(override_ip) = globals + if let Ok(override_ip) = services().globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - globals.tls_name_override.write().unwrap().insert( + services().globals.tls_name_override.write().unwrap().insert( delegated_hostname.clone(), ( override_ip.iter().collect(), @@ -400,17 +395,17 @@ async fn find_actual_destination( } // 4: No .well-known or an error occured None => { - match query_srv_record(globals, &destination_str).await { + match query_srv_record(&destination_str).await { // 4: SRV record found Some(hostname_override) => { let force_port = hostname_override.port(); - if let Ok(override_ip) = globals + if let Ok(override_ip) = services().globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - globals.tls_name_override.write().unwrap().insert( + services().globals.tls_name_override.write().unwrap().insert( hostname.clone(), (override_ip.iter().collect(), force_port.unwrap_or(8448)), ); @@ -448,12 +443,10 @@ async fn find_actual_destination( (actual_destination, hostname) } -#[tracing::instrument(skip(globals))] async fn query_srv_record( - globals: &crate::database::globals::Globals, hostname: &'_ str, ) -> Option { - if let Ok(Some(host_port)) = globals + if let Ok(Some(host_port)) = services().globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await @@ -472,13 +465,11 @@ async fn query_srv_record( } } -#[tracing::instrument(skip(globals))] async fn request_well_known( - globals: &crate::database::globals::Globals, destination: &str, ) -> Option { let body: serde_json::Value = serde_json::from_str( - &globals + &services().globals .default_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -499,10 +490,9 @@ async fn request_well_known( /// /// Get version information on this server. pub async fn get_server_version_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -521,24 +511,24 @@ pub async fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. 
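// Editorial sketch of the `.well-known` delegation step that `request_well_known`
// above performs: an HTTPS GET of https://<name>/.well-known/matrix/server, returning
// the `m.server` field if present. The real code goes through
// services().globals.default_client(); a plain reqwest::Client is assumed here so the
// sketch stays self-contained, and `well_known_server` is an illustrative name.
async fn well_known_server(client: &reqwest::Client, destination: &str) -> Option<String> {
    let body: serde_json::Value = serde_json::from_str(
        &client
            .get(format!("https://{destination}/.well-known/matrix/server"))
            .send()
            .await
            .ok()?
            .text()
            .await
            .ok()?,
    )
    .ok()?;
    // Delegated hostname, e.g. "matrix.example.org:8448"
    Some(body.get("m.server")?.as_str()?.to_owned())
}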
// Response type for this endpoint is Json because we need to calculate a signature for the response -pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { - if !db.globals.allow_federation() { +pub async fn get_server_keys_route() -> Result { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()) + format!("ed25519:{}", services().globals.keypair().version()) .try_into() .expect("found invalid server signing keys in DB"), VerifyKey { - key: Base64::new(db.globals.keypair().public_key().to_vec()), + key: Base64::new(services().globals.keypair().public_key().to_vec()), }, ); let mut response = serde_json::from_slice( get_server_keys::v2::Response { server_key: Raw::new(&ServerSigningKeys { - server_name: db.globals.server_name().to_owned(), + server_name: services().globals.server_name().to_owned(), verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), @@ -556,8 +546,8 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result Result impl IntoResponse { - get_server_keys_route(db).await +pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { + get_server_keys_route().await } /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( - &db, None, body.limit, body.since.as_deref(), @@ -608,15 +596,13 @@ pub async fn get_public_rooms_filtered_route( /// /// Lists the public rooms on this server. pub async fn get_public_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( - &db, None, body.limit, body.since.as_deref(), @@ -637,10 +623,9 @@ pub async fn get_public_rooms_route( /// /// Push EDUs and PDUs to this server. 
pub async fn send_transaction_message_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -663,7 +648,7 @@ pub async fn send_transaction_message_route( for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -684,10 +669,10 @@ pub async fn send_transaction_message_route( } }; - acl_check(&sender_servername, &room_id, &db)?; + acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( - db.globals + services().globals .roomid_mutex_federation .write() .unwrap() @@ -698,13 +683,12 @@ pub async fn send_transaction_message_route( let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - handle_incoming_pdu( + services().rooms.event_handler.handle_incoming_pdu( &sender_servername, &event_id, &room_id, value, true, - &db, &pub_key_map, ) .await @@ -743,7 +727,7 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services().rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -760,11 +744,10 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( &user_id, &room_id, event, - &db.globals, )?; } else { // TODO fetch missing events @@ -774,26 +757,24 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if services().rooms.is_joined(&typing.user_id, &typing.room_id)? { if typing.typing { - db.rooms.edus.typing_add( + services().rooms.edus.typing_add( &typing.user_id, &typing.room_id, 3000 + utils::millis_since_unix_epoch(), - &db.globals, )?; } else { - db.rooms.edus.typing_remove( + services().rooms.edus.typing_remove( &typing.user_id, &typing.room_id, - &db.globals, )?; } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + services().users + .mark_device_key_update(&user_id)?; } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -802,7 +783,7 @@ pub async fn send_transaction_message_route( messages, }) => { // Check if this is a new transaction id - if db + if services() .transaction_ids .existing_txnid(&sender, None, &message_id)? .is_some() @@ -814,7 +795,7 @@ pub async fn send_transaction_message_route( for (target_device_id_maybe, event) in map { match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( + services().users.add_to_device_event( &sender, target_user_id, target_device_id, @@ -825,13 +806,12 @@ pub async fn send_transaction_message_route( "Event is invalid", ) })?, - &db.globals, )? 
} DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( &sender, target_user_id, &target_device_id?, @@ -842,7 +822,6 @@ pub async fn send_transaction_message_route( "Event is invalid", ) })?, - &db.globals, )?; } } @@ -851,7 +830,7 @@ pub async fn send_transaction_message_route( } // Save transaction id with empty data - db.transaction_ids + services().transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } Edu::SigningKeyUpdate(SigningKeyUpdateContent { @@ -863,13 +842,11 @@ pub async fn send_transaction_message_route( continue; } if let Some(master_key) = master_key { - db.users.add_cross_signing_keys( + services().users.add_cross_signing_keys( &user_id, &master_key, &self_signing_key, &None, - &db.rooms, - &db.globals, )?; } } @@ -877,8 +854,6 @@ pub async fn send_transaction_message_route( } } - db.flush()?; - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } @@ -886,14 +861,13 @@ pub async fn send_transaction_message_route( /// fetch them from the server and save to our DB. #[tracing::instrument(skip_all)] pub(crate) async fn fetch_signing_keys( - db: &Database, origin: &ServerName, signature_ids: Vec, ) -> Result> { let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - let permit = db + let permit = services() .globals .servername_ratelimiter .read() @@ -904,7 +878,7 @@ pub(crate) async fn fetch_signing_keys( let permit = match permit { Some(p) => p, None => { - let mut write = db.globals.servername_ratelimiter.write().unwrap(); + let mut write = services().globals.servername_ratelimiter.write().unwrap(); let s = Arc::clone( write .entry(origin.to_owned()) @@ -916,7 +890,7 @@ pub(crate) async fn fetch_signing_keys( } .await; - let back_off = |id| match db + let back_off = |id| match services() .globals .bad_signature_ratelimiter .write() @@ -929,7 +903,7 @@ pub(crate) async fn fetch_signing_keys( hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_signature_ratelimiter .read() @@ -950,7 +924,7 @@ pub(crate) async fn fetch_signing_keys( trace!("Loading signing keys for {}", origin); - let mut result: BTreeMap<_, _> = db + let mut result: BTreeMap<_, _> = services() .globals .signing_keys_for(origin)? 
.into_iter() @@ -963,14 +937,14 @@ pub(crate) async fn fetch_signing_keys( debug!("Fetching signing keys for {} over federation", origin); - if let Some(server_key) = db + if let Some(server_key) = services() .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .send_federation_request(origin, get_server_keys::v2::Request::new()) .await .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { - db.globals.add_signing_key(origin, server_key.clone())?; + services().globals.add_signing_key(origin, server_key.clone())?; result.extend( server_key @@ -990,12 +964,11 @@ pub(crate) async fn fetch_signing_keys( } } - for server in db.globals.trusted_servers() { + for server in services().globals.trusted_servers() { debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = db + if let Some(server_keys) = services() .sending .send_federation_request( - &db.globals, server, get_remote_server_keys::v2::Request::new( origin, @@ -1018,7 +991,7 @@ pub(crate) async fn fetch_signing_keys( { trace!("Got signing keys: {:?}", server_keys); for k in server_keys { - db.globals.add_signing_key(origin, k.clone())?; + services().globals.add_signing_key(origin, k.clone())?; result.extend( k.verify_keys .into_iter() @@ -1047,11 +1020,10 @@ pub(crate) async fn fetch_signing_keys( )) } -#[tracing::instrument(skip(starting_events, db))] +#[tracing::instrument(skip(starting_events))] pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, starting_events: Vec>, - db: &'a Database, ) -> Result> + 'a> { const NUM_BUCKETS: usize = 50; @@ -1059,7 +1031,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut i = 0; for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let short = services().rooms.get_or_create_shorteventid(&id)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); i += 1; @@ -1078,7 +1050,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.get_auth_chain_from_cache(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1090,13 +1062,13 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); - db.rooms + let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); + services().rooms .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( "cache missed event {} with auth chain len {}", @@ -1118,7 +1090,7 @@ pub(crate) async fn get_auth_chain<'a>( misses2 ); let chunk_cache = Arc::new(chunk_cache); - db.rooms + services().rooms .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -1132,28 +1104,27 @@ pub(crate) async fn get_auth_chain<'a>( Ok(full_auth_chain .into_iter() - .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) + .filter_map(move |sid| services().rooms.get_eventid_from_short(sid).ok())) } -#[tracing::instrument(skip(event_id, db))] +#[tracing::instrument(skip(event_id))] fn get_auth_chain_inner( room_id: &RoomId, event_id: &EventId, - db: &Database, ) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { - match db.rooms.get_pdu(&event_id) { + match services().rooms.get_pdu(&event_id) { Ok(Some(pdu)) => { if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { - let sauthevent = db + let sauthevent = services() .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; + .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { found.insert(sauthevent); @@ -1179,10 +1150,9 @@ fn get_auth_chain_inner( /// /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1191,7 +1161,7 @@ pub async fn get_event_route( .as_ref() .expect("server is authenticated"); - let event = db + let event = services() .rooms .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1204,7 +1174,7 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? { + if !services().rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -1212,7 +1182,7 @@ pub async fn get_event_route( } Ok(get_event::v1::Response { - origin: db.globals.server_name().to_owned(), + origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event(event), }) @@ -1222,10 +1192,9 @@ pub async fn get_event_route( /// /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1234,21 +1203,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { + if let Some(pdu) = services().rooms.get_pdu_json(&queued_events[i])? { let room_id_str = pdu .get("room_id") .and_then(|val| val.as_str()) @@ -1295,10 +1264,9 @@ pub async fn get_missing_events_route( /// /// - This does not include the event itself pub async fn get_event_authorization_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1307,16 +1275,16 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let event = db + let event = services() .rooms .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1329,11 +1297,11 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) + .filter_map(|id| services().rooms.get_pdu_json(&id).ok()?) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1343,10 +1311,9 @@ pub async fn get_event_authorization_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1355,16 +1322,16 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let shortstatehash = db + let shortstatehash = services() .rooms .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( @@ -1372,25 +1339,25 @@ pub async fn get_room_state_route( "Pdu state not found.", ))?; - let pdus = db + let pdus = services() .rooms .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id).unwrap().unwrap(), + services().rooms.get_pdu_json(&id).unwrap().unwrap(), ) }) .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - db.rooms.get_pdu_json(&id).map(|maybe_json| { + services().rooms.get_pdu_json(&id).map(|maybe_json| { PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) }) }) @@ -1404,10 +1371,9 @@ pub async fn get_room_state_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_ids_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1416,16 +1382,16 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let shortstatehash = db + let shortstatehash = services() .rooms .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( @@ -1433,7 +1399,7 @@ pub async fn get_room_state_ids_route( "Pdu state not found.", ))?; - let pdu_ids = db + let pdu_ids = services() .rooms .state_full_ids(shortstatehash) .await? @@ -1442,7 +1408,7 @@ pub async fn get_room_state_ids_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1454,14 +1420,13 @@ pub async fn get_room_state_ids_route( /// /// Creates a join template. pub async fn create_join_event_template_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - if !db.rooms.exists(&body.room_id)? { + if !services().rooms.exists(&body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", @@ -1473,11 +1438,21 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1502,7 +1477,8 @@ pub async fn create_join_event_template_route( } } - if !body.ver.contains(&room_version_id) { + let room_version_id = services().rooms.state.get_room_version(&body.room_id); + if !body.ver.contains(room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: room_version_id, @@ -1523,10 +1499,15 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let state_key = body.user_id.to_string(); - let kind = StateEventType::RoomMember; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, &body.user_id, &body.room_id, &state_lock); - let (pdu, pdu_json) = create_hash_and_sign_event(); + drop(state_lock); Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), @@ -1535,26 +1516,25 @@ pub async fn create_join_event_template_route( } async fn create_join_event( - db: &DatabaseGuard, sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - if !db.rooms.exists(room_id)? { + if !services().rooms.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", )); } - acl_check(sender_servername, room_id, db)?; + acl_check(sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db + let join_rules_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; @@ -1581,7 +1561,7 @@ async fn create_join_event( } // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = db + let shortstatehash = services() .rooms .current_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( @@ -1593,7 +1573,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1614,7 +1594,7 @@ async fn create_join_event( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; let mutex = Arc::clone( - db.globals + services().globals .roomid_mutex_federation .write() .unwrap() @@ -1622,7 +1602,7 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) + let pdu_id = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) .await .map_err(|e| { warn!("Error while handling incoming send join PDU: {}", e); @@ -1637,32 +1617,29 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), - db, ) .await?; - let servers = db + let servers = services() .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != services().globals.server_name()); - db.sending.send_pdu(servers, &pdu_id)?; - - db.flush()?; + services().sending.send_pdu(servers, &pdu_id)?; Ok(RoomState { auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|id| services().rooms.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) + .filter_map(|(_, id)| services().rooms.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1672,7 +1649,6 @@ async fn create_join_event( /// /// Submits a signed join event. pub async fn create_join_event_v1_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_servername = body @@ -1680,7 +1656,7 @@ pub async fn create_join_event_v1_route( .as_ref() .expect("server is authenticated"); - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v1::Response { room_state }) } @@ -1689,7 +1665,6 @@ pub async fn create_join_event_v1_route( /// /// Submits a signed join event. pub async fn create_join_event_v2_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_servername = body @@ -1697,7 +1672,7 @@ pub async fn create_join_event_v2_route( .as_ref() .expect("server is authenticated"); - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v2::Response { room_state }) } @@ -1706,10 +1681,9 @@ pub async fn create_join_event_v2_route( /// /// Invites a remote user to a room. 
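// Editorial sketch of the per-room locking pattern the join handlers above rely on:
// one tokio Mutex per room id, kept in a shared map (roomid_mutex_federation /
// roomid_mutex_state in the patch), cloned out and held for the critical section.
// `RoomLocks` and `with_room_lock` are illustrative names; the real maps are keyed by
// ruma's room-id types rather than String.
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};
use tokio::sync::Mutex;

type RoomLocks = RwLock<HashMap<String, Arc<Mutex<()>>>>;

async fn with_room_lock(locks: &RoomLocks, room_id: &str) {
    let mutex = Arc::clone(
        locks
            .write()
            .unwrap()
            .entry(room_id.to_owned())
            .or_default(),
    );
    let _guard = mutex.lock().await;
    // ... handle the incoming PDU while no other task can touch this room ...
}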
pub async fn create_invite_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1718,9 +1692,9 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - if !db.rooms.is_supported_version(&db, &body.room_version) { + if !services().rooms.is_supported_version(&body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -1733,8 +1707,8 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut signed_event, &body.room_version, ) @@ -1793,20 +1767,17 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); // If the room already exists, the remote server will notify us about the join via /send - if !db.rooms.exists(&pdu.room_id)? { - db.rooms.update_membership( + if !services().rooms.exists(&pdu.room_id)? { + services().rooms.update_membership( &body.room_id, &invited_user, MembershipState::Invite, &sender, Some(invite_state), - &db, true, )?; } - db.flush()?; - Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), }) @@ -1816,10 +1787,9 @@ pub async fn create_invite_route( /// /// Gets information on all devices of the user. pub async fn get_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1830,19 +1800,19 @@ pub async fn get_devices_route( Ok(get_devices::v1::Response { user_id: body.user_id.clone(), - stream_id: db + stream_id: services() .users .get_devicelist_version(&body.user_id)? .unwrap_or(0) .try_into() .expect("version will not grow that large"), - devices: db + devices: services() .users .all_devices_metadata(&body.user_id) .filter_map(|r| r.ok()) .filter_map(|metadata| { Some(UserDevice { - keys: db + keys: services() .users .get_device_keys(&body.user_id, &metadata.device_id) .ok()??, @@ -1851,10 +1821,10 @@ pub async fn get_devices_route( }) }) .collect(), - master_key: db + master_key: services() .users .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, - self_signing_key: db + self_signing_key: services() .users .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, }) @@ -1864,14 +1834,13 @@ pub async fn get_devices_route( /// /// Resolve a room alias to a room id. pub async fn get_room_information_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let room_id = db + let room_id = services() .rooms .id_from_alias(&body.room_alias)? 
.ok_or(Error::BadRequest( @@ -1881,7 +1850,7 @@ pub async fn get_room_information_route( Ok(get_room_information::v1::Response { room_id, - servers: vec![db.globals.server_name().to_owned()], + servers: vec![services().globals.server_name().to_owned()], }) } @@ -1889,10 +1858,9 @@ pub async fn get_room_information_route( /// /// Gets information on a profile. pub async fn get_profile_information_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1901,17 +1869,17 @@ pub async fn get_profile_information_route( let mut blurhash = None; match &body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::DisplayName) => displayname = services().users.displayname(&body.user_id)?, Some(ProfileField::AvatarUrl) => { - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)? + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)? } // TODO: what to do with custom Some(_) => {} None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)?; + displayname = services().users.displayname(&body.user_id)?; + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)?; } } @@ -1926,10 +1894,9 @@ pub async fn get_profile_information_route( /// /// Gets devices and identity keys for the given users. pub async fn get_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1937,12 +1904,9 @@ pub async fn get_keys_route( None, &body.device_keys, |u| Some(u.server_name()) == body.sender_servername.as_deref(), - &db, ) .await?; - db.flush()?; - Ok(get_keys::v1::Response { device_keys: result.device_keys, master_keys: result.master_keys, @@ -1954,16 +1918,13 @@ pub async fn get_keys_route( /// /// Claims one-time keys. 
pub async fn claim_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let result = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; + let result = claim_keys_helper(&body.one_time_keys).await?; Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys, @@ -1974,7 +1935,6 @@ pub async fn claim_keys_route( pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<()> { let signatures = event .get("signatures") @@ -1996,7 +1956,6 @@ pub(crate) async fn fetch_required_signing_keys( let signature_ids = signature_object.keys().cloned().collect::>(); let fetch_res = fetch_signing_keys( - db, signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, @@ -2028,7 +1987,6 @@ fn get_server_keys_from_cache( servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - db: &Database, ) -> Result<()> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -2043,7 +2001,7 @@ fn get_server_keys_from_cache( let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -2092,7 +2050,7 @@ fn get_server_keys_from_cache( trace!("Loading signing keys for {}", origin); - let result: BTreeMap<_, _> = db + let result: BTreeMap<_, _> = services() .globals .signing_keys_for(origin)? .into_iter() @@ -2114,7 +2072,6 @@ pub(crate) async fn fetch_join_signing_keys( event: &create_join_event::v2::Response, room_version: &RoomVersionId, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<()> { let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = BTreeMap::new(); @@ -2127,10 +2084,10 @@ pub(crate) async fn fetch_join_signing_keys( // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); } for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); } drop(pkm); @@ -2141,12 +2098,11 @@ pub(crate) async fn fetch_join_signing_keys( return Ok(()); } - for server in db.globals.trusted_servers() { + for server in services().globals.trusted_servers() { trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = db + if let Ok(keys) = services() .sending .send_federation_request( - &db.globals, server, get_remote_server_keys_batch::v2::Request { server_keys: servers.clone(), @@ -2164,7 +2120,7 @@ pub(crate) async fn fetch_join_signing_keys( // TODO: Check signature from trusted server? servers.remove(&k.server_name); - let result = db + let result = services() .globals .add_signing_key(&k.server_name, k.clone())? 
.into_iter() @@ -2184,9 +2140,8 @@ pub(crate) async fn fetch_join_signing_keys( .into_iter() .map(|(server, _)| async move { ( - db.sending + services().sending .send_federation_request( - &db.globals, &server, get_server_keys::v2::Request::new(), ) @@ -2198,7 +2153,7 @@ pub(crate) async fn fetch_join_signing_keys( while let Some(result) = futures.next().await { if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = db + let result: BTreeMap<_, _> = services() .globals .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? .into_iter() @@ -2216,8 +2171,8 @@ pub(crate) async fn fetch_join_signing_keys( } /// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { - let acl_event = match db +fn acl_check(server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = match services() .rooms .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? { diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 29325bd..93660f9 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -30,7 +30,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync { fn open(config: &Config) -> Result where Self: Sized; - fn open_tree(&self, name: &'static str) -> Result>; + fn open_tree(&self, name: &'static str) -> Result>; fn flush(&self) -> Result<()>; fn cleanup(&self) -> Result<()> { Ok(()) @@ -40,7 +40,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync { } } -pub trait KeyValueTree: Send + Sync { +pub trait KvTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 2cf9d5e..1388dc3 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,4 +1,4 @@ -use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; +use super::{super::Config, watchers::Watchers, KvTree, KeyValueDatabaseEngine}; use crate::{utils, Result}; use std::{ future::Future, @@ -51,7 +51,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O db_opts } -impl DatabaseEngine for Arc { +impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); @@ -83,7 +83,7 @@ impl DatabaseEngine for Arc { })) } - fn open_tree(&self, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist let _ = self @@ -129,7 +129,7 @@ impl RocksDbEngineTree<'_> { } } -impl Tree for RocksDbEngineTree<'_> { +impl KvTree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { Ok(self.db.rocks.get_cf(&self.cf(), key)?) 
} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 7cfa81a..02d4dbd 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,4 +1,4 @@ -use super::{watchers::Watchers, DatabaseEngine, Tree}; +use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; use crate::{database::Config, Result}; use parking_lot::{Mutex, MutexGuard}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; @@ -80,7 +80,7 @@ impl Engine { } } -impl DatabaseEngine for Arc { +impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let path = Path::new(&config.database_path).join("conduit.db"); @@ -105,7 +105,7 @@ impl DatabaseEngine for Arc { Ok(arc) } - fn open_tree(&self, name: &str) -> Result> { + fn open_tree(&self, name: &str) -> Result> { self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { @@ -189,7 +189,7 @@ impl SqliteTable { } } -impl Tree for SqliteTable { +impl KvTree for SqliteTable { fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index 66a2a5c..eae2cfb 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,6 +1,8 @@ +use crate::{database::KeyValueDatabase, service, utils, Error}; + impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index 0c09c17..189571f 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -1,13 +1,13 @@ -mod account_data; -mod admin; +//mod account_data; +//mod admin; mod appservice; -mod globals; -mod key_backups; -mod media; -mod pdu; +//mod globals; +//mod key_backups; +//mod media; +//mod pdu; mod pusher; mod rooms; -mod sending; +//mod sending; mod transaction_ids; mod uiaa; mod users; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 94374ab..b77170d 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,3 +1,7 @@ +use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; + +use crate::{service, database::KeyValueDatabase, Error}; + impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index b00eb3b..a9236a7 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,4 +1,8 @@ -impl service::room::alias::Data for KeyValueDatabase { +use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; + +use crate::{service, database::KeyValueDatabase, utils, Error, services}; + +impl service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, @@ -8,7 +12,7 @@ impl service::room::alias::Data for KeyValueDatabase { .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); 
aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; Ok(()) } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index f42de45..44a580c 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,10 +1,14 @@ -impl service::room::directory::Data for KeyValueDatabase { +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; + self.publicroomids.insert(room_id.as_bytes(), &[]) } fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; + self.publicroomids.remove(room_id.as_bytes()) } fn is_public_room(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs new file mode 100644 index 0000000..9ffd33d --- /dev/null +++ b/src/database/key_value/rooms/edus/mod.rs @@ -0,0 +1,3 @@ +mod presence; +mod typing; +mod read_receipt; diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1978ce7..9f3977d 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,4 +1,10 @@ -impl service::room::edus::presence::Data for KeyValueDatabase { +use std::collections::HashMap; + +use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; + +use crate::{service, database::KeyValueDatabase, utils, Error, services}; + +impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, @@ -7,7 +13,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { ) -> Result<()> { // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
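A pattern worth calling out in the alias, presence, read-receipt and typing hunks here: tree keys are built by byte-encoding the IDs, joining them with a 0xff separator, and appending the globally increasing counter as big-endian bytes, which after this change comes from services().globals.next_count() instead of a passed-in globals handle. A minimal illustration of that key layout follows (the function name and exact field order are illustrative, not Conduit's API):

    // Illustrative only: the `id + 0xff + count + 0xff + id` layout used by trees
    // such as presenceid_presence and readreceiptid_readreceipt.
    fn make_indexed_key(room_id: &str, user_id: &str, count: u64) -> Vec<u8> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff); // separator byte; it never occurs inside the ASCII identifiers
        key.extend_from_slice(&count.to_be_bytes()); // big-endian keeps keys ordered by count
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());
        key
    }

    fn main() {
        let key = make_indexed_key("!room:example.org", "@alice:example.org", 42);
        assert!(key.contains(&0xff));
        println!("key is {} bytes", key.len());
    }

Because the counter is monotonic and big-endian encoded, a prefix scan over `room_id + 0xff` yields entries in insertion order.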
- let count = globals.next_count()?.to_be_bytes(); + let count = services().globals.next_count()?.to_be_bytes(); let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); @@ -101,6 +107,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { Ok(hashmap) } + /* fn presence_maintain(&self, db: Arc>) { // TODO @M0dEx: move this to a timed tasks module tokio::spawn(async move { @@ -117,6 +124,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { } }); } + */ } fn parse_presence_event(bytes: &[u8]) -> Result { diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 556e697..68aea16 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,4 +1,10 @@ -impl service::room::edus::read_receipt::Data for KeyValueDatabase { +use std::mem; + +use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; + +use crate::{database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, @@ -28,7 +34,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { } let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); room_latest_id.push(0xff); room_latest_id.extend_from_slice(user_id.as_bytes()); @@ -40,7 +46,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { Ok(()) } - pub fn readreceipts_since<'a>( + fn readreceipts_since<'a>( &'a self, room_id: &RoomId, since: u64, @@ -102,7 +108,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { .insert(&key, &count.to_be_bytes())?; self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes()) } fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 8cfb432..905bffc 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,15 +1,20 @@ -impl service::room::edus::typing::Data for KeyValueDatabase { +use std::collections::HashSet; + +use ruma::{UserId, RoomId}; + +use crate::{database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, room_id: &RoomId, timeout: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - let count = globals.next_count()?.to_be_bytes(); + let count = services().globals.next_count()?.to_be_bytes(); let mut room_typing_id = prefix; room_typing_id.extend_from_slice(&timeout.to_be_bytes()); @@ -49,7 +54,7 @@ impl service::room::edus::typing::Data for KeyValueDatabase { if found_outdated { self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; } Ok(()) diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 8abdce4..c230cbf 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ 
b/src/database/key_value/rooms/lazy_load.rs @@ -1,4 +1,8 @@ -impl service::room::lazy_load::Data for KeyValueDatabase { +use ruma::{UserId, DeviceId, RoomId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 37dd717..b4cba2c 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,4 +1,8 @@ -impl service::room::metadata::Data for KeyValueDatabase { +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 2a3f81d..adb810b 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -1,14 +1,13 @@ -mod state; mod alias; mod directory; mod edus; -mod event_handler; -mod lazy_loading; +//mod event_handler; +mod lazy_load; mod metadata; mod outlier; mod pdu_metadata; mod search; -mod short; +//mod short; mod state; mod state_accessor; mod state_cache; diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index c979d25..08299a0 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,4 +1,8 @@ -impl service::room::outlier::Data for KeyValueDatabase { +use ruma::{EventId, signatures::CanonicalJsonObject}; + +use crate::{service, database::KeyValueDatabase, PduEvent, Error}; + +impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 6b2171c..602f3f6 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,4 +1,10 @@ -impl service::room::pdu_metadata::Data for KeyValueDatabase { +use std::sync::Arc; + +use ruma::{RoomId, EventId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 1ffffe5..44663ff 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,7 +1,12 @@ -impl service::room::search::Data for KeyValueDatabase { +use std::mem::size_of; +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase, utils}; + +impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { - let mut batch = body + let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) .filter(|word| word.len() <= 50) @@ -14,7 +19,7 @@ impl service::room::search::Data for KeyValueDatabase { (key, Vec::new()) }); - self.tokenids.insert_batch(&mut batch)?; + self.tokenids.insert_batch(&mut batch) } fn search_pdus<'a>( @@ -64,3 +69,4 @@ impl service::room::search::Data for KeyValueDatabase { ) })) } +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 5daf6c6..192dbb8 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,4 +1,11 @@ -impl service::room::state::Data for KeyValueDatabase { +use ruma::{RoomId, EventId}; +use std::sync::Arc; +use std::{sync::MutexGuard, collections::HashSet}; +use std::fmt::Debug; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
@@ -9,21 +16,21 @@ impl service::room::state::Data for KeyValueDatabase { }) } - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; Ok(()) } - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash + fn set_event_state(&self, shorteventid: Vec, shortstatehash: Vec) -> Result<()> { + self.shorteventid_shortstatehash .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -38,11 +45,11 @@ impl service::room::state::Data for KeyValueDatabase { .collect() } - fn set_forward_extremities( + fn set_forward_extremities<'a>( &self, room_id: &RoomId, event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index db81967..ea15afc 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,4 +1,11 @@ -impl service::room::state_accessor::Data for KeyValueDatabase { +use std::{collections::{BTreeMap, HashMap}, sync::Arc}; + +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils}; +use async_trait::async_trait; +use ruma::{EventId, events::StateEventType, RoomId}; + +#[async_trait] +impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? 
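One detail the get_statediff/save_statediff hunks above depend on: the parent shortstatehash is stored as a plain u64 at the front of the value, and 0 is reserved to mean "no parent", which is why the new code maps a zero parent to None. A small sketch of that round trip (standalone, with the rest of StateDiff omitted):

    // Sketch of the parent encoding in shortstatehash_statediff values:
    // the first 8 bytes are the parent shortstatehash, 0 meaning "no parent".
    fn encode_parent(parent: Option<u64>) -> [u8; 8] {
        parent.unwrap_or(0).to_be_bytes()
    }

    fn decode_parent(value: &[u8]) -> Option<u64> {
        let parent = u64::from_be_bytes(value[0..8].try_into().expect("8 bytes"));
        if parent != 0 {
            Some(parent)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(decode_parent(&encode_parent(Some(7))), Some(7));
        assert_eq!(decode_parent(&encode_parent(None)), None);
    }

This assumes 0 is never handed out as a real shortstatehash, so the sentinel value cannot collide with actual data.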
@@ -149,3 +156,4 @@ impl service::room::state_accessor::Data for KeyValueDatabase { Ok(None) } } +} diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 3781402..567dc80 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,8 +1,12 @@ -impl service::room::state_cache::Data for KeyValueDatabase { - fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()> { +use ruma::{UserId, RoomId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + self.roomuseroncejoinedids.insert(&userroom_id, &[]) } } diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 71a2f3a..09e3566 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,11 +1,20 @@ -impl service::room::state_compressor::Data for KeyValueDatabase { - fn get_statediff(shortstatehash: u64) -> Result { +use std::{collections::HashSet, mem::size_of}; + +use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils}; + +impl service::rooms::state_compressor::Data for KeyValueDatabase { + fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff .get(&shortstatehash.to_be_bytes())? .ok_or_else(|| Error::bad_database("State hash does not exist"))?; let parent = utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); + let parent = if parent != 0 { + Some(parent) + } else { + None + }; let mut add_mode = true; let mut added = HashSet::new(); @@ -26,10 +35,10 @@ impl service::room::state_compressor::Data for KeyValueDatabase { i += 2 * size_of::(); } - StateDiff { parent, added, removed } + Ok(StateDiff { parent, added, removed }) } - fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()> { + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { let mut value = diff.parent.to_be_bytes().to_vec(); for new in &diff.new { value.extend_from_slice(&new[..]); @@ -43,6 +52,6 @@ impl service::room::state_compressor::Data for KeyValueDatabase { } self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + .insert(&shortstatehash.to_be_bytes(), &value) } } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 58884ec..cf93df1 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,4 +1,11 @@ -impl service::room::timeline::Data for KeyValueDatabase { +use std::{collections::hash_map, mem::size_of, sync::Arc}; + +use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; +use tracing::error; + +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent}; + +impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache @@ -37,7 +44,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. 
- pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else( @@ -55,7 +62,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + fn get_non_outlier_pdu_json( &self, event_id: &EventId, ) -> Result> { @@ -74,14 +81,14 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { + fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { @@ -99,7 +106,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { + fn get_pdu(&self, event_id: &EventId) -> Result>> { if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { return Ok(Some(Arc::clone(p))); } @@ -135,7 +142,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -145,7 +152,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -155,7 +162,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + fn pdu_count(&self, pdu_id: &[u8]) -> Result { utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } @@ -178,7 +185,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - pub fn pdus_since<'a>( + fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -212,7 +219,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
- pub fn pdus_until<'a>( + fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -246,7 +253,7 @@ impl service::room::timeline::Data for KeyValueDatabase { })) } - pub fn pdus_after<'a>( + fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 52145ce..2fc3b9f 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,4 +1,8 @@ -impl service::room::user::Data for KeyValueDatabase { +use ruma::{UserId, RoomId}; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 81c1197..6652a62 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,5 +1,9 @@ -impl service::pusher::Data for KeyValueDatabase { - pub fn add_txnid( +use ruma::{UserId, DeviceId, TransactionId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::transaction_ids::Data for KeyValueDatabase { + fn add_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, @@ -17,7 +21,7 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - pub fn existing_txnid( + fn existing_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 4d1dac5..b1960bd 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,3 +1,9 @@ +use std::io::ErrorKind; + +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::uiaa::UiaaInfo}; + +use crate::{database::KeyValueDatabase, service, Error}; + impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 5ef058f..ea84490 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,18 @@ +use std::{mem::size_of, collections::BTreeMap}; + +use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; +use tracing::warn; + +use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services}; + impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. - pub fn exists(&self, user_id: &UserId) -> Result { + fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result { + fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password .get(user_id.as_bytes())? 
@@ -16,33 +23,13 @@ impl service::users::Data for KeyValueDatabase { .is_empty()) } - /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) - } - - /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) - } - /// Returns the number of users registered on this server. - pub fn count(&self) -> Result { + fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>> { + fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -69,7 +56,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_ { + fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -81,7 +68,7 @@ impl service::users::Data for KeyValueDatabase { /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - pub fn list_local_users(&self) -> Result> { + fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() @@ -113,7 +100,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result> { + fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -124,7 +111,7 @@ impl service::users::Data for KeyValueDatabase { } /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(password) { self.userid_password @@ -143,7 +130,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result> { + fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -154,7 +141,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname .insert(user_id.as_bytes(), displayname.as_bytes())?; @@ -166,7 +153,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the avatar_url of a user. 
- pub fn avatar_url(&self, user_id: &UserId) -> Result>> { + fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { @@ -179,7 +166,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -191,7 +178,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the blurhash of a user. - pub fn blurhash(&self, user_id: &UserId) -> Result> { + fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? .map(|bytes| { @@ -204,7 +191,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash .insert(user_id.as_bytes(), blurhash.as_bytes())?; @@ -216,7 +203,7 @@ impl service::users::Data for KeyValueDatabase { } /// Adds a new device to a user. - pub fn create_device( + fn create_device( &self, user_id: &UserId, device_id: &DeviceId, @@ -250,7 +237,7 @@ impl service::users::Data for KeyValueDatabase { } /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -280,7 +267,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>( + fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { @@ -302,7 +289,7 @@ impl service::users::Data for KeyValueDatabase { } /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -325,13 +312,12 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn add_one_time_key( + fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -356,12 +342,12 @@ impl service::users::Data for KeyValueDatabase { )?; self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; Ok(()) } - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(user_id.as_bytes())? 
.map(|bytes| { @@ -372,12 +358,11 @@ impl service::users::Data for KeyValueDatabase { .unwrap_or(Ok(0)) } - pub fn take_one_time_key( + fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -388,7 +373,7 @@ impl service::users::Data for KeyValueDatabase { prefix.push(b':'); self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -411,7 +396,7 @@ impl service::users::Data for KeyValueDatabase { .transpose() } - pub fn count_one_time_keys( + fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, @@ -443,13 +428,11 @@ impl service::users::Data for KeyValueDatabase { Ok(counts) } - pub fn add_device_keys( + fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -460,19 +443,17 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), )?; - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - pub fn add_cross_signing_keys( + fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { // TODO: Check signatures @@ -575,19 +556,17 @@ impl service::users::Data for KeyValueDatabase { .insert(user_id.as_bytes(), &user_signing_key_key)?; } - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - pub fn sign_key( + fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_id.as_bytes().to_vec(); key.push(0xff); @@ -619,12 +598,12 @@ impl service::users::Data for KeyValueDatabase { )?; // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; + self.mark_device_key_update(target_id)?; Ok(()) } - pub fn keys_changed<'a>( + fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, @@ -662,16 +641,14 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn mark_device_key_update( + fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + let count = services().globals.next_count()?.to_be_bytes(); + for room_id in services().rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms - if rooms + if services().rooms .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? 
.is_none() { @@ -693,7 +670,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn get_device_keys( + fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, @@ -709,7 +686,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_master_key bool>( + fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, @@ -730,7 +707,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_self_signing_key bool>( + fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, @@ -751,7 +728,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { + fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { @@ -763,20 +740,19 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn add_to_device_event( + fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); let mut json = serde_json::Map::new(); json.insert("type".to_owned(), event_type.to_owned().into()); @@ -790,7 +766,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn get_to_device_events( + fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -812,7 +788,7 @@ impl service::users::Data for KeyValueDatabase { Ok(events) } - pub fn remove_to_device_events( + fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -833,7 +809,7 @@ impl service::users::Data for KeyValueDatabase { .map(|(key, _)| { Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) + utils::u64_from_bytes(&key[key.len() - size_of::()..key.len()]) .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, )) }) @@ -846,7 +822,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn update_device_metadata( + fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -871,7 +847,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get device metadata. - pub fn get_device_metadata( + fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -889,7 +865,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -899,7 +875,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn all_devices_metadata<'a>( + fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a { @@ -915,7 +891,7 @@ impl service::users::Data for KeyValueDatabase { } /// Creates a new sync filter. Returns the filter id. 
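Stepping back from the individual hunks: the recurring shape in these key_value files is that what used to be inherent `pub fn` methods on database structs becomes plain `fn` items implementing a `Data` trait defined in the matching service module (service::users::Data, service::rooms::timeline::Data, and so on), with KeyValueDatabase as the concrete implementor. A toy version of that layering, with an invented one-method trait rather than Conduit's real one:

    // Sketch of the service/Data split: the service module owns the storage trait,
    // the key_value backend implements it for KeyValueDatabase.
    mod service {
        pub mod users {
            pub trait Data: Send + Sync {
                fn exists(&self, user_id: &str) -> bool;
            }
        }
    }

    struct KeyValueDatabase {
        userid_password: std::collections::HashMap<String, String>,
    }

    impl service::users::Data for KeyValueDatabase {
        fn exists(&self, user_id: &str) -> bool {
            self.userid_password.contains_key(user_id)
        }
    }

    fn main() {
        let db = KeyValueDatabase { userid_password: Default::default() };
        // Callers are written against the trait, so the backend can be swapped.
        let data: &dyn service::users::Data = &db;
        assert!(!data.exists("@alice:example.org"));
    }

That split is also why helper parameters like `rooms: &super::rooms::Rooms` and `globals: &super::globals::Globals` disappear from the signatures: the implementations reach those services through the global accessor instead.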
- pub fn create_filter( + fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, @@ -934,7 +910,7 @@ impl service::users::Data for KeyValueDatabase { Ok(filter_id) } - pub fn get_filter( + fn get_filter( &self, user_id: &UserId, filter_id: &str, diff --git a/src/database/mod.rs b/src/database/mod.rs index a35228a..12758af 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,20 +1,7 @@ pub mod abstraction; +pub mod key_value; -pub mod account_data; -pub mod admin; -pub mod appservice; -pub mod globals; -pub mod key_backups; -pub mod media; -pub mod pusher; -pub mod rooms; -pub mod sending; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; - -use self::admin::create_admin_room; -use crate::{utils, Config, Error, Result}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, admin::{self, create_admin_room}, appservice, pusher}}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -25,7 +12,7 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, + DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -38,21 +25,132 @@ use std::{ }; use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; +use abstraction::KvTree; pub struct KeyValueDatabase { _db: Arc, - pub globals: globals::Globals, - pub users: users::Users, - pub uiaa: uiaa::Uiaa, - pub rooms: rooms::Rooms, - pub account_data: account_data::AccountData, - pub media: media::Media, - pub key_backups: key_backups::KeyBackups, - pub transaction_ids: transaction_ids::TransactionIds, - pub sending: sending::Sending, - pub admin: admin::Admin, - pub appservice: appservice::Appservice, - pub pusher: pusher::PushData, + + //pub globals: globals::Globals, + pub(super) global: Arc, + pub(super) server_signingkeys: Arc, + + //pub users: users::Users, + pub(super) userid_password: Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, + pub(super) userdeviceid_token: Arc, + pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists + pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 + pub(super) token_userdeviceid: Arc, + + pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId + pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count + pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count + pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, + + pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count + + //pub uiaa: uiaa::Uiaa, + pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: + RwLock, Box, String), CanonicalJsonValue>>, + + //pub edus: RoomEdus, + pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(super) 
roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count + + //pub rooms: rooms::Rooms, + pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count + pub(super) publicroomids: Arc, + + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount + + /// Participating servers in a room. + pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId + + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) userroomid_invitestate: Arc, // InviteState = Vec> + pub(super) roomuserid_invitecount: Arc, // InviteCount = Count + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, + + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 + pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 + + /// Remember the current state hash of a room. + pub(super) roomid_shortstatehash: Arc, + pub(super) roomsynctoken_shortstatehash: Arc, + /// Remember the state hash at events in the past. + pub(super) shorteventid_shortstatehash: Arc, + /// StateKey = EventType + StateKey, ShortStateKey = Count + pub(super) statekey_shortstatekey: Arc, + pub(super) shortstatekey_statekey: Arc, + + pub(super) roomid_shortroomid: Arc, + + pub(super) shorteventid_eventid: Arc, + pub(super) eventid_shorteventid: Arc, + + pub(super) statehash_shortstatehash: Arc, + pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) + + pub(super) shorteventid_authchain: Arc, + + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, + + /// RoomId + EventId -> Parent PDU EventId. 
+ pub(super) referencedevents: Arc, + + //pub account_data: account_data::AccountData, + pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type + pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type + + //pub media: media::Media, + pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + //pub key_backups: key_backups::KeyBackups, + pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId + + //pub transaction_ids: transaction_ids::TransactionIds, + pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) + //pub sending: sending::Sending, + pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync + pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content + + //pub appservice: appservice::Appservice, + pub(super) id_appserviceregistrations: Arc, + + //pub pusher: pusher::PushData, + pub(super) senderkey_pusher: Arc, } impl KeyValueDatabase { @@ -157,7 +255,6 @@ impl KeyValueDatabase { let db = Arc::new(TokioRwLock::from(Self { _db: builder.clone(), - users: users::Users { userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, userid_avatarurl: builder.open_tree("userid_avatarurl")?, @@ -175,13 +272,9 @@ impl KeyValueDatabase { userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, userfilterid_filter: builder.open_tree("userfilterid_filter")?, todeviceid_events: builder.open_tree("todeviceid_events")?, - }, - uiaa: uiaa::Uiaa { + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - }, - rooms: rooms::Rooms { - edus: rooms::RoomEdus { readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt roomuserid_lastprivatereadupdate: builder @@ -190,7 +283,6 @@ impl KeyValueDatabase { roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, presenceid_presence: builder.open_tree("presenceid_presence")?, userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - }, pduid_pdu: builder.open_tree("pduid_pdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, @@ -239,74 +331,23 @@ impl KeyValueDatabase { softfailedeventids: builder.open_tree("softfailedeventids")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shorteventid_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - eventidshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shortstatekey_cache: Mutex::new(LruCache::new( - 
(100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - statekeyshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - lasttimelinecount_cache: Mutex::new(HashMap::new()), - }, - account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - }, - media: media::Media { mediaid_file: builder.open_tree("mediaid_file")?, - }, - key_backups: key_backups::KeyBackups { backupid_algorithm: builder.open_tree("backupid_algorithm")?, backupid_etag: builder.open_tree("backupid_etag")?, backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - }, - transaction_ids: transaction_ids::TransactionIds { userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - }, - sending: sending::Sending { servername_educount: builder.open_tree("servername_educount")?, servernameevent_data: builder.open_tree("servernameevent_data")?, servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, - maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - sender: sending_sender, - }, - admin: admin::Admin { - sender: admin_sender, - }, - appservice: appservice::Appservice { - cached_registrations: Arc::new(RwLock::new(HashMap::new())), id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - }, - pusher: pusher::PushData { senderkey_pusher: builder.open_tree("senderkey_pusher")?, - }, - globals: globals::Globals::load( - builder.open_tree("global")?, - builder.open_tree("server_signingkeys")?, - config.clone(), - )?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, })); + // TODO: do this after constructing the db let guard = db.read().await; // Matrix resource ownership is based on the server name; changing it @@ -744,7 +785,7 @@ impl KeyValueDatabase { .bump_database_version(latest_database_version)?; // Create the admin room and server user on first run - create_admin_room(&guard).await?; + create_admin_room().await?; warn!( "Created new {} database with version {}", diff --git a/src/lib.rs b/src/lib.rs index c35a129..0d058df 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,17 +9,26 @@ mod config; mod database; -mod error; -mod pdu; -mod ruma_wrapper; +mod service; +pub mod api; mod utils; -pub mod appservice_server; -pub mod client_server; -pub mod server_server; +use std::cell::Cell; pub use config::Config; -pub use database::Database; -pub use error::{Error, Result}; -pub use pdu::PduEvent; -pub use ruma_wrapper::{Ruma, RumaResponse}; +pub use utils::error::{Error, Result}; +pub use service::{Services, pdu::PduEvent}; +pub use api::ruma_wrapper::{Ruma, RumaResponse}; + +use crate::database::KeyValueDatabase; + +pub static SERVICES: Cell> = Cell::new(None); + +enum ServicesEnum { + Rocksdb(Services) +} + +pub fn services() -> Services { + SERVICES.get().unwrap() +} + diff --git a/src/main.rs b/src/main.rs index a1af976..543b953 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,47 +46,44 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -lazy_static! 
{ - static ref DB: Database = { - let raw_config = - Figment::new() - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); - - let config = match raw_config.extract::() { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); - std::process::exit(1); - } - }; - - config.warn_deprecated(); - - let db = match Database::load_or_create(&config).await { - Ok(db) => db, - Err(e) => { - eprintln!( - "The database couldn't be loaded or created. The following error occured: {}", - e - ); - std::process::exit(1); - } - }; - }; -} - #[tokio::main] async fn main() { - lazy_static::initialize(&DB); + // Initialize DB + let raw_config = + Figment::new() + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); + + let config = match raw_config.extract::() { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + std::process::exit(1); + } + }; + + config.warn_deprecated(); + + let db = match KeyValueDatabase::load_or_create(&config).await { + Ok(db) => db, + Err(e) => { + eprintln!( + "The database couldn't be loaded or created. The following error occured: {}", + e + ); + std::process::exit(1); + } + }; + + SERVICES.set(db).expect("this is the first and only time we initialize the SERVICE static"); let start = async { - run_server(&config).await.unwrap(); + run_server().await.unwrap(); }; if config.allow_jaeger { diff --git a/src/service/account_data.rs b/src/service/account_data.rs index d85918f..70ad9f2 100644 --- a/src/service/account_data.rs +++ b/src/service/account_data.rs @@ -8,23 +8,15 @@ use ruma::{ use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, sync::Arc}; -use super::abstraction::Tree; - -pub struct AccountData { - pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type - pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type -} - impl AccountData { /// Places one event in the account data of the user and removes the previous entry. 
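The lib.rs and main.rs changes above replace the lazy_static Database global with a SERVICES static plus a services() accessor, so deep call sites stop threading a `db: &Database` parameter around. The static as printed is clearly still in flux (a bare Cell in a static is not Sync); a minimal thread-safe sketch of the same service-locator idea, using OnceLock as an assumption rather than what Conduit actually ships, looks like this:

    use std::sync::OnceLock;

    // Stand-in for the real Services struct that owns globals, rooms, users, ...
    pub struct Services {
        pub server_name: String,
    }

    static SERVICES: OnceLock<Services> = OnceLock::new();

    /// Called once at startup, right after the database has been opened.
    pub fn init_services(services: Services) {
        SERVICES
            .set(services)
            .unwrap_or_else(|_| panic!("services initialized twice"));
    }

    /// What call sites use instead of a `db: &Database` parameter.
    pub fn services() -> &'static Services {
        SERVICES.get().expect("services() called before initialization")
    }

    fn main() {
        init_services(Services { server_name: "example.org".to_owned() });
        assert_eq!(services().server_name, "example.org");
    }

The trade-off is the usual one for service locators: call sites get much shorter, but any code path that runs before initialization now fails at runtime instead of failing to compile.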
- #[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))] + #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &T, - globals: &super::globals::Globals, ) -> Result<()> { let mut prefix = room_id .map(|r| r.to_string()) @@ -36,7 +28,7 @@ impl AccountData { prefix.push(0xff); let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); diff --git a/src/service/admin.rs b/src/service/admin.rs index 6f418ea..ded0adb 100644 --- a/src/service/admin.rs +++ b/src/service/admin.rs @@ -5,14 +5,6 @@ use std::{ time::Instant, }; -use crate::{ - client_server::AUTO_GEN_PASSWORD_LENGTH, - error::{Error, Result}, - pdu::PduBuilder, - server_server, utils, - utils::HtmlEscape, - Database, PduEvent, -}; use clap::Parser; use regex::Regex; use ruma::{ @@ -36,6 +28,10 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +use crate::{services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; + +use super::pdu::PduBuilder; + #[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), @@ -50,22 +46,19 @@ pub struct Admin { impl Admin { pub fn start_handler( &self, - db: Arc>, mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let guard = db.read().await; - - let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name())) + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) .expect("@conduit:server_name is valid"); - let conduit_room = guard + let conduit_room = services() .rooms .id_from_alias( - format!("#admins:{}", guard.globals.server_name()) + format!("#admins:{}", services().globals.server_name()) .as_str() .try_into() .expect("#admins:server_name is a valid room alias"), @@ -73,12 +66,9 @@ impl Admin { .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - drop(guard); - let send_message = |message: RoomMessageEventContent, - guard: RwLockReadGuard<'_, Database>, mutex_lock: &MutexGuard<'_, ()>| { - guard + services() .rooms .build_and_append_pdu( PduBuilder { @@ -91,7 +81,6 @@ impl Admin { }, &conduit_user, &conduit_room, - &guard, mutex_lock, ) .unwrap(); @@ -100,15 +89,13 @@ impl Admin { loop { tokio::select! 
{ Some(event) = receiver.recv() => { - let guard = db.read().await; - let message_content = match event { AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(&*guard, room_message).await + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await }; let mutex_state = Arc::clone( - guard.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -118,7 +105,7 @@ impl Admin { let state_lock = mutex_state.lock().await; - send_message(message_content, guard, &state_lock); + send_message(message_content, &state_lock); drop(state_lock); } @@ -141,7 +128,7 @@ impl Admin { } // Parse and process a message from the admin room -async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +async fn process_admin_message(room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -149,7 +136,7 @@ async fn process_admin_message(db: &Database, room_message: String) -> RoomMessa let admin_command = match parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { - let server_name = db.globals.server_name(); + let server_name = services().globals.server_name(); let message = error .to_string() .replace("server.name", server_name.as_str()); @@ -159,7 +146,7 @@ async fn process_admin_message(db: &Database, room_message: String) -> RoomMessa } }; - match process_admin_command(db, admin_command, body).await { + match process_admin_command(admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -322,7 +309,6 @@ enum AdminCommand { } async fn process_admin_command( - db: &Database, command: AdminCommand, body: Vec<&str>, ) -> Result { @@ -332,7 +318,7 @@ async fn process_admin_command( let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); match parsed_config { - Ok(yaml) => match db.appservice.register_appservice(yaml) { + Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( "Appservice registered with ID: {}.", id @@ -355,7 +341,7 @@ async fn process_admin_command( } AdminCommand::UnregisterAppservice { appservice_identifier, - } => match db.appservice.unregister_appservice(&appservice_identifier) { + } => match services().appservice.unregister_appservice(&appservice_identifier) { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {}", @@ -363,7 +349,7 @@ async fn process_admin_command( )), }, AdminCommand::ListAppservices => { - if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { let count = appservices.len(); let output = format!( "Appservices ({}): {}", @@ -380,14 +366,14 @@ async fn process_admin_command( } } AdminCommand::ListRooms => { - let room_ids = db.rooms.iter_ids(); + let room_ids = services().rooms.iter_ids(); let output = format!( "Rooms:\n{}", room_ids .filter_map(|r| r.ok()) .map(|id| id.to_string() + "\tMembers: " - + &db + + &services() .rooms .room_joined_count(&id) .ok() @@ -399,7 +385,7 @@ async fn process_admin_command( ); 
RoomMessageEventContent::text_plain(output) } - AdminCommand::ListLocalUsers => match db.users.list_local_users() { + AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); @@ -408,7 +394,7 @@ async fn process_admin_command( Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, AdminCommand::IncomingFederation => { - let map = db.globals.roomid_federationhandletime.read().unwrap(); + let map = services().globals.roomid_federationhandletime.read().unwrap(); let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { @@ -425,7 +411,7 @@ async fn process_admin_command( } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); - if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + if let Some(event) = services().rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) @@ -435,7 +421,7 @@ async fn process_admin_command( Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id], db) + let count = server_server::get_auth_chain(room_id, vec![event_id]) .await? .count(); let elapsed = start.elapsed(); @@ -486,10 +472,10 @@ async fn process_admin_command( } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; + pdu_json = services().rooms.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { @@ -519,7 +505,7 @@ async fn process_admin_command( None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::DatabaseMemoryUsage => match db._db.memory_usage() { + AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to get database memory usage: {}", @@ -528,12 +514,12 @@ async fn process_admin_command( }, AdminCommand::ShowConfig => { // Construct and send the response - RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) + RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) } AdminCommand::ResetPassword { username } => { let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) { Ok(id) => id, Err(e) => { @@ -545,10 +531,10 @@ async fn process_admin_command( }; // Check if the specified user is valid - if !db.users.exists(&user_id)? - || db.users.is_deactivated(&user_id)? + if !services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? 
|| user_id - == UserId::parse_with_server_name("conduit", db.globals.server_name()) + == UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( @@ -558,7 +544,7 @@ async fn process_admin_command( let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - match db.users.set_password(&user_id, Some(new_password.as_str())) { + match services().users.set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( "Successfully reset the password for user {}: {}", user_id, new_password @@ -574,7 +560,7 @@ async fn process_admin_command( // Validate user id let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) { Ok(id) => id, Err(e) => { @@ -589,21 +575,21 @@ async fn process_admin_command( "userid {user_id} is not allowed due to historical" ))); } - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? { return Ok(RoomMessageEventContent::text_plain(format!( "userid {user_id} already exists" ))); } // Create user - db.users.create(&user_id, Some(password.as_str()))?; + services().users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - db.users + services().users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data - db.account_data.update( + services().account_data.update( None, &user_id, ruma::events::GlobalAccountDataEventType::PushRules @@ -614,24 +600,21 @@ async fn process_admin_command( global: ruma::push::Ruleset::server_default(&user_id), }, }, - &db.globals, )?; // we dont add a device since we're not the user, just the creator - db.flush()?; - // Inhibit login does not work for guests RoomMessageEventContent::text_plain(format!( "Created user with user_id: {user_id} and password: {password}" )) } AdminCommand::DisableRoom { room_id } => { - db.rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - db.rooms.disabledroomids.remove(room_id.as_bytes())?; + services().rooms.disabledroomids.remove(room_id.as_bytes())?; RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { @@ -639,16 +622,16 @@ async fn process_admin_command( user_id, } => { let user_id = Arc::::from(user_id); - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? 
{ RoomMessageEventContent::text_plain(format!( "Making {} leave all rooms before deactivation...", user_id )); - db.users.deactivate_account(&user_id)?; + services().users.deactivate_account(&user_id)?; if leave_rooms { - db.rooms.leave_all_rooms(&user_id, &db).await?; + services().rooms.leave_all_rooms(&user_id).await?; } RoomMessageEventContent::text_plain(format!( @@ -685,7 +668,7 @@ async fn process_admin_command( if !force { user_ids.retain(|&user_id| { - match db.users.is_admin(user_id, &db.rooms, &db.globals) { + match services().users.is_admin(user_id) { Ok(is_admin) => match is_admin { true => { admins.push(user_id.localpart()); @@ -699,7 +682,7 @@ async fn process_admin_command( } for &user_id in &user_ids { - match db.users.deactivate_account(user_id) { + match services().users.deactivate_account(user_id) { Ok(_) => deactivation_count += 1, Err(_) => {} } @@ -707,7 +690,7 @@ async fn process_admin_command( if leave_rooms { for &user_id in &user_ids { - let _ = db.rooms.leave_all_rooms(user_id, &db).await; + let _ = services().rooms.leave_all_rooms(user_id).await; } } @@ -814,13 +797,13 @@ fn usage_to_html(text: &str, server_name: &ServerName) -> String { /// /// Users in this room are considered admins by conduit, and the room can be /// used to issue admin commands by talking to the server user inside it. -pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { - let room_id = RoomId::new(db.globals.server_name()); +pub(crate) async fn create_admin_room() -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -830,10 +813,10 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { let state_lock = mutex_state.lock().await; // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); - db.users.create(&conduit_user, None)?; + services().users.create(&conduit_user, None)?; let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; @@ -841,7 +824,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { content.room_version = RoomVersionId::V6; // 1. The room create event - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -851,12 +834,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 2. 
Make conduit bot join - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -876,7 +858,6 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; @@ -884,7 +865,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -898,12 +879,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.1 Join Rules - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) @@ -914,12 +894,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.2 History Visibility - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -932,12 +911,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.3 Guest Access - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) @@ -948,14 +926,13 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 5. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) .expect("Room name is valid"); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) @@ -966,15 +943,14 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", db.globals.server_name()), + topic: format!("Manage {}", services().globals.server_name()), }) .expect("event is valid, we just created it"), unsigned: None, @@ -983,16 +959,15 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 6. 
Room alias - let alias: Box = format!("#admins:{}", db.globals.server_name()) + let alias: Box = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -1006,11 +981,10 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + services().rooms.set_alias(&alias, Some(&room_id))?; Ok(()) } @@ -1019,20 +993,19 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { /// /// In conduit, this is equivalent to granting admin privileges. pub(crate) async fn make_user_admin( - db: &Database, user_id: &UserId, displayname: String, ) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", db.globals.server_name()) + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - let room_id = db + let room_id = services() .rooms .id_from_alias(&admin_room_alias)? .expect("Admin room must exist"); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -1042,11 +1015,11 @@ pub(crate) async fn make_user_admin( let state_lock = mutex_state.lock().await; // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); // Invite and join the real user - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1066,10 +1039,9 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1089,7 +1061,6 @@ pub(crate) async fn make_user_admin( }, &user_id, &room_id, - &db, &state_lock, )?; @@ -1098,7 +1069,7 @@ pub(crate) async fn make_user_admin( users.insert(conduit_user.to_owned(), 100.into()); users.insert(user_id.to_owned(), 100.into()); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -1112,17 +1083,16 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; // Send welcome message - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), - format!("

                Thank you for trying out Conduit!

                \n

                Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

                \n

                Helpful links:

                \n
                \n

                Website: https://conduit.rs
                Git and Documentation: https://gitlab.com/famedly/conduit
                Report issues: https://gitlab.com/famedly/conduit/-/issues

                \n
                \n

                For a list of available commands, send the following message in this room: @conduit:{}: --help

                \n

                Here are some rooms you can join (by typing the command):

                \n

                Conduit room (Ask questions and get notified on updates):
                /join #conduit:fachschaften.org

                \n

                Conduit lounge (Off-topic, only Conduit users are allowed to join)
                /join #conduit-lounge:conduit.rs

                \n", db.globals.server_name()).to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), + format!("

                Thank you for trying out Conduit!

                \n

                Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

                \n

                Helpful links:

                \n
                \n

                Website: https://conduit.rs
                Git and Documentation: https://gitlab.com/famedly/conduit
                Report issues: https://gitlab.com/famedly/conduit/-/issues

                \n
                \n

                For a list of available commands, send the following message in this room: @conduit:{}: --help

                \n

                Here are some rooms you can join (by typing the command):

                \n

                Conduit room (Ask questions and get notified on updates):
                /join #conduit:fachschaften.org

                \n

                Conduit lounge (Off-topic, only Conduit users are allowed to join)
                /join #conduit-lounge:conduit.rs

                \n", services().globals.server_name()).to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, @@ -1131,7 +1101,6 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index fe57451..eed84d5 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,17 +1,18 @@ pub trait Data { + type Iter: Iterator; /// Registers an appservice and returns the ID to the caller - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; /// Remove an appservice registration /// /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub fn unregister_appservice(&self, service_name: &str) -> Result<()>; + fn unregister_appservice(&self, service_name: &str) -> Result<()>; - pub fn get_registration(&self, id: &str) -> Result>; + fn get_registration(&self, id: &str) -> Result>; - pub fn iter_ids(&self) -> Result> + '_>; + fn iter_ids(&self) -> Result>>; - pub fn all(&self) -> Result>; + fn all(&self) -> Result>; } diff --git a/src/service/key_backups.rs b/src/service/key_backups.rs index 10443f6..be1d6b1 100644 --- a/src/service/key_backups.rs +++ b/src/service/key_backups.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result}; +use crate::{utils, Error, Result, services}; use ruma::{ api::client::{ backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, @@ -9,22 +9,13 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -use super::abstraction::Tree; - -pub struct KeyBackups { - pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) - pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) - pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId -} - impl KeyBackups { pub fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, - globals: &super::globals::Globals, ) -> Result { - let version = globals.next_count()?.to_string(); + let version = services().globals.next_count()?.to_string(); let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -35,7 +26,7 @@ impl KeyBackups { &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), )?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version) } @@ -61,7 +52,6 @@ impl KeyBackups { user_id: &UserId, version: &str, backup_metadata: &Raw, - globals: &super::globals::Globals, ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -77,7 +67,7 @@ impl KeyBackups { self.backupid_algorithm .insert(&key, backup_metadata.json().get().as_bytes())?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version.to_owned()) } @@ -157,7 +147,6 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, key_data: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -171,7 +160,7 @@ impl KeyBackups { } self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; key.push(0xff); key.extend_from_slice(room_id.as_bytes()); diff --git a/src/service/media.rs b/src/service/media.rs index a4bb402..1bdf6d4 100644 --- 
a/src/service/media.rs +++ b/src/service/media.rs @@ -1,4 +1,3 @@ -use crate::database::globals::Globals; use image::{imageops::FilterType, GenericImageView}; use super::abstraction::Tree; diff --git a/src/service/mod.rs b/src/service/mod.rs new file mode 100644 index 0000000..80239cb --- /dev/null +++ b/src/service/mod.rs @@ -0,0 +1,28 @@ +pub mod pdu; +pub mod appservice; +pub mod pusher; +pub mod rooms; +pub mod transaction_ids; +pub mod uiaa; +pub mod users; +pub mod account_data; +pub mod admin; +pub mod globals; +pub mod key_backups; +pub mod media; +pub mod sending; + +pub struct Services { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + //pub account_data: account_data::Service, + //pub admin: admin::Service, + pub globals: globals::Service, + //pub key_backups: key_backups::Service, + //pub media: media::Service, + //pub sending: sending::Service, +} diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 20ec01e..47e21a6 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Database, Error}; +use crate::{Database, Error, services}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, @@ -332,7 +332,6 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, - db: &Database, ) -> crate::Result<(Box, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); @@ -344,7 +343,7 @@ pub(crate) fn gen_event_id_canonical_json( .and_then(|id| RoomId::parse(id.as_str()?).ok()) .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; - let room_version_id = db.rooms.get_room_version(&room_id); + let room_version_id = services().rooms.get_room_version(&room_id); let event_id = format!( "${}", diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 468ad8b..ef2b819 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,11 +1,13 @@ +use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; + pub trait Data { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - pub fn get_pusher(&self, senderkey: &[u8]) -> Result>; + fn get_pusher(&self, senderkey: &[u8]) -> Result>; - pub fn get_pushers(&self, sender: &UserId) -> Result>; + fn get_pushers(&self, sender: &UserId) -> Result>; - pub fn get_pusher_senderkeys<'a>( + fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 342763e..87e91a1 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,7 +1,27 @@ mod data; pub use data::Data; -use crate::service::*; +use crate::{services, Error, PduEvent}; +use bytes::BytesMut; +use ruma::{ + api::{ + client::push::{get_pushers, set_pusher, PusherKind}, + push_gateway::send_event_notification::{ + self, + v1::{Device, Notification, NotificationCounts, NotificationPriority}, + }, + MatrixVersion, OutgoingRequest, SendAccessToken, + }, + events::{ + room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, + AnySyncRoomEvent, RoomEventType, StateEventType, + }, + push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, 
Tweak}, + serde::Raw, + uint, RoomId, UInt, UserId, +}; +use std::{fmt::Debug, mem}; +use tracing::{error, info, warn}; pub struct Service { db: D, @@ -27,9 +47,8 @@ impl Service<_> { self.db.get_pusher_senderkeys(sender) } - #[tracing::instrument(skip(globals, destination, request))] + #[tracing::instrument(skip(destination, request))] pub async fn send_request( - globals: &crate::database::globals::Globals, destination: &str, request: T, ) -> Result @@ -57,7 +76,7 @@ impl Service<_> { //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; + let response = services().globals.default_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -105,19 +124,19 @@ impl Service<_> { } } - #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] + #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu))] pub async fn send_push_notice( + &self, user: &UserId, unread: UInt, pusher: &get_pushers::v3::Pusher, ruleset: Ruleset, pdu: &PduEvent, - db: &Database, ) -> Result<()> { let mut notify = None; let mut tweaks = Vec::new(); - let power_levels: RoomPowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = services() .rooms .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { @@ -127,13 +146,12 @@ impl Service<_> { .transpose()? .unwrap_or_default(); - for action in get_actions( + for action in self.get_actions( user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id, - db, )? { let n = match action { Action::DontNotify => false, @@ -155,27 +173,26 @@ impl Service<_> { } if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; + self.send_notice(unread, pusher, tweaks, pdu).await?; } // Else the event triggered no actions Ok(()) } - #[tracing::instrument(skip(user, ruleset, pdu, db))] + #[tracing::instrument(skip(user, ruleset, pdu))] pub fn get_actions<'a>( + &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, pdu: &Raw, room_id: &RoomId, - db: &Database, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users + user_display_name: services().users .displayname(user)? 
.unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users.clone(), @@ -186,13 +203,13 @@ impl Service<_> { Ok(ruleset.get_actions(pdu, &ctx)) } - #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] + #[tracing::instrument(skip(unread, pusher, tweaks, event))] async fn send_notice( + &self, unread: UInt, pusher: &get_pushers::v3::Pusher, tweaks: Vec, event: &PduEvent, - db: &Database, ) -> Result<()> { // TODO: email if pusher.kind == PusherKind::Email { @@ -240,12 +257,8 @@ impl Service<_> { } if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -256,11 +269,11 @@ impl Service<_> { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let user_name = db.users.displayname(&event.sender)?; + let user_name = services().users.displayname(&event.sender)?; notifi.sender_display_name = user_name.as_deref(); let room_name = if let Some(room_name_pdu) = - db.rooms + services().rooms .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) @@ -272,8 +285,7 @@ impl Service<_> { notifi.room_name = room_name.as_deref(); - send_request( - &db.globals, + self.send_request( url, send_event_notification::v1::Request::new(notifi), ) diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 9dbfc7b..655f32a 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,22 +1,24 @@ +use ruma::{RoomId, RoomAliasId}; + pub trait Data { /// Creates or updates the alias to the given room id. - pub fn set_alias( + fn set_alias( alias: &RoomAliasId, room_id: &RoomId ) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. - pub fn remove_alias( + fn remove_alias( alias: &RoomAliasId, ) -> Result<()>; /// Looks up the roomid for the given alias. 
- pub fn resolve_local_alias( + fn resolve_local_alias( alias: &RoomAliasId, ) -> Result<()>; /// Returns all local aliases that point to the given room - pub fn local_aliases_for_room( + fn local_aliases_for_room( alias: &RoomAliasId, ) -> Result<()>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index cfe0539..f46609a 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,14 +1,13 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::{RoomAliasId, RoomId}; pub struct Service { db: D, } impl Service<_> { - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn set_alias( &self, alias: &RoomAliasId, @@ -17,7 +16,7 @@ impl Service<_> { self.db.set_alias(alias, room_id) } - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn remove_alias( &self, alias: &RoomAliasId, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index d8fde95..88c86fa 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,3 +1,5 @@ +use std::collections::HashSet; + pub trait Data { fn get_cached_eventid_authchain<'a>() -> Result>; fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index dfc289f..e17c10a 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,4 +1,6 @@ mod data; +use std::{sync::Arc, collections::HashSet}; + pub use data::Data; use crate::service::*; diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 83d7885..e28cdd1 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,3 +1,5 @@ +use ruma::RoomId; + pub trait Data { /// Adds the room to the public room directory fn set_public(room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index b92933f..cb9cda8 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::RoomId; use crate::service::*; @@ -10,21 +11,21 @@ pub struct Service { impl Service<_> { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_public(&self, room_id) + self.db.set_public(room_id) } #[tracing::instrument(skip(self))] pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_not_public(&self, room_id) + self.db.set_not_public(room_id) } #[tracing::instrument(skip(self))] pub fn is_public_room(&self, room_id: &RoomId) -> Result { - self.db.is_public_room(&self, room_id) + self.db.is_public_room(room_id) } #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.db.public_rooms(&self, room_id) + self.db.public_rooms() } } diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index d8ce530..5566fb2 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,3 +1,9 @@ pub mod presence; pub mod read_receipt; pub mod typing; + +pub struct Service { + presence: presence::Service, + read_receipt: read_receipt::Service, + typing: typing::Service, +} diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index de72e21..8e3c672 100644 --- a/src/service/rooms/edus/presence/data.rs +++ 
b/src/service/rooms/edus/presence/data.rs @@ -1,3 +1,7 @@ +use std::collections::HashMap; + +use ruma::{UserId, RoomId, events::presence::PresenceEvent}; + pub trait Data { /// Adds a presence event which will be saved until a new event replaces it. /// diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 5793a79..5a988d4 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::collections::HashMap; + pub use data::Data; +use ruma::{RoomId, UserId, events::presence::PresenceEvent}; use crate::service::*; @@ -108,7 +111,7 @@ impl Service<_> { }*/ /// Returns the most recent presence updates that happened after the event with id `since`. - #[tracing::instrument(skip(self, since, _rooms, _globals))] + #[tracing::instrument(skip(self, since, room_id))] pub fn presence_since( &self, room_id: &RoomId, diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 4befcf2..32b091f 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,3 +1,5 @@ +use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; + pub trait Data { /// Replaces the previous read receipt. fn readreceipt_update( diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 9cd474f..744fece 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,7 +1,6 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; pub struct Service { db: D, @@ -15,7 +14,7 @@ impl Service<_> { room_id: &RoomId, event: ReceiptEvent, ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); + self.db.readreceipt_update(user_id, room_id, event) } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. @@ -35,7 +34,7 @@ impl Service<_> { } /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { self.db.private_read_set(room_id, user_id, count) } diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 83ff90e..0c77313 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,3 +1,7 @@ +use std::collections::HashSet; + +use ruma::{UserId, RoomId}; + pub trait Data { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index b29c788..68b9fd8 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{UserId, RoomId}; use crate::service::*; @@ -66,7 +67,6 @@ impl Service<_> { */ /// Returns the count of the last typing update in this room. 
- #[tracing::instrument(skip(self, globals))] pub fn last_typing_update(&self, room_id: &RoomId) -> Result { self.db.last_typing_update(room_id) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5b77586..7152957 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,8 +1,29 @@ - /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -use crate::service::*; +use std::{ + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; + +use futures_util::Future; +use ruma::{ + api::{ + client::error::ErrorKind, + federation::event::{get_event, get_room_state_ids}, + }, + events::{room::create::RoomCreateEventContent, StateEventType}, + int, + serde::Base64, + signatures::CanonicalJsonValue, + state_res::{self, RoomVersion, StateMap}, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, +}; +use tracing::{error, info, trace, warn}; + +use crate::{service::*, services, Error, PduEvent}; pub struct Service; @@ -31,45 +52,47 @@ impl Service { /// it /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively - #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] + #[tracing::instrument(skip(value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( + &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, value: BTreeMap, is_timeline_event: bool, - db: &'a Database, pub_key_map: &'a RwLock>>, ) -> Result>> { - db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; + services().rooms.exists(room_id)?.ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server", + ))?; + + services() + .rooms + .is_disabled(room_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of this room is currently disabled on this server.", + ))?; - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; - // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { - return Some(pdu_id.to_vec()); + if let Some(pdu_id) = services().rooms.get_pdu_id(event_id)? { + return Ok(Some(pdu_id.to_vec())); } - let create_event = db + let create_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; - let first_pdu_in_room = db + let first_pdu_in_room = services() .rooms .first_pdu_in_room(room_id)? .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; + let (incoming_pdu, val) = self + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) + .await?; // 8. if not timeline event: stop if !is_timeline_event { @@ -82,15 +105,27 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); + let (sorted_prev_events, eventid_info) = self.fetch_unknown_prev_events( + origin, + &create_event, + room_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ); let mut errors = 0; - for prev_id in dbg!(sorted) { + for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of - this room is currently disabled on this server."))?; + services() + .rooms + .is_disabled(room_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of + this room is currently disabled on this server.", + ))?; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -120,26 +155,27 @@ impl Service { } let start_time = Instant::now(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await + if let Err(e) = self + .upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await { errors += 1; warn!("Prev event {} failed: {}", prev_id, e); - match db + match services() .globals .bad_event_ratelimiter .write() @@ -155,7 +191,8 @@ impl Service { } } let elapsed = start_time.elapsed(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() @@ -172,22 +209,23 @@ impl Service { // Done with prev events, now handling the incoming event let start_time = Instant::now(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( + let r = services().rooms.event_handler.upgrade_outlier_to_timeline_pdu( incoming_pdu, val, &create_event, origin, - db, room_id, pub_key_map, ) .await; - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() @@ -196,22 +234,23 @@ impl Service { r } - #[tracing::instrument(skip(create_event, value, db, pub_key_map))] + #[tracing::instrument(skip(create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( + &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, value: BTreeMap, - db: &'a Database, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> + { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // We go through all the signatures we see on the value and fetch the corresponding signing // keys - fetch_required_signing_keys(&value, pub_key_map, db) + self.fetch_required_signing_keys(&value, pub_key_map, db) .await?; // 2. 
Check signatures, otherwise drop @@ -223,7 +262,8 @@ impl Service { })?; let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + let room_version = + RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, @@ -261,8 +301,7 @@ impl Service { // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, + self.fetch_and_handle_outliers( origin, &incoming_pdu .auth_events @@ -284,7 +323,7 @@ impl Service { // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id)? { + let auth_event = match services().rooms.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -303,8 +342,9 @@ impl Service { v.insert(auth_event); } hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times." + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", )); } } @@ -316,7 +356,10 @@ impl Service { .map(|a| a.as_ref()) != Some(create_event) { - return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); } if !state_res::event_auth::auth_check( @@ -325,15 +368,21 @@ impl Service { None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) - .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? - { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + .map_err(|e| { + error!(e); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") + })? { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth check failed", + )); } info!("Validation successful."); // 7. Persist the event as an outlier. - db.rooms + services() + .rooms .add_pdu_outlier(&incoming_pdu.event_id, &val)?; info!("Added pdu as outlier."); @@ -342,22 +391,22 @@ impl Service { }) } - #[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] - async fn upgrade_outlier_to_timeline_pdu( + #[tracing::instrument(skip(incoming_pdu, val, create_event, pub_key_map))] + pub async fn upgrade_outlier_to_timeline_pdu( + &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, - db: &Database, room_id: &RoomId, pub_key_map: &RwLock>>, ) -> Result>, String> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(Some(pduid)) = services().rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } - if db + if services() .rooms .is_event_soft_failed(&incoming_pdu.event_id) .map_err(|_| "Failed to ask db for soft fail".to_owned())? 
@@ -387,32 +436,32 @@ impl Service { if incoming_pdu.prev_events.len() == 1 { let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db + let prev_event_sstatehash = services() .rooms .pdu_shortstatehash(prev_event) .map_err(|_| "Failed talking to db".to_owned())?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) + Some(services().rooms.state_full_ids(shortstatehash).await) } else { None }; if let Some(Ok(mut state)) = state { info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + let prev_pdu = services() + .rooms + .get_pdu(prev_event) + .ok() + .flatten() + .ok_or_else(|| { "Could not find prev event, but we know the state.".to_owned() })?; if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state.insert(shortstatekey, Arc::from(prev_event)); @@ -427,19 +476,20 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + let prev_event = if let Ok(Some(pdu)) = services().rooms.get_pdu(prev_eventid) { pdu } else { okay = false; break; }; - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; + let sstatehash = + if let Ok(Some(s)) = services().rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; extremity_sstatehashes.insert(sstatehash, prev_event); } @@ -449,19 +499,18 @@ impl Service { let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db + let mut leaf_state: BTreeMap<_, _> = services() .rooms .state_full_ids(sstatehash) .await .map_err(|_| "Failed to ask db for room state.".to_owned())?; if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms .get_or_create_shortstatekey( &prev_event.kind.to_string().into(), state_key, - &db.globals, ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); @@ -472,7 +521,7 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.get_statekey_from_short(k) { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType state.insert((ty.to_string().into(), st_key), id.clone()); @@ -483,7 +532,10 @@ impl Service { } auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) + services() + .rooms + .auth_chain + .get_auth_chain(room_id, starting_events, services()) .await .map_err(|_| "Failed to load auth chain.".to_owned())? 
.collect(), @@ -492,15 +544,16 @@ impl Service { fork_states.push(state); } - let lock = db.globals.stateres_mutex.lock(); + let lock = services().globals.stateres_mutex.lock(); - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }); + let result = + state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); drop(lock); state_at_incoming_event = match result { @@ -508,14 +561,15 @@ impl Service { new_state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = db + let shortstatekey = services() .rooms .get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, - &db.globals, ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + .map_err(|_| { + "Failed to get_or_create_shortstatekey".to_owned() + })?; Ok((shortstatekey, event_id)) }) .collect::>()?, @@ -532,10 +586,9 @@ impl Service { info!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. We trust the server's // response to some extend, but we still do a lot of checks on the events - match db + match services() .sending .send_federation_request( - &db.globals, origin, get_room_state_ids::v1::Request { room_id, @@ -546,18 +599,18 @@ impl Service { { Ok(res) => { info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; + let state_vec = self + .fetch_and_handle_outliers( + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { @@ -566,13 +619,9 @@ impl Service { .clone() .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; match state.entry(shortstatekey) { @@ -587,7 +636,7 @@ impl Service { } // The original create event must still be in the state - let create_shortstatekey = db + let create_shortstatekey = services() .rooms .get_shortstatekey(&StateEventType::RoomCreate, "") .map_err(|_| "Failed to talk to db.")? 
@@ -618,12 +667,13 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| { - db.rooms + services() + .rooms .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + .and_then(|event_id| services().rooms.get_pdu(event_id).ok().flatten()) }, ) .map_err(|_e| "Auth check failed.".to_owned())?; @@ -636,7 +686,8 @@ impl Service { // We start looking at current room state now, so lets lock the room let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -648,7 +699,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) info!("Calculating extremities"); - let mut extremities = db + let mut extremities = services() .rooms .get_pdu_leaves(room_id) .map_err(|_| "Failed to load room leaves".to_owned())?; @@ -661,14 +712,16 @@ impl Service { } // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + extremities + .retain(|id| !matches!(services().rooms.is_event_referenced(room_id, id), Ok(true))); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event .iter() .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) + services() + .rooms + .compress_state_event(*shortstatekey, id) .map_err(|_| "Failed to compress_state_event".to_owned()) }) .collect::>()?; @@ -676,7 +729,7 @@ impl Service { // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); - let auth_events = db + let auth_events = services() .rooms .get_auth_events( room_id, @@ -696,11 +749,10 @@ impl Service { .map_err(|_e| "Auth check failed.".to_owned())?; if soft_fail { - append_incoming_pdu( - db, + self.append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(Deref::deref), + extremities.iter().map(std::ops::Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -712,7 +764,8 @@ impl Service { // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms + services() + .rooms .mark_event_soft_failed(&incoming_pdu.event_id) .map_err(|_| "Failed to set soft failed flag".to_owned())?; return Err("Event has been soft failed".into()); @@ -720,13 +773,13 @@ impl Service { if incoming_pdu.state_key.is_some() { info!("Loading current room state ids"); - let current_sstatehash = db + let current_sstatehash = services() .rooms .current_shortstatehash(room_id) .map_err(|_| "Failed to load current state hash.".to_owned())? .expect("every room has state"); - let current_state_ids = db + let current_state_ids = services() .rooms .state_full_ids(current_sstatehash) .await @@ -737,14 +790,14 @@ impl Service { info!("Loading extremities"); for id in dbg!(&extremities) { - match db + match services() .rooms .get_pdu(id) .map_err(|_| "Failed to ask db for pdu.".to_owned())? { Some(leaf_pdu) => { extremity_sstatehashes.insert( - db.rooms + services() .pdu_shortstatehash(&leaf_pdu.event_id) .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? 
.ok_or_else(|| { @@ -777,13 +830,9 @@ impl Service { // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); @@ -801,8 +850,9 @@ impl Service { fork_states[0] .iter() .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) + services() + .rooms + .compress_state_event(*k, id) .map_err(|_| "Failed to compress_state_event.".to_owned()) }) .collect::>()? @@ -814,14 +864,16 @@ impl Service { let mut auth_chain_sets = Vec::new(); for state in &fork_states { auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), + services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + ) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } @@ -832,7 +884,8 @@ impl Service { .map(|map| { map.into_iter() .filter_map(|(k, id)| { - db.rooms + services() + .rooms .get_statekey_from_short(k) // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType @@ -846,13 +899,13 @@ impl Service { info!("Resolving state"); - let lock = db.globals.stateres_mutex.lock(); + let lock = services().globals.stateres_mutex.lock(); let state = match state_res::resolve( room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); + let res = services().rooms.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -872,16 +925,13 @@ impl Service { state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) + services() + .rooms + .compress_state_event(shortstatekey, &event_id) .map_err(|_| "Failed to compress state event".to_owned()) }) .collect::>()? @@ -890,8 +940,9 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) + services() + .rooms + .force_state(room_id, new_room_state) .map_err(|_| "Failed to set new room state.".to_owned())?; } } @@ -903,19 +954,19 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. 
- let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + let pdu_id = self + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(std::ops::Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; info!("Appended incoming pdu"); @@ -935,15 +986,22 @@ impl Service { /// d. TODO: Ask other servers over federation? #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, + &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> + { Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } @@ -952,10 +1010,16 @@ impl Service { let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&**id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } @@ -969,7 +1033,7 @@ impl Service { // a. Look in the main timeline (pduid_pdu tree) // b. 
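The bad_event_ratelimiter used above stores (time of last attempt, try count) per event id and backs off quadratically, capped at one day. Pulled out of the surrounding federation code, the check amounts to:

use std::time::{Duration, Instant};

/// True if this event id should be skipped for now, given how often it has
/// already failed and when the last attempt was made.
fn should_back_off(last_attempt: Instant, tries: u32) -> bool {
    // 5 minutes * tries^2, capped at 24 hours.
    let mut min_elapsed = Duration::from_secs(5 * 60) * tries * tries;
    if min_elapsed > Duration::from_secs(60 * 60 * 24) {
        min_elapsed = Duration::from_secs(60 * 60 * 24);
    }
    last_attempt.elapsed() < min_elapsed
}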
Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + if let Ok(Some(local_pdu)) = services().rooms.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); continue; @@ -992,16 +1056,15 @@ impl Service { tokio::task::yield_now().await; } - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + if let Ok(Some(_)) = services().rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } info!("Fetching {} over federation.", next_id); - match db + match services() .sending .send_federation_request( - &db.globals, origin, get_event::v1::Request { event_id: &next_id }, ) @@ -1010,7 +1073,7 @@ impl Service { Ok(res) => { info!("Got {} over federation", next_id); let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + match pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -1051,16 +1114,16 @@ impl Service { } for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await + match self + .handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + pub_key_map, + ) + .await { Ok((pdu, json)) => { if next_id == id { @@ -1078,9 +1141,14 @@ impl Service { }) } - - - fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + async fn fetch_unknown_prev_events( + &self, + origin: &ServerName, + create_event: &PduEvent, + room_id: &RoomId, + pub_key_map: &RwLock>>, + initial_set: Vec>, + ) -> Vec<(Arc, HashMap, (Arc, BTreeMap)>)> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1088,16 +1156,16 @@ impl Service { let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() + if let Some((pdu, json_opt)) = self + .fetch_and_handle_outliers( + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() { if amount > 100 { // Max limit reached @@ -1106,9 +1174,13 @@ impl Service { continue; } - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { + if let Some(json) = json_opt.or_else(|| { + services() + .rooms + .get_outlier_pdu_json(&prev_event_id) + .ok() + .flatten() + }) { if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { amount += 1; for prev_prev in &pdu.prev_events { @@ -1153,6 +1225,6 @@ impl Service { }) .map_err(|_| "Error sorting prev events".to_owned())?; - sorted + (sorted, eventid_info) } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 9cf2d8b..52a683d 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,3 +1,5 @@ +use ruma::{RoomId, DeviceId, UserId}; + pub trait Data { fn lazy_load_was_sent_before( &self, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index cf00174..bdc083a 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::collections::HashSet; + pub use data::Data; +use ruma::{DeviceId, UserId, RoomId}; use crate::service::*; @@ -47,7 +50,7 @@ impl Service<_> { 
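fetch_unknown_prev_events, whose new signature appears above, is essentially a bounded backwards walk over prev_events: pop an id, fetch it as an outlier, push its own prev_events, and stop expanding once roughly 100 events have been collected. Stripped of the federation and database calls, the traversal looks like this (the Pdu type and fetch closure are stand-ins):

use std::collections::{HashMap, HashSet};

struct Pdu {
    prev_events: Vec<String>,
}

/// Walk prev_events starting from `initial_set`, fetching at most `limit` unknown events.
fn walk_prev_events(
    initial_set: Vec<String>,
    mut fetch: impl FnMut(&str) -> Option<Pdu>,
    limit: usize,
) -> HashMap<String, Pdu> {
    let mut todo = initial_set;
    let mut seen: HashSet<String> = todo.iter().cloned().collect();
    let mut fetched = HashMap::new();

    while let Some(id) = todo.pop() {
        if fetched.len() >= limit {
            // Max limit reached: stop expanding the graph any further.
            break;
        }
        if let Some(pdu) = fetch(&id) {
            for prev in &pdu.prev_events {
                if seen.insert(prev.clone()) {
                    todo.push(prev.clone());
                }
            }
            fetched.insert(id, pdu);
        }
    }
    fetched
}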
room_id: &RoomId, since: u64, ) -> Result<()> { - self.db.lazy_load_confirm_delivery(user_d, device_id, room_id, since) + self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, since) } #[tracing::instrument(skip(self))] @@ -57,6 +60,6 @@ impl Service<_> { device_id: &DeviceId, room_id: &RoomId, ) -> Result<()> { - self.db.lazy_load_reset(user_id, device_id, room_id); + self.db.lazy_load_reset(user_id, device_id, room_id) } } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 58bd351..2d718b2 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,3 +1,5 @@ +use ruma::RoomId; + pub trait Data { fn exists(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 644cd18..8417e28 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::RoomId; use crate::service::*; diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 89598af..4725034 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -1,216 +1,37 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
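The CompressedStateEvent alias above has lost its generic argument in this rendering; in the source it is [u8; 2 * size_of::<u64>()], i.e. 16 bytes packing a shortstatekey next to a shorteventid. A sketch of the round trip, assuming the big-endian byte order Conduit uses for its other numeric keys:

use std::convert::TryInto;
use std::mem::size_of;

pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> CompressedStateEvent {
    let mut out = [0u8; 2 * size_of::<u64>()];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: &CompressedStateEvent) -> (u64, u64) {
    let shortstatekey = u64::from_be_bytes(compressed[..8].try_into().unwrap());
    let shorteventid = u64::from_be_bytes(compressed[8..].try_into().unwrap());
    (shortstatekey, shorteventid)
}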
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } +pub mod alias; +pub mod auth_chain; +pub mod directory; +pub mod edus; +pub mod event_handler; +pub mod lazy_loading; +pub mod metadata; +pub mod outlier; +pub mod pdu_metadata; +pub mod search; +pub mod short; +pub mod state; +pub mod state_accessor; +pub mod state_cache; +pub mod state_compressor; +pub mod timeline; +pub mod user; +pub struct Service { + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub edus: edus::Service, + pub event_handler: event_handler::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub user: user::Service, } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index 6b534b9..d579515 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,3 +1,7 @@ +use ruma::{EventId, signatures::CanonicalJsonObject}; + +use crate::PduEvent; + pub trait Data { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index c82cb62..ee8b940 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,8 @@ mod data; pub use data::Data; +use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::service::*; +use crate::{service::*, PduEvent}; pub struct Service { 
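The new rooms::Service struct above, like the rest of this patch, assumes a process-wide services() accessor instead of a &Database handle threaded through every call. The patch itself does not show how that accessor is defined; one way to get this shape, sketched here with the once_cell crate and placeholder service structs, is a write-once global:

use once_cell::sync::OnceCell;

// Stand-ins for the real service structs assembled at startup.
pub struct Globals { pub server_name: String }
pub struct Rooms;

pub struct Services {
    pub globals: Globals,
    pub rooms: Rooms,
    // ... one field per service module ...
}

static SERVICES: OnceCell<Services> = OnceCell::new();

/// Called exactly once at startup, after every service is constructed.
pub fn init(services: Services) {
    if SERVICES.set(services).is_err() {
        panic!("services initialized twice");
    }
}

/// Used everywhere else instead of passing `&Database` around.
pub fn services() -> &'static Services {
    SERVICES.get().expect("services() called before initialization")
}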
db: D, diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 6778795..531823f 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,3 +1,7 @@ +use std::sync::Arc; + +use ruma::{EventId, RoomId}; + pub trait Data { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 6d6df22..3442b83 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::sync::Arc; + pub use data::Data; +use ruma::{RoomId, EventId}; use crate::service::*; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 1601e0d..16287eb 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,7 +1,9 @@ -pub trait Data { - pub fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; +use ruma::RoomId; - pub fn search_pdus<'a>( +pub trait Data { + fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + + fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 5478273..9087def 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,6 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::RoomId; pub struct Service { db: D, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a8e87b9..afde14e 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,10 @@ mod data; -pub use data::Data; +use std::sync::Arc; -use crate::service::*; +pub use data::Data; +use ruma::{EventId, events::StateEventType}; + +use crate::{service::*, Error, utils}; pub struct Service { db: D, @@ -188,7 +191,6 @@ impl Service<_> { fn get_or_create_shortstatehash( &self, state_hash: &StateHashId, - globals: &super::globals::Globals, ) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8aa7638..ac8fac2 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,14 +1,20 @@ +use std::sync::Arc; +use std::{sync::MutexGuard, collections::HashSet}; +use std::fmt::Debug; + +use ruma::{EventId, RoomId}; + pub trait Data { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(room_id: &RoomId); /// Update the current state of the room. - fn set_room_state(room_id: &RoomId, new_shortstatehash: u64 + fn set_room_state(room_id: &RoomId, new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex ); /// Associates a state with an event. - fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()> { + fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()>; /// Returns all events we would send as the prev_events of the next event. 
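Each rooms submodule in this split follows the same two-layer shape: a Data trait describing the raw key-value operations, and a Service generic over some D: Data holding the higher-level logic. (The angle brackets have been eaten by this rendering, which is why the structs read as `pub struct Service { db: D, }`.) A minimal, self-contained version of the pattern, with &str standing in for RoomId:

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

/// Storage-facing half: one method per low-level lookup.
pub trait Data {
    fn exists(&self, room_id: &str) -> Result<bool>;
}

/// Logic-facing half: generic over whatever backend implements `Data`.
pub struct Service<D: Data> {
    db: D,
}

impl<D: Data> Service<D> {
    pub fn exists(&self, room_id: &str) -> Result<bool> {
        // Caching, validation, etc. would live here.
        self.db.exists(room_id)
    }
}

/// Example backend: an in-memory set instead of a sled/rocksdb tree.
pub struct MemoryData(std::collections::HashSet<String>);

impl Data for MemoryData {
    fn exists(&self, room_id: &str) -> Result<bool> {
        Ok(self.0.contains(room_id))
    }
}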
fn get_forward_extremities(room_id: &RoomId) -> Result>>; @@ -18,7 +24,7 @@ pub trait Data { room_id: &RoomId, event_ids: impl IntoIterator + Debug, _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()>; } pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index b513ab5..6c33d52 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,7 +1,12 @@ mod data; -pub use data::Data; +use std::collections::HashSet; -use crate::service::*; +pub use data::Data; +use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; +use serde::Deserialize; +use tracing::warn; + +use crate::{service::*, SERVICE, PduEvent, Error, utils::calculate_hash}; pub struct Service { db: D, @@ -9,22 +14,20 @@ pub struct Service { impl Service<_> { /// Set the room to the given statehash and update caches. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, shortstatehash: u64, statediffnew: HashSet, statediffremoved: HashSet, - db: &Database, ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - state_compressor::parse_compressed_state_event(new) + SERVICE.rooms.state_compressor.parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match timeline::get_pdu_json(&event_id)? { + let pdu = match SERVICE.rooms.timeline.get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -60,12 +63,12 @@ impl Service<_> { Err(_) => continue, }; - room::state_cache::update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + SERVICE.room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - room::state_cache::update_joined_count(room_id, db)?; + SERVICE.room.state_cache.update_joined_count(room_id)?; - db.set_room_state(room_id, new_shortstatehash); + self.db.set_room_state(room_id, shortstatehash); Ok(()) } @@ -74,19 +77,18 @@ impl Service<_> { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] + #[tracing::instrument(skip(self, state_ids_compressed))] pub fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: HashSet, - globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = short::get_or_create_shorteventid(event_id, globals)?; + let shorteventid = SERVICE.short.get_or_create_shorteventid(event_id)?; - let previous_shortstatehash = db.get_room_shortstatehash(room_id)?; + let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; - let state_hash = super::calculate_hash( + let state_hash = calculate_hash( &state_ids_compressed .iter() .map(|s| &s[..]) @@ -94,11 +96,11 @@ impl Service<_> { ); let (shortstatehash, already_existed) = - short::get_or_create_shortstatehash(&state_hash, globals)?; + SERVICE.short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| room::state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| SERVICE.room.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -117,7 +119,7 @@ impl Service<_> { } else { (state_ids_compressed, HashSet::new()) }; - state_compressor::save_state_from_diff( + SERVICE.room.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -126,7 +128,7 @@ impl Service<_> { )?; } - db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -135,13 +137,12 @@ impl Service<_> { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] + #[tracing::instrument(skip(self, new_pdu))] pub fn append_to_state( &self, new_pdu: &PduEvent, - globals: &super::globals::Globals, ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; + let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; @@ -157,10 +158,9 @@ impl Service<_> { let shortstatekey = self.get_or_create_shortstatekey( &new_pdu.kind.to_string().into(), state_key, - globals, )?; - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -176,7 +176,7 @@ impl Service<_> { } // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; + let shortstatehash = SERVICE.globals.next_count()?; let mut statediffnew = HashSet::new(); statediffnew.insert(new); @@ -254,7 +254,23 @@ impl Service<_> { Ok(()) } - pub fn db(&self) -> D { - &self.db + /// Returns the room's version. 
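set_event_state above stores new state as a diff against the last parent layer: entries present in the new state but not in the parent become the "added" set, entries present in the parent but missing from the new state become the "removed" set. In set terms, with u64s standing in for compressed state events:

use std::collections::HashSet;

/// Split `new_state` into (added, removed) relative to `parent_state`.
fn diff_against_parent(
    parent_state: &HashSet<u64>,
    new_state: &HashSet<u64>,
) -> (HashSet<u64>, HashSet<u64>) {
    let added: HashSet<u64> = new_state.difference(parent_state).copied().collect();
    let removed: HashSet<u64> = parent_state.difference(new_state).copied().collect();
    (added, removed)
}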
+ #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index a2b76e4..bf2972f 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,3 +1,9 @@ +use std::{sync::Arc, collections::HashMap}; + +use ruma::{EventId, events::StateEventType, RoomId}; + +use crate::PduEvent; + pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 28a49a9..92e5c8e 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,7 +1,10 @@ mod data; -pub use data::Data; +use std::{sync::Arc, collections::{HashMap, BTreeMap}}; -use crate::service::*; +pub use data::Data; +use ruma::{events::StateEventType, RoomId, EventId}; + +use crate::{service::*, PduEvent}; pub struct Service { db: D, @@ -42,7 +45,7 @@ impl Service<_> { event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.db.pdu_state_get(event_id) + self.db.pdu_state_get(shortstatehash, event_type, state_key) } /// Returns the state hash for this pdu. diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 166d4f6..f651919 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,3 +1,5 @@ +use ruma::{UserId, RoomId}; + pub trait Data { fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 778679d..d29501a 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,7 +1,11 @@ mod data; -pub use data::Data; +use std::{collections::HashSet, sync::Arc}; -use crate::service::*; +pub use data::Data; +use regex::Regex; +use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; + +use crate::{service::*, SERVICE, utils, Error}; pub struct Service { db: D, @@ -9,7 +13,7 @@ pub struct Service { impl Service<_> { /// Update current membership data. 
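get_room_version above reads the room version out of the m.room.create event's content, erroring if no create event exists. With ruma this goes through RoomCreateEventContent; reduced to plain serde, the extraction is just:

use serde::Deserialize;

/// Only the field we care about from an m.room.create content object.
#[derive(Deserialize)]
struct CreateContent {
    room_version: Option<String>,
}

fn room_version_from_create_content(content_json: &str) -> Result<String, serde_json::Error> {
    let content: CreateContent = serde_json::from_str(content_json)?;
    // Per the spec, a missing room_version key means room version "1".
    Ok(content.room_version.unwrap_or_else(|| "1".to_owned()))
}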
- #[tracing::instrument(skip(self, last_state, db))] + #[tracing::instrument(skip(self, last_state))] pub fn update_membership( &self, room_id: &RoomId, @@ -17,12 +21,11 @@ impl Service<_> { membership: MembershipState, sender: &UserId, last_state: Option>>, - db: &Database, update_joined_count: bool, ) -> Result<()> { // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; + if user_id.server_name() != SERVICE.globals.server_name() { + SERVICE.users.create(user_id, None)?; // TODO: displayname, avatar url } @@ -82,7 +85,7 @@ impl Service<_> { user_id, RoomAccountDataEventType::Tag, )? { - db.account_data + SERVICE.account_data .update( Some(room_id), user_id, @@ -94,7 +97,7 @@ impl Service<_> { }; // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( + if let Some(mut direct_event) = SERVICE.account_data.get::( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -109,12 +112,11 @@ impl Service<_> { } if room_ids_updated { - db.account_data.update( + SERVICE.account_data.update( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), &direct_event, - &db.globals, )?; } }; @@ -130,7 +132,7 @@ impl Service<_> { } MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = db + let is_ignored = SERVICE .account_data .get::( None, // Ignored users are in global account data @@ -186,7 +188,7 @@ impl Service<_> { } #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { + pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; let mut joined_servers = HashSet::new(); @@ -226,11 +228,10 @@ impl Service<_> { Ok(()) } - #[tracing::instrument(skip(self, room_id, db))] + #[tracing::instrument(skip(self, room_id))] pub fn get_our_real_users( &self, room_id: &RoomId, - db: &Database, ) -> Result>>> { let maybe = self .our_real_users_cache @@ -241,7 +242,7 @@ impl Service<_> { if let Some(users) = maybe { Ok(users) } else { - self.update_joined_count(room_id, db)?; + self.update_joined_count(room_id)?; Ok(Arc::clone( self.our_real_users_cache .read() @@ -252,12 +253,11 @@ impl Service<_> { } } - #[tracing::instrument(skip(self, room_id, appservice, db))] + #[tracing::instrument(skip(self, room_id, appservice))] pub fn appservice_in_room( &self, room_id: &RoomId, appservice: &(String, serde_yaml::Value), - db: &Database, ) -> Result { let maybe = self .appservice_in_room_cache @@ -285,7 +285,7 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() + UserId::parse_with_server_name(string, SERVICE.globals.server_name()).ok() }); let in_room = bridge_user_id diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 8b855cd..74a28e7 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,4 +1,6 @@ -struct StateDiff { +use crate::service::rooms::CompressedStateEvent; + +pub struct StateDiff { parent: Option, added: Vec, removed: Vec, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index d6d88e2..3aea4fe 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ 
b/src/service/rooms/state_compressor/mod.rs @@ -1,7 +1,12 @@ -mod data; -pub use data::Data; +pub mod data; +use std::{mem::size_of, sync::Arc, collections::HashSet}; -use crate::service::*; +pub use data::Data; +use ruma::{EventId, RoomId}; + +use crate::{service::*, utils}; + +use self::data::StateDiff; pub struct Service { db: D, @@ -30,9 +35,9 @@ impl Service<_> { return Ok(r.clone()); } - self.db.get_statediff(shortstatehash)?; + let StateDiff { parent, added, removed } = self.db.get_statediff(shortstatehash)?; - if parent != 0_u64 { + if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; let mut state = response.last().unwrap().1.clone(); state.extend(added.iter().copied()); @@ -155,7 +160,7 @@ impl Service<_> { if parent_states.is_empty() { // There is no parent layer, create a new state - self.db.save_statediff(shortstatehash, StateDiff { parent: 0, new: statediffnew, removed: statediffremoved })?; + self.db.save_statediff(shortstatehash, StateDiff { parent: None, added: statediffnew, removed: statediffremoved })?; return Ok(()); }; @@ -197,7 +202,7 @@ impl Service<_> { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff(shortstatehash, StateDiff { parent: parent.0, new: statediffnew, removed: statediffremoved })?; + self.db.save_statediff(shortstatehash, StateDiff { parent: Some(parent.0), added: statediffnew, removed: statediffremoved })?; } Ok(()) diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 4e5c379..bf6d8c5 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,3 +1,9 @@ +use std::sync::Arc; + +use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; + +use crate::PduEvent; + pub trait Data { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; @@ -5,34 +11,37 @@ pub trait Data { fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result>; /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>>; + fn get_pdu_id(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>>; + fn get_pdu(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result; + fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Removes a pdu and creates a new one with the same id. 
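The load_shortstatehash_info fix above makes the parent layer explicit: a StateDiff is either a full snapshot (parent = None) or an added/removed delta on top of its parent. Resolving a snapshot is a walk up the parent chain, applying each delta on the way back down. A compact sketch, again with u64s standing in for compressed state events and an in-memory map instead of the shortstatehash_statediff tree:

use std::collections::{HashMap, HashSet};

struct StateDiff {
    parent: Option<u64>, // shortstatehash of the layer this diffs against
    added: Vec<u64>,
    removed: Vec<u64>,
}

/// Resolve the full state for `shortstatehash` by stacking diffs along the parent chain.
fn load_full_state(diffs: &HashMap<u64, StateDiff>, shortstatehash: u64) -> HashSet<u64> {
    let diff = &diffs[&shortstatehash];
    let mut state = match diff.parent {
        Some(parent) => load_full_state(diffs, parent),
        None => HashSet::new(),
    };
    state.extend(diff.added.iter().copied());
    for removed in &diff.removed {
        state.remove(removed);
    }
    state
}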
fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -40,7 +49,7 @@ pub trait Data { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( + fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -50,14 +59,14 @@ pub trait Data { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( + fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, until: u64, ) -> Result, PduEvent)>> + 'a>; - pub fn pdus_after<'a>( + fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index c6393c6..7b60fe5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,7 +1,17 @@ mod data; -pub use data::Data; +use std::{sync::MutexGuard, iter, collections::HashSet}; +use std::fmt::Debug; -use crate::service::*; +pub use data::Data; +use regex::Regex; +use ruma::signatures::CanonicalJsonValue; +use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; +use serde::Deserialize; +use serde_json::value::to_raw_value; +use tracing::{warn, error}; + +use crate::SERVICE; +use crate::{service::{*, pdu::{PduBuilder, EventHash}}, Error, PduEvent, utils}; pub struct Service { db: D, @@ -126,13 +136,12 @@ impl Service<_> { /// in `append_pdu`. /// /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] + #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] pub fn append_pdu<'a>( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, leaves: impl IntoIterator + Debug, - db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -249,7 +258,6 @@ impl Service<_> { &power_levels, &sync_pdu, &pdu.room_id, - db, )? { match action { Action::DontNotify => notify = false, @@ -446,9 +454,8 @@ impl Service<_> { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { + ) -> (PduEvent, CanonicalJsonObject) { let PduBuilder { event_type, content, @@ -457,14 +464,14 @@ impl Service<_> { redacts, } = pdu_builder; - let prev_events: Vec<_> = db + let prev_events: Vec<_> = SERVICE .rooms .get_pdu_leaves(room_id)? 
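pdus_since / pdus_until above paginate by a numeric count token rather than by event id: each appended PDU gets a monotonically increasing count, and pagination is a range scan from that count in either direction. A toy version over a BTreeMap (the real trait iterates the pduid_pdu tree):

use std::collections::BTreeMap;
use std::ops::Bound;

struct Timeline {
    by_count: BTreeMap<u64, String>, // count -> event id, standing in for the PDU
}

impl Timeline {
    /// Events strictly after `since`, oldest first.
    fn pdus_since(&self, since: u64) -> impl Iterator<Item = (&u64, &String)> + '_ {
        self.by_count
            .range((Bound::Excluded(since), Bound::Unbounded))
    }

    /// Events strictly before `until`, newest first.
    fn pdus_until(&self, until: u64) -> impl Iterator<Item = (&u64, &String)> + '_ {
        self.by_count.range(..until).rev()
    }
}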
.into_iter() .take(20) .collect(); - let create_event = db + let create_event = SERVICE .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")?; @@ -481,7 +488,7 @@ impl Service<_> { // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { + .map_or(SERVICE.globals.default_room_version(), |create_event| { create_event.room_version }); let room_version = @@ -575,8 +582,8 @@ impl Service<_> { ); match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + SERVICE.globals.server_name().as_str(), + SERVICE.globals.keypair(), &mut pdu_json, &room_version_id, ) { @@ -614,22 +621,21 @@ impl Service<_> { /// Creates a new persisted data unit and adds it to a room. This function takes a /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] + #[tracing::instrument(skip(self, _mutex_lock))] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = create_hash_and_sign_event()?; + let (pdu, pdu_json) = self.create_hash_and_sign_event()?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; + let statehashid = self.append_to_state(&pdu)?; let pdu_id = self.append_pdu( &pdu, @@ -637,7 +643,6 @@ impl Service<_> { // Since this PDU references all pdu_leaves we can update the leaves // of the room iter::once(&*pdu.event_id), - db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time @@ -659,9 +664,9 @@ impl Service<_> { } // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); + servers.remove(SERVICE.globals.server_name()); - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; + SERVICE.sending.send_pdu(servers.into_iter(), &pdu_id)?; Ok(pdu.event_id) } @@ -670,7 +675,6 @@ impl Service<_> { /// server that sent the event. #[tracing::instrument(skip_all)] fn append_incoming_pdu<'a>( - db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: impl IntoIterator + Clone + Debug, @@ -680,21 +684,20 @@ impl Service<_> { ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- db.rooms.set_event_state( + SERVICE.rooms.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, - &db.globals, )?; if soft_fail { - db.rooms + SERVICE.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + SERVICE.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + let pdu_id = SERVICE.rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; Ok(Some(pdu_id)) } @@ -756,4 +759,4 @@ impl Service<_> { // If event does not exist, just noop Ok(()) } - +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 45fb355..664f8a0 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{RoomId, UserId}; use crate::service::*; diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index f1ff5f8..c1b4715 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,5 +1,5 @@ pub trait Data { - pub fn add_txnid( + fn add_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, @@ -7,7 +7,7 @@ pub trait Data { data: &[u8], ) -> Result<()>; - pub fn existing_txnid( + fn existing_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index d944847..9b76e13 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{UserId, DeviceId, TransactionId}; use crate::service::*; diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 40e69bd..cc943bf 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,3 +1,5 @@ +use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; + pub trait Data { fn set_uiaa_request( &self, diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 593ea5f..5e1df8f 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,7 +1,9 @@ mod data; pub use data::Data; +use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; +use tracing::error; -use crate::service::*; +use crate::{service::*, utils, Error, SERVICE}; pub struct Service { db: D, @@ -36,8 +38,6 @@ impl Service<_> { device_id: &DeviceId, auth: &IncomingAuthData, uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth .session() @@ -66,13 +66,13 @@ impl Service<_> { }; let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) + UserId::parse_with_server_name(username.clone(), SERVICE.globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") })?; // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { + if let Some(hash) = SERVICE.users.password_hash(&user_id)? 
{ let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); diff --git a/src/service/users/data.rs b/src/service/users/data.rs index d99d032..327e0c6 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,34 +1,27 @@ -pub trait Data { +use std::collections::BTreeMap; + +use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; + +trait Data { /// Check if a user has an account on this homeserver. - pub fn exists(&self, user_id: &UserId) -> Result; + fn exists(&self, user_id: &UserId) -> Result; /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result; - - /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result; - - /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + fn is_deactivated(&self, user_id: &UserId) -> Result; /// Returns the number of users registered on this server. - pub fn count(&self) -> Result; + fn count(&self) -> Result; /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>>; + fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_; + fn iter(&self) -> impl Iterator>> + '_; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - pub fn list_local_users(&self) -> Result>; + fn list_local_users(&self) -> Result>; /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. @@ -37,31 +30,31 @@ pub trait Data { fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result>; + fn password_hash(&self, user_id: &UserId) -> Result>; /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result>; + fn displayname(&self, user_id: &UserId) -> Result>; /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result>>; + fn avatar_url(&self, user_id: &UserId) -> Result>>; /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; /// Get the blurhash of a user. - pub fn blurhash(&self, user_id: &UserId) -> Result>; + fn blurhash(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. 
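The UIAA password check one hunk earlier relies on the rust-argon2 crate: password hashes are stored in argon2's encoded form, and verification treats a decoding error the same as a mismatch. Both sides of that, sketched with a random salt on the write path (Conduit's exact argon2 Config may differ):

use argon2::Config;
use rand::Rng;

/// Hash a new password with a random salt (write side).
fn hash_password(password: &str) -> Result<String, argon2::Error> {
    let salt: [u8; 32] = rand::thread_rng().gen();
    argon2::hash_encoded(password.as_bytes(), &salt, &Config::default())
}

/// True only if `password` matches the stored encoded `hash` (read side, as above).
fn password_matches(hash: &str, password: &str) -> bool {
    argon2::verify_encoded(hash, password.as_bytes()).unwrap_or(false)
}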
- pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; /// Adds a new device to a user. - pub fn create_device( + fn create_device( &self, user_id: &UserId, device_id: &DeviceId, @@ -70,129 +63,118 @@ pub trait Data { ) -> Result<()>; /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>( + fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a; /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; - pub fn add_one_time_key( + fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; - pub fn take_one_time_key( + fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>>; - pub fn count_one_time_keys( + fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>; - pub fn add_device_keys( + fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn add_cross_signing_keys( + fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn sign_key( + fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn keys_changed<'a>( + fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, ) -> impl Iterator>> + 'a; - pub fn mark_device_key_update( + fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn get_device_keys( + fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>>; - pub fn get_master_key bool>( + fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>>; - pub fn get_self_signing_key bool>( + fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>>; - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; + fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; - pub fn add_to_device_event( + fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn get_to_device_events( + fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>>; - pub fn remove_to_device_events( + fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, ) -> Result<()>; - pub fn 
update_device_metadata( + fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -200,27 +182,27 @@ pub trait Data { ) -> Result<()>; /// Get device metadata. - pub fn get_device_metadata( + fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>; - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result>; + fn get_devicelist_version(&self, user_id: &UserId) -> Result>; - pub fn all_devices_metadata<'a>( + fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a; /// Creates a new sync filter. Returns the filter id. - pub fn create_filter( + fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, ) -> Result; - pub fn get_filter( + fn get_filter( &self, user_id: &UserId, filter_id: &str, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 93d6ea5..bfa4b8e 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,7 +1,10 @@ mod data; -pub use data::Data; +use std::{collections::BTreeMap, mem}; -use crate::service::*; +pub use data::Data; +use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}}; + +use crate::{service::*, Error}; pub struct Service { db: D, @@ -19,18 +22,24 @@ impl Service<_> { } /// Check if a user is an admin - pub fn is_admin( + fn is_admin( &self, user_id: &UserId, ) -> Result { - self.db.is_admin(user_id) + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); + + rooms.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password) + fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + self.db.set_password(user_id, password)?; + Ok(()) } + /// Returns the number of users registered on this server. 
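One fix repeated across these Data traits (search, timeline, transaction_ids, users) is dropping `pub` from the method signatures. Rust rejects visibility qualifiers on trait items, since trait methods always share the visibility of the trait itself:

pub trait Data {
    // pub fn exists(&self) -> bool;  // error[E0449]: unnecessary visibility qualifier
    fn exists(&self) -> bool;         // inherits the visibility of `Data`
}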
pub fn count(&self) -> Result { self.db.count() @@ -136,7 +145,6 @@ impl Service<_> { device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } @@ -220,7 +228,7 @@ impl Service<_> { user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.db.get_master_key(user_id, allow_signatures) + self.db.get_master_key(user_id, allowed_signatures) } pub fn get_self_signing_key bool>( @@ -327,7 +335,7 @@ impl Service<_> { } /// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( +pub fn clean_signatures bool>( cross_signing_key: &mut serde_json::Value, user_id: &UserId, allowed_signatures: F, diff --git a/src/utils/utils.rs b/src/utils/mod.rs similarity index 99% rename from src/utils/utils.rs rename to src/utils/mod.rs index 1ad0aa3..734da2a 100644 --- a/src/utils/utils.rs +++ b/src/utils/mod.rs @@ -1,3 +1,5 @@ +pub mod error; + use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; From 4649cd82b514a4590aade5f08f1c3514fe9998f6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:05 +0200 Subject: [PATCH 1203/1727] refactor: prepare database/key_value/globals.rs from service/globals.rs --- src/{service => database/key_value}/globals.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/globals.rs (100%) diff --git a/src/service/globals.rs b/src/database/key_value/globals.rs similarity index 100% rename from src/service/globals.rs rename to src/database/key_value/globals.rs From d024d205c03f74053c3a0285dc05557f1e2f8663 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 1204/1727] refactor: prepare service/media/mod.rs from service/media.rs --- src/service/{media.rs => media/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{media.rs => media/mod.rs} (100%) diff --git a/src/service/media.rs b/src/service/media/mod.rs similarity index 100% rename from src/service/media.rs rename to src/service/media/mod.rs From 5a29511d3406cd6c7c25afebe8285528c7be6b18 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 1205/1727] refactor: prepare service/key_backups/data.rs from service/key_backups.rs --- src/service/{key_backups.rs => key_backups/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{key_backups.rs => key_backups/data.rs} (100%) diff --git a/src/service/key_backups.rs b/src/service/key_backups/data.rs similarity index 100% rename from src/service/key_backups.rs rename to src/service/key_backups/data.rs From c6d1421e81dcf434eec3a3f937a53faee0969fb9 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 1206/1727] refactor: prepare service/key_backups/mod.rs from service/key_backups.rs --- src/service/{key_backups.rs => key_backups/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{key_backups.rs => key_backups/mod.rs} (100%) diff --git a/src/service/key_backups.rs b/src/service/key_backups/mod.rs similarity index 100% rename from src/service/key_backups.rs rename to src/service/key_backups/mod.rs From e1e87b8d0c1717b6485fcb0ba0970630fcbc5f2d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 
1207/1727] refactor: prepare service/admin/mod.rs from service/admin.rs --- src/service/{admin.rs => admin/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{admin.rs => admin/mod.rs} (100%) diff --git a/src/service/admin.rs b/src/service/admin/mod.rs similarity index 100% rename from src/service/admin.rs rename to src/service/admin/mod.rs From efad401751d7ff6d87c2d5aac296326217e9aa28 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 1208/1727] refactor: prepare service/account_data/data.rs from service/account_data.rs --- src/service/{account_data.rs => account_data/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{account_data.rs => account_data/data.rs} (100%) diff --git a/src/service/account_data.rs b/src/service/account_data/data.rs similarity index 100% rename from src/service/account_data.rs rename to src/service/account_data/data.rs From 7946c5f29e0545e9ee65b56503a2524d7a5ffc66 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 1209/1727] refactor: prepare service/account_data/mod.rs from service/account_data.rs --- src/service/{account_data.rs => account_data/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{account_data.rs => account_data/mod.rs} (100%) diff --git a/src/service/account_data.rs b/src/service/account_data/mod.rs similarity index 100% rename from src/service/account_data.rs rename to src/service/account_data/mod.rs From 232978087ae24d0c7c9aff4147dee105b8fbaf17 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 1210/1727] refactor: prepare database/key_value/media.rs from service/media.rs --- src/{service => database/key_value}/media.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/media.rs (100%) diff --git a/src/service/media.rs b/src/database/key_value/media.rs similarity index 100% rename from src/service/media.rs rename to src/database/key_value/media.rs From bd8b616ca01983b5f029095e168146693ffe9dcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Sep 2022 13:25:51 +0200 Subject: [PATCH 1211/1727] Fixed more compile time errors --- src/api/client_server/account.rs | 4 +- src/api/client_server/alias.rs | 12 +- src/api/client_server/context.rs | 31 +- src/api/client_server/directory.rs | 15 +- src/api/client_server/keys.rs | 2 +- src/api/client_server/media.rs | 6 +- src/api/client_server/membership.rs | 38 +- src/api/client_server/room.rs | 2 +- src/api/client_server/state.rs | 2 +- src/api/server_server.rs | 2 +- src/database/key_value/account_data.rs | 23 +- src/database/key_value/appservice.rs | 4 +- src/database/key_value/globals.rs | 404 +--- src/database/key_value/key_backups.rs | 47 +- src/database/key_value/media.rs | 349 +--- src/database/key_value/mod.rs | 8 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 8 +- src/database/key_value/rooms/auth_chain.rs | 12 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 + src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 6 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 2 +- src/database/key_value/rooms/metadata.rs | 2 +- src/database/key_value/rooms/mod.rs | 8 +- src/database/key_value/rooms/outlier.rs | 2 
+- src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 6 +- src/database/key_value/rooms/short.rs | 4 + src/database/key_value/rooms/state.rs | 4 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 70 +- .../key_value/rooms/state_compressor.rs | 2 +- src/database/key_value/rooms/timeline.rs | 8 +- src/database/key_value/rooms/user.rs | 4 +- src/database/key_value/transaction_ids.rs | 2 +- src/database/key_value/uiaa.rs | 6 +- src/database/key_value/users.rs | 10 +- src/database/mod.rs | 7 +- src/lib.rs | 8 +- src/service/account_data/data.rs | 137 +- src/service/account_data/mod.rs | 21 +- src/service/admin/mod.rs | 1835 +++++++++-------- src/service/appservice/data.rs | 5 +- src/service/appservice/mod.rs | 4 +- src/service/globals/data.rs | 8 + src/service/globals/mod.rs | 44 +- src/service/key_backups/data.rs | 346 +--- src/service/key_backups/mod.rs | 9 +- src/service/media/data.rs | 8 + src/service/media/mod.rs | 195 +- src/service/mod.rs | 27 +- src/service/pdu.rs | 4 +- src/service/pusher/data.rs | 3 +- src/service/pusher/mod.rs | 4 +- src/service/rooms/alias/data.rs | 11 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/auth_chain/data.rs | 5 +- src/service/rooms/auth_chain/mod.rs | 4 +- src/service/rooms/directory/data.rs | 9 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/mod.rs | 4 +- src/service/rooms/edus/presence/data.rs | 1 + src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/data.rs | 5 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 7 +- src/service/rooms/lazy_loading/data.rs | 1 + src/service/rooms/lazy_loading/mod.rs | 4 +- src/service/rooms/metadata/data.rs | 1 + src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/mod.rs | 4 +- src/service/rooms/outlier/data.rs | 4 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/data.rs | 1 + src/service/rooms/pdu_metadata/mod.rs | 4 +- src/service/rooms/search/data.rs | 5 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/data.rs | 2 + src/service/rooms/short/mod.rs | 19 +- src/service/rooms/state/data.rs | 22 +- src/service/rooms/state/mod.rs | 28 +- src/service/rooms/state_accessor/data.rs | 6 +- src/service/rooms/state_accessor/mod.rs | 4 +- src/service/rooms/state_cache/data.rs | 8 +- src/service/rooms/state_cache/mod.rs | 63 +- src/service/rooms/state_compressor/data.rs | 7 +- src/service/rooms/state_compressor/mod.rs | 16 +- src/service/rooms/timeline/data.rs | 10 +- src/service/rooms/timeline/mod.rs | 103 +- src/service/rooms/user/data.rs | 5 +- src/service/rooms/user/mod.rs | 6 +- src/service/{sending.rs => sending/mod.rs} | 112 +- src/service/transaction_ids/data.rs | 3 + src/service/transaction_ids/mod.rs | 6 +- src/service/uiaa/data.rs | 1 + src/service/uiaa/mod.rs | 13 +- src/service/users/data.rs | 12 +- src/service/users/mod.rs | 16 +- 103 files changed, 1617 insertions(+), 2749 deletions(-) create mode 100644 src/database/key_value/rooms/short.rs create mode 100644 src/service/globals/data.rs create mode 100644 src/service/media/data.rs create mode 100644 src/service/rooms/short/data.rs rename src/service/{sending.rs => sending/mod.rs} (88%) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 848bfaa..6af597e 100644 --- a/src/api/client_server/account.rs +++ 
b/src/api/client_server/account.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ - utils, Error, Result, Ruma, services, + utils, Error, Result, Ruma, services, api::client_server, }; use ruma::{ api::client::{ @@ -381,7 +381,7 @@ pub async fn deactivate_route( } // Make the user leave all rooms before deactivation - services().rooms.leave_all_rooms(&sender_user).await?; + client_server::leave_all_rooms(&sender_user).await?; // Remove devices and mark account as deactivated services().users.deactivate_account(sender_user)?; diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7aa5fb2..444cc15 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -25,12 +25,12 @@ pub async fn create_alias_route( )); } - if services().rooms.id_from_alias(&body.room_alias)?.is_some() { + if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } - services().rooms - .set_alias(&body.room_alias, Some(&body.room_id))?; + services().rooms.alias + .set_alias(&body.room_alias, &body.room_id)?; Ok(create_alias::v3::Response::new()) } @@ -51,7 +51,7 @@ pub async fn delete_alias_route( )); } - services().rooms.set_alias(&body.room_alias, None)?; + services().rooms.alias.remove_alias(&body.room_alias)?; // TODO: update alt_aliases? @@ -88,7 +88,7 @@ pub(crate) async fn get_alias_helper( } let mut room_id = None; - match services().rooms.id_from_alias(room_alias)? { + match services().rooms.alias.resolve_local_alias(room_alias)? { Some(r) => room_id = Some(r), None => { for (_id, registration) in services().appservice.all()? { @@ -115,7 +115,7 @@ pub(crate) async fn get_alias_helper( .await .is_ok() { - room_id = Some(services().rooms.id_from_alias(room_alias)?.ok_or_else(|| { + room_id = Some(services().rooms.alias.resolve_local_alias(room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") })?); break; diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 3551dcf..c407c71 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -29,16 +29,18 @@ pub async fn get_context_route( let base_pdu_id = services() .rooms + .timeline .get_pdu_id(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event id not found.", ))?; - let base_token = services().rooms.pdu_count(&base_pdu_id)?; + let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?; let base_event = services() .rooms + .timeline .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -47,14 +49,14 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -68,6 +70,7 @@ pub async fn get_context_route( let events_before: Vec<_> = services() .rooms + .timeline .pdus_until(sender_user, &room_id, base_token)? 
.take( u32::try_from(body.limit).map_err(|_| { @@ -79,7 +82,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_before { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -92,7 +95,7 @@ pub async fn get_context_route( let start_token = events_before .last() - .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_before: Vec<_> = events_before @@ -102,6 +105,7 @@ pub async fn get_context_route( let events_after: Vec<_> = services() .rooms + .timeline .pdus_after(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { @@ -113,7 +117,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_after { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -124,7 +128,7 @@ pub async fn get_context_route( } } - let shortstatehash = match services().rooms.pdu_shortstatehash( + let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash( events_after .last() .map_or(&*body.event_id, |(_, e)| &*e.event_id), @@ -132,15 +136,16 @@ pub async fn get_context_route( Some(s) => s, None => services() .rooms - .current_shortstatehash(&room_id)? + .state + .get_room_shortstatehash(&room_id)? .expect("All rooms have state"), }; - let state_ids = services().rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let end_token = events_after .last() - .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_after: Vec<_> = events_after @@ -151,10 +156,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -163,7 +168,7 @@ pub async fn get_context_route( }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 87493fa..2a60f67 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -86,10 +86,10 @@ pub async fn set_room_visibility_route( match &body.visibility { room::Visibility::Public => { - services().rooms.set_public(&body.room_id, true)?; + services().rooms.directory.set_public(&body.room_id)?; info!("{} made {} public", sender_user, body.room_id); } - room::Visibility::Private => services().rooms.set_public(&body.room_id, false)?, + room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -108,7 +108,7 @@ pub async fn get_room_visibility_route( body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { - visibility: if services().rooms.is_public_room(&body.room_id)? { + visibility: if services().rooms.directory.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private @@ -176,6 +176,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let mut all_rooms: Vec<_> = services() .rooms + .directory .public_rooms() .map(|room_id| { let room_id = room_id?; @@ -183,6 +184,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let chunk = PublicRoomsChunk { canonical_alias: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -193,6 +195,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, name: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -203,6 +206,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, num_joined_members: services() .rooms + .state_cache .room_joined_count(&room_id)? .unwrap_or_else(|| { warn!("Room {} has no member count", room_id); @@ -212,6 +216,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .expect("user count should not be that big"), topic: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -222,6 +227,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, world_readable: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) @@ -236,6 +242,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, guest_can_join: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) @@ -248,6 +255,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, avatar_url: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) @@ -261,6 +269,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .flatten(), join_rule: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
.map(|s| { serde_json::from_str(s.content.get()) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 698bd1e..4ce5d4c 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -230,7 +230,7 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in services().rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(|r| r.ok()) { device_list_updates.extend( services().users .keys_changed( diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index f0da084..d6e8213 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -99,7 +99,7 @@ pub async fn get_content_route( content_disposition, content_type, file, - }) = services().media.get(&mxc).await? + }) = services().media.get(mxc.clone()).await? { Ok(get_content::v3::Response { file, @@ -129,7 +129,7 @@ pub async fn get_content_as_filename_route( content_disposition: _, content_type, file, - }) = services().media.get(&mxc).await? + }) = services().media.get(mxc.clone()).await? { Ok(get_content_as_filename::v3::Response { file, @@ -165,7 +165,7 @@ pub async fn get_content_thumbnail_route( }) = services() .media .get_thumbnail( - &mxc, + mxc.clone(), body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b000ec1..d6f820a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -30,7 +30,7 @@ use std::{ }; use tracing::{debug, error, warn}; -use crate::{services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server}, utils, Ruma}; +use crate::{Result, services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server, client_server}, utils, Ruma}; use super::get_alias_helper; @@ -48,6 +48,7 @@ pub async fn join_room_by_id_route( let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( services().rooms + .state_cache .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() @@ -88,6 +89,7 @@ pub async fn join_room_by_id_or_alias_route( let mut servers = body.server_name.clone(); servers.extend( services().rooms + .state_cache .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() @@ -131,7 +133,7 @@ pub async fn leave_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.leave_room(sender_user, &body.room_id).await?; + leave_room(sender_user, &body.room_id).await?; Ok(leave_room::v3::Response::new()) } @@ -162,6 +164,7 @@ pub async fn kick_user_route( let mut event: RoomMemberEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -189,7 +192,7 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -219,6 +222,7 @@ pub async fn ban_user_route( let event = services() .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -255,7 +259,7 @@ pub async fn ban_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -283,6 +287,7 @@ pub async fn unban_user_route( let mut event: RoomMemberEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -309,7 +314,7 @@ pub async fn unban_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -340,7 +345,7 @@ pub async fn forget_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.forget(&body.room_id, sender_user)?; + services().rooms.state_cache.forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -356,6 +361,7 @@ pub async fn joined_rooms_route( Ok(joined_rooms::v3::Response { joined_rooms: services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), @@ -373,7 +379,7 @@ pub async fn get_member_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -383,6 +389,7 @@ pub async fn get_member_events_route( Ok(get_member_events::v3::Response { chunk: services() .rooms + .state_accessor .room_state_full(&body.room_id) .await? .iter() @@ -403,7 +410,7 @@ pub async fn joined_members_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -411,7 +418,7 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in services().rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(|r| r.ok()) { let display_name = services().users.displayname(&user_id)?; let avatar_url = services().users.avatar_url(&user_id)?; @@ -446,7 +453,7 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !services().rooms.exists(room_id)? { + if !services().rooms.metadata.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); @@ -553,7 +560,7 @@ async fn join_room_by_id_helper( ) .await?; - services().rooms.get_or_create_shortroomid(room_id, &services().globals)?; + services().rooms.short.get_or_create_shortroomid(room_id)?; let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -586,7 +593,7 @@ async fn join_room_by_id_helper( services().rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = services().rooms.get_or_create_shortstatekey( + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &pdu.kind.to_string().into(), state_key, )?; @@ -594,7 +601,7 @@ async fn join_room_by_id_helper( } } - let incoming_shortstatekey = services().rooms.get_or_create_shortstatekey( + let incoming_shortstatekey = services().rooms.short.get_or_create_shortstatekey( &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key @@ -606,6 +613,7 @@ async fn join_room_by_id_helper( let create_shortstatekey = services() .rooms + .short .get_shortstatekey(&StateEventType::RoomCreate, "")? .expect("Room exists"); @@ -613,7 +621,7 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - services().rooms.force_state( + services().rooms.state.force_state( room_id, state .into_iter() @@ -780,7 +788,7 @@ pub(crate) async fn invite_helper<'a>( redacts: None, }, sender_user, room_id, &state_lock); - let invite_room_state = services().rooms.calculate_invite_state(&pdu)?; + let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; drop(state_lock); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 14affc6..f8d0602 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -87,7 +87,7 @@ pub async fn create_room_route( Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") })?; - if services().rooms.id_from_alias(&alias)?.is_some() { + if services().rooms.alias.resolve_local_alias(&alias)?.is_some() { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 4e8d594..b2dfe2a 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -246,7 +246,7 @@ async fn send_state_event_for_key_helper( if alias.server_name() != services().globals.server_name() || services() .rooms - .id_from_alias(&alias)? + .alias.resolve_local_alias(&alias)? 
.filter(|room| room == room_id) // Make sure it's the right room .is_none() { diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 776777d..bacc1ac 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1842,7 +1842,7 @@ pub async fn get_room_information_route( let room_id = services() .rooms - .id_from_alias(&body.room_alias)? + .alias.resolve_local_alias(&body.room_alias)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Room alias not found.", diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 70ad9f2..49c9170 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,17 +1,14 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; -impl AccountData { +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; +use serde::{Serialize, de::DeserializeOwned}; + +use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; + +impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, @@ -63,7 +60,7 @@ impl AccountData { /// Searches the account data for a specific kind. #[tracing::instrument(skip(self, room_id, user_id, kind))] - pub fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, @@ -96,7 +93,7 @@ impl AccountData { /// Returns all changes to the account data that happened after `since`. 
#[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( + fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index eae2cfb..edb027e 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,4 +1,4 @@ -use crate::{database::KeyValueDatabase, service, utils, Error}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller @@ -54,7 +54,7 @@ impl service::appservice::Data for KeyValueDatabase { ) } - fn iter_ids(&self) -> Result> + '_> { + fn iter_ids(&self) -> Result>>> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 2b47e5b..81e6ee1 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,108 +1,13 @@ -mod data; -pub use data::Data; +use ruma::signatures::Ed25519KeyPair; -use crate::service::*; +use crate::{Result, service, database::KeyValueDatabase, Error, utils}; -use crate::{database::Config, server_server::FedDest, utils, Error, Result}; -use ruma::{ - api::{ - client::sync::sync_events, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UserId, -}; -use std::{ - collections::{BTreeMap, HashMap}, - fs, - future::Future, - net::{IpAddr, SocketAddr}, - path::PathBuf, - sync::{Arc, Mutex, RwLock}, - time::{Duration, Instant}, -}; -use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::error; -use trust_dns_resolver::TokioAsyncResolver; - -use super::abstraction::Tree; - -pub const COUNTER: &[u8] = b"c"; - -type WellKnownMap = HashMap, (FedDest, String)>; -type TlsNameMap = HashMap, u16)>; -type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries -type SyncHandle = ( - Option, // since - Receiver>>, // rx -); - -pub struct Service { - db: D, - - pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, - pub config: Config, - keypair: Arc, - dns_resolver: TokioAsyncResolver, - jwt_decoding_key: Option>, - federation_client: reqwest::Client, - default_client: reqwest::Client, - pub stable_room_versions: Vec, - pub unstable_room_versions: Vec, - pub bad_event_ratelimiter: Arc, RateLimitState>>>, - pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock, Box), SyncHandle>>, - pub roomid_mutex_insert: RwLock, Arc>>>, - pub roomid_mutex_state: RwLock, Arc>>>, - pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer - pub roomid_federationhandletime: RwLock, (Box, Instant)>>, - pub stateres_mutex: Arc>, - pub rotate: RotationHandler, -} - -/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. -/// -/// This is utilized to have sync workers return early and release read locks on the database. 
-pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); - -impl RotationHandler { - pub fn new() -> Self { - let (s, r) = broadcast::channel(1); - Self(s, r) - } - - pub fn watch(&self) -> impl Future { - let mut r = self.0.subscribe(); - - async move { - let _ = r.recv().await; - } - } - - pub fn fire(&self) { - let _ = self.0.send(()); - } -} - -impl Default for RotationHandler { - fn default() -> Self { - Self::new() - } -} - - -impl Service<_> { - pub fn load( - globals: Arc, - server_signingkeys: Arc, - config: Config, - ) -> Result { - let keypair_bytes = globals.get(b"keypair")?.map_or_else( +impl service::globals::Data for KeyValueDatabase { + fn load_keypair(&self) -> Result { + let keypair_bytes = self.globals.get(b"keypair")?.map_or_else( || { let keypair = utils::generate_keypair(); - globals.insert(b"keypair", &keypair)?; + self.globals.insert(b"keypair", &keypair)?; Ok::<_, Error>(keypair) }, |s| Ok(s.to_vec()), @@ -125,302 +30,11 @@ impl Service<_> { .map(|key| (version, key)) }) .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(key, version) + Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); - - let keypair = match keypair { - Ok(k) => k, - Err(e) => { - error!("Keypair invalid. Deleting..."); - globals.remove(b"keypair")?; - return Err(e); - } - }; - - let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); - - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - - let default_client = reqwest_client_builder(&config)?.build()?; - let name_override = Arc::clone(&tls_name_override); - let federation_client = reqwest_client_builder(&config)? 
- .resolve_fn(move |domain| { - let read_guard = name_override.read().unwrap(); - let (override_name, port) = read_guard.get(&domain)?; - let first_name = override_name.get(0)?; - Some(SocketAddr::new(*first_name, *port)) - }) - .build()?; - - // Supported and stable room versions - let stable_room_versions = vec![ - RoomVersionId::V6, - RoomVersionId::V7, - RoomVersionId::V8, - RoomVersionId::V9, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - - let mut s = Self { - globals, - config, - keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { - error!( - "Failed to set up trust dns resolver with system config: {}", - e - ); - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, - actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), - tls_name_override, - federation_client, - default_client, - server_signingkeys, - jwt_decoding_key, - stable_room_versions, - unstable_room_versions, - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - roomid_mutex_state: RwLock::new(HashMap::new()), - roomid_mutex_insert: RwLock::new(HashMap::new()), - roomid_mutex_federation: RwLock::new(HashMap::new()), - roomid_federationhandletime: RwLock::new(HashMap::new()), - stateres_mutex: Arc::new(Mutex::new(())), - sync_receivers: RwLock::new(HashMap::new()), - rotate: RotationHandler::new(), - }; - - fs::create_dir_all(s.get_media_folder())?; - - if !s - .supported_room_versions() - .contains(&s.config.default_room_version) - { - error!("Room version in config isn't supported, falling back to Version 6"); - s.config.default_room_version = RoomVersionId::V6; - }; - - Ok(s) } - - /// Returns this server's keypair. - pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { - &self.keypair - } - - /// Returns a reqwest client which can be used to send requests - pub fn default_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.default_client.clone() - } - - /// Returns a client used for resolving .well-knowns - pub fn federation_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.federation_client.clone() - } - - #[tracing::instrument(skip(self))] - pub fn next_count(&self) -> Result { - utils::u64_from_bytes(&self.globals.increment(COUNTER)?) 
- .map_err(|_| Error::bad_database("Count has invalid bytes.")) - } - - #[tracing::instrument(skip(self))] - pub fn current_count(&self) -> Result { - self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - }) - } - - pub fn server_name(&self) -> &ServerName { - self.config.server_name.as_ref() - } - - pub fn max_request_size(&self) -> u32 { - self.config.max_request_size - } - - pub fn allow_registration(&self) -> bool { - self.config.allow_registration - } - - pub fn allow_encryption(&self) -> bool { - self.config.allow_encryption - } - - pub fn allow_federation(&self) -> bool { - self.config.allow_federation - } - - pub fn allow_room_creation(&self) -> bool { - self.config.allow_room_creation - } - - pub fn allow_unstable_room_versions(&self) -> bool { - self.config.allow_unstable_room_versions - } - - pub fn default_room_version(&self) -> RoomVersionId { - self.config.default_room_version.clone() - } - - pub fn trusted_servers(&self) -> &[Box] { - &self.config.trusted_servers - } - - pub fn dns_resolver(&self) -> &TokioAsyncResolver { - &self.dns_resolver - } - - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { - self.jwt_decoding_key.as_ref() - } - - pub fn turn_password(&self) -> &String { - &self.config.turn_password - } - - pub fn turn_ttl(&self) -> u64 { - self.config.turn_ttl - } - - pub fn turn_uris(&self) -> &[String] { - &self.config.turn_uris - } - - pub fn turn_username(&self) -> &String { - &self.config.turn_username - } - - pub fn turn_secret(&self) -> &String { - &self.config.turn_secret - } - - pub fn emergency_password(&self) -> &Option { - &self.config.emergency_password - } - - pub fn supported_room_versions(&self) -> Vec { - let mut room_versions: Vec = vec![]; - room_versions.extend(self.stable_room_versions.clone()); - if self.allow_unstable_room_versions() { - room_versions.extend(self.unstable_room_versions.clone()); - }; - room_versions - } - - /// TODO: the key valid until timestamp is only honored in room version > 4 - /// Remove the outdated keys and insert the new ones. - /// - /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key( - &self, - origin: &ServerName, - new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. - } = new_keys; - - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) - } - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - pub fn signing_keys_for( - &self, - origin: &ServerName, - ) -> Result, VerifyKey>> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); - - Ok(signingkeys) - } - - pub fn database_version(&self) -> Result { - self.globals.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) - }) - } - - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals - .insert(b"version", &new_version.to_be_bytes())?; - Ok(()) - } - - pub fn get_media_folder(&self) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r - } - - pub fn get_media_file(&self, key: &[u8]) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); - r + fn remove_keypair(&self) -> Result<()> { + self.globals.remove(b"keypair")? } } - -fn reqwest_client_builder(config: &Config) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)); - - if let Some(proxy) = config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } - - Ok(reqwest_client_builder) -} diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index be1d6b1..8171451 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,16 +1,11 @@ -use crate::{utils, Error, Result, services}; -use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, - }, - serde::Raw, - RoomId, UserId, -}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -impl KeyBackups { - pub fn create_backup( +use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; + +use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; + +impl service::key_backups::Data for KeyValueDatabase { + fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, @@ -30,7 +25,7 @@ impl KeyBackups { Ok(version) } - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -47,7 +42,7 @@ impl KeyBackups { Ok(()) } - pub fn update_backup( + fn update_backup( &self, user_id: &UserId, version: &str, @@ -71,7 +66,7 @@ impl KeyBackups { Ok(version.to_owned()) } - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -92,7 +87,7 @@ impl KeyBackups { .transpose() } - pub fn get_latest_backup( + fn get_latest_backup( &self, user_id: &UserId, ) -> Result)>> { @@ -123,7 +118,7 @@ impl KeyBackups { .transpose() } - pub fn get_backup( + fn get_backup( &self, user_id: &UserId, version: &str, @@ -140,7 +135,7 @@ impl KeyBackups { }) } - pub fn add_key( + fn add_key( &self, user_id: &UserId, version: &str, @@ -173,7 +168,7 @@ impl KeyBackups { 
Ok(()) } - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { + fn count_keys(&self, user_id: &UserId, version: &str) -> Result { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -181,7 +176,7 @@ impl KeyBackups { Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) } - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { + fn get_etag(&self, user_id: &UserId, version: &str) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -196,7 +191,7 @@ impl KeyBackups { .to_string()) } - pub fn get_all( + fn get_all( &self, user_id: &UserId, version: &str, @@ -252,7 +247,7 @@ impl KeyBackups { Ok(rooms) } - pub fn get_room( + fn get_room( &self, user_id: &UserId, version: &str, @@ -289,7 +284,7 @@ impl KeyBackups { .collect()) } - pub fn get_session( + fn get_session( &self, user_id: &UserId, version: &str, @@ -314,7 +309,7 @@ impl KeyBackups { .transpose() } - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -327,7 +322,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_keys( + fn delete_room_keys( &self, user_id: &UserId, version: &str, @@ -347,7 +342,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_key( + fn delete_room_key( &self, user_id: &UserId, version: &str, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 1bdf6d4..90a5c59 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,72 +1,7 @@ -use image::{imageops::FilterType, GenericImageView}; +use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -use super::abstraction::Tree; -use crate::{utils, Error, Result}; -use std::{mem, sync::Arc}; -use tokio::{ - fs::File, - io::{AsyncReadExt, AsyncWriteExt}, -}; - -pub struct FileMeta { - pub content_disposition: Option, - pub content_type: Option, - pub file: Vec, -} - -pub struct Media { - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType -} - -impl Media { - /// Uploads a file. - pub async fn create( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option<&str>, - content_type: &Option<&str>, - file: &[u8], - ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; - Ok(()) - } - - /// Uploads or replaces a file thumbnail. 
- #[allow(clippy::too_many_arguments)] - pub async fn upload_thumbnail( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option, - content_type: &Option, - width: u32, - height: u32, - file: &[u8], - ) -> Result<()> { +impl service::media::Data for KeyValueDatabase { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); @@ -86,272 +21,46 @@ impl Media { .unwrap_or_default(), ); - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - self.mediaid_file.insert(&key, &[])?; - Ok(()) + Ok(key) } - /// Downloads a file. - pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { + fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let first = self.mediaid_file.scan_prefix(prefix).next(); - if let Some((key, _)) = first { - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); + let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::NotFound)?; - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) + let mut parts = key.rsplit(|&b| b == 0xff); + + let content_type = parts + .next() + .map(|bytes| { + utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") }) - .transpose()?; + }) + .transpose()?; - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; + let content_disposition_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file, - })) + let content_disposition = if content_disposition_bytes.is_empty() { + None } else { - Ok(None) - } - } - - /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when - /// the server should send the original file. - pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { - match (width, height) { - (0..=32, 0..=32) => Some((32, 32, true)), - (0..=96, 0..=96) => Some((96, 96, true)), - (0..=320, 0..=240) => Some((320, 240, false)), - (0..=640, 0..=480) => Some((640, 480, false)), - (0..=800, 0..=600) => Some((800, 600, false)), - _ => None, - } - } - - /// Downloads a file's thumbnail. 
- /// - /// Here's an example on how it works: - /// - /// - Client requests an image with width=567, height=567 - /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails - /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) - /// - Server creates the thumbnail and sends it to the user - /// - /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. - pub async fn get_thumbnail( - &self, - mxc: &str, - globals: &Globals, - width: u32, - height: u32, - ) -> Result> { - let (width, height, crop) = self - .thumbnail_properties(width, height) - .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - - let mut main_prefix = mxc.as_bytes().to_vec(); - main_prefix.push(0xff); - - let mut thumbnail_prefix = main_prefix.clone(); - thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); - thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); - thumbnail_prefix.push(0xff); - - let mut original_prefix = main_prefix; - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - original_prefix.push(0xff); - - let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); - let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); - if let Some((key, _)) = first_thumbnailprefix { - // Using saved thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in db is invalid.") - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } else if let Some((key, _)) = first_originalprefix { - // Generate a thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - if let Ok(image) = image::load_from_memory(&file) { - let original_width = image.width(); - let original_height = image.height(); - if width > original_width || height > original_height { - return Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })); - } - - let thumbnail = if 
crop { - image.resize_to_fill(width, height, FilterType::CatmullRom) - } else { - let (exact_width, exact_height) = { - // Copied from image::dynimage::resize_dimensions - let ratio = u64::from(original_width) * u64::from(height); - let nratio = u64::from(width) * u64::from(original_height); - - let use_width = nratio <= ratio; - let intermediate = if use_width { - u64::from(original_height) * u64::from(width) - / u64::from(original_width) - } else { - u64::from(original_width) * u64::from(height) - / u64::from(original_height) - }; - if use_width { - if intermediate <= u64::from(::std::u32::MAX) { - (width, intermediate as u32) - } else { - ( - (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ::std::u32::MAX, - ) - } - } else if intermediate <= u64::from(::std::u32::MAX) { - (intermediate as u32, height) - } else { - ( - ::std::u32::MAX, - (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ) - } - }; - - image.thumbnail_exact(exact_width, exact_height) - }; - - let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; - - // Save thumbnail in database so we don't have to generate it again next time - let mut thumbnail_key = key.to_vec(); - let width_index = thumbnail_key - .iter() - .position(|&b| b == 0xff) - .ok_or_else(|| Error::bad_database("Media in db is invalid."))? - + 1; - let mut widthheight = width.to_be_bytes().to_vec(); - widthheight.extend_from_slice(&height.to_be_bytes()); - - thumbnail_key.splice( - width_index..width_index + 2 * mem::size_of::(), - widthheight, - ); - - let path = globals.get_media_file(&thumbnail_key); - let mut f = File::create(path).await?; - f.write_all(&thumbnail_bytes).await?; - - self.mediaid_file.insert(&thumbnail_key, &[])?; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: thumbnail_bytes.to_vec(), - })) - } else { - // Couldn't parse file to generate thumbnail, send original - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } - } else { - Ok(None) - } + Some( + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database( + "Content Disposition in mediaid_file is invalid unicode.", + ) + })?, + ) + }; + Ok((content_disposition, content_type, key)) } } diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index 189571f..efb8550 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -1,9 +1,9 @@ -//mod account_data; +mod account_data; //mod admin; mod appservice; -//mod globals; -//mod key_backups; -//mod media; +mod globals; +mod key_backups; +mod media; //mod pdu; mod pusher; mod rooms; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b77170d..35c8463 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,6 +1,6 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; -use crate::{service, database::KeyValueDatabase, Error}; +use crate::{service, database::KeyValueDatabase, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -51,7 +51,7 @@ impl service::pusher::Data for KeyValueDatabase { fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { + ) -> Box>> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs 
b/src/database/key_value/rooms/alias.rs index a9236a7..c762def 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,12 +1,12 @@ use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; -use crate::{service, database::KeyValueDatabase, utils, Error, services}; +use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; impl service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, - room_id: Option<&RoomId> + room_id: &RoomId ) -> Result<()> { self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; @@ -41,7 +41,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn resolve_local_alias( &self, alias: &RoomAliasId - ) -> Result<()> { + ) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { @@ -56,7 +56,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result<()> { + ) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 57dbb14..585d562 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,5 +1,9 @@ -impl service::room::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain<'a>() -> Result> { +use std::{collections::HashSet, mem::size_of}; + +use crate::{service, database::KeyValueDatabase, Result, utils}; + +impl service::rooms::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result> { self.shorteventid_authchain .get(&shorteventid.to_be_bytes())? .map(|chain| { @@ -12,8 +16,8 @@ impl service::room::auth_chain::Data for KeyValueDatabase { }) } - fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result<()> { - shorteventid_authchain.insert( + fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { + self.shorteventid_authchain.insert( &shorteventid.to_be_bytes(), &auth_chain .iter() diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 44a580c..c48afa9 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms(&self) -> impl Iterator>> + '_ { + fn public_rooms(&self) -> Box>>> { self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index 9ffd33d..b5007f8 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -1,3 +1,7 @@ mod presence; mod typing; mod read_receipt; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 9f3977d..fbbbff5 100644 --- 
a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; -use crate::{service, database::KeyValueDatabase, utils, Error, services}; +use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( @@ -56,8 +56,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { fn get_presence_event( &self, - user_id: &UserId, room_id: &RoomId, + user_id: &UserId, count: u64, ) -> Result> { let mut presence_id = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 68aea16..42d250f 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -2,7 +2,7 @@ use std::mem; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; -use crate::{database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( @@ -50,13 +50,13 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { &'a self, room_id: &RoomId, since: u64, - ) -> impl Iterator< + ) -> Box, u64, Raw, )>, - > + 'a { + >> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 905bffc..b7d3596 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -2,7 +2,7 @@ use std::collections::HashSet; use ruma::{UserId, RoomId}; -use crate::{database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( @@ -79,7 +79,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typings_all( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index c230cbf..aaf14dd 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,6 +1,6 @@ use ruma::{UserId, DeviceId, RoomId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index b4cba2c..0509cbb 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index adb810b..406943e 100644 --- a/src/database/key_value/rooms/mod.rs +++ 
b/src/database/key_value/rooms/mod.rs @@ -1,16 +1,20 @@ mod alias; +mod auth_chain; mod directory; mod edus; -//mod event_handler; mod lazy_load; mod metadata; mod outlier; mod pdu_metadata; mod search; -//mod short; +mod short; mod state; mod state_accessor; mod state_cache; mod state_compressor; mod timeline; mod user; + +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index 08299a0..aa97544 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,6 +1,6 @@ use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::{service, database::KeyValueDatabase, PduEvent, Error}; +use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 602f3f6..f3ac414 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use ruma::{RoomId, EventId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 44663ff..15937f6 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,10 +2,10 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils}; +use crate::{service, database::KeyValueDatabase, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a, Vec)>> { + ) -> Result>>, Vec)>> { let prefix = self .get_shortroomid(room_id)? 
.expect("room exists") diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs new file mode 100644 index 0000000..9129638 --- /dev/null +++ b/src/database/key_value/rooms/short.rs @@ -0,0 +1,4 @@ +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::short::Data for KeyValueDatabase { +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 192dbb8..405939d 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; use std::fmt::Debug; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { @@ -24,7 +24,7 @@ impl service::rooms::state::Data for KeyValueDatabase { Ok(()) } - fn set_event_state(&self, shorteventid: Vec, shortstatehash: Vec) -> Result<()> { + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { self.shorteventid_shortstatehash .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ea15afc..037b98f 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,6 +1,6 @@ use std::{collections::{BTreeMap, HashMap}, sync::Arc}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils}; +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result}; use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 567dc80..5f05485 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, services, Result}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -9,4 +9,70 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { userroom_id.extend_from_slice(room_id.as_bytes()); self.roomuseroncejoinedids.insert(&userroom_id, &[]) } + + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + 
roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate.insert( + &userroom_id, + &serde_json::to_vec(&last_state.unwrap_or_default()) + .expect("state to bytes always works"), + )?; + self.roomuserid_invitecount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate.insert( + &userroom_id, + &serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + + Ok(()) + } } diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 09e3566..23a7122 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, mem::size_of}; -use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils}; +use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; impl service::rooms::state_compressor::Data for KeyValueDatabase { fn get_statediff(&self, shortstatehash: u64) -> Result { diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index cf93df1..c42509e 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -3,7 +3,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent}; +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result}; impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { @@ -190,7 +190,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { let prefix = self .get_shortroomid(room_id)? .expect("room exists") @@ -224,7 +224,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id let prefix = self .get_shortroomid(room_id)? 
@@ -258,7 +258,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id let prefix = self .get_shortroomid(room_id)? diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 2fc3b9f..d49bc1d 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ use ruma::{UserId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -78,7 +78,7 @@ impl service::rooms::user::Data for KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a> { + ) -> Result>>>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 6652a62..a63b3c5 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,6 +1,6 @@ use ruma::{UserId, DeviceId, TransactionId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index b1960bd..cf242de 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,8 +1,6 @@ -use std::io::ErrorKind; +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::uiaa::UiaaInfo}; - -use crate::{database::KeyValueDatabase, service, Error}; +use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index ea84490..82e3bac 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -3,7 +3,7 @@ use std::{mem::size_of, collections::BTreeMap}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; -use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services}; +use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. @@ -56,7 +56,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. 
- fn iter(&self) -> impl Iterator>> + '_ { + fn iter(&self) -> Box>>> { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -270,7 +270,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> Box>>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -608,7 +608,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { + ) -> Box>>> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -878,7 +878,7 @@ impl service::users::Data for KeyValueDatabase { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { + ) -> Box>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/mod.rs b/src/database/mod.rs index 12758af..4ea619a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,7 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, admin::{self, create_admin_room}, appservice, pusher}}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -253,7 +253,7 @@ impl KeyValueDatabase { let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - let db = Arc::new(TokioRwLock::from(Self { + let db = Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -345,10 +345,9 @@ impl KeyValueDatabase { senderkey_pusher: builder.open_tree("senderkey_pusher")?, global: builder.open_tree("global")?, server_signingkeys: builder.open_tree("server_signingkeys")?, - })); + }; // TODO: do this after constructing the db - let guard = db.read().await; // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. 
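The key_value hunks above repeatedly swap `impl Iterator<...> + '_` return types for boxed iterators: these functions now live behind the new `service::*::Data` traits, and `impl Trait` was not accepted in trait method return position on stable Rust at the time. The concrete item types were lost when this copy of the patch was rendered, so the following is only a minimal sketch of the pattern with illustrative names, not Conduit's exact signatures:

use std::collections::BTreeSet;

// Illustrative stand-in for one of the key_value `Data` traits.
pub trait Directory {
    // `impl Iterator` is rejected in a trait method, so the iterator is boxed instead:
    fn public_rooms<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a>;
}

pub struct MemoryDirectory {
    rooms: BTreeSet<String>,
}

impl Directory for MemoryDirectory {
    fn public_rooms<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a> {
        // The trait object erases the concrete iterator type behind the Box.
        Box::new(self.rooms.iter().cloned())
    }
}

fn main() {
    let dir = MemoryDirectory {
        rooms: ["!abc:example.org".to_owned()].into_iter().collect(),
    };
    for room in dir.public_rooms() {
        println!("{room}");
    }
}

In the real traits the boxed iterators yield `Result<...>` items (for example parsed `RoomId`s), so callers still handle per-item decoding errors.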
diff --git a/src/lib.rs b/src/lib.rs index 0d058df..c6e6569 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,7 +13,7 @@ mod service; pub mod api; mod utils; -use std::cell::Cell; +use std::{cell::Cell, sync::RwLock}; pub use config::Config; pub use utils::error::{Error, Result}; @@ -22,13 +22,13 @@ pub use api::ruma_wrapper::{Ruma, RumaResponse}; use crate::database::KeyValueDatabase; -pub static SERVICES: Cell> = Cell::new(None); +pub static SERVICES: RwLock> = RwLock::new(None); enum ServicesEnum { Rocksdb(Services) } -pub fn services() -> Services { - SERVICES.get().unwrap() +pub fn services() -> Services { + SERVICES.read().unwrap() } diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 70ad9f2..0f8e0bf 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,145 +1,32 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; -impl AccountData { +use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; +use serde::{Serialize, de::DeserializeOwned}; +use crate::Result; + +pub trait Data { /// Places one event in the account data of the user and removes the previous entry. - #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &T, - ) -> Result<()> { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); - - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); - } - - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), - )?; - - let prev = self.roomusertype_roomuserdataid.get(&key)?; - - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - - // Remove old entry - if let Some(prev) = prev { - self.roomuserdataid_accountdata.remove(&prev)?; - } - - Ok(()) - } + ) -> Result<()>; /// Searches the account data for a specific kind. - #[tracing::instrument(skip(self, room_id, user_id, kind))] - pub fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result> { - let mut key = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - - self.roomusertype_roomuserdataid - .get(&key)? - .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() - }) - .transpose()? 
- .map(|data| { - serde_json::from_slice(&data) - .map_err(|_| Error::bad_database("could not deserialize")) - }) - .transpose() - } + ) -> Result>; /// Returns all changes to the account data that happened after `since`. - #[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( + fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since + 1).to_be_bytes()); - - for r in self - .roomuserdataid_accountdata - .iter_from(&first_possible, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - Ok::<_, Error>(( - RoomAccountDataEventType::try_from( - utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( - || Error::bad_database("RoomUserData ID in db is invalid."), - )?) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { - Error::bad_database("Database contains invalid account data.") - })?, - )) - }) - { - let (kind, data) = r?; - userdata.insert(kind, data); - } - - Ok(userdata) - } + ) -> Result>>; } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 70ad9f2..7a39922 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,14 +1,27 @@ -use crate::{utils, Error, Result}; +mod data; + +pub use data::Data; + use ruma::{ - api::client::error::ErrorKind, + api::client::{ + error::ErrorKind, + }, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, - RoomId, UserId, + signatures::CanonicalJsonValue, + DeviceId, RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, sync::Arc}; +use tracing::error; -impl AccountData { +use crate::{service::*, services, utils, Error, Result}; + +pub struct Service { + db: D, +} + +impl Service { /// Places one event in the account data of the user and removes the previous entry. 
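The account_data hunks above show the split used throughout this refactor: the storage-level methods move into a `Data` trait (implemented by `KeyValueDatabase`), while a `Service` struct keeps the higher-level logic and merely owns a backend. The generic bounds on `Service` were stripped when this patch was rendered; presumably it is `Service<D: Data>`. A rough, simplified sketch of that delegation, with plain-string parameters standing in for the ruma types:

use std::{cell::RefCell, collections::HashMap};

// Simplified stand-in for the storage trait; the real one takes ruma types.
pub trait Data {
    fn update(&self, user_id: &str, kind: &str, data: &str) -> Result<(), String>;
    fn get(&self, user_id: &str, kind: &str) -> Result<Option<String>, String>;
}

pub struct Service<D: Data> {
    db: D,
}

impl<D: Data> Service<D> {
    // Higher-level checks live here; storage details stay behind `Data`.
    pub fn update(&self, user_id: &str, kind: &str, data: &str) -> Result<(), String> {
        self.db.update(user_id, kind, data)
    }
    pub fn get(&self, user_id: &str, kind: &str) -> Result<Option<String>, String> {
        self.db.get(user_id, kind)
    }
}

// Toy in-memory backend, only to make the sketch runnable.
struct MemoryBackend {
    map: RefCell<HashMap<(String, String), String>>,
}

impl Data for MemoryBackend {
    fn update(&self, user_id: &str, kind: &str, data: &str) -> Result<(), String> {
        self.map
            .borrow_mut()
            .insert((user_id.to_owned(), kind.to_owned()), data.to_owned());
        Ok(())
    }
    fn get(&self, user_id: &str, kind: &str) -> Result<Option<String>, String> {
        Ok(self
            .map
            .borrow()
            .get(&(user_id.to_owned(), kind.to_owned()))
            .cloned())
    }
}

fn main() {
    let service = Service {
        db: MemoryBackend { map: RefCell::new(HashMap::new()) },
    };
    service.update("@alice:example.org", "m.push_rules", "{}").unwrap();
    assert!(service.get("@alice:example.org", "m.push_rules").unwrap().is_some());
}

The same shape, a per-module `Data` trait plus a thin `Service`, recurs for the rooms, uiaa, users and transaction_ids modules touched earlier in this patch.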
#[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ded0adb..dad4ceb 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,165 +28,10 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{Result, services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; use super::pdu::PduBuilder; -#[derive(Debug)] -pub enum AdminRoomEvent { - ProcessMessage(String), - SendMessage(RoomMessageEventContent), -} - -#[derive(Clone)] -pub struct Admin { - pub sender: mpsc::UnboundedSender, -} - -impl Admin { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver, - ) { - tokio::spawn(async move { - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); - - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); - - let conduit_room = services() - .rooms - .id_from_alias( - format!("#admins:{}", services().globals.server_name()) - .as_str() - .try_into() - .expect("#admins:server_name is a valid room alias"), - ) - .expect("Database data for admin room alias must be valid") - .expect("Admin room must exist"); - - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; - - loop { - tokio::select! 
{ - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await - }; - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.clone()) - .or_default(), - ); - - let state_lock = mutex_state.lock().await; - - send_message(message_content, &state_lock); - - drop(state_lock); - } - } - } - }); - } - - pub fn process_message(&self, room_message: String) { - self.sender - .send(AdminRoomEvent::ProcessMessage(room_message)) - .unwrap(); - } - - pub fn send_message(&self, message_content: RoomMessageEventContent) { - self.sender - .send(AdminRoomEvent::SendMessage(message_content)) - .unwrap(); - } -} - -// Parse and process a message from the admin room -async fn process_admin_message(room_message: String) -> RoomMessageEventContent { - let mut lines = room_message.lines(); - let command_line = lines.next().expect("each string has at least one line"); - let body: Vec<_> = lines.collect(); - - let admin_command = match parse_admin_command(&command_line) { - Ok(command) => command, - Err(error) => { - let server_name = services().globals.server_name(); - let message = error - .to_string() - .replace("server.name", server_name.as_str()); - let html_message = usage_to_html(&message, server_name); - - return RoomMessageEventContent::text_html(message, html_message); - } - }; - - match process_admin_command(admin_command, body).await { - Ok(reply_message) => reply_message, - Err(error) => { - let markdown_message = format!( - "Encountered an error while handling the command:\n\ - ```\n{}\n```", - error, - ); - let html_message = format!( - "Encountered an error while handling the command:\n\ -
                <pre>\n{}\n</pre>
                ", - error, - ); - - RoomMessageEventContent::text_html(markdown_message, html_message) - } - } -} - -// Parse chat messages from the admin room into an AdminCommand object -fn parse_admin_command(command_line: &str) -> std::result::Result { - // Note: argv[0] is `@conduit:servername:`, which is treated as the main command - let mut argv: Vec<_> = command_line.split_whitespace().collect(); - - // Replace `help command` with `command --help` - // Clap has a help subcommand, but it omits the long help description. - if argv.len() > 1 && argv[1] == "help" { - argv.remove(1); - argv.push("--help"); - } - - // Backwards compatibility with `register_appservice`-style commands - let command_with_dashes; - if argv.len() > 1 && argv[1].contains("_") { - command_with_dashes = argv[1].replace("_", "-"); - argv[1] = &command_with_dashes; - } - - AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) -} - #[derive(Parser)] #[clap(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { @@ -308,801 +153,959 @@ enum AdminCommand { EnableRoom { room_id: Box }, } -async fn process_admin_command( - command: AdminCommand, - body: Vec<&str>, -) -> Result { - let reply_message_content = match command { - AdminCommand::RegisterAppservice => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); - match parsed_config { - Ok(yaml) => match services().appservice.register_appservice(yaml) { - Ok(id) => RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}.", - id - )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {}", - e - )), - }, - Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e - )), - } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - ) - } - } - AdminCommand::UnregisterAppservice { - appservice_identifier, - } => match services().appservice.unregister_appservice(&appservice_identifier) { - Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {}", - e - )), - }, - AdminCommand::ListAppservices => { - if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices - .into_iter() - .filter_map(|r| r.ok()) - .collect::>() - .join(", ") - ); - RoomMessageEventContent::text_plain(output) - } else { - RoomMessageEventContent::text_plain("Failed to get appservices.") - } - } - AdminCommand::ListRooms => { - let room_ids = services().rooms.iter_ids(); - let output = format!( - "Rooms:\n{}", - room_ids - .filter_map(|r| r.ok()) - .map(|id| id.to_string() - + "\tMembers: " - + &services() - .rooms - .room_joined_count(&id) - .ok() - .flatten() - .unwrap_or(0) - .to_string()) - .collect::>() - .join("\n") - ); - RoomMessageEventContent::text_plain(output) - } - AdminCommand::ListLocalUsers => match services().users.list_local_users() { - Ok(users) => { - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - RoomMessageEventContent::text_plain(&msg) - } - Err(e) => RoomMessageEventContent::text_plain(e.to_string()), - }, - AdminCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().unwrap(); - let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - msg += &format!( - "{} {}: {}m{}s\n", - r, - e, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - RoomMessageEventContent::text_plain(&msg) - } - AdminCommand::GetAuthChain { event_id } => { - let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.get_pdu_json(&event_id)? { - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { - Error::bad_database("Invalid room id field in event in database") - })?; - let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id]) - .await? 
- .count(); - let elapsed = start.elapsed(); - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )) - } else { - RoomMessageEventContent::text_plain("Event not found.") - } - } - AdminCommand::ParsePdu => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { - Ok(hash) => { - let event_id = EventId::parse(format!("${}", hash)); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e - )), - } - } - Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {:?}", - e - )), - } - } - Err(e) => RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e - )), - } - } else { - RoomMessageEventContent::text_plain("Expected code block in command body.") - } - } - AdminCommand::GetPdu { event_id } => { - let mut outlier = false; - let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = services().rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - RoomMessageEventContent::text_html( - format!( - "{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { - "PDU was accepted" - }, - json_text - ), - format!( - "

                <p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
                \n", - if outlier { - "PDU is outlier" - } else { - "PDU was accepted" - }, - HtmlEscape(&json_text) - ), - ) - } - None => RoomMessageEventContent::text_plain("PDU not found."), - } - } - AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { - Ok(response) => RoomMessageEventContent::text_plain(response), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to get database memory usage: {}", - e - )), - }, - AdminCommand::ShowConfig => { - // Construct and send the response - RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) - } - AdminCommand::ResetPassword { username } => { - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e - ))) - } - }; - - // Check if the specified user is valid - if !services().users.exists(&user_id)? - || services().users.is_deactivated(&user_id)? - || user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") - { - return Ok(RoomMessageEventContent::text_plain( - "The specified user does not exist or is deactivated!", - )); - } - - let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - - match services().users.set_password(&user_id, Some(new_password.as_str())) { - Ok(()) => RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {}: {}", - user_id, new_password - )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {}: {}", - user_id, e - )), - } - } - AdminCommand::CreateUser { username, password } => { - let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); - // Validate user id - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e - ))) - } - }; - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} is not allowed due to historical" - ))); - } - if services().users.exists(&user_id)? 
{ - return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} already exists" - ))); - } - // Create user - services().users.create(&user_id, Some(password.as_str()))?; - - // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); - services().users - .set_displayname(&user_id, Some(displayname.clone()))?; - - // Initial account data - services().account_data.update( - None, - &user_id, - ruma::events::GlobalAccountDataEventType::PushRules - .to_string() - .into(), - &ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::push::Ruleset::server_default(&user_id), - }, - }, - )?; - - // we dont add a device since we're not the user, just the creator - - // Inhibit login does not work for guests - RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: {password}" - )) - } - AdminCommand::DisableRoom { room_id } => { - services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - RoomMessageEventContent::text_plain("Room disabled.") - } - AdminCommand::EnableRoom { room_id } => { - services().rooms.disabledroomids.remove(room_id.as_bytes())?; - RoomMessageEventContent::text_plain("Room enabled.") - } - AdminCommand::DeactivateUser { - leave_rooms, - user_id, - } => { - let user_id = Arc::::from(user_id); - if services().users.exists(&user_id)? { - RoomMessageEventContent::text_plain(format!( - "Making {} leave all rooms before deactivation...", - user_id - )); - - services().users.deactivate_account(&user_id)?; - - if leave_rooms { - services().rooms.leave_all_rooms(&user_id).await?; - } - - RoomMessageEventContent::text_plain(format!( - "User {} has been deactivated", - user_id - )) - } else { - RoomMessageEventContent::text_plain(format!( - "User {} doesn't exist on this server", - user_id - )) - } - } - AdminCommand::DeactivateAll { leave_rooms, force } => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let usernames = body.clone().drain(1..body.len() - 1).collect::>(); - - let mut user_ids: Vec<&UserId> = Vec::new(); - - for &username in &usernames { - match <&UserId>::try_from(username) { - Ok(user_id) => user_ids.push(user_id), - Err(_) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "{} is not a valid username", - username - ))) - } - } - } - - let mut deactivation_count = 0; - let mut admins = Vec::new(); - - if !force { - user_ids.retain(|&user_id| { - match services().users.is_admin(user_id) { - Ok(is_admin) => match is_admin { - true => { - admins.push(user_id.localpart()); - false - } - false => true, - }, - Err(_) => false, - } - }) - } - - for &user_id in &user_ids { - match services().users.deactivate_account(user_id) { - Ok(_) => deactivation_count += 1, - Err(_) => {} - } - } - - if leave_rooms { - for &user_id in &user_ids { - let _ = services().rooms.leave_all_rooms(user_id).await; - } - } - - if admins.is_empty() { - RoomMessageEventContent::text_plain(format!( - "Deactivated {} accounts.", - deactivation_count - )) - } else { - RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) - } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - ) - } - } - }; - - Ok(reply_message_content) +#[derive(Debug)] +pub enum AdminRoomEvent { + ProcessMessage(String), + SendMessage(RoomMessageEventContent), } -// Utility to turn clap's `--help` text to HTML. -fn usage_to_html(text: &str, server_name: &ServerName) -> String { - // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` - let text = text.replace( - &format!("@conduit:{}:-", server_name), - &format!("@conduit:{}: ", server_name), - ); +#[derive(Clone)] +pub struct Service { + pub sender: mpsc::UnboundedSender, +} - // For the conduit admin room, subcommands become main commands - let text = text.replace("SUBCOMMAND", "COMMAND"); - let text = text.replace("subcommand", "command"); +impl Service { + pub fn start_handler( + &self, + mut receiver: mpsc::UnboundedReceiver, + ) { + tokio::spawn(async move { + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); - // Escape option names (e.g. ``) since they look like HTML tags - let text = text.replace("<", "<").replace(">", ">"); + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); - // Italicize the first line (command name and version text) - let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "$1\n"); + let conduit_room = services() + .rooms + .id_from_alias( + format!("#admins:{}", services().globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), + ) + .expect("Database data for admin room alias must be valid") + .expect("Admin room must exist"); - // Unmerge wrapped lines - let text = text.replace("\n ", " "); - - // Wrap option names in backticks. The lines look like: - // -V, --version Prints version information - // And are converted to: - // -V, --version: Prints version information - // (?m) enables multi-line mode for ^ and $ - let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") - .expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "$1: $4"); - - // Look for a `[commandbody]` tag. If it exists, use all lines below it that - // start with a `#` in the USAGE section. - let mut text_lines: Vec<&str> = text.lines().collect(); - let mut command_body = String::new(); - - if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { - text_lines.remove(line_index); - - while text_lines - .get(line_index) - .map(|line| line.starts_with("#")) - .unwrap_or(false) - { - command_body += if text_lines[line_index].starts_with("# ") { - &text_lines[line_index][2..] - } else { - &text_lines[line_index][1..] + let send_message = |message: RoomMessageEventContent, + mutex_lock: &MutexGuard<'_, ()>| { + services() + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, + ) + .unwrap(); }; - command_body += "[nobr]\n"; - text_lines.remove(line_index); + + loop { + tokio::select! 
{ + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await + }; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.clone()) + .or_default(), + ); + + let state_lock = mutex_state.lock().await; + + send_message(message_content, &state_lock); + + drop(state_lock); + } + } + } + }); + } + + pub fn process_message(&self, room_message: String) { + self.sender + .send(AdminRoomEvent::ProcessMessage(room_message)) + .unwrap(); + } + + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); + } + + // Parse and process a message from the admin room + async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body: Vec<_> = lines.collect(); + + let admin_command = match parse_admin_command(&command_line) { + Ok(command) => command, + Err(error) => { + let server_name = services().globals.server_name(); + let message = error + .to_string() + .replace("server.name", server_name.as_str()); + let html_message = usage_to_html(&message, server_name); + + return RoomMessageEventContent::text_html(message, html_message); + } + }; + + match process_admin_command(admin_command, body).await { + Ok(reply_message) => reply_message, + Err(error) => { + let markdown_message = format!( + "Encountered an error while handling the command:\n\ + ```\n{}\n```", + error, + ); + let html_message = format!( + "Encountered an error while handling the command:\n\ +
                <pre>\n{}\n</pre>
                ", + error, + ); + + RoomMessageEventContent::text_html(markdown_message, html_message) + } } } - let text = text_lines.join("\n"); + // Parse chat messages from the admin room into an AdminCommand object + fn parse_admin_command(&self, command_line: &str) -> std::result::Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv: Vec<_> = command_line.split_whitespace().collect(); - // Improve the usage section - let text = if command_body.is_empty() { - // Wrap the usage line in code tags - let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); + argv.push("--help"); + } + + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if argv.len() > 1 && argv[1].contains("_") { + command_with_dashes = argv[1].replace("_", "-"); + argv[1] = &command_with_dashes; + } + + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) + } + + async fn process_admin_command( + &self, + command: AdminCommand, + body: Vec<&str>, + ) -> Result { + let reply_message_content = match command { + AdminCommand::RegisterAppservice => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => match services().appservice.register_appservice(yaml) { + Ok(id) => RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {}.", + id + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {}", + e + )), + }, + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {}", + e + )), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + ) + } + } + AdminCommand::UnregisterAppservice { + appservice_identifier, + } => match services().appservice.unregister_appservice(&appservice_identifier) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {}", + e + )), + }, + AdminCommand::ListAppservices => { + if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices + .into_iter() + .filter_map(|r| r.ok()) + .collect::>() + .join(", ") + ); + RoomMessageEventContent::text_plain(output) + } else { + RoomMessageEventContent::text_plain("Failed to get appservices.") + } + } + AdminCommand::ListRooms => { + let room_ids = services().rooms.iter_ids(); + let output = format!( + "Rooms:\n{}", + room_ids + .filter_map(|r| r.ok()) + .map(|id| id.to_string() + + "\tMembers: " + + &services() + .rooms + .room_joined_count(&id) + .ok() + .flatten() + .unwrap_or(0) + .to_string()) + .collect::>() + .join("\n") + ); + RoomMessageEventContent::text_plain(output) + } + AdminCommand::ListLocalUsers => match services().users.list_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + RoomMessageEventContent::text_plain(&msg) + } + Err(e) => RoomMessageEventContent::text_plain(e.to_string()), + }, + AdminCommand::IncomingFederation => { + let map = services().globals.roomid_federationhandletime.read().unwrap(); + let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + msg += &format!( + "{} {}: {}m{}s\n", + r, + e, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + RoomMessageEventContent::text_plain(&msg) + } + AdminCommand::GetAuthChain { event_id } => { + let event_id = Arc::::from(event_id); + if let Some(event) = services().rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = server_server::get_auth_chain(room_id, vec![event_id]) + .await? 
+ .count(); + let elapsed = start.elapsed(); + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )) + } else { + RoomMessageEventContent::text_plain("Event not found.") + } + } + AdminCommand::ParsePdu => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${}", hash)); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + } + } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse PDU JSON: {:?}", + e + )), + } + } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e + )), + } + } else { + RoomMessageEventContent::text_plain("Expected code block in command body.") + } + } + AdminCommand::GetPdu { event_id } => { + let mut outlier = false; + let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = services().rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + json_text + ), + format!( + "

                <p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
                \n", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + HtmlEscape(&json_text) + ), + ) + } + None => RoomMessageEventContent::text_plain("PDU not found."), + } + } + AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to get database memory usage: {}", + e + )), + }, + AdminCommand::ShowConfig => { + // Construct and send the response + RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) + } + AdminCommand::ResetPassword { username } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + + // Check if the specified user is valid + if !services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? + || user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "The specified user does not exist or is deactivated!", + )); + } + + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + + match services().users.set_password(&user_id, Some(new_password.as_str())) { + Ok(()) => RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {}: {}", + user_id, new_password + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {}: {}", + user_id, e + )), + } + } + AdminCommand::CreateUser { username, password } => { + let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} is not allowed due to historical" + ))); + } + if services().users.exists(&user_id)? 
{ + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} already exists" + ))); + } + // Create user + services().users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let displayname = format!("{} ⚡️", user_id.localpart()); + services().users + .set_displayname(&user_id, Some(displayname.clone()))?; + + // Initial account data + services().account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }, + )?; + + // we dont add a device since we're not the user, just the creator + + // Inhibit login does not work for guests + RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: {password}" + )) + } + AdminCommand::DisableRoom { room_id } => { + services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + RoomMessageEventContent::text_plain("Room disabled.") + } + AdminCommand::EnableRoom { room_id } => { + services().rooms.disabledroomids.remove(room_id.as_bytes())?; + RoomMessageEventContent::text_plain("Room enabled.") + } + AdminCommand::DeactivateUser { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + if services().users.exists(&user_id)? { + RoomMessageEventContent::text_plain(format!( + "Making {} leave all rooms before deactivation...", + user_id + )); + + services().users.deactivate_account(&user_id)?; + + if leave_rooms { + services().rooms.leave_all_rooms(&user_id).await?; + } + + RoomMessageEventContent::text_plain(format!( + "User {} has been deactivated", + user_id + )) + } else { + RoomMessageEventContent::text_plain(format!( + "User {} doesn't exist on this server", + user_id + )) + } + } + AdminCommand::DeactivateAll { leave_rooms, force } => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(_) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{} is not a valid username", + username + ))) + } + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| { + match services().users.is_admin(user_id) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, + } + }) + } + + for &user_id in &user_ids { + match services().users.deactivate_account(user_id) { + Ok(_) => deactivation_count += 1, + Err(_) => {} + } + } + + if leave_rooms { + for &user_id in &user_ids { + let _ = services().rooms.leave_all_rooms(user_id).await; + } + } + + if admins.is_empty() { + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.", + deactivation_count + )) + } else { + RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } + }; + + Ok(reply_message_content) + } + + // Utility to turn clap's `--help` text to HTML. 
+ fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + let text = text.replace( + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), + ); + + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Escape option names (e.g. ``) since they look like HTML tags + let text = text.replace("<", "<").replace(">", ">"); + + // Italicize the first line (command name and version text) + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1\n"); + + // Unmerge wrapped lines + let text = text.replace("\n ", " "); + + // Wrap option names in backticks. The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") .expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:\n$1").to_string() - } else { - // Wrap the usage line in a code block, and add a yaml block example - // This makes the usage of e.g. `register-appservice` more accurate - let re = - Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:\n
                <pre>$1[nobr]\n[commandbodyblock]</pre>
                ") - .replace("[commandbodyblock]", &command_body) - }; + let text = re.replace_all(&text, "$1: $4"); - // Add HTML line-breaks - let text = text - .replace("\n\n\n", "\n\n") - .replace("\n", "
                <br>\n") - .replace("[nobr]<br>
                ", ""); + // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // start with a `#` in the USAGE section. + let mut text_lines: Vec<&str> = text.lines().collect(); + let mut command_body = String::new(); - text -} + if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + text_lines.remove(line_index); -/// Create the admin room. -/// -/// Users in this room are considered admins by conduit, and the room can be -/// used to issue admin commands by talking to the server user inside it. -pub(crate) async fn create_admin_room() -> Result<()> { - let room_id = RoomId::new(services().globals.server_name()); + while text_lines + .get(line_index) + .map(|line| line.starts_with("#")) + .unwrap_or(false) + { + command_body += if text_lines[line_index].starts_with("# ") { + &text_lines[line_index][2..] + } else { + &text_lines[line_index][1..] + }; + command_body += "[nobr]\n"; + text_lines.remove(line_index); + } + } - services().rooms.get_or_create_shortroomid(&room_id)?; + let text = text_lines.join("\n"); - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + // Improve the usage section + let text = if command_body.is_empty() { + // Wrap the usage line in code tags + let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") + .expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n$1").to_string() + } else { + // Wrap the usage line in a code block, and add a yaml block example + // This makes the usage of e.g. `register-appservice` more accurate + let re = + Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n
                <pre>$1[nobr]\n[commandbodyblock]</pre>
                ") + .replace("[commandbodyblock]", &command_body) + }; - // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + // Add HTML line-breaks + let text = text + .replace("\n\n\n", "\n\n") + .replace("\n", "
                \n") + .replace("[nobr]
                ", ""); - services().users.create(&conduit_user, None)?; + text + } - let mut content = RoomCreateEventContent::new(conduit_user.clone()); - content.federate = true; - content.predecessor = None; - content.room_version = RoomVersionId::V6; + /// Create the admin room. + /// + /// Users in this room are considered admins by conduit, and the room can be + /// used to issue admin commands by talking to the server user inside it. + pub(crate) async fn create_admin_room(&self) -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); - // 1. The room create event - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services().rooms.get_or_create_shortroomid(&room_id)?; - // 2. Make conduit bot join - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; - // 3. Power levels - let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services().users.create(&conduit_user, None)?; - // 4.1 Join Rules - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + let mut content = RoomCreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = RoomVersionId::V6; + + // 1. The room create event + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 2. 
Make conduit bot join + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - // 4.2 History Visibility - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + // 3. Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); - // 4.3 Guest Access - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - // 5. 
Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) - .expect("Room name is valid"); - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + // 4.1 Join Rules + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.2 History Visibility + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", services().globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + // 4.3 Guest Access + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - // 6. Room alias - let alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); + // 5. Events implied by name and topic + let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) + .expect("Room name is valid"); + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - services().rooms.set_alias(&alias, Some(&room_id))?; + // 6. 
Room alias + let alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); - Ok(()) -} - -/// Invite the user to the conduit admin room. -/// -/// In conduit, this is equivalent to granting admin privileges. -pub(crate) async fn make_user_admin( - user_id: &UserId, - displayname: String, -) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - let room_id = services() - .rooms - .id_from_alias(&admin_room_alias)? - .expect("Admin room must exist"); - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); - - // Invite and join the real user - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - &state_lock, - )?; - - // Set power level - let mut users = BTreeMap::new(); - users.insert(conduit_user.to_owned(), 100.into()); - users.insert(user_id.to_owned(), 100.into()); - - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // Send welcome message - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), - format!("

                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - Ok(()) + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + services().rooms.set_alias(&alias, Some(&room_id))?; + + Ok(()) + } + + /// Invite the user to the conduit admin room. + /// + /// In conduit, this is equivalent to granting admin privileges. + pub(crate) async fn make_user_admin( + &self, + user_id: &UserId, + displayname: String, + ) -> Result<()> { + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + let room_id = services() + .rooms + .id_from_alias(&admin_room_alias)? + .expect("Admin room must exist"); + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Use the server user to grant the new admin's power level + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &user_id, + &room_id, + &state_lock, + )?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // Send welcome message + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), + format!("

                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + Ok(()) + } } diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index eed84d5..cd48e85 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,5 +1,6 @@ +use crate::Result; + pub trait Data { - type Iter: Iterator; /// Registers an appservice and returns the ID to the caller fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; @@ -12,7 +13,7 @@ pub trait Data { fn get_registration(&self, id: &str) -> Result>; - fn iter_ids(&self) -> Result>>; + fn iter_ids(&self) -> Result>>>; fn all(&self) -> Result>; } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index ec4ffc5..63fa3af 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,13 +1,13 @@ mod data; pub use data::Data; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Registers an appservice and returns the ID to the caller pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { self.db.register_appservice(yaml) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs new file mode 100644 index 0000000..f36ab61 --- /dev/null +++ b/src/service/globals/data.rs @@ -0,0 +1,8 @@ +use ruma::signatures::Ed25519KeyPair; + +use crate::Result; + +pub trait Data { + fn load_keypair(&self) -> Result; + fn remove_keypair(&self) -> Result<()>; +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 2b47e5b..556ca71 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::service::*; -use crate::{database::Config, server_server::FedDest, utils, Error, Result}; +use crate::{Config, utils, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -25,8 +25,6 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -use super::abstraction::Tree; - pub const COUNTER: &[u8] = b"c"; type WellKnownMap = HashMap, (FedDest, String)>; @@ -93,47 +91,18 @@ impl Default for RotationHandler { } -impl Service<_> { +impl Service { pub fn load( - globals: Arc, - server_signingkeys: Arc, + db: D, config: Config, ) -> Result { - let keypair_bytes = globals.get(b"keypair")?.map_or_else( - || { - let keypair = utils::generate_keypair(); - globals.insert(b"keypair", &keypair)?; - Ok::<_, Error>(keypair) - }, - |s| Ok(s.to_vec()), - )?; - - let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); - - let keypair = utils::string_from_bytes( - // 1. version - parts - .next() - .expect("splitn always returns at least one element"), - ) - .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) - .and_then(|version| { - // 2. key - parts - .next() - .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) - .map(|key| (version, key)) - }) - .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(key, version) - .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }); + let keypair = db.load_keypair(); let keypair = match keypair { Ok(k) => k, Err(e) => { error!("Keypair invalid. 
Deleting..."); - globals.remove(b"keypair")?; + db.remove_keypair(); return Err(e); } }; @@ -167,7 +136,7 @@ impl Service<_> { let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { - globals, + db, config, keypair: Arc::new(keypair), dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { @@ -181,7 +150,6 @@ impl Service<_> { tls_name_override, federation_client, default_client, - server_signingkeys, jwt_decoding_key, stable_room_versions, unstable_room_versions, diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index be1d6b1..6f6359e 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -1,371 +1,85 @@ -use crate::{utils, Error, Result, services}; -use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, - }, - serde::Raw, - RoomId, UserId, -}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -impl KeyBackups { - pub fn create_backup( +use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; +use crate::Result; + +pub trait Data { + fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, - ) -> Result { - let version = services().globals.next_count()?.to_string(); + ) -> Result; - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()>; - self.backupid_algorithm.insert( - &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - )?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version) - } - - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.remove(&key)?; - self.backupid_etag.remove(&key)?; - - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub fn update_backup( + fn update_backup( &self, user_id: &UserId, version: &str, backup_metadata: &Raw, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); + ) -> Result; - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } + fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; - self.backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes())?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version.to_owned()) - } - - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, _)| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) - .transpose() - } - - pub fn get_latest_backup( + 
fn get_latest_backup( &self, user_id: &UserId, - ) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + ) -> Result)>>; - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, value)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - Ok(( - version, - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") - })?, - )) - }) - .transpose() - } - - pub fn get_backup( + fn get_backup( &self, user_id: &UserId, version: &str, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); + ) -> Result>>; - self.backupid_algorithm - .get(&key)? - .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) - }) - } - - pub fn add_key( + fn add_key( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); + ) -> Result<()>; - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } + fn count_keys(&self, user_id: &UserId, version: &str) -> Result; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; + fn get_etag(&self, user_id: &UserId, version: &str) -> Result; - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes())?; - - Ok(()) - } - - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - - Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) - } - - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - Ok(utils::u64_from_bytes( - &self - .backupid_etag - .get(&key)? - .ok_or_else(|| Error::bad_database("Backup has no etag."))?, - ) - .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? - .to_string()) - } - - pub fn get_all( + fn get_all( &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); + ) -> Result, RoomKeyBackup>>; - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); - - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let room_id = RoomId::parse( - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| { - Error::bad_database("backupkeyid_backup room_id is invalid room id.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((room_id, session_id, key_data)) - }) - { - let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { - sessions: BTreeMap::new(), - }) - .sessions - .insert(session_id, key_data); - } - - Ok(rooms) - } - - pub fn get_room( + fn get_room( &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + ) -> Result>>; - Ok(self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((session_id, key_data)) - }) - .filter_map(|r| r.ok()) - .collect()) - } - - pub fn get_session( + fn get_session( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); + ) -> Result>>; - self.backupkeyid_backup - .get(&key)? 
- .map(|value| { - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - }) - }) - .transpose() - } + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub fn delete_room_keys( + fn delete_room_keys( &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + ) -> Result<()>; - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub fn delete_room_key( + fn delete_room_key( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } + ) -> Result<()>; } diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index be1d6b1..8e842d4 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,3 +1,6 @@ +mod data; +pub use data::Data; + use crate::{utils, Error, Result, services}; use ruma::{ api::client::{ @@ -9,7 +12,11 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -impl KeyBackups { +pub struct Service { + db: D, +} + +impl Service { pub fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/data.rs b/src/service/media/data.rs new file mode 100644 index 0000000..94975de --- /dev/null +++ b/src/service/media/data.rs @@ -0,0 +1,8 @@ +use crate::Result; + +pub trait Data { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result>; + + /// Returns content_disposition, content_type and the metadata key. + fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 1bdf6d4..a5aca03 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,7 +1,8 @@ -use image::{imageops::FilterType, GenericImageView}; +mod data; +pub use data::Data; -use super::abstraction::Tree; -use crate::{utils, Error, Result}; +use image::{imageops::FilterType, GenericImageView}; +use crate::{utils, Error, Result, services}; use std::{mem, sync::Arc}; use tokio::{ fs::File, @@ -14,44 +15,25 @@ pub struct FileMeta { pub file: Vec, } -pub struct Media { - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType +pub struct Service { + db: D, } -impl Media { +impl Service { /// Uploads a file. 
pub async fn create( &self, mxc: String, - globals: &Globals, content_disposition: &Option<&str>, content_type: &Option<&str>, file: &[u8], ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); + // Width, Height = 0 if it's not a thumbnail + let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type); - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; Ok(()) } @@ -60,80 +42,28 @@ impl Media { pub async fn upload_thumbnail( &self, mxc: String, - globals: &Globals, content_disposition: &Option, content_type: &Option, width: u32, height: u32, file: &[u8], ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&width.to_be_bytes()); - key.extend_from_slice(&height.to_be_bytes()); - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); + let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type); - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; f.write_all(file).await?; - self.mediaid_file.insert(&key, &[])?; - Ok(()) } /// Downloads a file. - pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { - let mut prefix = mxc.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - prefix.push(0xff); - - let first = self.mediaid_file.scan_prefix(prefix).next(); - if let Some((key, _)) = first { - let path = globals.get_media_file(&key); + pub async fn get(&self, mxc: String) -> Result> { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; Ok(Some(FileMeta { content_disposition, @@ -170,8 +100,7 @@ impl Media { /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
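The doc comment above describes two thumbnailing paths: requests with width and height <= 96 go through a cropping algorithm, larger requests are scaled down, as the get_thumbnail hunk that follows shows. A rough, self-contained sketch of the dimension math only; the helper name and rounding choices are illustrative assumptions, not code from this patch:

// Hypothetical helper illustrating the two thumbnail paths mentioned above.
fn thumbnail_dimensions(original: (u32, u32), requested: (u32, u32), crop: bool) -> (u32, u32) {
    let (ow, oh) = original;
    let (rw, rh) = requested;
    if crop {
        // Small (<= 96 px) thumbnails are cropped, so the requested box is used
        // directly, clamped to the original size.
        return (rw.min(ow), rh.min(oh));
    }
    // Larger thumbnails are scaled to fit inside the requested box while
    // preserving the aspect ratio of the original image.
    let by_width = (rw, (oh as u64 * rw as u64 / ow as u64) as u32);
    let by_height = ((ow as u64 * rh as u64 / oh as u64) as u32, rh);
    if by_width.1 <= rh {
        by_width
    } else {
        by_height
    }
}

fn main() {
    // A 1920x1080 original requested at 320x240 keeps its 16:9 ratio.
    assert_eq!(thumbnail_dimensions((1920, 1080), (320, 240), false), (320, 180));
    // A 96x96 request goes through the cropping path.
    assert_eq!(thumbnail_dimensions((1920, 1080), (96, 96), true), (96, 96));
}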
pub async fn get_thumbnail( &self, - mxc: &str, - globals: &Globals, + mxc: String, width: u32, height: u32, ) -> Result> { @@ -179,89 +108,23 @@ impl Media { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - let mut main_prefix = mxc.as_bytes().to_vec(); - main_prefix.push(0xff); - - let mut thumbnail_prefix = main_prefix.clone(); - thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); - thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); - thumbnail_prefix.push(0xff); - - let mut original_prefix = main_prefix; - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - original_prefix.push(0xff); - - let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); - let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); - if let Some((key, _)) = first_thumbnailprefix { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, width, height) { // Using saved thumbnail - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in db is invalid.") - })?, - ) - }; Ok(Some(FileMeta { content_disposition, content_type, file: file.to_vec(), })) - } else if let Some((key, _)) = first_originalprefix { + } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { // Generate a thumbnail - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - if let Ok(image) = image::load_from_memory(&file) { let original_width = image.width(); let original_height = image.height(); @@ -317,26 +180,12 @@ impl Media { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let mut thumbnail_key = key.to_vec(); - let width_index = thumbnail_key - .iter() - .position(|&b| b == 0xff) - .ok_or_else(|| Error::bad_database("Media in db is invalid."))? 
- + 1; - let mut widthheight = width.to_be_bytes().to_vec(); - widthheight.extend_from_slice(&height.to_be_bytes()); + let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; - thumbnail_key.splice( - width_index..width_index + 2 * mem::size_of::(), - widthheight, - ); - - let path = globals.get_media_file(&thumbnail_key); + let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; f.write_all(&thumbnail_bytes).await?; - self.mediaid_file.insert(&thumbnail_key, &[])?; - Ok(Some(FileMeta { content_disposition, content_type, diff --git a/src/service/mod.rs b/src/service/mod.rs index 80239cb..4364c72 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,28 +1,29 @@ -pub mod pdu; -pub mod appservice; -pub mod pusher; -pub mod rooms; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; pub mod account_data; pub mod admin; +pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; +pub mod pdu; +pub mod pusher; +pub mod rooms; pub mod sending; +pub mod transaction_ids; +pub mod uiaa; +pub mod users; -pub struct Services { +pub struct Services +{ pub appservice: appservice::Service, pub pusher: pusher::Service, pub rooms: rooms::Service, pub transaction_ids: transaction_ids::Service, pub uiaa: uiaa::Service, pub users: users::Service, - //pub account_data: account_data::Service, - //pub admin: admin::Service, + pub account_data: account_data::Service, + pub admin: admin::Service, pub globals: globals::Service, - //pub key_backups: key_backups::Service, - //pub media: media::Service, - //pub sending: sending::Service, + pub key_backups: key_backups::Service, + pub media: media::Service, + pub sending: sending::Service, } diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 47e21a6..2ed79f2 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Database, Error, services}; +use crate::{Error, services}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, @@ -357,7 +357,7 @@ pub(crate) fn gen_event_id_canonical_json( Ok((event_id, value)) } -/// Build the start of a PDU in order to add it to the `Database`. +/// Build the start of a PDU in order to add it to the Database. 
#[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index ef2b819..3951da7 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,4 +1,5 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; +use crate::Result; pub trait Data { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; @@ -10,5 +11,5 @@ pub trait Data { fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a; + ) -> Box>>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 87e91a1..66a8ae3 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use crate::{services, Error, PduEvent}; +use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ @@ -27,7 +27,7 @@ pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { self.db.set_pusher(sender, pusher) } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 655f32a..c5d45e3 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,24 +1,29 @@ use ruma::{RoomId, RoomAliasId}; +use crate::Result; pub trait Data { /// Creates or updates the alias to the given room id. fn set_alias( + &self, alias: &RoomAliasId, room_id: &RoomId ) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. fn remove_alias( + &self, alias: &RoomAliasId, ) -> Result<()>; /// Looks up the roomid for the given alias. fn resolve_local_alias( + &self, alias: &RoomAliasId, - ) -> Result<()>; + ) -> Result>>; /// Returns all local aliases that point to the given room fn local_aliases_for_room( - alias: &RoomAliasId, - ) -> Result<()>; + &self, + room_id: &RoomId, + ) -> Result>>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index f46609a..abe299d 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + use ruma::{RoomAliasId, RoomId}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn set_alias( &self, @@ -26,7 +28,7 @@ impl Service<_> { #[tracing::instrument(skip(self))] pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { - self.db.resolve_local_alias(alias: &RoomAliasId) + self.db.resolve_local_alias(alias) } #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 88c86fa..5177d6d 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; +use crate::Result; pub trait Data { - fn get_cached_eventid_authchain<'a>() -> Result>; - fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>; + fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index e17c10a..113d2e8 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -3,13 +3,13 @@ use std::{sync::Arc, collections::HashSet}; pub use data::Data; -use 
crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn get_cached_eventid_authchain<'a>( &'a self, diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index e28cdd1..1376721 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,15 +1,16 @@ use ruma::RoomId; +use crate::Result; pub trait Data { /// Adds the room to the public room directory - fn set_public(room_id: &RoomId) -> Result<()>; + fn set_public(&self, room_id: &RoomId) -> Result<()>; /// Removes the room from the public room directory. - fn set_not_public(room_id: &RoomId) -> Result<()>; + fn set_not_public(&self, room_id: &RoomId) -> Result<()>; /// Returns true if the room is in the public room directory. - fn is_public_room(room_id: &RoomId) -> Result; + fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms() -> impl Iterator>> + '_; + fn public_rooms(&self) -> Box>>>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index cb9cda8..6853505 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::RoomId; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 5566fb2..a5ce37f 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -2,7 +2,9 @@ pub mod presence; pub mod read_receipt; pub mod typing; -pub struct Service { +pub trait Data: presence::Data + read_receipt::Data + typing::Data {} + +pub struct Service { presence: presence::Service, read_receipt: read_receipt::Service, typing: typing::Service, diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 8e3c672..ca0e241 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent}; +use crate::Result; pub trait Data { /// Adds a presence event which will be saved until a new event replaces it. diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 5a988d4..646cf54 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -4,13 +4,13 @@ use std::collections::HashMap; pub use data::Data; use ruma::{RoomId, UserId, events::presence::PresenceEvent}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 32b091f..e8ed965 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,4 +1,5 @@ use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; +use crate::Result; pub trait Data { /// Replaces the previous read receipt. 
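Several of the Data traits introduced in this patch (appservice, pusher, directory, read_receipt) return Box<dyn Iterator<...>> where the old inherent methods returned impl Iterator: at the time of this change, trait methods could not use impl Trait in return position, so the concrete iterator is erased behind a trait object instead. A minimal sketch of the pattern, using illustrative names rather than the real Conduit types:

// Trait methods cannot return `impl Iterator` here, so the iterator is boxed.
trait Data {
    fn iter_ids<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a>;
}

// A toy in-memory backend; the real backends wrap the key-value store trees.
struct MemoryData {
    ids: Vec<String>,
}

impl Data for MemoryData {
    fn iter_ids<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a> {
        // Any concrete iterator works; callers only ever see the boxed trait object.
        Box::new(self.ids.iter().cloned())
    }
}

fn main() {
    let db = MemoryData {
        ids: vec!["appservice_a".to_owned(), "appservice_b".to_owned()],
    };
    for id in db.iter_ids() {
        println!("{id}");
    }
}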
@@ -14,13 +15,13 @@ pub trait Data { &self, room_id: &RoomId, since: u64, - ) -> impl Iterator< + ) -> Box, u64, Raw, )>, - >; + >>; /// Sets a private read marker at `count`. fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 744fece..3f0b147 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Replaces the previous read receipt. pub fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 0c77313..ec0be46 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,5 +1,5 @@ use std::collections::HashSet; - +use crate::Result; use ruma::{UserId, RoomId}; pub trait Data { @@ -14,5 +14,5 @@ pub trait Data { fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. - fn typings_all(&self, room_id: &RoomId) -> Result>; + fn typings_all(&self, room_id: &RoomId) -> Result>>; } diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 68b9fd8..00cfdec 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,14 +1,14 @@ mod data; pub use data::Data; -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 7152957..c9b041c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -250,7 +250,7 @@ impl Service { // We go through all the signatures we see on the value and fetch the corresponding signing // keys - self.fetch_required_signing_keys(&value, pub_key_map, db) + self.fetch_required_signing_keys(&value, pub_key_map) .await?; // 2. Check signatures, otherwise drop @@ -1153,6 +1153,11 @@ impl Service { let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; + let first_pdu_in_room = services() + .rooms + .first_pdu_in_room(room_id)? 
+ .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 52a683d..5fefd3f 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,4 +1,5 @@ use ruma::{RoomId, DeviceId, UserId}; +use crate::Result; pub trait Data { fn lazy_load_was_sent_before( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index bdc083a..283d45a 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -4,13 +4,13 @@ use std::collections::HashSet; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 2d718b2..9b1ce07 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,4 +1,5 @@ use ruma::RoomId; +use crate::Result; pub trait Data { fn exists(&self, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 8417e28..1bdb78d 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::RoomId; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 4725034..4da4223 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -16,7 +16,9 @@ pub mod state_compressor; pub mod timeline; pub mod user; -pub struct Service { +pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} + +pub struct Service { pub alias: alias::Service, pub auth_chain: auth_chain::Service, pub directory: directory::Service, diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index d579515..17d0f7b 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,6 +1,6 @@ -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::PduEvent; +use crate::{PduEvent, Result}; pub trait Data { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index ee8b940..a495db8 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::{service::*, PduEvent}; +use crate::{Result, PduEvent}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Returns the pdu from the outlier tree. 
pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu_json(event_id) diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 531823f..fb83902 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use ruma::{EventId, RoomId}; +use crate::Result; pub trait Data { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 3442b83..c57c1a2 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -4,13 +4,13 @@ use std::sync::Arc; pub use data::Data; use ruma::{RoomId, EventId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 16287eb..c0fd2a3 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,11 +1,12 @@ use ruma::RoomId; +use crate::Result; pub trait Data { - fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()>; fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a, Vec)>>; + ) -> Result>>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 9087def..b7023f3 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + +use crate::Result; use ruma::RoomId; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs new file mode 100644 index 0000000..3b1c311 --- /dev/null +++ b/src/service/rooms/short/data.rs @@ -0,0 +1,2 @@ +pub trait Data { +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index afde14e..1eb891e 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,19 +2,18 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, events::StateEventType}; +use ruma::{EventId, events::StateEventType, RoomId}; -use crate::{service::*, Error, utils}; +use crate::{Result, Error, utils, services}; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn get_or_create_shorteventid( &self, event_id: &EventId, - globals: &super::globals::Globals, ) -> Result { if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { return Ok(*short); @@ -24,7 +23,7 @@ impl Service<_> { Some(shorteventid) => utils::u64_from_bytes(&shorteventid) .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, None => { - let shorteventid = globals.next_count()?; + let shorteventid = services().globals.next_count()?; self.eventid_shorteventid .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; self.shorteventid_eventid @@ -82,7 +81,6 @@ impl Service<_> { &self, event_type: &StateEventType, state_key: &str, - globals: &super::globals::Globals, ) -> Result { if let Some(short) = self 
.statekeyshort_cache @@ -101,7 +99,7 @@ impl Service<_> { Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, None => { - let shortstatekey = globals.next_count()?; + let shortstatekey = services().globals.next_count()?; self.statekey_shortstatekey .insert(&statekey, &shortstatekey.to_be_bytes())?; self.shortstatekey_statekey @@ -190,7 +188,7 @@ impl Service<_> { /// Returns (shortstatehash, already_existed) fn get_or_create_shortstatehash( &self, - state_hash: &StateHashId, + state_hash: &[u8], ) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( @@ -199,7 +197,7 @@ impl Service<_> { true, ), None => { - let shortstatehash = globals.next_count()?; + let shortstatehash = services().globals.next_count()?; self.statehash_shortstatehash .insert(state_hash, &shortstatehash.to_be_bytes())?; (shortstatehash, false) @@ -220,13 +218,12 @@ impl Service<_> { pub fn get_or_create_shortroomid( &self, room_id: &RoomId, - globals: &super::globals::Globals, ) -> Result { Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { Some(short) => utils::u64_from_bytes(&short) .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, None => { - let short = globals.next_count()?; + let short = services().globals.next_count()?; self.roomid_shortroomid .insert(room_id.as_bytes(), &short.to_be_bytes())?; short diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index ac8fac2..fd0de28 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,30 +1,28 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; use std::fmt::Debug; - +use crate::Result; use ruma::{EventId, RoomId}; pub trait Data { /// Returns the last state hash key added to the db for the given room. - fn get_room_shortstatehash(room_id: &RoomId); + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; /// Update the current state of the room. - fn set_room_state(room_id: &RoomId, new_shortstatehash: u64, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ); + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; /// Associates a state with an event. - fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()>; + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; /// Returns all events we would send as the prev_events of the next event. - fn get_forward_extremities(room_id: &RoomId) -> Result>>; + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; /// Replace the forward extremities of the room. 
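Several hunks above (short/mod.rs, and later the state and timeline services) drop explicit `globals: &super::globals::Globals` parameters in favour of a `services()` accessor. Assuming a lazily initialized global set up once at startup, that accessor can be sketched roughly as follows; `OnceLock`, `Services`, and the counter here are stand-ins, not Conduit's actual definitions:

    use std::sync::OnceLock;
    use std::sync::atomic::{AtomicU64, Ordering};

    pub struct Globals {
        counter: AtomicU64,
    }

    impl Globals {
        pub fn next_count(&self) -> u64 {
            self.counter.fetch_add(1, Ordering::SeqCst) + 1
        }
    }

    pub struct Services {
        pub globals: Globals,
    }

    static SERVICES: OnceLock<Services> = OnceLock::new();

    // Called once at startup, after config and database are ready.
    pub fn init(services: Services) {
        SERVICES.set(services).ok();
    }

    // Call sites write services().globals.next_count() instead of threading
    // a &Globals (or the whole Database) through every function signature.
    pub fn services() -> &'static Services {
        SERVICES.get().expect("services() called before initialization")
    }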
- fn set_forward_extremities( + fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } - -pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 6c33d52..e6b5ce2 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,13 +6,15 @@ use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEv use serde::Deserialize; use tracing::warn; -use crate::{service::*, SERVICE, PduEvent, Error, utils::calculate_hash}; +use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; + +use super::state_compressor::CompressedStateEvent; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Set the room to the given statehash and update caches. pub fn force_state( &self, @@ -23,11 +25,11 @@ impl Service<_> { ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - SERVICE.rooms.state_compressor.parse_compressed_state_event(new) + services().rooms.state_compressor.parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match SERVICE.rooms.timeline.get_pdu_json(&event_id)? { + let pdu = match services().rooms.timeline.get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -63,10 +65,10 @@ impl Service<_> { Err(_) => continue, }; - SERVICE.room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - SERVICE.room.state_cache.update_joined_count(room_id)?; + services().room.state_cache.update_joined_count(room_id)?; self.db.set_room_state(room_id, shortstatehash); @@ -84,7 +86,7 @@ impl Service<_> { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result<()> { - let shorteventid = SERVICE.short.get_or_create_shorteventid(event_id)?; + let shorteventid = services().short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -96,11 +98,11 @@ impl Service<_> { ); let (shortstatehash, already_existed) = - SERVICE.short.get_or_create_shortstatehash(&state_hash)?; + services().short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| SERVICE.room.state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().room.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -119,7 +121,7 @@ impl Service<_> { } else { (state_ids_compressed, HashSet::new()) }; - SERVICE.room.state_compressor.save_state_from_diff( + services().room.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -176,7 +178,7 @@ impl Service<_> { } // TODO: statehash with deterministic inputs - let shortstatehash = SERVICE.globals.next_count()?; + let shortstatehash = services().globals.next_count()?; let mut statediffnew = HashSet::new(); statediffnew.insert(new); @@ -273,4 +275,8 @@ impl Service<_> { .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; Ok(room_version) } + + pub fn 
get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.db.get_room_shortstatehash(room_id) + } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index bf2972f..48031e4 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,9 +1,11 @@ -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; -use crate::PduEvent; +use crate::{Result, PduEvent}; +#[async_trait] pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 92e5c8e..5d6886d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -4,13 +4,13 @@ use std::{sync::Arc, collections::{HashMap, BTreeMap}}; pub use data::Data; use ruma::{events::StateEventType, RoomId, EventId}; -use crate::{service::*, PduEvent}; +use crate::{Result, PduEvent}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index f651919..b45b2ea 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,5 +1,9 @@ -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; +use crate::Result; pub trait Data { - fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index d29501a..c3b4eb9 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -5,13 +5,13 @@ pub use data::Data; use regex::Regex; use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; -use crate::{service::*, SERVICE, utils, Error}; +use crate::{Result, services, utils, Error}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Update current membership data. 
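In the state_cache hunks that follow, `update_membership` stops writing the `userroomid_*`/`roomuserid_*` trees itself and instead calls `mark_as_joined`, `mark_as_invited`, and `mark_as_left` on the new `Data` trait. A simplified sketch of that split, with the Ruma types reduced to `&str` for brevity:

    type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

    // The key-value details stay behind these methods.
    pub trait Data {
        fn mark_as_joined(&self, user_id: &str, room_id: &str) -> Result<()>;
        fn mark_as_invited(&self, user_id: &str, room_id: &str) -> Result<()>;
        fn mark_as_left(&self, user_id: &str, room_id: &str) -> Result<()>;
    }

    pub struct Service<D: Data> {
        db: D,
    }

    impl<D: Data> Service<D> {
        // The service layer only expresses intent; it no longer touches trees directly.
        pub fn update_membership(&self, user_id: &str, room_id: &str, membership: &str) -> Result<()> {
            match membership {
                "join" => self.db.mark_as_joined(user_id, room_id),
                "invite" => self.db.mark_as_invited(user_id, room_id),
                "leave" | "ban" => self.db.mark_as_left(user_id, room_id),
                _ => Ok(()),
            }
        }
    }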
#[tracing::instrument(skip(self, last_state))] pub fn update_membership( @@ -24,8 +24,8 @@ impl Service<_> { update_joined_count: bool, ) -> Result<()> { // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != SERVICE.globals.server_name() { - SERVICE.users.create(user_id, None)?; + if user_id.server_name() != services().globals.server_name() { + services().users.create(user_id, None)?; // TODO: displayname, avatar url } @@ -37,10 +37,6 @@ impl Service<_> { serverroom_id.push(0xff); serverroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - match &membership { MembershipState::Join => { // Check if the user never joined this room @@ -80,24 +76,23 @@ impl Service<_> { // .ok(); // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( + if let Some(tag_event) = services().account_data.get::( Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag, )? { - SERVICE.account_data + services().account_data .update( Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event, - &db.globals, ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = SERVICE.account_data.get::( + if let Some(mut direct_event) = services().account_data.get::( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -112,7 +107,7 @@ impl Service<_> { } if room_ids_updated { - SERVICE.account_data.update( + services().account_data.update( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -123,16 +118,11 @@ impl Service<_> { } } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; + self.db.mark_as_joined(user_id, room_id)?; } MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = SERVICE + let is_ignored = services() .account_data .get::( None, // Ignored users are in global account data @@ -153,41 +143,22 @@ impl Service<_> { return Ok(()); } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; + self.db.mark_as_invited(user_id, room_id, last_state)?; } MembershipState::Leave | MembershipState::Ban => { - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; + self.db.mark_as_left(user_id, room_id)?; } _ => {} } if update_joined_count { - self.update_joined_count(room_id, db)?; + self.update_joined_count(room_id)?; } Ok(()) } - #[tracing::instrument(skip(self, room_id, db))] + #[tracing::instrument(skip(self, 
room_id))] pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; @@ -196,8 +167,8 @@ impl Service<_> { for joined in self.room_members(room_id).filter_map(|r| r.ok()) { joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) { real_users.insert(joined); } @@ -285,7 +256,7 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, SERVICE.globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()).ok() }); let in_room = bridge_user_id diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 74a28e7..1768936 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,4 +1,5 @@ -use crate::service::rooms::CompressedStateEvent; +use super::CompressedStateEvent; +use crate::Result; pub struct StateDiff { parent: Option, @@ -7,6 +8,6 @@ pub struct StateDiff { } pub trait Data { - fn get_statediff(shortstatehash: u64) -> Result; - fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()>; + fn get_statediff(&self, shortstatehash: u64) -> Result; + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 3aea4fe..619e4cf 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -4,7 +4,7 @@ use std::{mem::size_of, sync::Arc, collections::HashSet}; pub use data::Data; use ruma::{EventId, RoomId}; -use crate::{service::*, utils}; +use crate::{Result, utils, services}; use self::data::StateDiff; @@ -12,7 +12,9 @@ pub struct Service { db: D, } -impl Service<_> { +pub type CompressedStateEvent = [u8; 2 * size_of::()]; + +impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( @@ -62,12 +64,11 @@ impl Service<_> { &self, shortstatekey: u64, event_id: &EventId, - globals: &super::globals::Globals, ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( &self - .get_or_create_shorteventid(event_id, globals)? + .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); Ok(v.try_into().expect("we checked the size above")) @@ -210,15 +211,16 @@ impl Service<_> { /// Returns the new shortstatehash pub fn save_state( + &self, room_id: &RoomId, new_state_ids_compressed: HashSet, ) -> Result<(u64, HashSet, // added HashSet)> // removed { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; + let previous_shortstatehash = self.db.current_shortstatehash(room_id)?; - let state_hash = self.calculate_hash( + let state_hash = utils::calculate_hash( &new_state_ids_compressed .iter() .map(|bytes| &bytes[..]) @@ -226,7 +228,7 @@ impl Service<_> { ); let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { return Ok(()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index bf6d8c5..85bedc6 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; -use crate::PduEvent; +use crate::{Result, PduEvent}; pub trait Data { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; @@ -48,28 +48,26 @@ pub trait Data { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7b60fe5..09f66dd 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,23 +1,29 @@ mod data; +use std::borrow::Cow; +use std::sync::Arc; use std::{sync::MutexGuard, iter, collections::HashSet}; use std::fmt::Debug; pub use data::Data; use regex::Regex; +use ruma::events::room::power_levels::RoomPowerLevelsEventContent; +use ruma::push::Ruleset; use ruma::signatures::CanonicalJsonValue; +use ruma::state_res::RoomVersion; use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; use serde::Deserialize; use serde_json::value::to_raw_value; use tracing::{warn, error}; -use crate::SERVICE; -use crate::{service::{*, pdu::{PduBuilder, EventHash}}, Error, PduEvent, utils}; +use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; + +use super::state_compressor::CompressedStateEvent; pub struct Service { db: D, } -impl Service<_> { +impl Service { /* /// Checks if a room exists. 
#[tracing::instrument(skip(self))] @@ -44,7 +50,7 @@ impl Service<_> { #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - self.db.last_timeline_count(sender_user: &UserId, room_id: &RoomId) + self.db.last_timeline_count(sender_user, room_id) } // TODO Is this the same as the function above? @@ -127,7 +133,7 @@ impl Service<_> { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - self.db.pdu_count(pdu_id, pdu: &PduEvent) + self.db.replace_pdu(pdu_id, pdu) } /// Creates a new persisted data unit and adds it to a room. @@ -177,7 +183,7 @@ impl Service<_> { self.replace_pdu_leaves(&pdu.room_id, leaves)?; let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -186,14 +192,14 @@ impl Service<_> { ); let insert_lock = mutex_insert.lock().unwrap(); - let count1 = db.globals.next_count()?; + let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count1)?; self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - let count2 = db.globals.next_count()?; + let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); @@ -218,7 +224,7 @@ impl Service<_> { drop(insert_lock); // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = services() .rooms .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? 
.map(|ev| { @@ -233,13 +239,13 @@ impl Service<_> { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { + for user in self.get_our_real_users(&pdu.room_id)?.iter() { // Don't notify the user of their own events if user == &pdu.sender { continue; } - let rules_for_user = db + let rules_for_user = services() .account_data .get( None, @@ -252,7 +258,7 @@ impl Service<_> { let mut highlight = false; let mut notify = false; - for action in pusher::get_actions( + for action in services().pusher.get_actions( user, &rules_for_user, &power_levels, @@ -282,8 +288,8 @@ impl Service<_> { highlights.push(userroom_id); } - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; + for senderkey in services().pusher.get_pusher_senderkeys(user) { + services().sending.send_push_pdu(&*pdu_id, senderkey)?; } } @@ -328,7 +334,6 @@ impl Service<_> { content.membership, &pdu.sender, invite_state, - db, true, )?; } @@ -344,34 +349,34 @@ impl Service<_> { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; + services().rooms.search.index_pdu(shortroomid, pdu_id, body)?; - let admin_room = self.id_from_alias( + let admin_room = self.alias.resolve_local_alias( <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), + format!("#admins:{}", services().globals.server_name()).as_str(), ) .expect("#admins:server_name is a valid room alias"), )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); + let server_user = format!("@conduit:{}", services().globals.server_name()); let to_conduit = body.starts_with(&format!("{}: ", server_user)); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); + pdu.sender == server_user && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); + services().admin.process_message(body.to_string()); } } } _ => {} } - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + for appservice in services().appservice.all()? { + if self.appservice_in_room(&pdu.room_id, &appservice)? 
{ + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -388,11 +393,11 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()).ok() }) { if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } } @@ -431,16 +436,16 @@ impl Service<_> { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) + self.room_aliases(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) || users.iter().any(matching_users) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } @@ -464,14 +469,14 @@ impl Service<_> { redacts, } = pdu_builder; - let prev_events: Vec<_> = SERVICE + let prev_events: Vec<_> = services() .rooms .get_pdu_leaves(room_id)? .into_iter() .take(20) .collect(); - let create_event = SERVICE + let create_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")?; @@ -488,7 +493,7 @@ impl Service<_> { // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content - .map_or(SERVICE.globals.default_room_version(), |create_event| { + .map_or(services().globals.default_room_version(), |create_event| { create_event.room_version }); let room_version = @@ -500,7 +505,7 @@ impl Service<_> { // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(services().rooms.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); @@ -525,7 +530,7 @@ impl Service<_> { let pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), - sender: sender_user.to_owned(), + sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -577,13 +582,13 @@ impl Service<_> { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) + to_canonical_value(services().globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); match ruma::signatures::hash_and_sign_event( - SERVICE.globals.server_name().as_str(), - SERVICE.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut pdu_json, &room_version_id, ) { @@ -616,22 +621,20 @@ impl Service<_> { ); // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; + let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; } /// Creates a new persisted data unit and adds it to a room. This function takes a /// roomid_mutex_state, meaning that only this function is able to mutate the room state. 
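Both the state `Data` trait and `build_and_append_pdu` above take a `&MutexGuard<'_, ()>` purely as evidence that the caller is holding the per-room state lock while state is mutated. A small sketch of that "guard as proof" idea, independent of Conduit's types:

    use std::sync::{Mutex, MutexGuard};

    struct RoomState {
        shortstatehash: u64,
    }

    fn set_room_state(
        state: &mut RoomState,
        new_shortstatehash: u64,
        _state_lock: &MutexGuard<'_, ()>, // proof that the room mutex is held
    ) {
        state.shortstatehash = new_shortstatehash;
    }

    fn example(room_mutex: &Mutex<()>, state: &mut RoomState) {
        let guard = room_mutex.lock().unwrap();
        set_room_state(state, 42, &guard);
        drop(guard); // state changes are finished before the lock is released
    }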
- #[tracing::instrument(skip(self, _mutex_lock))] + #[tracing::instrument(skip(self, state_lock))] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - - let (pdu, pdu_json) = self.create_hash_and_sign_event()?; - + let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -664,9 +667,9 @@ impl Service<_> { } // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(SERVICE.globals.server_name()); + servers.remove(services().globals.server_name()); - SERVICE.sending.send_pdu(servers.into_iter(), &pdu_id)?; + services().sending.send_pdu(servers.into_iter(), &pdu_id)?; Ok(pdu.event_id) } @@ -684,20 +687,20 @@ impl Service<_> { ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - SERVICE.rooms.set_event_state( + services().rooms.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, )?; if soft_fail { - SERVICE.rooms + services().rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - SERVICE.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + services().rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } - let pdu_id = SERVICE.rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; + let pdu_id = services().rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; Ok(Some(pdu_id)) } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 47a44ee..a5657bc 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,3 +1,6 @@ +use ruma::{UserId, RoomId}; +use crate::Result; + pub trait Data { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; @@ -17,5 +20,5 @@ pub trait Data { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a>; + ) -> Result>>>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 664f8a0..729887c 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::{RoomId, UserId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.reset_notification_counts(user_id, room_id) } @@ -27,7 +27,7 @@ impl Service<_> { token: u64, shortstatehash: u64, ) -> Result<()> { - self.db.associate_token_shortstatehash(user_id, room_id) + self.db.associate_token_shortstatehash(room_id, token, shortstatehash) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { diff --git a/src/service/sending.rs b/src/service/sending/mod.rs similarity index 88% rename from src/service/sending.rs rename to src/service/sending/mod.rs index 4c830d6..8ab557f 100644 --- a/src/service/sending.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use crate::{ - appservice_server, database::pusher, server_server, utils, 
Database, Error, PduEvent, Result, + utils, Error, PduEvent, Result, services, api::{server_server, appservice_server}, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -34,8 +34,6 @@ use tokio::{ }; use tracing::{error, warn}; -use super::abstraction::Tree; - #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), @@ -77,11 +75,8 @@ pub enum SendingEventType { Edu(Vec), } -pub struct Sending { +pub struct Service { /// The state for a given state hash. - pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content - pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(Vec, Vec)>, } @@ -92,10 +87,9 @@ enum TransactionStatus { Retrying(u32), // number of times failed } -impl Sending { +impl Service { pub fn start_handler( &self, - db: Arc>, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, ) { tokio::spawn(async move { @@ -106,9 +100,7 @@ impl Sending { // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); - let guard = db.read().await; - - for (key, outgoing_kind, event) in guard + for (key, outgoing_kind, event) in services() .sending .servercurrentevent_data .iter() @@ -127,22 +119,19 @@ impl Sending { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - guard.sending.servercurrentevent_data.remove(&key).unwrap(); + services().sending.servercurrentevent_data.remove(&key).unwrap(); continue; } entry.push(event); } - drop(guard); - for (outgoing_kind, events) in initial_transactions { current_transaction_status .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); futures.push(Self::handle_events( outgoing_kind.clone(), events, - Arc::clone(&db), )); } @@ -151,17 +140,15 @@ impl Sending { Some(response) = futures.next() => { match response { Ok(outgoing_kind) => { - let guard = db.read().await; - let prefix = outgoing_kind.get_prefix(); - for (key, _) in guard.sending.servercurrentevent_data + for (key, _) in services().sending.servercurrentevent_data .scan_prefix(prefix.clone()) { - guard.sending.servercurrentevent_data.remove(&key).unwrap(); + services().sending.servercurrentevent_data.remove(&key).unwrap(); } // Find events that have been added since starting the last request - let new_events: Vec<_> = guard.sending.servernameevent_data + let new_events: Vec<_> = services().sending.servernameevent_data .scan_prefix(prefix.clone()) .filter_map(|(k, v)| { Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) @@ -175,17 +162,14 @@ impl Sending { // Insert pdus we found for (e, key) in &new_events { let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - guard.sending.servercurrentevent_data.insert(key, value).unwrap(); - guard.sending.servernameevent_data.remove(key).unwrap(); + services().sending.servercurrentevent_data.insert(key, value).unwrap(); + services().sending.servernameevent_data.remove(key).unwrap(); } - drop(guard); - futures.push( Self::handle_events( outgoing_kind.clone(), new_events.into_iter().map(|(event, _)| event.1).collect(), - Arc::clone(&db), ) ); } else { @@ -206,15 +190,12 @@ impl Sending { }, Some((key, value)) = receiver.recv() => { if let 
Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { - let guard = db.read().await; - if let Ok(Some(events)) = Self::select_events( &outgoing_kind, vec![(event, key)], &mut current_transaction_status, - &guard ) { - futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db))); + futures.push(Self::handle_events(outgoing_kind, events)); } } } @@ -223,12 +204,11 @@ impl Sending { }); } - #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status, db))] + #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status))] fn select_events( outgoing_kind: &OutgoingKind, new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key current_transaction_status: &mut HashMap, TransactionStatus>, - db: &Database, ) -> Result>> { let mut retry = false; let mut allow = true; @@ -266,7 +246,7 @@ impl Sending { if retry { // We retry the previous transaction - for (key, value) in db.sending.servercurrentevent_data.scan_prefix(prefix) { + for (key, value) in services().sending.servercurrentevent_data.scan_prefix(prefix) { if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { events.push(e); } @@ -278,22 +258,22 @@ impl Sending { } else { &[][..] }; - db.sending + services().sending .servercurrentevent_data .insert(&full_key, value)?; // If it was a PDU we have to unqueue it // TODO: don't try to unqueue EDUs - db.sending.servernameevent_data.remove(&full_key)?; + services().sending.servernameevent_data.remove(&full_key)?; events.push(e); } if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { + if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - db.sending + services().sending .servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; } @@ -303,10 +283,10 @@ impl Sending { Ok(Some(events)) } - #[tracing::instrument(skip(db, server))] - pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec>, u64)> { + #[tracing::instrument(skip(server))] + pub fn select_edus(server: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu - let since = db + let since = services() .sending .servername_educount .get(server.as_bytes())? 
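`select_edus` above keeps a per-destination watermark (`servername_educount`) so each outgoing transaction only carries EDUs with a count newer than what was already sent to that server, updating the watermark afterwards. Roughly, and ignoring the real storage layer:

    use std::collections::HashMap;

    struct EduQueue {
        servername_educount: HashMap<String, u64>, // last count sent per server
        events: Vec<(String, u64)>,                // (destination, count)
    }

    impl EduQueue {
        fn select_edus(&mut self, server: &str) -> Vec<u64> {
            let since = *self.servername_educount.get(server).unwrap_or(&0);
            let mut max_count = since;
            let selected: Vec<u64> = self
                .events
                .iter()
                .filter(|(dest, count)| dest == server && *count > since)
                .map(|(_, count)| {
                    max_count = max_count.max(*count);
                    *count
                })
                .collect();
            self.servername_educount.insert(server.to_owned(), max_count);
            selected
        }
    }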
@@ -318,25 +298,25 @@ impl Sending { let mut max_edu_count = since; let mut device_list_changes = HashSet::new(); - 'outer: for room_id in db.rooms.server_rooms(server) { + 'outer: for room_id in services().rooms.server_rooms(server) { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( - db.users + services().users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()) - .filter(|user_id| user_id.server_name() == db.globals.server_name()), + .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); // Look for read receipts in this room - for r in db.rooms.edus.readreceipts_since(&room_id, since) { + for r in services().rooms.edus.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; if count > max_edu_count { max_edu_count = count; } - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { continue; } @@ -496,14 +476,11 @@ impl Sending { Ok(()) } - #[tracing::instrument(skip(db, events, kind))] + #[tracing::instrument(skip(events, kind))] async fn handle_events( kind: OutgoingKind, events: Vec, - db: Arc>, ) -> Result { - let db = db.read().await; - match &kind { OutgoingKind::Appservice(id) => { let mut pdu_jsons = Vec::new(); @@ -511,7 +488,7 @@ impl Sending { for event in &events { match event { SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(db.rooms + pdu_jsons.push(services().rooms .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -530,11 +507,10 @@ impl Sending { } } - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; let response = appservice_server::send_request( - &db.globals, - db.appservice + services().appservice .get_registration(&id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -576,7 +552,7 @@ impl Sending { match event { SendingEventType::Pdu(pdu_id) => { pdus.push( - db.rooms + services().rooms .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -624,7 +600,7 @@ impl Sending { senderkey.push(0xff); senderkey.extend_from_slice(pushkey); - let pusher = match db + let pusher = match services() .pusher .get_pusher(&senderkey) .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? @@ -633,7 +609,7 @@ impl Sending { None => continue, }; - let rules_for_user = db + let rules_for_user = services() .account_data .get( None, @@ -644,22 +620,21 @@ impl Sending { .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); - let unread: UInt = db + let unread: UInt = services() .rooms .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() .expect("notifiation count can't go that high"); - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; - let _response = pusher::send_push_notice( + let _response = services().pusher.send_push_notice( &userid, unread, &pusher, rules_for_user, &pdu, - &db, ) .await .map(|_response| kind.clone()) @@ -678,7 +653,7 @@ impl Sending { SendingEventType::Pdu(pdu_id) => { // TODO: check room version and remove event_id if needed let raw = PduEvent::convert_to_outgoing_federation_event( - db.rooms + services().rooms .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
.ok_or_else(|| { @@ -700,13 +675,12 @@ impl Sending { } } - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; let response = server_server::send_request( - &db.globals, &*server, send_transaction_message::v1::Request { - origin: db.globals.server_name(), + origin: services().globals.server_name(), pdus: &pdu_jsons, edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), @@ -809,10 +783,9 @@ impl Sending { }) } - #[tracing::instrument(skip(self, globals, destination, request))] + #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( &self, - globals: &crate::database::globals::Globals, destination: &ServerName, request: T, ) -> Result @@ -820,16 +793,15 @@ impl Sending { T: Debug, { let permit = self.maximum_requests.acquire().await; - let response = server_server::send_request(globals, destination, request).await; + let response = server_server::send_request(destination, request).await; drop(permit); response } - #[tracing::instrument(skip(self, globals, registration, request))] + #[tracing::instrument(skip(self, registration, request))] pub async fn send_appservice_request( &self, - globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, ) -> Result @@ -837,7 +809,7 @@ impl Sending { T: Debug, { let permit = self.maximum_requests.acquire().await; - let response = appservice_server::send_request(globals, registration, request).await; + let response = appservice_server::send_request(registration, request).await; drop(permit); response diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index c1b4715..6e71dd4 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,3 +1,6 @@ +use ruma::{DeviceId, UserId, TransactionId}; +use crate::Result; + pub trait Data { fn add_txnid( &self, diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 9b76e13..ea92372 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,14 +1,14 @@ mod data; pub use data::Data; -use ruma::{UserId, DeviceId, TransactionId}; -use crate::service::*; +use ruma::{UserId, DeviceId, TransactionId}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index cc943bf..d7fa79d 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,4 +1,5 @@ use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; +use crate::Result; pub trait Data { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 5e1df8f..ffdbf35 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,15 +1,16 @@ mod data; pub use data::Data; -use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; + +use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; use tracing::error; -use crate::{service::*, utils, Error, SERVICE}; +use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Creates a new Uiaa 
session. Make sure the session token is unique. pub fn create( &self, @@ -56,7 +57,7 @@ impl Service<_> { .. }) => { let username = match identifier { - UserIdOrLocalpart(username) => username, + IncomingUserIdentifier::UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, @@ -66,13 +67,13 @@ impl Service<_> { }; let user_id = - UserId::parse_with_server_name(username.clone(), SERVICE.globals.server_name()) + UserId::parse_with_server_name(username.clone(), services().globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") })?; // Check if password is correct - if let Some(hash) = SERVICE.users.password_hash(&user_id)? { + if let Some(hash) = services().users.password_hash(&user_id)? { let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 327e0c6..3f87589 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,8 +1,8 @@ use std::collections::BTreeMap; - +use crate::Result; use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; -trait Data { +pub trait Data { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result; @@ -16,7 +16,7 @@ trait Data { fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - fn iter(&self) -> impl Iterator>> + '_; + fn iter(&self) -> Box>>>; /// Returns a list of local users as list of usernames. /// @@ -69,7 +69,7 @@ trait Data { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a; + ) -> Box>>>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -125,7 +125,7 @@ trait Data { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a; + ) -> Box>>>; fn mark_device_key_update( &self, @@ -193,7 +193,7 @@ trait Data { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a; + ) -> Box>>; /// Creates a new sync filter. Returns the filter id. fn create_filter( diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index bfa4b8e..dfe6c7f 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,15 +2,15 @@ mod data; use std::{collections::BTreeMap, mem}; pub use data::Data; -use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}}; +use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; -use crate::{service::*, Error}; +use crate::{Result, Error, services}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Check if a user has an account on this homeserver. 
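The users `Data` trait above changes its iterator-returning methods from `impl Iterator` to boxed trait objects, since trait methods could not return `impl Trait` on stable Rust at the time of this refactor. The same pattern with a toy backend, just to show the shape of the signatures:

    type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

    pub trait Data {
        // `impl Iterator` is not allowed here, so a boxed trait object is returned.
        fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = Result<String>> + 'a>;
    }

    struct MemoryBackend {
        users: Vec<String>,
    }

    impl Data for MemoryBackend {
        fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = Result<String>> + 'a> {
            let it = self.users.iter().cloned().map(|u| -> Result<String> { Ok(u) });
            Box::new(it)
        }
    }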
pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) @@ -22,19 +22,19 @@ impl Service<_> { } /// Check if a user is an admin - fn is_admin( + pub fn is_admin( &self, user_id: &UserId, ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); + let admin_room_id = services().rooms.alias.resolve_local_alias(&admin_room_alias_id)?.unwrap(); - rooms.is_joined(user_id, &admin_room_id) + services().rooms.state_cache.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. - fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.db.set_password(user_id, password)?; Ok(()) } From 8708cd3b633d88d260982563f2e2826bc8b12038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 09:34:25 +0200 Subject: [PATCH 1212/1727] 431 errors left --- src/api/client_server/media.rs | 2 +- src/api/client_server/membership.rs | 61 ++- src/api/client_server/message.rs | 19 +- src/api/client_server/presence.rs | 7 +- src/api/client_server/profile.rs | 11 +- src/api/client_server/read_marker.rs | 17 +- src/api/client_server/redact.rs | 2 +- src/api/client_server/report.rs | 2 +- src/api/client_server/room.rs | 53 +-- src/api/client_server/search.rs | 5 +- src/api/client_server/state.rs | 18 +- src/api/client_server/sync.rs | 79 ++-- src/api/client_server/typing.rs | 6 +- src/api/client_server/user_directory.rs | 6 +- src/api/client_server/voip.rs | 2 +- src/api/server_server.rs | 379 +++---------------- src/database/key_value/appservice.rs | 6 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/mod.rs | 2 +- src/lib.rs | 4 +- src/service/account_data/mod.rs | 110 +----- src/service/admin/mod.rs | 52 +-- src/service/appservice/data.rs | 2 +- src/service/globals/mod.rs | 3 +- src/service/key_backups/mod.rs | 280 +------------- src/service/media/mod.rs | 8 +- src/service/pusher/mod.rs | 2 + src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/edus/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 441 +++++++++++++++++----- src/service/rooms/state/mod.rs | 10 +- src/service/rooms/timeline/mod.rs | 12 +- 32 files changed, 640 insertions(+), 973 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index d6e8213..316e284 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -196,7 +196,7 @@ pub async fn get_content_thumbnail_route( .upload_thumbnail( mxc, &None, - &get_thumbnail_response.content_type, + &get_thumbnail_response.content_type.as_deref(), body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index d6f820a..98931f2 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -481,7 +481,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if services().rooms.is_supported_version(&room_version) 
=> room_version, + Some(room_version) if services().rooms.metadata.is_supported_version(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -591,7 +591,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - services().rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &pdu.kind.to_string().into(), @@ -621,14 +621,6 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - services().rooms.state.force_state( - room_id, - state - .into_iter() - .map(|(k, id)| services().rooms.compress_state_event(k, &id)) - .collect::>()?, - )?; - for result in send_join_response .room_state .auth_chain @@ -640,14 +632,21 @@ async fn join_room_by_id_helper( Err(_) => continue, }; - services().rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; } + let shortstatehash = services().rooms.state.set_event_state( + event_id, + room_id, + state + .into_iter() + .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .collect::>()?, + )?; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = services().rooms.append_to_state(&parsed_pdu)?; - - services().rooms.append_pdu( + services().rooms.timeline.append_pdu( &parsed_pdu, join_event, iter::once(&*parsed_pdu.event_id), @@ -655,7 +654,9 @@ async fn join_room_by_id_helper( // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.set_room_state(room_id, statehashid)?; + services().rooms.state.set_room_state(room_id, shortstatehash)?; + + let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, @@ -668,7 +669,7 @@ async fn join_room_by_id_helper( join_authorized_via_users_server: None, }; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -678,7 +679,6 @@ async fn join_room_by_id_helper( }, sender_user, room_id, - services(), &state_lock, )?; } @@ -786,7 +786,7 @@ pub(crate) async fn invite_helper<'a>( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, - }, sender_user, room_id, &state_lock); + }, sender_user, room_id, &state_lock)?; let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; @@ -811,7 +811,7 @@ pub(crate) async fn invite_helper<'a>( create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &services().state.get_room_version(&room_id)?, + room_version: &services().rooms.state.get_room_version(&room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -846,7 +846,7 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = services().rooms.event_handler.handle_incoming_pdu( + let pdu_id: Vec = 
services().rooms.event_handler.handle_incoming_pdu( &origin, &event_id, room_id, @@ -854,13 +854,7 @@ pub(crate) async fn invite_helper<'a>( true, &pub_key_map, ) - .await - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? + .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", @@ -868,6 +862,7 @@ pub(crate) async fn invite_helper<'a>( let servers = services() .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -877,7 +872,7 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -894,7 +889,7 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -926,8 +921,9 @@ pub(crate) async fn invite_helper<'a>( pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { let all_rooms = services() .rooms + .state_cache .rooms_joined(user_id) - .chain(services().rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .chain(services().rooms.state_cache.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) .collect::>(); for room_id in all_rooms { @@ -955,7 +951,7 @@ pub async fn leave_room( let last_state = services().rooms.state_cache .invite_state(user_id, room_id)? - .map_or_else(|| services().rooms.left_state(user_id, room_id), |s| Ok(Some(s)))?; + .map_or_else(|| services().rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; // We always drop the invite, we can't rely on other servers services().rooms.state_cache.update_membership( @@ -978,7 +974,7 @@ pub async fn leave_room( let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms.state.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1017,6 +1013,7 @@ async fn remote_leave_room( let invite_state = services() .rooms + .state_cache .invite_state(user_id, room_id)? .ok_or(Error::BadRequest( ErrorKind::BadState, diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 861f9c1..bfdc2fd 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -68,7 +68,7 @@ pub async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) @@ -108,7 +108,7 @@ pub async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -129,7 +129,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); services().rooms - .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; + .lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -144,12 +144,13 @@ pub async fn get_message_events_route( get_message_events::v3::Direction::Forward => { let events_after: Vec<_> = services() .rooms + .timeline .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { services().rooms - .pdu_count(&pdu_id) + .timeline.pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() }) @@ -157,7 +158,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -181,11 +182,13 @@ pub async fn get_message_events_route( get_message_events::v3::Direction::Backward => { let events_before: Vec<_> = services() .rooms + .timeline .pdus_until(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { services().rooms + .timeline .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -194,7 +197,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -220,7 +223,7 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = - services().rooms + services().rooms.state_accessor .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? { resp.state.push(member_event.to_state_event()); @@ -228,7 +231,7 @@ pub async fn get_message_events_route( } if let Some(next_token) = next_token { - services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( sender_user, sender_device, &body.room_id, diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index bc220b8..6a915e4 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -10,10 +10,10 @@ pub async fn set_presence_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in services().rooms.rooms_joined(sender_user) { + for room_id in services().rooms.state_cache.rooms_joined(sender_user) { let room_id = room_id?; - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -51,13 +51,14 @@ pub async fn get_presence_route( for room_id in services() .rooms - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + .user.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; if let Some(presence) = services() .rooms .edus + .presence .get_last_presence_event(sender_user, &room_id)? 
{ presence_event = Some(presence); diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 7a87bcd..3e1d736 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -30,6 +30,7 @@ pub async fn set_displayname_route( // Send a new membership event and presence update into all joined rooms let all_rooms_joined: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { @@ -40,6 +41,7 @@ pub async fn set_displayname_route( displayname: body.displayname.clone(), ..serde_json::from_str( services().rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -80,10 +82,11 @@ pub async fn set_displayname_route( let _ = services() .rooms + .timeline .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -155,6 +158,7 @@ pub async fn set_avatar_url_route( // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { @@ -165,6 +169,7 @@ pub async fn set_avatar_url_route( avatar_url: body.avatar_url.clone(), ..serde_json::from_str( services().rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -205,10 +210,11 @@ pub async fn set_avatar_url_route( let _ = services() .rooms + .timeline .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -226,7 +232,6 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - &services().globals, )?; } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 284ae65..eda57d5 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -31,15 +31,15 @@ pub async fn set_read_marker_route( )?; if let Some(event) = &body.read_receipt { - services().rooms.edus.private_read_set( + services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + services().rooms.timeline.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, )?; - services().rooms + services().rooms.user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -56,7 +56,7 @@ pub async fn set_read_marker_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event.to_owned(), receipts); - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { @@ -77,17 +77,18 @@ pub async fn create_receipt_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.edus.private_read_set( + services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, services().rooms + .timeline .get_pdu_count(&body.event_id)? 
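// Sketch of the propagation loop behind set_displayname_route above: every joined
// room gets a patched copy of the user's current m.room.member event, appended via
// the timeline service. The PduBuilder contents and the per-room state lock are
// abbreviated; the service calls are the ones shown in this patch.
for room_id in services()
    .rooms
    .state_cache
    .rooms_joined(sender_user)
    .filter_map(|r| r.ok())
{
    let _current_member = services().rooms.state_accessor.room_state_get(
        &room_id,
        &StateEventType::RoomMember,
        sender_user.as_str(),
    )?;
    // ...build a PduBuilder whose content copies `_current_member` with the new
    // displayname, then append it:
    // services().rooms.timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?;
}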
.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, )?; - services().rooms + services().rooms.user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -103,7 +104,7 @@ pub async fn create_receipt_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(body.event_id.to_owned(), receipts); - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { @@ -112,7 +113,5 @@ pub async fn create_receipt_route( }, )?; - services().flush()?; - Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index d6699bc..57e442a 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -29,7 +29,7 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index 2c2a549..efcc434 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -14,7 +14,7 @@ pub async fn report_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match services().rooms.get_pdu(&body.event_id)? { + let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? { Some(pdu) => pdu, _ => { return Err(Error::BadRequest( diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index f8d0602..a7fa952 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -54,7 +54,7 @@ pub async fn create_room_route( let room_id = RoomId::new(services().globals.server_name()); - services().rooms.get_or_create_shortroomid(&room_id)?; + services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( services().globals @@ -162,7 +162,7 @@ pub async fn create_room_route( } // 1. The room create event - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -176,7 +176,7 @@ pub async fn create_room_route( )?; // 2. Let the room creator join - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -237,7 +237,7 @@ pub async fn create_room_route( } } - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) @@ -253,7 +253,7 @@ pub async fn create_room_route( // 4. Canonical room alias if let Some(room_alias_id) = &alias { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -274,7 +274,7 @@ pub async fn create_room_route( // 5. 
Events set by preset // 5.1 Join Rules - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { @@ -293,7 +293,7 @@ pub async fn create_room_route( )?; // 5.2 History Visibility - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -310,7 +310,7 @@ pub async fn create_room_route( )?; // 5.3 Guest Access - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { @@ -344,12 +344,12 @@ pub async fn create_room_route( } services().rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; + .timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; } // 7. Events implied by name and topic if let Some(name) = &body.name { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) @@ -365,7 +365,7 @@ pub async fn create_room_route( } if let Some(topic) = &body.topic { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -390,11 +390,11 @@ pub async fn create_room_route( // Homeserver specific stuff if let Some(alias) = alias { - services().rooms.set_alias(&alias, Some(&room_id))?; + services().rooms.alias.set_alias(&alias, &room_id)?; } if body.visibility == room::Visibility::Public { - services().rooms.set_public(&room_id, true)?; + services().rooms.directory.set_public(&room_id)?; } info!("{} created a room", sender_user); @@ -412,7 +412,7 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -422,6 +422,7 @@ pub async fn get_room_event_route( Ok(get_room_event::v3::Response { event: services() .rooms + .timeline .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? .to_room_event(), @@ -438,7 +439,7 @@ pub async fn get_room_aliases_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
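// Sketch of the call shape used for each of the room-creation events above, with the
// optional m.room.topic event as the example. The empty state_key / None fields follow
// the pattern used by the other builders in this patch; PduBuilder, to_raw_value and
// state_lock are the same items used throughout create_room_route.
if let Some(topic) = &body.topic {
    services().rooms.timeline.build_and_append_pdu(
        PduBuilder {
            event_type: RoomEventType::RoomTopic,
            content: to_raw_value(&RoomTopicEventContent {
                topic: topic.clone(),
            })
            .expect("event is valid, we just created it"),
            unsigned: None,
            state_key: Some("".to_owned()),
            redacts: None,
        },
        sender_user,
        &room_id,
        &state_lock,
    )?;
}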
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -448,7 +449,7 @@ pub async fn get_room_aliases_route( Ok(aliases::v3::Response { aliases: services() .rooms - .room_aliases(&body.room_id) + .alias.local_aliases_for_room(&body.room_id) .filter_map(|a| a.ok()) .collect(), }) @@ -479,7 +480,7 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(services().globals.server_name()); services().rooms - .get_or_create_shortroomid(&replacement_room)?; + .short.get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( services().globals @@ -493,7 +494,7 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = services().rooms.build_and_append_pdu( + let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { @@ -525,6 +526,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content @@ -572,7 +574,7 @@ pub async fn upgrade_room_route( )); } - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) @@ -587,7 +589,7 @@ pub async fn upgrade_room_route( )?; // Join the new room - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -625,12 +627,12 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match services().rooms.room_state_get(&body.room_id, &event_type, "")? { + let event_content = match services().rooms.state_accessor.room_state_get(&body.room_id, &event_type, "")? { Some(v) => v.content.clone(), None => continue, // Skipping missing events. }; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: event_content, @@ -645,14 +647,15 @@ pub async fn upgrade_room_route( } // Moves any local aliases to the new room - for alias in services().rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { + for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(|r| r.ok()) { services().rooms - .set_alias(&alias, Some(&replacement_room))?; + .alias.set_alias(&alias, &replacement_room)?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content @@ -666,7 +669,7 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = services().rooms.build_and_append_pdu( + let _ = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index b7eecd5..f648649 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -24,6 +24,7 @@ pub async fn search_events_route( let room_ids = filter.rooms.clone().unwrap_or_else(|| { services().rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() @@ -34,7 +35,7 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -43,6 +44,7 @@ pub async fn search_events_route( if let Some(search) = services() .rooms + .search .search_pdus(&room_id, &search_criteria.search_term)? { searches.push(search.0.peekable()); @@ -86,6 +88,7 @@ pub async fn search_events_route( rank: None, result: services() .rooms + .timeline .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), }) diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index b2dfe2a..ece7453 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -90,9 +90,10 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -115,6 +116,7 @@ pub async fn get_state_events_route( Ok(get_state_events::v3::Response { room_state: services() .rooms + .state_accessor .room_state_full(&body.room_id) .await? .values() @@ -136,10 +138,10 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -160,7 +162,7 @@ pub async fn get_state_events_for_key_route( let event = services() .rooms - .room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .state_accessor.room_state_get(&body.room_id, &body.event_type, &body.state_key)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -185,10 +187,10 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -209,7 +211,7 @@ pub async fn get_state_events_for_empty_key_route( let event = services() .rooms - .room_state_get(&body.room_id, &body.event_type, "")? + .state_accessor.room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -269,7 +271,7 @@ async fn send_state_event_for_key_helper( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index cc4ebf6..e38ea60 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -172,7 +172,7 @@ async fn sync_helper( }; // TODO: match body.set_presence { - services().rooms.edus.ping_presence(&sender_user)?; + services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them let watcher = services().watch(&sender_user, &sender_device); @@ -216,7 +216,7 @@ async fn sync_helper( .filter_map(|r| r.ok()), ); - let all_joined_rooms = services().rooms.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services().rooms.state_cache.rooms_joined(&sender_user).collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -237,9 +237,10 @@ async fn sync_helper( let timeline_pdus; let limited; - if services().rooms.last_timeline_count(&sender_user, &room_id)? > since { + if services().rooms.timeline.last_timeline_count(&sender_user, &room_id)? > since { let mut non_timeline_pdus = services() .rooms + .timeline .pdus_until(&sender_user, &room_id, u64::MAX)? .filter_map(|r| { // Filter out buggy events @@ -250,6 +251,7 @@ async fn sync_helper( }) .take_while(|(pduid, _)| { services().rooms + .timeline .pdu_count(pduid) .map_or(false, |count| count > since) }); @@ -275,6 +277,7 @@ async fn sync_helper( || services() .rooms .edus + .read_receipt .last_privateread_update(&sender_user, &room_id)? > since; @@ -283,24 +286,24 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - services().rooms + services().rooms.lazy_loading .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; // Database queries: - let current_shortstatehash = if let Some(s) = services().rooms.current_shortstatehash(&room_id)? { + let current_shortstatehash = if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? 
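// Sketch of the incremental-sync bookkeeping used above: the room's current state hash
// comes from the state service, the hash recorded at the client's `since` token comes
// from the per-user service, and after the response is built the next_batch token is
// associated with the current hash so the following sync can diff against it.
let current_shortstatehash = services()
    .rooms
    .state
    .get_room_shortstatehash(&room_id)?
    .expect("joined rooms have state"); // the handler above logs the error and skips instead
let since_shortstatehash = services().rooms.user.get_token_shortstatehash(&room_id, since)?;
// ...compute timeline, state and EDUs from the two snapshots...
services()
    .rooms
    .user
    .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;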
{ s } else { error!("Room {} has no state", room_id); continue; }; - let since_shortstatehash = services().rooms.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services().rooms.user.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = services().rooms.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = services().rooms.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services().rooms.state_cache.room_joined_count(&room_id)?.unwrap_or(0); + let invited_member_count = services().rooms.state_cache.room_invited_count(&room_id)?.unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -311,7 +314,7 @@ async fn sync_helper( for hero in services() .rooms - .all_pdus(&sender_user, &room_id)? + .timeline.all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { @@ -329,8 +332,8 @@ async fn sync_helper( if matches!( content.membership, MembershipState::Join | MembershipState::Invite - ) && (services().rooms.is_joined(&user_id, &room_id)? - || services().rooms.is_invited(&user_id, &room_id)?) + ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? + || services().rooms.state_cache.is_invited(&user_id, &room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -371,17 +374,17 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -398,7 +401,7 @@ async fn sync_helper( || body.full_state || timeline_users.contains(&state_key) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -420,12 +423,12 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - services().rooms + services().rooms.lazy_loading .lazy_load_reset(&sender_user, &sender_device, &room_id)?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. 
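// Sketch of the lazy-loading membership bookkeeping referenced in the comment above:
// a member event is sent only if this device has not received it before, and whatever
// is sent is recorded afterwards. The insert/push pair mirrors the surrounding
// handlers; the remaining lazy_load_mark_sent arguments are elided as above.
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
    &sender_user,
    &sender_device,
    &room_id,
    &event.sender,
)? {
    if let Some(member_event) = services().rooms.state_accessor.room_state_get(
        &room_id,
        &StateEventType::RoomMember,
        event.sender.as_str(),
    )? {
        lazy_loaded.insert(event.sender.clone());
        state_events.push(member_event);
    }
}
// followed by services().rooms.lazy_loading.lazy_load_mark_sent(&sender_user, &sender_device, &room_id, ...)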
- services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -449,6 +452,7 @@ async fn sync_helper( let since_sender_member: Option = services() .rooms + .state_accessor .state_get( since_shortstatehash, &StateEventType::RoomMember, @@ -467,12 +471,12 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; - let since_state_ids = services().rooms.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; + let since_state_ids = services().rooms.state_accessor.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -505,14 +509,14 @@ async fn sync_helper( continue; } - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &event.sender, )? || lazy_load_send_redundant { - if let Some(member_event) = services().rooms.room_state_get( + if let Some(member_event) = services().rooms.state_accessor.room_state_get( &room_id, &StateEventType::RoomMember, event.sender.as_str(), @@ -523,7 +527,7 @@ async fn sync_helper( } } - services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -533,11 +537,12 @@ async fn sync_helper( let encrypted_room = services() .rooms - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .state_accessor.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? .is_some(); let since_encryption = services().rooms + .state_accessor .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: @@ -588,6 +593,7 @@ async fn sync_helper( // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( services().rooms + .state_cache .room_members(&room_id) .flatten() .filter(|user_id| { @@ -627,6 +633,7 @@ async fn sync_helper( let notification_count = if send_notification_counts { Some( services().rooms + .user .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -638,6 +645,7 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( services().rooms + .user .highlight_count(&sender_user, &room_id)? .try_into() .expect("highlight count can't go that high"), @@ -649,7 +657,7 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(services().rooms.pdu_count(pdu_id)?.to_string())) + Ok(Some(services().rooms.timeline.pdu_count(pdu_id)?.to_string())) })?; let room_events: Vec<_> = timeline_pdus @@ -660,15 +668,16 @@ async fn sync_helper( let mut edus: Vec<_> = services() .rooms .edus + .read_receipt .readreceipts_since(&room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.last_typing_update(&room_id, &services().globals)? > since { + if services().rooms.edus.typing.last_typing_update(&room_id)? 
> since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -676,7 +685,7 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - services().rooms + services().rooms.user .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; let joined_room = JoinedRoom { @@ -723,6 +732,7 @@ async fn sync_helper( for (user_id, presence) in services().rooms .edus + .presence .presence_since(&room_id, since)? { match presence_updates.entry(user_id) { @@ -755,7 +765,7 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services().rooms.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services().rooms.state_cache.rooms_left(&sender_user).collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; @@ -773,7 +783,7 @@ async fn sync_helper( drop(insert_lock); } - let left_count = services().rooms.get_left_count(&room_id, &sender_user)?; + let left_count = services().rooms.state_cache.get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -797,7 +807,7 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services().rooms.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services().rooms.state_cache.rooms_invited(&sender_user).collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; @@ -815,7 +825,7 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = services().rooms.get_invite_count(&room_id, &sender_user)?; + let invite_count = services().rooms.state_cache.get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -835,12 +845,13 @@ async fn sync_helper( for user_id in left_encrypted_users { let still_share_encrypted_room = services() .rooms + .user .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( services().rooms - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .state_accessor.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) @@ -925,12 +936,14 @@ fn share_encrypted_room( ) -> Result { Ok(services() .rooms + .user .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( services().rooms + .state_accessor .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index afd5d6b..abb669b 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -11,7 +11,7 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -19,14 +19,14 @@ pub async fn create_typing_event_route( } if let Typing::Yes(duration) = body.state { - services().rooms.edus.typing_add( + services().rooms.edus.typing.typing_add( sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), )?; } else { services().rooms - .edus + .edus.typing .typing_remove(sender_user, &body.room_id)?; } diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index 60b4e2f..c94a283 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -50,11 +50,11 @@ pub async fn search_users_route( let user_is_in_public_rooms = services().rooms - .rooms_joined(&user_id) + .state_cache.rooms_joined(&user_id) .filter_map(|r| r.ok()) .any(|room| { services().rooms - .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .state_accessor.room_state_get(&room, &StateEventType::RoomJoinRules, "") .map_or(false, |event| { event.map_or(false, |event| { serde_json::from_str(event.content.get()) @@ -71,7 +71,7 @@ pub async fn search_users_route( let user_is_in_shared_rooms = services() .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .user.get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? .next() .is_some(); diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 2a804f9..9917979 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -14,7 +14,7 @@ pub async fn turn_server_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let turn_secret = services().globals.turn_secret(); + let turn_secret = services().globals.turn_secret().clone(); let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( diff --git a/src/api/server_server.rs b/src/api/server_server.rs index bacc1ac..9aa2beb 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -669,7 +669,7 @@ pub async fn send_transaction_message_route( } }; - acl_check(&sender_servername, &room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( services().globals @@ -727,7 +727,7 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - services().rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services().rooms.timeline.get_pdu_count(id).ok().flatten().map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -744,7 +744,7 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( &user_id, &room_id, event, @@ -757,15 +757,15 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if services().rooms.is_joined(&typing.user_id, &typing.room_id)? { + if services().rooms.state_cache.is_joined(&typing.user_id, &typing.room_id)? 
{ if typing.typing { - services().rooms.edus.typing_add( + services().rooms.edus.typing.typing_add( &typing.user_id, &typing.room_id, 3000 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms.edus.typing_remove( + services().rooms.edus.typing.typing_remove( &typing.user_id, &typing.room_id, )?; @@ -1031,7 +1031,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut i = 0; for id in starting_events { - let short = services().rooms.get_or_create_shorteventid(&id)?; + let short = services().rooms.short.get_or_create_shorteventid(&id)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); i += 1; @@ -1050,7 +1050,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1062,13 +1062,14 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&[sevent_id])? { hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); services().rooms + .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( "cache missed event {} with auth chain len {}", @@ -1091,7 +1092,7 @@ pub(crate) async fn get_auth_chain<'a>( ); let chunk_cache = Arc::new(chunk_cache); services().rooms - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -1104,7 +1105,7 @@ pub(crate) async fn get_auth_chain<'a>( Ok(full_auth_chain .into_iter() - .filter_map(move |sid| services().rooms.get_eventid_from_short(sid).ok())) + .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) } #[tracing::instrument(skip(event_id))] @@ -1116,14 +1117,14 @@ fn get_auth_chain_inner( let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { - match services().rooms.get_pdu(&event_id) { + match services().rooms.timeline.get_pdu(&event_id) { Ok(Some(pdu)) => { if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { let sauthevent = services() - .rooms + .rooms.short .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { @@ -1162,7 +1163,7 @@ pub async fn get_event_route( .expect("server is authenticated"); let event = services() - .rooms + .rooms.timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1174,7 +1175,7 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !services().rooms.server_in_room(sender_servername, room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, room_id)? 
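// Sketch of the per-event cache step inside get_auth_chain above: events map to short
// ids, the auth_chain service is consulted first, and freshly computed chains are
// written back so later lookups hit the cache (`event_id` and `room_id` assumed in scope).
let short = services().rooms.short.get_or_create_shorteventid(&event_id)?;
let auth_chain = match services()
    .rooms
    .auth_chain
    .get_auth_chain_from_cache(&[short])?
{
    Some(cached) => cached, // cache hit: reuse the stored set of short event ids
    None => {
        let computed = Arc::new(get_auth_chain_inner(room_id, &event_id)?);
        services()
            .rooms
            .auth_chain
            .cache_auth_chain(vec![short], Arc::clone(&computed))?;
        computed
    }
};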
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -1203,21 +1204,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = services().rooms.get_pdu_json(&queued_events[i])? { + if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? { let room_id_str = pdu .get("room_id") .and_then(|val| val.as_str()) @@ -1275,17 +1276,17 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let event = services() - .rooms + .rooms.timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1301,7 +1302,7 @@ pub async fn get_event_authorization_route( Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.get_pdu_json(&id).ok()?) + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1322,17 +1323,17 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms + .rooms.state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1340,13 +1341,13 @@ pub async fn get_room_state_route( ))?; let pdus = services() - .rooms + .rooms.state_accessor .state_full_ids(shortstatehash) .await? .into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - services().rooms.get_pdu_json(&id).unwrap().unwrap(), + services().rooms.timeline.get_pdu_json(&id).unwrap().unwrap(), ) }) .collect(); @@ -1357,7 +1358,7 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - services().rooms.get_pdu_json(&id).map(|maybe_json| { + services().rooms.timeline.get_pdu_json(&id).map(|maybe_json| { PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) }) }) @@ -1382,17 +1383,17 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms + .rooms.state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1400,7 +1401,7 @@ pub async fn get_room_state_ids_route( ))?; let pdu_ids = services() - .rooms + .rooms.state_accessor .state_full_ids(shortstatehash) .await? .into_iter() @@ -1426,7 +1427,7 @@ pub async fn create_join_event_template_route( return Err(Error::bad_config("Federation is disabled.")); } - if !services().rooms.exists(&body.room_id)? { + if !services().rooms.metadata.exists(&body.room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", @@ -1438,7 +1439,7 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let mutex_state = Arc::clone( services().globals @@ -1452,7 +1453,7 @@ pub async fn create_join_event_template_route( // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = - services().rooms + services().rooms.state_accessor .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1477,8 +1478,8 @@ pub async fn create_join_event_template_route( } } - let room_version_id = services().rooms.state.get_room_version(&body.room_id); - if !body.ver.contains(room_version_id) { + let room_version_id = services().rooms.state.get_room_version(&body.room_id)?; + if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: room_version_id, @@ -1505,7 +1506,7 @@ pub async fn create_join_event_template_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, - }, &body.user_id, &body.room_id, &state_lock); + }, &body.user_id, &body.room_id, &state_lock)?; drop(state_lock); @@ -1524,18 +1525,18 @@ async fn create_join_event( return Err(Error::bad_config("Federation is disabled.")); } - if !services().rooms.exists(room_id)? { + if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", )); } - acl_check(sender_servername, room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = services() - .rooms + .rooms.state_accessor .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1562,8 +1563,8 @@ async fn create_join_event( // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = services() - .rooms - .current_shortstatehash(room_id)? + .rooms.state + .get_room_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "Pdu state not found.", @@ -1602,22 +1603,15 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? + let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", ))?; drop(mutex_lock); - let state_ids = services().rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), @@ -1626,6 +1620,7 @@ async fn create_join_event( let servers = services() .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -1634,12 +1629,12 @@ async fn create_join_event( Ok(RoomState { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() - .filter_map(|(_, id)| services().rooms.get_pdu_json(id).ok().flatten()) + .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1692,7 +1687,7 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; if !services().rooms.is_supported_version(&body.room_version) { return Err(Error::BadRequest( @@ -1767,8 +1762,8 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); // If the room already exists, the remote server will notify us about the join via /send - if !services().rooms.exists(&pdu.room_id)? { - services().rooms.update_membership( + if !services().rooms.metadata.exists(&pdu.room_id)? { + services().rooms.state_cache.update_membership( &body.room_id, &invited_user, MembershipState::Invite, @@ -1931,274 +1926,6 @@ pub async fn claim_keys_route( }) } -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_required_signing_keys( - event: &BTreeMap, - pub_key_map: &RwLock>>, -) -> Result<()> { - let signatures = event - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? 
- .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let fetch_res = fetch_signing_keys( - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; - - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); - continue; - } - }; - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(signature_server.clone(), keys); - } - - Ok(()) -} - -// Gets a list of servers for which we don't have the signing key yet. We go over -// the PDUs and either cache the key or add it to the list that needs to be retrieved. -fn get_server_keys_from_cache( - pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, -) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } - } - - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
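// Sketch of the backoff rule the helper above applies to events that failed before:
// wait 30s * tries * tries, capped at 24 hours, where `time` and `tries` come from the
// bad_event_ratelimiter entry for this event id.
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
    debug!("Backing off from {}", event_id);
    return Err(Error::BadServerResponse("bad event, still backing off"));
}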
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.to_owned(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) -} - -pub(crate) async fn fetch_join_signing_keys( - event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, -) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); - - { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - - // Try to fetch keys, failure is okay - // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); - } - for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); - } - - drop(pkm); - } - - if servers.is_empty() { - // We had all keys locally - return Ok(()); - } - - for server in services().globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - }, - ) - .await - { - trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for k in keys.server_keys { - let k = k.deserialize().unwrap(); - - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = services() - .globals - .add_signing_key(&k.server_name, k.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); - } - } - - if servers.is_empty() { - return Ok(()); - } - } - - let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { - ( - services().sending - .send_federation_request( - &server, - get_server_keys::v2::Request::new(), - ) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = services() - .globals - .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); - } - } - - Ok(()) -} - -/// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = match services() - .rooms - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
- { - Some(acl) => acl, - None => return Ok(()), - }; - - let acl_event_content: RoomServerAclEventContent = - match serde_json::from_str(acl_event.content.get()) { - Ok(content) => content, - Err(_) => { - warn!("Invalid ACL event"); - return Ok(()); - } - }; - - if acl_event_content.is_allowed(server_name) { - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server was denied by ACL", - )) - } -} - #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index edb027e..f427ba7 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -54,11 +54,11 @@ impl service::appservice::Data for KeyValueDatabase { ) } - fn iter_ids(&self) -> Result>>> { - Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { + fn iter_ids<'a>(&'a self) -> Result> + 'a>> { + Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - })) + }))) } fn all(&self) -> Result> { diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index c48afa9..727004e 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -16,13 +16,13 @@ impl service::rooms::directory::Data for KeyValueDatabase { } fn public_rooms(&self) -> Box>>> { - self.publicroomids.iter().map(|(bytes, _)| { + Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, ) .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) + })) } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 4ea619a..22bfef0 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -513,7 +513,7 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash)) + db.rooms.state_accessor.load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; diff --git a/src/lib.rs b/src/lib.rs index c6e6569..7239900 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,7 @@ enum ServicesEnum { Rocksdb(Services) } -pub fn services() -> Services { - SERVICES.read().unwrap() +pub fn services<'a>() -> &'a Services { + &SERVICES.read().unwrap() } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 7a39922..c56c69d 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -31,80 +31,18 @@ impl Service { event_type: RoomAccountDataEventType, data: &T, ) -> Result<()> { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); - - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { - return 
Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); - } - - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), - )?; - - let prev = self.roomusertype_roomuserdataid.get(&key)?; - - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - - // Remove old entry - if let Some(prev) = prev { - self.roomuserdataid_accountdata.remove(&prev)?; - } - - Ok(()) + self.db.update(room_id, user_id, event_type, data) } /// Searches the account data for a specific kind. - #[tracing::instrument(skip(self, room_id, user_id, kind))] + #[tracing::instrument(skip(self, room_id, user_id, event_type))] pub fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: RoomAccountDataEventType, + event_type: RoomAccountDataEventType, ) -> Result> { - let mut key = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - - self.roomusertype_roomuserdataid - .get(&key)? - .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() - }) - .transpose()? - .map(|data| { - serde_json::from_slice(&data) - .map_err(|_| Error::bad_database("could not deserialize")) - }) - .transpose() + self.db.get(room_id, user_id, event_type) } /// Returns all changes to the account data that happened after `since`. @@ -115,44 +53,6 @@ impl Service { user_id: &UserId, since: u64, ) -> Result>> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since + 1).to_be_bytes()); - - for r in self - .roomuserdataid_accountdata - .iter_from(&first_possible, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - Ok::<_, Error>(( - RoomAccountDataEventType::try_from( - utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( - || Error::bad_database("RoomUserData ID in db is invalid."), - )?) 
- .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { - Error::bad_database("Database contains invalid account data.") - })?, - )) - }) - { - let (kind, data) = r?; - userdata.insert(kind, data); - } - - Ok(userdata) + self.db.changes_since(room_id, user_id, since) } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index dad4ceb..48f828f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -192,7 +192,7 @@ impl Service { mutex_lock: &MutexGuard<'_, ()>| { services() .rooms - .build_and_append_pdu( + .timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) @@ -213,7 +213,7 @@ impl Service { Some(event) = receiver.recv() => { let message_content = match event { AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await }; let mutex_state = Arc::clone( @@ -254,20 +254,20 @@ impl Service { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let admin_command = match parse_admin_command(&command_line) { + let admin_command = match self.parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { let server_name = services().globals.server_name(); let message = error .to_string() .replace("server.name", server_name.as_str()); - let html_message = usage_to_html(&message, server_name); + let html_message = self.usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); } }; - match process_admin_command(admin_command, body).await { + match self.process_admin_command(admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -367,6 +367,8 @@ impl Service { } } AdminCommand::ListRooms => { + todo!(); + /* let room_ids = services().rooms.iter_ids(); let output = format!( "Rooms:\n{}", @@ -385,6 +387,7 @@ impl Service { .join("\n") ); RoomMessageEventContent::text_plain(output) + */ } AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { @@ -412,7 +415,7 @@ impl Service { } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.get_pdu_json(&event_id)? { + if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? 
{ let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) @@ -473,10 +476,10 @@ impl Service { } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services().rooms.timeline.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; - pdu_json = services().rooms.get_pdu_json(&event_id)?; + pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { @@ -506,7 +509,7 @@ impl Service { None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { + AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to get database memory usage: {}", @@ -825,7 +828,7 @@ impl Service { content.room_version = RoomVersionId::V6; // 1. The room create event - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -839,7 +842,7 @@ impl Service { )?; // 2. Make conduit bot join - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -866,7 +869,7 @@ impl Service { let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -884,7 +887,7 @@ impl Service { )?; // 4.1 Join Rules - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) @@ -899,7 +902,7 @@ impl Service { )?; // 4.2 History Visibility - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -916,7 +919,7 @@ impl Service { )?; // 4.3 Guest Access - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) @@ -933,7 +936,7 @@ impl Service { // 5. 
Events implied by name and topic let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) .expect("Room name is valid"); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) @@ -947,7 +950,7 @@ impl Service { &state_lock, )?; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -968,7 +971,7 @@ impl Service { .try_into() .expect("#admins:server_name is a valid alias name"); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -985,7 +988,7 @@ impl Service { &state_lock, )?; - services().rooms.set_alias(&alias, Some(&room_id))?; + services().rooms.alias.set_alias(&alias, &room_id)?; Ok(()) } @@ -1003,7 +1006,8 @@ impl Service { .expect("#admins:server_name is a valid alias name"); let room_id = services() .rooms - .id_from_alias(&admin_room_alias)? + .alias + .resolve_local_alias(&admin_room_alias)? .expect("Admin room must exist"); let mutex_state = Arc::clone( @@ -1021,7 +1025,7 @@ impl Service { .expect("@conduit:server_name is valid"); // Invite and join the real user - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1043,7 +1047,7 @@ impl Service { &room_id, &state_lock, )?; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1071,7 +1075,7 @@ impl Service { users.insert(conduit_user.to_owned(), 100.into()); users.insert(user_id.to_owned(), 100.into()); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -1089,7 +1093,7 @@ impl Service { )?; // Send welcome message - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index cd48e85..a70bf9c 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -13,7 +13,7 @@ pub trait Data { fn get_registration(&self, id: &str) -> Result>; - fn iter_ids(&self) -> Result>>>; + fn iter_ids<'a>(&'a self) -> Result> + 'a>>; fn all(&self) -> Result>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 556ca71..6cfeab8 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,6 +1,7 @@ mod data; pub use data::Data; +use crate::api::server_server::FedDest; use crate::service::*; use crate::{Config, utils, Error, Result}; @@ -36,7 +37,7 @@ type SyncHandle = ( ); pub struct Service { - db: D, + pub db: D, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 8e842d4..ce867fb 100644 --- a/src/service/key_backups/mod.rs +++ 
b/src/service/key_backups/mod.rs @@ -22,36 +22,11 @@ impl Service { user_id: &UserId, backup_metadata: &Raw, ) -> Result { - let version = services().globals.next_count()?.to_string(); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.insert( - &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - )?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version) + self.db.create_backup(user_id, backup_metadata) } pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.remove(&key)?; - self.backupid_etag.remove(&key)?; - - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_backup(user_id, version) } pub fn update_backup( @@ -60,74 +35,18 @@ impl Service { version: &str, backup_metadata: &Raw, ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes())?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version.to_owned()) + self.db.update_backup(user_id, version, backup_metadata) } pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, _)| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) - .transpose() + self.db.get_latest_backup_version(user_id) } pub fn get_latest_backup( &self, user_id: &UserId, ) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, value)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - Ok(( - version, - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") - })?, - )) - }) - .transpose() + self.db.get_latest_backup(user_id) } pub fn get_backup( @@ -135,16 +54,7 @@ impl Service { user_id: &UserId, version: &str, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm - .get(&key)? 
- .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) - }) + self.db.get_backup(user_id, version) } pub fn add_key( @@ -155,52 +65,15 @@ impl Service { session_id: &str, key_data: &Raw, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes())?; - - Ok(()) + self.db.add_key(user_id, version, room_id, session_id, key_data) } pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - - Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) + self.db.count_keys(user_id, version) } pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - Ok(utils::u64_from_bytes( - &self - .backupid_etag - .get(&key)? - .ok_or_else(|| Error::bad_database("Backup has no etag."))?, - ) - .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? - .to_string()) + self.db.get_etag(user_id, version) } pub fn get_all( @@ -208,55 +81,7 @@ impl Service { user_id: &UserId, version: &str, ) -> Result, RoomKeyBackup>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); - - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let room_id = RoomId::parse( - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| { - Error::bad_database("backupkeyid_backup room_id is invalid room id.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((room_id, session_id, key_data)) - }) - { - let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { - sessions: BTreeMap::new(), - }) - .sessions - .insert(session_id, key_data); - } - - Ok(rooms) + self.db.get_all(user_id, version) } pub fn get_room( @@ -265,35 +90,7 @@ impl Service { version: &str, room_id: &RoomId, ) -> Result>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - Ok(self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((session_id, key_data)) - }) - .filter_map(|r| r.ok()) - .collect()) + self.db.get_room(user_id, version, room_id) } pub fn get_session( @@ -303,35 +100,11 @@ impl Service { room_id: &RoomId, session_id: &str, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .get(&key)? 
- .map(|value| { - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - }) - }) - .transpose() + self.db.get_session(user_id, version, room_id, session_id) } pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_all_keys(user_id, version) } pub fn delete_room_keys( @@ -340,18 +113,7 @@ impl Service { version: &str, room_id: &RoomId, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_room_keys(user_id, version, room_id) } pub fn delete_room_key( @@ -361,18 +123,6 @@ impl Service { room_id: &RoomId, session_id: &str, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_room_key(user_id, version, room_id, session_id) } } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index a5aca03..5037809 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -29,7 +29,7 @@ impl Service { file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type); + let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -42,13 +42,13 @@ impl Service { pub async fn upload_thumbnail( &self, mxc: String, - content_disposition: &Option, - content_type: &Option, + content_disposition: &Option<&str>, + content_type: &Option<&str>, width: u32, height: u32, file: &[u8], ) -> Result<()> { - let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type); + let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 66a8ae3..64c7f1f 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -138,6 +138,7 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = services() .rooms + .state_accessor .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) @@ -274,6 +275,7 @@ impl Service { let room_name = if let Some(room_name_pdu) = services().rooms + .state_accessor .room_state_get(&event.room_id, &StateEventType::RoomName, "")? 
{ serde_json::from_str::(room_name_pdu.content.get()) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 113d2e8..9ea4763 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -21,7 +21,7 @@ impl Service { } // We only save auth chains for single events in the db - if key.len == 1 { + if key.len() == 1 { // Check DB cache if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) { diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index a5ce37f..dbe1b6e 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -5,7 +5,7 @@ pub mod typing; pub trait Data: presence::Data + read_receipt::Data + typing::Data {} pub struct Service { - presence: presence::Service, - read_receipt: read_receipt::Service, - typing: typing::Service, + pub presence: presence::Service, + pub read_receipt: read_receipt::Service, + pub typing: typing::Service, } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index c9b041c..8a8725b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -8,22 +8,23 @@ use std::{ time::{Duration, Instant}, }; -use futures_util::Future; +use futures_util::{Future, stream::FuturesUnordered}; use ruma::{ api::{ client::error::ErrorKind, - federation::event::{get_event, get_room_state_ids}, + federation::{event::{get_event, get_room_state_ids}, membership::create_join_event, discovery::get_remote_server_keys_batch::{v2::QueryCriteria, self}}, }, - events::{room::create::RoomCreateEventContent, StateEventType}, + events::{room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, StateEventType}, int, serde::Base64, signatures::CanonicalJsonValue, state_res::{self, RoomVersion, StateMap}, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tracing::{error, info, trace, warn}; -use crate::{service::*, services, Error, PduEvent}; +use crate::{service::*, services, Result, Error, PduEvent}; pub struct Service; @@ -62,10 +63,11 @@ impl Service { is_timeline_event: bool, pub_key_map: &'a RwLock>>, ) -> Result>> { - services().rooms.exists(room_id)?.ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server", - ))?; + if !services().rooms.metadata.exists(room_id)? { + return Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server", + )}; services() .rooms @@ -76,17 +78,18 @@ impl Service { ))?; // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = services().rooms.get_pdu_id(event_id)? { + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { return Ok(Some(pdu_id.to_vec())); } let create_event = services() .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = services() - .rooms + .rooms.timeline .first_pdu_in_room(room_id)? 
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; @@ -111,7 +114,7 @@ impl Service { room_id, pub_key_map, incoming_pdu.prev_events.clone(), - ); + ).await; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { @@ -243,7 +246,7 @@ impl Service { room_id: &'a RoomId, value: BTreeMap, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -367,11 +370,7 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ) - .map_err(|e| { - error!(e); - Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? { + )? { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", @@ -400,16 +399,15 @@ impl Service { origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock>>, - ) -> Result>, String> { + ) -> Result>> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = services().rooms.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } if services() .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? + .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? { return Err("Event has been soft failed".into()); } @@ -438,11 +436,11 @@ impl Service { let prev_event = &*incoming_pdu.prev_events[0]; let prev_event_sstatehash = services() .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; + .state_accessor + .pdu_shortstatehash(prev_event)?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(services().rooms.state_full_ids(shortstatehash).await) + Some(services().rooms.state_accessor.state_full_ids(shortstatehash).await) } else { None }; @@ -451,18 +449,19 @@ impl Service { info!("Using cached state"); let prev_pdu = services() .rooms + .timeline .get_pdu(prev_event) .ok() .flatten() .ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() + Error::bad_database("Could not find prev event, but we know the state.") })?; if let Some(state_key) = &prev_pdu.state_key { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu @@ -501,18 +500,18 @@ impl Service { for (sstatehash, prev_event) in extremity_sstatehashes { let mut leaf_state: BTreeMap<_, _> = services() .rooms + .state_accessor .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; + .await?; if let Some(state_key) = &prev_event.state_key { let shortstatekey = services() .rooms + .short .get_or_create_shortstatekey( &prev_event.kind.to_string().into(), state_key, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + )?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -536,8 +535,7 @@ impl Service { .rooms .auth_chain .get_auth_chain(room_id, starting_events, services()) - 
.await - .map_err(|_| "Failed to load auth chain.".to_owned())? + .await? .collect(), ); @@ -563,16 +561,14 @@ impl Service { .map(|((event_type, state_key), event_id)| { let shortstatekey = services() .rooms + .short .get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, - ) - .map_err(|_| { - "Failed to get_or_create_shortstatekey".to_owned() - })?; + )?; Ok((shortstatekey, event_id)) }) - .collect::>()?, + .collect::>()?, ), Err(e) => { warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); @@ -617,20 +613,19 @@ impl Service { let state_key = pdu .state_key .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); } btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), + Error::bad_database("State event's type and state_key combination exists multiple times."), ), } } @@ -638,21 +633,21 @@ impl Service { // The original create event must still be in the state let create_shortstatekey = services() .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? .expect("Room exists"); if state.get(&create_shortstatekey).map(|id| id.as_ref()) != Some(&create_event.event_id) { - return Err("Incoming event refers to wrong create event.".to_owned()); + return Err(Error::bad_database("Incoming event refers to wrong create event.")); } state_at_incoming_event = Some(state); } Err(e) => { warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); + return Err(e); } }; } @@ -669,17 +664,18 @@ impl Service { |k, s| { services() .rooms + .short .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| services().rooms.get_pdu(event_id).ok().flatten()) + .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) }, ) .map_err(|_e| "Auth check failed.".to_owned())?; if !check_result { - return Err("Event has failed auth check with state at the event.".into()); + return Err(Error::bad_database("Event has failed auth check with state at the event.")); } info!("Auth check succeeded"); @@ -701,8 +697,8 @@ impl Service { info!("Calculating extremities"); let mut extremities = services() .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; + .state + .get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events for prev_event in &incoming_pdu.prev_events { @@ -721,10 +717,9 @@ impl Service { .map(|(shortstatekey, id)| { services() .rooms - .compress_state_event(*shortstatekey, id) - .map_err(|_| "Failed to compress_state_event".to_owned()) + .compress_state_event(*shortstatekey, id)? }) - .collect::>()?; + .collect::>()?; // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); @@ -737,16 +732,14 @@ impl Service { &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; + )? let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; + )?; if soft_fail { self.append_incoming_pdu( @@ -756,18 +749,13 @@ impl Service { state_ids_compressed, soft_fail, &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + )?; // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); services() .rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; + .mark_event_soft_failed(&incoming_pdu.event_id)?; return Err("Event has been soft failed".into()); } @@ -775,15 +763,15 @@ impl Service { info!("Loading current room state ids"); let current_sstatehash = services() .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? + .state + .get_room_shortstatehash(room_id)? .expect("every room has state"); let current_state_ids = services() .rooms + .state_accessor .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; + .await?; info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); @@ -792,14 +780,14 @@ impl Service { for id in dbg!(&extremities) { match services() .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? + .timeline + .get_pdu(id)? { Some(leaf_pdu) => { extremity_sstatehashes.insert( services() - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .rooms.state_accessor + .pdu_shortstatehash(&leaf_pdu.event_id)? .ok_or_else(|| { error!( "Found extremity pdu with no statehash in db: {:?}", @@ -832,8 +820,8 @@ impl Service { if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)? state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -852,10 +840,9 @@ impl Service { .map(|(k, id)| { services() .rooms - .compress_state_event(*k, id) - .map_err(|_| "Failed to compress_state_event.".to_owned()) + .compress_state_event(*k, id)? }) - .collect::>()? + .collect::>()? } else { info!("Loading auth chains"); // We do need to force an update to this room's state @@ -871,8 +858,7 @@ impl Service { room_id, state.iter().map(|(_, id)| id.clone()).collect(), ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? + .await? .collect(), ); } @@ -886,11 +872,10 @@ impl Service { .filter_map(|(k, id)| { services() .rooms - .get_statekey_from_short(k) + .get_statekey_from_short(k)? 
// FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) .ok() }) .collect::>() @@ -927,14 +912,13 @@ impl Service { .map(|((event_type, state_key), event_id)| { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; services() .rooms .compress_state_event(shortstatekey, &event_id) - .map_err(|_| "Failed to compress state event".to_owned()) }) - .collect::>()? + .collect::>()? }; // Set the new room state to the resolved state @@ -942,8 +926,7 @@ impl Service { info!("Forcing new room state"); services() .rooms - .force_state(room_id, new_room_state) - .map_err(|_| "Failed to set new room state.".to_owned())?; + .force_state(room_id, new_room_state)?; } } @@ -962,11 +945,7 @@ impl Service { state_ids_compressed, soft_fail, &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + )?; info!("Appended incoming pdu"); @@ -1227,9 +1206,279 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; + })?; (sorted, eventid_info) } + + #[tracing::instrument(skip_all)] + pub(crate) async fn fetch_required_signing_keys( + &self, + event: &BTreeMap, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let fetch_res = fetch_signing_keys( + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await; + + let keys = match fetch_res { + Ok(keys) => keys, + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; + } + }; + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); + } + + Ok(()) + } + + // Gets a list of servers for which we don't have the signing key yet. We go over + // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
+ fn get_server_keys_from_cache( + &self, + pdu: &RawJsonValue, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + ) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) + } + + pub(crate) async fn fetch_join_signing_keys( + &self, + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = + BTreeMap::new(); + + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + for pdu in &event.room_state.auth_chain { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + + drop(pkm); + } + + if servers.is_empty() { + // We had all keys locally + return Ok(()); + } + + for server in services().globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + for k in keys.server_keys { + let k = k.deserialize().unwrap(); + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = services() + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + } + + if servers.is_empty() { + return Ok(()); + } + } + + let mut futures: FuturesUnordered<_> = servers + .into_iter() + .map(|(server, _)| async move { + ( + services().sending + .send_federation_request( + &server, + get_server_keys::v2::Request::new(), + ) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } + + Ok(()) + } + + /// Returns Ok if the acl allows the server + pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = match services() + .rooms.state_accessor + .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
+ { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) + } + } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index e6b5ce2..a26ed46 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; pub use data::Data; use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; @@ -85,7 +85,7 @@ impl Service { event_id: &EventId, room_id: &RoomId, state_ids_compressed: HashSet, - ) -> Result<()> { + ) -> Result { let shorteventid = services().short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -132,7 +132,7 @@ impl Service { self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) + Ok(shortstatehash) } /// Generates a new StateHash and associates it with the incoming event. @@ -279,4 +279,8 @@ impl Service { pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.db.get_room_shortstatehash(room_id) } + + pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + self.db.get_forward_extremities(room_id) + } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 09f66dd..7669b0b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,7 +1,7 @@ mod data; use std::borrow::Cow; use std::sync::Arc; -use std::{sync::MutexGuard, iter, collections::HashSet}; +use std::{iter, collections::HashSet}; use std::fmt::Debug; pub use data::Data; @@ -13,6 +13,7 @@ use ruma::state_res::RoomVersion; use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; use serde::Deserialize; use serde_json::value::to_raw_value; +use tokio::sync::MutexGuard; use tracing::{warn, error}; use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; @@ -460,7 +461,7 @@ impl Service { sender: &UserId, room_id: &RoomId, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObject) { + ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, content, @@ -471,7 +472,8 @@ impl Service { let prev_events: Vec<_> = services() .rooms - .get_pdu_leaves(room_id)? + .state + .get_forward_extremities(room_id)? .into_iter() .take(20) .collect(); @@ -622,6 +624,8 @@ impl Service { // Generate short event id let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; + + Ok((pdu, pdu_json)) } /// Creates a new persisted data unit and adds it to a room. 
This function takes a @@ -634,7 +638,7 @@ impl Service { room_id: &RoomId, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock); + let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. From face766e0f32481fd97a435f1ed8579d8cfc634c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 12:45:54 +0200 Subject: [PATCH 1213/1727] messing with trait objects --- src/api/client_server/membership.rs | 6 +- src/api/client_server/room.rs | 4 +- src/api/client_server/sync.rs | 2 +- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 175 +---------- src/database/key_value/globals.rs | 8 +- src/database/key_value/media.rs | 4 +- src/database/key_value/pusher.rs | 2 +- src/database/key_value/rooms/alias.rs | 6 +- src/database/key_value/rooms/auth_chain.rs | 6 +- src/database/key_value/rooms/edus/presence.rs | 2 + .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 29 +- src/database/key_value/rooms/metadata.rs | 4 +- src/database/key_value/rooms/search.rs | 10 +- .../key_value/rooms/state_accessor.rs | 26 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 20 +- src/database/key_value/rooms/user.rs | 6 +- src/database/key_value/users.rs | 19 +- src/database/mod.rs | 296 ++++++++++-------- src/lib.rs | 14 +- src/main.rs | 17 +- src/service/account_data/mod.rs | 6 +- src/service/appservice/mod.rs | 6 +- src/service/globals/mod.rs | 8 +- src/service/key_backups/mod.rs | 6 +- src/service/media/mod.rs | 6 +- src/service/mod.rs | 44 ++- src/service/pusher/mod.rs | 6 +- src/service/rooms/alias/data.rs | 2 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 6 +- src/service/rooms/directory/mod.rs | 6 +- src/service/rooms/edus/mod.rs | 8 +- src/service/rooms/edus/presence/mod.rs | 6 +- src/service/rooms/edus/read_receipt/mod.rs | 6 +- src/service/rooms/edus/typing/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 187 ++++++++++- src/service/rooms/lazy_loading/data.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 23 +- src/service/rooms/metadata/mod.rs | 6 +- src/service/rooms/mod.rs | 34 +- src/service/rooms/outlier/mod.rs | 6 +- src/service/rooms/pdu_metadata/mod.rs | 6 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 11 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/state/data.rs | 3 +- src/service/rooms/state/mod.rs | 19 +- src/service/rooms/state_accessor/mod.rs | 6 +- src/service/rooms/state_cache/mod.rs | 6 +- src/service/rooms/state_compressor/data.rs | 8 +- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/mod.rs | 6 +- src/service/rooms/user/mod.rs | 6 +- src/service/transaction_ids/mod.rs | 6 +- src/service/uiaa/mod.rs | 6 +- src/service/users/data.rs | 10 +- src/service/users/mod.rs | 6 +- 61 files changed, 623 insertions(+), 544 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 98931f2..720c1e6 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -481,7 +481,7 @@ async fn 
join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if services().rooms.metadata.is_supported_version(&room_version) => room_version, + Some(room_version) if services().globals.supported_room_versions().contains(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -568,7 +568,7 @@ async fn join_room_by_id_helper( let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - server_server::fetch_join_signing_keys( + services().rooms.event_handler.fetch_join_signing_keys( &send_join_response, &room_version, &pub_key_map, @@ -1048,7 +1048,7 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if services().rooms.is_supported_version(&version) => version, + Some(version) if services().globals.supported_room_versions().contains(&version) => version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index a7fa952..939fbaa 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -99,7 +99,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if services().rooms.is_supported_version(&services(), &room_version) { + if services().globals.supported_room_versions().contains(&room_version) { room_version } else { return Err(Error::BadRequest( @@ -470,7 +470,7 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_supported_version(&body.new_version) { + if !services().globals.supported_room_versions().contains(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index e38ea60..3489a9a 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -175,7 +175,7 @@ async fn sync_helper( services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = services().watch(&sender_user, &sender_device); + let watcher = services().globals.db.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index babf2a7..d926b89 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -197,7 +197,7 @@ where request_map.insert("content".to_owned(), json_body.clone()); }; - let keys_result = server_server::fetch_signing_keys( + let keys_result = services().rooms.event_handler.fetch_signing_keys( &x_matrix.origin, vec![x_matrix.key.to_owned()], ) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9aa2beb..45d749d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -664,7 +664,7 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); + resolved_map.insert(event_id, Err(Error::bad_database("Event needs a valid RoomId."))); 
continue; } }; @@ -707,7 +707,7 @@ pub async fn send_transaction_message_route( for pdu in &resolved_map { if let Err(e) = pdu.1 { - if e != "Room is unknown to this server." { + if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { warn!("Incoming PDU failed {:?}", pdu); } } @@ -854,170 +854,7 @@ pub async fn send_transaction_message_route( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) -} - -/// Search the DB for the signing keys of the given server, if we don't have them -/// fetch them from the server and save to our DB. -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_signing_keys( - origin: &ServerName, - signature_ids: Vec, -) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let permit = services() - .globals - .servername_ratelimiter - .read() - .unwrap() - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - - let permit = match permit { - Some(p) => p, - None => { - let mut write = services().globals.servername_ratelimiter.write().unwrap(); - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); - - s.acquire_owned() - } - } - .await; - - let back_off = |id| match services() - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - if let Some((time, tries)) = services() - .globals - .bad_signature_ratelimiter - .read() - .unwrap() - .get(&signature_ids) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); - } - } - - trace!("Loading signing keys for {}", origin); - - let mut result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
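Above, the per-event entries in `resolved_map` switch from plain strings to conduit's typed `Error`, which lets the logging path match on `ErrorKind::NotFound` instead of comparing message text; further down in this patch the map is turned back into strings only when the federation response is assembled. A small generic sketch of that boundary conversion; the key and error types are stand-ins, not ruma's actual types.

```rust
use std::collections::BTreeMap;

// `K` stands in for ruma's OwnedEventId and `E` for conduit's Error type.
// Typed errors stay internal; stringification happens once, at the wire boundary.
fn to_wire_format<K: Ord, E: std::fmt::Display>(
    resolved: BTreeMap<K, Result<(), E>>,
) -> BTreeMap<K, Result<(), String>> {
    resolved
        .into_iter()
        .map(|(event_id, result)| (event_id, result.map_err(|e| e.to_string())))
        .collect()
}
```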
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if contains_all_ids(&result) { - return Ok(result); - } - - debug!("Fetching signing keys for {} over federation", origin); - - if let Some(server_key) = services() - .sending - .send_federation_request(origin, get_server_keys::v2::Request::new()) - .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - services().globals.add_signing_key(origin, server_key.clone())?; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in services().globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin, - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) - { - trace!("Got signing keys: {:?}", server_keys); - for k in server_keys { - services().globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - - drop(permit); - - back_off(signature_ids); - - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) + Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) } #[tracing::instrument(skip(starting_events))] @@ -1050,7 +887,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1062,7 +899,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? 
{ hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { @@ -1689,7 +1526,7 @@ pub async fn create_invite_route( services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; - if !services().rooms.is_supported_version(&body.room_version) { + if !services().globals.supported_room_versions().contains(&body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 81e6ee1..e665229 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -4,10 +4,10 @@ use crate::{Result, service, database::KeyValueDatabase, Error, utils}; impl service::globals::Data for KeyValueDatabase { fn load_keypair(&self) -> Result { - let keypair_bytes = self.globals.get(b"keypair")?.map_or_else( + let keypair_bytes = self.global.get(b"keypair")?.map_or_else( || { let keypair = utils::generate_keypair(); - self.globals.insert(b"keypair", &keypair)?; + self.global.insert(b"keypair", &keypair)?; Ok::<_, Error>(keypair) }, |s| Ok(s.to_vec()), @@ -33,8 +33,10 @@ impl service::globals::Data for KeyValueDatabase { Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); + + keypair } fn remove_keypair(&self) -> Result<()> { - self.globals.remove(b"keypair")? + self.global.remove(b"keypair") } } diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 90a5c59..a84cbd5 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,3 +1,5 @@ +use ruma::api::client::error::ErrorKind; + use crate::{database::KeyValueDatabase, service, Error, utils, Result}; impl service::media::Data for KeyValueDatabase { @@ -33,7 +35,7 @@ impl service::media::Data for KeyValueDatabase { prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::NotFound)?; + let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; let mut parts = key.rsplit(|&b| b == 0xff); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 35c8463..b05e47b 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -55,6 +55,6 @@ impl service::pusher::Data for KeyValueDatabase { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k)) } } diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index c762def..0aa8dd4 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -56,15 +56,15 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Box>>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
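A pattern that repeats through these key-value hunks: methods that used to return `impl Iterator` now return `Box<dyn Iterator>` and wrap their body in `Box::new(...)`, as in `local_aliases_for_room` just above. That is what makes the `Data` traits usable as trait objects behind `Box<dyn Data>`, since a trait object cannot expose `impl Trait` return types. A simplified sketch of the shape, with illustrative names rather than conduit's real trait:

```rust
trait Data {
    fn local_aliases_for_room<'a>(
        &'a self,
        room_id: &str,
    ) -> Box<dyn Iterator<Item = String> + 'a>;
}

struct KeyValueStore {
    aliases: Vec<(String, String)>, // (room_id, alias)
}

impl Data for KeyValueStore {
    fn local_aliases_for_room<'a>(
        &'a self,
        room_id: &str,
    ) -> Box<dyn Iterator<Item = String> + 'a> {
        let room_id = room_id.to_owned();
        // The concrete iterator type stays private; callers only see the trait object.
        Box::new(
            self.aliases
                .iter()
                .filter(move |(r, _)| *r == room_id)
                .map(|(_, alias)| alias.clone()),
        )
    }
}

fn main() {
    let store = KeyValueStore {
        aliases: vec![("!a:example.org".into(), "#room:example.org".into())],
    };
    // The trait is object-safe, so it can sit behind a Box like the services do.
    let db: Box<dyn Data> = Box::new(store);
    for alias in db.local_aliases_for_room("!a:example.org") {
        println!("{alias}");
    }
}
```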
.try_into() .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) + })) } } diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 585d562..888d472 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -3,8 +3,8 @@ use std::{collections::HashSet, mem::size_of}; use crate::{service, database::KeyValueDatabase, Result, utils}; impl service::rooms::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result> { - self.shorteventid_authchain + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>> { + Ok(self.shorteventid_authchain .get(&shorteventid.to_be_bytes())? .map(|chain| { chain @@ -13,7 +13,7 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { utils::u64_from_bytes(chunk).expect("byte length is correct") }) .collect() - }) + })) } fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index fbbbff5..1477c28 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -145,4 +145,6 @@ fn parse_presence_event(bytes: &[u8]) -> Result { .last_active_ago .map(|timestamp| current_timestamp - timestamp); } + + Ok(presence) } diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 42d250f..a12e265 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -64,7 +64,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - self.readreceiptid_readreceipt + Box::new(self.readreceiptid_readreceipt .iter_from(&first_possible_edu, false) .take_while(move |(k, _)| k.starts_with(&prefix2)) .map(move |(k, v)| { @@ -91,7 +91,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { serde_json::value::to_raw_value(&json).expect("json is valid raw value"), ), )) - }) + })) } fn private_read_set( diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index aaf14dd..133e1d0 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -25,26 +25,19 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: u64, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } + for ll_id in confirmed_user_ids { + let mut key = prefix.clone(); + 
key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; } Ok(()) diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 0509cbb..db2bc69 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,10 +1,10 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{service, database::KeyValueDatabase, Result, services}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { + let prefix = match services().rooms.short.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), None => return Ok(false), }; diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 15937f6..dfbdbc6 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,10 +2,10 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Result}; +use crate::{service, database::KeyValueDatabase, utils, Result, services}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -27,7 +27,7 @@ impl service::rooms::search::Data for KeyValueDatabase { room_id: &RoomId, search_string: &str, ) -> Result>>, Vec)>> { - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -60,11 +60,11 @@ impl service::rooms::search::Data for KeyValueDatabase { }) .map(|iter| { ( - iter.map(move |id| { + Box::new(iter.map(move |id| { let mut pduid = prefix_clone.clone(); pduid.extend_from_slice(&id); pduid - }), + })), words, ) })) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 037b98f..4d5bd4a 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,13 +1,13 @@ use std::{collections::{BTreeMap, HashMap}, sync::Arc}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result}; +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result, services}; use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? 
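In the lazy-loading hunk just above, the storage method stops receiving a `since` token and digging the pending set out of `lazy_load_waiting` itself; the caller now hands it an iterator of already-confirmed user IDs and the data layer only persists them. A hedged sketch of that division of labour; the String keys, the ownership of the bookkeeping map and the method bodies are simplifications, not the exact conduit signatures.

```rust
use std::collections::{HashMap, HashSet};
use std::sync::Mutex;

// Storage side: it only writes down what the caller already confirmed.
trait Data {
    fn lazy_load_confirm_delivery(
        &self,
        user_id: &str,
        device_id: &str,
        room_id: &str,
        confirmed_user_ids: &mut dyn Iterator<Item = String>,
    );
}

// Service side: remembers which member lists were sent per sync token and
// drains that set once the client acknowledges the batch.
struct LazyLoading {
    waiting: Mutex<HashMap<(String, String, String, u64), HashSet<String>>>,
}

impl LazyLoading {
    fn confirm_delivery(&self, db: &dyn Data, user: &str, device: &str, room: &str, since: u64) {
        if let Some(users) = self.waiting.lock().unwrap().remove(&(
            user.to_owned(),
            device.to_owned(),
            room.to_owned(),
            since,
        )) {
            // The data layer no longer needs to know about `since`; it just stores
            // whatever the service says was delivered.
            db.lazy_load_confirm_delivery(user, device, room, &mut users.into_iter());
        }
    }
}
```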
.pop() .expect("there is always one layer") @@ -15,7 +15,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = BTreeMap::new(); let mut i = 0; for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; + let parsed = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -30,7 +30,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, shortstatehash: u64, ) -> Result>> { - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -39,8 +39,8 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { + let (_, eventid) = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( pdu.kind.to_string().into(), @@ -69,11 +69,11 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { + let shortstatekey = match services().rooms.short.get_shortstatekey(event_type, state_key)? { Some(s) => s, None => return Ok(None), }; - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -82,7 +82,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { - self.parse_compressed_state_event(compressed) + services().rooms.state_compressor.parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) @@ -96,7 +96,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id)) } /// Returns the state hash for this pdu. @@ -122,7 +122,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) @@ -136,7 +136,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { self.state_get_id(current_shortstatehash, event_type, state_key) } else { Ok(None) @@ -150,7 +150,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? 
{ self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 23a7122..aee1890 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -39,8 +39,8 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { } fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { - let mut value = diff.parent.to_be_bytes().to_vec(); - for new in &diff.new { + let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + for new in &diff.added { value.extend_from_slice(&new[..]); } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index c42509e..a3b6c17 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -3,7 +3,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result}; +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { @@ -191,7 +191,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>>>> { - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -203,7 +203,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(&first_pdu_id, false) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -214,7 +214,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -226,7 +226,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { until: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -239,7 +239,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(current, true) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -250,7 +250,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } fn pdus_after<'a>( @@ -260,7 +260,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { from: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? 
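In the state-compressor hunk above, `save_statediff` now takes the parent layer as an `Option<u64>` (and the added set is named `added`); the option is flattened into the stored value with `unwrap_or(0)`, so a zero prefix stands for "no parent layer". A small sketch of that encoding and a matching decode, assuming, as the patch implies but does not show, that 0 is never handed out as a real shortstatehash:

```rust
struct StateDiff {
    parent: Option<u64>,
    added: Vec<Vec<u8>>, // compressed state events; fixed-width byte strings in the real code
}

fn encode(diff: &StateDiff) -> Vec<u8> {
    // Assumed sentinel: 0 means "this layer has no parent".
    let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec();
    for new in &diff.added {
        value.extend_from_slice(new);
    }
    // The real value also carries the removed set after a separator; elided here.
    value
}

fn decode_parent(value: &[u8]) -> Option<u64> {
    let parent = u64::from_be_bytes(value[..8].try_into().expect("value has at least 8 bytes"));
    (parent != 0).then_some(parent)
}
```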
.expect("room exists") .to_be_bytes() @@ -273,7 +273,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(current, false) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -284,6 +284,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } } diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index d49bc1d..66681e3 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ use ruma::{UserId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -50,7 +50,7 @@ impl service::rooms::user::Data for KeyValueDatabase { token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -60,7 +60,7 @@ impl service::rooms::user::Data for KeyValueDatabase { } fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 82e3bac..338d880 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -57,12 +57,12 @@ impl service::users::Data for KeyValueDatabase { /// Returns an iterator over all users on this homeserver. fn iter(&self) -> Box>>> { - self.userid_password.iter().map(|(bytes, _)| { + Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) + })) } /// Returns a list of local users as list of usernames. @@ -274,7 +274,7 @@ impl service::users::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata - self.userdeviceid_metadata + Box::new(self.userdeviceid_metadata .scan_prefix(prefix) .map(|(bytes, _)| { Ok(utils::string_from_bytes( @@ -285,7 +285,7 @@ impl service::users::Data for KeyValueDatabase { ) .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? .into()) - }) + })) } /// Replaces the access token of one device. @@ -617,7 +617,7 @@ impl service::users::Data for KeyValueDatabase { let to = to.unwrap_or(u64::MAX); - self.keychangeid_userid + Box::new(self.keychangeid_userid .iter_from(&start, false) .take_while(move |(k, _)| { k.starts_with(&prefix) @@ -638,7 +638,7 @@ impl service::users::Data for KeyValueDatabase { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) 
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) + })) } fn mark_device_key_update( @@ -646,9 +646,10 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, ) -> Result<()> { let count = services().globals.next_count()?.to_be_bytes(); - for room_id in services().rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms if services().rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() { @@ -882,12 +883,12 @@ impl service::users::Data for KeyValueDatabase { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - self.userdeviceid_metadata + Box::new(self.userdeviceid_metadata .scan_prefix(key) .map(|(_, bytes)| { serde_json::from_slice::(&bytes) .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) + })) } /// Creates a new sync filter. Returns the filter id. diff --git a/src/database/mod.rs b/src/database/mod.rs index 22bfef0..aa5c583 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,7 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms::{self, state_compressor::CompressedStateEvent}, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}, services, PduEvent, Services, SERVICES}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -9,7 +9,7 @@ use lru_cache::LruCache; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, - GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, @@ -151,6 +151,30 @@ pub struct KeyValueDatabase { //pub pusher: pusher::PushData, pub(super) senderkey_pusher: Arc, + + pub(super) cached_registrations: Arc>>, + pub(super) pdu_cache: Mutex, Arc>>, + pub(super) shorteventid_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex, Arc>>>, + pub(super) eventidshort_cache: Mutex, u64>>, + pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, + pub(super) our_real_users_cache: RwLock, Arc>>>>, + pub(super) appservice_in_room_cache: RwLock, HashMap>>, + pub(super) lazy_load_waiting: + Mutex, Box, Box, u64), HashSet>>>, + pub(super) stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl KeyValueDatabase { @@ -214,7 +238,7 @@ impl KeyValueDatabase { } /// Load an existing database or create a new one. 
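The struct above consolidates the in-memory caches (pdu_cache, auth_chain_cache, the short-ID lookup caches, our_real_users_cache, lazy_load_waiting, stateinfo_cache, lasttimelinecount_cache) into the flat `KeyValueDatabase`. In the constructor further down they are sized from two config knobs: an absolute `pdu_cache_capacity` and a floating-point `conduit_cache_capacity_modifier` that scales everything else. A compiling sketch of that sizing pattern, reduced to two caches with simplified key and value types:

```rust
use lru_cache::LruCache;
use std::sync::Mutex;

struct Config {
    pdu_cache_capacity: u32,
    conduit_cache_capacity_modifier: f64,
}

struct Caches {
    pdu_cache: Mutex<LruCache<Vec<u8>, Vec<u8>>>,
    shorteventid_cache: Mutex<LruCache<u64, Vec<u8>>>,
}

fn build_caches(config: &Config) -> Caches {
    Caches {
        // Absolute capacity, taken directly from the config.
        pdu_cache: Mutex::new(LruCache::new(
            config
                .pdu_cache_capacity
                .try_into()
                .expect("pdu cache capacity fits into usize"),
        )),
        // Relative capacity: one modifier scales every remaining cache.
        shorteventid_cache: Mutex::new(LruCache::new(
            (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
        )),
    }
}
```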
- pub async fn load_or_create(config: &Config) -> Result>> { + pub async fn load_or_create(config: &Config) -> Result<()> { Self::check_db_setup(config)?; if !Path::new(&config.database_path).exists() { @@ -253,7 +277,7 @@ impl KeyValueDatabase { let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - let db = Self { + let db = Arc::new(Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -345,18 +369,53 @@ impl KeyValueDatabase { senderkey_pusher: builder.open_tree("senderkey_pusher")?, global: builder.open_tree("global")?, server_signingkeys: builder.open_tree("server_signingkeys")?, - }; - // TODO: do this after constructing the db + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + + }); + + let services_raw = Services::build(Arc::clone(&db)); + + // This is the first and only time we initialize the SERVICE static + *SERVICES.write().unwrap() = Some(services_raw); + // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. - if guard.users.count()? > 0 { + if services().users.count()? > 0 { let conduit_user = - UserId::parse_with_server_name("conduit", guard.globals.server_name()) + UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); - if !guard.users.exists(&conduit_user)? { + if !services().users.exists(&conduit_user)? { error!( "The {} server user does not exist, and the database is not new.", conduit_user @@ -370,11 +429,10 @@ impl KeyValueDatabase { // If the database has any data, perform data migrations before starting let latest_database_version = 11; - if guard.users.count()? > 0 { - let db = &*guard; + if services().users.count()? > 0 { // MIGRATIONS - if db.globals.database_version()? < 1 { - for (roomserverid, _) in db.rooms.roomserverids.iter() { + if services().globals.database_version()? 
< 1 { + for (roomserverid, _) in db.roomserverids.iter() { let mut parts = roomserverid.split(|&b| b == 0xff); let room_id = parts.next().expect("split always returns one element"); let servername = match parts.next() { @@ -388,17 +446,17 @@ impl KeyValueDatabase { serverroomid.push(0xff); serverroomid.extend_from_slice(room_id); - db.rooms.serverroomids.insert(&serverroomid, &[])?; + db.serverroomids.insert(&serverroomid, &[])?; } - db.globals.bump_database_version(1)?; + services().globals.bump_database_version(1)?; warn!("Migration: 0 -> 1 finished"); } - if db.globals.database_version()? < 2 { + if services().globals.database_version()? < 2 { // We accidentally inserted hashed versions of "" into the db instead of just "" - for (userid, password) in db.users.userid_password.iter() { + for (userid, password) in db.userid_password.iter() { let password = utils::string_from_bytes(&password); let empty_hashed_password = password.map_or(false, |password| { @@ -406,59 +464,59 @@ impl KeyValueDatabase { }); if empty_hashed_password { - db.users.userid_password.insert(&userid, b"")?; + db.userid_password.insert(&userid, b"")?; } } - db.globals.bump_database_version(2)?; + services().globals.bump_database_version(2)?; warn!("Migration: 1 -> 2 finished"); } - if db.globals.database_version()? < 3 { + if services().globals.database_version()? < 3 { // Move media to filesystem - for (key, content) in db.media.mediaid_file.iter() { + for (key, content) in db.mediaid_file.iter() { if content.is_empty() { continue; } - let path = db.globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = fs::File::create(path)?; file.write_all(&content)?; - db.media.mediaid_file.insert(&key, &[])?; + db.mediaid_file.insert(&key, &[])?; } - db.globals.bump_database_version(3)?; + services().globals.bump_database_version(3)?; warn!("Migration: 2 -> 3 finished"); } - if db.globals.database_version()? < 4 { - // Add federated users to db as deactivated - for our_user in db.users.iter() { + if services().globals.database_version()? < 4 { + // Add federated users to services() as deactivated + for our_user in services().users.iter() { let our_user = our_user?; - if db.users.is_deactivated(&our_user)? { + if services().users.is_deactivated(&our_user)? { continue; } - for room in db.rooms.rooms_joined(&our_user) { - for user in db.rooms.room_members(&room?) { + for room in services().rooms.state_cache.rooms_joined(&our_user) { + for user in services().rooms.state_cache.room_members(&room?) { let user = user?; - if user.server_name() != db.globals.server_name() { + if user.server_name() != services().globals.server_name() { println!("Migration: Creating user {}", user); - db.users.create(&user, None)?; + services().users.create(&user, None)?; } } } } - db.globals.bump_database_version(4)?; + services().globals.bump_database_version(4)?; warn!("Migration: 3 -> 4 finished"); } - if db.globals.database_version()? < 5 { + if services().globals.database_version()? 
< 5 { // Upgrade user data store - for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() { + for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() { let mut parts = roomuserdataid.split(|&b| b == 0xff); let room_id = parts.next().unwrap(); let user_id = parts.next().unwrap(); @@ -470,30 +528,29 @@ impl KeyValueDatabase { key.push(0xff); key.extend_from_slice(event_type); - db.account_data - .roomusertype_roomuserdataid + db.roomusertype_roomuserdataid .insert(&key, &roomuserdataid)?; } - db.globals.bump_database_version(5)?; + services().globals.bump_database_version(5)?; warn!("Migration: 4 -> 5 finished"); } - if db.globals.database_version()? < 6 { + if services().globals.database_version()? < 6 { // Set room member count - for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { + for (roomid, _) in db.roomid_shortstatehash.iter() { let string = utils::string_from_bytes(&roomid).unwrap(); let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); - db.rooms.update_joined_count(room_id, &db)?; + services().rooms.state_cache.update_joined_count(room_id)?; } - db.globals.bump_database_version(6)?; + services().globals.bump_database_version(6)?; warn!("Migration: 5 -> 6 finished"); } - if db.globals.database_version()? < 7 { + if services().globals.database_version()? < 7 { // Upgrade state store let mut last_roomstates: HashMap, u64> = HashMap::new(); let mut current_sstatehash: Option = None; @@ -513,7 +570,7 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - db.rooms.state_accessor.load_shortstatehash_info(dbg!(last_roomsstatehash)) + services().rooms.state_compressor.load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; @@ -535,7 +592,7 @@ impl KeyValueDatabase { (current_state, HashSet::new()) }; - db.rooms.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( dbg!(current_sstatehash), statediffnew, statediffremoved, @@ -544,7 +601,7 @@ impl KeyValueDatabase { )?; /* - let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?; + let mut tmp = services().rooms.load_shortstatehash_info(¤t_sstatehash)?; let state = tmp.pop().unwrap(); println!( "{}\t{}{:?}: {:?} + {:?} - {:?}", @@ -587,14 +644,13 @@ impl KeyValueDatabase { current_sstatehash = Some(sstatehash); let event_id = db - .rooms .shorteventid_eventid .get(&seventid) .unwrap() .unwrap(); let string = utils::string_from_bytes(&event_id).unwrap(); let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap(); + let pdu = services().rooms.timeline.get_pdu(event_id).unwrap().unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); @@ -615,20 +671,20 @@ impl KeyValueDatabase { )?; } - db.globals.bump_database_version(7)?; + services().globals.bump_database_version(7)?; warn!("Migration: 6 -> 7 finished"); } - if db.globals.database_version()? < 8 { + if services().globals.database_version()? 
< 8 { // Generate short room ids for all rooms - for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { - let shortroomid = db.globals.next_count()?.to_be_bytes(); - db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; + for (room_id, _) in db.roomid_shortstatehash.iter() { + let shortroomid = services().globals.next_count()?.to_be_bytes(); + db.roomid_shortroomid.insert(&room_id, &shortroomid)?; info!("Migration: 8"); } // Update pduids db layout - let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { + let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| { if !key.starts_with(b"!") { return None; } @@ -637,7 +693,6 @@ impl KeyValueDatabase { let count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -649,9 +704,9 @@ impl KeyValueDatabase { Some((new_key, v)) }); - db.rooms.pduid_pdu.insert_batch(&mut batch)?; + db.pduid_pdu.insert_batch(&mut batch)?; - let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| { + let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| { if !value.starts_with(b"!") { return None; } @@ -660,7 +715,6 @@ impl KeyValueDatabase { let count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -672,17 +726,16 @@ impl KeyValueDatabase { Some((k, new_value)) }); - db.rooms.eventid_pduid.insert_batch(&mut batch2)?; + db.eventid_pduid.insert_batch(&mut batch2)?; - db.globals.bump_database_version(8)?; + services().globals.bump_database_version(8)?; warn!("Migration: 7 -> 8 finished"); } - if db.globals.database_version()? < 9 { + if services().globals.database_version()? < 9 { // Update tokenids db layout let mut iter = db - .rooms .tokenids .iter() .filter_map(|(key, _)| { @@ -696,7 +749,6 @@ impl KeyValueDatabase { let pdu_id_count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -712,8 +764,7 @@ impl KeyValueDatabase { .peekable(); while iter.peek().is_some() { - db.rooms - .tokenids + db.tokenids .insert_batch(&mut iter.by_ref().take(1000))?; println!("smaller batch done"); } @@ -721,7 +772,6 @@ impl KeyValueDatabase { info!("Deleting starts"); let batch2: Vec<_> = db - .rooms .tokenids .iter() .filter_map(|(key, _)| { @@ -736,38 +786,37 @@ impl KeyValueDatabase { for key in batch2 { println!("del"); - db.rooms.tokenids.remove(&key)?; + db.tokenids.remove(&key)?; } - db.globals.bump_database_version(9)?; + services().globals.bump_database_version(9)?; warn!("Migration: 8 -> 9 finished"); } - if db.globals.database_version()? < 10 { + if services().globals.database_version()? < 10 { // Add other direction for shortstatekeys - for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() { - db.rooms - .shortstatekey_statekey + for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() { + db.shortstatekey_statekey .insert(&shortstatekey, &statekey)?; } // Force E2EE device list updates so we can send them over federation - for user_id in db.users.iter().filter_map(|r| r.ok()) { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + for user_id in services().users.iter().filter_map(|r| r.ok()) { + services().users + .mark_device_key_update(&user_id)?; } - db.globals.bump_database_version(10)?; + services().globals.bump_database_version(10)?; warn!("Migration: 9 -> 10 finished"); } - if db.globals.database_version()? < 11 { + if services().globals.database_version()? 
< 11 { db._db .open_tree("userdevicesessionid_uiaarequest")? .clear()?; - db.globals.bump_database_version(11)?; + services().globals.bump_database_version(11)?; warn!("Migration: 10 -> 11 finished"); } @@ -779,12 +828,12 @@ impl KeyValueDatabase { config.database_backend, latest_database_version ); } else { - guard + services() .globals .bump_database_version(latest_database_version)?; // Create the admin room and server user on first run - create_admin_room().await?; + services().admin.create_admin_room().await?; warn!( "Created new {} database with version {}", @@ -793,16 +842,16 @@ impl KeyValueDatabase { } // This data is probably outdated - guard.rooms.edus.presenceid_presence.clear()?; + db.presenceid_presence.clear()?; - guard.admin.start_handler(Arc::clone(&db), admin_receiver); + services().admin.start_handler(admin_receiver); // Set emergency access for the conduit user - match set_emergency_access(&guard) { + match set_emergency_access() { Ok(pwd_set) => { if pwd_set { warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"); - guard.admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!")); + services().admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!")); } } Err(e) => { @@ -813,21 +862,19 @@ impl KeyValueDatabase { } }; - guard + services() .sending - .start_handler(Arc::clone(&db), sending_receiver); + .start_handler(sending_receiver); - drop(guard); + Self::start_cleanup_task(config).await; - Self::start_cleanup_task(Arc::clone(&db), config).await; - - Ok(db) + Ok(()) } #[cfg(feature = "conduit_bin")] - pub async fn on_shutdown(db: Arc>) { + pub async fn on_shutdown() { info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - db.read().await.globals.rotate.fire(); + services().globals.rotate.fire(); } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { @@ -844,33 +891,30 @@ impl KeyValueDatabase { // Return when *any* user changed his key // TODO: only send for user they share a room with futures.push( - self.users - .todeviceid_events + self.todeviceid_events .watch_prefix(&userdeviceid_prefix), ); - futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); futures.push( - self.rooms - .userroomid_invitestate + self.userroomid_invitestate .watch_prefix(&userid_prefix), ); - futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); futures.push( - self.rooms - .userroomid_notificationcount + self.userroomid_notificationcount .watch_prefix(&userid_prefix), ); futures.push( - self.rooms - .userroomid_highlightcount + self.userroomid_highlightcount .watch_prefix(&userid_prefix), ); // Events for rooms we are in - for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let short_roomid = self + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + let short_roomid = services() .rooms + .short .get_shortroomid(&room_id) .ok() .flatten() @@ -883,33 +927,28 @@ impl KeyValueDatabase { roomid_prefix.push(0xff); // PDUs - futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid)); + 
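Stepping back from the guard-to-services renames, the whole migration block above follows one scaffold: each step is gated on the persisted `database_version()` and bumps it on success, so an interrupted upgrade resumes at the first unfinished step, while a brand-new database skips the steps and is stamped with the latest version directly. A minimal sketch of that scaffold; `Store` and its methods are stand-ins for the real database handle and globals service, and the actual tree rewrites are elided.

```rust
struct Store {
    version: u64,
}

impl Store {
    fn database_version(&self) -> u64 {
        self.version
    }
    fn bump_database_version(&mut self, new: u64) {
        self.version = new;
    }
}

fn run_migrations(db: &mut Store) {
    const LATEST_DATABASE_VERSION: u64 = 11;

    if db.database_version() < 1 {
        // e.g. derive the serverroomids tree from roomserverids
        db.bump_database_version(1);
    }
    if db.database_version() < 2 {
        // e.g. reset accidentally hashed empty passwords back to ""
        db.bump_database_version(2);
    }
    // ...steps 3 through 11 follow the same shape...

    // A database with nothing to migrate jumps straight to the latest version.
    if db.database_version() < LATEST_DATABASE_VERSION {
        db.bump_database_version(LATEST_DATABASE_VERSION);
    }
}
```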
futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs futures.push( - self.rooms - .edus - .roomid_lasttypingupdate + self.roomid_lasttypingupdate .watch_prefix(&roomid_bytes), ); futures.push( - self.rooms - .edus - .readreceiptid_readreceipt + self.readreceiptid_readreceipt .watch_prefix(&roomid_prefix), ); // Key changes - futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix)); + futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); // Room account data let mut roomuser_prefix = roomid_prefix.clone(); roomuser_prefix.extend_from_slice(&userid_prefix); futures.push( - self.account_data - .roomusertype_roomuserdataid + self.roomusertype_roomuserdataid .watch_prefix(&roomuser_prefix), ); } @@ -918,22 +957,20 @@ impl KeyValueDatabase { globaluserdata_prefix.extend_from_slice(&userid_prefix); futures.push( - self.account_data - .roomusertype_roomuserdataid + self.roomusertype_roomuserdataid .watch_prefix(&globaluserdata_prefix), ); // More key changes (used when user is not joined to any rooms) - futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix)); + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); // One time keys futures.push( - self.users - .userid_lastonetimekeyupdate + self.userid_lastonetimekeyupdate .watch_prefix(&userid_bytes), ); - futures.push(Box::pin(self.globals.rotate.watch())); + futures.push(Box::pin(services().globals.rotate.watch())); // Wait until one of them finds something futures.next().await; @@ -950,8 +987,8 @@ impl KeyValueDatabase { res } - #[tracing::instrument(skip(db, config))] - pub async fn start_cleanup_task(db: Arc>, config: &Config) { + #[tracing::instrument(skip(config))] + pub async fn start_cleanup_task(config: &Config) { use tokio::time::interval; #[cfg(unix)] @@ -984,7 +1021,7 @@ impl KeyValueDatabase { } let start = Instant::now(); - if let Err(e) = db.read().await._db.cleanup() { + if let Err(e) = services().globals.db._db.cleanup() { error!("cleanup: Errored: {}", e); } else { info!("cleanup: Finished in {:?}", start.elapsed()); @@ -995,26 +1032,25 @@ impl KeyValueDatabase { } /// Sets the emergency password and push rules for the @conduit account in case emergency password is set -fn set_emergency_access(db: &KeyValueDatabase) -> Result { - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) +fn set_emergency_access() -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is a valid UserId"); - db.users - .set_password(&conduit_user, db.globals.emergency_password().as_deref())?; + services().users + .set_password(&conduit_user, services().globals.emergency_password().as_deref())?; - let (ruleset, res) = match db.globals.emergency_password() { + let (ruleset, res) = match services().globals.emergency_password() { Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), None => (Ruleset::new(), Ok(false)), }; - db.account_data.update( + services().account_data.update( None, &conduit_user, GlobalAccountDataEventType::PushRules.to_string().into(), &GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, }, - &db.globals, )?; res diff --git a/src/lib.rs b/src/lib.rs index 7239900..75cf6c7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,22 +13,16 @@ mod service; pub mod api; mod utils; -use std::{cell::Cell, sync::RwLock}; +use std::{cell::Cell, sync::{RwLock, Arc}}; pub use config::Config; pub use utils::error::{Error, Result}; 
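The rewritten `watch` above pushes one `watch_prefix` future per key range it cares about (to-device events, membership trees, PDUs and EDUs of joined rooms, account data, key changes, the rotation handle) into a `FuturesUnordered` and returns as soon as any of them fires, which is what lets sync long-polling wake up on the first relevant change. A stripped-down sketch of that wait-for-any pattern, with stand-in futures instead of database watchers:

```rust
use futures_util::{stream::FuturesUnordered, StreamExt};
use std::future::Future;
use std::pin::Pin;

// Each watcher here stands in for a `watch_prefix` future that resolves when a
// write touches the watched key range.
async fn wait_for_any(watchers: Vec<Pin<Box<dyn Future<Output = ()> + Send>>>) {
    let mut futures: FuturesUnordered<_> = watchers.into_iter().collect();
    // Yields as soon as the first watcher fires; the remaining futures are dropped.
    futures.next().await;
}
```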
pub use service::{Services, pdu::PduEvent}; pub use api::ruma_wrapper::{Ruma, RumaResponse}; -use crate::database::KeyValueDatabase; +pub static SERVICES: RwLock>> = RwLock::new(None); -pub static SERVICES: RwLock> = RwLock::new(None); - -enum ServicesEnum { - Rocksdb(Services) -} - -pub fn services<'a>() -> &'a Services { - &SERVICES.read().unwrap() +pub fn services<'a>() -> Arc { + Arc::clone(&SERVICES.read().unwrap()) } diff --git a/src/main.rs b/src/main.rs index 543b953..d5b2731 100644 --- a/src/main.rs +++ b/src/main.rs @@ -69,19 +69,14 @@ async fn main() { config.warn_deprecated(); - let db = match KeyValueDatabase::load_or_create(&config).await { - Ok(db) => db, - Err(e) => { - eprintln!( - "The database couldn't be loaded or created. The following error occured: {}", - e - ); - std::process::exit(1); - } + if let Err(e) = KeyValueDatabase::load_or_create(&config).await { + eprintln!( + "The database couldn't be loaded or created. The following error occured: {}", + e + ); + std::process::exit(1); }; - SERVICES.set(db).expect("this is the first and only time we initialize the SERVICE static"); - let start = async { run_server().await.unwrap(); }; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index c56c69d..35ca149 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -17,11 +17,11 @@ use tracing::error; use crate::{service::*, services, utils, Error, Result}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 63fa3af..1a5ce50 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -3,11 +3,11 @@ pub use data::Data; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Registers an appservice and returns the ID to the caller pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { self.db.register_appservice(yaml) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 6cfeab8..48d7b06 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -36,8 +36,8 @@ type SyncHandle = ( Receiver>>, // rx ); -pub struct Service { - pub db: D, +pub struct Service { + pub db: Box, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -92,9 +92,9 @@ impl Default for RotationHandler { } -impl Service { +impl Service { pub fn load( - db: D, + db: Box, config: Config, ) -> Result { let keypair = db.load_keypair(); diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index ce867fb..4bd9efd 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -12,11 +12,11 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5037809..d61292b 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -15,11 +15,11 @@ pub struct FileMeta { pub file: Vec, } -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Uploads a file. 
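In lib.rs above, the typed once-cell is replaced by a `SERVICES` static holding an optional `Arc<Services>` behind an `RwLock`, set exactly once by `load_or_create`, plus a free `services()` accessor that hands out a clone of the Arc. Because the static guards an `Option`, the accessor has to unwrap it before cloning. A minimal compiling sketch of the pattern; the `init_services` helper and the expect message are illustrative, not code from the patch.

```rust
use std::sync::{Arc, RwLock};

pub struct Services {
    // ...one field per service, each built over the shared database handle...
}

// Set exactly once during startup, read everywhere else.
pub static SERVICES: RwLock<Option<Arc<Services>>> = RwLock::new(None);

pub fn init_services(services: Arc<Services>) {
    *SERVICES.write().unwrap() = Some(services);
}

pub fn services() -> Arc<Services> {
    Arc::clone(
        SERVICES
            .read()
            .unwrap()
            .as_ref()
            .expect("SERVICES is initialized before any request handler runs"),
    )
}
```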
pub async fn create( &self, diff --git a/src/service/mod.rs b/src/service/mod.rs index 4364c72..47d4651 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + pub mod account_data; pub mod admin; pub mod appservice; @@ -12,18 +14,36 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; -pub struct Services -{ - pub appservice: appservice::Service, - pub pusher: pusher::Service, - pub rooms: rooms::Service, - pub transaction_ids: transaction_ids::Service, - pub uiaa: uiaa::Service, - pub users: users::Service, - pub account_data: account_data::Service, +pub struct Services { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + pub account_data: account_data::Service, pub admin: admin::Service, - pub globals: globals::Service, - pub key_backups: key_backups::Service, - pub media: media::Service, + pub globals: globals::Service, + pub key_backups: key_backups::Service, + pub media: media::Service, pub sending: sending::Service, } + +impl Services { + pub fn build(db: Arc) { + Self { + appservice: appservice::Service { db: Arc::clone(&db) }, + pusher: appservice::Service { db: Arc::clone(&db) }, + rooms: appservice::Service { db: Arc::clone(&db) }, + transaction_ids: appservice::Service { db: Arc::clone(&db) }, + uiaa: appservice::Service { db: Arc::clone(&db) }, + users: appservice::Service { db: Arc::clone(&db) }, + account_data: appservice::Service { db: Arc::clone(&db) }, + admin: appservice::Service { db: Arc::clone(&db) }, + globals: appservice::Service { db: Arc::clone(&db) }, + key_backups: appservice::Service { db: Arc::clone(&db) }, + media: appservice::Service { db: Arc::clone(&db) }, + sending: appservice::Service { db: Arc::clone(&db) }, + } + } +} diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 64c7f1f..af30ca4 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -23,11 +23,11 @@ use ruma::{ use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { self.db.set_pusher(sender, pusher) } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index c5d45e3..8102209 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -25,5 +25,5 @@ pub trait Data { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result>>; + ) -> Box>>>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index abe299d..ef5888f 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{RoomAliasId, RoomId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn set_alias( &self, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 5177d6d..e4e8550 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -2,6 +2,6 @@ use std::collections::HashSet; use crate::Result; pub trait Data { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>; + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>>; fn 
cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 9ea4763..26a3f3f 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -5,11 +5,11 @@ pub use data::Data; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn get_cached_eventid_authchain<'a>( &'a self, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 6853505..fb28994 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -4,11 +4,11 @@ use ruma::RoomId; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index dbe1b6e..8552363 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -4,8 +4,8 @@ pub mod typing; pub trait Data: presence::Data + read_receipt::Data + typing::Data {} -pub struct Service { - pub presence: presence::Service, - pub read_receipt: read_receipt::Service, - pub typing: typing::Service, +pub struct Service { + pub presence: presence::Service, + pub read_receipt: read_receipt::Service, + pub typing: typing::Service, } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 646cf54..73b7b5a 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -6,11 +6,11 @@ use ruma::{RoomId, UserId, events::presence::PresenceEvent}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 3f0b147..2a4c0b7 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Replaces the previous read receipt. pub fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 00cfdec..16a135f 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -4,11 +4,11 @@ use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8a8725b..e229112 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,14 +1,16 @@ /// An async function that can recursively call itself. 
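Every service module above now declares `pub struct Service { db: Box<dyn Data> }` in place of the old generic `Service<D: Data>`, and `Services::build` is meant to wire each one up from a single shared `Arc<KeyValueDatabase>` (in the diff it still fills every field with `appservice::Service` and bare `Arc` clones, consistent with the commit's work-in-progress title). A sketch of a shape that compiles, using two toy services; the trait methods and names are placeholders, not conduit's real APIs.

```rust
use std::sync::Arc;

mod appservice {
    pub trait Data: Send + Sync {
        fn register_appservice(&self, yaml: &str) -> String;
    }
    pub struct Service {
        pub db: Box<dyn Data>,
    }
}

mod pusher {
    pub trait Data: Send + Sync {
        fn set_pusher(&self, sender: &str);
    }
    pub struct Service {
        pub db: Box<dyn Data>,
    }
}

struct KeyValueDatabase;

// Implementing the per-service traits for Arc<KeyValueDatabase> lets every
// service box its own clone of the one shared handle.
impl appservice::Data for Arc<KeyValueDatabase> {
    fn register_appservice(&self, yaml: &str) -> String {
        yaml.to_owned() // stand-in for the real key-value write
    }
}
impl pusher::Data for Arc<KeyValueDatabase> {
    fn set_pusher(&self, _sender: &str) {}
}

struct Services {
    appservice: appservice::Service,
    pusher: pusher::Service,
}

impl Services {
    fn build(db: Arc<KeyValueDatabase>) -> Self {
        Self {
            appservice: appservice::Service { db: Box::new(Arc::clone(&db)) },
            pusher: pusher::Service { db: Box::new(db) },
        }
    }
}

fn main() {
    let services = Services::build(Arc::new(KeyValueDatabase));
    let _id = services.appservice.db.register_appservice("id: bridge");
    services.pusher.db.set_pusher("@alice:example.org");
}
```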
type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; +use ruma::{RoomVersionId, signatures::CanonicalJsonObject, api::federation::discovery::{get_server_keys, get_remote_server_keys}}; +use tokio::sync::Semaphore; use std::{ collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, - sync::{Arc, RwLock}, - time::{Duration, Instant}, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant, SystemTime}, }; -use futures_util::{Future, stream::FuturesUnordered}; +use futures_util::{Future, stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, @@ -22,7 +24,7 @@ use ruma::{ uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tracing::{error, info, trace, warn}; +use tracing::{error, info, trace, warn, debug}; use crate::{service::*, services, Result, Error, PduEvent}; @@ -53,7 +55,7 @@ impl Service { /// it /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively - #[tracing::instrument(skip(value, is_timeline_event, pub_key_map))] + #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, @@ -64,10 +66,11 @@ impl Service { pub_key_map: &'a RwLock>>, ) -> Result>> { if !services().rooms.metadata.exists(room_id)? { - return Error::BadRequest( + return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server", - )}; + )); + } services() .rooms @@ -732,7 +735,7 @@ impl Service { &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), &incoming_pdu.content, - )? + )?; let soft_fail = !state_res::event_auth::auth_check( &room_version, @@ -821,7 +824,7 @@ impl Service { let shortstatekey = services() .rooms .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)? + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -1236,7 +1239,7 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let fetch_res = fetch_signing_keys( + let fetch_res = self.fetch_signing_keys( signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, @@ -1481,4 +1484,168 @@ impl Service { )) } } + + /// Search the DB for the signing keys of the given server, if we don't have them + /// fetch them from the server and save to our DB. 
+ #[tracing::instrument(skip_all)] + pub async fn fetch_signing_keys( + &self, + origin: &ServerName, + signature_ids: Vec, + ) -> Result> { + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = services() + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = services().globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match services() + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = services() + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + trace!("Loading signing keys for {}", origin); + + let mut result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + return Ok(result); + } + + debug!("Fetching signing keys for {} over federation", origin); + + if let Some(server_key) = services() + .sending + .send_federation_request(origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + services().globals.add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in services().globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin, + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { + services().globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + drop(permit); + + back_off(signature_ids); + + warn!("Failed to find public key for server: {}", origin); + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) 
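fetch_signing_keys above combines a per-origin tokio Semaphore (one concurrent key fetch per server) with a quadratic backoff on repeated signature failures. A small sketch of that backoff test, under the same rule of 30 s times tries squared, capped at one day:

use std::time::{Duration, Instant};

fn still_backing_off(last_attempt: Instant, tries: u32) -> bool {
    // 30s * tries^2, but never more than 24 hours.
    let min_elapsed = Duration::from_secs(30)
        .saturating_mul(tries.saturating_mul(tries))
        .min(Duration::from_secs(60 * 60 * 24));
    last_attempt.elapsed() < min_elapsed
}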
+ } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 5fefd3f..f1019c1 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -15,7 +15,7 @@ pub trait Data { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: u64, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()>; fn lazy_load_reset( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 283d45a..90dad21 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,16 +1,18 @@ mod data; -use std::collections::HashSet; +use std::{collections::{HashSet, HashMap}, sync::Mutex}; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, + + lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, @@ -50,7 +52,18 @@ impl Service { room_id: &RoomId, since: u64, ) -> Result<()> { - self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, since) + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|&u| &*u))?; + } else { + // Ignore + } + + Ok(()) } #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 1bdb78d..3c21dd1 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -4,11 +4,11 @@ use ruma::RoomId; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Checks if a room exists. 
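The lazy_loading hunk above queues user ids per (user, device, room, since) in a Mutex-guarded map and only writes them to the database once the client confirms delivery. A condensed sketch of that flow, with String standing in for the ruma owned id types:

use std::{
    collections::{HashMap, HashSet},
    sync::Mutex,
};

struct LazyLoading {
    // Keyed by (user, device, room, since); values are the user ids whose
    // membership events were sent but not yet confirmed by the client.
    waiting: Mutex<HashMap<(String, String, String, u64), HashSet<String>>>,
}

impl LazyLoading {
    fn confirm_delivery(&self, key: &(String, String, String, u64)) -> Vec<String> {
        // Drain the queued ids for this sync token, if any; the real code
        // then hands them to Data::lazy_load_confirm_delivery.
        self.waiting
            .lock()
            .unwrap()
            .remove(key)
            .map(|ids| ids.into_iter().collect())
            .unwrap_or_default()
    }
}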
#[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 4da4223..f1b0bad 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -18,22 +18,22 @@ pub mod user; pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} -pub struct Service { - pub alias: alias::Service, - pub auth_chain: auth_chain::Service, - pub directory: directory::Service, - pub edus: edus::Service, +pub struct Service { + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub edus: edus::Service, pub event_handler: event_handler::Service, - pub lazy_loading: lazy_loading::Service, - pub metadata: metadata::Service, - pub outlier: outlier::Service, - pub pdu_metadata: pdu_metadata::Service, - pub search: search::Service, - pub short: short::Service, - pub state: state::Service, - pub state_accessor: state_accessor::Service, - pub state_cache: state_cache::Service, - pub state_compressor: state_compressor::Service, - pub timeline: timeline::Service, - pub user: user::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub user: user::Service, } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index a495db8..5493ce4 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -4,11 +4,11 @@ use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{Result, PduEvent}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Returns the pdu from the outlier tree. 
pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu_json(event_id) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index c57c1a2..a81d05c 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -6,11 +6,11 @@ use ruma::{RoomId, EventId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index c0fd2a3..b62904c 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -2,7 +2,7 @@ use ruma::RoomId; use crate::Result; pub trait Data { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index b7023f3..dc57191 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -4,11 +4,16 @@ pub use data::Data; use crate::Result; use ruma::RoomId; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { + #[tracing::instrument(skip(self))] + pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + self.db.index_pdu(shortroomid, pdu_id, message_body) + } + #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 1eb891e..a024dc6 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -6,11 +6,11 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, Error, utils, services}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn get_or_create_shorteventid( &self, event_id: &EventId, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index fd0de28..7008d86 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,6 +1,5 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; -use std::fmt::Debug; use crate::Result; use ruma::{EventId, RoomId}; @@ -22,7 +21,7 @@ pub trait Data { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, + event_ids: &dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index a26ed46..979060d 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -10,11 +10,11 @@ use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; use super::state_compressor::CompressedStateEvent; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Set the room to the given statehash and update caches. 
pub fn force_state( &self, @@ -23,6 +23,15 @@ impl Service { statediffnew: HashSet, statediffremoved: HashSet, ) -> Result<()> { + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; for event_id in statediffnew.into_iter().filter_map(|new| { services().rooms.state_compressor.parse_compressed_state_event(new) @@ -70,7 +79,9 @@ impl Service { services().room.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash); + self.db.set_room_state(room_id, shortstatehash, &state_lock); + + drop(state_lock); Ok(()) } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 5d6886d..1911e52 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,11 +6,11 @@ use ruma::{events::StateEventType, RoomId, EventId}; use crate::{Result, PduEvent}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index c3b4eb9..18d1123 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -7,11 +7,11 @@ use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::Room use crate::{Result, services, utils, Error}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Update current membership data. #[tracing::instrument(skip(self, last_state))] pub fn update_membership( diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 1768936..cd87242 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,10 +1,12 @@ +use std::collections::HashSet; + use super::CompressedStateEvent; use crate::Result; pub struct StateDiff { - parent: Option, - added: Vec, - removed: Vec, + pub parent: Option, + pub added: HashSet, + pub removed: HashSet, } pub trait Data { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 619e4cf..ab9f427 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -8,13 +8,13 @@ use crate::{Result, utils, services}; use self::data::StateDiff; -pub struct Service { - db: D, +pub struct Service { + db: Box, } pub type CompressedStateEvent = [u8; 2 * size_of::()]; -impl Service { +impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7669b0b..e8f4205 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -20,11 +20,11 @@ use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduE use super::state_compressor::CompressedStateEvent; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /* /// Checks if a room exists. 
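force_state above now takes the per-room state mutex before touching room state: globals keeps a map handing out one async mutex per room, created on demand. A rough sketch of that locking pattern, with String in place of the room id type:

use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};
use tokio::sync::Mutex;

type RoomLocks = RwLock<HashMap<String, Arc<Mutex<()>>>>;

async fn with_room_locked<F: FnOnce()>(locks: &RoomLocks, room_id: &str, f: F) {
    // The write lock on the outer map is released before the await below.
    let mutex = Arc::clone(
        locks
            .write()
            .unwrap()
            .entry(room_id.to_owned())
            .or_default(),
    );
    let _state_lock = mutex.lock().await;
    f(); // room state is only modified while the guard is held
}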
#[tracing::instrument(skip(self))] diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 729887c..7c7dfae 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -4,11 +4,11 @@ use ruma::{RoomId, UserId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.reset_notification_counts(user_id, room_id) } diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index ea92372..a9c516c 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index ffdbf35..01c0d2f 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -6,11 +6,11 @@ use tracing::error; use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Creates a new Uiaa session. Make sure the session token is unique. pub fn create( &self, diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 3f87589..7eb0ceb 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use crate::Result; use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; -pub trait Data { +pub trait Data: Send + Sync { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result; @@ -138,16 +138,16 @@ pub trait Data { device_id: &DeviceId, ) -> Result>>; - fn get_master_key bool>( + fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; - fn get_self_signing_key bool>( + fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index dfe6c7f..8adc936 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -6,11 +6,11 @@ use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTi use crate::{Result, Error, services}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Check if a user has an account on this homeserver. 
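The users::Data changes above (taking &dyn Fn instead of a generic F, plus the new Send + Sync bound) are what make the trait usable behind Box<dyn Data>: a method with its own type parameter is not object safe. A small illustration with a placeholder UserId type:

// Hypothetical trait showing why the generic parameter had to go.
pub struct UserId;

pub trait Data: Send + Sync {
    // fn get_master_key<F: Fn(&UserId) -> bool>(&self, allowed: F); // not object safe
    fn get_master_key(&self, allowed_signatures: &dyn Fn(&UserId) -> bool);
}

fn check(d: &dyn Data) {
    // `d` can only exist as a trait object because every method is object safe.
    d.get_master_key(&|_user: &UserId| true);
}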
pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) From cff52d7ebb5066f3d8e513488b84a431c0093e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 15:33:57 +0200 Subject: [PATCH 1214/1727] messing around with arcs --- src/api/client_server/account.rs | 4 +- src/api/client_server/media.rs | 12 +- src/api/client_server/membership.rs | 5 +- src/api/client_server/push.rs | 67 ++++-- src/api/client_server/read_marker.rs | 2 +- src/api/client_server/sync.rs | 2 +- src/api/client_server/tag.rs | 61 ++--- src/api/server_server.rs | 4 +- src/database/key_value/account_data.rs | 17 +- src/database/key_value/appservice.rs | 2 + src/database/key_value/globals.rs | 211 +++++++++++++++++- src/database/key_value/key_backups.rs | 4 +- src/database/key_value/media.rs | 6 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 4 +- src/database/key_value/rooms/auth_chain.rs | 74 ++++-- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 4 +- src/database/key_value/rooms/mod.rs | 4 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/short.rs | 4 +- src/database/key_value/rooms/state.rs | 7 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 4 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 2 +- src/database/key_value/rooms/user.rs | 4 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 4 +- src/database/key_value/users.rs | 12 +- src/database/mod.rs | 109 +-------- src/lib.rs | 6 +- src/service/account_data/data.rs | 11 +- src/service/account_data/mod.rs | 8 +- src/service/admin/mod.rs | 27 ++- src/service/appservice/data.rs | 2 +- src/service/appservice/mod.rs | 4 +- src/service/globals/data.rs | 26 ++- src/service/globals/mod.rs | 81 ++----- src/service/key_backups/data.rs | 2 +- src/service/media/data.rs | 4 +- src/service/media/mod.rs | 14 +- src/service/mod.rs | 26 +-- src/service/pdu.rs | 2 +- src/service/pusher/data.rs | 2 +- src/service/pusher/mod.rs | 13 +- src/service/rooms/alias/data.rs | 2 +- src/service/rooms/auth_chain/data.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 34 +-- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/edus/presence/data.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 2 +- src/service/rooms/edus/typing/data.rs | 2 +- src/service/rooms/event_handler/mod.rs | 26 ++- src/service/rooms/lazy_loading/data.rs | 2 +- src/service/rooms/metadata/data.rs | 2 +- src/service/rooms/outlier/data.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/short/data.rs | 2 +- src/service/rooms/state/data.rs | 7 +- src/service/rooms/state/mod.rs | 8 +- src/service/rooms/state_accessor/data.rs | 2 +- src/service/rooms/state_cache/data.rs | 2 +- src/service/rooms/state_compressor/data.rs | 2 +- src/service/rooms/timeline/data.rs | 2 +- src/service/rooms/user/data.rs | 2 +- src/service/transaction_ids/data.rs | 2 +- src/service/uiaa/data.rs | 2 +- src/service/users/mod.rs | 8 +- 77 files changed, 598 
insertions(+), 434 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 6af597e..6d37ce9 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -184,11 +184,11 @@ pub async fn register_route( None, &user_id, GlobalAccountDataEventType::PushRules.to_string().into(), - &ruma::events::push_rules::PushRulesEvent { + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), }, - }, + }).expect("to json always works"), )?; // Inhibit login does not work for guests diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 316e284..80cbb61 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -40,12 +40,12 @@ pub async fn create_content_route( services().media .create( mxc.clone(), - &body + body .filename .as_ref() .map(|filename| "inline; filename=".to_owned() + filename) .as_deref(), - &body.content_type.as_deref(), + body.content_type.as_deref(), &body.file, ) .await?; @@ -76,8 +76,8 @@ pub async fn get_remote_content( services().media .create( mxc.to_string(), - &content_response.content_disposition.as_deref(), - &content_response.content_type.as_deref(), + content_response.content_disposition.as_deref(), + content_response.content_type.as_deref(), &content_response.file, ) .await?; @@ -195,8 +195,8 @@ pub async fn get_content_thumbnail_route( services().media .upload_thumbnail( mxc, - &None, - &get_thumbnail_response.content_type.as_deref(), + None, + get_thumbnail_response.content_type.as_deref(), body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 720c1e6..58ed040 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -860,9 +860,8 @@ pub(crate) async fn invite_helper<'a>( "Could not accept incoming PDU as timeline event.", ))?; - let servers = services() - .rooms - .state_cache + // Bind to variable because of lifetimes + let servers = services().rooms.state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 112fa00..12ec25d 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -20,7 +20,7 @@ pub async fn get_pushrules_all_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -32,8 +32,12 @@ pub async fn get_pushrules_all_route( "PushRules event not found.", ))?; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + Ok(get_pushrules_all::v3::Response { - global: event.content.global, + global: account_data.global, }) } @@ -45,7 +49,7 @@ pub async fn get_pushrule_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -57,7 +61,11 @@ pub async fn get_pushrule_route( "PushRules event not found.", ))?; - let global = event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + let global = account_data.global; let rule = match body.kind { RuleKind::Override => global .override_ @@ -108,7 +116,7 @@ pub async fn set_pushrule_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -120,7 +128,10 @@ pub async fn set_pushrule_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { global.override_.replace( @@ -187,7 +198,7 @@ pub async fn set_pushrule_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule::v3::Response {}) @@ -208,7 +219,7 @@ pub async fn get_pushrule_actions_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -220,7 +231,11 @@ pub async fn get_pushrule_actions_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + + let global = account_data.global; let actions = match body.kind { RuleKind::Override => global .override_ @@ -265,7 +280,7 @@ pub async fn set_pushrule_actions_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -277,7 +292,10 @@ pub async fn set_pushrule_actions_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -316,7 +334,7 @@ pub async fn set_pushrule_actions_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule_actions::v3::Response {}) @@ -337,7 +355,7 @@ pub async fn get_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -349,7 +367,10 @@ pub async fn get_pushrule_enabled_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = account_data.content.global; let enabled = match body.kind { RuleKind::Override => global .override_ @@ -397,7 +418,7 @@ pub async fn set_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -409,7 +430,10 @@ pub async fn set_pushrule_enabled_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -453,7 +477,7 @@ pub async fn set_pushrule_enabled_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule_enabled::v3::Response {}) @@ -474,7 +498,7 @@ pub async fn delete_pushrule_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -486,7 +510,10 @@ pub async fn delete_pushrule_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -520,7 +547,7 @@ pub async fn delete_pushrule_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(delete_pushrule::v3::Response {}) diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index eda57d5..c6d77c1 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -27,7 +27,7 @@ pub async fn set_read_marker_route( 
Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, - &fully_read_event, + &serde_json::to_value(fully_read_event).expect("to json value always works"), )?; if let Some(event) = &body.read_receipt { diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 3489a9a..9eb6383 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -175,7 +175,7 @@ async fn sync_helper( services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = services().globals.db.watch(&sender_user, &sender_device); + let watcher = services().globals.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index bbea2d5..abf2b87 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{Result, Ruma, services, Error}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -18,18 +18,22 @@ pub async fn update_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = services() + let event = services() .account_data .get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - )? - .unwrap_or_else(|| TagEvent { + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }); + }))?; + tags_event .content .tags @@ -39,7 +43,7 @@ pub async fn update_tag_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &tags_event, + &serde_json::to_value(tags_event).expect("to json value always works"), )?; Ok(create_tag::v3::Response {}) @@ -55,25 +59,29 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = services() + let mut event = services() .account_data .get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - )? - .unwrap_or_else(|| TagEvent { + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }); + }))?; + tags_event.content.tags.remove(&body.tag.clone().into()); services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &tags_event, + &serde_json::to_value(tags_event).expect("to json value always works"), )?; Ok(delete_tag::v3::Response {}) @@ -89,20 +97,23 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let mut event = services() + .account_data + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }))?; + Ok(get_tags::v3::Response { - tags: services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )? 
- .unwrap_or_else(|| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - .content - .tags, + tags: tags_event.content.tags, }) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 45d749d..647f457 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1655,10 +1655,10 @@ pub async fn get_devices_route( .collect(), master_key: services() .users - .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, + .get_master_key(&body.user_id, &|u| u.server_name() == sender_servername)?, self_signing_key: services() .users - .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, + .get_self_signing_key(&body.user_id, &|u| u.server_name() == sender_servername)?, }) } diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 49c9170..f0325d2 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,19 +1,19 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; use serde::{Serialize, de::DeserializeOwned}; use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; -impl service::account_data::Data for KeyValueDatabase { +impl service::account_data::Data for Arc { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()> { let mut prefix = room_id .map(|r| r.to_string()) @@ -32,8 +32,7 @@ impl service::account_data::Data for KeyValueDatabase { let mut key = prefix; key.extend_from_slice(event_type.to_string().as_bytes()); - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { + if data.get("type").is_none() || data.get("content").is_none() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Account data doesn't have all required fields.", @@ -42,7 +41,7 @@ impl service::account_data::Data for KeyValueDatabase { self.roomuserdataid_accountdata.insert( &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), + &serde_json::to_vec(&data).expect("to_vec always works on json values"), )?; let prev = self.roomusertype_roomuserdataid.get(&key)?; @@ -60,12 +59,12 @@ impl service::account_data::Data for KeyValueDatabase { /// Searches the account data for a specific kind. 
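The push.rs and tag.rs hunks above all follow the same pattern: account_data.get() now returns raw JSON and each route deserializes the event type it expects, while update() takes a serde_json::Value. A simplified caller-side sketch, assuming serde and serde_json and with a stripped-down PushRulesEvent stand-in:

use serde::Deserialize;
use serde_json::value::RawValue;

#[derive(Deserialize)]
struct PushRulesEvent {
    content: serde_json::Value, // simplified stand-in for the real content type
}

fn global_rules(raw: &RawValue) -> Result<serde_json::Value, String> {
    // Each route now owns the deserialization and the error it reports.
    let event: PushRulesEvent = serde_json::from_str(raw.get())
        .map_err(|_| "Invalid account data event in db.".to_owned())?;
    Ok(event.content)
}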
#[tracing::instrument(skip(self, room_id, user_id, kind))] - fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result> { + ) -> Result>> { let mut key = room_id .map(|r| r.to_string()) .unwrap_or_default() diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index f427ba7..ee6ae20 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index e665229..8711920 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,8 +1,136 @@ -use ruma::signatures::Ed25519KeyPair; +use std::{collections::BTreeMap, sync::Arc}; -use crate::{Result, service, database::KeyValueDatabase, Error, utils}; +use async_trait::async_trait; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{signatures::Ed25519KeyPair, UserId, DeviceId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId, MilliSecondsSinceUnixEpoch}; + +use crate::{Result, service, database::KeyValueDatabase, Error, utils, services}; + +pub const COUNTER: &[u8] = b"c"; + +#[async_trait] +impl service::globals::Data for Arc { + fn next_count(&self) -> Result { + utils::u64_from_bytes(&self.global.increment(COUNTER)?) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + } + + fn current_count(&self) -> Result { + self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + }) + } + + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let userid_bytes = user_id.as_bytes().to_vec(); + let mut userid_prefix = userid_bytes.clone(); + userid_prefix.push(0xff); + + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xff); + + let mut futures = FuturesUnordered::new(); + + // Return when *any* user changed his key + // TODO: only send for user they share a room with + futures.push( + self.todeviceid_events + .watch_prefix(&userdeviceid_prefix), + ); + + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_invitestate + .watch_prefix(&userid_prefix), + ); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push( + self.userroomid_highlightcount + .watch_prefix(&userid_prefix), + ); + + // Events for rooms we are in + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + let short_roomid = services() + .rooms + .short + .get_shortroomid(&room_id) + .ok() + .flatten() + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let roomid_bytes = room_id.as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); + roomid_prefix.push(0xff); + + // PDUs + futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + futures.push( + self.roomid_lasttypingupdate + .watch_prefix(&roomid_bytes), + ); + + futures.push( + self.readreceiptid_readreceipt + .watch_prefix(&roomid_prefix), + ); + + // Key changes + 
futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&roomuser_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xff]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&globaluserdata_prefix), + ); + + // More key changes (used when user is not joined to any rooms) + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push( + self.userid_lastonetimekeyupdate + .watch_prefix(&userid_bytes), + ); + + futures.push(Box::pin(services().globals.rotate.watch())); + + // Wait until one of them finds something + futures.next().await; + + Ok(()) + } + + fn cleanup(&self) -> Result<()> { + self._db.cleanup() + } + + fn memory_usage(&self) -> Result { + self._db.memory_usage() + } -impl service::globals::Data for KeyValueDatabase { fn load_keypair(&self) -> Result { let keypair_bytes = self.global.get(b"keypair")?.map_or_else( || { @@ -39,4 +167,81 @@ impl service::globals::Data for KeyValueDatabase { fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") } + + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result, VerifyKey>> { + // Not atomic, but this is not critical + let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(&keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + keys.verify_keys.extend(verify_keys.into_iter()); + keys.old_verify_keys.extend(old_verify_keys.into_iter()); + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), + )?; + + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + + Ok(tree) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result, VerifyKey>> { + let signingkeys = self + .server_signingkeys + .get(origin.as_bytes())? 
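watch() above collects many watch_prefix futures into a FuturesUnordered and parks until the first of them resolves, which is what lets /sync long-poll for changes. A condensed sketch of that mechanism, assuming the watchers are already boxed futures:

use std::{future::Future, pin::Pin};

use futures_util::{stream::FuturesUnordered, StreamExt};

async fn wait_for_any_change(watchers: Vec<Pin<Box<dyn Future<Output = ()> + Send>>>) {
    let mut futures: FuturesUnordered<_> = watchers.into_iter().collect();
    // Resolves when the first watcher completes; the rest are dropped.
    futures.next().await;
}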
+ .and_then(|bytes| serde_json::from_slice(&bytes).ok()) + .map(|keys: ServerSigningKeys| { + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + tree + }) + .unwrap_or_else(BTreeMap::new); + + Ok(signingkeys) + } + + fn database_version(&self) -> Result { + self.global.get(b"version")?.map_or(Ok(0), |version| { + utils::u64_from_bytes(&version) + .map_err(|_| Error::bad_database("Database version id is invalid.")) + }) + } + + fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.global + .insert(b"version", &new_version.to_be_bytes())?; + Ok(()) + } + + } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 8171451..c59ed36 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,10 +1,10 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, sync::Arc}; use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; -impl service::key_backups::Data for KeyValueDatabase { +impl service::key_backups::Data for Arc { fn create_backup( &self, user_id: &UserId, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index a84cbd5..1726755 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,9 +1,11 @@ +use std::sync::Arc; + use ruma::api::client::error::ErrorKind; use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -impl service::media::Data for KeyValueDatabase { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result> { +impl service::media::Data for Arc { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b05e47b..85d1d86 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::{service, database::KeyValueDatabase, Error, Result}; -impl service::pusher::Data for KeyValueDatabase { +impl service::pusher::Data for Arc { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 0aa8dd4..437902d 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::alias::Data for KeyValueDatabase { +impl service::rooms::alias::Data for Arc { fn set_alias( &self, alias: &RoomAliasId, diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 888d472..2dffb04 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,28 +1,60 @@ -use 
std::{collections::HashSet, mem::size_of}; +use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service, database::KeyValueDatabase, Result, utils}; -impl service::rooms::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>> { - Ok(self.shorteventid_authchain - .get(&shorteventid.to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - })) +impl service::rooms::auth_chain::Data for Arc { + fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); + } + + // We only save auth chains for single events in the db + if key.len() == 1 { + // Check DB cache + let chain = self.shorteventid_authchain + .get(&key[0].to_be_bytes())? + .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") + }) + .collect() + }); + + if let Some(chain) = chain { + let chain = Arc::new(chain); + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); + + return Ok(Some(chain)); + } + } + + Ok(None) + } - fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { - self.shorteventid_authchain.insert( - &shorteventid.to_be_bytes(), - &auth_chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - ) + fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + // Only persist single events in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + )?; + } + + // Cache in RAM + self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); + + Ok(()) } } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 727004e..864e75e 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::directory::Data for KeyValueDatabase { +impl service::rooms::directory::Data for Arc { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index b5007f8..03e4219 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -2,6 +2,8 @@ mod presence; mod typing; mod read_receipt; +use std::sync::Arc; + use crate::{service, database::KeyValueDatabase}; -impl service::rooms::edus::Data for KeyValueDatabase {} +impl service::rooms::edus::Data for Arc {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1477c28..5aeb147 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,10 +1,10 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl 
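The auth_chain hunk above turns the cache into a two-tier lookup keyed by a list of short event ids: every key is cached in RAM, but only single-event chains are persisted to the shorteventid_authchain tree. A rough sketch, with a plain HashMap standing in for the LruCache and a caller-supplied closure in place of the database read:

use std::{
    collections::{HashMap, HashSet},
    sync::{Arc, Mutex},
};

struct AuthChainCache {
    ram: Mutex<HashMap<Vec<u64>, Arc<HashSet<u64>>>>, // LruCache in the real code
}

impl AuthChainCache {
    fn get(
        &self,
        key: &[u64],
        load_from_db: impl Fn(u64) -> Option<HashSet<u64>>,
    ) -> Option<Arc<HashSet<u64>>> {
        // RAM first, for chains of any length.
        if let Some(hit) = self.ram.lock().unwrap().get(key) {
            return Some(Arc::clone(hit));
        }
        // Only single-event chains are stored in the database.
        if let [single] = key {
            if let Some(chain) = load_from_db(*single) {
                let chain = Arc::new(chain);
                self.ram.lock().unwrap().insert(vec![*single], Arc::clone(&chain));
                return Some(chain);
            }
        }
        None
    }
}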
service::rooms::edus::presence::Data for KeyValueDatabase { +impl service::rooms::edus::presence::Data for Arc { fn update_presence( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a12e265..7fcb8ac 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,10 +1,10 @@ -use std::mem; +use std::{mem, sync::Arc}; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { +impl service::rooms::edus::read_receipt::Data for Arc { fn readreceipt_update( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index b7d3596..7f3526d 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,10 +1,10 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use ruma::{UserId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::typing::Data for KeyValueDatabase { +impl service::rooms::edus::typing::Data for Arc { fn typing_add( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 133e1d0..b16657a 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, RoomId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::lazy_loading::Data for KeyValueDatabase { +impl service::rooms::lazy_loading::Data for Arc { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index db2bc69..560beb9 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::RoomId; use crate::{service, database::KeyValueDatabase, Result, services}; -impl service::rooms::metadata::Data for KeyValueDatabase { +impl service::rooms::metadata::Data for Arc { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match services().rooms.short.get_shortroomid(room_id)? 
{ Some(b) => b.to_be_bytes().to_vec(), diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 406943e..97c29e5 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -15,6 +15,8 @@ mod state_compressor; mod timeline; mod user; +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service}; -impl service::rooms::Data for KeyValueDatabase {} +impl service::rooms::Data for Arc {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index aa97544..b1ae816 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; -impl service::rooms::outlier::Data for KeyValueDatabase { +impl service::rooms::outlier::Data for Arc { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f3ac414..f5e8f76 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -4,7 +4,7 @@ use ruma::{RoomId, EventId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::pdu_metadata::Data for KeyValueDatabase { +impl service::rooms::pdu_metadata::Data for Arc { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index dfbdbc6..7b8d278 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,10 +1,10 @@ -use std::mem::size_of; +use std::{mem::size_of, sync::Arc}; use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Result, services}; -impl service::rooms::search::Data for KeyValueDatabase { +impl service::rooms::search::Data for Arc { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index 9129638..9a302b5 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,4 +1,6 @@ +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service}; -impl service::rooms::short::Data for KeyValueDatabase { +impl service::rooms::short::Data for Arc { } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 405939d..527c240 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,11 +1,12 @@ use ruma::{RoomId, EventId}; +use tokio::sync::MutexGuard; use std::sync::Arc; -use std::{sync::MutexGuard, collections::HashSet}; +use std::collections::HashSet; use std::fmt::Debug; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::state::Data for KeyValueDatabase { +impl service::rooms::state::Data for Arc { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
@@ -48,7 +49,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, + event_ids: &mut dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 4d5bd4a..9af45db 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] -impl service::rooms::state_accessor::Data for KeyValueDatabase { +impl service::rooms::state_accessor::Data for Arc { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 5f05485..bdb8cf8 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; use crate::{service, database::KeyValueDatabase, services, Result}; -impl service::rooms::state_cache::Data for KeyValueDatabase { +impl service::rooms::state_cache::Data for Arc { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index aee1890..e1c0280 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,8 +1,8 @@ -use std::{collections::HashSet, mem::size_of}; +use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; -impl service::rooms::state_compressor::Data for KeyValueDatabase { +impl service::rooms::state_compressor::Data for Arc { fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index a3b6c17..2d334b9 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -5,7 +5,7 @@ use tracing::error; use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; -impl service::rooms::timeline::Data for KeyValueDatabase { +impl service::rooms::timeline::Data for Arc { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 66681e3..4d20b00 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, RoomId}; use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; -impl service::rooms::user::Data for KeyValueDatabase { +impl service::rooms::user::Data for Arc { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut 
userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index a63b3c5..7fa6908 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, TransactionId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::transaction_ids::Data for KeyValueDatabase { +impl service::transaction_ids::Data for Arc { fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index cf242de..8752e55 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; use crate::{database::KeyValueDatabase, service, Error, Result}; -impl service::uiaa::Data for KeyValueDatabase { +impl service::uiaa::Data for Arc { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 338d880..1ac85b3 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,11 @@ -use std::{mem::size_of, collections::BTreeMap}; +use std::{mem::size_of, collections::BTreeMap, sync::Arc}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; -impl service::users::Data for KeyValueDatabase { +impl service::users::Data for Arc { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) @@ -687,10 +687,10 @@ impl service::users::Data for KeyValueDatabase { }) } - fn get_master_key bool>( + fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? @@ -708,10 +708,10 @@ impl service::users::Data for KeyValueDatabase { }) } - fn get_self_signing_key bool>( + fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? 
diff --git a/src/database/mod.rs b/src/database/mod.rs index aa5c583..35922f0 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -402,10 +402,10 @@ impl KeyValueDatabase { }); - let services_raw = Services::build(Arc::clone(&db)); + let services_raw = Box::new(Services::build(Arc::clone(&db))); // This is the first and only time we initialize the SERVICE static - *SERVICES.write().unwrap() = Some(services_raw); + *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); // Matrix resource ownership is based on the server name; changing it @@ -877,105 +877,6 @@ impl KeyValueDatabase { services().globals.rotate.fire(); } - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let userid_bytes = user_id.as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xff); - - let mut userdeviceid_prefix = userid_prefix.clone(); - userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xff); - - let mut futures = FuturesUnordered::new(); - - // Return when *any* user changed his key - // TODO: only send for user they share a room with - futures.push( - self.todeviceid_events - .watch_prefix(&userdeviceid_prefix), - ); - - futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_invitestate - .watch_prefix(&userid_prefix), - ); - futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_notificationcount - .watch_prefix(&userid_prefix), - ); - futures.push( - self.userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); - - // Events for rooms we are in - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { - let short_roomid = services() - .rooms - .short - .get_shortroomid(&room_id) - .ok() - .flatten() - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let roomid_bytes = room_id.as_bytes().to_vec(); - let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xff); - - // PDUs - futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - futures.push( - self.roomid_lasttypingupdate - .watch_prefix(&roomid_bytes), - ); - - futures.push( - self.readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); - - // Key changes - futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); - - // Room account data - let mut roomuser_prefix = roomid_prefix.clone(); - roomuser_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), - ); - } - - let mut globaluserdata_prefix = vec![0xff]; - globaluserdata_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&globaluserdata_prefix), - ); - - // More key changes (used when user is not joined to any rooms) - futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); - - // One time keys - futures.push( - self.userid_lastonetimekeyupdate - .watch_prefix(&userid_bytes), - ); - - futures.push(Box::pin(services().globals.rotate.watch())); - - // Wait until one of them finds something - futures.next().await; - } - #[tracing::instrument(skip(self))] pub fn flush(&self) -> Result<()> { let start = std::time::Instant::now(); @@ -1021,7 +922,7 @@ impl KeyValueDatabase { } let start = Instant::now(); - if let Err(e) = services().globals.db._db.cleanup() { + if let Err(e) = services().globals.cleanup() { error!("cleanup: Errored: {}", e); } else { info!("cleanup: 
Finished in {:?}", start.elapsed()); @@ -1048,9 +949,9 @@ fn set_emergency_access() -> Result { None, &conduit_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &GlobalAccountDataEvent { + &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, - }, + }).expect("to json value always works"), )?; res diff --git a/src/lib.rs b/src/lib.rs index 75cf6c7..c103d52 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,9 +20,9 @@ pub use utils::error::{Error, Result}; pub use service::{Services, pdu::PduEvent}; pub use api::ruma_wrapper::{Ruma, RumaResponse}; -pub static SERVICES: RwLock>> = RwLock::new(None); +pub static SERVICES: RwLock> = RwLock::new(None); -pub fn services<'a>() -> Arc { - Arc::clone(&SERVICES.read().unwrap()) +pub fn services<'a>() -> &'static Services { + &SERVICES.read().unwrap().expect("SERVICES should be initialized when this is called") } diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 0f8e0bf..65780a6 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,26 +1,25 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; -use serde::{Serialize, de::DeserializeOwned}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Places one event in the account data of the user and removes the previous entry. - fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()>; /// Searches the account data for a specific kind. - fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result>; + ) -> Result>>; /// Returns all changes to the account data that happened after `since`. fn changes_since( diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 35ca149..9785478 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -24,24 +24,24 @@ pub struct Service { impl Service { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + pub fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()> { self.db.update(room_id, user_id, event_type, data) } /// Searches the account data for a specific kind. 
#[tracing::instrument(skip(self, room_id, user_id, event_type))] - pub fn get( + pub fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - ) -> Result> { + ) -> Result>> { self.db.get(room_id, user_id, event_type) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 48f828f..32a709c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,7 +28,7 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{Result, services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{Result, services, Error, api::{server_server, client_server::{AUTO_GEN_PASSWORD_LENGTH, leave_all_rooms}}, PduEvent, utils::{HtmlEscape, self}}; use super::pdu::PduBuilder; @@ -179,7 +179,8 @@ impl Service { let conduit_room = services() .rooms - .id_from_alias( + .alias + .resolve_local_alias( format!("#admins:{}", services().globals.server_name()) .as_str() .try_into() @@ -221,7 +222,7 @@ impl Service { .roomid_mutex_state .write() .unwrap() - .entry(conduit_room.clone()) + .entry(conduit_room.to_owned()) .or_default(), ); @@ -599,11 +600,11 @@ impl Service { ruma::events::GlobalAccountDataEventType::PushRules .to_string() .into(), - &ruma::events::push_rules::PushRulesEvent { + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }, + }).expect("to json value always works"), )?; // we dont add a device since we're not the user, just the creator @@ -614,12 +615,14 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - RoomMessageEventContent::text_plain("Room disabled.") + todo!(); + //services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + //RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - services().rooms.disabledroomids.remove(room_id.as_bytes())?; - RoomMessageEventContent::text_plain("Room enabled.") + todo!(); + //services().rooms.disabledroomids.remove(room_id.as_bytes())?; + //RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { leave_rooms, @@ -635,7 +638,7 @@ impl Service { services().users.deactivate_account(&user_id)?; if leave_rooms { - services().rooms.leave_all_rooms(&user_id).await?; + leave_all_rooms(&user_id).await?; } RoomMessageEventContent::text_plain(format!( @@ -694,7 +697,7 @@ impl Service { if leave_rooms { for &user_id in &user_ids { - let _ = services().rooms.leave_all_rooms(user_id).await; + let _ = leave_all_rooms(user_id).await; } } @@ -804,7 +807,7 @@ impl Service { pub(crate) async fn create_admin_room(&self) -> Result<()> { let room_id = RoomId::new(services().globals.server_name()); - services().rooms.get_or_create_shortroomid(&room_id)?; + services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( services().globals diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index a70bf9c..744f0f9 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,6 +1,6 @@ use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Registers an appservice and returns the ID to the caller fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; diff --git 
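
The account-data `update`/`get` hunks above drop their generic type parameters in favour of `serde_json::Value` and raw JSON results. A likely motivation is object safety: a trait with generic methods cannot be used as `dyn Data` behind an `Arc` or `Box`, and the same constraint shows up later when `get_master_key`/`get_self_signing_key` switch from `F: Fn(&UserId) -> bool` to `&dyn Fn(&UserId) -> bool`. A small sketch under that assumption; the trait shape, `Db`, and `store` are illustrative names, not Conduit's:

```rust
use serde::Serialize;
use serde_json::Value;

// Object-safe storage trait: it takes a concrete serde_json::Value instead
// of a generic `T: Serialize`, so it can live behind `dyn Data`.
trait Data: Send + Sync {
    fn update(&self, event_type: &str, data: &Value);
}

struct Db;

impl Data for Db {
    fn update(&self, event_type: &str, data: &Value) {
        println!("{event_type}: {data}");
    }
}

// Callers keep the ergonomic generic API and serialize before crossing the
// trait-object boundary.
fn store<T: Serialize>(db: &dyn Data, event_type: &str, event: &T) {
    let value = serde_json::to_value(event).expect("to json value always works");
    db.update(event_type, &value);
}
```
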
a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 1a5ce50..ad5ab4a 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,10 +1,12 @@ mod data; +use std::sync::Arc; + pub use data::Data; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index f36ab61..0f74b2a 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,8 +1,30 @@ -use ruma::signatures::Ed25519KeyPair; +use std::collections::BTreeMap; + +use async_trait::async_trait; +use ruma::{signatures::Ed25519KeyPair, DeviceId, UserId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId}; use crate::Result; -pub trait Data { +#[async_trait] +pub trait Data: Send + Sync { + fn next_count(&self) -> Result; + fn current_count(&self) -> Result; + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn cleanup(&self) -> Result<()>; + fn memory_usage(&self) -> Result; fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result, VerifyKey>>; + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result, VerifyKey>>; + fn database_version(&self) -> Result; + fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 48d7b06..8fd69df 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -26,8 +26,6 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -pub const COUNTER: &[u8] = b"c"; - type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries @@ -198,16 +196,24 @@ impl Service { #[tracing::instrument(skip(self))] pub fn next_count(&self) -> Result { - utils::u64_from_bytes(&self.globals.increment(COUNTER)?) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) + self.db.next_count() } #[tracing::instrument(skip(self))] pub fn current_count(&self) -> Result { - self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - }) + self.db.current_count() + } + + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.watch(user_id, device_id).await + } + + pub fn cleanup(&self) -> Result<()> { + self.db.cleanup() + } + + pub fn memory_usage(&self) -> Result { + self.db.memory_usage() } pub fn server_name(&self) -> &ServerName { @@ -296,38 +302,7 @@ impl Service { origin: &ServerName, new_keys: ServerSigningKeys, ) -> Result, VerifyKey>> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. 
- } = new_keys; - - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) + self.db.add_signing_key(origin, new_keys) } /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. @@ -335,35 +310,15 @@ impl Service { &self, origin: &ServerName, ) -> Result, VerifyKey>> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? - .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); - - Ok(signingkeys) + self.db.signing_keys_for(origin) } pub fn database_version(&self) -> Result { - self.globals.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) - }) + self.db.database_version() } pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals - .insert(b"version", &new_version.to_be_bytes())?; - Ok(()) + self.db.bump_database_version(new_version) } pub fn get_media_folder(&self) -> PathBuf { diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index 6f6359e..226b1e1 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 94975de..2e24049 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,7 +1,7 @@ use crate::Result; -pub trait Data { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result>; +pub trait Data: Send + Sync { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result>; /// Returns content_disposition, content_type and the metadata key. 
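
The media trait above also flattens `content_disposition`/`content_type` from `&Option<&str>` to `Option<&str>`, which is why the call sites below switch to `.as_deref()`. A tiny illustration with a made-up `describe` helper:

```rust
// Taking Option<&str> instead of &Option<&str> lets callers pass an owned
// Option<String> via .as_deref(), a literal, or None, without extra borrows.
fn describe(content_type: Option<&str>) -> String {
    content_type.unwrap_or("application/octet-stream").to_owned()
}

fn main() {
    let from_upload: Option<String> = Some("image/png".to_owned());
    assert_eq!(describe(from_upload.as_deref()), "image/png");
    assert_eq!(describe(Some("text/plain")), "text/plain");
    assert_eq!(describe(None), "application/octet-stream");
}
```
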
fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d61292b..f86251f 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -24,8 +24,8 @@ impl Service { pub async fn create( &self, mxc: String, - content_disposition: &Option<&str>, - content_type: &Option<&str>, + content_disposition: Option<&str>, + content_type: Option<&str>, file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail @@ -42,8 +42,8 @@ impl Service { pub async fn upload_thumbnail( &self, mxc: String, - content_disposition: &Option<&str>, - content_type: &Option<&str>, + content_disposition: Option<&str>, + content_type: Option<&str>, width: u32, height: u32, file: &[u8], @@ -108,7 +108,7 @@ impl Service { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, width, height) { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), width, height) { // Using saved thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -119,7 +119,7 @@ impl Service { content_type, file: file.to_vec(), })) - } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), 0, 0) { // Generate a thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -180,7 +180,7 @@ impl Service { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; + let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition.as_deref(), content_type.as_deref())?; let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; diff --git a/src/service/mod.rs b/src/service/mod.rs index 47d4651..a1a728c 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -30,20 +30,20 @@ pub struct Services { } impl Services { - pub fn build(db: Arc) { + pub fn build(db: Arc) -> Self { Self { - appservice: appservice::Service { db: Arc::clone(&db) }, - pusher: appservice::Service { db: Arc::clone(&db) }, - rooms: appservice::Service { db: Arc::clone(&db) }, - transaction_ids: appservice::Service { db: Arc::clone(&db) }, - uiaa: appservice::Service { db: Arc::clone(&db) }, - users: appservice::Service { db: Arc::clone(&db) }, - account_data: appservice::Service { db: Arc::clone(&db) }, - admin: appservice::Service { db: Arc::clone(&db) }, - globals: appservice::Service { db: Arc::clone(&db) }, - key_backups: appservice::Service { db: Arc::clone(&db) }, - media: appservice::Service { db: Arc::clone(&db) }, - sending: appservice::Service { db: Arc::clone(&db) }, + appservice: appservice::Service { db: db.clone() }, + pusher: pusher::Service { db: db.clone() }, + rooms: rooms::Service { db: Arc::clone(&db) }, + transaction_ids: transaction_ids::Service { db: Arc::clone(&db) }, + uiaa: uiaa::Service { db: Arc::clone(&db) }, + users: users::Service { db: Arc::clone(&db) }, + account_data: account_data::Service { db: Arc::clone(&db) }, + admin: admin::Service { db: Arc::clone(&db) 
}, + globals: globals::Service { db: Arc::clone(&db) }, + key_backups: key_backups::Service { db: Arc::clone(&db) }, + media: media::Service { db: Arc::clone(&db) }, + sending: sending::Service { db: Arc::clone(&db) }, } } } diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 2ed79f2..3be3300 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -343,7 +343,7 @@ pub(crate) fn gen_event_id_canonical_json( .and_then(|id| RoomId::parse(id.as_str()?).ok()) .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; - let room_version_id = services().rooms.get_room_version(&room_id); + let room_version_id = services().rooms.state.get_room_version(&room_id); let event_id = format!( "${}", diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 3951da7..305a538 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; fn get_pusher(&self, senderkey: &[u8]) -> Result>; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index af30ca4..e65c57a 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -3,6 +3,7 @@ pub use data::Data; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; +use ruma::api::IncomingResponse; use ruma::{ api::{ client::push::{get_pushers, set_pusher, PusherKind}, @@ -20,11 +21,12 @@ use ruma::{ serde::Raw, uint, RoomId, UInt, UserId, }; +use std::sync::Arc; use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -47,8 +49,9 @@ impl Service { self.db.get_pusher_senderkeys(sender) } - #[tracing::instrument(skip(destination, request))] + #[tracing::instrument(skip(self, destination, request))] pub async fn send_request( + &self, destination: &str, request: T, ) -> Result @@ -124,7 +127,7 @@ impl Service { } } - #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu))] + #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] pub async fn send_push_notice( &self, user: &UserId, @@ -181,7 +184,7 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(user, ruleset, pdu))] + #[tracing::instrument(skip(self, user, ruleset, pdu))] pub fn get_actions<'a>( &self, user: &UserId, @@ -204,7 +207,7 @@ impl Service { Ok(ruleset.get_actions(pdu, &ctx)) } - #[tracing::instrument(skip(unread, pusher, tweaks, event))] + #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] async fn send_notice( &self, unread: UInt, diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 8102209..26bffae 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, RoomAliasId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. 
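
The corrected `Services::build` above now returns `Self` and gives every service its own clone of the shared database handle instead of constructing `appservice::Service` for every field. A simplified sketch of that wiring, with placeholder trait and service names standing in for the real set:

```rust
use std::sync::Arc;

trait Data: Send + Sync {}

struct Db;
impl Data for Db {}

struct Users { db: Arc<dyn Data> }
struct Media { db: Arc<dyn Data> }

struct Services {
    users: Users,
    media: Media,
}

impl Services {
    // Each service stores its own Arc clone of the same database value.
    fn build(db: Arc<dyn Data>) -> Self {
        Self {
            users: Users { db: Arc::clone(&db) },
            media: Media { db },
        }
    }
}

fn main() {
    let db: Arc<dyn Data> = Arc::new(Db);
    let services = Services::build(db);
    // Both services point at the same underlying database value.
    assert!(Arc::ptr_eq(&services.users.db, &services.media.db));
}
```
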
fn set_alias( &self, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index e4e8550..13fac2d 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,7 +1,7 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use crate::Result; -pub trait Data { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>>; - fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; +pub trait Data: Send + Sync { + fn get_cached_eventid_authchain(&self, shorteventid: &[u64]) -> Result>>>; + fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 26a3f3f..5fe0e3e 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -15,41 +15,11 @@ impl Service { &'a self, key: &[u64], ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key.to_be_bytes()) { - return Ok(Some(Arc::clone(result))); - } - - // We only save auth chains for single events in the db - if key.len() == 1 { - // Check DB cache - if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) + self.db.get_cached_eventid_authchain(key) } #[tracing::instrument(skip(self))] pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { - // Only persist single events in db - if key.len() == 1 { - self.db.cache_auth_chain(key[0], auth_chain)?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); - - Ok(()) + self.db.cache_auth_chain(key, auth_chain) } } diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 1376721..b4e020d 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,7 +1,7 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Adds the room to the public room directory fn set_public(&self, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index ca0e241..f759255 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index e8ed965..5ebd89d 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Replaces the previous read receipt. 
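
Most of the `Data` traits in this patch gain a `Send + Sync` supertrait. That bound is what allows a single `Arc<dyn Data>` (ultimately backed by the key-value database) to be shared across async tasks and threads; a throwaway example with a stand-in `MemoryDb`:

```rust
use std::{sync::Arc, thread};

// Without `Send + Sync` on the trait, Arc<dyn Data> could not be moved into
// another thread or shared between tasks.
trait Data: Send + Sync {
    fn exists(&self) -> bool;
}

struct MemoryDb;

impl Data for MemoryDb {
    fn exists(&self) -> bool {
        true
    }
}

fn main() {
    let db: Arc<dyn Data> = Arc::new(MemoryDb);
    let db_for_worker = Arc::clone(&db);
    let worker = thread::spawn(move || db_for_worker.exists());
    assert!(worker.join().unwrap());
    assert!(db.exists());
}
```
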
fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index ec0be46..426d4e0 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -2,7 +2,7 @@ use std::collections::HashSet; use crate::Result; use ruma::{UserId, RoomId}; -pub trait Data { +pub trait Data: Send + Sync { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e229112..ac3cca6 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -117,7 +117,7 @@ impl Service { room_id, pub_key_map, incoming_pdu.prev_events.clone(), - ).await; + ).await?; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { @@ -240,7 +240,7 @@ impl Service { r } - #[tracing::instrument(skip(create_event, value, pub_key_map))] + #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( &self, origin: &'a ServerName, @@ -272,7 +272,7 @@ impl Service { RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &*pub_key_map.read().expect("RwLock is poisoned."), &value, room_version_id, ) { @@ -301,7 +301,7 @@ impl Service { let incoming_pdu = serde_json::from_value::( serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" @@ -329,7 +329,7 @@ impl Service { // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match services().rooms.get_pdu(id)? { + let auth_event = match services().rooms.timeline.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -373,7 +373,8 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - )? { + ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", @@ -385,6 +386,7 @@ impl Service { // 7. Persist the event as an outlier. services() .rooms + .outlier .add_pdu_outlier(&incoming_pdu.event_id, &val)?; info!("Added pdu as outlier."); @@ -393,7 +395,7 @@ impl Service { }) } - #[tracing::instrument(skip(incoming_pdu, val, create_event, pub_key_map))] + #[tracing::instrument(skip(self, incoming_pdu, val, create_event, pub_key_map))] pub async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: Arc, @@ -412,7 +414,7 @@ impl Service { .rooms .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? 
{ - return Err("Event has been soft failed".into()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); } info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); @@ -1130,7 +1132,8 @@ impl Service { room_id: &RoomId, pub_key_map: &RwLock>>, initial_set: Vec>, - ) -> Vec<(Arc, HashMap, (Arc, BTreeMap)>)> { + ) -> Result<(Vec>, HashMap, +(Arc, BTreeMap)>)> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1164,6 +1167,7 @@ impl Service { if let Some(json) = json_opt.or_else(|| { services() .rooms + .outlier .get_outlier_pdu_json(&prev_event_id) .ok() .flatten() @@ -1209,9 +1213,9 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - })?; + }).map_err(|_| Error::bad_database("Error sorting prev events"))?; - (sorted, eventid_info) + Ok((sorted, eventid_info)) } #[tracing::instrument(skip_all)] diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index f1019c1..524071c 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, DeviceId, UserId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 9b1ce07..9444db4 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,6 +1,6 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index 17d0f7b..edc7c4f 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -2,7 +2,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; -pub trait Data { +pub trait Data: Send + Sync { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index fb83902..9bc49cf 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use ruma::{EventId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index b62904c..0c14ffe 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,7 +1,7 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; fn search_pdus<'a>( diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 3b1c311..bc2b28f 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,2 +1,2 @@ -pub trait Data { +pub trait Data: Send + Sync { } diff --git a/src/service/rooms/state/data.rs 
b/src/service/rooms/state/data.rs index 7008d86..20c177a 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,9 +1,10 @@ use std::sync::Arc; -use std::{sync::MutexGuard, collections::HashSet}; +use std::collections::HashSet; use crate::Result; use ruma::{EventId, RoomId}; +use tokio::sync::MutexGuard; -pub trait Data { +pub trait Data: Send + Sync { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; @@ -21,7 +22,7 @@ pub trait Data { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: &dyn Iterator, + event_ids: &mut dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 979060d..5385978 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -16,7 +16,7 @@ pub struct Service { impl Service { /// Set the room to the given statehash and update caches. - pub fn force_state( + pub async fn force_state( &self, room_id: &RoomId, shortstatehash: u64, @@ -28,7 +28,7 @@ impl Service { .roomid_mutex_state .write() .unwrap() - .entry(body.room_id.to_owned()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -74,10 +74,10 @@ impl Service { Err(_) => continue, }; - services().room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().rooms.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - services().room.state_cache.update_joined_count(room_id)?; + services().rooms.state_cache.update_joined_count(room_id)?; self.db.set_room_state(room_id, shortstatehash, &state_lock); diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 48031e4..14f96bc 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -6,7 +6,7 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, PduEvent}; #[async_trait] -pub trait Data { +pub trait Data: Send + Sync { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
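
`set_forward_extremities` above now takes `event_ids: &mut dyn Iterator<Item = ...>` rather than a generic parameter, keeping the trait object-safe while still allowing iteration; `Iterator::next` needs `&mut self`, hence the `&mut`. A sketch with the event-id type simplified to `String`:

```rust
// Driving a trait-object iterator requires a mutable reference, because
// Iterator::next takes &mut self.
fn collect_extremities(event_ids: &mut dyn Iterator<Item = String>) -> Vec<String> {
    let mut out = Vec::new();
    for id in event_ids {
        out.push(id);
    }
    out
}

fn main() {
    let ids = vec!["$event_a".to_owned(), "$event_b".to_owned()];
    let collected = collect_extremities(&mut ids.into_iter());
    assert_eq!(collected.len(), 2);
}
```
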
async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index b45b2ea..b9db721 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index cd87242..ce164c6 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -9,7 +9,7 @@ pub struct StateDiff { pub removed: HashSet, } -pub trait Data { +pub trait Data: Send + Sync { fn get_statediff(&self, shortstatehash: u64) -> Result; fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 85bedc6..d073e86 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -4,7 +4,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; use crate::{Result, PduEvent}; -pub trait Data { +pub trait Data: Send + Sync { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index a5657bc..6b7ebc7 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index 6e71dd4..c5ff05c 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,7 +1,7 @@ use ruma::{DeviceId, UserId, TransactionId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index d7fa79d..091f064 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,7 +1,7 @@ use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 8adc936..b13ae1f 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -223,18 +223,18 @@ impl Service { self.db.get_device_keys(user_id, device_id) } - pub fn get_master_key bool>( + pub fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.db.get_master_key(user_id, allowed_signatures) } - pub fn get_self_signing_key bool>( + pub fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.db.get_self_signing_key(user_id, allowed_signatures) 
} From 44fe6d1554eaa0a15314686974ab01f48c836588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 18:36:12 +0200 Subject: [PATCH 1215/1727] 127 errors left --- src/api/client_server/membership.rs | 2 +- src/api/server_server.rs | 133 +---------- src/database/key_value/account_data.rs | 4 +- src/database/key_value/appservice.rs | 2 - src/database/key_value/globals.rs | 4 +- src/database/key_value/key_backups.rs | 4 +- src/database/key_value/media.rs | 4 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 4 +- src/database/key_value/rooms/auth_chain.rs | 2 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 18 +- src/database/key_value/rooms/mod.rs | 4 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/short.rs | 225 +++++++++++++++++- src/database/key_value/rooms/state.rs | 2 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 4 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 22 +- src/database/key_value/rooms/user.rs | 8 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 4 +- src/database/key_value/users.rs | 6 +- src/database/mod.rs | 24 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 12 +- src/service/globals/mod.rs | 4 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 86 +++++-- src/service/rooms/alias/mod.rs | 4 +- src/service/rooms/auth_chain/mod.rs | 135 ++++++++++- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 4 +- src/service/rooms/event_handler/mod.rs | 84 ++++--- src/service/rooms/lazy_loading/mod.rs | 4 +- src/service/rooms/metadata/data.rs | 2 + src/service/rooms/metadata/mod.rs | 12 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/data.rs | 38 +++ src/service/rooms/short/mod.rs | 190 +-------------- src/service/rooms/state/mod.rs | 99 +++++--- src/service/rooms/state_accessor/mod.rs | 4 +- src/service/rooms/state_cache/mod.rs | 66 +++-- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/data.rs | 1 + src/service/rooms/timeline/mod.rs | 26 +- src/service/rooms/user/mod.rs | 4 +- src/service/sending/mod.rs | 8 - src/service/transaction_ids/mod.rs | 4 +- src/service/uiaa/mod.rs | 4 +- src/service/users/mod.rs | 4 +- src/utils/mod.rs | 12 +- 65 files changed, 809 insertions(+), 556 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 58ed040..f07f2ad 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -654,7 +654,7 @@ async fn join_room_by_id_helper( // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, shortstatehash)?; + 
services().rooms.state.set_room_state(room_id, shortstatehash, &state_lock)?; let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 647f457..11f7ec3 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -857,131 +857,6 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) } -#[tracing::instrument(skip(starting_events))] -pub(crate) async fn get_auth_chain<'a>( - room_id: &RoomId, - starting_events: Vec>, -) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - let mut i = 0; - for id in starting_events { - let short = services().rooms.short.get_or_create_shorteventid(&id)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id.clone())); - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - let mut i = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? { - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); - services().rooms - .auth_chain - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() - ); - chunk_cache.extend(auth_chain.iter()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - }; - } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 - ); - let chunk_cache = Arc::new(chunk_cache); - services().rooms - .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) -} - -#[tracing::instrument(skip(event_id))] -fn get_auth_chain_inner( - room_id: &RoomId, - event_id: &EventId, -) -> Result> { - let mut todo = vec![Arc::from(event_id)]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match services().rooms.timeline.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); - } - for auth_event in &pdu.auth_events { - let sauthevent = services() - .rooms.short - .get_or_create_shorteventid(auth_event)?; - - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); - } - Err(e) => { - warn!("Could not load event in auth chain: 
{} {}", event_id, e); - } - } - } - - Ok(found) -} - /// # `GET /_matrix/federation/v1/event/{eventId}` /// /// Retrieves a single event from the server. @@ -1135,7 +1010,7 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services().rooms.auth_chain.get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -1190,7 +1065,7 @@ pub async fn get_room_state_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -1246,7 +1121,7 @@ pub async fn get_room_state_ids_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1449,7 +1324,7 @@ async fn create_join_event( drop(mutex_lock); let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; - let auth_chain_ids = get_auth_chain( + let auth_chain_ids = services().rooms.auth_chain.get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), ) diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index f0325d2..5674ac0 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,11 +1,11 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; use serde::{Serialize, de::DeserializeOwned}; use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; -impl service::account_data::Data for Arc { +impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. 
#[tracing::instrument(skip(self, room_id, user_id, event_type, data))] fn update( diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index ee6ae20..f427ba7 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 8711920..199cbf6 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -9,7 +9,7 @@ use crate::{Result, service, database::KeyValueDatabase, Error, utils, services} pub const COUNTER: &[u8] = b"c"; #[async_trait] -impl service::globals::Data for Arc { +impl service::globals::Data for KeyValueDatabase { fn next_count(&self) -> Result { utils::u64_from_bytes(&self.global.increment(COUNTER)?) .map_err(|_| Error::bad_database("Count has invalid bytes.")) diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index c59ed36..8171451 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,10 +1,10 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; -impl service::key_backups::Data for Arc { +impl service::key_backups::Data for KeyValueDatabase { fn create_backup( &self, user_id: &UserId, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 1726755..f024487 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::api::client::error::ErrorKind; use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -impl service::media::Data for Arc { +impl service::media::Data for KeyValueDatabase { fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 85d1d86..b05e47b 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::{service, database::KeyValueDatabase, Error, Result}; -impl service::pusher::Data for Arc { +impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 437902d..0aa8dd4 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::alias::Data for Arc { +impl 
service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 2dffb04..49d3956 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -2,7 +2,7 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service, database::KeyValueDatabase, Result, utils}; -impl service::rooms::auth_chain::Data for Arc { +impl service::rooms::auth_chain::Data for KeyValueDatabase { fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { // Check RAM cache if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 864e75e..727004e 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::directory::Data for Arc { +impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index 03e4219..b5007f8 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -2,8 +2,6 @@ mod presence; mod typing; mod read_receipt; -use std::sync::Arc; - use crate::{service, database::KeyValueDatabase}; -impl service::rooms::edus::Data for Arc {} +impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 5aeb147..1477c28 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,10 +1,10 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::edus::presence::Data for Arc { +impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 7fcb8ac..a12e265 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,10 +1,10 @@ -use std::{mem, sync::Arc}; +use std::mem; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::read_receipt::Data for Arc { +impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 7f3526d..b7d3596 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,10 +1,10 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use ruma::{UserId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; 
-impl service::rooms::edus::typing::Data for Arc { +impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index b16657a..133e1d0 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, RoomId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::lazy_loading::Data for Arc { +impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 560beb9..72f6251 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::RoomId; use crate::{service, database::KeyValueDatabase, Result, services}; -impl service::rooms::metadata::Data for Arc { +impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match services().rooms.short.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), @@ -19,4 +17,18 @@ impl service::rooms::metadata::Data for Arc { .filter(|(k, _)| k.starts_with(&prefix)) .is_some()) } + + fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + if disabled { + self.disabledroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.disabledroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } } diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 97c29e5..406943e 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -15,8 +15,6 @@ mod state_compressor; mod timeline; mod user; -use std::sync::Arc; - use crate::{database::KeyValueDatabase, service}; -impl service::rooms::Data for Arc {} +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index b1ae816..aa97544 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; -impl service::rooms::outlier::Data for Arc { +impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f5e8f76..f3ac414 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -4,7 +4,7 @@ use ruma::{RoomId, EventId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::pdu_metadata::Data for Arc { +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 7b8d278..dfbdbc6 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,10 +1,10 @@ -use std::{mem::size_of, sync::Arc}; +use std::mem::size_of; use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Result, services}; -impl service::rooms::search::Data for Arc { +impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index 9a302b5..ecd12da 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,6 +1,227 @@ use std::sync::Arc; -use crate::{database::KeyValueDatabase, service}; +use ruma::{EventId, events::StateEventType, RoomId}; -impl service::rooms::short::Data for Arc { +use crate::{Result, database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::short::Data for KeyValueDatabase { + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result { + if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { + return Ok(*short); + } + + let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => utils::u64_from_bytes(&shorteventid) + .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + None => { + let shorteventid = services().globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid + } + }; + + self.eventidshort_cache + .lock() + .unwrap() + .insert(event_id.to_owned(), short); + + Ok(short) + } + + fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result> { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(Some(*short)); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = self + .statekey_shortstatekey + .get(&statekey)? 
+ .map(|shortstatekey| { + utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + }) + .transpose()?; + + if let Some(s) = short { + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), s); + } + + Ok(short) + } + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(*short); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, + None => { + let shortstatekey = services().globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &statekey)?; + shortstatekey + } + }; + + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + + Ok(short) + } + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + if let Some(id) = self + .shorteventid_cache + .lock() + .unwrap() + .get_mut(&shorteventid) + { + return Ok(Arc::clone(id)); + } + + let bytes = self + .shorteventid_eventid + .get(&shorteventid.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + + let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; + + self.shorteventid_cache + .lock() + .unwrap() + .insert(shorteventid, Arc::clone(&event_id)); + + Ok(event_id) + } + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + if let Some(id) = self + .shortstatekey_cache + .lock() + .unwrap() + .get_mut(&shortstatekey) + { + return Ok(id.clone()); + } + + let bytes = self + .shortstatekey_statekey + .get(&shortstatekey.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + + let mut parts = bytes.splitn(2, |&b| b == 0xff); + let eventtype_bytes = parts.next().expect("split always returns one entry"); + let statekey_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; + + let event_type = + StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; + + let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { + Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") + })?; + + let result = (event_type, state_key); + + self.shortstatekey_cache + .lock() + .unwrap() + .insert(shortstatekey, result.clone()); + + Ok(result) + } + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = services().globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }) + } + + fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = services().globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 527c240..b2822b3 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -6,7 +6,7 @@ use std::fmt::Debug; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::state::Data for Arc { +impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 9af45db..4d5bd4a 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] -impl service::rooms::state_accessor::Data for Arc { +impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? 
diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index bdb8cf8..5f05485 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; use crate::{service, database::KeyValueDatabase, services, Result}; -impl service::rooms::state_cache::Data for Arc { +impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index e1c0280..aee1890 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,8 +1,8 @@ -use std::{collections::HashSet, mem::size_of, sync::Arc}; +use std::{collections::HashSet, mem::size_of}; use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; -impl service::rooms::state_compressor::Data for Arc { +impl service::rooms::state_compressor::Data for KeyValueDatabase { fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 2d334b9..0b7286b 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -5,7 +5,27 @@ use tracing::error; use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; -impl service::rooms::timeline::Data for Arc { +impl service::rooms::timeline::Data for KeyValueDatabase { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + let prefix = services().rooms.short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + // Look for PDUs in that room. + self.pduid_pdu + .iter_from(&prefix, false) + .filter(|(k, _)| k.starts_with(&prefix)) + .map(|(_, pdu)| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid first PDU in db.")) + .map(Arc::new) + }) + .next() + .transpose() + } + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 4d20b00..3759bda 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, RoomId}; use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; -impl service::rooms::user::Data for Arc { +impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); @@ -104,13 +102,13 @@ impl service::rooms::user::Data for Arc { }); // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) + Ok(Box::new(Box::new(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) 
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) + })))) } } diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 7fa6908..a63b3c5 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, TransactionId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::transaction_ids::Data for Arc { +impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 8752e55..cf242de 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; use crate::{database::KeyValueDatabase, service, Error, Result}; -impl service::uiaa::Data for Arc { +impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 1ac85b3..55a518d 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,11 @@ -use std::{mem::size_of, collections::BTreeMap, sync::Arc}; +use std::{mem::size_of, collections::BTreeMap}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; -impl service::users::Data for Arc { +impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) @@ -113,7 +113,7 @@ impl service::users::Data for Arc { /// Hash and set the user's password to the Argon2 hash fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { + if let Ok(hash) = utils::calculate_password_hash(password) { self.userid_password .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) diff --git a/src/database/mod.rs b/src/database/mod.rs index 35922f0..6868467 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -238,8 +238,8 @@ impl KeyValueDatabase { } /// Load an existing database or create a new one. - pub async fn load_or_create(config: &Config) -> Result<()> { - Self::check_db_setup(config)?; + pub async fn load_or_create(config: Config) -> Result<()> { + Self::check_db_setup(&config)?; if !Path::new(&config.database_path).exists() { std::fs::create_dir_all(&config.database_path) @@ -251,19 +251,19 @@ impl KeyValueDatabase { #[cfg(not(feature = "sqlite"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "sqlite")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) } "rocksdb" => { #[cfg(not(feature = "rocksdb"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "rocksdb")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) 
} "persy" => { #[cfg(not(feature = "persy"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "persy")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) } _ => { return Err(Error::BadConfig("Database backend not found.")); @@ -402,7 +402,7 @@ impl KeyValueDatabase { }); - let services_raw = Box::new(Services::build(Arc::clone(&db))); + let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); @@ -825,7 +825,7 @@ impl KeyValueDatabase { info!( "Loaded {} database with version {}", - config.database_backend, latest_database_version + services().globals.config.database_backend, latest_database_version ); } else { services() @@ -837,7 +837,7 @@ impl KeyValueDatabase { warn!( "Created new {} database with version {}", - config.database_backend, latest_database_version + services().globals.config.database_backend, latest_database_version ); } @@ -866,7 +866,7 @@ impl KeyValueDatabase { .sending .start_handler(sending_receiver); - Self::start_cleanup_task(config).await; + Self::start_cleanup_task().await; Ok(()) } @@ -888,8 +888,8 @@ impl KeyValueDatabase { res } - #[tracing::instrument(skip(config))] - pub async fn start_cleanup_task(config: &Config) { + #[tracing::instrument] + pub async fn start_cleanup_task() { use tokio::time::interval; #[cfg(unix)] @@ -898,7 +898,7 @@ impl KeyValueDatabase { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64); + let timer_interval = Duration::from_secs(services().globals.config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 9785478..1289f7a 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -18,7 +18,7 @@ use tracing::error; use crate::{service::*, services, utils, Error, Result}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 32a709c..0b14314 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -426,7 +426,7 @@ impl Service { Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id]) + let count = services().rooms.auth_chain.get_auth_chain(room_id, vec![event_id]) .await? 
.count(); let elapsed = start.elapsed(); @@ -615,14 +615,12 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - todo!(); - //services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - //RoomMessageEventContent::text_plain("Room disabled.") + services().rooms.metadata.disable_room(&room_id, true); + RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - todo!(); - //services().rooms.disabledroomids.remove(room_id.as_bytes())?; - //RoomMessageEventContent::text_plain("Room enabled.") + services().rooms.metadata.disable_room(&room_id, false); + RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { leave_rooms, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 8fd69df..de8d1aa 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -35,7 +35,7 @@ type SyncHandle = ( ); pub struct Service { - pub db: Box, + pub db: Arc, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -92,7 +92,7 @@ impl Default for RotationHandler { impl Service { pub fn load( - db: Box, + db: Arc, config: Config, ) -> Result { let keypair = db.load_keypair(); diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 4bd9efd..a3bed71 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -13,7 +13,7 @@ use ruma::{ use std::{collections::BTreeMap, sync::Arc}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index f86251f..d3dd2bd 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -16,7 +16,7 @@ pub struct FileMeta { } pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/mod.rs b/src/service/mod.rs index a1a728c..a772c1d 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,4 +1,9 @@ -use std::sync::Arc; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex}, +}; + +use crate::{Result, Config}; pub mod account_data; pub mod admin; @@ -30,20 +35,73 @@ pub struct Services { } impl Services { - pub fn build(db: Arc) -> Self { - Self { + pub fn build< + D: appservice::Data + + pusher::Data + + rooms::Data + + transaction_ids::Data + + uiaa::Data + + users::Data + + account_data::Data + + globals::Data + + key_backups::Data + + media::Data, + >( + db: Arc, config: Config + ) -> Result { + Ok(Self { appservice: appservice::Service { db: db.clone() }, pusher: pusher::Service { db: db.clone() }, - rooms: rooms::Service { db: Arc::clone(&db) }, - transaction_ids: transaction_ids::Service { db: Arc::clone(&db) }, - uiaa: uiaa::Service { db: Arc::clone(&db) }, - users: users::Service { db: Arc::clone(&db) }, - account_data: account_data::Service { db: Arc::clone(&db) }, - admin: admin::Service { db: Arc::clone(&db) }, - globals: globals::Service { db: Arc::clone(&db) }, - key_backups: key_backups::Service { db: Arc::clone(&db) }, - media: media::Service { db: Arc::clone(&db) }, - sending: sending::Service { db: Arc::clone(&db) }, - } + rooms: rooms::Service { + alias: rooms::alias::Service { db: db.clone() }, + auth_chain: rooms::auth_chain::Service { db: db.clone() }, + directory: rooms::directory::Service { db: db.clone() }, + edus: rooms::edus::Service { + presence: rooms::edus::presence::Service { db: db.clone() }, + read_receipt: rooms::edus::read_receipt::Service { db: db.clone() }, + typing: rooms::edus::typing::Service { db: db.clone() }, 
+ }, + event_handler: rooms::event_handler::Service, + lazy_loading: rooms::lazy_loading::Service { + db: db.clone(), + lazy_load_waiting: Mutex::new(HashMap::new()), + }, + metadata: rooms::metadata::Service { db: db.clone() }, + outlier: rooms::outlier::Service { db: db.clone() }, + pdu_metadata: rooms::pdu_metadata::Service { db: db.clone() }, + search: rooms::search::Service { db: db.clone() }, + short: rooms::short::Service { db: db.clone() }, + state: rooms::state::Service { db: db.clone() }, + state_accessor: rooms::state_accessor::Service { db: db.clone() }, + state_cache: rooms::state_cache::Service { db: db.clone() }, + state_compressor: rooms::state_compressor::Service { db: db.clone() }, + timeline: rooms::timeline::Service { db: db.clone() }, + user: rooms::user::Service { db: db.clone() }, + }, + transaction_ids: transaction_ids::Service { + db: db.clone() + }, + uiaa: uiaa::Service { + db: db.clone() + }, + users: users::Service { + db: db.clone() + }, + account_data: account_data::Service { + db: db.clone() + }, + admin: admin::Service { sender: todo!() }, + globals: globals::Service::load(db.clone(), config)?, + key_backups: key_backups::Service { + db: db.clone() + }, + media: media::Service { + db: db.clone() + }, + sending: sending::Service { + maximum_requests: todo!(), + sender: todo!(), + }, + }) } } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index ef5888f..65fb367 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{RoomAliasId, RoomId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 5fe0e3e..e35094b 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,12 +1,14 @@ mod data; -use std::{sync::Arc, collections::HashSet}; +use std::{sync::Arc, collections::{HashSet, BTreeSet}}; pub use data::Data; +use ruma::{RoomId, EventId, api::client::error::ErrorKind}; +use tracing::log::warn; -use crate::Result; +use crate::{Result, services, Error}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -22,4 +24,131 @@ impl Service { pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { self.db.cache_auth_chain(key, auth_chain) } + + #[tracing::instrument(skip(self, starting_events))] + pub async fn get_auth_chain<'a>( + &self, + room_id: &RoomId, + starting_events: Vec>, + ) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = services().rooms.short.get_or_create_shorteventid(&id)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? 
{
+                hits += 1;
+                full_auth_chain.extend(cached.iter().copied());
+                continue;
+            }
+            misses += 1;
+
+            let mut chunk_cache = HashSet::new();
+            let mut hits2 = 0;
+            let mut misses2 = 0;
+            let mut i = 0;
+            for (sevent_id, event_id) in chunk {
+                if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? {
+                    hits2 += 1;
+                    chunk_cache.extend(cached.iter().copied());
+                } else {
+                    misses2 += 1;
+                    let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?);
+                    services().rooms
+                        .auth_chain
+                        .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?;
+                    println!(
+                        "cache missed event {} with auth chain len {}",
+                        event_id,
+                        auth_chain.len()
+                    );
+                    chunk_cache.extend(auth_chain.iter());
+
+                    i += 1;
+                    if i % 100 == 0 {
+                        tokio::task::yield_now().await;
+                    }
+                };
+            }
+            println!(
+                "chunk missed with len {}, event hits2: {}, misses2: {}",
+                chunk_cache.len(),
+                hits2,
+                misses2
+            );
+            let chunk_cache = Arc::new(chunk_cache);
+            services().rooms
+                .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?;
+            full_auth_chain.extend(chunk_cache.iter());
+        }
+
+        println!(
+            "total: {}, chunk hits: {}, misses: {}",
+            full_auth_chain.len(),
+            hits,
+            misses
+        );
+
+        Ok(full_auth_chain
+            .into_iter()
+            .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok()))
+    }
+
+    #[tracing::instrument(skip(self, event_id))]
+    fn get_auth_chain_inner(
+        &self,
+        room_id: &RoomId,
+        event_id: &EventId,
+    ) -> Result<HashSet<u64>> {
+        let mut todo = vec![Arc::from(event_id)];
+        let mut found = HashSet::new();
+
+        while let Some(event_id) = todo.pop() {
+            match services().rooms.timeline.get_pdu(&event_id) {
+                Ok(Some(pdu)) => {
+                    if pdu.room_id != room_id {
+                        return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
+                    }
+                    for auth_event in &pdu.auth_events {
+                        let sauthevent = services()
+                            .rooms.short
+                            .get_or_create_shorteventid(auth_event)?;
+
+                        if !found.contains(&sauthevent) {
+                            found.insert(sauthevent);
+                            todo.push(auth_event.clone());
+                        }
+                    }
+                }
+                Ok(None) => {
+                    warn!("Could not find pdu mentioned in auth events: {}", event_id);
+                }
+                Err(e) => {
+                    warn!("Could not load event in auth chain: {} {}", event_id, e);
+                }
+            }
+        }
+
+        Ok(found)
+    }
 }
diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs
index fb28994..e85afef 100644
--- a/src/service/rooms/directory/mod.rs
+++ b/src/service/rooms/directory/mod.rs
@@ -1,11 +1,13 @@
 mod data;
+use std::sync::Arc;
+
 pub use data::Data;
 use ruma::RoomId;
 
 use crate::Result;
 
 pub struct Service {
-    db: Box<dyn Data>,
+    db: Arc<dyn Data>,
 }
 
 impl Service {
diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index 73b7b5a..d657897 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -1,5 +1,5 @@
 mod data;
-use std::collections::HashMap;
+use std::{collections::HashMap, sync::Arc};
 
 pub use data::Data;
 use ruma::{RoomId, UserId, events::presence::PresenceEvent};
@@ -7,7 +7,7 @@ use ruma::{RoomId, UserId, events::presence::PresenceEvent};
 use crate::Result;
 
 pub struct Service {
-    db: Box<dyn Data>,
+    db: Arc<dyn Data>,
 }
 
 impl Service {
diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs
index 2a4c0b7..1770877 100644
--- a/src/service/rooms/edus/read_receipt/mod.rs
+++ b/src/service/rooms/edus/read_receipt/mod.rs
@@ -1,11 +1,13 @@
 mod data;
+use std::sync::Arc;
+
 pub use data::Data;
 use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw};
 
 use crate::Result;
 
 pub struct
Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 16a135f..3752056 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ac3cca6..79f93b5 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -72,13 +72,15 @@ impl Service { )); } - services() + if services() .rooms - .is_disabled(room_id)? - .ok_or(Error::BadRequest( + .metadata + .is_disabled(room_id)? { + return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", - ))?; + )); + } // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { @@ -111,7 +113,7 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (sorted_prev_events, eventid_info) = self.fetch_unknown_prev_events( + let (sorted_prev_events, mut eventid_info) = self.fetch_unknown_prev_events( origin, &create_event, room_id, @@ -122,14 +124,15 @@ impl Service { let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - services() + if services() .rooms - .is_disabled(room_id)? - .ok_or(Error::BadRequest( + .metadata + .is_disabled(room_id)? { + return Err(Error::BadRequest( ErrorKind::Forbidden, - "Federation of - this room is currently disabled on this server.", - ))?; + "Federation of this room is currently disabled on this server.", + )); + } if let Some((time, tries)) = services() .globals @@ -279,14 +282,14 @@ impl Service { Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); } Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), + Err(_) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")), } } Ok(ruma::signatures::Verified::All) => value, @@ -480,7 +483,7 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = services().rooms.get_pdu(prev_eventid) { + let prev_event = if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { pdu } else { okay = false; @@ -488,7 +491,7 @@ impl Service { }; let sstatehash = - if let Ok(Some(s)) = services().rooms.pdu_shortstatehash(prev_eventid) { + if let Ok(Some(s)) = services().rooms.state_accessor.pdu_shortstatehash(prev_eventid) { s } else { okay = false; @@ -525,7 +528,7 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = services().rooms.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType 
state.insert((ty.to_string().into(), st_key), id.clone()); @@ -539,7 +542,7 @@ impl Service { services() .rooms .auth_chain - .get_auth_chain(room_id, starting_events, services()) + .get_auth_chain(room_id, starting_events) .await? .collect(), ); @@ -551,7 +554,7 @@ impl Service { let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = services().rooms.get_pdu(id); + let res = services().rooms.timeline.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -677,7 +680,7 @@ impl Service { .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) }, ) - .map_err(|_e| "Auth check failed.".to_owned())?; + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if !check_result { return Err(Error::bad_database("Event has failed auth check with state at the event.")); @@ -714,7 +717,7 @@ impl Service { // Only keep those extremities were not referenced yet extremities - .retain(|id| !matches!(services().rooms.is_event_referenced(room_id, id), Ok(true))); + .retain(|id| !matches!(services().rooms.pdu_metadata.is_event_referenced(room_id, id), Ok(true))); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event @@ -722,7 +725,8 @@ impl Service { .map(|(shortstatekey, id)| { services() .rooms - .compress_state_event(*shortstatekey, id)? + .state_compressor + .compress_state_event(*shortstatekey, id) }) .collect::>()?; @@ -731,6 +735,7 @@ impl Service { let auth_events = services() .rooms + .state .get_auth_events( room_id, &incoming_pdu.kind, @@ -744,10 +749,10 @@ impl Service { &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - )?; + ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if soft_fail { - self.append_incoming_pdu( + services().rooms.timeline.append_incoming_pdu( &incoming_pdu, val, extremities.iter().map(std::ops::Deref::deref), @@ -760,8 +765,9 @@ impl Service { warn!("Event was soft failed: {:?}", incoming_pdu); services() .rooms + .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id)?; - return Err("Event has been soft failed".into()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); } if incoming_pdu.state_key.is_some() { @@ -798,14 +804,14 @@ impl Service { "Found extremity pdu with no statehash in db: {:?}", leaf_pdu ); - "Found pdu with no statehash in db.".to_owned() + Error::bad_database("Found pdu with no statehash in db.") })?, leaf_pdu, ); } _ => { error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); + return Err(Error::BadDatabase("Missing state snapshot.")); } } } @@ -835,7 +841,7 @@ impl Service { let mut update_state = false; // 14. Use state resolution to find new room state let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); + panic!("State is empty"); } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { info!("State resolution trivial"); // There was only one state, so it has to be the room's current state (because that is @@ -845,7 +851,8 @@ impl Service { .map(|(k, id)| { services() .rooms - .compress_state_event(*k, id)? + .state_compressor + .compress_state_event(*k, id) }) .collect::>()? } else { @@ -877,9 +884,8 @@ impl Service { .filter_map(|(k, id)| { services() .rooms - .get_statekey_from_short(k)? 
- // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType + .short + .get_statekey_from_short(k) .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) .ok() }) @@ -895,7 +901,7 @@ impl Service { &fork_states, auth_chain_sets, |id| { - let res = services().rooms.get_pdu(id); + let res = services().rooms.timeline.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -904,7 +910,7 @@ impl Service { ) { Ok(new_state) => new_state, Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); + return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); } }; @@ -921,6 +927,7 @@ impl Service { .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; services() .rooms + .state_compressor .compress_state_event(shortstatekey, &event_id) }) .collect::>()? @@ -929,9 +936,11 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); + let (sstatehash, _, _) = services().rooms.state_compressor.save_state(room_id, new_room_state)?; services() .rooms - .force_state(room_id, new_room_state)?; + .state + .set_room_state(room_id, sstatehash, &state_lock)?; } } @@ -942,7 +951,7 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let pdu_id = self + let pdu_id = services().rooms.timeline .append_incoming_pdu( &incoming_pdu, val, @@ -1017,7 +1026,7 @@ impl Service { // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = services().rooms.get_pdu(id) { + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); continue; @@ -1040,7 +1049,7 @@ impl Service { tokio::task::yield_now().await; } - if let Ok(Some(_)) = services().rooms.get_pdu(&next_id) { + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } @@ -1140,6 +1149,7 @@ impl Service { let first_pdu_in_room = services() .rooms + .timeline .first_pdu_in_room(room_id)? 
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 90dad21..760fffe 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::Mutex}; +use std::{collections::{HashSet, HashMap}, sync::{Mutex, Arc}}; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; @@ -7,7 +7,7 @@ use ruma::{DeviceId, UserId, RoomId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 9444db4..bc31ee8 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -3,4 +3,6 @@ use crate::Result; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; + fn is_disabled(&self, room_id: &RoomId) -> Result; + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 3c21dd1..b6cccd1 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::RoomId; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -14,4 +16,12 @@ impl Service { pub fn exists(&self, room_id: &RoomId) -> Result { self.db.exists(room_id) } + + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + self.db.is_disabled(room_id) + } + + pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + self.db.disable_room(room_id, disabled) + } } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 5493ce4..d36adc4 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{Result, PduEvent}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index a81d05c..4724f85 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::{RoomId, EventId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index dc57191..ec1ad53 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use crate::Result; use ruma::RoomId; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index bc2b28f..07a2712 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,2 +1,40 @@ +use std::sync::Arc; + +use ruma::{EventId, events::StateEventType, RoomId}; +use crate::Result; + pub trait Data: Send + Sync { + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result; + + fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result>; + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result; + + fn get_eventid_from_short(&self, 
shorteventid: u64) -> Result>; + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(u64, bool)>; + + fn get_shortroomid(&self, room_id: &RoomId) -> Result>; + + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a024dc6..08ce5c5 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -7,7 +7,7 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, Error, utils, services}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -15,29 +15,7 @@ impl Service { &self, event_id: &EventId, ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = services().globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) + self.db.get_or_create_shorteventid(event_id) } pub fn get_shortstatekey( @@ -45,36 +23,7 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) + self.db.get_shortstatekey(event_type, state_key) } pub fn get_or_create_shortstatekey( @@ -82,152 +31,33 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? 
{ - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = services().globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) + self.db.get_or_create_shortstatekey(event_type, state_key) } pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) + self.db.get_eventid_from_short(shorteventid) } pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) + self.db.get_statekey_from_short(shortstatekey) } /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( + pub fn get_or_create_shortstatehash( &self, state_hash: &[u8], ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = services().globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) + self.db.get_or_create_shortstatehash(state_hash) } pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() + self.db.get_shortroomid(room_id) } pub fn get_or_create_shortroomid( &self, room_id: &RoomId, ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = services().globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) + self.db.get_or_create_shortroomid(room_id) } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 5385978..79807c5 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,9 +1,10 @@ mod data; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::{HashSet, HashMap}, sync::Arc}; pub use data::Data; -use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; +use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, RoomEventType}, UserId, EventId, serde::Raw, RoomVersionId, state_res::{StateMap, self}}; use serde::Deserialize; +use tokio::sync::MutexGuard; use tracing::warn; use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; @@ -11,7 +12,7 @@ use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -97,7 +98,7 @@ impl Service { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result { - let shorteventid = services().short.get_or_create_shorteventid(event_id)?; + let shorteventid = services().rooms.short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -109,11 +110,11 @@ impl Service { ); let (shortstatehash, already_existed) = - services().short.get_or_create_shortstatehash(&state_hash)?; + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().room.state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -132,7 +133,7 @@ impl Service { } else { (state_ids_compressed, HashSet::new()) }; - services().room.state_compressor.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -141,7 +142,7 @@ impl Service { )?; } - self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + self.db.set_event_state(shorteventid, shortstatehash)?; Ok(shortstatehash) } @@ -155,25 +156,24 @@ impl Service { &self, new_pdu: &PduEvent, ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id)?; + let shorteventid = services().rooms.short.get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; + 
self.db.set_event_state(shorteventid, p)?; } if let Some(state_key) = &new_pdu.state_key { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; - let shortstatekey = self.get_or_create_shortstatekey( + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &new_pdu.kind.to_string().into(), state_key, )?; - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id)?; + let new = services().rooms.state_compressor.compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -199,7 +199,7 @@ impl Service { statediffremoved.insert(*replaces); } - self.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -221,16 +221,16 @@ impl Service { let mut state = Vec::new(); // Add recommended events if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get( + if let Some(e) = services().rooms.state_accessor.room_state_get( &invite_event.room_id, &StateEventType::RoomCanonicalAlias, "", @@ -238,16 +238,16 @@ impl Service { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get( + if let Some(e) = services().rooms.state_accessor.room_state_get( &invite_event.room_id, &StateEventType::RoomMember, invite_event.sender.as_str(), @@ -260,17 +260,16 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64, + mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_room_state(room_id, shortstatehash, mutex_lock) } /// Returns the room's version. #[tracing::instrument(skip(self))] pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -294,4 +293,50 @@ impl Service { pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { self.db.get_forward_extremities(room_id) } + + /// This fetches auth events from the current state. 
+ #[tracing::instrument(skip(self))] + pub fn get_auth_events( + &self, + room_id: &RoomId, + kind: &RoomEventType, + sender: &UserId, + state_key: Option<&str>, + content: &serde_json::value::RawValue, + ) -> Result>> { + let shortstatehash = + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; + + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) + .expect("content is a valid JSON object"); + + let mut sauthevents = auth_events + .into_iter() + .filter_map(|(event_type, state_key)| { + services().rooms.short.get_shortstatekey(&event_type.to_string().into(), &state_key) + .ok() + .flatten() + .map(|s| (s, (event_type, state_key))) + }) + .collect::>(); + + let full_state = services().rooms.state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + + Ok(full_state + .into_iter() + .filter_map(|compressed| services().rooms.state_compressor.parse_compressed_state_event(compressed).ok()) + .filter_map(|(shortstatekey, event_id)| { + sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) + }) + .filter_map(|(k, event_id)| services().rooms.timeline.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) + .collect()) + } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 1911e52..fd29948 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::StateEventType, RoomId, EventId}; use crate::{Result, PduEvent}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -45,7 +45,7 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.db.pdu_state_get(shortstatehash, event_type, state_key) + self.db.state_get(shortstatehash, event_type, state_key) } /// Returns the state hash for this pdu. diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 18d1123..ab6a0d6 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -3,12 +3,23 @@ use std::{collections::HashSet, sync::Arc}; pub use data::Data; use regex::Regex; -use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; +use ruma::{ + events::{ + direct::{DirectEvent, DirectEventContent}, + ignored_user_list::IgnoredUserListEvent, + room::{create::RoomCreateEventContent, member::MembershipState}, + tag::{TagEvent, TagEventContent}, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, RoomAccountDataEvent, RoomAccountDataEventContent, + }, + serde::Raw, + RoomId, ServerName, UserId, +}; -use crate::{Result, services, utils, Error}; +use crate::{services, utils, Error, Result}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -45,7 +56,9 @@ impl Service { self.db.mark_as_once_joined(user_id, room_id)?; // Check if the room has a predecessor - if let Some(predecessor) = self + if let Some(predecessor) = services() + .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
.and_then(|create| serde_json::from_str(create.content.get()).ok()) .and_then(|content: RoomCreateEventContent| content.predecessor) @@ -76,27 +89,41 @@ impl Service { // .ok(); // Copy old tags to new room - if let Some(tag_event) = services().account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - services().account_data + if let Some(tag_event) = services() + .account_data + .get( + Some(&predecessor.room_id), + user_id, + RoomAccountDataEventType::Tag, + )? + .map(|event| { + serde_json::from_str(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + { + services() + .account_data .update( Some(room_id), user_id, RoomAccountDataEventType::Tag, - &tag_event, + &tag_event?, ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = services().account_data.get::( + if let Some(mut direct_event) = services().account_data.get( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - )? { + )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + { + let direct_event = direct_event?; let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -111,7 +138,7 @@ impl Service { None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, + &serde_json::to_value(&direct_event).expect("to json always works"), )?; } }; @@ -124,13 +151,17 @@ impl Service { // We want to know if the sender is ignored by the receiver let is_ignored = services() .account_data - .get::( + .get( None, // Ignored users are in global account data user_id, // Receiver GlobalAccountDataEventType::IgnoredUserList .to_string() .into(), )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }).transpose()? .map_or(false, |ignored| { ignored .content @@ -200,10 +231,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - ) -> Result>>> { + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { let maybe = self .our_real_users_cache .read() diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index ab9f427..0c32c4b 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -9,7 +9,7 @@ use crate::{Result, utils, services}; use self::data::StateDiff; pub struct Service { - db: Box, + db: Arc, } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -67,7 +67,7 @@ impl Service { ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( - &self + &services().rooms.short .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); @@ -218,7 +218,7 @@ impl Service { HashSet, // added HashSet)> // removed { - let previous_shortstatehash = self.db.current_shortstatehash(room_id)?; + let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( &new_state_ids_compressed diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index d073e86..2220b5f 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -5,6 +5,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; use crate::{Result, PduEvent}; pub trait Data: Send + Sync { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index e8f4205..7817225 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -21,33 +21,14 @@ use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduE use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Box, + db: Arc, } impl Service { - /* - /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() + self.db.first_pdu_in_room(room_id) } - */ #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { @@ -681,7 +662,8 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. 
#[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( + pub fn append_incoming_pdu<'a>( + &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: impl IntoIterator + Clone + Debug, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 7c7dfae..394a550 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{RoomId, UserId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 8ab557f..fde251b 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -448,14 +448,6 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(keys))] - fn calculate_hash(keys: &[&[u8]]) -> Vec { - // We only hash the pdu's event ids, not the whole pdu - let bytes = keys.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().to_owned() - } - /// Cleanup event data /// Used for instance after we remove an appservice registration /// diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index a9c516c..d7066e2 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 01c0d2f..73b2273 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,4 +1,6 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; @@ -7,7 +9,7 @@ use tracing::error; use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b13ae1f..2cf1876 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::BTreeMap, mem}; +use std::{collections::BTreeMap, mem, sync::Arc}; pub use data::Data; use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; @@ -7,7 +7,7 @@ use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTi use crate::{Result, Error, services}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 734da2a..0ee3ae8 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -3,6 +3,7 @@ pub mod error; use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; +use ring::digest; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, fmt, @@ -59,7 +60,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -pub fn calculate_hash(password: &str) -> Result { +pub fn calculate_password_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, ..Default::default() @@ -69,6 +70,15 @@ pub fn 
calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } +#[tracing::instrument(skip(keys))] +pub fn calculate_hash(keys: &[&[u8]]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() +} + + pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, From 33a2b2b7729bb40253fd174d99ad773869b5ecfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:33:55 +0200 Subject: [PATCH 1216/1727] 37 errors left --- src/api/client_server/membership.rs | 3 +- src/database/key_value/rooms/search.rs | 19 +- src/database/key_value/rooms/state.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 488 +++++++++++++++++++- src/database/key_value/rooms/timeline.rs | 43 +- src/service/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 6 +- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state/mod.rs | 8 + src/service/rooms/state_accessor/mod.rs | 8 +- src/service/rooms/state_cache/data.rs | 95 +++- src/service/rooms/state_cache/mod.rs | 368 +-------------- src/service/rooms/state_compressor/mod.rs | 26 +- src/service/rooms/timeline/data.rs | 5 + src/service/rooms/timeline/mod.rs | 120 +++-- src/service/sending/mod.rs | 4 +- src/service/transaction_ids/mod.rs | 19 +- src/service/uiaa/mod.rs | 20 +- src/service/users/mod.rs | 4 +- 19 files changed, 764 insertions(+), 482 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index f07f2ad..c930ce4 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -649,7 +649,8 @@ async fn join_room_by_id_helper( services().rooms.timeline.append_pdu( &parsed_pdu, join_event, - iter::once(&*parsed_pdu.event_id), + vec![(*parsed_pdu.event_id).to_owned()], + &state_lock )?; // We set the room state after inserting the pdu, so that we never have a moment in time diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index dfbdbc6..41df544 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -54,19 +54,20 @@ impl service::rooms::search::Data for KeyValueDatabase { .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); - Ok(utils::common_elements(iterators, |a, b| { + let common_elements = match utils::common_elements(iterators, |a, b| { // We compare b with a because we reversed the iterator earlier b.cmp(a) - }) - .map(|iter| { - ( - Box::new(iter.map(move |id| { + }) { + Some(it) => it, + None => return Ok(None), + }; + + let mapped = common_elements.map(move |id| { let mut pduid = prefix_clone.clone(); pduid.extend_from_slice(&id); pduid - })), - words, - ) - })) + }); + + Ok(Some((Box::new(mapped), words))) } } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index b2822b3..90ac0d5 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -49,7 +49,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: &mut dyn Iterator, + event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_cache.rs 
b/src/database/key_value/rooms/state_cache.rs index 5f05485..4043bc4 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,6 +1,9 @@ -use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; +use std::{collections::HashSet, sync::Arc}; -use crate::{service, database::KeyValueDatabase, services, Result}; +use regex::Regex; +use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, ServerName}; + +use crate::{service, database::KeyValueDatabase, services, Result, Error, utils}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -75,4 +78,485 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { Ok(()) } + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut joined_servers = HashSet::new(); + let mut real_users = HashSet::new(); + + for joined in self.room_members(room_id).filter_map(|r| r.ok()) { + joined_servers.insert(joined.server_name().to_owned()); + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) + { + real_users.insert(joined); + } + joinedcount += 1; + } + + for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { + joined_servers.insert(invited.server_name().to_owned()); + invitedcount += 1; + } + + self.roomid_joinedcount + .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; + + self.roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.to_owned(), Arc::new(real_users)); + + self.appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + + Ok(()) + } + + #[tracing::instrument(skip(self, room_id))] + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + let maybe = self + .our_real_users_cache + .read() + .unwrap() + .get(room_id) + .cloned(); + if let Some(users) = maybe { + Ok(users) + } else { + self.update_joined_count(room_id)?; + Ok(Arc::clone( + self.our_real_users_cache + .read() + .unwrap() + .get(room_id) + .unwrap(), + )) + } + } + + #[tracing::instrument(skip(self, room_id, appservice))] + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result { + let maybe = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.0)) + .copied(); + + if let Some(b) = maybe { + Ok(b) + } else if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, services().globals.server_name()).ok() + }); + + let in_room = bridge_user_id + .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(room_id).any(|userid| { + userid.map_or(false, |userid| { + users.iter().any(|r| r.is_match(userid.as_str())) + }) + }); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + 
.insert(appservice.0.clone(), in_room); + + Ok(in_room) + } else { + Ok(false) + } + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + /// Returns an iterator of all servers participating in this room. + #[tracing::instrument(skip(self))] + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { + ServerName::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Server name in roomserverids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.serverroomids.get(&key).map(|o| o.is_some()) + } + + /// Returns an iterator of all rooms a server participates in (as far as we know). + #[tracing::instrument(skip(self))] + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box>> + 'a> { + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) + })) + } + + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn room_joined_count(&self, room_id: &RoomId) -> Result> { + self.roomid_joinedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn room_invited_count(&self, room_id: &RoomId) -> Result> { + self.roomid_invitedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all User IDs who ever joined a room. 
+ #[tracing::instrument(skip(self))] + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in room_useroncejoined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + })) + } + + /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(&key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) + } + + #[tracing::instrument(skip(self))] + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid leftcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box>> + 'a> { + Box::new(self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + })) + } + + /// Returns an iterator over all rooms a user was invited to. 
+ #[tracing::instrument(skip(self))] + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok((room_id, state)) + })) + } + + #[tracing::instrument(skip(self))] + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + + /// Returns an iterator over all rooms a user left. + #[tracing::instrument(skip(self))] + fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok((room_id, state)) + })) + } + + #[tracing::instrument(skip(self))] + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { 
+ let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + } } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 0b7286b..1723186 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -187,13 +187,29 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()> { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"))?; + + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), count); + + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &pdu_id)?; + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + + Ok(()) + } + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"))?; Ok(()) } else { Err(Error::BadRequest( @@ -306,4 +322,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok((pdu_id, pdu)) }))) } + + fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()> { + let notifies_batch = Vec::new(); + let highlights_batch = Vec::new(); + for user in notifies { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + notifies_batch.push(userroom_id); + } + for user in highlights { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + highlights_batch.push(userroom_id); + } + + self.userroomid_notificationcount + .increment_batch(&mut notifies_batch.into_iter())?; + self.userroomid_highlightcount + .increment_batch(&mut highlights_batch.into_iter())?; + Ok(()) + } } diff --git a/src/service/mod.rs b/src/service/mod.rs index a772c1d..daf4329 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -3,6 +3,8 @@ use std::{ sync::{Arc, Mutex}, }; +use lru_cache::LruCache; + use crate::{Result, Config}; pub mod account_data; @@ -74,8 +76,8 @@ impl Services { state: rooms::state::Service { db: db.clone() }, state_accessor: rooms::state_accessor::Service { db: db.clone() }, state_cache: rooms::state_cache::Service { db: db.clone() }, - state_compressor: rooms::state_compressor::Service { db: db.clone() }, - timeline: rooms::timeline::Service { db: db.clone() }, + state_compressor: rooms::state_compressor::Service { db: db.clone(), stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize,)) }, + timeline: rooms::timeline::Service { db: db.clone(), lasttimelinecount_cache: Mutex::new(HashMap::new()) }, user: rooms::user::Service { db: db.clone() }, }, transaction_ids: transaction_ids::Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 79f93b5..d6ec8e9 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -755,7 +755,7 @@ impl Service { 
services().rooms.timeline.append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(std::ops::Deref::deref), + extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, &state_lock, @@ -936,7 +936,7 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let (sstatehash, _, _) = services().rooms.state_compressor.save_state(room_id, new_room_state)?; + let sstatehash = services().rooms.state_compressor.save_state(room_id, new_room_state)?; services() .rooms .state @@ -955,7 +955,7 @@ impl Service { .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(std::ops::Deref::deref), + extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 20c177a..8eca21d 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -22,7 +22,7 @@ pub trait Data: Send + Sync { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: &mut dyn Iterator, + event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 79807c5..57a0e77 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -294,6 +294,14 @@ impl Service { self.db.get_forward_extremities(room_id) } + pub fn set_forward_extremities<'a>(&self, + room_id: &RoomId, + event_ids: Vec>, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_forward_extremities(room_id, event_ids, state_lock) + } + /// This fetches auth events from the current state. #[tracing::instrument(skip(self))] pub fn get_auth_events( diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index fd29948..a0f5523 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -13,17 +13,15 @@ pub struct Service { impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - self.db.state_full_ids(shortstatehash) + self.db.state_full_ids(shortstatehash).await } - #[tracing::instrument(skip(self))] pub async fn state_full( &self, shortstatehash: u64, ) -> Result>> { - self.db.state_full(shortstatehash) + self.db.state_full(shortstatehash).await } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -59,7 +57,7 @@ impl Service { &self, room_id: &RoomId, ) -> Result>> { - self.db.room_state_full(room_id) + self.db.room_state_full(room_id).await } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
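The recurring change in these hunks is to hold the storage backend behind a shared `Arc<dyn Data>` trait object and reduce the service methods to thin delegations into it. The following is a minimal, self-contained sketch of that pattern, not code from the patch; the `ExampleData`, `MemoryBackend` and `ExampleService` names are purely illustrative.

    use std::sync::Arc;

    // Storage behaviour lives behind a trait object so the service layer
    // stays independent of the concrete key-value backend.
    trait ExampleData: Send + Sync {
        fn get_count(&self, key: &str) -> Option<u64>;
    }

    // Hypothetical in-memory backend standing in for the real database type.
    struct MemoryBackend;

    impl ExampleData for MemoryBackend {
        fn get_count(&self, _key: &str) -> Option<u64> {
            Some(0)
        }
    }

    // The service owns a shared handle to the backend and simply delegates,
    // mirroring the `db: Arc<dyn Data>` fields introduced in these diffs.
    struct ExampleService {
        db: Arc<dyn ExampleData>,
    }

    impl ExampleService {
        fn get_count(&self, key: &str) -> Option<u64> {
            self.db.get_count(key)
        }
    }

    fn main() {
        let db: Arc<dyn ExampleData> = Arc::new(MemoryBackend);
        // Several services can share the same backend handle via Arc::clone,
        // the way service/mod.rs constructs each service with db.clone().
        let service = ExampleService { db: Arc::clone(&db) };
        assert_eq!(service.get_count("example"), Some(0));
    }

Using `Arc` rather than `Box` is what makes the `db: db.clone()` construction in `service/mod.rs` cheap: every `rooms::*` service ends up pointing at the same backend.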
diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index b9db721..950143f 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,4 +1,6 @@ -use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; +use std::{collections::HashSet, sync::Arc}; + +use ruma::{UserId, RoomId, serde::Raw, events::{AnyStrippedStateEvent, AnySyncStateEvent}, ServerName}; use crate::Result; pub trait Data: Send + Sync { @@ -6,4 +8,95 @@ pub trait Data: Send + Sync { fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; + + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>>; + + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result; + + /// Makes a user forget a room. + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; + + /// Returns an iterator of all servers participating in this room. + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; + + /// Returns an iterator of all rooms a server participates in (as far as we know). + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box>> + 'a>; + + /// Returns an iterator over all joined members of a room. + fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn room_joined_count(&self, room_id: &RoomId) -> Result>; + + fn room_invited_count(&self, room_id: &RoomId) -> Result>; + + /// Returns an iterator over all User IDs who ever joined a room. + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + /// Returns an iterator over all invited members of a room. + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns an iterator over all rooms this user joined. + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box>> + 'a>; + + /// Returns an iterator over all rooms a user was invited to. + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a>; + + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + /// Returns an iterator over all rooms a user left. 
+ fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a>; + + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index ab6a0d6..69bd832 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -191,65 +191,12 @@ impl Service { #[tracing::instrument(skip(self, room_id))] pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == services().globals.server_name() - && !services().users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) + self.db.update_joined_count(room_id) } #[tracing::instrument(skip(self, room_id))] pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } + self.db.get_our_real_users(room_id) } #[tracing::instrument(skip(self, room_id, appservice))] @@ -258,71 +205,13 @@ impl Service { room_id: &RoomId, appservice: &(String, serde_yaml::Value), ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } + self.db.appservice_in_room(room_id, appservice) } /// Makes a user 
forget a room. #[tracing::instrument(skip(self))] pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) + self.db.forget(room_id, user_id) } /// Returns an iterator of all servers participating in this room. @@ -331,31 +220,12 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) + self.db.room_servers(room_id) } #[tracing::instrument(skip(self))] pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) + self.db.server_in_room(server, room_id) } /// Returns an iterator of all rooms a server participates in (as far as we know). @@ -364,20 +234,7 @@ impl Service { &'a self, server: &ServerName, ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) + self.db.server_rooms(server) } /// Returns an iterator over all joined members of a room. @@ -386,44 +243,17 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) + self.db.room_members(room_id) } #[tracing::instrument(skip(self))] pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() + self.db.room_joined_count(room_id) } #[tracing::instrument(skip(self))] pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() + self.db.room_invited_count(room_id) } /// Returns an iterator over all User IDs who ever joined a room. 
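Throughout the key-value backend, composite keys are built by concatenating two IDs with a 0xff separator (for example `userroom_id = user_id ++ 0xff ++ room_id`) and read back by splitting on that byte. A small stand-alone sketch of the encoding and decoding, using plain strings in place of the ruma ID types:

    // Build a composite key the way the state_cache tables do:
    // <first id> 0xff <second id>.
    fn composite_key(first: &str, second: &str) -> Vec<u8> {
        let mut key = first.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(second.as_bytes());
        key
    }

    // Recover the trailing component, mirroring the `rsplit(|&b| b == 0xff)`
    // calls used when scanning a prefix: rsplit yields the last segment first.
    fn trailing_component(key: &[u8]) -> Option<String> {
        key.rsplit(|&b| b == 0xff)
            .next()
            .map(|bytes| String::from_utf8_lossy(bytes).into_owned())
    }

    fn main() {
        let key = composite_key("@alice:example.org", "!room:example.org");
        assert_eq!(
            trailing_component(&key).as_deref(),
            Some("!room:example.org")
        );
    }

The scheme relies on the IDs never containing a raw 0xff byte, so a prefix scan over `id ++ 0xff` returns exactly the rows belonging to that ID.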
@@ -432,24 +262,7 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) + self.db.room_useroncejoined(room_id) } /// Returns an iterator over all invited members of a room. @@ -458,54 +271,17 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) + self.db.room_members_invited(room_id) } #[tracing::instrument(skip(self))] pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) + self.db.get_invite_count(room_id, user_id) } #[tracing::instrument(skip(self))] pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() + self.db.get_left_count(room_id, user_id) } /// Returns an iterator over all rooms this user joined. @@ -514,21 +290,7 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) + self.db.rooms_joined(user_id) } /// Returns an iterator over all rooms a user was invited to. 
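The joined, invited and notification counters in these tables are persisted as 8-byte big-endian values: `to_be_bytes` on write and a `u64_from_bytes`-style helper on read. A rough sketch of that round trip with a local helper (the real crate has its own `utils::u64_from_bytes`):

    use std::convert::TryInto;

    // Decode an 8-byte big-endian value, failing on any other length.
    // This is the check a u64_from_bytes helper has to perform.
    fn u64_from_bytes(bytes: &[u8]) -> Result<u64, std::array::TryFromSliceError> {
        let array: [u8; 8] = bytes.try_into()?;
        Ok(u64::from_be_bytes(array))
    }

    fn main() {
        let joined_count: u64 = 42;

        // On write the counter is stored as its big-endian byte representation.
        let stored = joined_count.to_be_bytes().to_vec();

        // On read the bytes are converted back; a wrong length is treated
        // as a corrupt database row.
        assert_eq!(u64_from_bytes(&stored).unwrap(), 42);
        assert!(u64_from_bytes(&stored[..4]).is_err());
    }

Big-endian encoding keeps numeric order and lexicographic byte order in agreement, which is presumably also why the pdu ids built from `shortroomid` and the global count use `to_be_bytes`.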
@@ -537,29 +299,7 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) + self.db.rooms_invited(user_id) } #[tracing::instrument(skip(self))] @@ -568,19 +308,7 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() + self.db.invite_state(user_id, room_id) } #[tracing::instrument(skip(self))] @@ -589,19 +317,7 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() + self.db.left_state(user_id, room_id) } /// Returns an iterator over all rooms a user left. 
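`invite_state` and `left_state` persist the stripped state for a room as a single serde_json-encoded blob under the `user 0xff room` key and deserialize it again on read. A rough sketch of that round trip, using `serde_json::Value` in place of `Raw<AnyStrippedStateEvent>` so it only needs the serde_json crate:

    use serde_json::{json, Value};

    // Serialize the stripped-state list into one blob, as the
    // userroomid_invitestate / userroomid_leftstate trees do on write.
    fn encode_state(state: &[Value]) -> serde_json::Result<Vec<u8>> {
        serde_json::to_vec(state)
    }

    // Read the blob back; a parse failure corresponds to the
    // "Invalid state in userroomid_invitestate." database error above.
    fn decode_state(bytes: &[u8]) -> serde_json::Result<Vec<Value>> {
        serde_json::from_slice(bytes)
    }

    fn main() -> serde_json::Result<()> {
        let state = vec![json!({
            "type": "m.room.member",
            "state_key": "@alice:example.org",
            "content": { "membership": "invite" }
        })];

        let stored = encode_state(&state)?;
        assert_eq!(decode_state(&stored)?, state);
        Ok(())
    }

Storing the whole stripped-state list as one value keeps a read down to a single tree lookup plus one deserialization, at the cost of rewriting the blob whenever the stripped state changes.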
@@ -610,64 +326,26 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) + self.db.rooms_left(user_id) } #[tracing::instrument(skip(self))] pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + self.db.once_joined(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + self.db.is_joined(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + self.db.is_invited(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + self.db.is_left(user_id, room_id) } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 0c32c4b..5f2cf02 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,7 +1,8 @@ pub mod data; -use std::{mem::size_of, sync::Arc, collections::HashSet}; +use std::{mem::size_of, sync::{Arc, Mutex}, collections::HashSet}; pub use data::Data; +use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{Result, utils, services}; @@ -10,6 +11,19 @@ use self::data::StateDiff; pub struct Service { db: Arc, + + pub stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, + } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -82,7 +96,7 @@ impl Service { Ok(( utils::u64_from_bytes(&compressed_event[0..size_of::()]) .expect("bytes have right length"), - self.get_eventid_from_short( + services().rooms.short.get_eventid_from_short( utils::u64_from_bytes(&compressed_event[size_of::()..]) .expect("bytes have right length"), )?, @@ -214,9 +228,7 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed + ) -> Result { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; @@ -231,7 +243,7 @@ impl Service { 
services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); + return Ok(new_shortstatehash); } let states_parents = previous_shortstatehash @@ -265,6 +277,6 @@ impl Service { )?; }; - Ok((new_shortstatehash, statediffnew, statediffremoved)) + Ok(new_shortstatehash) } } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 2220b5f..20eae7f 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -44,6 +44,9 @@ pub trait Data: Send + Sync { /// Returns the `count` of this pdu's id. fn pdu_count(&self, pdu_id: &[u8]) -> Result; + /// Adds a new pdu to the timeline + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()>; + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -71,4 +74,6 @@ pub trait Data: Send + Sync { room_id: &RoomId, from: u64, ) -> Result, PduEvent)>>>>; + + fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7817225..f25550d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::borrow::Cow; -use std::sync::Arc; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; use std::{iter, collections::HashSet}; use std::fmt::Debug; @@ -22,6 +23,8 @@ use super::state_compressor::CompressedStateEvent; pub struct Service { db: Arc, + + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl Service { @@ -73,7 +76,7 @@ impl Service { &self, event_id: &EventId, ) -> Result> { - self.db.get_non_outlier_pdu(event_id) + self.db.get_non_outlier_pdu_json(event_id) } /// Returns the pdu's id. @@ -129,9 +132,10 @@ impl Service { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, + leaves: Vec>, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(&pdu.room_id)?.expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -141,8 +145,8 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self + if let Some(shortstatehash) = services().rooms.state_accessor.pdu_shortstatehash(&pdu.event_id).unwrap() { + if let Some(prev_state) = services().rooms.state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { @@ -161,8 +165,8 @@ impl Service { } // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; + services().rooms.pdu_metadata.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services().rooms.state.set_forward_extremities(&pdu.room_id, leaves, state_lock)?; let mutex_insert = Arc::clone( services().globals @@ -177,37 +181,23 @@ impl Service { let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails - self.edus + services().rooms.edus.read_receipt .private_read_set(&pdu.room_id, &pdu.sender, count1)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; + services().rooms.user.reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + // Insert pdu + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; drop(insert_lock); // See if the event matches any known pushers let power_levels: RoomPowerLevelsEventContent = services() .rooms + .state_accessor .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) @@ -221,9 +211,9 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in self.get_our_real_users(&pdu.room_id)?.iter() { + for user in services().rooms.state_cache.get_our_real_users(&pdu.room_id)?.into_iter() { // Don't notify the user of their own events - if user == &pdu.sender { + if &user == &pdu.sender { continue; } @@ -231,17 +221,19 @@ impl Service { .account_data .get( None, - user, + &user, GlobalAccountDataEventType::PushRules.to_string().into(), )? + .map(|event| serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db."))).transpose()? 
.map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); + .unwrap_or_else(|| Ruleset::server_default(&user)); let mut highlight = false; let mut notify = false; for action in services().pusher.get_actions( - user, + &user, &rules_for_user, &power_levels, &sync_pdu, @@ -258,27 +250,20 @@ impl Service { }; } - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - if notify { - notifies.push(userroom_id.clone()); + notifies.push(user); } if highlight { - highlights.push(userroom_id); + highlights.push(user); } - for senderkey in services().pusher.get_pusher_senderkeys(user) { + for senderkey in services().pusher.get_pusher_senderkeys(&user) { services().sending.send_push_pdu(&*pdu_id, senderkey)?; } } - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; + self.db.increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { RoomEventType::RoomRedaction => { @@ -302,7 +287,7 @@ impl Service { let invite_state = match content.membership { MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; + let state = services().rooms.state.calculate_invite_state(pdu)?; Some(state) } _ => None, @@ -310,7 +295,7 @@ impl Service { // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( + services().rooms.state_cache.update_membership( &pdu.room_id, &target_user_id, content.membership, @@ -322,18 +307,17 @@ impl Service { } RoomEventType::RoomMessage => { #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, + struct ExtractBody { + body: Option, } - let content = serde_json::from_str::>(pdu.content.get()) + let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - services().rooms.search.index_pdu(shortroomid, pdu_id, body)?; + services().rooms.search.index_pdu(shortroomid, &pdu_id, body)?; - let admin_room = self.alias.resolve_local_alias( + let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( format!("#admins:{}", services().globals.server_name()).as_str(), ) @@ -357,7 +341,7 @@ impl Service { } for appservice in services().appservice.all()? { - if self.appservice_in_room(&pdu.room_id, &appservice)? { + if services().rooms.state_cache.appservice_in_room(&pdu.room_id, &appservice)? 
{ services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -418,7 +402,7 @@ impl Service { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - self.room_aliases(&pdu.room_id) + services().rooms.alias.local_aliases_for_room(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -461,6 +445,7 @@ impl Service { let create_event = services() .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event @@ -483,12 +468,12 @@ impl Service { RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + services().rooms.state.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(services().rooms.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); @@ -497,7 +482,7 @@ impl Service { if let Some(state_key) = &state_key { if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? + services().rooms.state_accessor.room_state_get(room_id, &event_type.to_string().into(), state_key)? { unsigned.insert( "prev_content".to_owned(), @@ -604,7 +589,7 @@ impl Service { ); // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; + let _shorteventid = services().rooms.short.get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } @@ -623,22 +608,23 @@ impl Service { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu)?; + let statehashid = services().rooms.state.append_to_state(&pdu)?; let pdu_id = self.append_pdu( &pdu, pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - iter::once(&*pdu.event_id), + vec![(*pdu.event_id).to_owned()], + state_lock, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; + services().rooms.state.set_room_state(room_id, statehashid, state_lock)?; let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); + services().rooms.state_cache.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == RoomEventType::RoomMember { @@ -666,27 +652,27 @@ impl Service { &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, + new_room_leaves: Vec>, state_ids_compressed: HashSet, soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- services().rooms.set_event_state( + services().rooms.state.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, )?; if soft_fail { - services().rooms + services().rooms.pdu_metadata .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + services().rooms.state.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; return Ok(None); } - let pdu_id = services().rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; + let pdu_id = services().rooms.timeline.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; Ok(Some(pdu_id)) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index fde251b..b335095 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use crate::{ - utils, Error, PduEvent, Result, services, api::{server_server, appservice_server}, + utils::{self, calculate_hash}, Error, PduEvent, Result, services, api::{server_server, appservice_server}, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -677,7 +677,7 @@ impl Service { edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), transaction_id: (&*base64::encode_config( - Self::calculate_hash( + calculate_hash( &events .iter() .map(|e| match e { diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index d7066e2..8d5fd0a 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -18,15 +18,7 @@ impl Service { txn_id: &TransactionId, data: &[u8], ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); - key.extend_from_slice(txn_id.as_bytes()); - - self.userdevicetxnid_response.insert(&key, data)?; - - Ok(()) + self.db.add_txnid(user_id, device_id, txn_id, data) } pub fn existing_txnid( @@ -35,13 +27,6 @@ impl Service { device_id: Option<&DeviceId>, txn_id: &TransactionId, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); - key.extend_from_slice(txn_id.as_bytes()); - - // If there's no entry, this is a new transaction - self.userdevicetxnid_response.get(&key) + self.db.existing_txnid(user_id, device_id, txn_id) } } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 73b2273..5444118 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -21,13 +21,13 @@ impl Service { uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue, ) -> Result<()> { - self.set_uiaa_request( + self.db.set_uiaa_request( user_id, device_id, uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) 
json_body, )?; - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session should be set"), @@ -44,7 +44,7 @@ impl Service { ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) + .map(|session| self.db.get_uiaa_session(user_id, device_id, session)) .unwrap_or_else(|| Ok(uiaainfo.clone()))?; if uiaainfo.session.is_none() { @@ -110,7 +110,7 @@ impl Service { } if !completed { - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), @@ -120,7 +120,7 @@ impl Service { } // UIAA was successful! Remove this session and return true - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), @@ -137,14 +137,4 @@ impl Service { ) -> Option { self.db.get_uiaa_request(user_id, device_id, session) } - - fn update_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - self.db.update_uiaa_session(user_id, device_id, session, uiaainfo) - } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 2cf1876..826e049 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -290,7 +290,7 @@ impl Service { } pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.db.devicelist_version(user_id) + self.db.get_devicelist_version(user_id) } pub fn all_devices_metadata<'a>( @@ -310,7 +310,7 @@ impl Service { // Set the password to "" to indicate a deactivated account. Hashes will never result in an // empty string, so the user will not be able to log in again. Systems like changing the // password without logging in should check if the account is deactivated. 
- self.userid_password.insert(user_id.as_bytes(), &[])?; + self.db.set_password(user_id, None)?; // TODO: Unhook 3PID Ok(()) From a4637e2ba1093065a6fda3fa2ad2b2b9f30eea63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:34:31 +0200 Subject: [PATCH 1217/1727] cargo fmt --- src/api/appservice_server.rs | 8 +- src/api/client_server/account.rs | 103 ++--- src/api/client_server/alias.rs | 29 +- src/api/client_server/backup.rs | 101 +++-- src/api/client_server/capabilities.rs | 2 +- src/api/client_server/config.rs | 2 +- src/api/client_server/context.rs | 19 +- src/api/client_server/device.rs | 35 +- src/api/client_server/directory.rs | 5 +- src/api/client_server/filter.rs | 2 +- src/api/client_server/keys.rs | 80 ++-- src/api/client_server/media.rs | 16 +- src/api/client_server/membership.rs | 248 +++++++---- src/api/client_server/message.rs | 40 +- src/api/client_server/presence.rs | 5 +- src/api/client_server/profile.rs | 48 ++- src/api/client_server/push.rs | 2 +- src/api/client_server/read_marker.rs | 25 +- src/api/client_server/redact.rs | 5 +- src/api/client_server/report.rs | 2 +- src/api/client_server/room.rs | 106 +++-- src/api/client_server/search.rs | 11 +- src/api/client_server/session.rs | 34 +- src/api/client_server/state.rs | 48 ++- src/api/client_server/sync.rs | 207 +++++++--- src/api/client_server/tag.rs | 95 +++-- src/api/client_server/to_device.rs | 25 +- src/api/client_server/typing.rs | 14 +- src/api/client_server/user_directory.rs | 38 +- src/api/client_server/voip.rs | 2 +- src/api/mod.rs | 4 +- src/api/ruma_wrapper/axum.rs | 12 +- src/api/server_server.rs | 388 ++++++++++++------ src/database/abstraction/rocksdb.rs | 2 +- src/database/key_value/account_data.rs | 12 +- src/database/key_value/appservice.rs | 11 +- src/database/key_value/globals.rs | 50 +-- src/database/key_value/key_backups.rs | 24 +- src/database/key_value/media.rs | 28 +- src/database/key_value/pusher.rs | 12 +- src/database/key_value/rooms/alias.rs | 20 +- src/database/key_value/rooms/auth_chain.rs | 15 +- src/database/key_value/rooms/directory.rs | 2 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 86 ++-- src/database/key_value/rooms/edus/typing.rs | 35 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 2 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 4 +- src/database/key_value/rooms/search.rs | 14 +- src/database/key_value/rooms/short.rs | 19 +- src/database/key_value/rooms/state.rs | 15 +- .../key_value/rooms/state_accessor.rs | 58 ++- src/database/key_value/rooms/state_cache.rs | 195 +++++---- .../key_value/rooms/state_compressor.rs | 18 +- src/database/key_value/rooms/timeline.rs | 123 +++--- src/database/key_value/rooms/user.rs | 34 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 6 +- src/database/key_value/users.rs | 136 +++--- src/database/mod.rs | 278 +++++++------ src/lib.rs | 19 +- src/service/account_data/data.rs | 6 +- src/service/account_data/mod.rs | 4 +- src/service/admin/mod.rs | 132 +++--- src/service/globals/data.rs | 6 +- src/service/globals/mod.rs | 8 +- src/service/key_backups/data.rs | 25 +- src/service/key_backups/mod.rs | 8 +- src/service/media/data.rs | 16 +- src/service/media/mod.rs | 31 +- src/service/mod.rs | 41 +- src/service/pdu.rs | 2 +- src/service/pusher/data.rs | 10 +- src/service/pusher/mod.rs | 22 +- 
src/service/rooms/alias/data.rs | 18 +- src/service/rooms/alias/mod.rs | 13 +- src/service/rooms/auth_chain/data.rs | 10 +- src/service/rooms/auth_chain/mod.rs | 39 +- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/edus/presence/data.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 18 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 330 ++++++++------- src/service/rooms/lazy_loading/data.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 20 +- src/service/rooms/metadata/data.rs | 2 +- src/service/rooms/mod.rs | 20 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 7 +- src/service/rooms/short/data.rs | 17 +- src/service/rooms/short/mod.rs | 19 +- src/service/rooms/state/data.rs | 12 +- src/service/rooms/state/mod.rs | 181 +++++--- src/service/rooms/state_accessor/data.rs | 9 +- src/service/rooms/state_accessor/mod.rs | 9 +- src/service/rooms/state_cache/data.rs | 13 +- src/service/rooms/state_cache/mod.rs | 40 +- src/service/rooms/state_compressor/mod.rs | 46 ++- src/service/rooms/timeline/data.rs | 24 +- src/service/rooms/timeline/mod.rs | 193 ++++++--- src/service/rooms/user/data.rs | 2 +- src/service/rooms/user/mod.rs | 3 +- src/service/sending/mod.rs | 55 +-- src/service/transaction_ids/data.rs | 2 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/data.rs | 2 +- src/service/uiaa/mod.rs | 21 +- src/service/users/data.rs | 28 +- src/service/users/mod.rs | 54 ++- src/utils/mod.rs | 1 - 119 files changed, 2787 insertions(+), 1761 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 1f6e2c9..6dca60b 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, services}; +use crate::{services, utils, Error, Result}; use bytes::BytesMut; use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; @@ -45,7 +45,11 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = services().globals.default_client().execute(reqwest_request).await?; + let mut response = services() + .globals + .default_client() + .execute(reqwest_request) + .await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 6d37ce9..28d6c07 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,9 +1,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{ - utils, Error, Result, Ruma, services, api::client_server, -}; +use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ account::{ @@ -43,16 +41,18 @@ pub async fn get_register_available_route( body: Ruma, ) -> Result { // Validate user id - let user_id = - UserId::parse_with_server_name(body.username.to_lowercase(), services().globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == services().globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - 
"Username is invalid.", - ))?; + let user_id = UserId::parse_with_server_name( + body.username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough if services().users.exists(&user_id)? { @@ -95,17 +95,19 @@ pub async fn register_route( let user_id = match (&body.username, is_guest) { (Some(username), false) => { - let proposed_user_id = - UserId::parse_with_server_name(username.to_lowercase(), services().globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() - && user_id.server_name() == services().globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = UserId::parse_with_server_name( + username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() + && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; if services().users.exists(&proposed_user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, @@ -176,7 +178,8 @@ pub async fn register_route( // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - services().users + services() + .users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data @@ -188,7 +191,8 @@ pub async fn register_route( content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), }, - }).expect("to json always works"), + }) + .expect("to json always works"), )?; // Inhibit login does not work for guests @@ -220,7 +224,8 @@ pub async fn register_route( )?; info!("New user {} registered on this server.", user_id); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "New user {} registered on this server.", user_id @@ -229,7 +234,10 @@ pub async fn register_route( // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first if services().users.count()? == 2 { - services().admin.make_user_admin(&user_id, displayname).await?; + services() + .admin + .make_user_admin(&user_id, displayname) + .await?; warn!("Granting {} admin privileges as the first user", user_id); } @@ -272,26 +280,26 @@ pub async fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services().users + services() + .users .set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { @@ -307,7 +315,8 @@ pub async fn change_password_route( } info!("User {} changed their password.", sender_user); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} changed their password.", sender_user @@ -321,9 +330,7 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -pub async fn whoami_route( - body: Ruma, -) -> Result { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device_id = body.sender_device.as_ref().cloned(); @@ -361,19 +368,18 @@ pub async fn deactivate_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -387,7 +393,8 @@ pub async fn deactivate_route( services().users.deactivate_account(sender_user)?; info!("User {} deactivated their account.", sender_user); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} deactivated their account.", sender_user diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 444cc15..b28606c 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use regex::Regex; use ruma::{ api::{ @@ -25,11 +25,18 @@ pub async fn create_alias_route( )); } - if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() { + if services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .is_some() + { return Err(Error::Conflict("Alias already exists.")); } - services().rooms.alias + services() + .rooms + .alias .set_alias(&body.room_alias, &body.room_id)?; Ok(create_alias::v3::Response::new()) @@ -69,9 +76,7 @@ pub async fn get_alias_route( get_alias_helper(&body.room_alias).await } -pub(crate) async fn get_alias_helper( - room_alias: &RoomAliasId, -) -> Result { +pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result { if room_alias.server_name() != services().globals.server_name() { let response = services() .sending @@ -115,9 +120,15 @@ pub(crate) async fn get_alias_helper( .await .is_ok() { - room_id = Some(services().rooms.alias.resolve_local_alias(room_alias)?.ok_or_else(|| { - Error::bad_config("Appservice lied to us. Room does not exist.") - })?); + room_id = Some( + services() + .rooms + .alias + .resolve_local_alias(room_alias)? + .ok_or_else(|| { + Error::bad_config("Appservice lied to us. 
Room does not exist.") + })?, + ); break; } } diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index e413893..f3d5ddc 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, @@ -31,7 +31,8 @@ pub async fn update_backup_version_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups + services() + .key_backups .update_backup(sender_user, &body.version, &body.algorithm)?; Ok(update_backup_version::v3::Response {}) @@ -45,13 +46,13 @@ pub async fn get_latest_backup_info_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let (version, algorithm) = - services().key_backups - .get_latest_backup(sender_user)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; + let (version, algorithm) = services() + .key_backups + .get_latest_backup(sender_user)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; Ok(get_latest_backup_info::v3::Response { algorithm, @@ -78,8 +79,13 @@ pub async fn get_backup_info_route( Ok(get_backup_info::v3::Response { algorithm, - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, version: body.version.to_owned(), }) } @@ -94,7 +100,9 @@ pub async fn delete_backup_version_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups.delete_backup(sender_user, &body.version)?; + services() + .key_backups + .delete_backup(sender_user, &body.version)?; Ok(delete_backup_version::v3::Response {}) } @@ -136,8 +144,13 @@ pub async fn add_backup_keys_route( } Ok(add_backup_keys::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -176,8 +189,13 @@ pub async fn add_backup_keys_for_room_route( } Ok(add_backup_keys_for_room::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -214,8 +232,13 @@ pub async fn add_backup_keys_for_session_route( )?; Ok(add_backup_keys_for_session::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? 
as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -274,11 +297,18 @@ pub async fn delete_backup_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups.delete_all_keys(sender_user, &body.version)?; + services() + .key_backups + .delete_all_keys(sender_user, &body.version)?; Ok(delete_backup_keys::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -290,12 +320,18 @@ pub async fn delete_backup_keys_for_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups + services() + .key_backups .delete_room_keys(sender_user, &body.version, &body.room_id)?; Ok(delete_backup_keys_for_room::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -307,11 +343,20 @@ pub async fn delete_backup_keys_for_session_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups - .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; + services().key_backups.delete_room_key( + sender_user, + &body.version, + &body.room_id, + &body.session_id, + )?; Ok(delete_backup_keys_for_session::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index e4283b7..97529cf 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use ruma::api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 36f4fcb..dbd2b2c 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ config::{ diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index c407c71..2e0f257 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, @@ -49,7 +49,11 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? 
{ + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -141,7 +145,11 @@ pub async fn get_context_route( .expect("All rooms have state"), }; - let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; let end_token = events_after .last() @@ -156,7 +164,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { let pdu = match services().rooms.timeline.get_pdu(&id)? { diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index 2f55993..d4c4178 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -55,7 +55,8 @@ pub async fn update_device_route( device.display_name = body.display_name.clone(); - services().users + services() + .users .update_device_metadata(sender_user, &body.device_id, &device)?; Ok(update_device::v3::Response {}) @@ -88,26 +89,27 @@ pub async fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services().users.remove_device(sender_user, &body.device_id)?; + services() + .users + .remove_device(sender_user, &body.device_id)?; Ok(delete_device::v3::Response {}) } @@ -141,19 +143,18 @@ pub async fn delete_devices_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 2a60f67..c1b0eda 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -123,7 +123,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> Result { - if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) + if let Some(other_server) = + server.filter(|server| *server != services().globals.server_name().as_str()) { let response = services() .sending diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index e0c9506..a0d5a19 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, filter::{create_filter, get_filter}, diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 4ce5d4c..be62cc2 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -32,7 +32,8 @@ pub async fn upload_keys_route( let sender_device = body.sender_device.as_ref().expect("user is authenticated"); for (key_key, key_value) in &body.one_time_keys { - services().users + services() + .users .add_one_time_key(sender_user, sender_device, key_key, key_value)?; } @@ -44,16 +45,16 @@ pub async fn upload_keys_route( .get_device_keys(sender_user, sender_device)? .is_none() { - services().users.add_device_keys( - sender_user, - sender_device, - device_keys, - )?; + services() + .users + .add_device_keys(sender_user, sender_device, device_keys)?; } } Ok(upload_keys::v3::Response { - one_time_key_counts: services().users.count_one_time_keys(sender_user, sender_device)?, + one_time_key_counts: services() + .users + .count_one_time_keys(sender_user, sender_device)?, }) } @@ -69,12 +70,8 @@ pub async fn get_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let response = get_keys_helper( - Some(sender_user), - &body.device_keys, - |u| u == sender_user, - ) - .await?; + let response = + get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?; Ok(response) } @@ -113,19 +110,18 @@ pub async fn upload_signing_keys_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -187,12 +183,9 @@ pub async fn upload_signatures_route( ))? .to_owned(), ); - services().users.sign_key( - user_id, - key_id, - signature, - sender_user, - )?; + services() + .users + .sign_key(user_id, key_id, signature, sender_user)?; } } } @@ -215,7 +208,8 @@ pub async fn get_key_changes_route( let mut device_list_updates = HashSet::new(); device_list_updates.extend( - services().users + services() + .users .keys_changed( sender_user.as_str(), body.from @@ -230,9 +224,15 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(|r| r.ok()) + { device_list_updates.extend( - services().users + services() + .users .keys_changed( &room_id.to_string(), body.from.parse().map_err(|_| { @@ -296,12 +296,13 @@ pub(crate) async fn get_keys_helper bool>( for device_id in device_ids { let mut container = BTreeMap::new(); if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { - let metadata = services().users.get_device_metadata(user_id, device_id)?.ok_or( - Error::BadRequest( + let metadata = services() + .users + .get_device_metadata(user_id, device_id)? + .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", - ), - )?; + ))?; add_unsigned_device_display_name(&mut keys, metadata) .map_err(|_| Error::bad_database("invalid device keys in database"))?; @@ -311,7 +312,10 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = services().users.get_master_key(user_id, &allowed_signatures)? { + if let Some(master_key) = services() + .users + .get_master_key(user_id, &allowed_signatures)? + { master_keys.insert(user_id.to_owned(), master_key); } if let Some(self_signing_key) = services() @@ -338,7 +342,8 @@ pub(crate) async fn get_keys_helper bool>( } ( server, - services().sending + services() + .sending .send_federation_request( server, federation::keys::get_keys::v1::Request { @@ -408,7 +413,8 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = - services().users + services() + .users .take_one_time_key(user_id, device_id, key_algorithm)? 
{ let mut c = BTreeMap::new(); diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 80cbb61..c1f5e1d 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,6 +1,4 @@ -use crate::{ - utils, Error, Result, Ruma, services, service::media::FileMeta, -}; +use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, media::{ @@ -37,11 +35,11 @@ pub async fn create_content_route( utils::random_string(MXC_LENGTH) ); - services().media + services() + .media .create( mxc.clone(), - body - .filename + body.filename .as_ref() .map(|filename| "inline; filename=".to_owned() + filename) .as_deref(), @@ -73,7 +71,8 @@ pub async fn get_remote_content( ) .await?; - services().media + services() + .media .create( mxc.to_string(), content_response.content_disposition.as_deref(), @@ -192,7 +191,8 @@ pub async fn get_content_thumbnail_route( ) .await?; - services().media + services() + .media .upload_thumbnail( mxc, None, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index c930ce4..5de8ce1 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -30,7 +30,11 @@ use std::{ }; use tracing::{debug, error, warn}; -use crate::{Result, services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server, client_server}, utils, Ruma}; +use crate::{ + api::{client_server, server_server}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; use super::get_alias_helper; @@ -47,8 +51,9 @@ pub async fn join_room_by_id_route( let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() @@ -88,8 +93,9 @@ pub async fn join_room_by_id_or_alias_route( Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() @@ -163,8 +169,9 @@ pub async fn kick_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -183,7 +190,8 @@ pub async fn kick_user_route( // TODO: reason let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -250,7 +258,8 @@ pub async fn ban_user_route( )?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -286,8 +295,9 @@ pub async fn unban_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -305,7 +315,8 @@ pub async fn unban_user_route( event.membership = MembershipState::Leave; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -345,7 +356,10 @@ pub async fn forget_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.state_cache.forget(&body.room_id, sender_user)?; + services() + .rooms + .state_cache + .forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -379,7 +393,11 @@ pub async fn get_member_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -410,7 +428,11 @@ pub async fn joined_members_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -418,7 +440,12 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(|r| r.ok()) { + for user_id in services() + .rooms + .state_cache + .room_members(&body.room_id) + .filter_map(|r| r.ok()) + { let display_name = services().users.displayname(&user_id)?; let avatar_url = services().users.avatar_url(&user_id)?; @@ -443,7 +470,8 @@ async fn join_room_by_id_helper( let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -481,7 +509,14 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if services().globals.supported_room_versions().contains(&room_version) => room_version, + Some(room_version) + if services() + .globals + .supported_room_versions() + .contains(&room_version) => + { + room_version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -568,12 +603,11 @@ async fn join_room_by_id_helper( let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - services().rooms.event_handler.fetch_join_signing_keys( - &send_join_response, - &room_version, - &pub_key_map, - ) - .await?; + services() + .rooms + .event_handler + .fetch_join_signing_keys(&send_join_response, &room_version, &pub_key_map) + .await?; for result in send_join_response .room_state @@ -591,12 +625,15 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - state_key, - )?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; state.insert(shortstatekey, pdu.event_id.clone()); } } @@ -632,7 +669,10 @@ async fn join_room_by_id_helper( Err(_) => continue, }; - services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; } let shortstatehash = services().rooms.state.set_event_state( @@ -640,7 +680,12 @@ async fn join_room_by_id_helper( room_id, state .into_iter() - .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) .collect::>()?, )?; @@ -650,12 +695,15 @@ async fn join_room_by_id_helper( &parsed_pdu, join_event, vec![(*parsed_pdu.event_id).to_owned()], - &state_lock + &state_lock, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, shortstatehash, &state_lock)?; + services() + .rooms + .state + .set_room_state(room_id, shortstatehash, &state_lock)?; let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { @@ -705,7 +753,13 @@ fn validate_and_add_event_id( )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match 
services().globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } @@ -760,7 +814,8 @@ pub(crate) async fn invite_helper<'a>( if user_id.server_name() != services().globals.server_name() { let (pdu_json, invite_room_state) = { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -781,13 +836,18 @@ pub(crate) async fn invite_helper<'a>( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { - event_type: RoomEventType::RoomMember, - content, - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, sender_user, room_id, &state_lock)?; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + )?; let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; @@ -799,8 +859,11 @@ pub(crate) async fn invite_helper<'a>( // Generate event id let expected_event_id = format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &services().rooms.state.get_room_version(&room_id)?) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash( + &pdu_json, + &services().rooms.state.get_room_version(&room_id)? + ) + .expect("ruma can calculate reference hashes") ); let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); @@ -822,8 +885,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(&response.event) - { + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -847,22 +909,20 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu( - &origin, - &event_id, - room_id, - value, - true, - &pub_key_map, - ) - .await? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; // Bind to variable because of lifetimes - let servers = services().rooms.state_cache + let servers = services() + .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -872,7 +932,11 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -880,7 +944,8 @@ pub(crate) async fn invite_helper<'a>( } let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -923,7 +988,13 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { .rooms .state_cache .rooms_joined(user_id) - .chain(services().rooms.state_cache.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .chain( + services() + .rooms + .state_cache + .rooms_invited(user_id) + .map(|t| t.map(|(r, _)| r)), + ) .collect::>(); for room_id in all_rooms { @@ -938,20 +1009,24 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { Ok(()) } -pub async fn leave_room( - user_id: &UserId, - room_id: &RoomId, -) -> Result<()> { +pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { // Ask a remote server if we don't have this room - if !services().rooms.metadata.exists(room_id)? && room_id.server_name() != services().globals.server_name() { + if !services().rooms.metadata.exists(room_id)? + && room_id.server_name() != services().globals.server_name() + { if let Err(e) = remote_leave_room(user_id, room_id).await { warn!("Failed to leave room {} remotely: {}", user_id, e); // Don't tell the client about this error } - let last_state = services().rooms.state_cache + let last_state = services() + .rooms + .state_cache .invite_state(user_id, room_id)? - .map_or_else(|| services().rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; + .map_or_else( + || services().rooms.state_cache.left_state(user_id, room_id), + |s| Ok(Some(s)), + )?; // We always drop the invite, we can't rely on other servers services().rooms.state_cache.update_membership( @@ -964,7 +1039,8 @@ pub async fn leave_room( )?; } else { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -974,7 +1050,10 @@ pub async fn leave_room( let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1003,10 +1082,7 @@ pub async fn leave_room( Ok(()) } -async fn remote_leave_room( - user_id: &UserId, - room_id: &RoomId, -) -> Result<()> { +async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut make_leave_response_and_server = Err(Error::BadServerResponse( "No server available to assist in leaving.", )); @@ -1048,14 +1124,21 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if services().globals.supported_room_versions().contains(&version) => version, + Some(version) + if services() + .globals + .supported_room_versions() + .contains(&version) => + { + version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|_| Error::BadServerResponse("Invalid make_leave event json received from server."))?; // TODO: Is origin needed? leave_event_stub.insert( @@ -1099,7 +1182,8 @@ async fn remote_leave_room( // It has enough fields to be called a proper event now let leave_event = leave_event_stub; - services().sending + services() + .sending .send_federation_request( &remote_server, federation::membership::create_leave_event::v2::Request { diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index bfdc2fd..e086e4a 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -25,7 +25,8 @@ pub async fn send_message_event_route( let sender_device = body.sender_device.as_deref(); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -46,7 +47,8 @@ pub async fn send_message_event_route( // Check if this is a new transaction id if let Some(response) = - services().transaction_ids + services() + .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? { // The client might have sent a txnid of the /sendToDevice endpoint @@ -108,7 +110,11 @@ pub async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -128,8 +134,12 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); - services().rooms - .lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; + services().rooms.lazy_loading.lazy_load_confirm_delivery( + sender_user, + sender_device, + &body.room_id, + from, + )?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -149,8 +159,10 @@ pub async fn get_message_events_route( .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - services().rooms - .timeline.pdu_count(&pdu_id) + services() + .rooms + .timeline + .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() }) @@ -187,7 +199,8 @@ pub async fn get_message_events_route( .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - services().rooms + services() + .rooms .timeline .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) @@ -222,10 +235,11 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { - if let Some(member_event) = - services().rooms.state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? - { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomMember, + ll_id.as_str(), + )? { resp.state.push(member_event.to_state_event()); } } diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 6a915e4..dfac3db 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{utils, Result, Ruma, services}; +use crate::{services, utils, Result, Ruma}; use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; @@ -51,7 +51,8 @@ pub async fn get_presence_route( for room_id in services() .rooms - .user.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + .user + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? 
{ let room_id = room_id?; diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 3e1d736..5ace177 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -24,7 +24,8 @@ pub async fn set_displayname_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().users + services() + .users .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms @@ -40,8 +41,9 @@ pub async fn set_displayname_route( content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -71,7 +73,8 @@ pub async fn set_displayname_route( for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -80,10 +83,12 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update services().rooms.edus.presence.update_presence( @@ -150,10 +155,13 @@ pub async fn set_avatar_url_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().users + services() + .users .set_avatar_url(sender_user, body.avatar_url.clone())?; - services().users.set_blurhash(sender_user, body.blurhash.clone())?; + services() + .users + .set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = services() @@ -168,8 +176,9 @@ pub async fn set_avatar_url_route( content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -199,7 +208,8 @@ pub async fn set_avatar_url_route( for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -208,10 +218,12 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update services().rooms.edus.presence.update_presence( diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 12ec25d..2301ddc 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index c6d77c1..fd0e090 100644 --- 
a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::RoomAccountDataEventType, @@ -34,12 +34,18 @@ pub async fn set_read_marker_route( services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms.timeline.get_pdu_count(event)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, + services() + .rooms + .timeline + .get_pdu_count(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, )?; - services().rooms.user + services() + .rooms + .user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -80,7 +86,8 @@ pub async fn create_receipt_route( services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms + services() + .rooms .timeline .get_pdu_count(&body.event_id)? .ok_or(Error::BadRequest( @@ -88,7 +95,9 @@ pub async fn create_receipt_route( "Event does not exist.", ))?, )?; - services().rooms.user + services() + .rooms + .user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 57e442a..ab586c0 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, RoomEventType}, @@ -20,7 +20,8 @@ pub async fn redact_event_route( let body = body.body; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index efcc434..e45820e 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{utils::HtmlEscape, Error, Result, Ruma, services}; +use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, room::report_content}, events::room::message, diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 939fbaa..ca191d6 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,5 +1,5 @@ use crate::{ - Error, Result, Ruma, service::pdu::PduBuilder, services, api::client_server::invite_helper, + api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -57,7 +57,8 @@ pub async fn create_room_route( services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -81,13 +82,19 @@ pub async fn create_room_route( .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::parse(format!("#{}:{}", localpart, services().globals.server_name())) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") - })?; + let alias = RoomAliasId::parse(format!( + "#{}:{}", + localpart, + services().globals.server_name() + )) + 
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - if services().rooms.alias.resolve_local_alias(&alias)?.is_some() { + if services() + .rooms + .alias + .resolve_local_alias(&alias)? + .is_some() + { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", @@ -99,7 +106,11 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if services().globals.supported_room_versions().contains(&room_version) { + if services() + .globals + .supported_room_versions() + .contains(&room_version) + { room_version } else { return Err(Error::BadRequest( @@ -338,13 +349,18 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == RoomEventType::RoomEncryption && !services().globals.allow_encryption() + if pdu_builder.event_type == RoomEventType::RoomEncryption + && !services().globals.allow_encryption() { continue; } - services().rooms - .timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; + services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + )?; } // 7. Events implied by name and topic @@ -412,7 +428,11 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -439,7 +459,11 @@ pub async fn get_room_aliases_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -449,7 +473,8 @@ pub async fn get_room_aliases_route( Ok(aliases::v3::Response { aliases: services() .rooms - .alias.local_aliases_for_room(&body.room_id) + .alias + .local_aliases_for_room(&body.room_id) .filter_map(|a| a.ok()) .collect(), }) @@ -470,7 +495,11 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().globals.supported_room_versions().contains(&body.new_version) { + if !services() + .globals + .supported_room_versions() + .contains(&body.new_version) + { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -479,11 +508,14 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(services().globals.server_name()); - services().rooms - .short.get_or_create_shortroomid(&replacement_room)?; + services() + .rooms + .short + .get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -514,7 +546,8 @@ pub async fn upgrade_room_route( // Change lock to replacement room drop(state_lock); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -525,7 +558,8 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( - services().rooms + services() + .rooms .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? @@ -627,10 +661,15 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match services().rooms.state_accessor.room_state_get(&body.room_id, &event_type, "")? { - Some(v) => v.content.clone(), - None => continue, // Skipping missing events. - }; + let event_content = + match services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &event_type, "")? + { + Some(v) => v.content.clone(), + None => continue, // Skipping missing events. + }; services().rooms.timeline.build_and_append_pdu( PduBuilder { @@ -647,14 +686,22 @@ pub async fn upgrade_room_route( } // Moves any local aliases to the new room - for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(|r| r.ok()) { - services().rooms - .alias.set_alias(&alias, &replacement_room)?; + for alias in services() + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .filter_map(|r| r.ok()) + { + services() + .rooms + .alias + .set_alias(&alias, &replacement_room)?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - services().rooms + services() + .rooms .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
@@ -688,4 +735,3 @@ pub async fn upgrade_room_route( // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } - diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index f648649..1ba9cdf 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, search::search_events::{ @@ -23,7 +23,8 @@ pub async fn search_events_route( let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { - services().rooms + services() + .rooms .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -35,7 +36,11 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 7feeb66..14f1404 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -40,9 +40,7 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -pub async fn login_route( - body: Ruma, -) -> Result { +pub async fn login_route(body: Ruma) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -55,15 +53,18 @@ pub async fn login_route( } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = - UserId::parse_with_server_name(username.to_owned(), services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; - let hash = services().users.password_hash(&user_id)?.ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password.", - ))?; + let user_id = UserId::parse_with_server_name( + username.to_owned(), + services().globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let hash = services() + .users + .password_hash(&user_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + ))?; if hash.is_empty() { return Err(Error::BadRequest( @@ -121,7 +122,8 @@ pub async fn login_route( // Determine if device_id was provided and exists in the db for this user let device_exists = body.device_id.as_ref().map_or(false, |device_id| { - services().users + services() + .users .all_device_ids(&user_id) .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); @@ -156,9 +158,7 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -pub async fn logout_route( - body: Ruma, -) -> Result { +pub async fn logout_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index ece7453..36466b8 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use crate::{ - Error, Result, Ruma, RumaResponse, services, service::pdu::PduBuilder, -}; +use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ error::ErrorKind, @@ -90,10 +88,14 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -138,10 +140,15 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -162,7 +169,8 @@ pub async fn get_state_events_for_key_route( let event = services() .rooms - .state_accessor.room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .state_accessor + .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -187,10 +195,15 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -211,7 +224,8 @@ pub async fn get_state_events_for_empty_key_route( let event = services() .rooms - .state_accessor.room_state_get(&body.room_id, &body.event_type, "")? + .state_accessor + .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -248,7 +262,8 @@ async fn send_state_event_for_key_helper( if alias.server_name() != services().globals.server_name() || services() .rooms - .alias.resolve_local_alias(&alias)? + .alias + .resolve_local_alias(&alias)? .filter(|room| room == room_id) // Make sure it's the right room .is_none() { @@ -262,7 +277,8 @@ async fn send_state_event_for_key_helper( } let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 9eb6383..9ce98b7 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, RumaResponse, services}; +use crate::{services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -129,12 +129,7 @@ async fn sync_helper_wrapper( ) { let since = body.since.clone(); - let r = sync_helper( - sender_user.clone(), - sender_device.clone(), - body, - ) - .await; + let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await; if let Ok((_, caching_allowed)) = r { if !caching_allowed { @@ -211,12 +206,17 @@ async fn sync_helper( // Look for device list updates of this account device_list_updates.extend( - services().users + services() + .users .keys_changed(&sender_user.to_string(), since, None) .filter_map(|r| r.ok()), ); - let all_joined_rooms = services().rooms.state_cache.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -224,7 +224,8 @@ async fn sync_helper( // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -237,7 +238,12 @@ async fn sync_helper( let timeline_pdus; let limited; - if services().rooms.timeline.last_timeline_count(&sender_user, &room_id)? > since { + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > since + { let mut non_timeline_pdus = services() .rooms .timeline @@ -250,7 +256,8 @@ async fn sync_helper( r.ok() }) .take_while(|(pduid, _)| { - services().rooms + services() + .rooms .timeline .pdu_count(pduid) .map_or(false, |count| count > since) @@ -286,24 +293,40 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - services().rooms.lazy_loading - .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; + services().rooms.lazy_loading.lazy_load_confirm_delivery( + &sender_user, + &sender_device, + &room_id, + since, + )?; // Database queries: - let current_shortstatehash = if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? 
{ - s - } else { - error!("Room {} has no state", room_id); - continue; - }; + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; - let since_shortstatehash = services().rooms.user.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = services().rooms.state_cache.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = services().rooms.state_cache.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -314,7 +337,8 @@ async fn sync_helper( for hero in services() .rooms - .timeline.all_pdus(&sender_user, &room_id)? + .timeline + .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { @@ -333,7 +357,10 @@ async fn sync_helper( content.membership, MembershipState::Join | MembershipState::Invite ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? - || services().rooms.state_cache.is_invited(&user_id, &room_id)?) + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -374,14 +401,21 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { let pdu = match services().rooms.timeline.get_pdu(&id)? { @@ -423,8 +457,11 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading - .lazy_load_reset(&sender_user, &sender_device, &room_id)?; + services().rooms.lazy_loading.lazy_load_reset( + &sender_user, + &sender_device, + &room_id, + )?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. 
@@ -471,8 +508,16 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; - let since_state_ids = services().rooms.state_accessor.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { @@ -537,13 +582,15 @@ async fn sync_helper( let encrypted_room = services() .rooms - .state_accessor.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? .is_some(); - let since_encryption = - services().rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; // Calculations: let new_encrypted_room = encrypted_room && since_encryption.is_none(); @@ -592,8 +639,9 @@ async fn sync_helper( if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .room_members(&room_id) .flatten() .filter(|user_id| { @@ -602,8 +650,7 @@ async fn sync_helper( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) - .unwrap_or(false) + !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) }), ); } @@ -625,15 +672,17 @@ async fn sync_helper( // Look for device list updates in this room device_list_updates.extend( - services().users + services() + .users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()), ); let notification_count = if send_notification_counts { Some( - services().rooms - .user + services() + .rooms + .user .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -644,8 +693,9 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( - services().rooms - .user + services() + .rooms + .user .highlight_count(&sender_user, &room_id)? 
.try_into() .expect("highlight count can't go that high"), @@ -657,7 +707,9 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(services().rooms.timeline.pdu_count(pdu_id)?.to_string())) + Ok(Some( + services().rooms.timeline.pdu_count(pdu_id)?.to_string(), + )) })?; let room_events: Vec<_> = timeline_pdus @@ -685,8 +737,11 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - services().rooms.user - .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; + services().rooms.user.associate_token_shortstatehash( + &room_id, + next_batch, + current_shortstatehash, + )?; let joined_room = JoinedRoom { account_data: RoomAccountData { @@ -729,11 +784,11 @@ async fn sync_helper( } // Take presence updates from this room - for (user_id, presence) in - services().rooms - .edus - .presence - .presence_since(&room_id, since)? + for (user_id, presence) in services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? { match presence_updates.entry(user_id) { Entry::Vacant(v) => { @@ -765,14 +820,19 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services().rooms.state_cache.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_left(&sender_user) + .collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -783,7 +843,10 @@ async fn sync_helper( drop(insert_lock); } - let left_count = services().rooms.state_cache.get_left_count(&room_id, &sender_user)?; + let left_count = services() + .rooms + .state_cache + .get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -807,14 +870,19 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services().rooms.state_cache.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_invited(&sender_user) + .collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -825,7 +893,10 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = services().rooms.state_cache.get_invite_count(&room_id, &sender_user)?; + let invite_count = services() + .rooms + .state_cache + .get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -850,8 +921,10 @@ async fn sync_helper( .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( - services().rooms - .state_accessor.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? 
.is_some(), ) @@ -865,7 +938,8 @@ async fn sync_helper( } // Remove all to-device events the device received *last time* - services().users + services() + .users .remove_to_device_events(&sender_user, &sender_device, since)?; let response = sync_events::v3::Response { @@ -898,7 +972,9 @@ async fn sync_helper( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: services().users.count_one_time_keys(&sender_user, &sender_device)?, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, to_device: ToDevice { events: services() .users @@ -942,8 +1018,9 @@ fn share_encrypted_room( .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index abf2b87..cb46d9c 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services, Error}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -18,21 +18,24 @@ pub async fn update_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; + let event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; tags_event .content @@ -59,21 +62,24 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; + let mut event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; tags_event.content.tags.remove(&body.tag.clone().into()); @@ -97,21 +103,24 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; + let mut event = services().account_data.get( + Some(&body.room_id), + sender_user, + 
RoomAccountDataEventType::Tag, + )?; - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; Ok(get_tags::v3::Response { tags: tags_event.content.tags, diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 3a2f6c0..34db3f9 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,7 +1,7 @@ use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, @@ -54,15 +54,17 @@ pub async fn send_event_to_device_route( } match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => services().users.add_to_device_event( - sender_user, - target_user_id, - &target_device_id, - &body.event_type, - event.deserialize_as().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - )?, + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + sender_user, + target_user_id, + &target_device_id, + &body.event_type, + event.deserialize_as().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + )? + } DeviceIdOrAllDevices::AllDevices => { for target_device_id in services().users.all_device_ids(target_user_id) { @@ -82,7 +84,8 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - services().transaction_ids + services() + .transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; Ok(send_event_to_device::v3::Response {}) diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index abb669b..ecc926f 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` @@ -11,7 +11,11 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -25,8 +29,10 @@ pub async fn create_typing_event_route( duration.as_millis() as u64 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms - .edus.typing + services() + .rooms + .edus + .typing .typing_remove(sender_user, &body.room_id)?; } diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index c94a283..9d7a828 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -48,22 +48,25 @@ pub async fn search_users_route( return None; } - let user_is_in_public_rooms = - services().rooms - .state_cache.rooms_joined(&user_id) - .filter_map(|r| r.ok()) - .any(|room| { - services().rooms - .state_accessor.room_state_get(&room, &StateEventType::RoomJoinRules, "") - .map_or(false, |event| { - event.map_or(false, |event| { - serde_json::from_str(event.content.get()) - .map_or(false, |r: RoomJoinRulesEventContent| { - r.join_rule == JoinRule::Public - }) - }) + let user_is_in_public_rooms = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .any(|room| { + services() + .rooms + .state_accessor + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| { + r.join_rule == JoinRule::Public + }) }) - }); + }) + }); if user_is_in_public_rooms { return Some(user); @@ -71,7 +74,8 @@ pub async fn search_users_route( let user_is_in_shared_rooms = services() .rooms - .user.get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? 
.next() .is_some(); diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 9917979..dc9caaa 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; diff --git a/src/api/mod.rs b/src/api/mod.rs index 68589be..0d2cd66 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,4 +1,4 @@ -pub mod client_server; -pub mod server_server; pub mod appservice_server; +pub mod client_server; pub mod ruma_wrapper; +pub mod server_server; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index d926b89..ee8c9e7 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{Error, Result, api::server_server, services}; +use crate::{api::server_server, services, Error, Result}; #[async_trait] impl FromRequest for Ruma @@ -197,11 +197,11 @@ where request_map.insert("content".to_owned(), json_body.clone()); }; - let keys_result = services().rooms.event_handler.fetch_signing_keys( - &x_matrix.origin, - vec![x_matrix.key.to_owned()], - ) - .await; + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) + .await; let keys = match keys_result { Ok(b) => b, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 11f7ec3..dba4489 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,6 +1,7 @@ use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, - utils, Error, PduEvent, Result, Ruma, services, service::pdu::{gen_event_id_canonical_json, PduBuilder}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -138,7 +139,8 @@ where let mut write_destination_to_cache = false; - let cached_result = services().globals + let cached_result = services() + .globals .actual_destination_cache .read() .unwrap() @@ -191,7 +193,10 @@ where .to_string() .into(), ); - request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); + request_map.insert( + "origin".to_owned(), + services().globals.server_name().as_str().into(), + ); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = @@ -238,7 +243,11 @@ where let url = reqwest_request.url().clone(); - let response = services().globals.federation_client().execute(reqwest_request).await; + let response = services() + .globals + .federation_client() + .execute(reqwest_request) + .await; match response { Ok(mut response) => { @@ -278,10 +287,15 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { - services().globals.actual_destination_cache.write().unwrap().insert( - Box::::from(destination), - (actual_destination, host), - ); + services() + .globals + .actual_destination_cache + .write() + .unwrap() + .insert( + Box::::from(destination), + (actual_destination, host), + ); } response.map_err(|e| { @@ -329,9 +343,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Returns: actual_destination, host 
header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -async fn find_actual_destination( - destination: &'_ ServerName, -) -> (FedDest, FedDest) { +async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { @@ -364,18 +376,24 @@ async fn find_actual_destination( // 3.3: SRV lookup successful let force_port = hostname_override.port(); - if let Ok(override_ip) = services().globals + if let Ok(override_ip) = services() + .globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - services().globals.tls_name_override.write().unwrap().insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); } else { warn!("Using SRV record, but could not resolve to IP"); } @@ -400,15 +418,24 @@ async fn find_actual_destination( Some(hostname_override) => { let force_port = hostname_override.port(); - if let Ok(override_ip) = services().globals + if let Ok(override_ip) = services() + .globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - services().globals.tls_name_override.write().unwrap().insert( - hostname.clone(), - (override_ip.iter().collect(), force_port.unwrap_or(8448)), - ); + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); } else { warn!("Using SRV record, but could not resolve to IP"); } @@ -443,10 +470,9 @@ async fn find_actual_destination( (actual_destination, hostname) } -async fn query_srv_record( - hostname: &'_ str, -) -> Option { - if let Ok(Some(host_port)) = services().globals +async fn query_srv_record(hostname: &'_ str) -> Option { + if let Ok(Some(host_port)) = services() + .globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await @@ -465,11 +491,10 @@ async fn query_srv_record( } } -async fn request_well_known( - destination: &str, -) -> Option { +async fn request_well_known(destination: &str) -> Option { let body: serde_json::Value = serde_json::from_str( - &services().globals + &services() + .globals .default_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -664,15 +689,22 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert(event_id, Err(Error::bad_database("Event needs a valid RoomId."))); + resolved_map.insert( + event_id, + Err(Error::bad_database("Event needs a valid RoomId.")), + ); continue; } }; - services().rooms.event_handler.acl_check(&sender_servername, &room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( - services().globals + services() + .globals .roomid_mutex_federation .write() .unwrap() @@ -683,16 +715,19 @@ pub async fn send_transaction_message_route( let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - services().rooms.event_handler.handle_incoming_pdu( - &sender_servername, - &event_id, - &room_id, - value, - true, - 
&pub_key_map, - ) - .await - .map(|_| ()), + services() + .rooms + .event_handler + .handle_incoming_pdu( + &sender_servername, + &event_id, + &room_id, + value, + true, + &pub_key_map, + ) + .await + .map(|_| ()), ); drop(mutex_lock); @@ -727,7 +762,13 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - services().rooms.timeline.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -744,11 +785,11 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - services().rooms.edus.read_receipt.readreceipt_update( - &user_id, - &room_id, - event, - )?; + services() + .rooms + .edus + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; } else { // TODO fetch missing events info!("No known event ids in read receipt: {:?}", user_updates); @@ -757,7 +798,11 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if services().rooms.state_cache.is_joined(&typing.user_id, &typing.room_id)? { + if services() + .rooms + .state_cache + .is_joined(&typing.user_id, &typing.room_id)? + { if typing.typing { services().rooms.edus.typing.typing_add( &typing.user_id, @@ -765,16 +810,16 @@ pub async fn send_transaction_message_route( 3000 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms.edus.typing.typing_remove( - &typing.user_id, - &typing.room_id, - )?; + services() + .rooms + .edus + .typing + .typing_remove(&typing.user_id, &typing.room_id)?; } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - services().users - .mark_device_key_update(&user_id)?; + services().users.mark_device_key_update(&user_id)?; } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -810,7 +855,9 @@ pub async fn send_transaction_message_route( } DeviceIdOrAllDevices::AllDevices => { - for target_device_id in services().users.all_device_ids(target_user_id) { + for target_device_id in + services().users.all_device_ids(target_user_id) + { services().users.add_to_device_event( &sender, target_user_id, @@ -830,7 +877,8 @@ pub async fn send_transaction_message_route( } // Save transaction id with empty data - services().transaction_ids + services() + .transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } Edu::SigningKeyUpdate(SigningKeyUpdateContent { @@ -854,7 +902,12 @@ pub async fn send_transaction_message_route( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) + Ok(send_transaction_message::v1::Response { + pdus: resolved_map + .into_iter() + .map(|(e, r)| (e, r.map_err(|e| e.to_string()))) + .collect(), + }) } /// # `GET /_matrix/federation/v1/event/{eventId}` @@ -875,7 +928,8 @@ pub async fn get_event_route( .expect("server is authenticated"); let event = services() - .rooms.timeline + .rooms + .timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -887,7 +941,11 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !services().rooms.state_cache.server_in_room(sender_servername, room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -916,14 +974,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); @@ -988,17 +1053,25 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let event = services() - .rooms.timeline + .rooms + .timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1010,7 +1083,11 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = services().rooms.auth_chain.get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -1035,17 +1112,25 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1053,26 +1138,39 @@ pub async fn get_room_state_route( ))?; let pdus = services() - .rooms.state_accessor + .rooms + .state_accessor .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - services().rooms.timeline.get_pdu_json(&id).unwrap().unwrap(), + services() + .rooms + .timeline + .get_pdu_json(&id) + .unwrap() + .unwrap(), ) }) .collect(); - let auth_chain_ids = - services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - services().rooms.timeline.get_pdu_json(&id).map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) + services() + .rooms + .timeline + .get_pdu_json(&id) + .map(|maybe_json| { + PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) + }) }) .filter_map(|r| r.ok()) .collect(), @@ -1095,17 +1193,25 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1113,15 +1219,19 @@ pub async fn get_room_state_ids_route( ))?; let pdu_ids = services() - .rooms.state_accessor + .rooms + .state_accessor .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = - services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1151,10 +1261,14 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -1164,9 +1278,11 @@ pub async fn create_join_event_template_route( let state_lock = mutex_state.lock().await; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = - services().rooms.state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomJoinRules, + "", + )?; let join_rules_event_content: Option = join_rules_event .as_ref() @@ -1212,13 +1328,18 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { - event_type: RoomEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, &body.user_id, &body.room_id, &state_lock)?; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + )?; drop(state_lock); @@ -1244,12 +1365,17 @@ async fn create_join_event( )); } - services().rooms.event_handler.acl_check(&sender_servername, room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = services() - .rooms.state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomJoinRules, + "", + )?; let join_rules_event_content: Option = join_rules_event .as_ref() @@ -1275,7 +1401,8 @@ async fn create_join_event( // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = services() - .rooms.state + .rooms + .state .get_room_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1307,7 +1434,8 @@ async fn create_join_event( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; let mutex = Arc::clone( - services().globals + services() + .globals .roomid_mutex_federation .write() .unwrap() @@ -1315,7 +1443,10 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -1323,12 +1454,19 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; - let auth_chain_ids = services().rooms.auth_chain.get_auth_chain( - room_id, - state_ids.iter().map(|(_, id)| id.clone()).collect(), - ) - .await?; + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state_ids.iter().map(|(_, id)| id.clone()).collect(), + ) + .await?; let servers = services() .rooms @@ -1399,9 +1537,16 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; - if !services().globals.supported_room_versions().contains(&body.room_version) { + if !services() + .globals + .supported_room_versions() + .contains(&body.room_version) + { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -1549,7 +1694,8 @@ pub async fn get_room_information_route( let room_id = services() .rooms - .alias.resolve_local_alias(&body.room_alias)? + .alias + .resolve_local_alias(&body.room_alias)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Room alias not found.", @@ -1576,7 +1722,9 @@ pub async fn get_profile_information_route( let mut blurhash = None; match &body.field { - Some(ProfileField::DisplayName) => displayname = services().users.displayname(&body.user_id)?, + Some(ProfileField::DisplayName) => { + displayname = services().users.displayname(&body.user_id)? + } Some(ProfileField::AvatarUrl) => { avatar_url = services().users.avatar_url(&body.user_id)?; blurhash = services().users.blurhash(&body.user_id)? @@ -1600,18 +1748,14 @@ pub async fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. 
-pub async fn get_keys_route( - body: Ruma, -) -> Result { +pub async fn get_keys_route(body: Ruma) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let result = get_keys_helper( - None, - &body.device_keys, - |u| Some(u.server_name()) == body.sender_servername.as_deref(), - ) + let result = get_keys_helper(None, &body.device_keys, |u| { + Some(u.server_name()) == body.sender_servername.as_deref() + }) .await?; Ok(get_keys::v1::Response { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 1388dc3..0727728 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,4 +1,4 @@ -use super::{super::Config, watchers::Watchers, KvTree, KeyValueDatabaseEngine}; +use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; use crate::{utils, Result}; use std::{ future::Future, diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 5674ac0..7d2a870 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,9 +1,15 @@ use std::collections::HashMap; -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; -use serde::{Serialize, de::DeserializeOwned}; +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + signatures::CanonicalJsonValue, + DeviceId, RoomId, UserId, +}; +use serde::{de::DeserializeOwned, Serialize}; -use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. 
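The get_keys_route hunk earlier in this patch passes get_keys_helper a closure that only admits users whose server name matches the requesting server. A minimal, std-only sketch of that predicate over plain string user IDs; the split-on-colon helper and the sample IDs are illustrative stand-ins for ruma's UserId/ServerName types, not Conduit's actual code:

fn server_name_of(user_id: &str) -> Option<&str> {
    // A Matrix user ID has the form @localpart:server.name; everything after
    // the first ':' is the server name.
    user_id.split_once(':').map(|(_, server)| server)
}

fn main() {
    let sender_servername = Some("other.example.org");
    let requested = ["@alice:other.example.org", "@bob:conduit.rs"];
    // Only users homed on the requesting server pass the filter.
    let allowed: Vec<&str> = requested
        .into_iter()
        .filter(|u| server_name_of(u) == sender_servername)
        .collect();
    assert_eq!(allowed, ["@alice:other.example.org"]);
}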
diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index f427ba7..9a821a6 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -55,10 +55,13 @@ impl service::appservice::Data for KeyValueDatabase { } fn iter_ids<'a>(&'a self) -> Result> + 'a>> { - Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { - utils::string_from_bytes(&id) - .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - }))) + Ok(Box::new(self.id_appserviceregistrations.iter().map( + |(id, _)| { + utils::string_from_bytes(&id).map_err(|_| { + Error::bad_database("Invalid id bytes in id_appserviceregistrations.") + }) + }, + ))) } fn all(&self) -> Result> { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 199cbf6..fafaf49 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -2,9 +2,13 @@ use std::collections::BTreeMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; -use ruma::{signatures::Ed25519KeyPair, UserId, DeviceId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId, MilliSecondsSinceUnixEpoch}; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, +}; -use crate::{Result, service, database::KeyValueDatabase, Error, utils, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; pub const COUNTER: &[u8] = b"c"; @@ -35,28 +39,24 @@ impl service::globals::Data for KeyValueDatabase { // Return when *any* user changed his key // TODO: only send for user they share a room with - futures.push( - self.todeviceid_events - .watch_prefix(&userdeviceid_prefix), - ); + futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_invitestate - .watch_prefix(&userid_prefix), - ); + futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); futures.push( self.userroomid_notificationcount .watch_prefix(&userid_prefix), ); - futures.push( - self.userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); + futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); // Events for rooms we are in - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { let short_roomid = services() .rooms .short @@ -75,15 +75,9 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs - futures.push( - self.roomid_lasttypingupdate - .watch_prefix(&roomid_bytes), - ); + futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes)); - futures.push( - self.readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); + futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); // Key changes futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); @@ -110,10 +104,7 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); // One time keys - futures.push( - self.userid_lastonetimekeyupdate - 
.watch_prefix(&userid_bytes), - ); + futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); futures.push(Box::pin(services().globals.rotate.watch())); @@ -238,10 +229,7 @@ impl service::globals::Data for KeyValueDatabase { } fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.global - .insert(b"version", &new_version.to_be_bytes())?; + self.global.insert(b"version", &new_version.to_be_bytes())?; Ok(()) } - - } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 8171451..0738f73 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,8 +1,15 @@ use std::collections::BTreeMap; -use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; +use ruma::{ + api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + error::ErrorKind, + }, + serde::Raw, + RoomId, UserId, +}; -use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::key_backups::Data for KeyValueDatabase { fn create_backup( @@ -118,11 +125,7 @@ impl service::key_backups::Data for KeyValueDatabase { .transpose() } - fn get_backup( - &self, - user_id: &UserId, - version: &str, - ) -> Result>> { + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -322,12 +325,7 @@ impl service::key_backups::Data for KeyValueDatabase { Ok(()) } - fn delete_room_keys( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result<()> { + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index f024487..de96ace 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,9 +1,16 @@ use ruma::api::client::error::ErrorKind; -use crate::{database::KeyValueDatabase, service, Error, utils, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::media::Data for KeyValueDatabase { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { + fn create_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); @@ -28,14 +35,23 @@ impl service::media::Data for KeyValueDatabase { Ok(key) } - fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)> { + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; + let (key, _) = self + .mediaid_file + 
.scan_prefix(prefix) + .next() + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -57,9 +73,7 @@ impl service::media::Data for KeyValueDatabase { } else { Some( utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) + Error::bad_database("Content Disposition in mediaid_file is invalid unicode.") })?, ) }; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b05e47b..15f4e26 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,6 +1,9 @@ -use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; -use crate::{service, database::KeyValueDatabase, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -48,10 +51,7 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>> { + fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 0aa8dd4..112d6eb 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,13 +1,9 @@ -use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; +use ruma::{api::client::error::ErrorKind, RoomAliasId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId - ) -> Result<()> { + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); @@ -17,10 +13,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { let mut prefix = room_id.to_vec(); prefix.push(0xff); @@ -38,10 +31,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result>> { + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? 
.map(|bytes| { diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 49d3956..60057ac 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; -use crate::{service, database::KeyValueDatabase, Result, utils}; +use crate::{database::KeyValueDatabase, service, utils, Result}; impl service::rooms::auth_chain::Data for KeyValueDatabase { fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { @@ -12,14 +12,13 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { // We only save auth chains for single events in the db if key.len() == 1 { // Check DB cache - let chain = self.shorteventid_authchain + let chain = self + .shorteventid_authchain .get(&key[0].to_be_bytes())? .map(|chain| { chain .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) + .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) .collect() }); @@ -37,7 +36,6 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { } Ok(None) - } fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { @@ -53,7 +51,10 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { } // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); + self.auth_chain_cache + .lock() + .unwrap() + .insert(key, auth_chain); Ok(()) } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 727004e..661c202 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index b5007f8..6c65291 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -1,7 +1,7 @@ mod presence; -mod typing; mod read_receipt; +mod typing; -use crate::{service, database::KeyValueDatabase}; +use crate::{database::KeyValueDatabase, service}; impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1477c28..fdd51ce 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; +use ruma::{events::presence::PresenceEvent, presence::PresenceState, RoomId, UInt, UserId}; -use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a12e265..c78f0f5 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,8 +1,10 @@ use std::mem; -use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, 
serde::Raw, signatures::CanonicalJsonObject}; +use ruma::{ + events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject, RoomId, UserId, +}; -use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( @@ -50,13 +52,15 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { &'a self, room_id: &RoomId, since: u64, - ) -> Box, - u64, - Raw, - )>, - >> { + ) -> Box< + dyn Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >, + > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); @@ -64,42 +68,44 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - Box::new(self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) + Box::new( + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = utils::u64_from_bytes( + &k[prefix.len()..prefix.len() + mem::size_of::()], + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); + let mut json = + serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database( + "Read receipt in roomlatestid_roomlatest is invalid json.", + ) + })?; + json.remove("room_id"); - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - })) + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json) + .expect("json is valid raw value"), + ), + )) + }), + ) } - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index b7d3596..7b211e7 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,16 +1,11 @@ use std::collections::HashSet; -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; -use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, 
utils, Error, Result}; impl service::rooms::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { + fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -30,11 +25,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { Ok(()) } - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -53,17 +44,16 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { } if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.roomid_lasttypingupdate.insert( + room_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; } Ok(()) } - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { + fn last_typing_update(&self, room_id: &RoomId) -> Result { Ok(self .roomid_lasttypingupdate .get(room_id.as_bytes())? @@ -76,10 +66,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { .unwrap_or(0)) } - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>> { + fn typings_all(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -89,7 +76,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; user_ids.insert(user_id); } diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 133e1d0..a19d52c 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, DeviceId, RoomId}; +use ruma::{DeviceId, RoomId, UserId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 72f6251..63a6b1a 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, Result, services}; +use crate::{database::KeyValueDatabase, service, services, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index aa97544..2ecaadb 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,6 +1,6 @@ -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { 
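The trees touched across these key_value modules all build their keys the same way, which is why the reformatted call chains look so alike: raw identifier bytes joined by a 0xff separator, decoded again with rsplit on the same byte. A hedged, std-only sketch of that layout, with plain strings standing in for ruma's RoomId/UserId:

fn make_key(room_id: &str, user_id: &str) -> Vec<u8> {
    // Mirrors the `prefix.push(0xff)` pattern used when building roomuser/userroom keys;
    // the scheme relies on 0xff never appearing inside the identifiers themselves.
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}

fn last_component(key: &[u8]) -> Option<&[u8]> {
    // Mirrors `key.rsplit(|&b| b == 0xff).next()` as used when decoding a key suffix.
    key.rsplit(|&b| b == 0xff).next()
}

fn main() {
    let key = make_key("!room:conduit.rs", "@alice:conduit.rs");
    assert_eq!(last_component(&key), Some(&b"@alice:conduit.rs"[..]));
}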
diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f3ac414..76ec734 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use ruma::{RoomId, EventId}; +use ruma::{EventId, RoomId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 41df544..79e6a32 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,7 +2,7 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { @@ -27,7 +27,9 @@ impl service::rooms::search::Data for KeyValueDatabase { room_id: &RoomId, search_string: &str, ) -> Result>>, Vec)>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -63,10 +65,10 @@ impl service::rooms::search::Data for KeyValueDatabase { }; let mapped = common_elements.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }); + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }); Ok(Some((Box::new(mapped), words))) } diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index ecd12da..c022317 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,14 +1,11 @@ use std::sync::Arc; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::short::Data for KeyValueDatabase { - fn get_or_create_shorteventid( - &self, - event_id: &EventId, - ) -> Result { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { return Ok(*short); } @@ -180,10 +177,7 @@ impl service::rooms::short::Data for KeyValueDatabase { } /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)> { + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( utils::u64_from_bytes(&shortstatehash) @@ -209,10 +203,7 @@ impl service::rooms::short::Data for KeyValueDatabase { .transpose() } - fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result { + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ Some(short) => utils::u64_from_bytes(&short) .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 90ac0d5..80a7458 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,10 +1,10 @@ -use ruma::{RoomId, EventId}; -use tokio::sync::MutexGuard; -use std::sync::Arc; +use ruma::{EventId, RoomId}; use std::collections::HashSet; use std::fmt::Debug; +use std::sync::Arc; +use tokio::sync::MutexGuard; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { @@ -17,9 +17,12 @@ impl service::rooms::state::Data for KeyValueDatabase { }) } - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 4d5bd4a..39c261f 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,13 +1,18 @@ -use std::{collections::{BTreeMap, HashMap}, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; use async_trait::async_trait; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -15,7 +20,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = BTreeMap::new(); let mut i = 0; for compressed in full_state.into_iter() { - let parsed = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + let parsed = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -30,7 +38,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, shortstatehash: u64, ) -> Result>> { - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -39,7 +49,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; for compressed in full_state { - let (_, eventid) = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + let (_, eventid) = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? 
{ result.insert( ( @@ -69,11 +82,17 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match services().rooms.short.get_shortstatekey(event_type, state_key)? { + let shortstatekey = match services() + .rooms + .short + .get_shortstatekey(event_type, state_key)? + { Some(s) => s, None => return Ok(None), }; - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -82,7 +101,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { - services().rooms.state_compressor.parse_compressed_state_event(compressed) + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) @@ -96,7 +118,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id)) + .map_or(Ok(None), |event_id| { + services().rooms.timeline.get_pdu(&event_id) + }) } /// Returns the state hash for this pdu. @@ -122,7 +146,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) @@ -136,7 +162,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { self.state_get_id(current_shortstatehash, event_type, state_key) } else { Ok(None) @@ -150,7 +178,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? 
+ { self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 4043bc4..4ca6ac4 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,9 +1,13 @@ use std::{collections::HashSet, sync::Arc}; use regex::Regex; -use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, ServerName}; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + RoomId, ServerName, UserId, +}; -use crate::{service, database::KeyValueDatabase, services, Result, Error, utils}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -31,8 +35,13 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { Ok(()) } - - fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()> { + + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()> { let mut roomuser_id = room_id.as_bytes().to_vec(); roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.as_bytes()); @@ -46,8 +55,10 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &serde_json::to_vec(&last_state.unwrap_or_default()) .expect("state to bytes always works"), )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.roomuserid_invitecount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_leftstate.remove(&userroom_id)?; @@ -69,8 +80,10 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.roomuserid_leftcount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -324,21 +337,25 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database( + "User ID in room_useroncejoined is invalid unicode.", + ) + })?, ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - })) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + }), + ) } /// Returns an iterator over all invited members of a room. 
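The mark_as_invited and mark_as_left hunks above write services().globals.next_count() into the *count trees as big-endian bytes. A short sketch of that encoding and why big-endian is used, assuming only that counts are u64 values and that the underlying trees order keys lexicographically; the real decoding helper lives in utils:

fn u64_from_bytes(bytes: &[u8]) -> Option<u64> {
    // Counterpart to the `.to_be_bytes()` writes above.
    Some(u64::from_be_bytes(bytes.try_into().ok()?))
}

fn main() {
    let stored = 42u64.to_be_bytes();
    assert_eq!(u64_from_bytes(&stored), Some(42));
    // Big-endian keeps raw byte order equal to numeric order, so a prefix scan
    // over an ordered tree yields entries in increasing count order.
    assert!(2u64.to_be_bytes() < 10u64.to_be_bytes());
}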
@@ -350,21 +367,23 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - })) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + }), + ) } #[tracing::instrument(skip(self))] @@ -403,21 +422,23 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &'a self, user_id: &UserId, ) -> Box>> + 'a> { - Box::new(self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - })) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + }), + ) } /// Returns an iterator over all rooms a user was invited to. 
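In rooms_invited above, the value stored under each userroomid_invitestate key is simply the stripped state serialized with serde_json and deserialized again on read. A tiny sketch of that round trip; Vec<String> stands in for the real Vec<Raw<AnyStrippedStateEvent>>, and serde_json is the crate the surrounding code already uses:

fn main() -> Result<(), serde_json::Error> {
    let last_state = vec!["m.room.create".to_owned(), "m.room.member".to_owned()];
    // Write side: mirrors `serde_json::to_vec(&last_state.unwrap_or_default())`.
    let value: Vec<u8> = serde_json::to_vec(&last_state)?;
    // Read side: mirrors the `serde_json::from_slice(&state)` call in rooms_invited.
    let decoded: Vec<String> = serde_json::from_slice(&value)?;
    assert_eq!(decoded, last_state);
    Ok(())
}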
@@ -429,26 +450,31 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_invitestate.") + })?; - Ok((room_id, state)) - })) + Ok((room_id, state)) + }), + ) } #[tracing::instrument(skip(self))] @@ -502,26 +528,31 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_leftstate.") + })?; - Ok((room_id, state)) - })) + Ok((room_id, state)) + }), + ) } #[tracing::instrument(skip(self))] diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index aee1890..d0a9be4 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,6 +1,10 @@ use std::{collections::HashSet, mem::size_of}; -use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::state_compressor::data::StateDiff}, + utils, Error, Result, +}; impl service::rooms::state_compressor::Data for KeyValueDatabase { fn get_statediff(&self, shortstatehash: u64) -> Result { @@ -10,11 +14,7 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("State hash does not exist"))?; let parent = 
utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - let parent = if parent != 0 { - Some(parent) - } else { - None - }; + let parent = if parent != 0 { Some(parent) } else { None }; let mut add_mode = true; let mut added = HashSet::new(); @@ -35,7 +35,11 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { i += 2 * size_of::(); } - Ok(StateDiff { parent, added, removed }) + Ok(StateDiff { + parent, + added, + removed, + }) } fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 1723186..5d684a1 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,13 +1,17 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; -use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; +use ruma::{ + api::client::error::ErrorKind, signatures::CanonicalJsonObject, EventId, RoomId, UserId, +}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; impl service::rooms::timeline::Data for KeyValueDatabase { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -82,10 +86,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. - fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { @@ -187,10 +188,17 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } - fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()> { + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()> { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"))?; + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; self.lasttimelinecount_cache .lock() @@ -209,7 +217,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"))?; + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"), + )?; Ok(()) } else { Err(Error::BadRequest( @@ -227,7 +236,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>>>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? 
.expect("room exists") .to_be_bytes() @@ -239,18 +250,19 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(&first_pdu_id, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -262,7 +274,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { until: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -275,18 +289,19 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(current, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } fn pdus_after<'a>( @@ -296,7 +311,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { from: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? 
.expect("room exists") .to_be_bytes() @@ -309,21 +326,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(current, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } - fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()> { + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec>, + highlights: Vec>, + ) -> Result<()> { let notifies_batch = Vec::new(); let highlights_batch = Vec::new(); for user in notifies { diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 3759bda..78c78e1 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; -use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -50,7 +50,11 @@ impl service::rooms::user::Data for KeyValueDatabase { token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -60,7 +64,11 @@ impl service::rooms::user::Data for KeyValueDatabase { } fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -102,13 +110,15 @@ impl service::rooms::user::Data for KeyValueDatabase { }); // We use the default compare function because keys are sorted correctly (not reversed) - Ok(Box::new(Box::new(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })))) + Ok(Box::new(Box::new( + utils::common_elements(iterators, Ord::cmp) + .expect("users is not empty") + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { + Error::bad_database("Invalid RoomId bytes in userroomid_joined") + })?) 
+ .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + }), + ))) } } diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index a63b3c5..2ea6ad4 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, DeviceId, TransactionId}; +use ruma::{DeviceId, TransactionId, UserId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index cf242de..8a9f176 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,4 +1,8 @@ -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + signatures::CanonicalJsonValue, + DeviceId, UserId, +}; use crate::{database::KeyValueDatabase, service, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 55a518d..15699a1 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,9 +1,20 @@ -use std::{mem::size_of, collections::BTreeMap}; +use std::{collections::BTreeMap, mem::size_of}; -use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::{AnyToDeviceEvent, StateEventType}, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + UInt, UserId, +}; use tracing::warn; -use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, users::clean_signatures}, + services, utils, Error, Result, +}; impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. @@ -274,18 +285,21 @@ impl service::users::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata - Box::new(self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - })) + Box::new( + self.userdeviceid_metadata + .scan_prefix(prefix) + .map(|(bytes, _)| { + Ok(utils::string_from_bytes( + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + Error::bad_database("UserDevice ID in db is invalid.") + })?, + ) + .map_err(|_| { + Error::bad_database("Device ID in userdeviceid_metadata is invalid.") + })? + .into()) + }), + ) } /// Replaces the access token of one device. 
@@ -341,8 +355,10 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), )?; - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; Ok(()) } @@ -372,8 +388,10 @@ impl service::users::Data for KeyValueDatabase { prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -617,38 +635,47 @@ impl service::users::Data for KeyValueDatabase { let to = to.unwrap_or(u64::MAX); - Box::new(self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to + Box::new( + self.keychangeid_userid + .iter_from(&start, false) + .take_while(move |(k, _)| { + k.starts_with(&prefix) + && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { + if let Ok(c) = utils::u64_from_bytes(current) { + c <= to + } else { + warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + false + } } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + warn!("BadDatabase: Could not parse keychangeid_userid"); false } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - })) + }) + .map(|(_, bytes)| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "User ID in devicekeychangeid_userid is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid.") + }) + }), + ) } - fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()> { + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { let count = services().globals.next_count()?.to_be_bytes(); - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { // Don't send key updates to unencrypted rooms - if services().rooms + if services() + .rooms .state_accessor .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() @@ -883,20 +910,19 @@ impl service::users::Data for KeyValueDatabase { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - Box::new(self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - })) + Box::new( + self.userdeviceid_metadata + .scan_prefix(key) + .map(|(_, bytes)| { + serde_json::from_slice::(&bytes).map_err(|_| { + Error::bad_database("Device in userdeviceid_metadata is invalid.") + }) + }), + ) } /// Creates a new sync filter. Returns the filter id. 
- fn create_filter( - &self, - user_id: &UserId, - filter: &IncomingFilterDefinition, - ) -> Result { + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result { let filter_id = utils::random_string(4); let mut key = user_id.as_bytes().to_vec(); diff --git a/src/database/mod.rs b/src/database/mod.rs index 6868467..8a7c78e 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,8 +1,16 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms::{self, state_compressor::CompressedStateEvent}, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}, services, PduEvent, Services, SERVICES}; +use crate::{ + service::{ + account_data, appservice, globals, key_backups, media, pusher, + rooms::{self, state_compressor::CompressedStateEvent}, + sending, transaction_ids, uiaa, users, + }, + services, utils, Config, Error, PduEvent, Result, Services, SERVICES, +}; use abstraction::KeyValueDatabaseEngine; +use abstraction::KvTree; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; @@ -12,7 +20,8 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, + signatures::CanonicalJsonValue, + DeviceId, EventId, RoomId, UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -25,7 +34,6 @@ use std::{ }; use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -use abstraction::KvTree; pub struct KeyValueDatabase { _db: Arc, @@ -65,9 +73,9 @@ pub struct KeyValueDatabase { pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count //pub rooms: rooms::Rooms, @@ -279,127 +287,126 @@ impl KeyValueDatabase { let db = Arc::new(Self { _db: builder.clone(), - userid_password: builder.open_tree("userid_password")?, - userid_displayname: builder.open_tree("userid_displayname")?, - userid_avatarurl: builder.open_tree("userid_avatarurl")?, - userid_blurhash: builder.open_tree("userid_blurhash")?, - userdeviceid_token: builder.open_tree("userdeviceid_token")?, - userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, - userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, - token_userdeviceid: builder.open_tree("token_userdeviceid")?, - onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, - userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: builder.open_tree("keychangeid_userid")?, - keyid_key: builder.open_tree("keyid_key")?, - userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, - userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, - userid_usersigningkeyid: 
builder.open_tree("userid_usersigningkeyid")?, - userfilterid_filter: builder.open_tree("userfilterid_filter")?, - todeviceid_events: builder.open_tree("todeviceid_events")?, + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, - userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, - roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: builder - .open_tree("roomuserid_lastprivatereadupdate")?, - typingid_userid: builder.open_tree("typingid_userid")?, - roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - pduid_pdu: builder.open_tree("pduid_pdu")?, - eventid_pduid: builder.open_tree("eventid_pduid")?, - roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), + readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: builder + .open_tree("roomuserid_lastprivatereadupdate")?, + typingid_userid: builder.open_tree("typingid_userid")?, + roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, - alias_roomid: builder.open_tree("alias_roomid")?, - aliasid_alias: builder.open_tree("aliasid_alias")?, - publicroomids: builder.open_tree("publicroomids")?, + alias_roomid: builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: builder.open_tree("publicroomids")?, - tokenids: builder.open_tree("tokenids")?, + tokenids: builder.open_tree("tokenids")?, - roomserverids: builder.open_tree("roomserverids")?, - serverroomids: builder.open_tree("serverroomids")?, - userroomid_joined: 
builder.open_tree("userroomid_joined")?, - roomuserid_joined: builder.open_tree("roomuserid_joined")?, - roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, - roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, - roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, - userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, - roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, - userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, - roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, + roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, - disabledroomids: builder.open_tree("disabledroomids")?, + disabledroomids: builder.open_tree("disabledroomids")?, - lazyloadedids: builder.open_tree("lazyloadedids")?, + lazyloadedids: builder.open_tree("lazyloadedids")?, - userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, - userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, - statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, - shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, + statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, - shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, + shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, - roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, - shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, - eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, - shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, - roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, - roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, - statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, + shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, + statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, - eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, - softfailedeventids: 
builder.open_tree("softfailedeventids")?, + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, - referencedevents: builder.open_tree("referencedevents")?, - roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, - roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - mediaid_file: builder.open_tree("mediaid_file")?, - backupid_algorithm: builder.open_tree("backupid_algorithm")?, - backupid_etag: builder.open_tree("backupid_etag")?, - backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - servername_educount: builder.open_tree("servername_educount")?, - servernameevent_data: builder.open_tree("servernameevent_data")?, - servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, - id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - senderkey_pusher: builder.open_tree("senderkey_pusher")?, - global: builder.open_tree("global")?, - server_signingkeys: builder.open_tree("server_signingkeys")?, - - cached_registrations: Arc::new(RwLock::new(HashMap::new())), - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shorteventid_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - eventidshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shortstatekey_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - statekeyshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - lasttimelinecount_cache: Mutex::new(HashMap::new()), + referencedevents: builder.open_tree("referencedevents")?, + roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, + mediaid_file: builder.open_tree("mediaid_file")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, + userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, + servername_educount: builder.open_tree("servername_educount")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, + id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, + senderkey_pusher: builder.open_tree("senderkey_pusher")?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * 
config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), }); let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); @@ -407,7 +414,6 @@ impl KeyValueDatabase { // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); - // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. if services().users.count()? > 0 { @@ -570,7 +576,10 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - services().rooms.state_compressor.load_shortstatehash_info(dbg!(last_roomsstatehash)) + services() + .rooms + .state_compressor + .load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; @@ -643,14 +652,15 @@ impl KeyValueDatabase { current_state = HashSet::new(); current_sstatehash = Some(sstatehash); - let event_id = db - .shorteventid_eventid - .get(&seventid) - .unwrap() - .unwrap(); + let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap(); let string = utils::string_from_bytes(&event_id).unwrap(); let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = services().rooms.timeline.get_pdu(event_id).unwrap().unwrap(); + let pdu = services() + .rooms + .timeline + .get_pdu(event_id) + .unwrap() + .unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); @@ -764,8 +774,7 @@ impl KeyValueDatabase { .peekable(); while iter.peek().is_some() { - db.tokenids - .insert_batch(&mut iter.by_ref().take(1000))?; + db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; println!("smaller batch done"); } @@ -803,8 +812,7 @@ impl KeyValueDatabase { // Force E2EE device list updates so we can send them over federation for user_id in services().users.iter().filter_map(|r| r.ok()) { - services().users - .mark_device_key_update(&user_id)?; + services().users.mark_device_key_update(&user_id)?; } services().globals.bump_database_version(10)?; @@ -825,7 +833,8 @@ impl KeyValueDatabase { info!( "Loaded {} database with version {}", - services().globals.config.database_backend, latest_database_version + services().globals.config.database_backend, + latest_database_version ); } else { services() @@ -837,7 +846,8 @@ impl KeyValueDatabase { warn!( "Created new {} database with version {}", - services().globals.config.database_backend, latest_database_version + services().globals.config.database_backend, + latest_database_version ); } @@ -862,9 +872,7 @@ impl KeyValueDatabase { } }; - services() - .sending - .start_handler(sending_receiver); + services().sending.start_handler(sending_receiver); Self::start_cleanup_task().await; @@ -898,7 +906,8 @@ 
impl KeyValueDatabase { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(services().globals.config.cleanup_second_interval as u64); + let timer_interval = + Duration::from_secs(services().globals.config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); @@ -937,8 +946,10 @@ fn set_emergency_access() -> Result { let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is a valid UserId"); - services().users - .set_password(&conduit_user, services().globals.emergency_password().as_deref())?; + services().users.set_password( + &conduit_user, + services().globals.emergency_password().as_deref(), + )?; let (ruleset, res) = match services().globals.emergency_password() { Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), @@ -951,7 +962,8 @@ fn set_emergency_access() -> Result { GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, - }).expect("to json value always works"), + }) + .expect("to json value always works"), )?; res diff --git a/src/lib.rs b/src/lib.rs index c103d52..e6421e8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,22 +7,27 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] +pub mod api; mod config; mod database; mod service; -pub mod api; mod utils; -use std::{cell::Cell, sync::{RwLock, Arc}}; +use std::{ + cell::Cell, + sync::{Arc, RwLock}, +}; -pub use config::Config; -pub use utils::error::{Error, Result}; -pub use service::{Services, pdu::PduEvent}; pub use api::ruma_wrapper::{Ruma, RumaResponse}; +pub use config::Config; +pub use service::{pdu::PduEvent, Services}; +pub use utils::error::{Error, Result}; pub static SERVICES: RwLock> = RwLock::new(None); pub fn services<'a>() -> &'static Services { - &SERVICES.read().unwrap().expect("SERVICES should be initialized when this is called") + &SERVICES + .read() + .unwrap() + .expect("SERVICES should be initialized when this is called") } - diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 65780a6..c7c9298 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,7 +1,11 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; use crate::Result; +use ruma::{ + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; pub trait Data: Send + Sync { /// Places one event in the account data of the user and removes the previous entry. 
diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 1289f7a..5bf167d 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -3,9 +3,7 @@ mod data; pub use data::Data; use ruma::{ - api::client::{ - error::ErrorKind, - }, + api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, signatures::CanonicalJsonValue, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0b14314..db596a3 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,7 +28,15 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{Result, services, Error, api::{server_server, client_server::{AUTO_GEN_PASSWORD_LENGTH, leave_all_rooms}}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{ + api::{ + client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, + server_server, + }, + services, + utils::{self, HtmlEscape}, + Error, PduEvent, Result, +}; use super::pdu::PduBuilder; @@ -153,7 +161,6 @@ enum AdminCommand { EnableRoom { room_id: Box }, } - #[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), @@ -166,16 +173,14 @@ pub struct Service { } impl Service { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver, - ) { + pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = services() .rooms @@ -193,7 +198,8 @@ impl Service { mutex_lock: &MutexGuard<'_, ()>| { services() .rooms - .timeline.build_and_append_pdu( + .timeline + .build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) @@ -316,9 +322,11 @@ impl Service { ) -> Result { let reply_message_content = match command { AdminCommand::RegisterAppservice => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); + let parsed_config = + serde_yaml::from_str::(&appservice_config); match parsed_config { Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( @@ -343,7 +351,10 @@ impl Service { } AdminCommand::UnregisterAppservice { appservice_identifier, - } => match services().appservice.unregister_appservice(&appservice_identifier) { + } => match services() + .appservice + .unregister_appservice(&appservice_identifier) + { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {}", @@ -351,7 +362,11 @@ impl Service { )), }, AdminCommand::ListAppservices => { - if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { + if let Ok(appservices) = services() + .appservice + .iter_ids() + .map(|ids| ids.collect::>()) + { let count = appservices.len(); let output = format!( "Appservices ({}): {}", @@ -399,7 
+414,11 @@ impl Service { Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, AdminCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().unwrap(); + let map = services() + .globals + .roomid_federationhandletime + .read() + .unwrap(); let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { @@ -426,7 +445,10 @@ impl Service { Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = services().rooms.auth_chain.get_auth_chain(room_id, vec![event_id]) + let count = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![event_id]) .await? .count(); let elapsed = start.elapsed(); @@ -439,7 +461,8 @@ impl Service { } } AdminCommand::ParsePdu => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { @@ -477,15 +500,18 @@ impl Service { } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = services().rooms.timeline.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services() + .rooms + .timeline + .get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + let json_text = serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); RoomMessageEventContent::text_html( format!( "{}\n```json\n{}\n```", @@ -539,8 +565,11 @@ impl Service { if !services().users.exists(&user_id)? || services().users.is_deactivated(&user_id)? 
|| user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") + == UserId::parse_with_server_name( + "conduit", + services().globals.server_name(), + ) + .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( "The specified user does not exist or is deactivated!", @@ -549,7 +578,10 @@ impl Service { let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - match services().users.set_password(&user_id, Some(new_password.as_str())) { + match services() + .users + .set_password(&user_id, Some(new_password.as_str())) + { Ok(()) => RoomMessageEventContent::text_plain(format!( "Successfully reset the password for user {}: {}", user_id, new_password @@ -590,7 +622,8 @@ impl Service { // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - services().users + services() + .users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data @@ -604,7 +637,8 @@ impl Service { content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }).expect("to json value always works"), + }) + .expect("to json value always works"), )?; // we dont add a device since we're not the user, just the creator @@ -651,7 +685,8 @@ impl Service { } } AdminCommand::DeactivateAll { leave_rooms, force } => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let usernames = body.clone().drain(1..body.len() - 1).collect::>(); let mut user_ids: Vec<&UserId> = Vec::new(); @@ -672,17 +707,15 @@ impl Service { let mut admins = Vec::new(); if !force { - user_ids.retain(|&user_id| { - match services().users.is_admin(user_id) { - Ok(is_admin) => match is_admin { - true => { - admins.push(user_id.localpart()); - false - } - false => true, - }, - Err(_) => false, - } + user_ids.retain(|&user_id| match services().users.is_admin(user_id) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, }) } @@ -783,8 +816,8 @@ impl Service { } else { // Wrap the usage line in a code block, and add a yaml block example // This makes the usage of e.g. `register-appservice` more accurate - let re = - Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); + let re = Regex::new("(?m)^USAGE:\n (.*?)\n\n") + .expect("Regex compilation should not fail"); re.replace_all(&text, "USAGE:\n
                $1[nobr]\n[commandbodyblock]
                ") .replace("[commandbodyblock]", &command_body) }; @@ -808,7 +841,8 @@ impl Service { services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -818,8 +852,9 @@ impl Service { let state_lock = mutex_state.lock().await; // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); services().users.create(&conduit_user, None)?; @@ -1002,9 +1037,10 @@ impl Service { user_id: &UserId, displayname: String, ) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); + let admin_room_alias: Box = + format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); let room_id = services() .rooms .alias @@ -1012,7 +1048,8 @@ impl Service { .expect("Admin room must exist"); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -1022,8 +1059,9 @@ impl Service { let state_lock = mutex_state.lock().await; // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); // Invite and join the real user services().rooms.timeline.build_and_append_pdu( diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 0f74b2a..407ff1c 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,7 +1,11 @@ use std::collections::BTreeMap; use async_trait::async_trait; -use ruma::{signatures::Ed25519KeyPair, DeviceId, UserId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId}; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, ServerName, ServerSigningKeyId, UserId, +}; use crate::Result; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index de8d1aa..23a6159 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -4,7 +4,7 @@ pub use data::Data; use crate::api::server_server::FedDest; use crate::service::*; -use crate::{Config, utils, Error, Result}; +use crate::{utils, Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -89,12 +89,8 @@ impl Default for RotationHandler { } } - impl Service { - pub fn load( - db: Arc, - config: Config, - ) -> Result { + pub fn load(db: Arc, config: Config) -> Result { let keypair = db.load_keypair(); let keypair = match keypair { diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index 226b1e1..f711e5d 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -1,7 +1,11 @@ use std::collections::BTreeMap; -use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; use crate::Result; +use ruma::{ + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + serde::Raw, + 
    RoomId, UserId,
+};
 
 pub trait Data: Send + Sync {
     fn create_backup(
@@ -21,16 +25,10 @@ pub trait Data: Send + Sync {
 
     fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>>;
 
-    fn get_latest_backup(
-        &self,
-        user_id: &UserId,
-    ) -> Result<Option<(String, Raw<BackupAlgorithm>)>>;
+    fn get_latest_backup(&self, user_id: &UserId)
+        -> Result<Option<(String, Raw<BackupAlgorithm>)>>;
 
-    fn get_backup(
-        &self,
-        user_id: &UserId,
-        version: &str,
-    ) -> Result<Option<Raw<BackupAlgorithm>>>;
+    fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<Raw<BackupAlgorithm>>>;
 
     fn add_key(
         &self,
@@ -68,12 +66,7 @@ pub trait Data: Send + Sync {
 
     fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>;
 
-    fn delete_room_keys(
-        &self,
-        user_id: &UserId,
-        version: &str,
-        room_id: &RoomId,
-    ) -> Result<()>;
+    fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>;
 
     fn delete_room_key(
         &self,
diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs
index a3bed71..41ec1c1 100644
--- a/src/service/key_backups/mod.rs
+++ b/src/service/key_backups/mod.rs
@@ -1,7 +1,7 @@
 mod data;
 pub use data::Data;
 
-use crate::{utils, Error, Result, services};
+use crate::{services, utils, Error, Result};
 use ruma::{
     api::client::{
         backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
@@ -65,7 +65,8 @@ impl Service {
         session_id: &str,
         key_data: &Raw<KeyBackupData>,
     ) -> Result<()> {
-        self.db.add_key(user_id, version, room_id, session_id, key_data)
+        self.db
+            .add_key(user_id, version, room_id, session_id, key_data)
     }
 
     pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
@@ -123,6 +124,7 @@ impl Service {
         room_id: &RoomId,
         session_id: &str,
     ) -> Result<()> {
-        self.db.delete_room_key(user_id, version, room_id, session_id)
+        self.db
+            .delete_room_key(user_id, version, room_id, session_id)
     }
 }
diff --git a/src/service/media/data.rs b/src/service/media/data.rs
index 2e24049..75a682c 100644
--- a/src/service/media/data.rs
+++ b/src/service/media/data.rs
@@ -1,8 +1,20 @@
 use crate::Result;
 
 pub trait Data: Send + Sync {
-    fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result<Vec<u8>>;
+    fn create_file_metadata(
+        &self,
+        mxc: String,
+        width: u32,
+        height: u32,
+        content_disposition: Option<&str>,
+        content_type: Option<&str>,
+    ) -> Result<Vec<u8>>;
 
     /// Returns content_disposition, content_type and the metadata key.
- fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)>; } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d3dd2bd..ea276c0 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,8 +1,8 @@ mod data; pub use data::Data; +use crate::{services, utils, Error, Result}; use image::{imageops::FilterType, GenericImageView}; -use crate::{utils, Error, Result, services}; use std::{mem, sync::Arc}; use tokio::{ fs::File, @@ -29,7 +29,9 @@ impl Service { file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; + let key = self + .db + .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -48,7 +50,9 @@ impl Service { height: u32, file: &[u8], ) -> Result<()> { - let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; + let key = + self.db + .create_file_metadata(mxc, width, height, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -59,12 +63,13 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: String) -> Result> { - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc, 0, 0) + { let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - Ok(Some(FileMeta { content_disposition, content_type, @@ -108,7 +113,9 @@ impl Service { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), width, height) { + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), width, height) + { // Using saved thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -119,7 +126,9 @@ impl Service { content_type, file: file.to_vec(), })) - } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), 0, 0) { + } else if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), 0, 0) + { // Generate a thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -180,7 +189,13 @@ impl Service { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition.as_deref(), content_type.as_deref())?; + let thumbnail_key = self.db.create_file_metadata( + mxc, + width, + height, + content_disposition.as_deref(), + content_type.as_deref(), + )?; let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; diff --git a/src/service/mod.rs b/src/service/mod.rs index daf4329..dbddf40 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -5,7 +5,7 @@ use std::{ use lru_cache::LruCache; -use crate::{Result, Config}; +use 
crate::{Config, Result}; pub mod account_data; pub mod admin; @@ -49,7 +49,8 @@ impl Services { + key_backups::Data + media::Data, >( - db: Arc, config: Config + db: Arc, + config: Config, ) -> Result { Ok(Self { appservice: appservice::Service { db: db.clone() }, @@ -76,30 +77,26 @@ impl Services { state: rooms::state::Service { db: db.clone() }, state_accessor: rooms::state_accessor::Service { db: db.clone() }, state_cache: rooms::state_cache::Service { db: db.clone() }, - state_compressor: rooms::state_compressor::Service { db: db.clone(), stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize,)) }, - timeline: rooms::timeline::Service { db: db.clone(), lasttimelinecount_cache: Mutex::new(HashMap::new()) }, + state_compressor: rooms::state_compressor::Service { + db: db.clone(), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + }, + timeline: rooms::timeline::Service { + db: db.clone(), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }, user: rooms::user::Service { db: db.clone() }, }, - transaction_ids: transaction_ids::Service { - db: db.clone() - }, - uiaa: uiaa::Service { - db: db.clone() - }, - users: users::Service { - db: db.clone() - }, - account_data: account_data::Service { - db: db.clone() - }, + transaction_ids: transaction_ids::Service { db: db.clone() }, + uiaa: uiaa::Service { db: db.clone() }, + users: users::Service { db: db.clone() }, + account_data: account_data::Service { db: db.clone() }, admin: admin::Service { sender: todo!() }, globals: globals::Service::load(db.clone(), config)?, - key_backups: key_backups::Service { - db: db.clone() - }, - media: media::Service { - db: db.clone() - }, + key_backups: key_backups::Service { db: db.clone() }, + media: media::Service { db: db.clone() }, sending: sending::Service { maximum_requests: todo!(), sender: todo!(), diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 3be3300..724b2b2 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Error, services}; +use crate::{services, Error}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 305a538..243b77f 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,5 +1,8 @@ -use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::Result; +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; @@ -8,8 +11,5 @@ pub trait Data: Send + Sync { fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>>; + fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index e65c57a..78d5f26 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -79,7 +79,11 @@ impl Service { //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = services().globals.default_client().execute(reqwest_request).await; + let response = services() + .globals + .default_client() + .execute(reqwest_request) + .await; match response { Ok(mut response) => { @@ -196,7 +200,8 @@ impl Service { let ctx = PushConditionRoomCtx { room_id: 
room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: services().users + user_display_name: services() + .users .displayname(user)? .unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users.clone(), @@ -276,10 +281,10 @@ impl Service { let user_name = services().users.displayname(&event.sender)?; notifi.sender_display_name = user_name.as_deref(); - let room_name = if let Some(room_name_pdu) = - services().rooms + let room_name = if let Some(room_name_pdu) = services() + .rooms .state_accessor - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) .map_err(|_| Error::bad_database("Invalid room name event in database."))? @@ -290,11 +295,8 @@ impl Service { notifi.room_name = room_name.as_deref(); - self.send_request( - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; } // TODO: email diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 26bffae..90205f9 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,25 +1,15 @@ -use ruma::{RoomId, RoomAliasId}; use crate::Result; +use ruma::{RoomAliasId, RoomId}; pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId - ) -> Result<()>; + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()>; + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; /// Looks up the roomid for the given alias. 
-    fn resolve_local_alias(
-        &self,
-        alias: &RoomAliasId,
-    ) -> Result<Option<Box<RoomId>>>;
+    fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<Box<RoomId>>>;
 
     /// Returns all local aliases that point to the given room
     fn local_aliases_for_room(
diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs
index 65fb367..6a3cf4e 100644
--- a/src/service/rooms/alias/mod.rs
+++ b/src/service/rooms/alias/mod.rs
@@ -3,8 +3,8 @@ use std::sync::Arc;
 
 pub use data::Data;
 
-use ruma::{RoomAliasId, RoomId};
 use crate::Result;
+use ruma::{RoomAliasId, RoomId};
 
 pub struct Service {
     db: Arc<dyn Data>,
@@ -12,19 +12,12 @@ pub struct Service {
 
 impl Service {
     #[tracing::instrument(skip(self))]
-    pub fn set_alias(
-        &self,
-        alias: &RoomAliasId,
-        room_id: &RoomId,
-    ) -> Result<()> {
+    pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
         self.db.set_alias(alias, room_id)
     }
 
     #[tracing::instrument(skip(self))]
-    pub fn remove_alias(
-        &self,
-        alias: &RoomAliasId,
-    ) -> Result<()> {
+    pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
         self.db.remove_alias(alias)
     }
 
diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs
index 13fac2d..e8c379f 100644
--- a/src/service/rooms/auth_chain/data.rs
+++ b/src/service/rooms/auth_chain/data.rs
@@ -1,7 +1,11 @@
-use std::{collections::HashSet, sync::Arc};
 use crate::Result;
+use std::{collections::HashSet, sync::Arc};
 
 pub trait Data: Send + Sync {
-    fn get_cached_eventid_authchain(&self, shorteventid: &[u64]) -> Result<Option<Arc<HashSet<u64>>>>;
-    fn cache_auth_chain(&self, shorteventid: Vec<u64>, auth_chain: Arc<HashSet<u64>>) -> Result<()>;
+    fn get_cached_eventid_authchain(
+        &self,
+        shorteventid: &[u64],
+    ) -> Result<Option<Arc<HashSet<u64>>>>;
+    fn cache_auth_chain(&self, shorteventid: Vec<u64>, auth_chain: Arc<HashSet<u64>>)
+        -> Result<()>;
 }
diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs
index e35094b..ed06385 100644
--- a/src/service/rooms/auth_chain/mod.rs
+++ b/src/service/rooms/auth_chain/mod.rs
@@ -1,11 +1,14 @@
 mod data;
-use std::{sync::Arc, collections::{HashSet, BTreeSet}};
+use std::{
+    collections::{BTreeSet, HashSet},
+    sync::Arc,
+};
 
 pub use data::Data;
-use ruma::{RoomId, EventId, api::client::error::ErrorKind};
+use ruma::{api::client::error::ErrorKind, EventId, RoomId};
 use tracing::log::warn;
 
-use crate::{Result, services, Error};
+use crate::{services, Error, Result};
 
 pub struct Service {
     db: Arc<dyn Data>,
@@ -56,7 +59,11 @@ impl Service {
             }
 
             let chunk_key: Vec<u64> = chunk.iter().map(|(short, _)| short).copied().collect();
-            if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? {
+            if let Some(cached) = services()
+                .rooms
+                .auth_chain
+                .get_cached_eventid_authchain(&chunk_key)?
+            {
                 hits += 1;
                 full_auth_chain.extend(cached.iter().copied());
                 continue;
@@ -68,13 +75,18 @@ impl Service {
             let mut misses2 = 0;
             let mut i = 0;
             for (sevent_id, event_id) in chunk {
-                if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? {
+                if let Some(cached) = services()
+                    .rooms
+                    .auth_chain
+                    .get_cached_eventid_authchain(&[sevent_id])?
+ { hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?); - services().rooms + services() + .rooms .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( @@ -97,8 +109,10 @@ impl Service { misses2 ); let chunk_cache = Arc::new(chunk_cache); - services().rooms - .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + services() + .rooms + .auth_chain + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -115,11 +129,7 @@ impl Service { } #[tracing::instrument(skip(self, event_id))] - fn get_auth_chain_inner( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> Result> { + fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); @@ -131,7 +141,8 @@ impl Service { } for auth_event in &pdu.auth_events { let sauthevent = services() - .rooms.short + .rooms + .short .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index b4e020d..fb523cf 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { /// Adds the room to the public room directory diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index f759255..f378404 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::presence::PresenceEvent}; use crate::Result; +use ruma::{events::presence::PresenceEvent, RoomId, UserId}; pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index d657897..636bd91 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -2,7 +2,7 @@ mod data; use std::{collections::HashMap, sync::Arc}; pub use data::Data; -use ruma::{RoomId, UserId, events::presence::PresenceEvent}; +use ruma::{events::presence::PresenceEvent, RoomId, UserId}; use crate::Result; diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 5ebd89d..734c68d 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,5 +1,5 @@ -use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub trait Data: Send + Sync { /// Replaces the previous read receipt. @@ -15,13 +15,15 @@ pub trait Data: Send + Sync { &self, room_id: &RoomId, since: u64, - ) -> Box, - u64, - Raw, - )>, - >>; + ) -> Box< + dyn Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >, + >; /// Sets a private read marker at `count`. 
fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 1770877..35fee1a 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -3,8 +3,8 @@ use std::sync::Arc; pub use data::Data; -use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub struct Service { db: Arc, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 426d4e0..50b6d13 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,6 +1,6 @@ -use std::collections::HashSet; use crate::Result; -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; +use std::collections::HashSet; pub trait Data: Send + Sync { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 3752056..91892df 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; +use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; use crate::Result; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index d6ec8e9..689f678 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,22 +1,33 @@ /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -use ruma::{RoomVersionId, signatures::CanonicalJsonObject, api::federation::discovery::{get_server_keys, get_remote_server_keys}}; -use tokio::sync::Semaphore; +use ruma::{ + api::federation::discovery::{get_remote_server_keys, get_server_keys}, + signatures::CanonicalJsonObject, + RoomVersionId, +}; use std::{ collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, }; +use tokio::sync::Semaphore; -use futures_util::{Future, stream::FuturesUnordered, StreamExt}; +use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, - federation::{event::{get_event, get_room_state_ids}, membership::create_join_event, discovery::get_remote_server_keys_batch::{v2::QueryCriteria, self}}, + federation::{ + discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria}, + event::{get_event, get_room_state_ids}, + membership::create_join_event, + }, + }, + events::{ + room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, + StateEventType, }, - events::{room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, StateEventType}, int, serde::Base64, signatures::CanonicalJsonValue, @@ -24,9 +35,9 @@ use ruma::{ uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tracing::{error, info, trace, warn, debug}; +use tracing::{debug, error, info, trace, warn}; -use crate::{service::*, services, Result, Error, PduEvent}; +use crate::{service::*, services, Error, PduEvent, Result}; pub struct Service; @@ -72,10 +83,7 @@ impl Service { )); } - if services() 
- .rooms - .metadata - .is_disabled(room_id)? { + if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", @@ -94,7 +102,8 @@ impl Service { .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = services() - .rooms.timeline + .rooms + .timeline .first_pdu_in_room(room_id)? .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; @@ -113,21 +122,20 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (sorted_prev_events, mut eventid_info) = self.fetch_unknown_prev_events( - origin, - &create_event, - room_id, - pub_key_map, - incoming_pdu.prev_events.clone(), - ).await?; + let (sorted_prev_events, mut eventid_info) = self + .fetch_unknown_prev_events( + origin, + &create_event, + room_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ) + .await?; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - if services() - .rooms - .metadata - .is_disabled(room_id)? { + if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", @@ -224,15 +232,18 @@ impl Service { .write() .unwrap() .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = services().rooms.event_handler.upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - room_id, - pub_key_map, - ) - .await; + let r = services() + .rooms + .event_handler + .upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await; services() .globals .roomid_federationhandletime @@ -252,8 +263,7 @@ impl Service { room_id: &'a RoomId, value: BTreeMap, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> - { + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -282,14 +292,22 @@ impl Service { Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Signature verification failed", + )); } Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, - Err(_) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")), + Err(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Redaction failed", + )) + } } } Ok(ruma::signatures::Verified::All) => value, @@ -376,7 +394,8 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -415,9 +434,13 @@ impl Service { if services() .rooms - .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? + .pdu_metadata + .is_event_soft_failed(&incoming_pdu.event_id)? 
{ - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); } info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); @@ -448,7 +471,13 @@ impl Service { .pdu_shortstatehash(prev_event)?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(services().rooms.state_accessor.state_full_ids(shortstatehash).await) + Some( + services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await, + ) } else { None }; @@ -466,10 +495,10 @@ impl Service { })?; if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + )?; state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu @@ -483,21 +512,25 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; - - let sstatehash = - if let Ok(Some(s)) = services().rooms.state_accessor.pdu_shortstatehash(prev_eventid) { - s + let prev_event = + if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { + pdu } else { okay = false; break; }; + let sstatehash = if let Ok(Some(s)) = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_eventid) + { + s + } else { + okay = false; + break; + }; + extremity_sstatehashes.insert(sstatehash, prev_event); } @@ -513,13 +546,10 @@ impl Service { .await?; if let Some(state_key) = &prev_event.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - )?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + )?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -528,7 +558,8 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) + { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType state.insert((ty.to_string().into(), st_key), id.clone()); @@ -567,10 +598,8 @@ impl Service { new_state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey( + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, )?; @@ -618,15 +647,14 @@ impl Service { let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Found non-state pdu in state events.") + })?; - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + 
&state_key, + )?; match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { @@ -648,7 +676,9 @@ impl Service { if state.get(&create_shortstatekey).map(|id| id.as_ref()) != Some(&create_event.event_id) { - return Err(Error::bad_database("Incoming event refers to wrong create event.")); + return Err(Error::bad_database( + "Incoming event refers to wrong create event.", + )); } state_at_incoming_event = Some(state); @@ -683,7 +713,9 @@ impl Service { .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if !check_result { - return Err(Error::bad_database("Event has failed auth check with state at the event.")); + return Err(Error::bad_database( + "Event has failed auth check with state at the event.", + )); } info!("Auth check succeeded"); @@ -703,10 +735,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) info!("Calculating extremities"); - let mut extremities = services() - .rooms - .state - .get_forward_extremities(room_id)?; + let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events for prev_event in &incoming_pdu.prev_events { @@ -716,8 +745,15 @@ impl Service { } // Only keep those extremities were not referenced yet - extremities - .retain(|id| !matches!(services().rooms.pdu_metadata.is_event_referenced(room_id, id), Ok(true))); + extremities.retain(|id| { + !matches!( + services() + .rooms + .pdu_metadata + .is_event_referenced(room_id, id), + Ok(true) + ) + }); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event @@ -733,23 +769,21 @@ impl Service { // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); - let auth_events = services() - .rooms - .state - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - )?; + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if soft_fail { services().rooms.timeline.append_incoming_pdu( @@ -767,7 +801,10 @@ impl Service { .rooms .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id)?; - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); } if incoming_pdu.state_key.is_some() { @@ -789,15 +826,12 @@ impl Service { info!("Loading extremities"); for id in dbg!(&extremities) { - match services() - .rooms - .timeline - .get_pdu(id)? - { + match services().rooms.timeline.get_pdu(id)? { Some(leaf_pdu) => { extremity_sstatehashes.insert( services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&leaf_pdu.event_id)? 
.ok_or_else(|| { error!( @@ -829,10 +863,10 @@ impl Service { // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -921,10 +955,10 @@ impl Service { state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; services() .rooms .state_compressor @@ -936,7 +970,10 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let sstatehash = services().rooms.state_compressor.save_state(room_id, new_room_state)?; + let sstatehash = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; services() .rooms .state @@ -951,15 +988,14 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let pdu_id = services().rooms.timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities.iter().map(|e| (**e).to_owned()).collect(), - state_ids_compressed, - soft_fail, - &state_lock, - )?; + let pdu_id = services().rooms.timeline.append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + )?; info!("Appended incoming pdu"); @@ -1141,8 +1177,10 @@ impl Service { room_id: &RoomId, pub_key_map: &RwLock>>, initial_set: Vec>, - ) -> Result<(Vec>, HashMap, -(Arc, BTreeMap)>)> { + ) -> Result<( + Vec>, + HashMap, (Arc, BTreeMap)>, + )> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1223,7 +1261,8 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - }).map_err(|_| Error::bad_database("Error sorting prev events"))?; + }) + .map_err(|_| Error::bad_database("Error sorting prev events"))?; Ok((sorted, eventid_info)) } @@ -1253,13 +1292,16 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let fetch_res = self.fetch_signing_keys( - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; + let fetch_res = self + .fetch_signing_keys( + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse( + "Invalid servername in signatures of server response pdu.", + ) + })?, + signature_ids, + ) + .await; let keys = match fetch_res { Ok(keys) => keys, @@ -1336,8 +1378,9 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + let contains_all_ids = |keys: &BTreeMap| { + signature_ids.iter().all(|id| keys.contains_key(id)) + }; let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername 
in signatures of server response pdu.") @@ -1373,8 +1416,10 @@ impl Service { room_version: &RoomVersionId, pub_key_map: &RwLock>>, ) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); + let mut servers: BTreeMap< + Box, + BTreeMap, QueryCriteria>, + > = BTreeMap::new(); { let mut pkm = pub_key_map @@ -1440,11 +1485,9 @@ impl Service { .into_iter() .map(|(server, _)| async move { ( - services().sending - .send_federation_request( - &server, - get_server_keys::v2::Request::new(), - ) + services() + .sending + .send_federation_request(&server, get_server_keys::v2::Request::new()) .await, server, ) @@ -1472,10 +1515,11 @@ impl Service { /// Returns Ok if the acl allows the server pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = match services() - .rooms.state_accessor - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? - { + let acl_event = match services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomServerAcl, + "", + )? { Some(acl) => acl, None => return Ok(()), }; @@ -1587,7 +1631,9 @@ impl Service { .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { - services().globals.add_signing_key(origin, server_key.clone())?; + services() + .globals + .add_signing_key(origin, server_key.clone())?; result.extend( server_key diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 524071c..9af8e21 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,5 +1,5 @@ -use ruma::{RoomId, DeviceId, UserId}; use crate::Result; +use ruma::{DeviceId, RoomId, UserId}; pub trait Data: Send + Sync { fn lazy_load_was_sent_before( @@ -15,7 +15,7 @@ pub trait Data: Send + Sync { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - confirmed_user_ids: &mut dyn Iterator, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()>; fn lazy_load_reset( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 760fffe..a01ce9b 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,15 +1,19 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::{Mutex, Arc}}; +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, Mutex}, +}; pub use data::Data; -use ruma::{DeviceId, UserId, RoomId}; +use ruma::{DeviceId, RoomId, UserId}; use crate::Result; pub struct Service { db: Arc, - lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, + lazy_load_waiting: + Mutex, Box, Box, u64), HashSet>>>, } impl Service { @@ -21,7 +25,8 @@ impl Service { room_id: &RoomId, ll_user: &UserId, ) -> Result { - self.db.lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) + self.db + .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } #[tracing::instrument(skip(self))] @@ -58,7 +63,12 @@ impl Service { room_id.to_owned(), since, )) { - self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|&u| &*u))?; + self.db.lazy_load_confirm_delivery( + user_id, + device_id, + room_id, + &mut user_ids.iter().map(|&u| &*u), + )?; } else { // Ignore } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index bc31ee8..27e7eb9 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) 
-> Result; diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index f1b0bad..8956e4d 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -16,7 +16,25 @@ pub mod state_compressor; pub mod timeline; pub mod user; -pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} +pub trait Data: + alias::Data + + auth_chain::Data + + directory::Data + + edus::Data + + lazy_loading::Data + + metadata::Data + + outlier::Data + + pdu_metadata::Data + + search::Data + + short::Data + + state::Data + + state_accessor::Data + + state_cache::Data + + state_compressor::Data + + timeline::Data + + user::Data +{ +} pub struct Service { pub alias: alias::Service, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index d36adc4..6404d8a 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -2,9 +2,9 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 9bc49cf..b157938 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use ruma::{EventId, RoomId}; use crate::Result; +use ruma::{EventId, RoomId}; pub trait Data: Send + Sync { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4724f85..7044338 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{RoomId, EventId}; +use ruma::{EventId, RoomId}; use crate::Result; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 0c14ffe..59652e0 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index ec1ad53..0ef9634 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -12,7 +12,12 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + pub fn index_pdu<'a>( + &self, + shortroomid: u64, + pdu_id: &[u8], + message_body: String, + ) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 07a2712..652c525 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,13 +1,10 @@ use std::sync::Arc; -use ruma::{EventId, events::StateEventType, RoomId}; use crate::Result; +use ruma::{events::StateEventType, EventId, RoomId}; pub trait Data: Send + Sync { - fn get_or_create_shorteventid( - &self, - event_id: &EventId, - ) 
-> Result; + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result; fn get_shortstatekey( &self, @@ -26,15 +23,9 @@ pub trait Data: Send + Sync { fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)>; + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>; fn get_shortroomid(&self, room_id: &RoomId) -> Result>; - fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result; + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 08ce5c5..1d2e040 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,19 +2,16 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, Error, utils, services}; +use crate::{services, utils, Error, Result}; pub struct Service { db: Arc, } impl Service { - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - ) -> Result { + pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { self.db.get_or_create_shorteventid(event_id) } @@ -43,10 +40,7 @@ impl Service { } /// Returns (shortstatehash, already_existed) - pub fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)> { + pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { self.db.get_or_create_shortstatehash(state_hash) } @@ -54,10 +48,7 @@ impl Service { self.db.get_shortroomid(room_id) } - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result { + pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { self.db.get_or_create_shortroomid(room_id) } } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8eca21d..3aa4914 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,7 +1,7 @@ -use std::sync::Arc; -use std::collections::HashSet; use crate::Result; use ruma::{EventId, RoomId}; +use std::collections::HashSet; +use std::sync::Arc; use tokio::sync::MutexGuard; pub trait Data: Send + Sync { @@ -9,7 +9,10 @@ pub trait Data: Send + Sync { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; /// Update the current state of the room. - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; @@ -20,7 +23,8 @@ pub trait Data: Send + Sync { fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; /// Replace the forward extremities of the room. 
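    /// Forward extremities are the current "leaves" of the room's event graph: accepted
    /// events that no other accepted event references in its `prev_events` yet. New PDUs
    /// built or accepted for the room use this set as their `prev_events`.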
- fn set_forward_extremities<'a>(&self, + fn set_forward_extremities<'a>( + &self, room_id: &RoomId, event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 57a0e77..2dff4b7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,13 +1,24 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; pub use data::Data; -use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, RoomEventType}, UserId, EventId, serde::Raw, RoomVersionId, state_res::{StateMap, self}}; +use ruma::{ + events::{ + room::{create::RoomCreateEventContent, member::MembershipState}, + AnyStrippedStateEvent, RoomEventType, StateEventType, + }, + serde::Raw, + state_res::{self, StateMap}, + EventId, RoomId, RoomVersionId, UserId, +}; use serde::Deserialize; use tokio::sync::MutexGuard; use tracing::warn; -use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; +use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; use super::state_compressor::CompressedStateEvent; @@ -25,7 +36,8 @@ impl Service { statediffremoved: HashSet, ) -> Result<()> { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -35,7 +47,10 @@ impl Service { let state_lock = mutex_state.lock().await; for event_id in statediffnew.into_iter().filter_map(|new| { - services().rooms.state_compressor.parse_compressed_state_event(new) + services() + .rooms + .state_compressor + .parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { @@ -75,7 +90,14 @@ impl Service { Err(_) => continue, }; - services().rooms.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership, + &pdu.sender, + None, + false, + )?; } services().rooms.state_cache.update_joined_count(room_id)?; @@ -98,7 +120,10 @@ impl Service { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result { - let shorteventid = services().rooms.short.get_or_create_shorteventid(event_id)?; + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -109,12 +134,21 @@ impl Service { .collect::>(), ); - let (shortstatehash, already_existed) = - services().rooms.short.get_or_create_shortstatehash(&state_hash)?; + let (shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, + )?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -152,11 +186,11 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, new_pdu))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - ) -> Result { - let shorteventid = services().rooms.short.get_or_create_shorteventid(&new_pdu.event_id)?; + pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; @@ -165,15 +199,25 @@ impl Service { } if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; - - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, )?; - let new = services().rooms.state_compressor.compress_state_event(shortstatekey, &new_pdu.event_id)?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; + + let new = services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -220,14 +264,18 @@ impl Service { ) -> Result>> { let mut state = Vec::new(); // Add recommended events - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCreate, + "", + )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomJoinRules, + "", + )? { state.push(e.to_stripped_state_event()); } if let Some(e) = services().rooms.state_accessor.room_state_get( @@ -237,14 +285,18 @@ impl Service { )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomAvatar, + "", + )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomName, + "", + )? { state.push(e.to_stripped_state_event()); } if let Some(e) = services().rooms.state_accessor.room_state_get( @@ -260,16 +312,23 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64, + pub fn set_room_state( + &self, + room_id: &RoomId, + shortstatehash: u64, mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()> { self.db.set_room_state(room_id, shortstatehash, mutex_lock) } /// Returns the room's version. 
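    /// The version is read from the room's `m.room.create` state event; roughly (a sketch
    /// using only names that appear in the surrounding code):
    ///
    /// ```ignore
    /// let create_event = services().rooms.state_accessor.room_state_get(
    ///     room_id,
    ///     &StateEventType::RoomCreate,
    ///     "",
    /// )?;
    /// let content: Option<RoomCreateEventContent> = create_event
    ///     .as_ref()
    ///     .map(|e| serde_json::from_str(e.content.get()))
    ///     .transpose()
    ///     .map_err(|_| Error::bad_database("Invalid create event in db."))?;
    /// let version = content.map(|c| c.room_version);
    /// ```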
#[tracing::instrument(skip(self))] pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; let create_event_content: Option = create_event .as_ref() @@ -294,12 +353,14 @@ impl Service { self.db.get_forward_extremities(room_id) } - pub fn set_forward_extremities<'a>(&self, + pub fn set_forward_extremities<'a>( + &self, room_id: &RoomId, event_ids: Vec>, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - self.db.set_forward_extremities(room_id, event_ids, state_lock) + self.db + .set_forward_extremities(room_id, event_ids, state_lock) } /// This fetches auth events from the current state. @@ -312,12 +373,13 @@ impl Service { state_key: Option<&str>, content: &serde_json::value::RawValue, ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; + let shortstatehash = if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) .expect("content is a valid JSON object"); @@ -325,14 +387,19 @@ impl Service { let mut sauthevents = auth_events .into_iter() .filter_map(|(event_type, state_key)| { - services().rooms.short.get_shortstatekey(&event_type.to_string().into(), &state_key) + services() + .rooms + .short + .get_shortstatekey(&event_type.to_string().into(), &state_key) .ok() .flatten() .map(|s| (s, (event_type, state_key))) }) .collect::>(); - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") @@ -340,11 +407,25 @@ impl Service { Ok(full_state .into_iter() - .filter_map(|compressed| services().rooms.state_compressor.parse_compressed_state_event(compressed).ok()) + .filter_map(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) + .ok() + }) .filter_map(|(shortstatekey, event_id)| { sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) }) - .filter_map(|(k, event_id)| services().rooms.timeline.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) + .filter_map(|(k, event_id)| { + services() + .rooms + .timeline + .get_pdu(&event_id) + .ok() + .flatten() + .map(|pdu| (k, pdu)) + }) .collect()) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 14f96bc..340b19c 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,9 +1,12 @@ -use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use async_trait::async_trait; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; #[async_trait] pub trait Data: Send + Sync { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a0f5523..e179d70 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,10 +1,13 @@ mod data; -use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; pub use data::Data; -use ruma::{events::StateEventType, RoomId, EventId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 950143f..a6b06a5 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,12 +1,21 @@ use std::{collections::HashSet, sync::Arc}; -use ruma::{UserId, RoomId, serde::Raw, events::{AnyStrippedStateEvent, AnySyncStateEvent}, ServerName}; use crate::Result; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + RoomId, ServerName, UserId, +}; pub trait Data: Send + Sync { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; - fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()>; fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 69bd832..04eb9af 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -9,8 +9,8 @@ use ruma::{ ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, tag::{TagEvent, TagEventContent}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, RoomAccountDataEvent, RoomAccountDataEventContent, + AnyStrippedStateEvent, AnySyncStateEvent, 
GlobalAccountDataEventType, RoomAccountDataEvent, + RoomAccountDataEventContent, RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, @@ -97,8 +97,9 @@ impl Service { RoomAccountDataEventType::Tag, )? .map(|event| { - serde_json::from_str(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) + serde_json::from_str(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) }) { services() @@ -113,16 +114,19 @@ impl Service { }; // Copy direct chat flag - if let Some(mut direct_event) = services().account_data.get( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? + if let Some(mut direct_event) = services() + .account_data + .get( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + )? .map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) + serde_json::from_str::(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) }) - { + { let direct_event = direct_event?; let mut room_ids_updated = false; @@ -138,7 +142,8 @@ impl Service { None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event).expect("to json always works"), + &serde_json::to_value(&direct_event) + .expect("to json always works"), )?; } }; @@ -158,10 +163,11 @@ impl Service { .to_string() .into(), )? - .map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) - }).transpose()? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .transpose()? .map_or(false, |ignored| { ignored .content diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 5f2cf02..f7c6dba 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,11 +1,15 @@ pub mod data; -use std::{mem::size_of, sync::{Arc, Mutex}, collections::HashSet}; +use std::{ + collections::HashSet, + mem::size_of, + sync::{Arc, Mutex}, +}; pub use data::Data; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use crate::{Result, utils, services}; +use crate::{services, utils, Result}; use self::data::StateDiff; @@ -23,7 +27,6 @@ pub struct Service { )>, >, >, - } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -51,7 +54,11 @@ impl Service { return Ok(r.clone()); } - let StateDiff { parent, added, removed } = self.db.get_statediff(shortstatehash)?; + let StateDiff { + parent, + added, + removed, + } = self.db.get_statediff(shortstatehash)?; if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; @@ -81,7 +88,9 @@ impl Service { ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( - &services().rooms.short + &services() + .rooms + .short .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); @@ -175,7 +184,14 @@ impl Service { if parent_states.is_empty() { // There is no parent layer, create a new state - self.db.save_statediff(shortstatehash, StateDiff { parent: None, added: statediffnew, removed: statediffremoved })?; + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }, + )?; return Ok(()); }; @@ -217,7 +233,14 @@ impl Service { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff(shortstatehash, StateDiff { parent: Some(parent.0), added: statediffnew, removed: statediffremoved })?; + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: Some(parent.0), + added: statediffnew, + removed: statediffremoved, + }, + )?; } Ok(()) @@ -228,8 +251,7 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result - { + ) -> Result { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( @@ -239,8 +261,10 @@ impl Service { .collect::>(), ); - let (new_shortstatehash, already_existed) = - services().rooms.short.get_or_create_shortstatehash(&state_hash)?; + let (new_shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { return Ok(new_shortstatehash); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 20eae7f..4ae8ce9 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; +use ruma::{signatures::CanonicalJsonObject, EventId, RoomId, UserId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub trait Data: Send + Sync { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; @@ -15,10 +15,7 @@ pub trait Data: Send + Sync { fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result>; + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>>; @@ -45,7 +42,13 @@ pub trait Data: Send + Sync { fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Adds a new pdu to the timeline - fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()>; + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()>; /// Removes a pdu and creates a new one with the same id. 
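    /// Used, for example, when a redaction is applied: the redacted version of the event
    /// overwrites the original under the same `pdu_id`, so the timeline position is kept
    /// while the content is stripped.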
fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -75,5 +78,10 @@ pub trait Data: Send + Sync { from: u64, ) -> Result, PduEvent)>>>>; - fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()>; + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec>, + highlights: Vec>, + ) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index f25550d..b71dacb 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,9 +1,9 @@ mod data; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, Mutex}; -use std::{iter, collections::HashSet}; use std::fmt::Debug; +use std::sync::{Arc, Mutex}; +use std::{collections::HashSet, iter}; pub use data::Data; use regex::Regex; @@ -11,13 +11,27 @@ use ruma::events::room::power_levels::RoomPowerLevelsEventContent; use ruma::push::Ruleset; use ruma::signatures::CanonicalJsonValue; use ruma::state_res::RoomVersion; -use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; +use ruma::{ + api::client::error::ErrorKind, + events::{ + push_rules::PushRulesEvent, + room::{create::RoomCreateEventContent, member::MembershipState}, + GlobalAccountDataEventType, RoomEventType, StateEventType, + }, + push::{Action, Tweak}, + serde::to_canonical_value, + signatures::CanonicalJsonObject, + state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, +}; use serde::Deserialize; use serde_json::value::to_raw_value; use tokio::sync::MutexGuard; -use tracing::{warn, error}; +use tracing::{error, warn}; -use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; +use crate::{ + service::pdu::{EventHash, PduBuilder}, + services, utils, Error, PduEvent, Result, +}; use super::state_compressor::CompressedStateEvent; @@ -135,7 +149,11 @@ impl Service { leaves: Vec>, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let shortroomid = services().rooms.short.get_shortroomid(&pdu.room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(&pdu.room_id)? + .expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -145,8 +163,15 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(shortstatehash) = services().rooms.state_accessor.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = services().rooms.state_accessor + if let Some(shortstatehash) = services() + .rooms + .state_accessor + .pdu_shortstatehash(&pdu.event_id) + .unwrap() + { + if let Some(prev_state) = services() + .rooms + .state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { @@ -165,11 +190,18 @@ impl Service { } // We must keep track of all events that have been referenced. 
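        // They are the `prev_events` of this PDU, so they can no longer serve as forward
        // extremities; the `leaves` passed in (typically just the new event) replace them.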
- services().rooms.pdu_metadata.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.state.set_forward_extremities(&pdu.room_id, leaves, state_lock)?; + services() + .rooms + .pdu_metadata + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services() + .rooms + .state + .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -181,9 +213,15 @@ impl Service { let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails - services().rooms.edus.read_receipt + services() + .rooms + .edus + .read_receipt .private_read_set(&pdu.room_id, &pdu.sender, count1)?; - services().rooms.user.reset_notification_counts(&pdu.sender, &pdu.room_id)?; + services() + .rooms + .user + .reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); @@ -211,7 +249,12 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in services().rooms.state_cache.get_our_real_users(&pdu.room_id)?.into_iter() { + for user in services() + .rooms + .state_cache + .get_our_real_users(&pdu.room_id)? + .into_iter() + { // Don't notify the user of their own events if &user == &pdu.sender { continue; @@ -224,8 +267,11 @@ impl Service { &user, GlobalAccountDataEventType::PushRules.to_string().into(), )? - .map(|event| serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid push rules event in db."))).transpose()? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db.")) + }) + .transpose()? .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| Ruleset::server_default(&user)); @@ -263,7 +309,8 @@ impl Service { } } - self.db.increment_notification_counts(&pdu.room_id, notifies, highlights); + self.db + .increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { RoomEventType::RoomRedaction => { @@ -315,7 +362,10 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - services().rooms.search.index_pdu(shortroomid, &pdu_id, body)?; + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, body)?; let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( @@ -329,8 +379,8 @@ impl Service { // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && services().globals.emergency_password().is_none(); + let from_conduit = pdu.sender == server_user + && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { services().admin.process_message(body.to_string()); @@ -341,8 +391,14 @@ impl Service { } for appservice in services().appservice.all()? { - if services().rooms.state_cache.appservice_in_room(&pdu.room_id, &appservice)? { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if services() + .rooms + .state_cache + .appservice_in_room(&pdu.room_id, &appservice)? 
+ { + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -359,11 +415,14 @@ impl Service { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()) + .ok() }) { if state_key_uid == &appservice_uid { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } } @@ -402,7 +461,10 @@ impl Service { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - services().rooms.alias.local_aliases_for_room(&pdu.room_id) + services() + .rooms + .alias + .local_aliases_for_room(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -411,21 +473,22 @@ impl Service { || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) || users.iter().any(matching_users) { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; } } } - Ok(pdu_id) } pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, @@ -443,10 +506,11 @@ impl Service { .take(20) .collect(); - let create_event = services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; let create_event_content: Option = create_event .as_ref() @@ -464,11 +528,15 @@ impl Service { .map_or(services().globals.default_room_version(), |create_event| { create_event.room_version }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - let auth_events = - services().rooms.state.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + let auth_events = services().rooms.state.get_auth_events( + room_id, + &event_type, + sender, + state_key.as_deref(), + &content, + )?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events @@ -481,9 +549,11 @@ impl Service { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - services().rooms.state_accessor.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { + if let Some(prev_pdu) = services().rooms.state_accessor.room_state_get( + room_id, + &event_type.to_string().into(), + state_key, + )? 
{ unsigned.insert( "prev_content".to_owned(), serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), @@ -589,7 +659,10 @@ impl Service { ); // Generate short event id - let _shorteventid = services().rooms.short.get_or_create_shorteventid(&pdu.event_id)?; + let _shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } @@ -604,7 +677,8 @@ impl Service { room_id: &RoomId, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; + let (pdu, pdu_json) = + self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -621,10 +695,17 @@ impl Service { // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, statehashid, state_lock)?; + services() + .rooms + .state + .set_room_state(room_id, statehashid, state_lock)?; - let mut servers: HashSet> = - services().rooms.state_cache.room_servers(room_id).filter_map(|r| r.ok()).collect(); + let mut servers: HashSet> = services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(|r| r.ok()) + .collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == RoomEventType::RoomMember { @@ -666,13 +747,23 @@ impl Service { )?; if soft_fail { - services().rooms.pdu_metadata + services() + .rooms + .pdu_metadata .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.state.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; + services().rooms.state.set_forward_extremities( + &pdu.room_id, + new_room_leaves, + state_lock, + )?; return Ok(None); } - let pdu_id = services().rooms.timeline.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; + let pdu_id = + services() + .rooms + .timeline + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; Ok(Some(pdu_id)) } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 6b7ebc7..fcaff5a 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,5 +1,5 @@ -use ruma::{UserId, RoomId}; use crate::Result; +use ruma::{RoomId, UserId}; pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 394a550..1caa4b3 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -29,7 +29,8 @@ impl Service { token: u64, shortstatehash: u64, ) -> Result<()> { - self.db.associate_token_shortstatehash(room_id, token, shortstatehash) + self.db + .associate_token_shortstatehash(room_id, token, shortstatehash) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b335095..e09d423 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,10 @@ use std::{ }; use crate::{ - utils::{self, calculate_hash}, Error, PduEvent, Result, services, api::{server_server, appservice_server}, + api::{appservice_server, server_server}, + services, + utils::{self, 
calculate_hash}, + Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -88,10 +91,7 @@ enum TransactionStatus { } impl Service { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, - ) { + pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>) { tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -119,7 +119,11 @@ impl Service { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - services().sending.servercurrentevent_data.remove(&key).unwrap(); + services() + .sending + .servercurrentevent_data + .remove(&key) + .unwrap(); continue; } @@ -129,10 +133,7 @@ impl Service { for (outgoing_kind, events) in initial_transactions { current_transaction_status .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events( - outgoing_kind.clone(), - events, - )); + futures.push(Self::handle_events(outgoing_kind.clone(), events)); } loop { @@ -246,7 +247,11 @@ impl Service { if retry { // We retry the previous transaction - for (key, value) in services().sending.servercurrentevent_data.scan_prefix(prefix) { + for (key, value) in services() + .sending + .servercurrentevent_data + .scan_prefix(prefix) + { if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { events.push(e); } @@ -258,7 +263,8 @@ impl Service { } else { &[][..] }; - services().sending + services() + .sending .servercurrentevent_data .insert(&full_key, value)?; @@ -273,7 +279,8 @@ impl Service { if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - services().sending + services() + .sending .servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; } @@ -302,7 +309,8 @@ impl Service { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( - services().users + services() + .users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == services().globals.server_name()), @@ -502,7 +510,8 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; let response = appservice_server::send_request( - services().appservice + services() + .appservice .get_registration(&id) .map_err(|e| (kind.clone(), e))? 
.ok_or_else(|| { @@ -621,16 +630,12 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; - let _response = services().pusher.send_push_notice( - &userid, - unread, - &pusher, - rules_for_user, - &pdu, - ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); + let _response = services() + .pusher + .send_push_notice(&userid, unread, &pusher, rules_for_user, &pdu) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); drop(permit); } diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index c5ff05c..7485531 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,5 +1,5 @@ -use ruma::{DeviceId, UserId, TransactionId}; use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; pub trait Data: Send + Sync { fn add_txnid( diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 8d5fd0a..a473e2b 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -3,8 +3,8 @@ use std::sync::Arc; pub use data::Data; -use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; pub struct Service { db: Arc, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 091f064..3b7eb2b 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,5 +1,5 @@ -use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; use crate::Result; +use ruma::{api::client::uiaa::UiaaInfo, signatures::CanonicalJsonValue, DeviceId, UserId}; pub trait Data: Send + Sync { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 5444118..8f3b3b8 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -3,10 +3,17 @@ use std::sync::Arc; pub use data::Data; -use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; +use ruma::{ + api::client::{ + error::ErrorKind, + uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, + }, + signatures::CanonicalJsonValue, + DeviceId, UserId, +}; use tracing::error; -use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; +use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; pub struct Service { db: Arc, @@ -68,11 +75,11 @@ impl Service { } }; - let user_id = - UserId::parse_with_server_name(username.clone(), services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; + let user_id = UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; // Check if password is correct if let Some(hash) = services().users.password_hash(&user_id)? 
{ diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 7eb0ceb..9f315d3 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,6 +1,12 @@ -use std::collections::BTreeMap; use crate::Result; -use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; +use ruma::{ + api::client::{device::Device, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, UInt, UserId, +}; +use std::collections::BTreeMap; pub trait Data: Send + Sync { /// Check if a user has an account on this homeserver. @@ -127,10 +133,7 @@ pub trait Data: Send + Sync { to: Option, ) -> Box>>>; - fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()>; + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; fn get_device_keys( &self, @@ -182,11 +185,8 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Get device metadata. - fn get_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result>; + fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) + -> Result>; fn get_devicelist_version(&self, user_id: &UserId) -> Result>; @@ -196,11 +196,7 @@ pub trait Data: Send + Sync { ) -> Box>>; /// Creates a new sync filter. Returns the filter id. - fn create_filter( - &self, - user_id: &UserId, - filter: &IncomingFilterDefinition, - ) -> Result; + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; fn get_filter( &self, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 826e049..0b83460 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,9 +2,15 @@ mod data; use std::{collections::BTreeMap, mem, sync::Arc}; pub use data::Data; -use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, RoomAliasId, UInt, UserId, +}; -use crate::{Result, Error, services}; +use crate::{services, Error, Result}; pub struct Service { db: Arc, @@ -22,15 +28,20 @@ impl Service { } /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = services().rooms.alias.resolve_local_alias(&admin_room_alias_id)?.unwrap(); + pub fn is_admin(&self, user_id: &UserId) -> Result { + let admin_room_alias_id = + RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias_id)? 
+ .unwrap(); - services().rooms.state_cache.is_joined(user_id, &admin_room_id) + services() + .rooms + .state_cache + .is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. @@ -39,7 +50,6 @@ impl Service { Ok(()) } - /// Returns the number of users registered on this server. pub fn count(&self) -> Result { self.db.count() @@ -118,7 +128,8 @@ impl Service { token: &str, initial_device_display_name: Option, ) -> Result<()> { - self.db.create_device(user_id, device_id, token, initial_device_display_name) + self.db + .create_device(user_id, device_id, token, initial_device_display_name) } /// Removes a device from a user. @@ -146,7 +157,8 @@ impl Service { one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, ) -> Result<()> { - self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) + self.db + .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { @@ -186,7 +198,8 @@ impl Service { self_signing_key: &Option>, user_signing_key: &Option>, ) -> Result<()> { - self.db.add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) + self.db + .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) } pub fn sign_key( @@ -208,10 +221,7 @@ impl Service { self.db.keys_changed(user_or_room_id, from, to) } - pub fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()> { + pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { self.db.mark_device_key_update(user_id) } @@ -251,7 +261,13 @@ impl Service { event_type: &str, content: serde_json::Value, ) -> Result<()> { - self.db.add_to_device_event(sender, target_user_id, target_device_id, event_type, content) + self.db.add_to_device_event( + sender, + target_user_id, + target_device_id, + event_type, + content, + ) } pub fn get_to_device_events( diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 0ee3ae8..9202eeb 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -78,7 +78,6 @@ pub fn calculate_hash(keys: &[&[u8]]) -> Vec { hash.as_ref().to_owned() } - pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, From f47a5cd5d5ce20b5996d28e37415771ee6d8a34b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:41:05 +0200 Subject: [PATCH 1218/1727] cargo fix --- src/api/client_server/account.rs | 7 +++---- src/api/client_server/membership.rs | 7 +------ src/api/client_server/tag.rs | 6 +++--- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 27 +++++++++----------------- src/database/key_value/account_data.rs | 8 +++----- src/database/key_value/rooms/state.rs | 2 +- src/database/key_value/users.rs | 2 +- src/database/mod.rs | 9 +++------ src/lib.rs | 3 +-- src/service/account_data/mod.rs | 13 +++++-------- src/service/admin/mod.rs | 3 +-- src/service/globals/mod.rs | 6 +++--- src/service/key_backups/mod.rs | 3 +-- src/service/media/mod.rs | 4 ++-- src/service/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 10 ++++------ src/service/rooms/timeline/mod.rs | 6 +++--- src/service/sending/mod.rs | 6 +++--- 21 files changed, 51 insertions(+), 79 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 28d6c07..e27d295 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ 
-1,4 +1,4 @@ -use std::sync::Arc; + use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{api::client_server, services, utils, Error, Result, Ruma}; @@ -13,14 +13,13 @@ use ruma::{ }, events::{ room::{ - member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, }, - GlobalAccountDataEventType, RoomEventType, + GlobalAccountDataEventType, }, push, UserId, }; -use serde_json::value::to_raw_value; + use tracing::{info, warn}; use register::RegistrationKind; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 5de8ce1..8ccaa89 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -12,26 +12,21 @@ use ruma::{ }, events::{ room::{ - create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion}, - uint, EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; use tracing::{debug, error, warn}; use crate::{ - api::{client_server, server_server}, service::pdu::{gen_event_id_canonical_json, PduBuilder}, services, utils, Error, PduEvent, Result, Ruma, }; diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index cb46d9c..c87e233 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -62,7 +62,7 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services().account_data.get( + let event = services().account_data.get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, @@ -103,13 +103,13 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services().account_data.get( + let event = services().account_data.get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, )?; - let mut tags_event = event + let tags_event = event .map(|e| { serde_json::from_str(e.get()) .map_err(|_| Error::bad_database("Invalid account data event in db.")) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index ee8c9e7..2d986a5 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{api::server_server, services, Error, Result}; +use crate::{services, Error, Result}; #[async_trait] impl FromRequest for Ruma diff --git a/src/api/server_server.rs b/src/api/server_server.rs index dba4489..c832b0d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,10 +4,10 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use futures_util::{StreamExt}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; -use regex::Regex; + use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -16,8 +16,7 @@ use ruma::{ device::get_devices::{self, 
v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_remote_server_keys, get_remote_server_keys_batch, - get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, + get_server_keys, get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, @@ -40,36 +39,28 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ - create::RoomCreateEventContent, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - server_acl::RoomServerAclEventContent, }, RoomEventType, StateEventType, }, - int, receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion, StateMap}, - to_device::DeviceIdOrAllDevices, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + signatures::{CanonicalJsonValue}, + to_device::DeviceIdOrAllDevices, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap}, fmt::Debug, - future::Future, mem, net::{IpAddr, SocketAddr}, - ops::Deref, - pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, + sync::{Arc, RwLock}, time::{Duration, Instant, SystemTime}, }; -use tokio::sync::{MutexGuard, Semaphore}; -use tracing::{debug, error, info, trace, warn}; + +use tracing::{info, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 7d2a870..0e8029f 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,13 +1,11 @@ use std::collections::HashMap; use ruma::{ - api::client::{error::ErrorKind, uiaa::UiaaInfo}, + api::client::{error::ErrorKind}, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - signatures::CanonicalJsonValue, - DeviceId, RoomId, UserId, + serde::Raw, RoomId, UserId, }; -use serde::{de::DeserializeOwned, Serialize}; + use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 80a7458..dbc1398 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,6 +1,6 @@ use ruma::{EventId, RoomId}; use std::collections::HashSet; -use std::fmt::Debug; + use std::sync::Arc; use tokio::sync::MutexGuard; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 15699a1..791e249 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,7 +5,7 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, }; use tracing::warn; diff --git a/src/database/mod.rs b/src/database/mod.rs index 8a7c78e..c4e64af 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -3,16 +3,14 @@ pub mod key_value; use crate::{ service::{ - account_data, appservice, globals, key_backups, media, pusher, - rooms::{self, 
state_compressor::CompressedStateEvent}, - sending, transaction_ids, uiaa, users, + rooms::{state_compressor::CompressedStateEvent}, }, services, utils, Config, Error, PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use futures_util::{StreamExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -28,11 +26,10 @@ use std::{ fs::{self, remove_dir_all}, io::Write, mem::size_of, - ops::Deref, path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; +use tokio::sync::{mpsc}; use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/lib.rs b/src/lib.rs index e6421e8..0afc75f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,8 +14,7 @@ mod service; mod utils; use std::{ - cell::Cell, - sync::{Arc, RwLock}, + sync::{RwLock}, }; pub use api::ruma_wrapper::{Ruma, RumaResponse}; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5bf167d..60a5308 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -3,17 +3,14 @@ mod data; pub use data::Data; use ruma::{ - api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - signatures::CanonicalJsonValue, - DeviceId, RoomId, UserId, + serde::Raw, RoomId, UserId, }; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, sync::Arc}; -use tracing::error; -use crate::{service::*, services, utils, Error, Result}; +use std::{collections::HashMap, sync::Arc}; + + +use crate::{Result}; pub struct Service { db: Arc, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index db596a3..8725e67 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,12 +26,11 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +use tokio::sync::{mpsc, MutexGuard}; use crate::{ api::{ client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, - server_server, }, services, utils::{self, HtmlEscape}, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 23a6159..6e03c15 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -2,15 +2,15 @@ mod data; pub use data::Data; use crate::api::server_server::FedDest; -use crate::service::*; -use crate::{utils, Config, Error, Result}; + +use crate::{Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use std::{ diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 41ec1c1..31652d2 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,11 +1,10 @@ mod data; pub use data::Data; -use crate::{services, utils, Error, Result}; +use crate::{Result}; use ruma::{ api::client::{ backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, }, serde::Raw, RoomId, UserId, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index ea276c0..61a733a 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,9 +1,9 @@ mod data; pub use 
data::Data; -use crate::{services, utils, Error, Result}; +use crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::{mem, sync::Arc}; +use std::{sync::Arc}; use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/mod.rs b/src/service/mod.rs index dbddf40..e1c6f7a 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::{HashMap}, sync::{Arc, Mutex}, }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 689f678..1232038 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -34,7 +34,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue}; use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 1d2e040..efa4362 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; pub use data::Data; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{services, utils, Error, Result}; +use crate::{Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 04eb9af..608dbca 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -2,21 +2,19 @@ mod data; use std::{collections::HashSet, sync::Arc}; pub use data::Data; -use regex::Regex; + use ruma::{ events::{ - direct::{DirectEvent, DirectEventContent}, + direct::{DirectEvent}, ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, - tag::{TagEvent, TagEventContent}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEvent, - RoomAccountDataEventContent, RoomAccountDataEventType, StateEventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, }; -use crate::{services, utils, Error, Result}; +use crate::{services, Error, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b71dacb..73f1451 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,9 +1,9 @@ mod data; -use std::borrow::Cow; + use std::collections::HashMap; -use std::fmt::Debug; + use std::sync::{Arc, Mutex}; -use std::{collections::HashSet, iter}; +use std::{collections::HashSet}; pub use data::Data; use regex::Regex; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e09d423..e5e8cff 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -13,7 +13,7 @@ use crate::{ }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; -use ring::digest; + use ruma::{ api::{ appservice, @@ -33,7 +33,7 @@ use ruma::{ }; use tokio::{ select, - sync::{mpsc, RwLock, Semaphore}, + sync::{mpsc, Semaphore}, }; use tracing::{error, warn}; @@ -297,7 +297,7 @@ impl Service { .sending .servername_educount .get(server.as_bytes())? 
- .map_or(Ok(0), |bytes| { + .map_or(Ok(0), |&bytes| { utils::u64_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) })?; From d5b4754cf47982c91898bde9a9bb61a8cbf6ab40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:02:52 +0200 Subject: [PATCH 1219/1727] 0 errors left! --- src/api/client_server/membership.rs | 13 +- src/api/server_server.rs | 2 +- src/database/key_value/media.rs | 4 +- src/database/key_value/mod.rs | 2 +- src/database/key_value/pusher.rs | 21 +- src/database/key_value/rooms/alias.rs | 6 +- src/database/key_value/rooms/directory.rs | 2 +- .../key_value/rooms/edus/read_receipt.rs | 2 +- src/database/key_value/rooms/metadata.rs | 14 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/timeline.rs | 10 +- src/database/key_value/rooms/user.rs | 2 +- src/database/key_value/sending.rs | 203 +++++++++ src/database/key_value/users.rs | 55 +-- src/database/mod.rs | 36 +- src/lib.rs | 1 + src/main.rs | 17 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 132 +++--- src/service/appservice/mod.rs | 2 +- src/service/globals/mod.rs | 6 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 72 +-- src/service/pusher/data.rs | 4 +- src/service/pusher/mod.rs | 12 +- src/service/rooms/alias/data.rs | 6 +- src/service/rooms/alias/mod.rs | 4 +- src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/edus/mod.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 6 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 6 +- src/service/rooms/metadata/data.rs | 1 + src/service/rooms/metadata/mod.rs | 6 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/data.rs | 4 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state/mod.rs | 6 +- src/service/rooms/state_accessor/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 6 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/timeline/data.rs | 6 +- src/service/rooms/timeline/mod.rs | 28 +- src/service/rooms/user/data.rs | 2 +- src/service/rooms/user/mod.rs | 2 +- src/service/sending/data.rs | 29 ++ src/service/sending/mod.rs | 415 ++++++------------ src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 2 +- src/service/users/data.rs | 14 +- src/service/users/mod.rs | 14 +- 59 files changed, 656 insertions(+), 563 deletions(-) create mode 100644 src/database/key_value/sending.rs create mode 100644 src/service/sending/data.rs diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 8ccaa89..0aae995 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -670,7 +670,7 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } - let shortstatehash = services().rooms.state.set_event_state( + let statehash_before_join = services().rooms.state.set_event_state( event_id, room_id, state @@ -684,8 +684,15 @@ async fn join_room_by_id_helper( .collect::>()?, )?; + services() + .rooms + .state + .set_room_state(room_id, statehash_before_join, &state_lock)?; + // We append to state before appending the pdu, so we 
don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. + let statehash_after_join = services().rooms.state.append_to_state(&parsed_pdu)?; + services().rooms.timeline.append_pdu( &parsed_pdu, join_event, @@ -698,9 +705,7 @@ async fn join_room_by_id_helper( services() .rooms .state - .set_room_state(room_id, shortstatehash, &state_lock)?; - - let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; + .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index c832b0d..bcf893c 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1319,7 +1319,7 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + let (_pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { event_type: RoomEventType::RoomMember, content, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index de96ace..6abe5ba 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -43,8 +43,8 @@ impl service::media::Data for KeyValueDatabase { ) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail + prefix.extend_from_slice(&width.to_be_bytes()); + prefix.extend_from_slice(&height.to_be_bytes()); prefix.push(0xff); let (key, _) = self diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index efb8550..c4496af 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -7,7 +7,7 @@ mod media; //mod pdu; mod pusher; mod rooms; -//mod sending; +mod sending; mod transaction_ids; mod uiaa; mod users; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 15f4e26..1468a55 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -3,7 +3,7 @@ use ruma::{ UserId, }; -use crate::{database::KeyValueDatabase, service, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, Result, utils}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -28,9 +28,13 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - fn get_pusher(&self, senderkey: &[u8]) -> Result> { + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + let mut senderkey = sender.as_bytes().to_vec(); + senderkey.push(0xff); + senderkey.extend_from_slice(pushkey.as_bytes()); + self.senderkey_pusher - .get(senderkey)? + .get(&senderkey)? 
.map(|push| { serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) @@ -51,10 +55,17 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>> { + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k)) + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { + let mut parts = k.splitn(2, |&b| b == 0xff); + let _senderkey = parts.next(); + let push_key = parts.next().ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string = utils::string_from_bytes(push_key).map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + + Ok(push_key_string) + })) } } diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 112d6eb..f3de89d 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -43,10 +43,10 @@ impl service::rooms::alias::Data for KeyValueDatabase { .transpose() } - fn local_aliases_for_room( - &self, + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 661c202..212ced9 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms(&self) -> Box>>> { + fn public_rooms<'a>(&'a self) -> Box>> + 'a> { Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index c78f0f5..19c1ced 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -59,7 +59,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { u64, Raw, )>, - >, + > + 'a, > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 63a6b1a..2ec18be 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{database::KeyValueDatabase, service, services, Result}; +use crate::{database::KeyValueDatabase, service, services, Result, utils, Error}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { @@ -18,6 +18,18 @@ impl service::rooms::metadata::Data for KeyValueDatabase { .is_some()) } + fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + })) + + } + fn is_disabled(&self, room_id: &RoomId) -> Result { Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) } diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 
79e6a32..8aa7a63 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -5,7 +5,7 @@ use ruma::RoomId; use crate::{database::KeyValueDatabase, service, services, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>>, Vec)>> { + ) -> Result>+ 'a>, Vec)>> { let prefix = services() .rooms .short diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 5d684a1..1660a9e 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -235,7 +235,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { let prefix = services() .rooms .short @@ -272,7 +272,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -309,7 +309,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -347,8 +347,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { notifies: Vec>, highlights: Vec>, ) -> Result<()> { - let notifies_batch = Vec::new(); - let highlights_batch = Vec::new(); + let mut notifies_batch = Vec::new(); + let mut highlights_batch = Vec::new(); for user in notifies { let mut userroom_id = user.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 78c78e1..9230e61 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -86,7 +86,7 @@ impl service::rooms::user::Data for KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>>>> { + ) -> Result>> + 'a>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs new file mode 100644 index 0000000..d84bd49 --- /dev/null +++ b/src/database/key_value/sending.rs @@ -0,0 +1,203 @@ +use ruma::{ServerName, UserId}; + +use crate::{ + database::KeyValueDatabase, + service::{ + self, + sending::{OutgoingKind, SendingEventType}, + }, + utils, Error, Result, +}; + +impl service::sending::Data for KeyValueDatabase { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a> { + Box::new( + self.servercurrentevent_data + .iter() + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), + ) + } + + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + Box::new( + self.servercurrentevent_data + .scan_prefix(prefix) + 
.map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), + ) + } + + fn delete_active_request(&self, key: Vec) -> Result<()> { + self.servercurrentevent_data.remove(&key) + } + + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key)?; + } + + Ok(()) + } + + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + + fn queue_requests( + &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>> { + let mut batch = Vec::new(); + let mut keys = Vec::new(); + for (outgoing_kind, event) in requests { + let mut key = outgoing_kind.get_prefix(); + key.push(0xff); + key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { + &**value + } else { + &[] + }); + let value = if let SendingEventType::Edu(value) = &event { + &**value + } else { + &[] + }; + batch.push((key.clone(), value.to_owned())); + keys.push(key); + } + self.servernameevent_data + .insert_batch(&mut batch.into_iter())?; + Ok(keys) + } + + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + return Box::new( + self.servernameevent_data + .scan_prefix(prefix.clone()) + .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), + ); + } + + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()> { + for (e, key) in events { + let value = if let SendingEventType::Edu(value) = &e { + &**value + } else { + &[] + }; + self.servercurrentevent_data.insert(key, value)?; + self.servernameevent_data.remove(key)?; + } + + Ok(()) + } + + fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { + self.servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + } + + fn get_latest_educount(&self, server_name: &ServerName) -> Result { + self.servername_educount + .get(server_name.as_bytes())? 
+ .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + }) + } +} + +#[tracing::instrument(skip(key))] +fn parse_servercurrentevent( + key: &[u8], + value: Vec, +) -> Result<(OutgoingKind, SendingEventType)> { + // Appservices start with a plus + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Appservice(server), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xff); + + let user = parts.next().expect("splitn always returns one element"); + let user_string = utils::string_from_bytes(&user) + .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; + let user_id = UserId::parse(user_string) + .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; + + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pushkey_string = utils::string_from_bytes(pushkey) + .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; + + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + ( + OutgoingKind::Push(user_id, pushkey_string), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + // I'm pretty sure this should never be called + SendingEventType::Edu(value) + }, + ) + } else { + let mut parts = key.splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + }) +} diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 791e249..86689f8 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -67,7 +67,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. 
- fn iter(&self) -> Box>>> { + fn iter<'a>(&'a self) -> Box>> + 'a> { Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -83,33 +83,11 @@ impl service::users::Data for KeyValueDatabase { let users: Vec = self .userid_password .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) + .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) .collect(); Ok(users) } - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } - } - /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password @@ -281,7 +259,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -626,7 +604,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -906,7 +884,7 @@ impl service::users::Data for KeyValueDatabase { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> Box>> { + ) -> Box> + 'a> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -956,3 +934,26 @@ impl service::users::Data for KeyValueDatabase { } } } + +/// Will only return with Some(username) if the password was not empty and the +/// username could be successfully parsed. +/// If utils::string_from_bytes(...) returns an error that username will be skipped +/// and the error will be logged. +fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.is_empty() { + None + } else { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(e) => { + warn!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + ); + None + } + } + } +} + diff --git a/src/database/mod.rs b/src/database/mod.rs index c4e64af..191cd62 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -166,19 +166,6 @@ pub struct KeyValueDatabase { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, pub(super) lasttimelinecount_cache: Mutex, u64>>, } @@ -279,10 +266,7 @@ impl KeyValueDatabase { eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); } - let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); - let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - - let db = Arc::new(Self { + let db_raw = Box::new(Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -399,14 +383,12 @@ impl KeyValueDatabase { )), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), lasttimelinecount_cache: Mutex::new(HashMap::new()), }); - let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); + let db = Box::leak(db_raw); + + let services_raw = Box::new(Services::build(db, config)?); // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); @@ -851,8 +833,6 @@ impl KeyValueDatabase { // This data is probably outdated db.presenceid_presence.clear()?; - services().admin.start_handler(admin_receiver); - // Set emergency access for the conduit user match set_emergency_access() { Ok(pwd_set) => { @@ -869,19 +849,11 @@ impl KeyValueDatabase { } }; - services().sending.start_handler(sending_receiver); - Self::start_cleanup_task().await; Ok(()) } - #[cfg(feature = "conduit_bin")] - pub async fn on_shutdown() { - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - services().globals.rotate.fire(); - } - #[tracing::instrument(skip(self))] pub fn flush(&self) -> Result<()> { let start = std::time::Instant::now(); diff --git a/src/lib.rs b/src/lib.rs index 0afc75f..9c397c0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,7 @@ pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub use config::Config; pub use service::{pdu::PduEvent, Services}; pub use utils::error::{Error, Result}; +pub use database::KeyValueDatabase; pub static SERVICES: RwLock> = RwLock::new(None); diff --git a/src/main.rs b/src/main.rs index d5b2731..71eaa66 100644 --- a/src/main.rs +++ b/src/main.rs @@ -17,6 +17,7 @@ use axum::{ Router, }; use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; +use conduit::api::{client_server, server_server}; use figment::{ providers::{Env, Format, Toml}, Figment, @@ -34,7 +35,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::warn; +use tracing::{warn, info}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -69,7 +70,7 @@ async fn main() { config.warn_deprecated(); - if let Err(e) = KeyValueDatabase::load_or_create(&config).await { + if let Err(e) = KeyValueDatabase::load_or_create(config).await { eprintln!( "The database couldn't be loaded or created. 
The following error occured: {}", e @@ -77,6 +78,8 @@ async fn main() { std::process::exit(1); }; + let config = &services().globals.config; + let start = async { run_server().await.unwrap(); }; @@ -119,7 +122,7 @@ async fn main() { } async fn run_server() -> io::Result<()> { - let config = DB.globals.config; + let config = &services().globals.config; let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); @@ -156,8 +159,7 @@ async fn run_server() -> io::Result<()> { header::AUTHORIZATION, ]) .max_age(Duration::from_secs(86400)), - ) - .add_extension(db.clone()); + ); let app = routes().layer(middlewares).into_make_service(); let handle = ServerHandle::new(); @@ -174,8 +176,9 @@ async fn run_server() -> io::Result<()> { } } - // After serve exits and before exiting, shutdown the DB - Database::on_shutdown(db).await; + // On shutdown + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); Ok(()) } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 60a5308..975c820 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -13,7 +13,7 @@ use std::{collections::HashMap, sync::Arc}; use crate::{Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 8725e67..2c77661 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -172,74 +172,82 @@ pub struct Service { } impl Service { - pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { - tokio::spawn(async move { - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); + pub fn build() -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + let self1 = Arc::new(Self { sender }); + let self2 = Arc::clone(&self1); - let conduit_user = - UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + tokio::spawn(async move { self2.start_handler(receiver).await; }); - let conduit_room = services() + self1 + } + + async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); + + let conduit_user = + UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = services() + .rooms + .alias + .resolve_local_alias( + format!("#admins:{}", services().globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), + ) + .expect("Database data for admin room alias must be valid") + .expect("Admin room must exist"); + + let send_message = |message: RoomMessageEventContent, + mutex_lock: &MutexGuard<'_, ()>| { + services() .rooms - .alias - .resolve_local_alias( - format!("#admins:{}", services().globals.server_name()) - .as_str() - .try_into() - .expect("#admins:server_name is a valid room alias"), + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, ) - .expect("Database data for admin room alias must be valid") - .expect("Admin room must exist"); + .unwrap(); + 
}; - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; + loop { + tokio::select! { + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await + }; - loop { - tokio::select! { - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await - }; + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.to_owned()) + .or_default(), + ); - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.to_owned()) - .or_default(), - ); + let state_lock = mutex_state.lock().await; - let state_lock = mutex_state.lock().await; + send_message(message_content, &state_lock); - send_message(message_content, &state_lock); - - drop(state_lock); - } + drop(state_lock); } } - }); + } } pub fn process_message(&self, room_message: String) { @@ -382,9 +390,7 @@ impl Service { } } AdminCommand::ListRooms => { - todo!(); - /* - let room_ids = services().rooms.iter_ids(); + let room_ids = services().rooms.metadata.iter_ids(); let output = format!( "Rooms:\n{}", room_ids @@ -393,6 +399,7 @@ impl Service { + "\tMembers: " + &services() .rooms + .state_cache .room_joined_count(&id) .ok() .flatten() @@ -402,7 +409,6 @@ impl Service { .join("\n") ); RoomMessageEventContent::text_plain(output) - */ } AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { @@ -648,11 +654,11 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - services().rooms.metadata.disable_room(&room_id, true); + services().rooms.metadata.disable_room(&room_id, true)?; RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - services().rooms.metadata.disable_room(&room_id, false); + services().rooms.metadata.disable_room(&room_id, false)?; RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index ad5ab4a..20ba08a 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -6,7 +6,7 @@ pub use data::Data; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 6e03c15..477b269 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -35,7 +35,7 @@ type SyncHandle = ( ); pub struct Service { - pub db: Arc, + pub db: &'static dyn Data, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -90,14 +90,14 @@ impl Default for RotationHandler { } impl Service { - pub fn load(db: Arc, config: Config) -> Result { + pub fn load(db: &'static dyn Data, config: Config) -> Result { let keypair = db.load_keypair(); let keypair = match keypair { Ok(k) => k, Err(e) => { 
error!("Keypair invalid. Deleting..."); - db.remove_keypair(); + db.remove_keypair()?; return Err(e); } }; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 31652d2..5d0ad59 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -12,7 +12,7 @@ use ruma::{ use std::{collections::BTreeMap, sync::Arc}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 61a733a..2964857 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -16,7 +16,7 @@ pub struct FileMeta { } pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/mod.rs b/src/service/mod.rs index e1c6f7a..e8696e7 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -29,11 +29,11 @@ pub struct Services { pub uiaa: uiaa::Service, pub users: users::Service, pub account_data: account_data::Service, - pub admin: admin::Service, + pub admin: Arc, pub globals: globals::Service, pub key_backups: key_backups::Service, pub media: media::Service, - pub sending: sending::Service, + pub sending: Arc, } impl Services { @@ -47,60 +47,60 @@ impl Services { + account_data::Data + globals::Data + key_backups::Data - + media::Data, + + media::Data + + sending::Data + + 'static >( - db: Arc, + db: &'static D, config: Config, ) -> Result { Ok(Self { - appservice: appservice::Service { db: db.clone() }, - pusher: pusher::Service { db: db.clone() }, + appservice: appservice::Service { db }, + pusher: pusher::Service { db }, rooms: rooms::Service { - alias: rooms::alias::Service { db: db.clone() }, - auth_chain: rooms::auth_chain::Service { db: db.clone() }, - directory: rooms::directory::Service { db: db.clone() }, + alias: rooms::alias::Service { db }, + auth_chain: rooms::auth_chain::Service { db }, + directory: rooms::directory::Service { db }, edus: rooms::edus::Service { - presence: rooms::edus::presence::Service { db: db.clone() }, - read_receipt: rooms::edus::read_receipt::Service { db: db.clone() }, - typing: rooms::edus::typing::Service { db: db.clone() }, + presence: rooms::edus::presence::Service { db }, + read_receipt: rooms::edus::read_receipt::Service { db }, + typing: rooms::edus::typing::Service { db }, }, event_handler: rooms::event_handler::Service, lazy_loading: rooms::lazy_loading::Service { - db: db.clone(), + db, lazy_load_waiting: Mutex::new(HashMap::new()), }, - metadata: rooms::metadata::Service { db: db.clone() }, - outlier: rooms::outlier::Service { db: db.clone() }, - pdu_metadata: rooms::pdu_metadata::Service { db: db.clone() }, - search: rooms::search::Service { db: db.clone() }, - short: rooms::short::Service { db: db.clone() }, - state: rooms::state::Service { db: db.clone() }, - state_accessor: rooms::state_accessor::Service { db: db.clone() }, - state_cache: rooms::state_cache::Service { db: db.clone() }, + metadata: rooms::metadata::Service { db }, + outlier: rooms::outlier::Service { db }, + pdu_metadata: rooms::pdu_metadata::Service { db }, + search: rooms::search::Service { db }, + short: rooms::short::Service { db }, + state: rooms::state::Service { db }, + state_accessor: rooms::state_accessor::Service { db }, + state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { - db: db.clone(), + db, stateinfo_cache: Mutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, timeline: rooms::timeline::Service { - db: db.clone(), 
+ db, lasttimelinecount_cache: Mutex::new(HashMap::new()), }, - user: rooms::user::Service { db: db.clone() }, - }, - transaction_ids: transaction_ids::Service { db: db.clone() }, - uiaa: uiaa::Service { db: db.clone() }, - users: users::Service { db: db.clone() }, - account_data: account_data::Service { db: db.clone() }, - admin: admin::Service { sender: todo!() }, - globals: globals::Service::load(db.clone(), config)?, - key_backups: key_backups::Service { db: db.clone() }, - media: media::Service { db: db.clone() }, - sending: sending::Service { - maximum_requests: todo!(), - sender: todo!(), + user: rooms::user::Service { db }, }, + transaction_ids: transaction_ids::Service { db }, + uiaa: uiaa::Service { db }, + users: users::Service { db }, + account_data: account_data::Service { db }, + admin: admin::Service::build(), + key_backups: key_backups::Service { db }, + media: media::Service { db }, + sending: sending::Service::build(db, &config), + + globals: globals::Service::load(db, config)?, }) } } diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 243b77f..cb8768d 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -7,9 +7,9 @@ use ruma::{ pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - fn get_pusher(&self, senderkey: &[u8]) -> Result>; + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>>; + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 78d5f26..3b12f38 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -26,7 +26,7 @@ use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -34,19 +34,19 @@ impl Service { self.db.set_pusher(sender, pusher) } - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.db.get_pusher(senderkey) + pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + self.db.get_pusher(sender, pushkey) } pub fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } - pub fn get_pusher_senderkeys<'a>( + pub fn get_pushkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { - self.db.get_pusher_senderkeys(sender) + ) -> Box>> { + self.db.get_pushkeys(sender) } #[tracing::instrument(skip(self, destination, request))] diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 90205f9..6299add 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -12,8 +12,8 @@ pub trait Data: Send + Sync { fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>>; /// Returns all local aliases that point to the given room - fn local_aliases_for_room( - &self, + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, - ) -> Box>>>; + ) -> Box>> + 'a>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 6a3cf4e..e76589a 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{RoomAliasId, RoomId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -30,7 +30,7 @@ impl Service { pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> Box>> + 'a> { 
self.db.local_aliases_for_room(room_id) } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index ed06385..d3b6e40 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -11,7 +11,7 @@ use tracing::log::warn; use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index fb523cf..320c6db 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -12,5 +12,5 @@ pub trait Data: Send + Sync { fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms(&self) -> Box>>>; + fn public_rooms<'a>(&'a self) -> Box>> + 'a>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index e85afef..9e5e815 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -7,7 +7,7 @@ use ruma::RoomId; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 8552363..cf7a359 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -2,7 +2,7 @@ pub mod presence; pub mod read_receipt; pub mod typing; -pub trait Data: presence::Data + read_receipt::Data + typing::Data {} +pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {} pub struct Service { pub presence: presence::Service, diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 636bd91..9cce9d8 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::presence::PresenceEvent, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 734c68d..9a02ee4 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -11,8 +11,8 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, + fn readreceipts_since<'a>( + &'a self, room_id: &RoomId, since: u64, ) -> Box< @@ -22,7 +22,7 @@ pub trait Data: Send + Sync { u64, Raw, )>, - >, + > + 'a, >; /// Sets a private read marker at `count`. 
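The hunks before and after this point all apply the same two changes: each service struct's `db` field changes from an `Arc`-based handle to `pub db: &'static dyn Data`, and trait methods that return iterators now return boxed trait objects tied to the borrow (`Box<dyn Iterator<Item = ...> + 'a>`). A minimal sketch of the resulting shape, assuming the database is kept alive for the whole program so a `'static` reference can be handed out (the `Data`, `Service`, and `things_for` names below are illustrative stand-ins, not the actual Conduit items):

    // Illustrative sketch only; not the real Conduit trait or service.
    pub trait Data: Send + Sync {
        // The returned iterator may borrow from the store, hence the explicit `+ 'a`.
        fn things_for<'a>(&'a self, key: &str) -> Box<dyn Iterator<Item = String> + 'a>;
    }

    pub struct Service {
        // The database is borrowed for 'static instead of being reference-counted,
        // so services no longer clone a handle on construction.
        pub db: &'static dyn Data,
    }

    impl Service {
        pub fn things_for<'a>(&'a self, key: &str) -> Box<dyn Iterator<Item = String> + 'a> {
            self.db.things_for(key)
        }
    }

Returning `Box<dyn Iterator<...> + 'a>` from the trait keeps `Data` object-safe while still letting the iterator borrow from the backing store, which is why the `impl Iterator` return types in the service wrappers are switched to boxed iterators in these hunks.
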
diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 35fee1a..8d6eaaf 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 91892df..fc06fe4 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 1232038..0c0bd2c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -256,7 +256,7 @@ impl Service { #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( - &self, + &'a self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, @@ -1015,7 +1015,7 @@ impl Service { /// d. TODO: Ask other servers over federation? #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( - &self, + &'a self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a01ce9b..2ed0bed 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -10,9 +10,9 @@ use ruma::{DeviceId, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, - lazy_load_waiting: + pub lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } @@ -67,7 +67,7 @@ impl Service { user_id, device_id, room_id, - &mut user_ids.iter().map(|&u| &*u), + &mut user_ids.iter().map(|u| &**u), )?; } else { // Ignore diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 27e7eb9..df416da 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -3,6 +3,7 @@ use ruma::RoomId; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; + fn iter_ids<'a>(&'a self) -> Box>> + 'a>; fn is_disabled(&self, room_id: &RoomId) -> Result; fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index b6cccd1..df9f40a 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::RoomId; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -17,6 +17,10 @@ impl Service { self.db.exists(room_id) } + pub fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + self.db.iter_ids() + } + pub fn is_disabled(&self, room_id: &RoomId) -> Result { self.db.is_disabled(room_id) } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 6404d8a..443abd1 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -7,7 +7,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/pdu_metadata/mod.rs 
b/src/service/rooms/pdu_metadata/mod.rs index 7044338..b816678 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::{EventId, RoomId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 59652e0..bd7d61b 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -2,11 +2,11 @@ use crate::Result; use ruma::RoomId; pub trait Data: Send + Sync { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>>, Vec)>>; + ) -> Result>+ 'a>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 0ef9634..1d8d01e 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::RoomId; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -16,7 +16,7 @@ impl Service { &self, shortroomid: u64, pdu_id: &[u8], - message_body: String, + message_body: &str, ) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index efa4362..d847dea 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; use crate::{Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 2dff4b7..614236c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -23,7 +23,7 @@ use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -33,7 +33,7 @@ impl Service { room_id: &RoomId, shortstatehash: u64, statediffnew: HashSet, - statediffremoved: HashSet, + _statediffremoved: HashSet, ) -> Result<()> { let mutex_state = Arc::clone( services() @@ -102,7 +102,7 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash, &state_lock); + self.db.set_room_state(room_id, shortstatehash, &state_lock)?; drop(state_lock); diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index e179d70..1a9c4a9 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -10,7 +10,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; use crate::{PduEvent, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 608dbca..cf4c665 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -17,7 +17,7 @@ use ruma::{ use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -112,7 +112,7 @@ impl Service { }; // Copy direct chat flag - if let Some(mut direct_event) = services() + if let Some(direct_event) = services() .account_data .get( None, @@ -125,7 +125,7 @@ impl 
Service { }) }) { - let direct_event = direct_event?; + let mut direct_event = direct_event?; let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index f7c6dba..b927cb7 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -14,7 +14,7 @@ use crate::{services, utils, Result}; use self::data::StateDiff; pub struct Service { - db: Arc, + pub db: &'static dyn Data, pub stateinfo_cache: Mutex< LruCache< diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 4ae8ce9..095731c 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -60,7 +60,7 @@ pub trait Data: Send + Sync { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. @@ -69,14 +69,14 @@ pub trait Data: Send + Sync { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; fn increment_notification_counts( &self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 73f1451..01c54a3 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -36,9 +36,9 @@ use crate::{ use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Arc, + pub db: &'static dyn Data, - pub(super) lasttimelinecount_cache: Mutex, u64>>, + pub lasttimelinecount_cache: Mutex, u64>>, } impl Service { @@ -253,10 +253,10 @@ impl Service { .rooms .state_cache .get_our_real_users(&pdu.room_id)? 
- .into_iter() + .iter() { // Don't notify the user of their own events - if &user == &pdu.sender { + if user == &pdu.sender { continue; } @@ -297,20 +297,20 @@ impl Service { } if notify { - notifies.push(user); + notifies.push(user.clone()); } if highlight { - highlights.push(user); + highlights.push(user.clone()); } - for senderkey in services().pusher.get_pusher_senderkeys(&user) { - services().sending.send_push_pdu(&*pdu_id, senderkey)?; + for push_key in services().pusher.get_pushkeys(&user) { + services().sending.send_push_pdu(&*pdu_id, &user, push_key?)?; } } self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights); + .increment_notification_counts(&pdu.room_id, notifies, highlights)?; match pdu.kind { RoomEventType::RoomRedaction => { @@ -365,7 +365,7 @@ impl Service { services() .rooms .search - .index_pdu(shortroomid, &pdu_id, body)?; + .index_pdu(shortroomid, &pdu_id, &body)?; let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( @@ -398,7 +398,7 @@ impl Service { { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; continue; } @@ -422,7 +422,7 @@ impl Service { if state_key_uid == &appservice_uid { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; continue; } } @@ -475,7 +475,7 @@ impl Service { { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; } } } @@ -565,7 +565,7 @@ impl Service { } } - let pdu = PduEvent { + let mut pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender.to_owned(), diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index fcaff5a..7b7841f 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -20,5 +20,5 @@ pub trait Data: Send + Sync { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>>>>; + ) -> Result>> + 'a>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 1caa4b3..0148399 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -7,7 +7,7 @@ use ruma::{RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs new file mode 100644 index 0000000..2e574e2 --- /dev/null +++ b/src/service/sending/data.rs @@ -0,0 +1,29 @@ +use ruma::ServerName; + +use crate::Result; + +use super::{OutgoingKind, SendingEventType}; + +pub trait Data: Send + Sync { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a>; + fn delete_active_request(&self, key: Vec) -> Result<()>; + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn queue_requests( + &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>>; + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a>; + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>; + fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>; + fn get_latest_educount(&self, server_name: 
&ServerName) -> Result; +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e5e8cff..cb16e70 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,15 +1,19 @@ +mod data; + +pub use data::Data; + use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::Arc, - time::{Duration, Instant}, + time::{Duration, Instant}, iter, }; use crate::{ api::{appservice_server, server_server}, services, utils::{self, calculate_hash}, - Error, PduEvent, Result, + Error, PduEvent, Result, Config, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -40,7 +44,7 @@ use tracing::{error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), - Push(Vec, Vec), // user and pushkey + Push(Box, String), // user and pushkey Normal(Box), } @@ -55,9 +59,9 @@ impl OutgoingKind { } OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(user); + p.extend_from_slice(user.as_bytes()); p.push(0xff); - p.extend_from_slice(pushkey); + p.extend_from_slice(pushkey.as_bytes()); p } OutgoingKind::Normal(server) => { @@ -74,14 +78,16 @@ impl OutgoingKind { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEventType { - Pdu(Vec), - Edu(Vec), + Pdu(Vec), // pduid + Edu(Vec), // pdu json } pub struct Service { + db: &'static dyn Data, + /// The state for a given state hash. pub(super) maximum_requests: Arc, - pub sender: mpsc::UnboundedSender<(Vec, Vec)>, + pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, } enum TransactionStatus { @@ -91,131 +97,113 @@ enum TransactionStatus { } impl Service { - pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>) { + pub fn build(db: &'static dyn Data, config: &Config) -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + + let self1 = Arc::new(Self { db, sender, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)) }); + let self2 = Arc::clone(&self1); + tokio::spawn(async move { - let mut futures = FuturesUnordered::new(); + self2.start_handler(receiver).await.unwrap(); + }); - let mut current_transaction_status = HashMap::, TransactionStatus>::new(); + self1 + } - // Retry requests we could not finish yet - let mut initial_transactions = HashMap::>::new(); + async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>) -> Result<()> { + let mut futures = FuturesUnordered::new(); - for (key, outgoing_kind, event) in services() - .sending - .servercurrentevent_data - .iter() - .filter_map(|(key, v)| { - Self::parse_servercurrentevent(&key, v) - .ok() - .map(|(k, e)| (key, k, e)) - }) - { - let entry = initial_transactions - .entry(outgoing_kind.clone()) - .or_insert_with(Vec::new); + let mut current_transaction_status = HashMap::::new(); - if entry.len() > 30 { - warn!( - "Dropping some current events: {:?} {:?} {:?}", - key, outgoing_kind, event - ); - services() - .sending - .servercurrentevent_data - .remove(&key) - .unwrap(); - continue; - } + // Retry requests we could not finish yet + let mut initial_transactions = HashMap::>::new(); - entry.push(event); + for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) + { + let entry = initial_transactions + .entry(outgoing_kind.clone()) + .or_insert_with(Vec::new); + + if entry.len() > 30 { + warn!( + "Dropping some current events: {:?} {:?} {:?}", + key, outgoing_kind, 
event + ); + self.db.delete_active_request(key)?; + continue; } - for (outgoing_kind, events) in initial_transactions { - current_transaction_status - .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events(outgoing_kind.clone(), events)); - } + entry.push(event); + } - loop { - select! { - Some(response) = futures.next() => { - match response { - Ok(outgoing_kind) => { - let prefix = outgoing_kind.get_prefix(); - for (key, _) in services().sending.servercurrentevent_data - .scan_prefix(prefix.clone()) - { - services().sending.servercurrentevent_data.remove(&key).unwrap(); - } + for (outgoing_kind, events) in initial_transactions { + current_transaction_status + .insert(outgoing_kind.clone(), TransactionStatus::Running); + futures.push(Self::handle_events(outgoing_kind.clone(), events)); + } - // Find events that have been added since starting the last request - let new_events: Vec<_> = services().sending.servernameevent_data - .scan_prefix(prefix.clone()) - .filter_map(|(k, v)| { - Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) - }) - .take(30) - .collect(); + loop { + select! { + Some(response) = futures.next() => { + match response { + Ok(outgoing_kind) => { + self.db.delete_all_active_requests_for(&outgoing_kind)?; - // TODO: find edus + // Find events that have been added since starting the last request + let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); - if !new_events.is_empty() { - // Insert pdus we found - for (e, key) in &new_events { - let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - services().sending.servercurrentevent_data.insert(key, value).unwrap(); - services().sending.servernameevent_data.remove(key).unwrap(); - } + // TODO: find edus - futures.push( - Self::handle_events( - outgoing_kind.clone(), - new_events.into_iter().map(|(event, _)| event.1).collect(), - ) - ); - } else { - current_transaction_status.remove(&prefix); - } - } - Err((outgoing_kind, _)) => { - current_transaction_status.entry(outgoing_kind.get_prefix()).and_modify(|e| *e = match e { - TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), - TransactionStatus::Failed(_, _) => { - error!("Request that was not even running failed?!"); - return - }, - }); - } - }; - }, - Some((key, value)) = receiver.recv() => { - if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { - if let Ok(Some(events)) = Self::select_events( - &outgoing_kind, - vec![(event, key)], - &mut current_transaction_status, - ) { - futures.push(Self::handle_events(outgoing_kind, events)); + if !new_events.is_empty() { + // Insert pdus we found + self.db.mark_as_active(&new_events)?; + + futures.push( + Self::handle_events( + outgoing_kind.clone(), + new_events.into_iter().map(|(event, _)| event).collect(), + ) + ); + } else { + current_transaction_status.remove(&outgoing_kind); } } + Err((outgoing_kind, _)) => { + current_transaction_status.entry(outgoing_kind).and_modify(|e| *e = match e { + TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), + TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), + TransactionStatus::Failed(_, _) => { + error!("Request that was not even running failed?!"); + return + }, + }); + } + }; + }, + Some((outgoing_kind, event, key)) = receiver.recv() => { + if let Ok(Some(events)) = 
self.select_events( + &outgoing_kind, + vec![(event, key)], + &mut current_transaction_status, + ) { + futures.push(Self::handle_events(outgoing_kind, events)); } } } - }); + } } - #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status))] + #[tracing::instrument(skip(self, outgoing_kind, new_events, current_transaction_status))] fn select_events( + &self, outgoing_kind: &OutgoingKind, new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key - current_transaction_status: &mut HashMap, TransactionStatus>, + current_transaction_status: &mut HashMap, ) -> Result>> { let mut retry = false; let mut allow = true; - let prefix = outgoing_kind.get_prefix(); - let entry = current_transaction_status.entry(prefix.clone()); + let entry = current_transaction_status.entry(outgoing_kind.clone()); entry .and_modify(|e| match e { @@ -247,42 +235,20 @@ impl Service { if retry { // We retry the previous transaction - for (key, value) in services() - .sending - .servercurrentevent_data - .scan_prefix(prefix) - { - if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { - events.push(e); - } + for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(|r| r.ok()) { + events.push(e); } } else { - for (e, full_key) in new_events { - let value = if let SendingEventType::Edu(value) = &e { - &**value - } else { - &[][..] - }; - services() - .sending - .servercurrentevent_data - .insert(&full_key, value)?; - - // If it was a PDU we have to unqueue it - // TODO: don't try to unqueue EDUs - services().sending.servernameevent_data.remove(&full_key)?; - + self.db.mark_as_active(&new_events)?; + for (e, _) in new_events { events.push(e); } if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { + if let Ok((select_edus, last_count)) = self.select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - services() - .sending - .servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; + self.db.set_latest_educount(server_name, last_count)?; } } } @@ -290,22 +256,15 @@ impl Service { Ok(Some(events)) } - #[tracing::instrument(skip(server))] - pub fn select_edus(server: &ServerName) -> Result<(Vec>, u64)> { + #[tracing::instrument(skip(self, server_name))] + pub fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu - let since = services() - .sending - .servername_educount - .get(server.as_bytes())? 
- .map_or(Ok(0), |&bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) - })?; + let since = self.db.get_latest_educount(server_name)?; let mut events = Vec::new(); let mut max_edu_count = since; let mut device_list_changes = HashSet::new(); - 'outer: for room_id in services().rooms.server_rooms(server) { + 'outer: for room_id in services().rooms.state_cache.server_rooms(server_name) { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( @@ -317,7 +276,7 @@ impl Service { ); // Look for read receipts in this room - for r in services().rooms.edus.readreceipts_since(&room_id, since) { + for r in services().rooms.edus.read_receipt.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; if count > max_edu_count { @@ -395,14 +354,12 @@ impl Service { Ok((events, max_edu_count)) } - #[tracing::instrument(skip(self, pdu_id, senderkey))] - pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec) -> Result<()> { - let mut key = b"$".to_vec(); - key.extend_from_slice(&senderkey); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.send((key, vec![])).unwrap(); + #[tracing::instrument(skip(self, pdu_id, user, pushkey))] + pub fn send_push_pdu(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { + let outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); + let event = SendingEventType::Pdu(pdu_id.to_owned()); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } @@ -413,17 +370,11 @@ impl Service { servers: I, pdu_id: &[u8], ) -> Result<()> { - let mut batch = servers.map(|server| { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu_id); - - self.sender.send((key.clone(), vec![])).unwrap(); - - (key, Vec::new()) - }); - - self.servernameevent_data.insert_batch(&mut batch)?; + let requests = servers.into_iter().map(|server| (OutgoingKind::Normal(server), SendingEventType::Pdu(pdu_id.to_owned()))).collect::>(); + let keys = self.db.queue_requests(&requests.iter().map(|(o, e)| (o, e.clone())).collect::>())?; + for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { + self.sender.send((outgoing_kind.to_owned(), event, key)).unwrap(); + } Ok(()) } @@ -435,23 +386,20 @@ impl Service { serialized: Vec, id: u64, ) -> Result<()> { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&id.to_be_bytes()); - self.servernameevent_data.insert(&key, &serialized)?; - self.sender.send((key, serialized)).unwrap(); + let outgoing_kind = OutgoingKind::Normal(server.to_owned()); + let event = SendingEventType::Edu(serialized); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } #[tracing::instrument(skip(self))] - pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = b"+".to_vec(); - key.extend_from_slice(appservice_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.send((key, vec![])).unwrap(); + pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { + let outgoing_kind = OutgoingKind::Appservice(appservice_id); + let event = 
SendingEventType::Pdu(pdu_id); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } @@ -460,18 +408,8 @@ impl Service { /// Used for instance after we remove an appservice registration /// #[tracing::instrument(skip(self))] - pub fn cleanup_events(&self, key_id: &str) -> Result<()> { - let mut prefix = b"+".to_vec(); - prefix.extend_from_slice(key_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { - self.servercurrentevent_data.remove(&key).unwrap(); - } - - for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { - self.servernameevent_data.remove(&key).unwrap(); - } + pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { + self.db.delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; Ok(()) } @@ -488,7 +426,7 @@ impl Service { for event in &events { match event { SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(services().rooms + pdu_jsons.push(services().rooms.timeline .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -525,7 +463,7 @@ impl Service { appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( - Self::calculate_hash( + calculate_hash( &events .iter() .map(|e| match e { @@ -546,7 +484,7 @@ impl Service { response } - OutgoingKind::Push(user, pushkey) => { + OutgoingKind::Push(userid, pushkey) => { let mut pdus = Vec::new(); for event in &events { @@ -554,6 +492,7 @@ impl Service { SendingEventType::Pdu(pdu_id) => { pdus.push( services().rooms + .timeline .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -584,27 +523,10 @@ impl Service { } } - let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user id in db."), - ) - })?; - - let mut senderkey = user.clone(); - senderkey.push(0xff); - senderkey.extend_from_slice(pushkey); - let pusher = match services() .pusher - .get_pusher(&senderkey) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + .get_pusher(&userid, pushkey) + .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? { Some(pusher) => pusher, None => continue, @@ -618,11 +540,13 @@ impl Service { GlobalAccountDataEventType::PushRules.to_string().into(), ) .unwrap_or_default() + .and_then(|event| serde_json::from_str::(event.get()).ok()) .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); let unread: UInt = services() .rooms + .user .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() @@ -639,7 +563,7 @@ impl Service { drop(permit); } - Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) + Ok(OutgoingKind::Push(userid.clone(), pushkey.clone())) } OutgoingKind::Normal(server) => { let mut edu_jsons = Vec::new(); @@ -651,6 +575,7 @@ impl Service { // TODO: check room version and remove event_id if needed let raw = PduEvent::convert_to_outgoing_federation_event( services().rooms + .timeline .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
.ok_or_else(|| { @@ -713,72 +638,6 @@ impl Service { } } - #[tracing::instrument(skip(key))] - fn parse_servercurrentevent( - key: &[u8], - value: Vec, - ) -> Result<(OutgoingKind, SendingEventType)> { - // Appservices start with a plus - Ok::<_, Error>(if key.starts_with(b"+") { - let mut parts = key[1..].splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Appservice(server), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else if key.starts_with(b"$") { - let mut parts = key[1..].splitn(3, |&b| b == 0xff); - - let user = parts.next().expect("splitn always returns one element"); - let pushkey = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - ( - OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else { - let mut parts = key.splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - }) - } #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index a473e2b..509b65c 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{DeviceId, TransactionId, UserId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 8f3b3b8..f8addcc 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -16,7 +16,7 @@ use tracing::error; use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 9f315d3..9537ed2 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -22,19 +22,13 @@ pub trait Data: Send + Sync { fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - fn iter(&self) -> Box>>>; + fn iter<'a>(&'a self) -> Box>> + 'a>; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. 
fn list_local_users(&self) -> Result>; - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; - /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result>; @@ -75,7 +69,7 @@ pub trait Data: Send + Sync { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>>>; + ) -> Box>> + 'a>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -131,7 +125,7 @@ pub trait Data: Send + Sync { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>>>; + ) -> Box>> + 'a>; fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; @@ -193,7 +187,7 @@ pub trait Data: Send + Sync { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> Box>>; + ) -> Box> + 'a>; /// Creates a new sync filter. Returns the filter id. fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 0b83460..e3419e7 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -13,7 +13,7 @@ use ruma::{ use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -72,14 +72,6 @@ impl Service { self.db.list_local_users() } - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - self.db.get_username_with_valid_password(username, password) - } - /// Returns the password hash for the given user. 
pub fn password_hash(&self, user_id: &UserId) -> Result> { self.db.password_hash(user_id) @@ -275,7 +267,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - self.get_to_device_events(user_id, device_id) + self.db.get_to_device_events(user_id, device_id) } pub fn remove_to_device_events( @@ -302,7 +294,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - self.get_device_metadata(user_id, device_id) + self.db.get_device_metadata(user_id, device_id) } pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { From 7822a385bbbdc608501a93499b83a3ca89fafb0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:03:07 +0200 Subject: [PATCH 1220/1727] cargo fmt --- src/api/client_server/account.rs | 9 +-- src/api/client_server/membership.rs | 7 +- src/api/server_server.rs | 15 ++-- src/database/key_value/account_data.rs | 6 +- src/database/key_value/pusher.rs | 20 ++++-- .../key_value/rooms/edus/read_receipt.rs | 12 ++-- src/database/key_value/rooms/metadata.rs | 3 +- src/database/key_value/rooms/search.rs | 2 +- src/database/key_value/users.rs | 4 +- src/database/mod.rs | 10 ++- src/lib.rs | 6 +- src/main.rs | 2 +- src/service/account_data/mod.rs | 6 +- src/service/admin/mod.rs | 16 ++--- src/service/globals/mod.rs | 4 +- src/service/key_backups/mod.rs | 6 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 4 +- src/service/pusher/data.rs | 6 +- src/service/pusher/mod.rs | 11 +-- src/service/rooms/edus/read_receipt/data.rs | 12 ++-- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 7 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state/mod.rs | 3 +- src/service/rooms/state_cache/mod.rs | 5 +- src/service/rooms/timeline/mod.rs | 6 +- src/service/sending/mod.rs | 72 ++++++++++++++----- 29 files changed, 143 insertions(+), 119 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index e27d295..58624a2 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,5 +1,3 @@ - - use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ @@ -11,12 +9,7 @@ use ruma::{ error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - events::{ - room::{ - message::RoomMessageEventContent, - }, - GlobalAccountDataEventType, - }, + events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, push, UserId, }; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 0aae995..d971e6b 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -11,12 +11,11 @@ use ruma::{ federation::{self, membership::create_invite}, }, events::{ - room::{ - member::{MembershipState, RoomMemberEventContent}, - }, + room::member::{MembershipState, RoomMemberEventContent}, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ diff --git a/src/api/server_server.rs b/src/api/server_server.rs index bcf893c..66aac9e 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,7 +4,7 @@ use 
crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::{StreamExt}; +use futures_util::StreamExt; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; @@ -15,10 +15,7 @@ use ruma::{ authorization::get_event_authorization, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{ - get_server_keys, - get_server_version, ServerSigningKeys, VerifyKey, - }, + discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, membership::{ @@ -46,13 +43,13 @@ use ruma::{ }, receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonValue}, - to_device::DeviceIdOrAllDevices, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, - ServerSigningKeyId, + signatures::CanonicalJsonValue, + to_device::DeviceIdOrAllDevices, + EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{BTreeMap}, + collections::BTreeMap, fmt::Debug, mem, net::{IpAddr, SocketAddr}, diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 0e8029f..e1eef96 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; use ruma::{ - api::client::{error::ErrorKind}, + api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; - use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::account_data::Data for KeyValueDatabase { diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 1468a55..42d4030 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -3,7 +3,7 @@ use ruma::{ UserId, }; -use crate::{database::KeyValueDatabase, service, Error, Result, utils}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -28,7 +28,11 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { let mut senderkey = sender.as_bytes().to_vec(); senderkey.push(0xff); senderkey.extend_from_slice(pushkey.as_bytes()); @@ -55,15 +59,21 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { + fn get_pushkeys<'a>( + &'a self, + sender: &UserId, + ) -> Box> + 'a> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { let mut parts = k.splitn(2, |&b| b == 0xff); let _senderkey = parts.next(); - let push_key = parts.next().ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; - let push_key_string = utils::string_from_bytes(push_key).map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + let push_key = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string = 
utils::string_from_bytes(push_key) + .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; Ok(push_key_string) })) diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 19c1ced..a8349f6 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -54,12 +54,12 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { since: u64, ) -> Box< dyn Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a, + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a, > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 2ec18be..0f61dbb 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{database::KeyValueDatabase, service, services, Result, utils, Error}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { @@ -27,7 +27,6 @@ impl service::rooms::metadata::Data for KeyValueDatabase { ) .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) })) - } fn is_disabled(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 8aa7a63..788c296 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>+ 'a>, Vec)>> { + ) -> Result> + 'a>, Vec)>> { let prefix = services() .rooms .short diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 86689f8..8213c5d 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,7 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, - UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, }; use tracing::warn; @@ -956,4 +955,3 @@ fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option< } } } - diff --git a/src/database/mod.rs b/src/database/mod.rs index 191cd62..977daf3 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,15 +2,13 @@ pub mod abstraction; pub mod key_value; use crate::{ - service::{ - rooms::{state_compressor::CompressedStateEvent}, - }, - services, utils, Config, Error, PduEvent, Result, Services, SERVICES, + service::rooms::state_compressor::CompressedStateEvent, services, utils, Config, Error, + PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::{StreamExt}; +use futures_util::StreamExt; use lru_cache::LruCache; use ruma::{ events::{ @@ -29,7 +27,7 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{mpsc}; +use tokio::sync::mpsc; use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/lib.rs b/src/lib.rs index 9c397c0..541b8c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,15 +13,13 @@ mod database; mod 
service; mod utils; -use std::{ - sync::{RwLock}, -}; +use std::sync::RwLock; pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub use config::Config; +pub use database::KeyValueDatabase; pub use service::{pdu::PduEvent, Services}; pub use utils::error::{Error, Result}; -pub use database::KeyValueDatabase; pub static SERVICES: RwLock> = RwLock::new(None); diff --git a/src/main.rs b/src/main.rs index 71eaa66..c7ef62d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,7 +35,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::{warn, info}; +use tracing::{info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 975c820..a4a678d 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -4,13 +4,13 @@ pub use data::Data; use ruma::{ events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; use std::{collections::HashMap, sync::Arc}; - -use crate::{Result}; +use crate::Result; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 2c77661..8f33056 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -29,9 +29,7 @@ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard}; use crate::{ - api::{ - client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, - }, + api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, services, utils::{self, HtmlEscape}, Error, PduEvent, Result, @@ -177,7 +175,9 @@ impl Service { let self1 = Arc::new(Self { sender }); let self2 = Arc::clone(&self1); - tokio::spawn(async move { self2.start_handler(receiver).await; }); + tokio::spawn(async move { + self2.start_handler(receiver).await; + }); self1 } @@ -186,9 +186,8 @@ impl Service { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = - UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = services() .rooms @@ -202,8 +201,7 @@ impl Service { .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { + let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| { services() .rooms .timeline diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 477b269..054df09 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -3,15 +3,13 @@ pub use data::Data; use crate::api::server_server::FedDest; - use crate::{Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UserId, + DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 5d0ad59..c8df0af 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,11 +1,9 @@ mod data; pub use data::Data; 
-use crate::{Result}; +use crate::Result; use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - }, + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, RoomId, UserId, }; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 2964857..96e9aa3 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::{sync::Arc}; +use std::sync::Arc; use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/mod.rs b/src/service/mod.rs index e8696e7..385dcc6 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap}, + collections::HashMap, sync::{Arc, Mutex}, }; @@ -49,7 +49,7 @@ impl Services { + key_backups::Data + media::Data + sending::Data - + 'static + + 'static, >( db: &'static D, config: Config, diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index cb8768d..e317121 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -7,9 +7,11 @@ use ruma::{ pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; + fn get_pusher(&self, sender: &UserId, pushkey: &str) + -> Result>; fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; + fn get_pushkeys<'a>(&'a self, sender: &UserId) + -> Box> + 'a>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 3b12f38..f8e5bca 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -34,7 +34,11 @@ impl Service { self.db.set_pusher(sender, pusher) } - pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + pub fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { self.db.get_pusher(sender, pushkey) } @@ -42,10 +46,7 @@ impl Service { self.db.get_pushers(sender) } - pub fn get_pushkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>> { + pub fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box>> { self.db.get_pushkeys(sender) } diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 9a02ee4..800c035 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -17,12 +17,12 @@ pub trait Data: Send + Sync { since: u64, ) -> Box< dyn Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a, + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a, >; /// Sets a private read marker at `count`. 
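The admin and sending services are now constructed through a `build()` that creates an unbounded channel and spawns the long-running handler on a cloned `Arc<Self>`, rather than having the caller wire up the sender and handler separately. A condensed sketch of that construction pattern, with the channel's message type simplified to a plain `String` (the real sending channel carries the outgoing kind, the event, and its queue key):

    use std::sync::Arc;
    use tokio::sync::mpsc;

    // Condensed sketch of the build()-and-spawn pattern; the message type and
    // the handler body are simplified stand-ins for the real sending logic.
    pub struct Service {
        pub sender: mpsc::UnboundedSender<String>,
    }

    impl Service {
        pub fn build() -> Arc<Self> {
            let (sender, receiver) = mpsc::unbounded_channel();

            let service = Arc::new(Self { sender });
            let handler = Arc::clone(&service);

            // The spawned task owns its own Arc, so it outlives the caller's
            // reference and keeps draining the queue in the background.
            tokio::spawn(async move {
                handler.start_handler(receiver).await;
            });

            service
        }

        async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<String>) {
            while let Some(event) = receiver.recv().await {
                // Each queued event would be dispatched to its destination here.
                let _ = event;
            }
        }
    }

Keeping only the `UnboundedSender` in the shared struct lets any service enqueue work without locking, while the single receiver lives entirely inside the spawned task.
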
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0c0bd2c..e5f8424 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -34,7 +34,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; -use serde_json::value::{RawValue as RawJsonValue}; +use serde_json::value::RawValue as RawJsonValue; use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index bd7d61b..82c0800 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -8,5 +8,5 @@ pub trait Data: Send + Sync { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>+ 'a>, Vec)>>; + ) -> Result> + 'a>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 1d8d01e..8035630 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -12,12 +12,7 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn index_pdu<'a>( - &self, - shortroomid: u64, - pdu_id: &[u8], - message_body: &str, - ) -> Result<()> { + pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index d847dea..45fadd7 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; pub use data::Data; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result}; +use crate::Result; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 614236c..7b8b0fd 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -102,7 +102,8 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash, &state_lock)?; + self.db + .set_room_state(room_id, shortstatehash, &state_lock)?; drop(state_lock); diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index cf4c665..2b4762a 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -5,10 +5,11 @@ pub use data::Data; use ruma::{ events::{ - direct::{DirectEvent}, + direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 01c54a3..16f50d2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -2,8 +2,8 @@ mod data; use std::collections::HashMap; +use std::collections::HashSet; use std::sync::{Arc, Mutex}; -use std::{collections::HashSet}; pub use data::Data; use regex::Regex; @@ -305,7 +305,9 @@ impl Service { } for push_key in services().pusher.get_pushkeys(&user) { - services().sending.send_push_pdu(&*pdu_id, &user, push_key?)?; + services() + .sending + 
.send_push_pdu(&*pdu_id, &user, push_key?)?; } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index cb16e70..b67f1e2 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -5,15 +5,16 @@ pub use data::Data; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, + iter, sync::Arc, - time::{Duration, Instant}, iter, + time::{Duration, Instant}, }; use crate::{ api::{appservice_server, server_server}, services, utils::{self, calculate_hash}, - Error, PduEvent, Result, Config, + Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -100,7 +101,11 @@ impl Service { pub fn build(db: &'static dyn Data, config: &Config) -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - let self1 = Arc::new(Self { db, sender, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)) }); + let self1 = Arc::new(Self { + db, + sender, + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), + }); let self2 = Arc::clone(&self1); tokio::spawn(async move { @@ -110,7 +115,10 @@ impl Service { self1 } - async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>) -> Result<()> { + async fn start_handler( + &self, + mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>, + ) -> Result<()> { let mut futures = FuturesUnordered::new(); let mut current_transaction_status = HashMap::::new(); @@ -118,8 +126,7 @@ impl Service { // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); - for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) - { + for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { let entry = initial_transactions .entry(outgoing_kind.clone()) .or_insert_with(Vec::new); @@ -137,8 +144,7 @@ impl Service { } for (outgoing_kind, events) in initial_transactions { - current_transaction_status - .insert(outgoing_kind.clone(), TransactionStatus::Running); + current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); futures.push(Self::handle_events(outgoing_kind.clone(), events)); } @@ -235,7 +241,11 @@ impl Service { if retry { // We retry the previous transaction - for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(|r| r.ok()) { + for (_, e) in self + .db + .active_requests_for(outgoing_kind) + .filter_map(|r| r.ok()) + { events.push(e); } } else { @@ -276,7 +286,12 @@ impl Service { ); // Look for read receipts in this room - for r in services().rooms.edus.read_receipt.readreceipts_since(&room_id, since) { + for r in services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + { let (user_id, count, read_receipt) = r?; if count > max_edu_count { @@ -359,7 +374,9 @@ impl Service { let outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); let event = SendingEventType::Pdu(pdu_id.to_owned()); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -370,10 +387,25 @@ impl Service { servers: I, pdu_id: &[u8], ) -> Result<()> { - let requests = servers.into_iter().map(|server| (OutgoingKind::Normal(server), SendingEventType::Pdu(pdu_id.to_owned()))).collect::>(); - 
let keys = self.db.queue_requests(&requests.iter().map(|(o, e)| (o, e.clone())).collect::>())?; + let requests = servers + .into_iter() + .map(|server| { + ( + OutgoingKind::Normal(server), + SendingEventType::Pdu(pdu_id.to_owned()), + ) + }) + .collect::>(); + let keys = self.db.queue_requests( + &requests + .iter() + .map(|(o, e)| (o, e.clone())) + .collect::>(), + )?; for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { - self.sender.send((outgoing_kind.to_owned(), event, key)).unwrap(); + self.sender + .send((outgoing_kind.to_owned(), event, key)) + .unwrap(); } Ok(()) @@ -389,7 +421,9 @@ impl Service { let outgoing_kind = OutgoingKind::Normal(server.to_owned()); let event = SendingEventType::Edu(serialized); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -399,7 +433,9 @@ impl Service { let outgoing_kind = OutgoingKind::Appservice(appservice_id); let event = SendingEventType::Pdu(pdu_id); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -409,7 +445,8 @@ impl Service { /// #[tracing::instrument(skip(self))] pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { - self.db.delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; + self.db + .delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; Ok(()) } @@ -638,7 +675,6 @@ impl Service { } } - #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( &self, From 50b0eb9929104a1eed008cbf0a8965a802c20306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:04:55 +0200 Subject: [PATCH 1221/1727] cargo fix --- src/api/server_server.rs | 1 - src/database/mod.rs | 5 ++--- src/main.rs | 4 ++-- src/service/account_data/mod.rs | 2 +- src/service/appservice/mod.rs | 2 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/pusher/mod.rs | 2 +- src/service/rooms/alias/mod.rs | 2 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 2 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/user/mod.rs | 2 +- src/service/sending/mod.rs | 3 +-- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 2 +- src/service/users/mod.rs | 2 +- 22 files changed, 23 insertions(+), 26 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 66aac9e..d54e130 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,7 +4,6 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::StreamExt; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; diff --git a/src/database/mod.rs b/src/database/mod.rs index 977daf3..882455f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,13 +2,12 @@ pub mod abstraction; pub mod key_value; use crate::{ - 
service::rooms::state_compressor::CompressedStateEvent, services, utils, Config, Error, + services, utils, Config, Error, PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::StreamExt; use lru_cache::LruCache; use ruma::{ events::{ @@ -27,7 +26,7 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::mpsc; + use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/main.rs b/src/main.rs index c7ef62d..1aad62b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,7 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; +use std::{future::Future, io, net::SocketAddr, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, @@ -28,7 +28,7 @@ use http::{ }; use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{client::error::ErrorKind, IncomingRequest}; -use tokio::{signal, sync::RwLock}; +use tokio::{signal}; use tower::ServiceBuilder; use tower_http::{ cors::{self, CorsLayer}, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index a4a678d..0387b13 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -8,7 +8,7 @@ use ruma::{ RoomId, UserId, }; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap}; use crate::Result; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 20ba08a..17402f4 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index c8df0af..51117cd 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -7,7 +7,7 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap}; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 96e9aa3..6684108 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::sync::Arc; + use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index f8e5bca..385a207 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -21,7 +21,7 @@ use ruma::{ serde::Raw, uint, RoomId, UInt, UserId, }; -use std::sync::Arc; + use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index e76589a..600a120 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 9e5e815..fcc0003 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 9cce9d8..0c3a3d6 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,5 @@ mod data; 
-use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap}; pub use data::Data; use ruma::{events::presence::PresenceEvent, RoomId, UserId}; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 8d6eaaf..3664fe9 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index fc06fe4..3d8afe6 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 2ed0bed..4ef58fd 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,7 +1,7 @@ mod data; use std::{ collections::{HashMap, HashSet}, - sync::{Arc, Mutex}, + sync::{Mutex}, }; pub use data::Data; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index df9f40a..1a36010 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 443abd1..2b5976c 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{signatures::CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 8035630..d15e42e 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 0148399..b7e2c00 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{RoomId, UserId}; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b67f1e2..f9e81d6 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -5,7 +5,6 @@ pub use data::Data; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, - iter, sync::Arc, time::{Duration, Instant}, }; @@ -13,7 +12,7 @@ use std::{ use crate::{ api::{appservice_server, server_server}, services, - utils::{self, calculate_hash}, + utils::{calculate_hash}, Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 509b65c..0cc30db 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f8addcc..1170193 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index e3419e7..4bcb183 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::BTreeMap, mem, sync::Arc}; +use 
std::{collections::BTreeMap, mem}; pub use data::Data; use ruma::{ From 8b5b7a1f637322e651c84d11a7b1616c7e29952e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:57:01 +0200 Subject: [PATCH 1222/1727] fix: panic on launch Now we start the admin and sending threads at a later time. --- src/api/client_server/directory.rs | 11 ++++---- src/database/mod.rs | 9 +++--- src/main.rs | 2 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 26 ++++++++++-------- src/service/appservice/mod.rs | 1 - src/service/key_backups/mod.rs | 2 +- src/service/rooms/alias/mod.rs | 1 - src/service/rooms/directory/mod.rs | 1 - src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/mod.rs | 1 - src/service/rooms/edus/typing/mod.rs | 1 - src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 1 - src/service/rooms/outlier/mod.rs | 1 - src/service/rooms/search/mod.rs | 1 - src/service/rooms/user/mod.rs | 1 - src/service/sending/mod.rs | 32 +++++++++++----------- src/service/transaction_ids/mod.rs | 1 - src/service/uiaa/mod.rs | 1 - 20 files changed, 46 insertions(+), 53 deletions(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index c1b0eda..90f79a0 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -29,7 +29,7 @@ use ruma::{ }, ServerName, UInt, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; /// # `POST /_matrix/client/r0/publicRooms` /// @@ -279,15 +279,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( JoinRule::Knock => Some(PublicRoomJoinRule::Knock), _ => None, }) - .map_err(|_| { - Error::bad_database("Invalid room join rule event in database.") + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") }) }) .transpose()? 
.flatten() - .ok_or(Error::bad_database( - "Invalid room join rule event in database.", - ))?, + .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, room_id, }; Ok(chunk) diff --git a/src/database/mod.rs b/src/database/mod.rs index 882455f..967ec88 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,10 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{ - services, utils, Config, Error, - PduEvent, Result, Services, SERVICES, -}; +use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; @@ -830,6 +827,8 @@ impl KeyValueDatabase { // This data is probably outdated db.presenceid_presence.clear()?; + services().admin.start_handler(); + // Set emergency access for the conduit user match set_emergency_access() { Ok(pwd_set) => { @@ -846,6 +845,8 @@ impl KeyValueDatabase { } }; + services().sending.start_handler(); + Self::start_cleanup_task().await; Ok(()) diff --git a/src/main.rs b/src/main.rs index 1aad62b..ce7e578 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,7 +28,7 @@ use http::{ }; use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{client::error::ErrorKind, IncomingRequest}; -use tokio::{signal}; +use tokio::signal; use tower::ServiceBuilder; use tower_http::{ cors::{self, CorsLayer}, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 0387b13..f9c49b1 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -8,7 +8,7 @@ use ruma::{ RoomId, UserId, }; -use std::{collections::HashMap}; +use std::collections::HashMap; use crate::Result; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 8f33056..218a4ea 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,7 +26,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, MutexGuard}; +use tokio::sync::{mpsc, Mutex, MutexGuard}; use crate::{ api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, @@ -164,25 +164,29 @@ pub enum AdminRoomEvent { SendMessage(RoomMessageEventContent), } -#[derive(Clone)] pub struct Service { pub sender: mpsc::UnboundedSender, + receiver: Mutex>, } impl Service { pub fn build() -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - let self1 = Arc::new(Self { sender }); - let self2 = Arc::clone(&self1); - - tokio::spawn(async move { - self2.start_handler(receiver).await; - }); - - self1 + Arc::new(Self { + sender, + receiver: Mutex::new(receiver), + }) } - async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(&self); + tokio::spawn(async move { + self2.handler().await; + }); + } + + async fn handler(&self) { + let mut receiver = self.receiver.lock().await; // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 17402f4..3052964 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 51117cd..fef4613 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -7,7 +7,7 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use 
std::{collections::BTreeMap}; +use std::collections::BTreeMap; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 600a120..6b52549 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index fcc0003..0c1b2cd 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 0c3a3d6..3681430 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::HashMap}; +use std::collections::HashMap; pub use data::Data; use ruma::{events::presence::PresenceEvent, RoomId, UserId}; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 3664fe9..1b3ddb1 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 3d8afe6..d05ec90 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 4ef58fd..b30bb9c 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,7 +1,7 @@ mod data; use std::{ collections::{HashMap, HashSet}, - sync::{Mutex}, + sync::Mutex, }; pub use data::Data; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 1a36010..c99ae4a 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 2b5976c..c84e975 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{signatures::CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index d15e42e..b6f35e7 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index b7e2c00..479e556 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{RoomId, UserId}; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index f9e81d6..60fc6f4 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,7 +12,7 @@ use std::{ use crate::{ api::{appservice_server, server_server}, services, - utils::{calculate_hash}, + utils::calculate_hash, Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; @@ -37,7 +37,7 @@ use ruma::{ }; use tokio::{ select, - sync::{mpsc, Semaphore}, + sync::{mpsc, Mutex, Semaphore}, }; use tracing::{error, warn}; 
@@ -88,6 +88,7 @@ pub struct Service { /// The state for a given state hash. pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, + receiver: Mutex)>>, } enum TransactionStatus { @@ -99,25 +100,24 @@ enum TransactionStatus { impl Service { pub fn build(db: &'static dyn Data, config: &Config) -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - - let self1 = Arc::new(Self { + Arc::new(Self { db, sender, + receiver: Mutex::new(receiver), maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - }); - let self2 = Arc::clone(&self1); - - tokio::spawn(async move { - self2.start_handler(receiver).await.unwrap(); - }); - - self1 + }) } - async fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>, - ) -> Result<()> { + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(&self); + tokio::spawn(async move { + self2.handler().await.unwrap(); + }); + } + + async fn handler(&self) -> Result<()> { + let mut receiver = self.receiver.lock().await; + let mut futures = FuturesUnordered::new(); let mut current_transaction_status = HashMap::::new(); diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 0cc30db..2fa3b02 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 1170193..e827cc8 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{ From 25c3d89f281d101fcb904abd53d7c364cbe96e83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 15:32:34 +0200 Subject: [PATCH 1223/1727] Bump rust version for const fn RwLock::new --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index b88674d..d5f3c9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.3.0-next" -rust-version = "1.56" +rust-version = "1.63" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 5a04559cb47ed14ce23f1a88a52c8c908a45001a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 11:26:08 +0200 Subject: [PATCH 1224/1727] fix: maintain server list again --- src/database/key_value/rooms/state_cache.rs | 30 +++++++++++++++++++++ src/service/rooms/state_cache/mod.rs | 8 ------ 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 4ca6ac4..cbc0576 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -124,6 +124,36 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { .unwrap() .insert(room_id.to_owned(), Arc::new(real_users)); + for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { + if !joined_servers.remove(&old_joined_server) { + // Server not in room anymore + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xff); + 
serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } + } + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } + self.appservice_in_room_cache .write() .unwrap() diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 2b4762a..9431d3a 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -39,14 +39,6 @@ impl Service { // TODO: displayname, avatar url } - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - match &membership { MembershipState::Join => { // Check if the user never joined this room From 1a7893dbbd22f311d9cbce071d77d5990a8a1711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 13:15:26 +0200 Subject: [PATCH 1225/1727] fix: update state_cache on join over federation --- src/api/client_server/membership.rs | 15 ++++++--------- .../key_value/rooms/state_accessor.rs | 6 +++--- src/service/rooms/event_handler/mod.rs | 7 ++----- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state/mod.rs | 19 ++++--------------- src/service/rooms/state_compressor/mod.rs | 12 ++++++------ 6 files changed, 22 insertions(+), 39 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index d971e6b..a91d079 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -669,24 +669,21 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } - let statehash_before_join = services().rooms.state.set_event_state( - event_id, + let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, state .into_iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(k, &id) - }) + .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) .collect::>()?, )?; services() .rooms .state - .set_room_state(room_id, statehash_before_join, &state_lock)?; + .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .await?; + + services().rooms.state_cache.update_joined_count(room_id)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 39c261f..70e59ac 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -23,7 +23,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let parsed = services() .rooms .state_compressor - .parse_compressed_state_event(compressed)?; + .parse_compressed_state_event(&compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -52,7 +52,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let (_, eventid) = services() .rooms .state_compressor - .parse_compressed_state_event(compressed)?; + .parse_compressed_state_event(&compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( @@ -104,7 +104,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { services() .rooms .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(&compressed) .ok() .map(|(_, id)| id) })) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e5f8424..cfe0fbf 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -970,14 +970,11 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let sstatehash = services() + let (sstatehash, new, removed) = services() .rooms .state_compressor .save_state(room_id, new_room_state)?; - services() - .rooms - .state - .set_room_state(room_id, sstatehash, &state_lock)?; + services().rooms.state.force_state(room_id, sstatehash, new, removed, &state_lock).await?; } } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 3aa4914..8e80b5e 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -8,7 +8,7 @@ pub trait Data: Send + Sync { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; - /// Update the current state of the room. + /// Set the state hash to a new version, but does not update state_cache. fn set_room_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 7b8b0fd..15fa79b 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -34,23 +34,13 @@ impl Service { shortstatehash: u64, statediffnew: HashSet, _statediffremoved: HashSet, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - for event_id in statediffnew.into_iter().filter_map(|new| { services() .rooms .state_compressor - .parse_compressed_state_event(new) + .parse_compressed_state_event(&new) .ok() .map(|(_, id)| id) }) { @@ -105,8 +95,6 @@ impl Service { self.db .set_room_state(room_id, shortstatehash, &state_lock)?; - drop(state_lock); - Ok(()) } @@ -312,6 +300,7 @@ impl Service { Ok(state) } + /// Set the state hash to a new version, but does not update state_cache. 
#[tracing::instrument(skip(self))] pub fn set_room_state( &self, @@ -412,7 +401,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(&compressed) .ok() }) .filter_map(|(shortstatekey, event_id)| { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index b927cb7..bcd3b9a 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -100,7 +100,7 @@ impl Service { /// Returns shortstatekey, event id pub fn parse_compressed_state_event( &self, - compressed_event: CompressedStateEvent, + compressed_event: &CompressedStateEvent, ) -> Result<(u64, Arc)> { Ok(( utils::u64_from_bytes(&compressed_event[0..size_of::()]) @@ -246,12 +246,12 @@ impl Service { Ok(()) } - /// Returns the new shortstatehash + /// Returns the new shortstatehash, and the state diff from the previous room state pub fn save_state( &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result { + ) -> Result<(u64, HashSet, HashSet)> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( @@ -267,7 +267,7 @@ impl Service { .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(new_shortstatehash); + return Ok((new_shortstatehash, HashSet::new(), HashSet::new())); } let states_parents = previous_shortstatehash @@ -295,12 +295,12 @@ impl Service { self.save_state_from_diff( new_shortstatehash, statediffnew.clone(), - statediffremoved, + statediffremoved.clone(), 2, // every state change is 2 event changes on average states_parents, )?; }; - Ok(new_shortstatehash) + Ok((new_shortstatehash, statediffnew, statediffremoved)) } } From 275c6b447d9a3a2bbdc579de77317f9b27d289fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 15:34:36 +0200 Subject: [PATCH 1226/1727] Bump some dependencies --- Cargo.lock | 1092 +++++++++++++++------------------ Cargo.toml | 40 +- src/api/client_server/voip.rs | 2 +- src/main.rs | 2 +- src/service/globals/mod.rs | 6 +- src/service/media/mod.rs | 9 +- 6 files changed, 518 insertions(+), 633 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c074c76..2583c52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,12 +8,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - [[package]] name = "ahash" version = "0.7.6" @@ -27,42 +21,33 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" -version = "0.2.1" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" @@ -72,9 +57,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.2" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "assign" @@ -84,9 +69,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695" +checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" dependencies = [ "brotli", "flate2", @@ -124,9 +109,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.8" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b4d4f9a5ca8b1ab8de59e663e68c6207059239373ca72980f5be7ab81231f74" +checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" dependencies = [ "async-trait", "axum-core", @@ -156,9 +141,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" +checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" dependencies = [ "async-trait", "bytes", @@ -166,13 +151,15 @@ dependencies = [ "http", "http-body", "mime", + "tower-layer", + "tower-service", ] [[package]] name = "axum-server" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf18303ef7e23b045301555bf8a0dfbc1444ea1a37b3c81757a32680ace4d7d" +checksum = "87ba6170b61f7b086609dabcae68d2e07352539c6ef04a7c82980bdfa01a159d" dependencies = [ "arc-swap", "bytes", @@ -182,18 +169,12 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.0", + "rustls-pemfile 1.0.1", "tokio", "tokio-rustls", "tower-service", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -211,9 +192,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -236,9 +217,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2b_simd" -version = "0.5.11" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", "arrayvec", @@ -256,9 +237,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -286,15 +267,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "bytemuck" -version = "1.9.1" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" +checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" [[package]] name = "byteorder" @@ -304,9 +285,20 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "cc" @@ -326,36 +318,17 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time", - "winapi", -] - [[package]] name = "clang-sys" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -364,23 +337,21 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.5" +version = "4.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" +checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" dependencies = [ "bitflags", "clap_derive", "clap_lex", - "indexmap", "once_cell", - "textwrap", ] [[package]] name = "clap_derive" -version = "3.2.5" +version = "4.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" +checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" dependencies = [ "heck", "proc-macro-error", @@ -391,9 +362,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" dependencies = [ "os_str_bytes", ] @@ -411,7 +382,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.13.0", + "base64", "bytes", "clap", "crossbeam", @@ -441,8 +412,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "sha-1 0.9.8", - "sled", + "sha-1", "thiserror", "thread_local", "threadpool", @@ -487,9 +457,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -515,117 +485,86 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "crossbeam" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.5", - "crossbeam-utils 0.8.9", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", - "crossbeam-utils 0.8.9", + "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg", - "cfg-if 1.0.0", - 
"crossbeam-utils 0.8.9", + "cfg-if", + "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.1.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" dependencies = [ - "crossbeam-utils 0.6.6", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.6.6" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", + "cfg-if", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "curve25519-dalek" version = "3.2.1" @@ -645,16 +584,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" -[[package]] -name = "deflate" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" -dependencies = [ - "adler32", - "byteorder", -] - [[package]] name = "der" version = "0.4.5" @@ -675,12 +604,13 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", + "subtle", ] [[package]] @@ -728,9 +658,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encoding_rs" @@ -738,14 +668,14 @@ version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = 
"enum-as-inner" -version = "0.3.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -767,9 +697,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "figment" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790b4292c72618abbab50f787a477014fe15634f96291de45672ce46afe122df" +checksum = "4e56602b469b2201400dec66a66aec5a9b8761ee97cd1b8c96ab2483fcc16cc9" dependencies = [ "atomic", "pear", @@ -786,7 +716,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", - "miniz_oxide 0.5.3", + "miniz_oxide", ] [[package]] @@ -797,11 +727,10 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -823,9 +752,9 @@ checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" dependencies = [ "futures-channel", "futures-core", @@ -838,9 +767,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" dependencies = [ "futures-core", "futures-sink", @@ -848,15 +777,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" dependencies = [ "futures-core", "futures-task", @@ -865,15 +794,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = 
"42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" dependencies = [ "proc-macro2", "quote", @@ -882,21 +811,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" dependencies = [ "futures-channel", "futures-core", @@ -910,20 +839,11 @@ dependencies = [ "slab", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -935,7 +855,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -946,16 +866,16 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gif" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a7187e78088aead22ceedeee99779455b23fc231fe13ec443f99bb71694e5b" +checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" dependencies = [ "color_quant", "weezl", @@ -969,9 +889,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -988,42 +908,36 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] -[[package]] -name = "hashbrown" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" - [[package]] name = "hashlink" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ - "hashbrown 0.11.2", + "hashbrown", ] [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1", ] [[package]] @@ -1088,12 +1002,11 @@ dependencies = [ [[package]] name = "hmac" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "crypto-mac", - "digest 0.9.0", + "digest 0.10.5", ] [[package]] @@ -1137,9 +1050,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1149,9 +1062,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -1164,7 +1077,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -1196,17 +1109,26 @@ dependencies = [ ] [[package]] -name = "image" -version = "0.23.14" +name = "idna" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "image" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd8e4fb07cf672b1642304e731ef8a6a4c7891d67bb4fd4f5ce58cd6ed86803c" dependencies = [ "bytemuck", "byteorder", "color_quant", "gif", "jpeg-decoder", - "num-iter", "num-rational", "num-traits", "png", @@ -1214,20 +1136,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.12.1", + "hashbrown", "serde", ] [[package]] name = "indoc" -version = "1.0.6" +version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a0bd019339e5d968b37855180087b7b9d512c5046fbd244cf8c95687927d6e" +checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" [[package]] name = "inlinable_string" @@ -1235,31 +1157,22 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "integer-encoding" -version = "1.1.7" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.3.19", + "socket2", "widestring", "winapi", - "winreg 0.6.2", + "winreg", ] [[package]] @@ -1270,39 +1183,39 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "jpeg-decoder" -version = "0.1.22" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" +checksum = "9478aa10f73e7528198d75109c8be5cd7d15fb530238040148d5f9a22d4c5b3b" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -1318,11 +1231,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ - "base64 0.12.3", + "base64", "pem", "ring", "serde", @@ -1344,9 +1257,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "libc" -version = "0.2.126" +version = "0.2.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" [[package]] name = "libloading" @@ -1354,27 +1267,41 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] [[package]] name = "librocksdb-sys" -version = "6.20.3" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", + "bzip2-sys", "cc", "glob", "libc", + "libz-sys", + "zstd-sys", ] [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "pkg-config", @@ -1383,9 +1310,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lmdb-rkv-sys" @@ -1400,9 +1327,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1414,7 +1341,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1440,9 +1367,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] @@ -1488,27 +1415,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.3.7" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" -dependencies = [ - "adler32", -] - -[[package]] -name = "miniz_oxide" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = 
"96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -1527,10 +1445,20 @@ dependencies = [ ] [[package]] -name = "num-bigint" -version = "0.2.6" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", @@ -1547,22 +1475,11 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-rational" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", "num-integer", @@ -1589,10 +1506,19 @@ dependencies = [ ] [[package]] -name = "once_cell" -version = "1.12.0" +name = "num_threads" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + +[[package]] +name = "once_cell" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1608,31 +1534,24 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" +checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" dependencies = [ - "async-trait", - "crossbeam-channel", - "futures", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", - "tokio", - "tokio-stream", + "opentelemetry_api", + "opentelemetry_sdk", ] [[package]] name = "opentelemetry-jaeger" -version = "0.15.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" +checksum = "1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" dependencies = [ "async-trait", - "lazy_static", + "futures", + "futures-executor", + "once_cell", "opentelemetry", "opentelemetry-semantic-conventions", "thiserror", @@ -1642,13 +1561,48 @@ dependencies = [ [[package]] name = 
"opentelemetry-semantic-conventions" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" +checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" dependencies = [ "opentelemetry", ] +[[package]] +name = "opentelemetry_api" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "percent-encoding", + "rand 0.8.5", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "ordered-float" version = "1.1.1" @@ -1660,9 +1614,15 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.1.0" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "page_size" @@ -1676,34 +1636,32 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "cfg-if 1.0.0", - "instant", + "cfg-if", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys", ] [[package]] name = "paste" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pear" @@ -1736,26 +1694,24 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ - "base64 0.13.0", - "once_cell", - "regex", + "base64", ] [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "persy" -version = "1.2.6" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af61053f1daed3ff0265fad7f924e43ce07642a336c79304f8e5aec205460fb" +checksum = "5511189f4dbd737283b0dd2ff6715f2e35fd0d3e1ddf953ed6a772e439e1f73f" dependencies = [ "crc", "data-encoding", @@ -1769,18 +1725,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1818,14 +1774,14 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" -version = "0.16.8" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" +checksum = "8f0e7f4c94ec26ff209cee506314212639d6c91b80afb82984819fafce9df01c" dependencies = [ "bitflags", "crc32fast", - "deflate", - "miniz_oxide 0.3.7", + "flate2", + "miniz_oxide", ] [[package]] @@ -1836,10 +1792,11 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -1870,9 +1827,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" dependencies = [ "unicode-ident", ] @@ -1898,9 +1855,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.19" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -1926,7 +1883,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1946,7 +1903,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1960,9 +1917,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom 0.2.7", ] @@ -1978,9 +1935,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -1998,9 +1955,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -2018,16 +1975,16 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "reqwest" version = "0.11.9" source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "encoding_rs", "futures-core", @@ -2057,7 +2014,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.7.0", + "winreg", ] [[package]] @@ -2087,9 +2044,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -2143,7 +2100,7 @@ name = "ruma-common" version = "0.8.0" source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "form_urlencoded", "http", @@ -2222,7 +2179,7 @@ name = "ruma-signatures" version = "0.10.0" source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ - "base64 0.13.0", + "base64", "ed25519-dalek", "pkcs8", "rand 0.7.3", @@ -2249,29 +2206,28 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "smallvec", ] [[package]] name = "rust-argon2" -version = "0.8.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.9", + 
"crossbeam-utils", ] [[package]] @@ -2299,7 +2255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.0", + "rustls-pemfile 1.0.1", "schannel", "security-framework", ] @@ -2310,23 +2266,23 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "schannel" @@ -2356,9 +2312,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -2379,18 +2335,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.137" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", @@ -2399,9 +2355,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa", "ryu", @@ -2422,27 +2378,15 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "8613d593412a0deb7bbd8de9d908efff5a0cb9ccd8f62c641e7b2ed2f57291d1" dependencies = [ "indexmap", + "itoa", "ryu", "serde", - "yaml-rust", -] - -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "unsafe-libyaml", ] [[package]] @@ -2451,9 +2395,20 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", ] [[package]] @@ -2463,7 +2418,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -2495,66 +2450,42 @@ dependencies = [ [[package]] name = "signature" -version = "1.5.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", "num-bigint", "num-traits", + "thiserror", + "time", ] [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" - -[[package]] -name = "sled" -version = "0.34.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils 0.8.9", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", - "zstd", + "autocfg", ] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.3.19" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -2583,9 +2514,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.98" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" dependencies = [ "proc-macro2", "quote", @@ -2600,11 +2531,11 @@ checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" [[package]] name = "synchronoise" -version = "1.0.0" +version = 
"1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d717ed0efc9d39ab3b642a096bc369a3e02a38a51c41845d7fe31bdad1d6eaeb" +checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" dependencies = [ - "crossbeam-queue 0.1.2", + "crossbeam-queue", ] [[package]] @@ -2619,26 +2550,20 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2665,9 +2590,9 @@ dependencies = [ [[package]] name = "thrift" -version = "0.13.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" dependencies = [ "byteorder", "integer-encoding", @@ -2678,9 +2603,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb833c46ecbf8b6daeccb347cefcabf9c1beb5c9b0f853e1cec45632d9963e69" +checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" dependencies = [ "libc", "paste", @@ -2689,9 +2614,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.3+5.2.1-patched.2" +version = "0.5.2+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1792ccb507d955b46af42c123ea8863668fae24d03721e40cad6a41773dbb49" +checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" dependencies = [ "cc", "fs_extra", @@ -2700,9 +2625,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b7bcecfafe4998587d636f9ae9d55eb9d0499877b88757767c346875067098" +checksum = "20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -2710,15 +2635,22 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" dependencies = [ + "itoa", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", + "num_threads", + "time-macros", ] +[[package]] +name = "time-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" + [[package]] name = "tinyvec" version = "1.6.0" @@ -2736,19 +2668,19 @@ checksum = 
"cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -2789,9 +2721,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "f6edf2d6bc038a43d31353570e27270603f4648d18f5ed10c0e179abe43255af" dependencies = [ "futures-core", "pin-project-lite", @@ -2800,9 +2732,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -2874,11 +2806,11 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -2887,9 +2819,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -2898,9 +2830,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -2908,9 +2840,9 @@ dependencies = [ [[package]] name = "tracing-flame" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" +checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9" dependencies = [ "lazy_static", "tracing", @@ -2928,80 +2860,66 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.25" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = 
"a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", - "chrono", - "lazy_static", "matchers", + "nu-ansi-term", + "once_cell", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] name = "trust-dns-proto" -version = "0.20.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", - "log", "rand 0.8.5", "smallvec", "thiserror", "tinyvec", "tokio", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.20.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" +checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot", "resolv-conf", "smallvec", "thiserror", "tokio", + "tracing", "trust-dns-proto", ] @@ -3034,24 +2952,30 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e5fa573d8ac5f1a856f8d7be41d390ee973daf97c806b2c1a465e4e1406e68" [[package]] name = "unsigned-varint" @@ -3067,13 +2991,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -3120,12 +3043,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3134,23 +3051,23 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -3159,11 +3076,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -3171,9 +3088,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3181,9 +3098,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -3194,15 +3111,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -3220,21 +3137,21 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c97e489d8f836838d497091de568cf16b117486d529ec5579233521065bd5e4" +checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" [[package]] name = "widestring" -version = "0.4.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" [[package]] name = 
"wildmatch" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" +checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" [[package]] name = "winapi" @@ -3301,15 +3218,6 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.7.0" @@ -3319,15 +3227,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" version = "0.5.1" @@ -3364,30 +3263,11 @@ dependencies = [ "num-traits", ] -[[package]] -name = "zstd" -version = "0.9.2+zstd.1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2390ea1bf6c038c39674f22d95f0564725fc06034a47129179810b2fc58caa54" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "4.1.3+zstd.1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99d81b99fb3c2c2c794e3fe56c305c63d5173a16a46b5850b07c935ffc7db79" -dependencies = [ - "libc", - "zstd-sys", -] - [[package]] name = "zstd-sys" -version = "1.6.2+zstd.1.5.1" +version = "2.0.1+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2daf2f248d9ea44454bfcb2516534e8b8ad2fc91bf818a1885495fc42bc8ac9f" +checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index d5f3c9d..83f03ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318c # Async runtime and utilities tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently -sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } +#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } persy = { version = "1.0.0", optional = true, features = ["background_ops"] } @@ -40,62 +40,62 @@ directories = "4.0.0" # Used for ruma wrapper serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.21" +serde_yaml = "0.9.13" # Used for pdu definition serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers rand = "0.8.4" # Used to hash passwords -rust-argon2 = "0.8.3" +rust-argon2 = "1.0.0" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.29" # Used to generate thumbnails for images -image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } +image = { 
version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.13.0" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.20.3" +trust-dns-resolver = "0.22.0" # Used to find matching events for appservices regex = "1.5.4" # jwt jsonwebtokens -jsonwebtoken = "7.2.0" +jsonwebtoken = "8.1.1" # Performance measurements tracing = { version = "0.1.27", features = [] } -tracing-subscriber = "0.2.22" -tracing-flame = "0.1.0" -opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } +tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } +tracing-flame = "0.2.0" +opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } +opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } lru-cache = "0.1.2" -rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } -parking_lot = { version = "0.11.2", optional = true } +rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } +parking_lot = { version = "0.12.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.19.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication -hmac = "0.11.0" -sha-1 = "0.9.8" +hmac = "0.12.1" +sha-1 = "0.10.0" # used for conduit's CLI and admin room command parsing -clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] } +clap = { version = "4.0.11", default-features = false, features = ["std", "derive"] } futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.6", features = ["env", "toml"] } -tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } -tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } +tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } +tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } lazy_static = "1.4.0" async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] -backend_sled = ["sled"] +default = ["conduit_bin", "backend_sqlite", "jemalloc"] +#backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index dc9caaa..6b1ee40 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,5 +1,5 @@ use crate::{services, Result, Ruma}; -use hmac::{Hmac, Mac, NewMac}; +use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; diff --git a/src/main.rs b/src/main.rs index ce7e578..0bba2ab 100644 --- a/src/main.rs +++ b/src/main.rs @@ -86,7 +86,7 @@ async fn main() { if 
     if config.allow_jaeger {
         opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
-        let tracer = opentelemetry_jaeger::new_pipeline()
+        let tracer = opentelemetry_jaeger::new_agent_pipeline()
             .install_batch(opentelemetry::runtime::Tokio)
             .unwrap();
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index 054df09..f88fd02 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -40,7 +40,7 @@ pub struct Service {
     pub config: Config,
     keypair: Arc,
     dns_resolver: TokioAsyncResolver,
-    jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
+    jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
     federation_client: reqwest::Client,
     default_client: reqwest::Client,
     pub stable_room_versions: Vec,
@@ -105,7 +105,7 @@ impl Service {
         let jwt_decoding_key = config
             .jwt_secret
             .as_ref()
-            .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static());
+            .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));

         let default_client = reqwest_client_builder(&config)?.build()?;
         let name_override = Arc::clone(&tls_name_override);
@@ -250,7 +250,7 @@ impl Service {
         &self.dns_resolver
     }

-    pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> {
+    pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> {
         self.jwt_decoding_key.as_ref()
     }
diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs
index 6684108..9393753 100644
--- a/src/service/media/mod.rs
+++ b/src/service/media/mod.rs
@@ -1,8 +1,10 @@
 mod data;
+use std::io::Cursor;
+
 pub use data::Data;

 use crate::{services, Result};
-use image::{imageops::FilterType, GenericImageView};
+use image::imageops::FilterType;

 use tokio::{
     fs::File,
@@ -186,7 +188,10 @@ impl Service {
         };

         let mut thumbnail_bytes = Vec::new();
-        thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?;
+        thumbnail.write_to(
+            &mut Cursor::new(&mut thumbnail_bytes),
+            image::ImageOutputFormat::Png,
+        )?;

         // Save thumbnail in database so we don't have to generate it again next time
         let thumbnail_key = self.db.create_file_metadata(

From 6b131202b9c2ec36043d73ffd2d787093e4b9fed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Sun, 9 Oct 2022 17:25:06 +0200
Subject: [PATCH 1227/1727] Bump ruma

---
 Cargo.lock | 106 ++++++------
 Cargo.toml | 2 +-
 src/api/client_server/account.rs | 4 +
 src/api/client_server/directory.rs | 16 +++
 src/api/client_server/keys.rs | 6 +-
 src/api/client_server/membership.rs | 21 ++-
 src/api/client_server/read_marker.rs | 133 +++++++++++-------
 src/api/client_server/room.rs | 8 +-
 src/api/client_server/session.rs | 2 +
 src/api/client_server/sync.rs | 19 ++-
 src/api/ruma_wrapper/axum.rs | 11 +-
 src/api/ruma_wrapper/mod.rs | 9 +-
 src/api/server_server.rs | 22 +--
 src/config/mod.rs | 6 +-
 src/database/key_value/globals.rs | 7 +-
 src/database/key_value/key_backups.rs | 6 +-
 src/database/key_value/rooms/alias.rs | 6 +-
 src/database/key_value/rooms/directory.rs | 4 +-
 src/database/key_value/rooms/edus/presence.rs | 6 +-
 .../key_value/rooms/edus/read_receipt.rs | 4 +-
 src/database/key_value/rooms/edus/typing.rs | 4 +-
 src/database/key_value/rooms/metadata.rs | 4 +-
 src/database/key_value/rooms/outlier.rs | 2 +-
 src/database/key_value/rooms/state.rs | 4 +-
 src/database/key_value/rooms/state_cache.rs | 20 +-
 src/database/key_value/rooms/timeline.rs | 6 +-
 src/database/key_value/rooms/user.rs | 10 +-
 src/database/key_value/uiaa.rs | 3 +-
 src/database/key_value/users.rs | 22 +--
 src/database/mod.rs | 20 +--
 src/service/admin/mod.rs | 7 +-
src/service/globals/data.rs | 6 +- src/service/globals/mod.rs | 25 ++-- src/service/key_backups/data.rs | 4 +- src/service/key_backups/mod.rs | 4 +- src/service/pdu.rs | 20 +-- src/service/pusher/mod.rs | 8 +- src/service/rooms/alias/data.rs | 6 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/directory/data.rs | 4 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/presence/data.rs | 4 +- src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/data.rs | 4 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/event_handler/mod.rs | 17 ++- src/service/rooms/lazy_loading/mod.rs | 6 +- src/service/rooms/metadata/data.rs | 4 +- src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/outlier/data.rs | 2 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/state/data.rs | 4 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_cache/data.rs | 20 +-- src/service/rooms/state_cache/mod.rs | 20 +-- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/data.rs | 6 +- src/service/rooms/timeline/mod.rs | 19 +-- src/service/rooms/user/data.rs | 6 +- src/service/rooms/user/mod.rs | 6 +- src/service/sending/mod.rs | 15 +- src/service/uiaa/data.rs | 2 +- src/service/uiaa/mod.rs | 3 +- src/service/users/data.rs | 17 +-- src/service/users/mod.rs | 17 +-- src/utils/error.rs | 4 +- src/utils/mod.rs | 2 +- 68 files changed, 446 insertions(+), 347 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2583c52..29603ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -181,6 +181,12 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" + [[package]] name = "bincode" version = "1.3.3" @@ -429,9 +435,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.6.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "constant_time_eq" @@ -567,9 +573,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest 0.9.0", @@ -586,11 +592,12 @@ checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" [[package]] name = "der" -version = "0.4.5" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", + "zeroize", ] [[package]] @@ -1145,12 +1152,6 @@ dependencies = [ "serde", ] -[[package]] -name = "indoc" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" - [[package]] name = "inlinable_string" version = "0.1.15" @@ -1229,6 
+1230,15 @@ dependencies = [ "serde", ] +[[package]] +name = "js_option" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68421373957a1593a767013698dbf206e2b221eefe97a44d98d18672ff38423c" +dependencies = [ + "serde", +] + [[package]] name = "jsonwebtoken" version = "8.1.1" @@ -1757,13 +1767,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", - "zeroize", ] [[package]] @@ -2054,11 +2063,12 @@ dependencies = [ [[package]] name = "ruma" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.7.4" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "assign", "js_int", + "js_option", "ruma-appservice-api", "ruma-client-api", "ruma-common", @@ -2071,8 +2081,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "ruma-common", "serde", @@ -2081,8 +2091,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.15.1" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "assign", "bytes", @@ -2097,19 +2107,20 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "base64", "bytes", "form_urlencoded", "http", "indexmap", - "indoc", "itoa", "js_int", + "js_option", "percent-encoding", "rand 0.8.5", + "regex", "ruma-identifiers-validation", "ruma-macros", "serde", @@ -2123,8 +2134,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2134,17 +2145,17 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ + "js_int", "thiserror", - "url", ] [[package]] 
name = "ruma-identity-service-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2153,20 +2164,23 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ + "once_cell", "proc-macro-crate", "proc-macro2", "quote", "ruma-identifiers-validation", + "serde", "syn", + "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2176,8 +2190,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.12.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "base64", "ed25519-dalek", @@ -2187,13 +2201,12 @@ dependencies = [ "serde_json", "sha2", "thiserror", - "tracing", ] [[package]] name = "ruma-state-res" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "itertools", "js_int", @@ -2499,10 +2512,11 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.4.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ + "base64ct", "der", ] @@ -3002,9 +3016,9 @@ dependencies = [ [[package]] name = "uuid" -version = "0.8.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ "getrandom 0.2.7", ] @@ -3235,9 +3249,9 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.3.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 83f03ce..0428e74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = 
["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 58624a2..673bbb4 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -193,6 +193,8 @@ pub async fn register_route( access_token: None, user_id, device_id: None, + refresh_token: None, + expires_in: None, }); } @@ -238,6 +240,8 @@ pub async fn register_route( access_token: Some(token), user_id, device_id: Some(device_id), + refresh_token: None, + expires_in: None, }) } diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 90f79a0..7c4aa50 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -19,6 +19,7 @@ use ruma::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, @@ -135,6 +136,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since, filter: Filter { generic_search_term: filter.generic_search_term.as_deref(), + room_types: filter.room_types.clone(), }, room_network: RoomNetwork::Matrix, }, @@ -287,6 +289,20 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? .flatten() .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, + room_type: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err( + |e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }, + ) + }) + .transpose()? 
+ .and_then(|e| e.room_type), room_id, }; Ok(chunk) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index be62cc2..33ff309 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -14,7 +14,7 @@ use ruma::{ federation, }, serde::Raw, - DeviceId, DeviceKeyAlgorithm, UserId, + DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -253,7 +253,7 @@ pub async fn get_key_changes_route( pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, - device_keys_input: &BTreeMap, Vec>>, + device_keys_input: &BTreeMap>, allowed_signatures: F, ) -> Result { let mut master_keys = BTreeMap::new(); @@ -396,7 +396,7 @@ fn add_unsigned_device_display_name( } pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, + one_time_keys_input: &BTreeMap>, ) -> Result { let mut one_time_keys = BTreeMap::new(); diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index a91d079..4f791c7 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -10,12 +10,14 @@ use ruma::{ }, federation::{self, membership::create_invite}, }, + canonical_json::to_canonical_value, events::{ room::member::{MembershipState, RoomMemberEventContent}, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::Base64, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -83,7 +85,7 @@ pub async fn join_room_by_id_or_alias_route( let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; - let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( @@ -458,7 +460,7 @@ pub async fn joined_members_route( async fn join_room_by_id_helper( sender_user: Option<&UserId>, room_id: &RoomId, - servers: &[Box], + servers: &[OwnedServerName], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -673,7 +675,12 @@ async fn join_room_by_id_helper( room_id, state .into_iter() - .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) .collect::>()?, )?; @@ -737,7 +744,7 @@ fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, -) -> Result<(Box, CanonicalJsonObject)> { +) -> Result<(OwnedEventId, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") @@ -896,7 +903,7 @@ pub(crate) async fn invite_helper<'a>( warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } - let origin: Box = serde_json::from_value( + let origin: OwnedServerName = serde_json::from_value( 
serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index fd0e090..bdf467f 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,8 +1,7 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::RoomAccountDataEventType, - receipt::ReceiptType, + events::{receipt::ReceiptType, RoomAccountDataEventType}, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; @@ -18,19 +17,28 @@ pub async fn set_read_marker_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - }; - services().account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - )?; + if let Some(fully_read) = &body.fully_read { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: fully_read.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } - if let Some(event) = &body.read_receipt { + if body.private_read_receipt.is_some() || body.read_receipt.is_some() { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + if let Some(event) = &body.private_read_receipt { services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, @@ -43,11 +51,9 @@ pub async fn set_read_marker_route( "Event does not exist.", ))?, )?; - services() - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; + } + if let Some(event) = &body.read_receipt { let mut user_receipts = BTreeMap::new(); user_receipts.insert( sender_user.clone(), @@ -83,44 +89,69 @@ pub async fn create_receipt_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.edus.read_receipt.private_read_set( - &body.room_id, - sender_user, + if matches!( + &body.receipt_type, + create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate + ) { services() .rooms - .timeline - .get_pdu_count(&body.event_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - )?; - services() - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - }, - ); - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); + match body.receipt_type { + create_receipt::v3::ReceiptType::FullyRead => { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.event_id.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } + create_receipt::v3::ReceiptType::Read => { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + }, + ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.to_owned(), receipts); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(body.event_id.to_owned(), receipts); - services().rooms.edus.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + } + create_receipt::v3::ReceiptType::ReadPrivate => { + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; + } + _ => return Err(Error::bad_database("Unsupported receipt type")), + } Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index ca191d6..43b2e8e 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,6 +1,8 @@ use crate::{ api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; +use ruma::serde::JsonObject; +use ruma::OwnedRoomAliasId; use ruma::{ api::client::{ error::ErrorKind, @@ -21,9 +23,7 @@ use ruma::{ }, RoomEventType, StateEventType, }, - int, - serde::{CanonicalJsonObject, JsonObject}, - RoomAliasId, RoomId, + int, CanonicalJsonObject, RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -77,7 +77,7 @@ pub async fn create_room_route( )); } - let alias: Option> = + let alias: Option = body.room_alias_name .as_ref() .map_or(Ok(None), |localpart| { diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 14f1404..6182516 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -147,6 +147,8 @@ pub async fn login_route(body: Ruma) -> Result, - sender_device: Box, + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, body: sync_events::v3::IncomingRequest, tx: Sender>>, ) { @@ -155,15 +155,14 @@ async fn sync_helper_wrapper( } async fn sync_helper( - sender_user: Box, - sender_device: Box, + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, body: sync_events::v3::IncomingRequest, // bool = caching allowed ) -> Result<(sync_events::v3::Response, bool), Error> { use sync_events::v3::{ - DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, - JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, - ToDevice, UnreadNotificationsCount, + Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, }; // TODO: match body.set_presence { @@ -444,7 +443,7 @@ async fn sync_helper( }; // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(state_key.as_ref()) { + if let Ok(uid) = UserId::parse(&state_key) { lazy_loaded.insert(uid); } state_events.push(pdu); diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 2d986a5..c71d36b 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -17,8 +17,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - signatures::CanonicalJsonValue, - DeviceId, ServerName, UserId, + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, ServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -81,7 +80,7 @@ where let (sender_user, sender_device, sender_servername, from_appservice) = if let Some((_id, registration)) = appservice_registration { match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + AuthScheme::AccessToken => { let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( @@ -112,7 +111,7 @@ where } } else { match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + 
AuthScheme::AccessToken => { let token = match token { Some(token) => token, _ => { @@ -132,7 +131,7 @@ where } Some((user_id, device_id)) => ( Some(user_id), - Some(Box::::from(device_id)), + Some(OwnedDeviceId::from(device_id)), None, false, ), @@ -298,7 +297,7 @@ where } struct XMatrix { - origin: Box, + origin: OwnedServerName, key: String, // KeyName? sig: String, } diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs index 15360e5..ac4c825 100644 --- a/src/api/ruma_wrapper/mod.rs +++ b/src/api/ruma_wrapper/mod.rs @@ -1,6 +1,7 @@ use crate::Error; use ruma::{ - api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId, + api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, + OwnedUserId, }; use std::ops::Deref; @@ -10,9 +11,9 @@ mod axum; /// Extractor for Ruma request structs pub struct Ruma { pub body: T, - pub sender_user: Option>, - pub sender_device: Option>, - pub sender_servername: Option>, + pub sender_user: Option, + pub sender_device: Option, + pub sender_servername: Option, // This is None when body is not a valid string pub json_body: Option, pub from_appservice: bool, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index d54e130..a8ae272 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -33,18 +33,17 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, + receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, RoomEventType, StateEventType, }, - receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::CanonicalJsonValue, to_device::DeviceIdOrAllDevices, - EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, + CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -280,7 +279,7 @@ where .write() .unwrap() .insert( - Box::::from(destination), + OwnedServerName::from(destination), (actual_destination, host), ); } @@ -528,7 +527,7 @@ pub async fn get_server_keys_route() -> Result { return Err(Error::bad_config("Federation is disabled.")); } - let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); + let mut verify_keys: BTreeMap = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", services().globals.keypair().version()) .try_into() @@ -669,7 +668,7 @@ pub async fn send_transaction_message_route( }; // 0. Check the server is in the room - let room_id = match value + let room_id: OwnedRoomId = match value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) { @@ -1007,7 +1006,7 @@ pub async fn get_missing_events_route( continue; } queued_events.extend_from_slice( - &serde_json::from_value::>>( + &serde_json::from_value::>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { Error::bad_database("Event in db has no prev_events field.") })?) 
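The server_server.rs hunks above and below track ruma 0.7's replacement of boxed identifier types (Box<EventId>, Box<ServerName>, Box<RoomId>) with dedicated owned types (OwnedEventId, OwnedServerName, OwnedRoomId). A minimal sketch of that migration pattern, using only calls that already appear in this patch; it is illustrative and not part of the commit:

    use ruma::{OwnedEventId, OwnedRoomId, OwnedServerName, RoomId};

    fn owned_id_examples(origin_json: &serde_json::Value) -> Result<(), Box<dyn std::error::Error>> {
        // RoomId::parse now yields an OwnedRoomId instead of a Box<RoomId>.
        let room_id: OwnedRoomId = RoomId::parse("!room:example.org")?;
        // Owned ids deref to their borrowed counterparts, so &OwnedRoomId coerces to &RoomId.
        let _borrowed: &RoomId = &room_id;
        // Fields that used to be Box<ServerName> deserialize directly into the owned type,
        // as the `origin` field is handled in the hunks around this note.
        let _origin: OwnedServerName = serde_json::from_value(origin_json.clone())?;
        // Collections of event ids likewise become Vec<OwnedEventId>.
        let _prev_events: Vec<OwnedEventId> = Vec::new();
        Ok(())
    }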
@@ -1411,7 +1410,7 @@ async fn create_join_event( } }; - let origin: Box = serde_json::from_value( + let origin: OwnedServerName = serde_json::from_value( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", @@ -1474,6 +1473,7 @@ async fn create_join_event( .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), + origin: services().globals.server_name().to_string(), }) } @@ -1564,10 +1564,10 @@ pub async fn create_invite_route( // Add event_id back signed_event.insert( "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), + CanonicalJsonValue::String(event_id.to_string()), ); - let sender: Box<_> = serde_json::from_value( + let sender: OwnedUserId = serde_json::from_value( signed_event .get("sender") .ok_or(Error::BadRequest( diff --git a/src/config/mod.rs b/src/config/mod.rs index 29af883..e0efa60 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::{RoomVersionId, ServerName}; +use ruma::{OwnedServerName, RoomVersionId, ServerName}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; @@ -20,7 +20,7 @@ pub struct Config { pub port: u16, pub tls: Option, - pub server_name: Box, + pub server_name: OwnedServerName, #[serde(default = "default_database_backend")] pub database_backend: String, pub database_path: String, @@ -58,7 +58,7 @@ pub struct Config { pub proxy: ProxyConfig, pub jwt_secret: Option, #[serde(default = "Vec::new")] - pub trusted_servers: Vec>, + pub trusted_servers: Vec, #[serde(default = "default_log")] pub log: String, #[serde(default)] diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index fafaf49..75d00b4 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -5,7 +5,8 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -163,7 +164,7 @@ impl service::globals::Data for KeyValueDatabase { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { + ) -> Result> { // Not atomic, but this is not critical let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; @@ -202,7 +203,7 @@ impl service::globals::Data for KeyValueDatabase { fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>> { + ) -> Result> { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? 
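The signing-key maps in the globals hunks above, and the key-backup maps in the next file, are now keyed by OwnedServerSigningKeyId and OwnedRoomId instead of Box<...>. Lookups by a borrowed id keep working; a small, hypothetical sketch of that pattern, assuming the owned id types implement Borrow of their borrowed counterparts in the way these maps are queried elsewhere in the codebase:

    use ruma::{OwnedRoomId, RoomId};
    use std::collections::BTreeMap;

    // Hypothetical helper, not part of the patch: look up a per-room value with a
    // borrowed &RoomId even though the map is keyed by OwnedRoomId.
    fn backup_count(per_room: &BTreeMap<OwnedRoomId, u64>, room_id: &RoomId) -> Option<u64> {
        // get() accepts &RoomId here because OwnedRoomId is assumed to implement Borrow<RoomId>.
        per_room.get(room_id).copied()
    }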
diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 0738f73..900b700 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -6,7 +6,7 @@ use ruma::{ error::ErrorKind, }, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -198,13 +198,13 @@ impl service::key_backups::Data for KeyValueDatabase { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); + let mut rooms = BTreeMap::::new(); for result in self .backupkeyid_backup diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index f3de89d..c0f6de8 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,4 +1,4 @@ -use ruma::{api::client::error::ErrorKind, RoomAliasId, RoomId}; +use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -31,7 +31,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { @@ -46,7 +46,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 212ced9..e05dee8 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,4 +1,4 @@ -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, Result}; @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms<'a>(&'a self) -> Box>> + 'a> { + fn public_rooms<'a>(&'a self) -> Box> + 'a> { Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index fdd51ce..5259bef 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,6 +1,8 @@ use std::collections::HashMap; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, RoomId, UInt, UserId}; +use ruma::{ + events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, +}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -76,7 +78,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>> { + ) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a8349f6..fa97ea3 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ 
b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,7 +1,7 @@ use std::mem; use ruma::{ - events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject, RoomId, UserId, + events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -55,7 +55,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { ) -> Box< dyn Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 7b211e7..4e6c86b 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use ruma::{RoomId, UserId}; +use ruma::{OwnedUserId, RoomId, UserId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -66,7 +66,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { .unwrap_or(0)) } - fn typings_all(&self, room_id: &RoomId) -> Result>> { + fn typings_all(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 0f61dbb..57540c4 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,4 +1,4 @@ -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -18,7 +18,7 @@ impl service::rooms::metadata::Data for KeyValueDatabase { .is_some()) } - fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + fn iter_ids<'a>(&'a self) -> Box> + 'a> { Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index 2ecaadb..7985ba8 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,4 +1,4 @@ -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index dbc1398..f17d37b 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,4 +1,4 @@ -use ruma::{EventId, RoomId}; +use ruma::{EventId, OwnedEventId, RoomId}; use std::collections::HashSet; use std::sync::Arc; @@ -52,7 +52,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index cbc0576..ff4594f 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -4,7 +4,7 @@ use regex::Regex; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -163,7 +163,7 @@ impl 
service::rooms::state_cache::Data for KeyValueDatabase { } #[tracing::instrument(skip(self, room_id))] - fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { let maybe = self .our_real_users_cache .read() @@ -262,7 +262,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -295,7 +295,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = server.as_bytes().to_vec(); prefix.push(0xff); @@ -317,7 +317,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -363,7 +363,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -393,7 +393,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -451,7 +451,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { Box::new( self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) @@ -476,7 +476,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a> { + ) -> Box>)>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -554,7 +554,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a> { + ) -> Box>)>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 1660a9e..0c6c2dd 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,7 +1,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{ - api::client::error::ErrorKind, signatures::CanonicalJsonObject, EventId, RoomId, UserId, + api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, }; use tracing::error; @@ -344,8 +344,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { fn increment_notification_counts( &self, room_id: &RoomId, - notifies: Vec>, - highlights: Vec>, + notifies: Vec, + highlights: Vec, ) -> Result<()> { let mut notifies_batch = Vec::new(); let mut highlights_batch = Vec::new(); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 9230e61..e678c87 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,4 +1,4 @@ -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -85,8 +85,8 @@ impl service::rooms::user::Data for KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) 
-> Result>> + 'a>> { + users: Vec, + ) -> Result> + 'a>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -110,7 +110,7 @@ impl service::rooms::user::Data for KeyValueDatabase { }); // We use the default compare function because keys are sorted correctly (not reversed) - Ok(Box::new(Box::new( + Ok(Box::new( utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { @@ -119,6 +119,6 @@ impl service::rooms::user::Data for KeyValueDatabase { })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) }), - ))) + )) } } diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 8a9f176..5fd91b0 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,7 +1,6 @@ use ruma::{ api::client::{error::ErrorKind, uiaa::UiaaInfo}, - signatures::CanonicalJsonValue, - DeviceId, UserId, + CanonicalJsonValue, DeviceId, UserId, }; use crate::{database::KeyValueDatabase, service, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 8213c5d..3bb8e61 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,10 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, OwnedUserId, + UInt, UserId, }; +use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; use crate::{ @@ -39,7 +41,7 @@ impl service::users::Data for KeyValueDatabase { } /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result, String)>> { + fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -66,7 +68,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box>> + 'a> { + fn iter<'a>(&'a self) -> Box> + 'a> { Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -141,7 +143,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the avatar_url of a user. - fn avatar_url(&self, user_id: &UserId) -> Result>> { + fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { @@ -154,7 +156,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. 
- fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -258,7 +260,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -356,7 +358,7 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>> { + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -407,7 +409,7 @@ impl service::users::Data for KeyValueDatabase { .scan_prefix(userdeviceid) .map(|(bytes, _)| { Ok::<_, Error>( - serde_json::from_slice::>( + serde_json::from_slice::( &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, @@ -579,7 +581,7 @@ impl service::users::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .as_object_mut() .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.to_owned()) + .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures @@ -603,7 +605,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/mod.rs b/src/database/mod.rs index 967ec88..f4ca44f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -6,13 +6,17 @@ use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; use lru_cache::LruCache; +use ruma::CanonicalJsonValue; +use ruma::OwnedDeviceId; +use ruma::OwnedEventId; +use ruma::OwnedRoomId; +use ruma::OwnedUserId; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - signatures::CanonicalJsonValue, DeviceId, EventId, RoomId, UserId, }; use std::{ @@ -58,7 +62,7 @@ pub struct KeyValueDatabase { //pub uiaa: uiaa::Uiaa, pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, + RwLock>, //pub edus: RoomEdus, pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId @@ -152,15 +156,15 @@ pub struct KeyValueDatabase { pub(super) senderkey_pusher: Arc, pub(super) cached_registrations: Arc>>, - pub(super) pdu_cache: Mutex, Arc>>, + pub(super) pdu_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, + pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lasttimelinecount_cache: Mutex, u64>>, + pub(super) our_real_users_cache: RwLock>>>, + pub(super) appservice_in_room_cache: RwLock>>, + pub(super) lasttimelinecount_cache: Mutex>, } impl 
KeyValueDatabase { @@ -531,7 +535,7 @@ impl KeyValueDatabase { if services().globals.database_version()? < 7 { // Upgrade state store - let mut last_roomstates: HashMap, u64> = HashMap::new(); + let mut last_roomstates: HashMap = HashMap::new(); let mut current_sstatehash: Option = None; let mut current_room = None; let mut current_state = HashSet::new(); diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 218a4ea..9e3f586 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -23,7 +23,7 @@ use ruma::{ }, RoomEventType, }, - EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, + EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, Mutex, MutexGuard}; @@ -977,8 +977,7 @@ impl Service { )?; // 5. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) - .expect("Room name is valid"); + let room_name = format!("{} Admin Room", services().globals.server_name()); services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, @@ -1010,7 +1009,7 @@ impl Service { )?; // 6. Room alias - let alias: Box = format!("#admins:{}", services().globals.server_name()) + let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 407ff1c..f333254 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, ServerName, ServerSigningKeyId, UserId, + DeviceId, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, UserId, }; use crate::Result; @@ -22,13 +22,13 @@ pub trait Data: Send + Sync { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>>; + ) -> Result>; /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. 
fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>>; + ) -> Result>; fn database_version(&self) -> Result; fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f88fd02..e7daff8 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,5 +1,8 @@ mod data; pub use data::Data; +use ruma::{ + OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, +}; use crate::api::server_server::FedDest; @@ -24,7 +27,7 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -type WellKnownMap = HashMap, (FedDest, String)>; +type WellKnownMap = HashMap; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( @@ -45,14 +48,14 @@ pub struct Service { default_client: reqwest::Client, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, - pub bad_event_ratelimiter: Arc, RateLimitState>>>, + pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock, Box), SyncHandle>>, - pub roomid_mutex_insert: RwLock, Arc>>>, - pub roomid_mutex_state: RwLock, Arc>>>, - pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer - pub roomid_federationhandletime: RwLock, (Box, Instant)>>, + pub servername_ratelimiter: Arc>>>, + pub sync_receivers: RwLock>, + pub roomid_mutex_insert: RwLock>>>, + pub roomid_mutex_state: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock>, pub stateres_mutex: Arc>, pub rotate: RotationHandler, } @@ -242,7 +245,7 @@ impl Service { self.config.default_room_version.clone() } - pub fn trusted_servers(&self) -> &[Box] { + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } @@ -295,7 +298,7 @@ impl Service { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { + ) -> Result> { self.db.add_signing_key(origin, new_keys) } @@ -303,7 +306,7 @@ impl Service { pub fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>> { + ) -> Result> { self.db.signing_keys_for(origin) } diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index f711e5d..bf64001 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -4,7 +4,7 @@ use crate::Result; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; pub trait Data: Send + Sync { @@ -47,7 +47,7 @@ pub trait Data: Send + Sync { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>>; + ) -> Result>; fn get_room( &self, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index fef4613..5fc52ce 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -5,7 +5,7 @@ use crate::Result; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; use std::collections::BTreeMap; @@ -78,7 +78,7 @@ impl Service { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result> { self.db.get_all(user_id, version) } diff --git 
a/src/service/pdu.rs b/src/service/pdu.rs index 724b2b2..593a687 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,11 +1,13 @@ use crate::{services, Error}; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, + RoomEventType, StateEvent, }, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UInt, UserId, + serde::Raw, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -25,8 +27,8 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: Arc, - pub room_id: Box, - pub sender: Box, + pub room_id: OwnedRoomId, + pub sender: OwnedUserId, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: RoomEventType, @@ -102,7 +104,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_sync_room_event(&self) -> Raw { + pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -146,7 +148,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_room_event(&self) -> Raw { + pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -332,7 +334,7 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, -) -> crate::Result<(Box, CanonicalJsonObject)> { +) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 385a207..2d2fa1f 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::events::AnySyncTimelineEvent; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; @@ -15,7 +16,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, + RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, @@ -195,12 +196,13 @@ impl Service { user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, + pdu: &Raw, room_id: &RoomId, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently + user_id: user.to_owned(), user_display_name: services() .users .displayname(user)? 
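The pdu.rs and pusher hunks above follow ruma's rename of the timeline-event enums (AnyRoomEvent to AnyTimelineEvent, AnySyncRoomEvent to AnySyncTimelineEvent); the Raw<...> wrapper is used exactly as before. A brief illustrative sketch, not taken from the patch:

    use ruma::{events::AnySyncTimelineEvent, serde::Raw};

    // Raw<T> keeps the JSON unparsed; deserialize() produces the typed enum on demand.
    fn is_state_event(raw: &Raw<AnySyncTimelineEvent>) -> bool {
        matches!(raw.deserialize(), Ok(AnySyncTimelineEvent::State(_)))
    }

    fn parse_event(json: &str) -> serde_json::Result<Raw<AnySyncTimelineEvent>> {
        // Raw<T> itself implements Deserialize, so the JSON is captured as a raw
        // value here rather than decoded into the event enum.
        serde_json::from_str(json)
    }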
@@ -242,7 +244,7 @@ impl Service { let mut data_minus_url = pusher.data.clone(); // The url must be stripped off according to spec data_minus_url.url = None; - device.data = data_minus_url; + device.data = data_minus_url.into(); // Tweaks are only added if the format is NOT event_id_only if !event_id_only { diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 6299add..629b1ee 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. @@ -9,11 +9,11 @@ pub trait Data: Send + Sync { fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; /// Looks up the roomid for the given alias. - fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>>; + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; /// Returns all local aliases that point to the given room fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 6b52549..d26030c 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,7 +3,7 @@ mod data; pub use data::Data; use crate::Result; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; pub struct Service { pub db: &'static dyn Data, @@ -21,7 +21,7 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { self.db.resolve_local_alias(alias) } @@ -29,7 +29,7 @@ impl Service { pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { self.db.local_aliases_for_room(room_id) } } diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 320c6db..aca731c 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; pub trait Data: Send + Sync { /// Adds the room to the public room directory @@ -12,5 +12,5 @@ pub trait Data: Send + Sync { fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms<'a>(&'a self) -> Box>> + 'a>; + fn public_rooms<'a>(&'a self) -> Box> + 'a>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 0c1b2cd..10f782b 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::Result; @@ -26,7 +26,7 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { + pub fn public_rooms(&self) -> impl Iterator> + '_ { self.db.public_rooms() } } diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index f378404..53329e0 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use crate::Result; -use ruma::{events::presence::PresenceEvent, RoomId, UserId}; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; pub trait Data: Send + 
Sync { /// Adds a presence event which will be saved until a new event replaces it. @@ -34,5 +34,5 @@ pub trait Data: Send + Sync { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>>; + ) -> Result>; } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 3681430..860aea1 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -2,7 +2,7 @@ mod data; use std::collections::HashMap; pub use data::Data; -use ruma::{events::presence::PresenceEvent, RoomId, UserId}; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -116,7 +116,7 @@ impl Service { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>> { + ) -> Result> { self.db.presence_since(room_id, since) } } diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 800c035..a183d19 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { /// Replaces the previous read receipt. @@ -18,7 +18,7 @@ pub trait Data: Send + Sync { ) -> Box< dyn Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 1b3ddb1..c603528 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -3,7 +3,7 @@ mod data; pub use data::Data; use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; pub struct Service { pub db: &'static dyn Data, @@ -28,7 +28,7 @@ impl Service { since: u64, ) -> impl Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 50b6d13..c4ad867 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomId, UserId}; +use ruma::{OwnedUserId, RoomId, UserId}; use std::collections::HashSet; pub trait Data: Send + Sync { @@ -14,5 +14,5 @@ pub trait Data: Send + Sync { fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. 
- fn typings_all(&self, room_id: &RoomId) -> Result>>; + fn typings_all(&self, room_id: &RoomId) -> Result>; } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index cfe0fbf..2d831f7 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -3,7 +3,7 @@ type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; use ruma::{ api::federation::discovery::{get_remote_server_keys, get_server_keys}, - signatures::CanonicalJsonObject, + CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId, RoomVersionId, }; use std::{ @@ -30,7 +30,6 @@ use ruma::{ }, int, serde::Base64, - signatures::CanonicalJsonValue, state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; @@ -300,7 +299,7 @@ impl Service { Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { + match ruma::canonical_json::redact(&value, room_version_id) { Ok(obj) => obj, Err(_) => { return Err(Error::BadRequest( @@ -974,7 +973,11 @@ impl Service { .rooms .state_compressor .save_state(room_id, new_room_state)?; - services().rooms.state.force_state(room_id, sstatehash, new, removed, &state_lock).await?; + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; } } @@ -1322,7 +1325,7 @@ impl Service { fn get_server_keys_from_cache( &self, pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + servers: &mut BTreeMap>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, ) -> Result<()> { @@ -1414,8 +1417,8 @@ impl Service { pub_key_map: &RwLock>>, ) -> Result<()> { let mut servers: BTreeMap< - Box, - BTreeMap, QueryCriteria>, + OwnedServerName, + BTreeMap, > = BTreeMap::new(); { diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index b30bb9c..701a734 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -5,7 +5,7 @@ use std::{ }; pub use data::Data; -use ruma::{DeviceId, RoomId, UserId}; +use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -13,7 +13,7 @@ pub struct Service { pub db: &'static dyn Data, pub lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, + Mutex>>, } impl Service { @@ -35,7 +35,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - lazy_load: HashSet>, + lazy_load: HashSet, count: u64, ) { self.lazy_load_waiting.lock().unwrap().insert( diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index df416da..339db57 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,9 +1,9 @@ use crate::Result; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; - fn iter_ids<'a>(&'a self) -> Box>> + 'a>; + fn iter_ids<'a>(&'a self) -> Box> + 'a>; fn is_disabled(&self, room_id: &RoomId) -> Result; fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index c99ae4a..d188469 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; 
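// A minimal sketch of the identifier change running through this patch: ruma's
// borrowed ID types (`RoomId`, `UserId`, ...) now pair with dedicated owned types
// (`OwnedRoomId`, `OwnedUserId`, ...) instead of the `Box<RoomId>` / `Box<UserId>`
// spelling used before. Purely illustrative; it assumes a ruma revision that
// exports the Owned* types from the crate root, as the imports above do.
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};

// Borrow in parameters, store owned values: `OwnedRoomId` derefs to `&RoomId`,
// so callers holding either form can pass it here without cloning up front.
fn remember_member(joined: &mut Vec<(OwnedRoomId, OwnedUserId)>, room: &RoomId, user: &UserId) {
    // `to_owned()` converts a borrowed identifier into its owned counterpart.
    joined.push((room.to_owned(), user.to_owned()));
}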
use crate::Result; @@ -16,7 +16,7 @@ impl Service { self.db.exists(room_id) } - pub fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { self.db.iter_ids() } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index edc7c4f..0ed521d 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,4 +1,4 @@ -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index c84e975..dae41e4 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8e80b5e..19a1e30 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{EventId, RoomId}; +use ruma::{EventId, OwnedEventId, RoomId}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::MutexGuard; @@ -26,7 +26,7 @@ pub trait Data: Send + Sync { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 15fa79b..2c49c35 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -12,7 +12,7 @@ use ruma::{ }, serde::Raw, state_res::{self, StateMap}, - EventId, RoomId, RoomVersionId, UserId, + EventId, OwnedEventId, RoomId, RoomVersionId, UserId, }; use serde::Deserialize; use tokio::sync::MutexGuard; @@ -346,7 +346,7 @@ impl Service { pub fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { self.db diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index a6b06a5..42de56d 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -4,7 +4,7 @@ use crate::Result; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; pub trait Data: Send + Sync { @@ -20,7 +20,7 @@ pub trait Data: Send + Sync { fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; - fn get_our_real_users(&self, room_id: &RoomId) -> Result>>>; + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; fn appservice_in_room( &self, @@ -35,7 +35,7 @@ pub trait Data: Send + Sync { fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; @@ -43,13 +43,13 @@ pub trait Data: Send + Sync { fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all joined members of a room. 
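// These storage traits are consumed as trait objects (`db: &'static dyn Data`),
// so iterator-returning methods hand back `Box<dyn Iterator<...>>`, and the
// `Service` wrappers re-expose them as `impl Iterator`. A compressed sketch of
// that shape; the `Result` alias, trait, and struct below are simplified
// stand-ins for the crate's own types, not its actual definitions.
use ruma::{OwnedUserId, RoomId};

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

trait Data: Send + Sync {
    // Object-safe: a boxed iterator borrowed from the backend for 'a.
    fn room_members<'a>(&'a self, room_id: &RoomId)
        -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a>;
}

struct Service {
    db: &'static dyn Data,
}

impl Service {
    // The service layer hides the box again behind `impl Iterator`.
    fn room_members<'a>(&'a self, room_id: &RoomId)
        -> impl Iterator<Item = Result<OwnedUserId>> + 'a {
        self.db.room_members(room_id)
    }
}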
fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn room_joined_count(&self, room_id: &RoomId) -> Result>; @@ -59,13 +59,13 @@ pub trait Data: Send + Sync { fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all invited members of a room. fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; @@ -75,13 +75,13 @@ pub trait Data: Send + Sync { fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all rooms a user was invited to. fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a>; + ) -> Box>)>> + 'a>; fn invite_state( &self, @@ -99,7 +99,7 @@ pub trait Data: Send + Sync { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a>; + ) -> Box>)>> + 'a>; fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 9431d3a..6c9bed3 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -12,7 +12,7 @@ use ruma::{ RoomAccountDataEventType, StateEventType, }, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use crate::{services, Error, Result}; @@ -192,7 +192,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { self.db.get_our_real_users(room_id) } @@ -216,7 +216,7 @@ impl Service { pub fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_servers(room_id) } @@ -230,7 +230,7 @@ impl Service { pub fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.server_rooms(server) } @@ -239,7 +239,7 @@ impl Service { pub fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_members(room_id) } @@ -258,7 +258,7 @@ impl Service { pub fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_useroncejoined(room_id) } @@ -267,7 +267,7 @@ impl Service { pub fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_members_invited(room_id) } @@ -286,7 +286,7 @@ impl Service { pub fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.rooms_joined(user_id) } @@ -295,7 +295,7 @@ impl Service { pub fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { + ) -> impl Iterator>)>> + 'a { self.db.rooms_invited(user_id) } @@ -322,7 +322,7 @@ impl Service { pub fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { + ) -> impl Iterator>)>> + 'a { self.db.rooms_left(user_id) } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index bcd3b9a..356f32c 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -251,7 +251,11 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) 
-> Result<(u64, HashSet, HashSet)> { + ) -> Result<( + u64, + HashSet, + HashSet, + )> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 095731c..9377af0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use ruma::{signatures::CanonicalJsonObject, EventId, RoomId, UserId}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; use crate::{PduEvent, Result}; @@ -81,7 +81,7 @@ pub trait Data: Send + Sync { fn increment_notification_counts( &self, room_id: &RoomId, - notifies: Vec>, - highlights: Vec>, + notifies: Vec, + highlights: Vec, ) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 16f50d2..e96afce 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -7,10 +7,15 @@ use std::sync::{Arc, Mutex}; pub use data::Data; use regex::Regex; +use ruma::canonical_json::to_canonical_value; use ruma::events::room::power_levels::RoomPowerLevelsEventContent; use ruma::push::Ruleset; -use ruma::signatures::CanonicalJsonValue; use ruma::state_res::RoomVersion; +use ruma::CanonicalJsonObject; +use ruma::CanonicalJsonValue; +use ruma::OwnedEventId; +use ruma::OwnedRoomId; +use ruma::OwnedServerName; use ruma::{ api::client::error::ErrorKind, events::{ @@ -19,8 +24,6 @@ use ruma::{ GlobalAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Tweak}, - serde::to_canonical_value, - signatures::CanonicalJsonObject, state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, }; use serde::Deserialize; @@ -38,7 +41,7 @@ use super::state_compressor::CompressedStateEvent; pub struct Service { pub db: &'static dyn Data, - pub lasttimelinecount_cache: Mutex, u64>>, + pub lasttimelinecount_cache: Mutex>, } impl Service { @@ -146,7 +149,7 @@ impl Service { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec>, + leaves: Vec, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { let shortroomid = services() @@ -702,7 +705,7 @@ impl Service { .state .set_room_state(room_id, statehashid, state_lock)?; - let mut servers: HashSet> = services() + let mut servers: HashSet = services() .rooms .state_cache .room_servers(room_id) @@ -716,7 +719,7 @@ impl Service { .as_ref() .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) { - servers.insert(Box::from(state_key_uid.server_name())); + servers.insert(state_key_uid.server_name().to_owned()); } } @@ -735,7 +738,7 @@ impl Service { &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec>, + new_room_leaves: Vec, state_ids_compressed: HashSet, soft_fail: bool, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 7b7841f..43c4c92 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; @@ -19,6 +19,6 @@ pub trait Data: Send + Sync { fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) -> Result>> + 'a>>; + users: Vec, + ) 
-> Result> + 'a>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 479e556..a765cfd 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -38,8 +38,8 @@ impl Service { pub fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) -> Result>> + 'a> { + users: Vec, + ) -> Result> + 'a> { self.db.get_shared_rooms(users) } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 60fc6f4..697ca85 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -30,10 +30,11 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, - push, - receipt::ReceiptType, - uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, + events::{ + push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, + GlobalAccountDataEventType, + }, + push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, UInt, UserId, }; use tokio::{ select, @@ -44,8 +45,8 @@ use tracing::{error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), - Push(Box, String), // user and pushkey - Normal(Box), + Push(OwnedUserId, String), // user and pushkey + Normal(OwnedServerName), } impl OutgoingKind { @@ -381,7 +382,7 @@ impl Service { } #[tracing::instrument(skip(self, servers, pdu_id))] - pub fn send_pdu>>( + pub fn send_pdu>( &self, servers: I, pdu_id: &[u8], diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 3b7eb2b..c64deb9 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{api::client::uiaa::UiaaInfo, signatures::CanonicalJsonValue, DeviceId, UserId}; +use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId}; pub trait Data: Send + Sync { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index e827cc8..672290c 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -7,8 +7,7 @@ use ruma::{ error::ErrorKind, uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, }, - signatures::CanonicalJsonValue, - DeviceId, UserId, + CanonicalJsonValue, DeviceId, UserId, }; use tracing::error; diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 9537ed2..bc1db33 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -4,7 +4,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, UInt, UserId, }; use std::collections::BTreeMap; @@ -19,10 +20,10 @@ pub trait Data: Send + Sync { fn count(&self) -> Result; /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result, String)>>; + fn find_from_token(&self, token: &str) -> Result>; /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box>> + 'a>; + fn iter<'a>(&'a self) -> Box> + 'a>; /// Returns a list of local users as list of usernames. 
/// @@ -42,10 +43,10 @@ pub trait Data: Send + Sync { fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - fn avatar_url(&self, user_id: &UserId) -> Result>>; + fn avatar_url(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()>; /// Get the blurhash of a user. fn blurhash(&self, user_id: &UserId) -> Result>; @@ -69,7 +70,7 @@ pub trait Data: Send + Sync { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -89,7 +90,7 @@ pub trait Data: Send + Sync { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>>; + ) -> Result)>>; fn count_one_time_keys( &self, @@ -125,7 +126,7 @@ pub trait Data: Send + Sync { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 4bcb183..ac66f03 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -7,7 +7,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, RoomAliasId, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, OwnedDeviceId, OwnedDeviceKeyId, + OwnedMxcUri, OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; @@ -56,12 +57,12 @@ impl Service { } /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>> { + pub fn find_from_token(&self, token: &str) -> Result> { self.db.find_from_token(token) } /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_ { + pub fn iter(&self) -> impl Iterator> + '_ { self.db.iter() } @@ -93,12 +94,12 @@ impl Service { } /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result>> { + pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.db.avatar_url(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. 
- pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { self.db.set_avatar_url(user_id, avatar_url) } @@ -133,7 +134,7 @@ impl Service { pub fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.all_device_ids(user_id) } @@ -162,7 +163,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>> { + ) -> Result)>> { self.db.take_one_time_key(user_id, device_id, key_algorithm) } @@ -209,7 +210,7 @@ impl Service { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.keys_changed(user_or_room_id, from, to) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 206a055..bd3d73c 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -6,7 +6,7 @@ use ruma::{ error::{Error as RumaError, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, - ServerName, + OwnedServerName, ServerName, }; use thiserror::Error; use tracing::{error, warn}; @@ -55,7 +55,7 @@ pub enum Error { source: reqwest::Error, }, #[error("{0}")] - FederationError(Box, RumaError), + FederationError(OwnedServerName, RumaError), #[error("Could not do this io: {source}")] IoError { #[from] diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 9202eeb..0b5b1ae 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -4,7 +4,7 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; use ring::digest; -use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, fmt, str::FromStr, From 076e9810ba721cfc74b3d9c677ebe08009a23481 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 17:26:53 +0200 Subject: [PATCH 1228/1727] cargo fix --- src/api/client_server/keys.rs | 2 +- src/api/client_server/sync.rs | 2 +- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 2 +- src/config/mod.rs | 2 +- src/database/key_value/globals.rs | 3 +-- src/database/key_value/users.rs | 4 ++-- src/database/mod.rs | 2 +- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- src/service/users/mod.rs | 4 ++-- src/utils/error.rs | 2 +- 14 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 33ff309..ef4e455 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -14,7 +14,7 @@ use ruma::{ federation, }, serde::Raw, - DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, + DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 0479322..f7907ce 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -10,7 +10,7 @@ use ruma::{ RoomEventType, StateEventType, }, serde::Raw, - DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UserId, + OwnedDeviceId, OwnedUserId, RoomId, UserId, }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index c71d36b..818cffc 100644 --- a/src/api/ruma_wrapper/axum.rs +++ 
b/src/api/ruma_wrapper/axum.rs @@ -17,7 +17,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, ServerName, UserId, + CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index a8ae272..513a076 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -43,7 +43,7 @@ use ruma::{ serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, ServerSigningKeyId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ diff --git a/src/config/mod.rs b/src/config/mod.rs index e0efa60..2c31b6b 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::{OwnedServerName, RoomVersionId, ServerName}; +use ruma::{OwnedServerName, RoomVersionId}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 75d00b4..4332930 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -5,8 +5,7 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, - UserId, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 3bb8e61..f7ee07c 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, OwnedUserId, - UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedUserId, UInt, + UserId, }; use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; diff --git a/src/database/mod.rs b/src/database/mod.rs index f4ca44f..689ab57 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -17,7 +17,7 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, + EventId, RoomId, UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index f333254..04371a0 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, UserId, + DeviceId, OwnedServerSigningKeyId, ServerName, UserId, }; use crate::Result; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index e7daff8..44192e0 100644 --- 
a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -12,7 +12,7 @@ use ruma::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + DeviceId, RoomVersionId, ServerName, UserId, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 2d831f7..ae63d9a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -31,7 +31,7 @@ use ruma::{ int, serde::Base64, state_res::{self, RoomVersion, StateMap}, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; use tracing::{debug, error, info, trace, warn}; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index e96afce..dc859d8 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -24,7 +24,7 @@ use ruma::{ GlobalAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Tweak}, - state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, + state_res, uint, EventId, RoomAliasId, RoomId, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index ac66f03..9dcfa8b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -7,8 +7,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, OwnedDeviceId, OwnedDeviceKeyId, - OwnedMxcUri, OwnedUserId, RoomAliasId, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; diff --git a/src/utils/error.rs b/src/utils/error.rs index bd3d73c..8967acb 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -6,7 +6,7 @@ use ruma::{ error::{Error as RumaError, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, - OwnedServerName, ServerName, + OwnedServerName, }; use thiserror::Error; use tracing::{error, warn}; From 229444c9321ef32d09969eb690cab55de66a5d12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 17:37:55 +0200 Subject: [PATCH 1229/1727] Use ring-compat feature so out signing keys work again --- Cargo.lock | 10 ++++++++++ Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 29603ee..35bcdea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,6 +2200,7 @@ dependencies = [ "ruma-common", "serde_json", "sha2", + "subslice", "thiserror", ] @@ -2520,6 +2521,15 @@ dependencies = [ "der", ] +[[package]] +name = "subslice" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a8e4809a3bb02de01f1f7faf1ba01a83af9e8eabcd4d31dd6e413d14d56aae" +dependencies = [ + "memchr", +] + [[package]] name = "subtle" version = "2.4.1" diff --git a/Cargo.toml b/Cargo.toml index 0428e74..f1ef0bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", 
"federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From ca82b2940d9241b99e694f3c8d597feb5bf1bbc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 21:56:56 +0200 Subject: [PATCH 1230/1727] fix: sending does not work We were inserting one too many 0xff bytes --- Cargo.lock | 47 ++++------------------------- Cargo.toml | 4 +-- src/database/abstraction/rocksdb.rs | 3 ++ src/database/key_value/sending.rs | 4 ++- src/database/mod.rs | 4 +-- src/service/sending/mod.rs | 3 +- 6 files changed, 18 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35bcdea..941634e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.60.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -295,17 +295,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.0.73" @@ -1283,17 +1272,14 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.8.0+7.4.4" +version = "6.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" dependencies = [ "bindgen", - "bzip2-sys", "cc", "glob", "libc", - "libz-sys", - "zstd-sys", ] [[package]] @@ -1307,17 +1293,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2053,9 +2028,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.19.0" +version = "0.17.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", @@ -3286,13 +3261,3 @@ checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" dependencies = [ "num-traits", ] - -[[package]] -name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" -dependencies = [ - "cc", - "libc", -] diff --git a/Cargo.toml b/Cargo.toml index f1ef0bb..031f279 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.19.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication @@ -94,7 +94,7 @@ lazy_static = "1.4.0" async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite", "jemalloc"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 0727728..96027f6 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -161,6 +161,7 @@ impl KvTree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -184,6 +185,7 @@ impl KvTree for RocksDbEngineTree<'_> { }, ), ) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -224,6 +226,7 @@ impl KvTree for RocksDbEngineTree<'_> { &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))) .take_while(move |(k, _)| k.starts_with(&prefix)), ) diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index d84bd49..fddbd67 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -66,7 +66,6 @@ impl service::sending::Data for KeyValueDatabase { let mut keys = Vec::new(); for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); - key.push(0xff); key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { &**value } else { @@ -139,6 +138,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; @@ -169,6 +169,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + ( OutgoingKind::Push(user_id, pushkey_string), if value.is_empty() { @@ -185,6 +186,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = 
utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; diff --git a/src/database/mod.rs b/src/database/mod.rs index 689ab57..9f893d6 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -556,7 +556,7 @@ impl KeyValueDatabase { services() .rooms .state_compressor - .load_shortstatehash_info(dbg!(last_roomsstatehash)) + .load_shortstatehash_info(last_roomsstatehash) }, )?; @@ -579,7 +579,7 @@ impl KeyValueDatabase { }; services().rooms.state_compressor.save_state_from_diff( - dbg!(current_sstatehash), + current_sstatehash, statediffnew, statediffremoved, 2, // every state change is 2 event changes on average diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 697ca85..20c652f 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -587,7 +587,7 @@ impl Service { .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() - .expect("notifiation count can't go that high"); + .expect("notification count can't go that high"); let permit = services().sending.maximum_requests.acquire().await; @@ -616,6 +616,7 @@ impl Service { .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? .ok_or_else(|| { + error!("event not found: {server} {pdu_id:?}"); ( OutgoingKind::Normal(server.clone()), Error::bad_database( From f430b874598f4262e5af5ecee8dd396e317a1e87 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 14:09:11 +0200 Subject: [PATCH 1231/1727] cargo clippy --- src/api/client_server/account.rs | 4 +- src/api/client_server/keys.rs | 4 +- src/api/client_server/membership.rs | 20 ++++---- src/api/client_server/room.rs | 19 ++++---- src/api/client_server/session.rs | 10 ++-- src/api/client_server/sync.rs | 8 ++-- src/api/client_server/to_device.rs | 2 +- src/api/client_server/user_directory.rs | 2 +- src/api/server_server.rs | 33 ++++++------- src/config/mod.rs | 2 +- src/database/abstraction/rocksdb.rs | 2 +- src/database/abstraction/sqlite.rs | 14 +++--- src/database/key_value/globals.rs | 6 +-- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 2 +- src/database/key_value/rooms/edus/typing.rs | 2 +- src/database/key_value/rooms/search.rs | 2 +- src/database/key_value/rooms/timeline.rs | 5 +- src/database/key_value/rooms/user.rs | 2 +- src/database/key_value/sending.rs | 10 ++-- src/database/key_value/users.rs | 12 ++--- src/database/mod.rs | 11 ++--- src/lib.rs | 2 +- src/main.rs | 5 +- src/service/admin/mod.rs | 32 ++++++------- src/service/pusher/mod.rs | 3 +- src/service/rooms/event_handler/mod.rs | 12 ++--- src/service/rooms/state/data.rs | 3 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/timeline/mod.rs | 48 +++++++++---------- src/service/sending/mod.rs | 18 +++---- 32 files changed, 139 insertions(+), 166 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 673bbb4..17b2920 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -333,7 +333,7 @@ pub async fn whoami_route(body: Ruma) -> Result bool>( let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { - let user_id: &UserId = &**user_id; + let user_id: &UserId = user_id; if user_id.server_name() != services().globals.server_name() { get_over_federation diff --git a/src/api/client_server/membership.rs 
b/src/api/client_server/membership.rs index 4f791c7..b69a6d1 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -62,15 +62,13 @@ pub async fn join_room_by_id_route( servers.push(body.room_id.server_name().to_owned()); - let ret = join_room_by_id_helper( + join_room_by_id_helper( body.sender_user.as_deref(), &body.room_id, &servers, body.third_party_signed.as_ref(), ) - .await; - - ret + .await } /// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` @@ -171,7 +169,7 @@ pub async fn kick_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .ok_or(Error::BadRequest( ErrorKind::BadState, @@ -230,7 +228,7 @@ pub async fn ban_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .map_or( Ok(RoomMemberEventContent { @@ -297,7 +295,7 @@ pub async fn unban_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .ok_or(Error::BadRequest( ErrorKind::BadState, @@ -408,7 +406,7 @@ pub async fn get_member_events_route( .await? .iter() .filter(|(key, _)| key.0 == StateEventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event().into()) + .map(|(_, pdu)| pdu.to_member_event()) .collect(), }) } @@ -864,7 +862,7 @@ pub(crate) async fn invite_helper<'a>( "${}", ruma::signatures::reference_hash( &pdu_json, - &services().rooms.state.get_room_version(&room_id)? + &services().rooms.state.get_room_version(room_id)? ) .expect("ruma can calculate reference hashes") ); @@ -878,7 +876,7 @@ pub(crate) async fn invite_helper<'a>( create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &services().rooms.state.get_room_version(&room_id)?, + room_version: &services().rooms.state.get_room_version(room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -938,7 +936,7 @@ pub(crate) async fn invite_helper<'a>( if !services() .rooms .state_cache - .is_joined(sender_user, &room_id)? + .is_joined(sender_user, room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 43b2e8e..097f0e1 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,8 +1,6 @@ use crate::{ api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; -use ruma::serde::JsonObject; -use ruma::OwnedRoomAliasId; use ruma::{ api::client::{ error::ErrorKind, @@ -23,7 +21,9 @@ use ruma::{ }, RoomEventType, StateEventType, }, - int, CanonicalJsonObject, RoomAliasId, RoomId, + int, + serde::JsonObject, + CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -213,14 +213,11 @@ pub async fn create_room_route( // 3. Power levels // Figure out preset. 
We need it for preset specific events - let preset = body - .preset - .clone() - .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => RoomPreset::PrivateChat, - room::Visibility::Public => RoomPreset::PublicChat, - _ => RoomPreset::PrivateChat, // Room visibility should not be custom - }); + let preset = body.preset.clone().unwrap_or(match &body.visibility { + room::Visibility::Private => RoomPreset::PrivateChat, + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom + }); let mut users = BTreeMap::new(); users.insert(sender_user.clone(), int!(100)); diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 6182516..f62ccbb 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -53,11 +53,11 @@ pub async fn login_route(body: Ruma) -> Result u32 { } fn default_cleanup_second_interval() -> u32 { - 1 * 60 // every minute + 60 // every minute } fn default_max_request_size() -> u32 { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 96027f6..34d91d2 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -193,7 +193,7 @@ impl KvTree for RocksDbEngineTree<'_> { fn increment(&self, key: &[u8]) -> Result> { let lock = self.write_lock.write().unwrap(); - let old = self.db.rocks.get_cf(&self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(&self.cf(), key, &new)?; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 02d4dbd..4961fd7 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -48,13 +48,13 @@ pub struct Engine { impl Engine { fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { - let conn = Connection::open(&path)?; + let conn = Connection::open(path)?; - conn.pragma_update(Some(Main), "page_size", &2048)?; - conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; - conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; - conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; + conn.pragma_update(Some(Main), "page_size", 2048)?; + conn.pragma_update(Some(Main), "journal_mode", "WAL")?; + conn.pragma_update(Some(Main), "synchronous", "NORMAL")?; + conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?; Ok(conn) } @@ -75,7 +75,7 @@ impl Engine { pub fn flush_wal(self: &Arc) -> Result<()> { self.write_lock() - .pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; + .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?; Ok(()) } } diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 4332930..7b7675c 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -134,7 +134,7 @@ impl service::globals::Data for KeyValueDatabase { let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); - let keypair = utils::string_from_bytes( + utils::string_from_bytes( // 1. 
version parts .next() @@ -151,9 +151,7 @@ impl service::globals::Data for KeyValueDatabase { .and_then(|(version, key)| { Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }); - - keypair + }) } fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 42d4030..3dfceb6 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -40,7 +40,7 @@ impl service::pusher::Data for KeyValueDatabase { self.senderkey_pusher .get(&senderkey)? .map(|push| { - serde_json::from_slice(&*push) + serde_json::from_slice(&push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .transpose() @@ -53,7 +53,7 @@ impl service::pusher::Data for KeyValueDatabase { self.senderkey_pusher .scan_prefix(prefix) .map(|(_, push)| { - serde_json::from_slice(&*push) + serde_json::from_slice(&push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .collect() diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index c0f6de8..6f23032 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -9,7 +9,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; + self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; Ok(()) } diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 5259bef..904b1c4 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -88,7 +88,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { for (key, value) in self .presenceid_presence - .iter_from(&*first_possible_edu, false) + .iter_from(&first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { let user_id = UserId::parse( diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 4e6c86b..4a2f0f9 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -17,7 +17,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { room_typing_id.extend_from_slice(&count); self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; + .insert(&room_typing_id, user_id.as_bytes())?; self.roomid_lasttypingupdate .insert(room_id.as_bytes(), &count)?; diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 788c296..19ae57b 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -15,7 +15,7 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xff); - key.extend_from_slice(&pdu_id); + key.extend_from_slice(pdu_id); (key, Vec::new()) }); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 0c6c2dd..336317d 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -39,7 +39,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { { hash_map::Entry::Vacant(v) => { if let Some(last_count) = self - .pdus_until(&sender_user, 
&room_id, u64::MAX)? + .pdus_until(sender_user, room_id, u64::MAX)? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -205,8 +205,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .unwrap() .insert(pdu.room_id.clone(), count); - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index e678c87..3d8d1c8 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -114,7 +114,7 @@ impl service::rooms::user::Data for KeyValueDatabase { utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index fddbd67..5424e8c 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -38,7 +38,7 @@ impl service::sending::Data for KeyValueDatabase { fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { let prefix = outgoing_kind.get_prefix(); - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { self.servercurrentevent_data.remove(&key)?; } @@ -51,7 +51,7 @@ impl service::sending::Data for KeyValueDatabase { self.servercurrentevent_data.remove(&key).unwrap(); } - for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + for (key, _) in self.servernameevent_data.scan_prefix(prefix) { self.servernameevent_data.remove(&key).unwrap(); } @@ -67,7 +67,7 @@ impl service::sending::Data for KeyValueDatabase { for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { - &**value + value } else { &[] }); @@ -91,7 +91,7 @@ impl service::sending::Data for KeyValueDatabase { let prefix = outgoing_kind.get_prefix(); return Box::new( self.servernameevent_data - .scan_prefix(prefix.clone()) + .scan_prefix(prefix) .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), ); } @@ -155,7 +155,7 @@ fn parse_servercurrentevent( let mut parts = key[1..].splitn(3, |&b| b == 0xff); let user = parts.next().expect("splitn always returns one element"); - let user_string = utils::string_from_bytes(&user) + let user_string = utils::string_from_bytes(user) .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; let user_id = UserId::parse(user_string) .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index f7ee07c..cd5a535 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,10 +5,9 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedUserId, UInt, - UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, + OwnedDeviceKeyId, OwnedMxcUri, 
OwnedUserId, UInt, UserId, }; -use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; use crate::{ @@ -380,13 +379,12 @@ impl service::users::Data for KeyValueDatabase { Ok(( serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, ) .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) + serde_json::from_slice(&value) .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, )) }) @@ -410,7 +408,7 @@ impl service::users::Data for KeyValueDatabase { .map(|(bytes, _)| { Ok::<_, Error>( serde_json::from_slice::( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, ) diff --git a/src/database/mod.rs b/src/database/mod.rs index 9f893d6..15ee137 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,22 +2,17 @@ pub mod abstraction; pub mod key_value; use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; -use abstraction::KeyValueDatabaseEngine; -use abstraction::KvTree; +use abstraction::{KeyValueDatabaseEngine, KvTree}; use directories::ProjectDirs; use lru_cache::LruCache; -use ruma::CanonicalJsonValue; -use ruma::OwnedDeviceId; -use ruma::OwnedEventId; -use ruma::OwnedRoomId; -use ruma::OwnedUserId; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - EventId, RoomId, UserId, + CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, + UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, diff --git a/src/lib.rs b/src/lib.rs index 541b8c8..3d7f7ae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,7 +24,7 @@ pub use utils::error::{Error, Result}; pub static SERVICES: RwLock> = RwLock::new(None); pub fn services<'a>() -> &'static Services { - &SERVICES + SERVICES .read() .unwrap() .expect("SERVICES should be initialized when this is called") diff --git a/src/main.rs b/src/main.rs index 0bba2ab..bdbeaa6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -444,7 +444,7 @@ impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); fn method_to_filter(method: Method) -> MethodFilter { - let method_filter = match method { + match method { Method::DELETE => MethodFilter::DELETE, Method::GET => MethodFilter::GET, Method::HEAD => MethodFilter::HEAD, @@ -454,6 +454,5 @@ fn method_to_filter(method: Method) -> MethodFilter { Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, m => panic!("Unsupported HTTP method: {:?}", m), - }; - method_filter + } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 9e3f586..b14ce2b 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -179,7 +179,7 @@ impl Service { } pub fn start_handler(self: &Arc) { - let self2 = Arc::clone(&self); + let self2 = Arc::clone(self); tokio::spawn(async move { self2.handler().await; }); @@ -270,13 +270,11 @@ impl Service { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let admin_command = match self.parse_admin_command(&command_line) { + let admin_command = match self.parse_admin_command(command_line) { Ok(command) => command, Err(error) => { let server_name = 
services().globals.server_name(); - let message = error - .to_string() - .replace("server.name", server_name.as_str()); + let message = error.replace("server.name", server_name.as_str()); let html_message = self.usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); @@ -316,8 +314,8 @@ impl Service { // Backwards compatibility with `register_appservice`-style commands let command_with_dashes; - if argv.len() > 1 && argv[1].contains("_") { - command_with_dashes = argv[1].replace("_", "-"); + if argv.len() > 1 && argv[1].contains('_') { + command_with_dashes = argv[1].replace('_', "-"); argv[1] = &command_with_dashes; } @@ -631,7 +629,7 @@ impl Service { let displayname = format!("{} ⚡️", user_id.localpart()); services() .users - .set_displayname(&user_id, Some(displayname.clone()))?; + .set_displayname(&user_id, Some(displayname))?; // Initial account data services().account_data.update( @@ -771,7 +769,7 @@ impl Service { let text = text.replace("subcommand", "command"); // Escape option names (e.g. ``) since they look like HTML tags - let text = text.replace("<", "<").replace(">", ">"); + let text = text.replace('<', "<").replace('>', ">"); // Italicize the first line (command name and version text) let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); @@ -799,7 +797,7 @@ impl Service { while text_lines .get(line_index) - .map(|line| line.starts_with("#")) + .map(|line| line.starts_with('#')) .unwrap_or(false) { command_body += if text_lines[line_index].starts_with("# ") { @@ -830,12 +828,10 @@ impl Service { }; // Add HTML line-breaks - let text = text - .replace("\n\n\n", "\n\n") - .replace("\n", "
                \n") - .replace("[nobr]
                ", ""); - text + text.replace("\n\n\n", "\n\n") + .replace('\n', "
                \n") + .replace("[nobr]
                ", "") } /// Create the admin room. @@ -1110,7 +1106,7 @@ impl Service { state_key: Some(user_id.to_string()), redacts: None, }, - &user_id, + user_id, &room_id, &state_lock, )?; @@ -1142,8 +1138,8 @@ impl Service { PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), - format!("
<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), + format!("
<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()), )) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2d2fa1f..8f8610c 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -4,7 +4,6 @@ use ruma::events::AnySyncTimelineEvent; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; -use ruma::api::IncomingResponse; use ruma::{ api::{ client::push::{get_pushers, set_pusher, PusherKind}, @@ -12,7 +11,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ae63d9a..cd270c7 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -284,7 +284,7 @@ impl Service { RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().expect("RwLock is poisoned."), + &pub_key_map.read().expect("RwLock is poisoned."), &value, room_version_id, ) { @@ -1198,7 +1198,7 @@ impl Service { .fetch_and_handle_outliers( origin, &[prev_event_id.clone()], - &create_event, + create_event, room_id, pub_key_map, ) @@ -1224,7 +1224,7 @@ impl Service { amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); + todo_outlier_stack.push(prev_prev.clone()); } } @@ -1248,7 +1248,7 @@ impl Service { } } - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { // This return value is the key used for sorting events, // events are then sorted by power level, time, // and lexically by event_id. 
@@ -1482,8 +1482,8 @@ impl Service { } let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { + .into_keys() + .map(|server| async move { ( services() .sending diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 19a1e30..f52ea72 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,7 +1,6 @@ use crate::Result; use ruma::{EventId, OwnedEventId, RoomId}; -use std::collections::HashSet; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use tokio::sync::MutexGuard; pub trait Data: Send + Sync { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 2c49c35..0e45032 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -93,7 +93,7 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; self.db - .set_room_state(room_id, shortstatehash, &state_lock)?; + .set_room_state(room_id, shortstatehash, state_lock)?; Ok(()) } @@ -331,7 +331,7 @@ impl Service { .transpose()?; let room_version = create_event_content .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + .ok_or(Error::BadDatabase("Invalid room version"))?; Ok(room_version) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dc859d8..619dca2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -2,29 +2,29 @@ mod data; use std::collections::HashMap; -use std::collections::HashSet; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; pub use data::Data; use regex::Regex; -use ruma::canonical_json::to_canonical_value; -use ruma::events::room::power_levels::RoomPowerLevelsEventContent; -use ruma::push::Ruleset; -use ruma::state_res::RoomVersion; -use ruma::CanonicalJsonObject; -use ruma::CanonicalJsonValue; -use ruma::OwnedEventId; -use ruma::OwnedRoomId; -use ruma::OwnedServerName; use ruma::{ api::client::error::ErrorKind, + canonical_json::to_canonical_value, events::{ push_rules::PushRulesEvent, - room::{create::RoomCreateEventContent, member::MembershipState}, + room::{ + create::RoomCreateEventContent, member::MembershipState, + power_levels::RoomPowerLevelsEventContent, + }, GlobalAccountDataEventType, RoomEventType, StateEventType, }, - push::{Action, Tweak}, - state_res, uint, EventId, RoomAliasId, RoomId, UserId, + push::{Action, Ruleset, Tweak}, + state_res, + state_res::RoomVersion, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomAliasId, RoomId, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; @@ -267,7 +267,7 @@ impl Service { .account_data .get( None, - &user, + user, GlobalAccountDataEventType::PushRules.to_string().into(), )? .map(|event| { @@ -276,13 +276,13 @@ impl Service { }) .transpose()? 
.map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(&user)); + .unwrap_or_else(|| Ruleset::server_default(user)); let mut highlight = false; let mut notify = false; for action in services().pusher.get_actions( - &user, + user, &rules_for_user, &power_levels, &sync_pdu, @@ -307,10 +307,8 @@ impl Service { highlights.push(user.clone()); } - for push_key in services().pusher.get_pushkeys(&user) { - services() - .sending - .send_push_pdu(&*pdu_id, &user, push_key?)?; + for push_key in services().pusher.get_pushkeys(user) { + services().sending.send_push_pdu(&pdu_id, user, push_key?)?; } } @@ -388,7 +386,7 @@ impl Service { && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - services().admin.process_message(body.to_string()); + services().admin.process_message(body); } } } @@ -583,8 +581,8 @@ impl Service { prev_events, depth, auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) + .values() + .map(|pdu| pdu.event_id.clone()) .collect(), redacts, unsigned: if unsigned.is_empty() { @@ -683,7 +681,7 @@ impl Service { state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { let (pdu, pdu_json) = - self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; + self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 20c652f..adaf7c0 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -110,7 +110,7 @@ impl Service { } pub fn start_handler(self: &Arc) { - let self2 = Arc::clone(&self); + let self2 = Arc::clone(self); tokio::spawn(async move { self2.handler().await.unwrap(); }); @@ -280,7 +280,7 @@ impl Service { device_list_changes.extend( services() .users - .keys_changed(&room_id.to_string(), since, None) + .keys_changed(room_id.as_ref(), since, None) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); @@ -487,7 +487,7 @@ impl Service { let response = appservice_server::send_request( services() .appservice - .get_registration(&id) + .get_registration(id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -562,7 +562,7 @@ impl Service { let pusher = match services() .pusher - .get_pusher(&userid, pushkey) + .get_pusher(userid, pushkey) .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? { Some(pusher) => pusher, @@ -573,18 +573,18 @@ impl Service { .account_data .get( None, - &userid, + userid, GlobalAccountDataEventType::PushRules.to_string().into(), ) .unwrap_or_default() .and_then(|event| serde_json::from_str::(event.get()).ok()) .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(&userid)); + .unwrap_or_else(|| push::Ruleset::server_default(userid)); let unread: UInt = services() .rooms .user - .notification_count(&userid, &pdu.room_id) + .notification_count(userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? 
.try_into() .expect("notification count can't go that high"); @@ -593,7 +593,7 @@ impl Service { let _response = services() .pusher - .send_push_notice(&userid, unread, &pusher, rules_for_user, &pdu) + .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) .await .map(|_response| kind.clone()) .map_err(|e| (kind.clone(), e)); @@ -638,7 +638,7 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; let response = server_server::send_request( - &*server, + server, send_transaction_message::v1::Request { origin: services().globals.server_name(), pdus: &pdu_jsons, From 71cffcd5379fbca8ad283c695da18adaa98412e2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 22 Jun 2022 22:14:53 +0000 Subject: [PATCH 1232/1727] feat(ci): Split clippy into own fallible job For some reason, the clippy build does not work. This change allows the cargo:test job to still succeed and the pipeline to pass --- .gitlab-ci.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 380332b..eb7a96f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -250,17 +250,30 @@ docker:tags:dockerhub: test:cargo: extends: .test-shared-settings before_script: - - rustup component add clippy # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: junit: report.xml + + +test:clippy: + extends: .test-shared-settings + allow_failure: true + before_script: + - rustup component add clippy + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + script: + - rustc --version && cargo --version # Print version info for debugging + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: codequality: gl-code-quality-report.json test:format: From df8703cc1304779449fde3a9bf9d1122e5345def Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 23 Jun 2022 06:58:34 +0000 Subject: [PATCH 1233/1727] Lightning bolt optional --- conduit-example.toml | 3 +++ src/api/client_server/account.rs | 9 +++++++-- src/config/mod.rs | 6 ++++++ src/service/admin/mod.rs | 8 +++++++- src/service/globals/mod.rs | 4 ++++ 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 362f7e7..5eed070 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -40,6 +40,9 @@ allow_registration = true allow_federation = true +# Enable the display name lightning bolt on registration. 
+enable_lightning_bolt = true + trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 17b2920..51343ae 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -12,7 +12,6 @@ use ruma::{ events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, push, UserId, }; - use tracing::{info, warn}; use register::RegistrationKind; @@ -169,7 +168,13 @@ pub async fn register_route( services().users.create(&user_id, password)?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + services() .users .set_displayname(&user_id, Some(displayname.clone()))?; diff --git a/src/config/mod.rs b/src/config/mod.rs index b60b9cf..31d96b6 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -26,6 +26,8 @@ pub struct Config { pub database_path: String, #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, + #[serde(default = "true_fn")] + pub enable_lightning_bolt: bool, #[serde(default = "default_conduit_cache_capacity_modifier")] pub conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] @@ -135,6 +137,10 @@ impl fmt::Display for Config { &self.max_concurrent_requests.to_string(), ), ("Allow registration", &self.allow_registration.to_string()), + ( + "Enabled lightning bolt", + &self.enable_lightning_bolt.to_string(), + ), ("Allow encryption", &self.allow_encryption.to_string()), ("Allow federation", &self.allow_federation.to_string()), ("Allow room creation", &self.allow_room_creation.to_string()), diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b14ce2b..9110378 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -626,7 +626,13 @@ impl Service { services().users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + services() .users .set_displayname(&user_id, Some(displayname))?; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 44192e0..4daddab 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -245,6 +245,10 @@ impl Service { self.config.default_room_version.clone() } + pub fn enable_lightning_bolt(&self) -> bool { + self.config.enable_lightning_bolt + } + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } From 7cf060ae5b31f5cf5b03bb31bb327067f37b5035 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Jun 2022 09:04:19 +0200 Subject: [PATCH 1234/1727] Bump version to 0.4 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 941634e..760b4d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -372,7 +372,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.3.0-next" +version = "0.4.0-next" dependencies = 
[ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 031f279..e007e49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.3.0-next" +version = "0.4.0-next" rust-version = "1.63" edition = "2021" From 18ca2e4c2984eb265e4fe2ef12b87bf50a8fdd60 Mon Sep 17 00:00:00 2001 From: majso Date: Sat, 25 Jun 2022 21:59:49 +0000 Subject: [PATCH 1235/1727] Dockerfile: changing DB path to be same as we are using in CI --- Dockerfile | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 76d10ea..8a76c47 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,9 +36,11 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit + ENV CONDUIT_PORT=6167 \ CONDUIT_ADDRESS="0.0.0.0" \ - CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ CONDUIT_CONFIG='' # └─> Set no config file to do all configuration with env vars @@ -51,9 +53,6 @@ RUN apt-get update && apt-get -y --no-install-recommends install \ wget \ && rm -rf /var/lib/apt/lists/* -# Created directory for the database and media files -RUN mkdir -p /srv/conduit/.local/share/conduit - # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh @@ -69,10 +68,12 @@ RUN set -x ; \ groupadd -r -g ${GROUP_ID} conduit ; \ useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 -# Change ownership of Conduit files to conduit user and group and make the healthcheck executable: +# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh - + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} + # Change user to conduit, no root permissions afterwards: USER conduit # Set container home directory From c15205fb4679b7058790c9691c3f767d4b2c3c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 14 Aug 2022 19:29:46 +0200 Subject: [PATCH 1236/1727] fix(client/keys): ignore non-signature keys in signature upload route --- src/api/client_server/keys.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 9a21dd6..837e166 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -148,11 +148,24 @@ pub async fn upload_signatures_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for (user_id, signed_keys) in &body.signed_keys { - for (key_id, signed_key) in signed_keys { - let signed_key = serde_json::to_value(signed_key).unwrap(); + for (user_id, keys) in &body.signed_keys { + for (key_id, key) in keys { + let key = serde_json::to_value(key) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; - for signature in signed_key + let is_signature_key = match key.get("usage") { + Some(usage) => usage + .as_array() + .map(|usage| !usage.contains(&json!("master"))) + .unwrap_or(false), + None => true, + 
}; + + if !is_signature_key { + continue; + } + + for signature in key .get("signatures") .ok_or(Error::BadRequest( ErrorKind::InvalidParam, From 0ddc3c01ef4b1c62e2a9f58b53a137f15ca72b14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 14 Aug 2022 19:33:13 +0200 Subject: [PATCH 1237/1727] style(client/keys): rename signature key to signed key --- src/api/client_server/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 837e166..2de785f 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -153,7 +153,7 @@ pub async fn upload_signatures_route( let key = serde_json::to_value(key) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; - let is_signature_key = match key.get("usage") { + let is_signed_key = match key.get("usage") { Some(usage) => usage .as_array() .map(|usage| !usage.contains(&json!("master"))) @@ -161,7 +161,7 @@ pub async fn upload_signatures_route( None => true, }; - if !is_signature_key { + if !is_signed_key { continue; } From 2b7c19835b65e4dd3a6a32466a9f45b06bf1ced2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 15:00:44 +0200 Subject: [PATCH 1238/1727] Add room version 10 to experimental versions --- src/service/globals/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 4daddab..d4c9dad 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -129,7 +129,12 @@ impl Service { RoomVersionId::V9, ]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + let unstable_room_versions = vec![ + RoomVersionId::V3, + RoomVersionId::V4, + RoomVersionId::V5, + RoomVersionId::V10, + ]; let mut s = Self { db, From c30cc6120b31e8d631bfe8c988d399c120c638c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 11:53:13 +0200 Subject: [PATCH 1239/1727] fix: send right errors on make/send join in restricted rooms --- src/api/server_server.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 3750598..c9f6a78 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1283,10 +1283,10 @@ pub async fn create_join_event_template_route( if let Some(join_rules_event_content) = join_rules_event_content { if matches!( join_rules_event_content.join_rule, - JoinRule::Restricted { .. } + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. } ) { return Err(Error::BadRequest( - ErrorKind::Unknown, + ErrorKind::UnableToAuthorizeJoin, "Conduit does not support restricted rooms yet.", )); } @@ -1376,10 +1376,10 @@ async fn create_join_event( if let Some(join_rules_event_content) = join_rules_event_content { if matches!( join_rules_event_content.join_rule, - JoinRule::Restricted { .. } + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. 
} ) { return Err(Error::BadRequest( - ErrorKind::Unknown, + ErrorKind::UnableToAuthorizeJoin, "Conduit does not support restricted rooms yet.", )); } From fb6bfa97530e09f3d2c69dfde3a65c4c633f937a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 15:25:10 +0200 Subject: [PATCH 1240/1727] fix: missing field `origin` error with synapse servers --- Cargo.toml | 2 +- src/api/server_server.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e007e49..37b0529 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index c9f6a78..eabe8c8 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1470,7 +1470,6 @@ async fn create_join_event( .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), - origin: services().globals.server_name().to_string(), }) } From 31d180191262b2e130e5c7463cdd8d12ee7c6c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:10:09 +0200 Subject: [PATCH 1241/1727] fix: workaround for missing avatars on element and rooms becoming historical --- src/api/client_server/message.rs | 13 +++++++++++++ src/api/client_server/sync.rs | 2 ++ 2 files changed, 15 insertions(+) diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index e086e4a..2b5bdf9 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -170,6 +170,9 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -178,6 +181,8 @@ pub async fn get_message_events_route( )? 
{ lazy_loaded.insert(event.sender.clone()); } + */ + lazy_loaded.insert(event.sender.clone()); } next_token = events_after.last().map(|(count, _)| count).copied(); @@ -210,6 +215,9 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -218,6 +226,8 @@ pub async fn get_message_events_route( )? { lazy_loaded.insert(event.sender.clone()); } + */ + lazy_loaded.insert(event.sender.clone()); } next_token = events_before.last().map(|(count, _)| count).copied(); @@ -244,6 +254,8 @@ pub async fn get_message_events_route( } } + // TODO: enable again when we are sure clients can handle it + /* if let Some(next_token) = next_token { services().rooms.lazy_loading.lazy_load_mark_sent( sender_user, @@ -253,6 +265,7 @@ pub async fn get_message_events_route( next_token, ); } + */ Ok(resp) } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index d876a92..483b32b 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -433,6 +433,8 @@ async fn sync_helper( } else if !lazy_load_enabled || body.full_state || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key { let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, From 68227c06c398ece491cd7f3b7bebe254dcdb43f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:10:56 +0200 Subject: [PATCH 1242/1727] fix: state for left rooms --- src/api/client_server/sync.rs | 86 ++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 483b32b..739b42f 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -827,7 +827,9 @@ async fn sync_helper( .rooms_left(&sender_user) .collect(); for result in all_left_rooms { - let (room_id, left_state_events) = result?; + let (room_id, _) = result?; + + let mut left_state_events = Vec::new(); { // Get and drop the lock to wait for remaining operations to finish @@ -854,6 +856,88 @@ async fn sync_helper( continue; } + if !services().rooms.metadata.exists(&room_id)? { + // This is just a rejected invite, not a room we know + continue; + } + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + let since_state_ids = match since_shortstatehash { + Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, + None => BTreeMap::new(), + }; + + let left_event_id = match services().rooms.state_accessor.room_state_get_id( + &room_id, + &StateEventType::RoomMember, + sender_user.as_str(), + )? { + Some(e) => e, + None => { + error!("Left room but no left state event"); + continue; + } + }; + + let left_shortstatehash = match services() + .rooms + .state_accessor + .pdu_shortstatehash(&left_event_id)? 
+ { + Some(s) => s, + None => { + error!("Leave event has no state"); + continue; + } + }; + + let mut left_state_ids = services() + .rooms + .state_accessor + .state_full_ids(left_shortstatehash) + .await?; + + let leave_shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &StateEventType::RoomMember, + &sender_user.as_str(), + )?; + + left_state_ids.insert(leave_shortstatekey, left_event_id); + + let mut i = 0; + for (key, id) in left_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(key)?; + + if !lazy_load_enabled + || event_type != StateEventType::RoomMember + || body.full_state + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + left_state_events.push(pdu.to_sync_state_event()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + } + left_rooms.insert( room_id.clone(), LeftRoom { From d1e5acd7b3270fd08601dc9ec2eaad2455a53b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:59:49 +0200 Subject: [PATCH 1243/1727] fix: don't panic on missing events in state --- src/api/server_server.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index eabe8c8..35c01f9 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). @@ -1149,16 +1149,18 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids - .map(|id| { - services() + .filter_map(|id| { + match services() .rooms .timeline - .get_pdu_json(&id) - .map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) + .get_pdu_json(&id).ok()? 
{ + Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), + None => { + error!("Could not find event json for {id} in db."); + None + } + } }) - .filter_map(|r| r.ok()) .collect(), pdus, }) From 8105c5cc60912afc487a6d045c67a7dada76322e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 18:10:51 +0200 Subject: [PATCH 1244/1727] cargo fmt --- src/api/client_server/sync.rs | 8 ++++---- src/api/server_server.rs | 19 ++++++++----------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 739b42f..4b73269 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -901,10 +901,10 @@ async fn sync_helper( .state_full_ids(left_shortstatehash) .await?; - let leave_shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &StateEventType::RoomMember, - &sender_user.as_str(), - )?; + let leave_shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&StateEventType::RoomMember, &sender_user.as_str())?; left_state_ids.insert(leave_shortstatekey, left_event_id); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 35c01f9..03128a7 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1149,18 +1149,15 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| { - match services() - .rooms - .timeline - .get_pdu_json(&id).ok()? { - Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), - None => { - error!("Could not find event json for {id} in db."); - None - } + .filter_map( + |id| match services().rooms.timeline.get_pdu_json(&id).ok()? { + Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), + None => { + error!("Could not find event json for {id} in db."); + None } - }) + }, + ) .collect(), pdus, }) From d3968c2fd1d901011e5aaf1dd14cecfed5af10bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 21:51:20 +0200 Subject: [PATCH 1245/1727] fix: bump ruma again to fix state res problems --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client_server/media.rs | 9 ++++++++- src/api/client_server/message.rs | 8 ++++---- src/api/client_server/read_marker.rs | 4 +++- src/api/client_server/sync.rs | 1 + 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 760b4d9..9e58bcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,7 +2039,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "assign", "js_int", @@ -2057,7 +2057,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "ruma-common", "serde", @@ -2067,7 +2067,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.1" -source = 
"git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "assign", "bytes", @@ -2083,7 +2083,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "base64", "bytes", @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "thiserror", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "once_cell", "proc-macro-crate", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2166,7 +2166,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "base64", "ed25519-dalek", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 
37b0529..0b3062d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "c2c45551335c443ede7fb9158284196899a0c696", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index c1f5e1d..ae023c9 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -104,6 +104,7 @@ pub async fn get_content_route( file, content_type, content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = @@ -134,6 +135,7 @@ pub async fn get_content_as_filename_route( file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = @@ -143,6 +145,7 @@ pub async fn get_content_as_filename_route( content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -174,7 +177,11 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::v3::Response { file, content_type }) + Ok(get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let get_thumbnail_response = services() .sending diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 2b5bdf9..b04c262 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -127,8 +127,8 @@ pub async fn get_message_events_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, None => match body.dir { - get_message_events::v3::Direction::Forward => 0, - get_message_events::v3::Direction::Backward => u64::MAX, + ruma::api::client::Direction::Forward => 0, + ruma::api::client::Direction::Backward => u64::MAX, }, }; @@ -151,7 +151,7 @@ pub async fn get_message_events_route( let mut lazy_loaded = HashSet::new(); match body.dir { - get_message_events::v3::Direction::Forward => { + ruma::api::client::Direction::Forward => { let events_after: Vec<_> = services() .rooms .timeline @@ -196,7 +196,7 @@ pub async fn get_message_events_route( resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } - get_message_events::v3::Direction::Backward => { + ruma::api::client::Direction::Backward => { let events_before: Vec<_> = services() .rooms .timeline diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index bdf467f..48520fc 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,7 +1,7 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::{receipt::ReceiptType, RoomAccountDataEventType}, + events::{receipt::{ReceiptType, ReceiptThread}, RoomAccountDataEventType}, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; @@ -59,6 +59,7 @@ pub async fn set_read_marker_route( sender_user.clone(), ruma::events::receipt::Receipt { ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, }, ); @@ -119,6 +120,7 @@ pub async fn create_receipt_route( sender_user.clone(), ruma::events::receipt::Receipt { ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, }, ); let mut receipts = BTreeMap::new(); diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 4b73269..828ae19 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -778,6 +778,7 @@ async fn sync_helper( .collect(), }, ephemeral: Ephemeral { events: edus }, + unread_thread_notifications: BTreeMap::new(), }; if !joined_room.is_empty() { From 2b70d9604a2cb10830a6677c2380374836b4d990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 22:37:14 +0200 Subject: [PATCH 1246/1727] fix: element gets stuck in /initialSync --- src/main.rs | 14 +++++++++++++- src/service/rooms/event_handler/mod.rs | 6 +++++- src/utils/error.rs | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index bdbeaa6..0836841 100644 --- a/src/main.rs +++ b/src/main.rs @@ -342,6 +342,14 @@ fn routes() -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .route( + "/_matrix/client/r0/rooms/:room_id/initialSync", + 
get(initial_sync), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/initialSync", + get(initial_sync), + ) .fallback(not_found.into_service()) } @@ -375,7 +383,11 @@ async fn shutdown_signal(handle: ServerHandle) { } async fn not_found(_uri: Uri) -> impl IntoResponse { - Error::BadRequest(ErrorKind::NotFound, "Unknown or unimplemented route") + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") +} + +async fn initial_sync(_uri: Uri) -> impl IntoResponse { + Error::BadRequest(ErrorKind::GuestAccessForbidden, "Guest access not implemented") } trait RouterExt { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index cd270c7..477a971 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -44,6 +44,7 @@ impl Service { /// When receiving an event one needs to: /// 0. Check the server is in the room /// 1. Skip the PDU if we already know about it + /// 1.1. Remove unsigned field /// 2. Check signatures, otherwise drop /// 3. Check content hash, redact if doesn't match /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not @@ -260,10 +261,13 @@ impl Service { create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - value: BTreeMap, + mut value: BTreeMap, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { + // 1.1. Remove unsigned field + value.remove("unsigned"); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // We go through all the signatures we see on the value and fetch the corresponding signing diff --git a/src/utils/error.rs b/src/utils/error.rs index 8967acb..9c8617f 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -117,7 +117,7 @@ impl Error { StatusCode::FORBIDDEN } Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, - NotFound => StatusCode::NOT_FOUND, + NotFound | Unrecognized => StatusCode::NOT_FOUND, LimitExceeded { .. 
} => StatusCode::TOO_MANY_REQUESTS, UserDeactivated => StatusCode::FORBIDDEN, TooLarge => StatusCode::PAYLOAD_TOO_LARGE, From 0290f1f3554ef8cca02218a396961d13dbbd44ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 10:42:59 +0200 Subject: [PATCH 1247/1727] improvement: more efficient /claim --- src/api/client_server/keys.rs | 42 ++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 2de785f..86cfaa4 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -440,25 +440,35 @@ pub(crate) async fn claim_keys_helper( let mut failures = BTreeMap::new(); - for (server, vec) in get_over_federation { + let mut futures: FuturesUnordered<_> = get_over_federation + .into_iter() + .map(|(server, vec)| async move { let mut one_time_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); - } - // Ignore failures - if let Ok(keys) = services() - .sending - .send_federation_request( + for (user_id, keys) in vec { + one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + ( server, - federation::keys::claim_keys::v1::Request { - one_time_keys: one_time_keys_input_fed, - }, + services() + .sending + .send_federation_request( + server, + federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }, + ) + .await, ) - .await - { - one_time_keys.extend(keys.one_time_keys); - } else { - failures.insert(server.to_string(), json!({})); + }).collect(); + + while let Some((server, response)) = futures.next().await { + match response { + Ok(keys) => { + one_time_keys.extend(keys.one_time_keys); + } + Err(_e) => { + failures.insert(server.to_string(), json!({})); + } } } From dd8f4681a2175e271abd8f88198cee6dcba59655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 10:57:54 +0200 Subject: [PATCH 1248/1727] fix: make join should not send event id --- src/api/server_server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 03128a7..9b32b96 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1313,7 +1313,7 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (_pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { event_type: RoomEventType::RoomMember, content, @@ -1328,6 +1328,8 @@ pub async fn create_join_event_template_route( drop(state_lock); + pdu_json.remove("event_id"); + Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), From fdd64fc966df79064f7590f3750d2288666fa467 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 17:17:16 +0200 Subject: [PATCH 1249/1727] fix: fluffychat login works again --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e58bcc..48ce6c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,7 +2039,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = 
"git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", "js_int", @@ -2057,7 +2057,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "ruma-common", "serde", @@ -2067,7 +2067,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.1" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", "bytes", @@ -2083,7 +2083,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "base64", "bytes", @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "thiserror", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "once_cell", "proc-macro-crate", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2166,7 +2166,7 @@ dependencies = [ [[package]] name = 
"ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "base64", "ed25519-dalek", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 0b3062d..cce6f9b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "c2c45551335c443ede7fb9158284196899a0c696", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "fba6f70c2df8294f96567f56464a46e3d237a8e9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From 3c20c1b72e8aef3253ad1c96e8943b9803bb0a3b Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 14:39:58 -0700 Subject: [PATCH 1250/1727] fix `cargo test` --- src/api/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9b32b96..f84ca36 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -65,7 +65,7 @@ use tracing::{error, info, warn}; /// /// # Examples: /// ```rust -/// # use conduit::server_server::FedDest; +/// # use conduit::api::server_server::FedDest; /// # fn main() -> Result<(), std::net::AddrParseError> { /// FedDest::Literal("198.51.100.3:8448".parse()?); /// FedDest::Literal("[2001:db8::4:5]:443".parse()?); From 4710f739c0b9b230a155ffb2cf0947d96a4246cc Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:48:09 -0700 Subject: [PATCH 1251/1727] clap v4 turned more things into optional features So we need to re-enable some things. See their changelog[0] for details. 
[0]: https://github.com/clap-rs/clap/blob/master/CHANGELOG.md#migrating --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index cce6f9b..e7e48c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ thread_local = "1.1.3" hmac = "0.12.1" sha-1 = "0.10.0" # used for conduit's CLI and admin room command parsing -clap = { version = "4.0.11", default-features = false, features = ["std", "derive"] } +clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.6", features = ["env", "toml"] } From fc852f8be64daa663d05dc64f5c09dd64e7a1609 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:49:13 -0700 Subject: [PATCH 1252/1727] resolve `cargo check --features clap/deprecated` This has no functional effects. --- src/service/admin/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 9110378..ad6d26b 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -38,9 +38,9 @@ use crate::{ use super::pdu::PduBuilder; #[derive(Parser)] -#[clap(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] +#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// /// This command needs a YAML generated by an appservice (such as a bridge), @@ -80,12 +80,12 @@ enum AdminCommand { /// User will not be removed from all rooms by default. /// Use --leave-rooms to force the user to leave all rooms DeactivateUser { - #[clap(short, long)] + #[arg(short, long)] leave_rooms: bool, user_id: Box, }, - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Deactivate a list of users /// /// Recommended to use in conjunction with list-local-users. @@ -100,10 +100,10 @@ enum AdminCommand { /// # User list here /// # ``` DeactivateAll { - #[clap(short, long)] + #[arg(short, long)] /// Remove users from their joined rooms leave_rooms: bool, - #[clap(short, long)] + #[arg(short, long)] /// Also deactivate admin accounts force: bool, }, @@ -114,7 +114,7 @@ enum AdminCommand { event_id: Box, }, - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Parse and print a PDU from a JSON /// /// The PDU event is only checked for validity and is not added to the From 7ef9fe3454f4a025c88884ce59e3b0b58af0cf97 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:50:04 -0700 Subject: [PATCH 1253/1727] add regression tests This way we don't regress on accident again in the future. 
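For context, the clap v4 migration in the patches above combines three things: re-enabling the optional "help", "usage" and "error-context" cargo features, replacing the deprecated #[clap(...)] derive attributes with #[command(...)] and #[arg(...)], and adding tests that parse "--help". A minimal sketch of how these pieces fit together could look like the following; it is illustrative only, with a hypothetical ExampleCommand standing in for Conduit's AdminCommand, and it assumes clap 4 with default-features = false plus the "std", "derive", "help" and "usage" features, as in the Cargo.toml change above.

use clap::Parser;

#[derive(Debug, Parser)]
#[command(name = "example-admin", version)]
enum ExampleCommand {
    /// Deactivate a user (hypothetical subcommand for illustration)
    DeactivateUser {
        #[arg(short, long)]
        leave_rooms: bool,
        user_id: String,
    },
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn help_is_rendered() {
        // `--help` makes clap return an Err whose Display output is the help text;
        // with the "help"/"usage" features enabled it contains a "Usage:" block.
        let error = ExampleCommand::try_parse_from(["argv0", "--help"])
            .unwrap_err()
            .to_string();
        assert!(error.contains("Usage:"));
    }
}

Clap surfaces the help text as an error value from try_parse_from, which is why the regression tests in the patch below assert on the stringified error rather than on captured stdout.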
--- src/service/admin/mod.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ad6d26b..942df1c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -37,6 +37,7 @@ use crate::{ use super::pdu::PduBuilder; +#[cfg_attr(test, derive(Debug))] #[derive(Parser)] #[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { @@ -1160,3 +1161,34 @@ impl Service { Ok(()) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn get_help_short() { + get_help_inner("-h"); + } + + #[test] + fn get_help_long() { + get_help_inner("--help"); + } + + #[test] + fn get_help_subcommand() { + get_help_inner("help"); + } + + fn get_help_inner(input: &str) { + let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) + .unwrap_err() + .to_string(); + + // Search for a handful of keywords that suggest the help printed properly + assert!(error.contains("Usage:")); + assert!(error.contains("Commands:")); + assert!(error.contains("Options:")); + } +} From 9a47069f45e6565d51d017b07fddbe7fbf7177cd Mon Sep 17 00:00:00 2001 From: AndSDev Date: Mon, 29 Aug 2022 07:15:55 +0000 Subject: [PATCH 1254/1727] fix(client/login): username in lowercase for login by token --- src/api/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index f62ccbb..7c8c128 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -92,7 +92,7 @@ pub async fn login_route(body: Ruma) -> Result Date: Thu, 13 Oct 2022 10:14:52 +0200 Subject: [PATCH 1255/1727] fix: all the e2ee problems --- src/api/appservice_server.rs | 14 ++++++++++++-- src/api/client_server/to_device.rs | 5 +++-- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 8 ++++++-- src/database/key_value/sending.rs | 10 +++++----- src/service/pusher/mod.rs | 5 ++++- src/service/sending/mod.rs | 2 -- 7 files changed, 31 insertions(+), 15 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 6dca60b..339a0c2 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -45,11 +45,21 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = services() + let mut response = match services() .globals .default_client() .execute(reqwest_request) - .await?; + .await + { + Ok(r) => r, + Err(e) => { + warn!( + "Could not send request to appservice {:?} at {}: {}", + registration.get("id"), destination, e + ); + return Err(e.into()); + } + }; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index f84d54f..139b845 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -35,6 +35,7 @@ pub async fn send_event_to_device_route( map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); messages.insert(target_user_id.clone(), map); + let count = services().globals.next_count()?; services().sending.send_reliable_edu( target_user_id.server_name(), @@ -42,12 +43,12 @@ pub async fn send_event_to_device_route( DirectDeviceContent { sender: sender_user.clone(), ev_type: ToDeviceEventType::from(&*body.event_type), - message_id: body.txn_id.to_owned(), + message_id: 
count.to_string().into(), messages, }, )) .expect("DirectToDevice EDU can be serialized"), - services().globals.next_count()?, + count, )?; continue; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 818cffc..d056f3f 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -281,7 +281,7 @@ where debug!("{:?}", http_request); let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!("{:?}", e); + warn!("{:?}\n{:?}", e, json_body); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9b32b96..de0f840 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -304,7 +304,10 @@ where )) } } - Err(e) => Err(e.into()), + Err(e) => { + warn!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); + Err(e.into()) + }, } } @@ -831,7 +834,8 @@ pub async fn send_transaction_message_route( target_user_id, target_device_id, &ev_type.to_string(), - event.deserialize_as().map_err(|_| { + event.deserialize_as().map_err(|e| { + warn!("To-Device event is invalid: {event:?} {e}"); Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index 5424e8c..fcbe0f3 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -6,7 +6,7 @@ use crate::{ self, sending::{OutgoingKind, SendingEventType}, }, - utils, Error, Result, + utils, Error, Result, services, }; impl service::sending::Data for KeyValueDatabase { @@ -66,11 +66,11 @@ impl service::sending::Data for KeyValueDatabase { let mut keys = Vec::new(); for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); - key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { - value + if let SendingEventType::Pdu(value) = &event { + key.extend_from_slice(value) } else { - &[] - }); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()) + } let value = if let SendingEventType::Edu(value) = &event { &**value } else { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 8f8610c..767687d 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -128,7 +128,10 @@ impl Service { Error::BadServerResponse("Push gateway returned bad response.") }) } - Err(e) => Err(e.into()), + Err(e) => { + warn!("Could not send request to pusher {}: {}", destination, e); + Err(e.into()) + }, } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index adaf7c0..afa12fc 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -158,8 +158,6 @@ impl Service { // Find events that have been added since starting the last request let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); - // TODO: find edus - if !new_events.is_empty() { // Insert pdus we found self.db.mark_as_active(&new_events)?; From 8c6e75a0cd2d7870a32320f36af0851d5cae1f58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Oct 2022 10:27:42 +0200 Subject: [PATCH 1256/1727] Mark unstable versions as unstable in /capabilities --- src/api/client_server/capabilities.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 97529cf..31d42d2 100644 --- a/src/api/client_server/capabilities.rs +++ 
b/src/api/client_server/capabilities.rs @@ -11,14 +11,8 @@ pub async fn get_capabilities_route( _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - if services().globals.allow_unstable_room_versions() { - for room_version in &services().globals.unstable_room_versions { - available.insert(room_version.clone(), RoomVersionStability::Stable); - } - } else { - for room_version in &services().globals.unstable_room_versions { - available.insert(room_version.clone(), RoomVersionStability::Unstable); - } + for room_version in &services().globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Unstable); } for room_version in &services().globals.stable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); From 3a8321f9adcb575370fa2c606c2a212c1f6f03f3 Mon Sep 17 00:00:00 2001 From: AndSDev Date: Mon, 29 Aug 2022 14:09:08 +0000 Subject: [PATCH 1257/1727] feat(db/rooms): encryption is not allowed in the admins room --- src/service/rooms/timeline/mod.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 619dca2..32781f2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -22,6 +22,7 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, state_res, + state_res::Event, state_res::RoomVersion, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomAliasId, RoomId, UserId, @@ -683,6 +684,22 @@ impl Service { let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; + let admin_room = services().rooms.alias.resolve_local_alias( + <&RoomAliasId>::try_from( + format!("#admins:{}", services().globals.server_name()).as_str(), + ) + .expect("#admins:server_name is a valid room alias"), + )?; + if admin_room.filter(|v| v == room_id).is_some() { + if pdu.event_type() == &RoomEventType::RoomEncryption { + warn!("Encryption is not allowed in the admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption is not allowed in the admins room.", + )); + } + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = services().rooms.state.append_to_state(&pdu)?; From c67f95ebffd60a9efe222816c742b1522e06578e Mon Sep 17 00:00:00 2001 From: AndSDev Date: Wed, 31 Aug 2022 07:10:54 +0000 Subject: [PATCH 1258/1727] feat(db/rooms): disable leaving from admin room for conduit user --- src/service/rooms/timeline/mod.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 32781f2..2f56fea 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -698,6 +698,26 @@ impl Service { "Encryption is not allowed in the admins room.", )); } + if pdu.event_type() == &RoomEventType::RoomMember { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if content.membership == MembershipState::Leave { + let server_user = format!("@conduit:{}", services().globals.server_name()); + if sender == &server_user { + warn!("Conduit user cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Conduit user cannot leave from admins room.", + )); + } + } + } } // We append to state before appending the pdu, so we don't have a moment in time with the From da2dbd2877d3b7b1edc2a05bb5593759d8072794 Mon Sep 17 00:00:00 2001 From: AndSDev Date: Wed, 31 Aug 2022 07:11:28 +0000 Subject: [PATCH 1259/1727] feat(db/rooms): disable leaving from admin room for last user --- src/service/rooms/timeline/mod.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2f56fea..5d28628 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -716,6 +716,21 @@ impl Service { "Conduit user cannot leave from admins room.", )); } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|m| m.ok()) + .filter(|m| m.server_name() == services().globals.server_name()) + .count(); + if count < 3 { + warn!("Last admin cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Last admin cannot leave from admins room.", + )); + } } } } From 912491cb28f88fb0ed9cdd1aac4a76a307cbec03 Mon Sep 17 00:00:00 2001 From: AndSDev Date: Wed, 31 Aug 2022 08:22:45 +0000 Subject: [PATCH 1260/1727] style(db/rooms): refactor admin room pdu validating --- src/service/rooms/timeline/mod.rs | 75 ++++++++++++++++--------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5d28628..9ededd2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -691,47 +691,50 @@ impl Service { .expect("#admins:server_name is a valid room alias"), )?; if admin_room.filter(|v| v == room_id).is_some() { - if pdu.event_type() == &RoomEventType::RoomEncryption { - warn!("Encryption is not allowed in the admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Encryption is not allowed in the admins room.", - )); - } - if pdu.event_type() == &RoomEventType::RoomMember { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, + match pdu.event_type() { + RoomEventType::RoomEncryption => { + warn!("Encryption is not allowed in the admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption is not 
allowed in the admins room.", + )); } - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if content.membership == MembershipState::Leave { - let server_user = format!("@conduit:{}", services().globals.server_name()); - if sender == &server_user { - warn!("Conduit user cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot leave from admins room.", - )); + RoomEventType::RoomMember => { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, } - let count = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == services().globals.server_name()) - .count(); - if count < 3 { - warn!("Last admin cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot leave from admins room.", - )); + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if content.membership == MembershipState::Leave { + let server_user = format!("@conduit:{}", services().globals.server_name()); + if sender == &server_user { + warn!("Conduit user cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Conduit user cannot leave from admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|m| m.ok()) + .filter(|m| m.server_name() == services().globals.server_name()) + .count(); + if count < 3 { + warn!("Last admin cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Last admin cannot leave from admins room.", + )); + } } } + _ => {} } } From 9c922db14b53c9184f7a27ea2da75fe607efa580 Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 13:35:58 -0500 Subject: [PATCH 1261/1727] Lower default log level Update config-example.toml to accordingly Closes #281 --- conduit-example.toml | 2 +- src/config/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 5eed070..0549030 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -46,7 +46,7 @@ enable_lightning_bolt = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. 
diff --git a/src/config/mod.rs b/src/config/mod.rs index 31d96b6..645f440 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -250,7 +250,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,state_res=warn,_=off,sled=off".to_owned() + "warn,state_res=warn,_=off,sled=off".to_owned() } fn default_turn_ttl() -> u64 { From 3a40bf8ae07285882477ca83e7da714ddf02c73d Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 14:30:20 -0500 Subject: [PATCH 1262/1727] Add error for invalid log config Log config falls back to "warn" --- src/main.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 0836841..bf8bc11 100644 --- a/src/main.rs +++ b/src/main.rs @@ -110,9 +110,13 @@ async fn main() { start.await; } else { let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = EnvFilter::try_new(&config.log) - .or_else(|_| EnvFilter::try_new("info")) - .unwrap(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your log config is invalid. The following error occurred: {}", e); + EnvFilter::try_new("warn").unwrap() + }, + }; let subscriber = registry.with(filter_layer).with(fmt_layer); tracing::subscriber::set_global_default(subscriber).unwrap(); From 3e6c66b899bfcbf9ba07f79ec406dc0c809c8216 Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 15:29:05 -0500 Subject: [PATCH 1263/1727] Fix formatting --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index bf8bc11..78a38ad 100644 --- a/src/main.rs +++ b/src/main.rs @@ -115,7 +115,7 @@ async fn main() { Err(e) => { eprintln!("It looks like your log config is invalid. The following error occurred: {}", e); EnvFilter::try_new("warn").unwrap() - }, + } }; let subscriber = registry.with(filter_layer).with(fmt_layer); From 7451abe3ea7dc5fb9225b45a0672be7bb4194d9c Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 15:58:50 -0500 Subject: [PATCH 1264/1727] Lower default log level for docker and debian --- DEPLOY.md | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index f0990dc..1c7d1af 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -136,7 +136,7 @@ allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. 
diff --git a/docker-compose.yml b/docker-compose.yml index 0a9d8f4..d9c32b5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,7 +31,7 @@ services: CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: info,rocket=off,_=off,sled=off + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this # From 92f7f0c849edfa19ca1fd2e3f3702ca64d40ce89 Mon Sep 17 00:00:00 2001 From: exin Date: Sun, 26 Jun 2022 09:20:11 -0500 Subject: [PATCH 1265/1727] Lower log level commented config options --- debian/postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/postinst b/debian/postinst index aab2480..73e554b 100644 --- a/debian/postinst +++ b/debian/postinst @@ -77,7 +77,7 @@ allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" EOF fi ;; From 98702da4e66404db4d987fdea42a758425f40707 Mon Sep 17 00:00:00 2001 From: exin Date: Sun, 26 Jun 2022 09:26:04 -0500 Subject: [PATCH 1266/1727] Lower default log level for docker --- docker/README.md | 2 +- docker/docker-compose.for-traefik.yml | 2 +- docker/docker-compose.with-traefik.yml | 2 +- tests/Complement.Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index c980adc..36717c4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -33,7 +33,7 @@ docker run -d -p 8448:6167 \ -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ - -e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \ + -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ --name conduit matrixconduit/matrix-conduit:latest ``` diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml index ca560b8..474299f 100644 --- a/docker/docker-compose.for-traefik.yml +++ b/docker/docker-compose.for-traefik.yml @@ -31,7 +31,7 @@ services: CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: info,rocket=off,_=off,sled=off + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml index 6d46827..79ebef4 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" + # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 22016e9..b9d0f8c 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -33,7 +33,7 @@ RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN echo "allow_federation = true" >> conduit.toml RUN echo "allow_encryption = true" >> conduit.toml RUN echo 
"allow_registration = true" >> conduit.toml -RUN echo "log = \"info,_=off,sled=off\"" >> conduit.toml +RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. From 76f81ac2010746c5cba1105a37187ce2e59d2ffe Mon Sep 17 00:00:00 2001 From: AndSDev Date: Tue, 6 Sep 2022 11:32:33 +0000 Subject: [PATCH 1267/1727] feat(db/rooms): disable banning for last user and conduit user in admins room --- src/service/rooms/timeline/mod.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9ededd2..66a8196 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -733,6 +733,32 @@ impl Service { )); } } + + if content.membership == MembershipState::Ban && pdu.state_key().is_some() { + let server_user = format!("@conduit:{}", services().globals.server_name()); + if pdu.state_key().as_ref().unwrap() == &server_user { + warn!("Conduit user cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Conduit user cannot be banned in admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|m| m.ok()) + .filter(|m| m.server_name() == services().globals.server_name()) + .count(); + if count < 3 { + warn!("Last admin cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Last admin cannot be banned in admins room.", + )); + } + } } _ => {} } From d755a96c2c3422849fdac8792e5d91141f837a18 Mon Sep 17 00:00:00 2001 From: AndSDev Date: Thu, 13 Oct 2022 11:19:51 +0000 Subject: [PATCH 1268/1727] refactor(service/rooms/timeline): add cache for server_name --- src/service/rooms/timeline/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 66a8196..9494f67 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -705,11 +705,12 @@ impl Service { membership: MembershipState, } + let server_name = services().globals.server_name(); let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if content.membership == MembershipState::Leave { - let server_user = format!("@conduit:{}", services().globals.server_name()); + let server_user = format!("@conduit:{}", server_name); if sender == &server_user { warn!("Conduit user cannot leave from admins room"); return Err(Error::BadRequest( @@ -723,7 +724,7 @@ impl Service { .state_cache .room_members(room_id) .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == services().globals.server_name()) + .filter(|m| m.server_name() == server_name) .count(); if count < 3 { warn!("Last admin cannot leave from admins room"); @@ -735,7 +736,7 @@ impl Service { } if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - let server_user = format!("@conduit:{}", services().globals.server_name()); + let server_user = format!("@conduit:{}", server_name); if pdu.state_key().as_ref().unwrap() == &server_user { warn!("Conduit user cannot be banned in admins room"); return Err(Error::BadRequest( @@ -749,7 +750,7 @@ impl Service { .state_cache .room_members(room_id) .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == services().globals.server_name()) + .filter(|m| m.server_name() == 
server_name) .count(); if count < 3 { warn!("Last admin cannot be banned in admins room"); From bf7c4b4001ef2fdd08b1145169c6465f2f97eda3 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 13 Oct 2022 08:06:49 -0700 Subject: [PATCH 1269/1727] update rust to avoid a cargo problem We were hitting [this bug][0] when trying to select a version for clap ^4. [0]: https://github.com/rust-lang/cargo/issues/10623 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8a76c47..a089f02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.58-bullseye AS builder +FROM docker.io/rust:1.60-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies From 286936db3213114ae565e93a4065601a60912d0f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 13 Oct 2022 08:26:41 -0700 Subject: [PATCH 1270/1727] msrv is 1.63 in Cargo.toml; use that --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a089f02..3154ebb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.60-bullseye AS builder +FROM docker.io/rust:1.63-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies From 842feabced55fb77060369c41a18c6ec27fcd70f Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 13 Oct 2022 20:02:36 +0100 Subject: [PATCH 1271/1727] fix: update persy implementation after refactor --- src/database/abstraction/persy.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index e78e731..1fa7a0d 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -1,6 +1,6 @@ use crate::{ database::{ - abstraction::{watchers::Watchers, DatabaseEngine, Tree}, + abstraction::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}, Config, }, Result, @@ -15,7 +15,7 @@ pub struct Engine { persy: Persy, } -impl DatabaseEngine for Arc { +impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let mut cfg = persy::Config::new(); cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); @@ -27,7 +27,7 @@ impl DatabaseEngine for Arc { Ok(Arc::new(Engine { persy })) } - fn open_tree(&self, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { // Create if it doesn't exist if !self.persy.exists_index(name)? 
{ let mut tx = self.persy.begin()?; @@ -61,7 +61,7 @@ impl PersyTree { } } -impl Tree for PersyTree { +impl KvTree for PersyTree { fn get(&self, key: &[u8]) -> Result>> { let result = self .persy From e923f63c4919a9eb65f074c8099fc7bc115925ee Mon Sep 17 00:00:00 2001 From: AndSDev Date: Fri, 14 Oct 2022 14:45:05 +0300 Subject: [PATCH 1272/1727] fix(service/rooms/timeline): fix validating for non-joined members --- src/service/rooms/timeline/mod.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9494f67..403b400 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -705,13 +705,17 @@ impl Service { membership: MembershipState, } + let target = pdu + .state_key() + .filter(|v| v.starts_with("@")) + .unwrap_or(sender.as_str()); let server_name = services().globals.server_name(); + let server_user = format!("@conduit:{}", server_name); let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if content.membership == MembershipState::Leave { - let server_user = format!("@conduit:{}", server_name); - if sender == &server_user { + if target == &server_user { warn!("Conduit user cannot leave from admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -725,8 +729,9 @@ impl Service { .room_members(room_id) .filter_map(|m| m.ok()) .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) .count(); - if count < 3 { + if count < 2 { warn!("Last admin cannot leave from admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -736,8 +741,7 @@ impl Service { } if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - let server_user = format!("@conduit:{}", server_name); - if pdu.state_key().as_ref().unwrap() == &server_user { + if target == &server_user { warn!("Conduit user cannot be banned in admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -751,8 +755,9 @@ impl Service { .room_members(room_id) .filter_map(|m| m.ok()) .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) .count(); - if count < 3 { + if count < 2 { warn!("Last admin cannot be banned in admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, From 3a45628e1d0d8fe4b9b6227ba26e448879ce03f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 15 Oct 2022 00:28:43 +0200 Subject: [PATCH 1273/1727] fix: send unrecognized error on wrong http methods --- src/api/appservice_server.rs | 4 +++- src/api/client_server/keys.rs | 5 +++-- src/api/client_server/read_marker.rs | 5 ++++- src/api/server_server.rs | 7 +++++-- src/database/key_value/sending.rs | 2 +- src/main.rs | 25 +++++++++++++++++++++++-- src/service/pusher/mod.rs | 2 +- 7 files changed, 40 insertions(+), 10 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 339a0c2..dc319e2 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -55,7 +55,9 @@ where Err(e) => { warn!( "Could not send request to appservice {:?} at {}: {}", - registration.get("id"), destination, e + registration.get("id"), + destination, + e ); return Err(e.into()); } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 86cfaa4..b649166 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -443,7 +443,7 @@ pub(crate) async fn claim_keys_helper( let mut futures: 
FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { - let mut one_time_keys_input_fed = BTreeMap::new(); + let mut one_time_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); } @@ -459,7 +459,8 @@ pub(crate) async fn claim_keys_helper( ) .await, ) - }).collect(); + }) + .collect(); while let Some((server, response)) = futures.next().await { match response { diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 48520fc..d529c6a 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,7 +1,10 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::{receipt::{ReceiptType, ReceiptThread}, RoomAccountDataEventType}, + events::{ + receipt::{ReceiptThread, ReceiptType}, + RoomAccountDataEventType, + }, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0064a86..320e396 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -305,9 +305,12 @@ where } } Err(e) => { - warn!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); + warn!( + "Could not send request to {} at {}: {}", + destination, actual_destination_str, e + ); Err(e.into()) - }, + } } } diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index fcbe0f3..3fc3e04 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -6,7 +6,7 @@ use crate::{ self, sending::{OutgoingKind, SendingEventType}, }, - utils, Error, Result, services, + services, utils, Error, Result, }; impl service::sending::Data for KeyValueDatabase { diff --git a/src/main.rs b/src/main.rs index 78a38ad..626de3a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -145,6 +145,7 @@ async fn run_server() -> io::Result<()> { }), ) .compression() + .layer(axum::middleware::from_fn(unrecognized_method)) .layer( CorsLayer::new() .allow_origin(cors::Any) @@ -187,6 +188,22 @@ async fn run_server() -> io::Result<()> { Ok(()) } +async fn unrecognized_method( + req: axum::http::Request, + next: axum::middleware::Next, +) -> std::result::Result { + let method = req.method().clone(); + let uri = req.uri().clone(); + let inner = next.run(req).await; + if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { + warn!("Method not allowed: {method} {uri}"); + return Ok( + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request").into_response(), + ); + } + Ok(inner) +} + fn routes() -> Router { Router::new() .ruma_route(client_server::get_supported_versions_route) @@ -386,12 +403,16 @@ async fn shutdown_signal(handle: ServerHandle) { handle.graceful_shutdown(Some(Duration::from_secs(30))); } -async fn not_found(_uri: Uri) -> impl IntoResponse { +async fn not_found(uri: Uri) -> impl IntoResponse { + warn!("Not found: {uri}"); Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") } async fn initial_sync(_uri: Uri) -> impl IntoResponse { - Error::BadRequest(ErrorKind::GuestAccessForbidden, "Guest access not implemented") + Error::BadRequest( + ErrorKind::GuestAccessForbidden, + "Guest access not implemented", + ) } trait RouterExt { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 767687d..7fee276 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ 
-131,7 +131,7 @@ impl Service { Err(e) => { warn!("Could not send request to pusher {}: {}", destination, e); Err(e.into()) - }, + } } } From 3b0aa23fdfb0106add15df4f80e68e33b5ff5de7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 13 Feb 2022 22:00:36 +0000 Subject: [PATCH 1274/1727] Rejoin room over federation if we are not participating in it; do not include invited users in participating servers calculation --- src/api/client_server/membership.rs | 8 ++++++-- src/database/key_value/rooms/state_cache.rs | 3 +-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b69a6d1..ca33993 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -474,8 +474,12 @@ async fn join_room_by_id_helper( ); let state_lock = mutex_state.lock().await; - // Ask a remote server if we don't have this room - if !services().rooms.metadata.exists(room_id)? { + // Ask a remote server if we are not participating in this room + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? + { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index ff4594f..d0ea0c2 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -108,8 +108,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { joinedcount += 1; } - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); + for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { invitedcount += 1; } From e9697f13d680abff7917264f76c96929f18d9e7d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 9 Sep 2022 20:38:36 +0200 Subject: [PATCH 1275/1727] Handle initiating restricted joins over federation Allows Conduit users to join restricted rooms if we are not currently participating --- src/api/client_server/membership.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b69a6d1..975d1a5 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -17,7 +17,7 @@ use ruma::{ }, serde::Base64, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - RoomId, RoomVersionId, UserId, + OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -519,6 +519,15 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid make_join event json received from server.") })?; + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + // TODO: Is origin needed? 
join_event_stub.insert( "origin".to_owned(), @@ -542,7 +551,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, reason: None, - join_authorized_via_users_server: None, + join_authorized_via_users_server, }) .expect("event is valid, we just created it"), ); From cc3e1f58cc2526aea55d974d0ec89a58a81fba14 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 10:42:14 +0200 Subject: [PATCH 1276/1727] Bump default room version to V9; per matrix spec recommendation --- src/config/mod.rs | 4 ++-- src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 645f440..3c3a764 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -258,6 +258,6 @@ fn default_turn_ttl() -> u64 { } // I know, it's a great name -fn default_default_room_version() -> RoomVersionId { - RoomVersionId::V6 +pub fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V9 } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 942df1c..5766b2f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -871,7 +871,7 @@ impl Service { let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; - content.room_version = RoomVersionId::V6; + content.room_version = services().globals.default_room_version(); // 1. The room create event services().rooms.timeline.build_and_append_pdu( diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index d4c9dad..b2fadc7 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -172,8 +172,8 @@ impl Service { .supported_room_versions() .contains(&s.config.default_room_version) { - error!("Room version in config isn't supported, falling back to Version 6"); - s.config.default_room_version = RoomVersionId::V6; + error!("Room version in config isn't supported, falling back to default version"); + s.config.default_room_version = crate::config::default_default_room_version(); }; Ok(s) From 1e1a144dfa98429ef9f02d16045796b73013830d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 12:16:32 +0200 Subject: [PATCH 1277/1727] Move room version 10 out of experimental/unstable --- src/service/globals/mod.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index b2fadc7..affc051 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -127,14 +127,10 @@ impl Service { RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = vec![ - RoomVersionId::V3, - RoomVersionId::V4, - RoomVersionId::V5, RoomVersionId::V10, ]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { db, From 2d0fdddd34cc922b6de23ac1f77fcce5975b8249 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 13:17:58 +0200 Subject: [PATCH 1278/1727] Do not return true for is_guest on whoami for appservice users --- src/api/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 51343ae..ce4dadd 100644 --- a/src/api/client_server/account.rs +++ 
b/src/api/client_server/account.rs @@ -338,7 +338,7 @@ pub async fn whoami_route(body: Ruma) -> Result Date: Sat, 10 Sep 2022 18:14:29 +0200 Subject: [PATCH 1279/1727] Raise 404 when room doesn't exist Raise 404 "Room not found" when changing or accessing room visibility settings (`GET` and `PUT /_matrix/client/r0/directory/list/room/{roomId}`). See issue #290 --- src/api/client_server/directory.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 7c4aa50..781e966 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -85,6 +85,14 @@ pub async fn set_room_visibility_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if !db.rooms.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room not found", + )); + } + match &body.visibility { room::Visibility::Public => { services().rooms.directory.set_public(&body.room_id)?; @@ -108,6 +116,15 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( body: Ruma, ) -> Result { + + if !db.rooms.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room not found", + )); + } + Ok(get_room_visibility::v3::Response { visibility: if services().rooms.directory.is_public_room(&body.room_id)? { room::Visibility::Public From 2231a69b4c91e4b774975dae653488ab640b6a23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 15 Oct 2022 14:07:27 +0200 Subject: [PATCH 1280/1727] fix: make previous MR compile --- src/api/client_server/directory.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 781e966..a7381d8 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -85,7 +85,7 @@ pub async fn set_room_visibility_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.exists(&body.room_id)? { + if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, @@ -117,7 +117,7 @@ pub async fn get_room_visibility_route( body: Ruma, ) -> Result { - if !db.rooms.exists(&body.room_id)? { + if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, From fe7d8c4f1279f0498fdfccf0b49d9b544c449e79 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 15 Oct 2022 13:02:13 -0700 Subject: [PATCH 1281/1727] add nix flake Also add `.envrc` for direnv + Nix users. This makes developing locally easier for us NixOS folks. The flake itself will allow NixOS users to pull code directly from Conduit's repository, making it completely trivial to stay up-to-date with every commit. I'd also like to add a NixOS module directly to this repository at some point so that new configuration options will be available in the NixOS module faster. But for now, NixOS users can simply override `serivces.matrix-conduit.package` and get pretty much all the functionality. I've added myself to the `CODEOWNERS` file for the Nix files, since I am willing to maintain this stuff. I use Conduit on NixOS so I'm personally invested in having this work. 
Lastly, `.gitignore` was updated to exclude symlinks created by `direnv` and `nix build` and other such Nix commands. This doesn't come without maintenance burden, however: * The `sha256` in `flake.nix` will need to be updated whenever Conduit's MSRV is updated, but that should be pretty infrequent. * `nix flake update` should be run every so often to pull in updates to `nixpkgs` and other flake inputs. I think downstream users can also override this themselves with `inputs..inputs..follows`. * `nix flake check` should be run in CI to ensure Nix builds keep working. * `nixpkgs-fmt --check $(fd '\.nix')` (or similar) should be run in CI to ensure style uniformity. --- .envrc | 1 + .gitignore | 6 +++ .gitlab/CODEOWNERS | 4 ++ flake.lock | 102 +++++++++++++++++++++++++++++++++++++++++++++ flake.nix | 75 +++++++++++++++++++++++++++++++++ 5 files changed, 188 insertions(+) create mode 100644 .envrc create mode 100644 .gitlab/CODEOWNERS create mode 100644 flake.lock create mode 100644 flake.nix diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..3550a30 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake diff --git a/.gitignore b/.gitignore index f5e9505..19f05ce 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,9 @@ conduit.db # Etc. **/*.rs.bk + +# Nix artifacts +/result* + +# Direnv cache +/.direnv diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS new file mode 100644 index 0000000..20ec9fa --- /dev/null +++ b/.gitlab/CODEOWNERS @@ -0,0 +1,4 @@ +# Nix things +.envrc @CobaltCause +flake.lock @CobaltCause +flake.nix @CobaltCause diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..9217ff2 --- /dev/null +++ b/flake.lock @@ -0,0 +1,102 @@ +{ + "nodes": { + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1665815894, + "narHash": "sha256-Vboo1L4NMGLKZKVLnOPi9OHlae7uoNyfgvyIUm+SVXE=", + "owner": "nix-community", + "repo": "fenix", + "rev": "2348450241a5f945f0ba07e44ecbfac2f541d7f4", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + "flake-utils": { + "locked": { + "lastModified": 1659877975, + "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "naersk": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1662220400, + "narHash": "sha256-9o2OGQqu4xyLZP9K6kNe1pTHnyPz0Wr3raGYnr9AIgY=", + "owner": "nix-community", + "repo": "naersk", + "rev": "6944160c19cb591eb85bbf9b2f2768a935623ed3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "naersk", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1665856037, + "narHash": "sha256-/RvIWnGKdTSoIq5Xc2HwPIL0TzRslzU6Rqk4Img6UNg=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c95ebc5125ffffcd431df0ad8620f0926b8125b8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "fenix": "fenix", + "flake-utils": "flake-utils", + "naersk": "naersk", + "nixpkgs": "nixpkgs" + } + }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1665765556, + "narHash": "sha256-w9L5j0TIB5ay4aRwzGCp8mgvGsu5dVJQvbEFutwr6xE=", + "owner": "rust-lang", + "repo": 
"rust-analyzer", + "rev": "018b8429cf3fa9d8aed916704e41dfedeb0f4f78", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..924300c --- /dev/null +++ b/flake.nix @@ -0,0 +1,75 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs"; + flake-utils.url = "github:numtide/flake-utils"; + + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + naersk = { + url = "github:nix-community/naersk"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = + { self + , nixpkgs + , flake-utils + + , fenix + , naersk + }: flake-utils.lib.eachDefaultSystem (system: + let + pkgs = nixpkgs.legacyPackages.${system}; + + # Nix-accessible `Cargo.toml` + cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); + + # The Rust toolchain to use + toolchain = fenix.packages.${system}.toolchainOf { + # Use the Rust version defined in `Cargo.toml` + channel = cargoToml.package.rust-version; + + # This will need to be updated when `package.rust-version` is changed in + # `Cargo.toml` + sha256 = "sha256-KXx+ID0y4mg2B3LHp7IyaiMrdexF6octADnAtFIOjrY="; + }; + + builder = (pkgs.callPackage naersk { + inherit (toolchain) rustc cargo; + }).buildPackage; + in + { + packages.default = builder { + src = ./.; + + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]); + }; + + devShells.default = pkgs.mkShell { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so + RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; + + # Development tools + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]) ++ (with toolchain; [ + cargo + clippy + rust-src + rustc + rustfmt + ]); + }; + + checks = { + packagesDefault = self.packages.${system}.default; + devShellsDefault = self.devShells.${system}.default; + }; + }); +} From 716f82db6dbd324bbd0b142db740e8f5e39b2a6c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 15 Oct 2022 15:21:04 -0700 Subject: [PATCH 1282/1727] add nix/nixos deployment documentation --- .gitlab/CODEOWNERS | 1 + README.md | 1 + nix/README.md | 188 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 190 insertions(+) create mode 100644 nix/README.md diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS index 20ec9fa..665aaaa 100644 --- a/.gitlab/CODEOWNERS +++ b/.gitlab/CODEOWNERS @@ -2,3 +2,4 @@ .envrc @CobaltCause flake.lock @CobaltCause flake.nix @CobaltCause +nix/ @CobaltCause diff --git a/README.md b/README.md index 730b251..ab47176 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,7 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit - Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) - Debian package: [debian/README.Debian](debian/README.Debian) +- Nix/NixOS: [nix/README.md](nix/README.md) - Docker: [docker/README.md](docker/README.md) If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). diff --git a/nix/README.md b/nix/README.md new file mode 100644 index 0000000..d92f910 --- /dev/null +++ b/nix/README.md @@ -0,0 +1,188 @@ +# Conduit for Nix/NixOS + +This guide assumes you have a recent version of Nix (^2.4) installed. + +Since Conduit ships as a Nix flake, you'll first need to [enable +flakes][enable_flakes]. 
+ +You can now use the usual Nix commands to interact with Conduit's flake. For +example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need +to provide configuration and such manually as usual). + +If your NixOS configuration is defined as a flake, you can depend on this flake +to provide a more up-to-date version than provided by `nixpkgs`. In your flake, +add the following to your `inputs`: + +```nix +conduit = { + url = "gitlab:famedly/conduit"; + + # Assuming you have an input for nixpkgs called `nixpkgs`. If you experience + # build failures while using this, try commenting/deleting this line. This + # will probably also require you to always build from source. + inputs.nixpkgs.follows = "nixpkgs"; +}; +``` + +Next, make sure you're passing your flake inputs to the `specialArgs` argument +of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will +assume you've named the group `flake-inputs`. + +Now you can configure Conduit and a reverse proxy for it. Add the following to +a new Nix file and include it in your configuration: + +```nix +{ config +, pkgs +, flake-inputs +, ... +}: + +let + # You'll need to edit these values + + # The hostname that will appear in your user and room IDs + server_name = "example.com"; + + # The hostname that Conduit actually runs on + # + # This can be the same as `server_name` if you want. This is only necessary + # when Conduit is running on a different machine than the one hosting your + # root domain. This configuration also assumes this is all running on a single + # machine, some tweaks will need to be made if this is not the case. + matrix_hostname = "matrix.${server_name}"; + + # An admin email for TLS certificate notifications + admin_email = "admin@${server_name}"; + + # These ones you can leave alone + + # Build a dervation that stores the content of `${server_name}/.well-known/matrix/server` + well_known_server = pkgs.writeText "well-known-matrix-server" '' + { + "m.server": "${matrix_hostname}" + } + ''; + + # Build a dervation that stores the content of `${server_name}/.well-known/matrix/client` + well_known_client = pkgs.writeText "well-known-matrix-client" '' + { + "m.homeserver": { + "base_url": "https://${matrix_hostname}" + } + } + ''; +in + +{ + # Configure Conduit itself + services.matrix-conduit = { + enable = true; + + # This causes NixOS to use the flake defined in this repository instead of + # the build of Conduit built into nixpkgs. 
+ package = flake-inputs.conduit.packages.${pkgs.system}.default; + + settings.global = { + inherit server_name; + }; + }; + + # Configure automated TLS acquisition/renewal + security.acme = { + acceptTerms = true; + defaults = { + email = admin_email; + }; + }; + + # ACME data must be readable by the NGINX user + users.users.nginx.extraGroups = [ + "acme" + ]; + + # Configure NGINX as a reverse proxy + services.nginx = { + enable = true; + recommendedProxySettings = true; + + virtualHosts = { + "${server_name}" = { + forceSSL = true; + enableACME = true; + + listen = [ + { + addr = "0.0.0.0"; + port = 443; + ssl = true; + } + { + addr = "0.0.0.0"; + port = 8448; + ssl = true; + } + ]; + + extraConfig = '' + merge_slashes off; + ''; + + "${matrix_hostname}" = { + forceSSL = true; + enableACME = true; + + locations."/_matrix/" = { + proxyPass = "http://backend_conduit$request_uri"; + proxyWebsockets = true; + extraConfig = '' + proxy_set_header Host $host; + proxy_buffering off; + ''; + }; + + locations."=/.well-known/matrix/server" = { + # Use the contents of the derivation built previously + alias = "${well_known_server}"; + + extraConfig = '' + # Set the header since by default NGINX thinks it's just bytes + default_type application/json; + ''; + }; + + locations."=/.well-known/matrix/client" = { + # Use the contents of the derivation built previously + alias = "${well_known_client}"; + + extraConfig = '' + # Set the header since by default NGINX thinks it's just bytes + default_type application/json; + + # https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients + add_header Access-Control-Allow-Origin "*"; + ''; + }; + }; + }; + + upstreams = { + "backend_conduit" = { + servers = { + "localhost:${toString config.services.matrix-conduit.settings.global.port}" = { }; + }; + }; + }; + }; + + # Open firewall ports for HTTP, HTTPS, and Matrix federation + networking.firewall.allowedTCPPorts = [ 80 443 8448 ]; + networking.firewall.allowedUDPPorts = [ 80 443 8448 ]; +} +``` + +Now you can rebuild your system configuration and you should be good to go! + +[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes + +[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS From ada15ceacc4a18d53b40a7d84216404ae79aa888 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 17 Oct 2022 18:41:45 +0200 Subject: [PATCH 1283/1727] Complement improvements --- .gitignore | 1 + complement/Dockerfile | 47 ++++++++++++++++++++++++ complement/caddy.json | 72 +++++++++++++++++++++++++++++++++++++ tests/Complement.Dockerfile | 48 ------------------------- 4 files changed, 120 insertions(+), 48 deletions(-) create mode 100644 complement/Dockerfile create mode 100644 complement/caddy.json delete mode 100644 tests/Complement.Dockerfile diff --git a/.gitignore b/.gitignore index f5e9505..f5cbfaa 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,4 @@ conduit.db # Etc. **/*.rs.bk +cached_target \ No newline at end of file diff --git a/complement/Dockerfile b/complement/Dockerfile new file mode 100644 index 0000000..43416fa --- /dev/null +++ b/complement/Dockerfile @@ -0,0 +1,47 @@ +# For use in our CI only. 
This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit +FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder +#FROM rust:latest as builder + +WORKDIR /workdir + +ARG RUSTC_WRAPPER +ARG AWS_ACCESS_KEY_ID +ARG AWS_SECRET_ACCESS_KEY +ARG SCCACHE_BUCKET +ARG SCCACHE_ENDPOINT +ARG SCCACHE_S3_USE_SSL + +COPY . . +RUN mkdir -p target/release +RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release + +## Actual image +FROM debian:bullseye +WORKDIR /workdir + +# Install caddy +RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy + +COPY conduit-example.toml conduit.toml +COPY complement/caddy.json caddy.json + +ENV SERVER_NAME=localhost +ENV CONDUIT_CONFIG=/workdir/conduit.toml + +RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml +RUN echo "allow_federation = true" >> conduit.toml +RUN echo "allow_encryption = true" >> conduit.toml +RUN echo "allow_registration = true" >> conduit.toml +RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml +RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml + +COPY --from=builder /workdir/target/release/conduit /workdir/conduit +RUN chmod +x /workdir/conduit + +EXPOSE 8008 8448 + +CMD uname -a && \ + sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ + sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ + caddy start --config caddy.json > /dev/null && \ + /workdir/conduit diff --git a/complement/caddy.json b/complement/caddy.json new file mode 100644 index 0000000..ea52c2c --- /dev/null +++ b/complement/caddy.json @@ -0,0 +1,72 @@ +{ + "logging": { + "logs": { + "default": { + "level": "WARN" + } + } + }, + "apps": { + "http": { + "https_port": 8448, + "servers": { + "srv0": { + "listen": [":8448"], + "routes": [{ + "match": [{ + "host": ["your.server.name"] + }], + "handle": [{ + "handler": "subroute", + "routes": [{ + "handle": [{ + "handler": "reverse_proxy", + "upstreams": [{ + "dial": "127.0.0.1:8008" + }] + }] + }] + }], + "terminal": true + }], + "tls_connection_policies": [{ + "match": { + "sni": ["your.server.name"] + } + }] + } + } + }, + "pki": { + "certificate_authorities": { + "local": { + "name": "Complement CA", + "root": { + "certificate": "/complement/ca/ca.crt", + "private_key": "/complement/ca/ca.key" + }, + "intermediate": { + "certificate": "/complement/ca/ca.crt", + "private_key": "/complement/ca/ca.key" + } + } + } + }, + "tls": { + "automation": { + "policies": [{ + "subjects": ["your.server.name"], + "issuers": [{ + "module": "internal" + }], + "on_demand": true + }, { + "issuers": [{ + "module": "internal", + "ca": "local" + }] + }] + } + } + } +} \ No newline at end of file diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile deleted file mode 100644 index b9d0f8c..0000000 --- a/tests/Complement.Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -# For use in our CI only. 
This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit -FROM valkum/docker-rust-ci:latest as builder -WORKDIR /workdir - -ARG RUSTC_WRAPPER -ARG AWS_ACCESS_KEY_ID -ARG AWS_SECRET_ACCESS_KEY -ARG SCCACHE_BUCKET -ARG SCCACHE_ENDPOINT -ARG SCCACHE_S3_USE_SSL - -COPY . . -RUN mkdir -p target/release -RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release - - -FROM valkum/docker-rust-ci:latest -WORKDIR /workdir - -RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_amd64.tar.gz" -RUN tar xzf caddy_2.2.1_linux_amd64.tar.gz - -COPY cached_target/release/conduit /workdir/conduit -RUN chmod +x /workdir/conduit -RUN chmod +x /workdir/caddy - -COPY conduit-example.toml conduit.toml - -ENV SERVER_NAME=localhost -ENV CONDUIT_CONFIG=/workdir/conduit.toml - -RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml -RUN echo "allow_federation = true" >> conduit.toml -RUN echo "allow_encryption = true" >> conduit.toml -RUN echo "allow_registration = true" >> conduit.toml -RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml -RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml - -# Enabled Caddy auto cert generation for complement provided CA. -RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json - -EXPOSE 8008 8448 - -CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ - sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ - sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ - /workdir/caddy start --config caddy.json > /dev/null && \ - /workdir/conduit From 215d909e5986600b8b9eeb07edfff7a90d8f75bb Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 17 Oct 2022 18:41:59 +0200 Subject: [PATCH 1284/1727] More debug info when try_from_http_request fails --- src/api/ruma_wrapper/axum.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index d056f3f..3870250 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -281,7 +281,10 @@ where debug!("{:?}", http_request); let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!("{:?}\n{:?}", e, json_body); + warn!( + "try_from_http_request failed: {:?}\nJSON body: {:?}", + e, json_body + ); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; From 9c0c74f547f5088de3e74cf1dc01f58538922e16 Mon Sep 17 00:00:00 2001 From: Paul Beziau Date: Tue, 18 Oct 2022 09:15:07 +0000 Subject: [PATCH 1285/1727] Migrate database to use correct rule id in pushrules. 
it convert : - ".m.rules.call" to ".m.rule.call" - ".m.rules.room_one_to_one" to ".m.rule.room_one_to_one" - ".m.rules.encrypted_room_one_to_one" to ".m.rule.encrypted_room_one_to_one" - ".m.rules.message" to ".m.rule.message" - ".m.rules.encrypted" to ".m.rule.encrypted" related to issue #264 --- src/database/mod.rs | 68 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index 15ee137..dce4eff 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -7,7 +7,7 @@ use directories::ProjectDirs; use lru_cache::LruCache; use ruma::{ events::{ - push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, + push_rules::{PushRulesEventContent, PushRulesEvent}, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, @@ -801,6 +801,72 @@ impl KeyValueDatabase { warn!("Migration: 10 -> 11 finished"); } + if services().globals.database_version()? < 12 { + + for username in services().users.list_local_users().unwrap() { + + let user = UserId::parse_with_server_name(username, services().globals.server_name()) + .unwrap(); + + + let raw_rules_list = services().account_data + .get( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into()) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); + let rules_list = &mut account_data.content.global; + + //content rule + { + let content_rule_transformation = + [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; + + let rule = rules_list.content.get(content_rule_transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + rule.rule_id = content_rule_transformation[1].to_string(); + rules_list.content.remove(content_rule_transformation[0]); + rules_list.content.insert(rule); + } + } + + //underride rules + { + let underride_rule_transformation = + [[".m.rules.call", ".m.rule.call"], + [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], + [".m.rules.encrypted_room_one_to_one", ".m.rule.encrypted_room_one_to_one"], + [".m.rules.message", ".m.rule.message"], + [".m.rules.encrypted", ".m.rule.encrypted"]]; + + for transformation in underride_rule_transformation { + let rule = rules_list.underride.get(transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + rule.rule_id = transformation[1].to_string(); + rules_list.underride.remove(transformation[0]); + rules_list.underride.insert(rule); + } + } + } + + services().account_data.update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + } + + services().globals.bump_database_version(12)?; + + warn!("Migration: 11 -> 12 finished"); + } + assert_eq!(11, latest_database_version); info!( From d47c1a8ba678bf58b1ff8bdb65438aaaddc80188 Mon Sep 17 00:00:00 2001 From: Paul Beziau Date: Fri, 21 Oct 2022 12:27:11 +0000 Subject: [PATCH 1286/1727] Fix database version check & code formating --- src/database/mod.rs | 127 ++++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 59 deletions(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index dce4eff..ccc8177 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -7,7 +7,8 @@ use directories::ProjectDirs; use lru_cache::LruCache; use ruma::{ events::{ - push_rules::{PushRulesEventContent, 
PushRulesEvent}, room::message::RoomMessageEventContent, + push_rules::{PushRulesEvent, PushRulesEventContent}, + room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, @@ -405,7 +406,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 11; + let latest_database_version = 12; if services().users.count()? > 0 { // MIGRATIONS @@ -801,73 +802,81 @@ impl KeyValueDatabase { warn!("Migration: 10 -> 11 finished"); } - if services().globals.database_version()? < 12 { + if services().globals.database_version()? < 12 { + for username in services().users.list_local_users().unwrap() { + let user = + UserId::parse_with_server_name(username, services().globals.server_name()) + .unwrap(); - for username in services().users.list_local_users().unwrap() { + let raw_rules_list = services() + .account_data + .get( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) + .unwrap() + .expect("Username is invalid"); - let user = UserId::parse_with_server_name(username, services().globals.server_name()) - .unwrap(); + let mut account_data = + serde_json::from_str::(raw_rules_list.get()).unwrap(); + let rules_list = &mut account_data.content.global; + //content rule + { + let content_rule_transformation = + [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; - let raw_rules_list = services().account_data - .get( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into()) - .unwrap() - .expect("Username is invalid"); + let rule = rules_list.content.get(content_rule_transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + rule.rule_id = content_rule_transformation[1].to_string(); + rules_list.content.remove(content_rule_transformation[0]); + rules_list.content.insert(rule); + } + } - let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); - let rules_list = &mut account_data.content.global; + //underride rules + { + let underride_rule_transformation = [ + [".m.rules.call", ".m.rule.call"], + [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], + [ + ".m.rules.encrypted_room_one_to_one", + ".m.rule.encrypted_room_one_to_one", + ], + [".m.rules.message", ".m.rule.message"], + [".m.rules.encrypted", ".m.rule.encrypted"], + ]; - //content rule - { - let content_rule_transformation = - [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; + for transformation in underride_rule_transformation { + let rule = rules_list.underride.get(transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + rule.rule_id = transformation[1].to_string(); + rules_list.underride.remove(transformation[0]); + rules_list.underride.insert(rule); + } + } + } - let rule = rules_list.content.get(content_rule_transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); - rule.rule_id = content_rule_transformation[1].to_string(); - rules_list.content.remove(content_rule_transformation[0]); - rules_list.content.insert(rule); - } - } + services().account_data.update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + } - //underride rules - { - let underride_rule_transformation = - [[".m.rules.call", ".m.rule.call"], - [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], - [".m.rules.encrypted_room_one_to_one", 
".m.rule.encrypted_room_one_to_one"], - [".m.rules.message", ".m.rule.message"], - [".m.rules.encrypted", ".m.rule.encrypted"]]; + services().globals.bump_database_version(12)?; - for transformation in underride_rule_transformation { - let rule = rules_list.underride.get(transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); - rule.rule_id = transformation[1].to_string(); - rules_list.underride.remove(transformation[0]); - rules_list.underride.insert(rule); - } - } - } + warn!("Migration: 11 -> 12 finished"); + } - services().account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; - } - - services().globals.bump_database_version(12)?; - - warn!("Migration: 11 -> 12 finished"); - } - - assert_eq!(11, latest_database_version); + assert_eq!( + services().globals.database_version().unwrap(), + latest_database_version + ); info!( "Loaded {} database with version {}", From 10d2da30091d9e8aeaa9dee2ee626ecdf85059df Mon Sep 17 00:00:00 2001 From: AndSDev Date: Tue, 25 Oct 2022 12:53:58 +0300 Subject: [PATCH 1287/1727] fix(main): fix request size limit to max_request_size (axum defaults 2MB) --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- src/main.rs | 10 ++++++++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48ce6c0..bdadf71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,9 +109,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", "bytes", diff --git a/Cargo.toml b/Cargo.toml index e7e48c8..db51f4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } axum-server = { version = "0.4.0", features = ["tls-rustls"] } tower = { version = "0.4.8", features = ["util"] } tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } diff --git a/src/main.rs b/src/main.rs index 626de3a..88a4838 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,7 +10,7 @@ use std::{future::Future, io, net::SocketAddr, time::Duration}; use axum::{ - extract::{FromRequest, MatchedPath}, + extract::{DefaultBodyLimit, FromRequest, MatchedPath}, handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, @@ -164,7 +164,13 @@ async fn run_server() -> io::Result<()> { header::AUTHORIZATION, ]) .max_age(Duration::from_secs(86400)), - ); + ) + .layer(DefaultBodyLimit::max( + config + .max_request_size + 
.try_into() + .expect("failed to convert max request size"), + )); let app = routes().layer(middlewares).into_make_service(); let handle = ServerHandle::new(); From 3bc0a1924b230e0a3fe28fcba2830aa46f3f1156 Mon Sep 17 00:00:00 2001 From: James Blachly Date: Tue, 25 Oct 2022 20:47:41 +0000 Subject: [PATCH 1288/1727] Return 403 to 3pid token routes to signal not implemented --- src/api/client_server/account.rs | 33 ++++++++++++++++++++++++++++++-- src/main.rs | 2 ++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index ce4dadd..c9e3c9b 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -3,7 +3,8 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ account::{ - change_password, deactivate, get_3pids, get_username_availability, register, whoami, + change_password, deactivate, get_3pids, get_username_availability, register, + request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, ThirdPartyIdRemovalStatus, }, error::ErrorKind, @@ -406,7 +407,7 @@ pub async fn deactivate_route( }) } -/// # `GET _matrix/client/r0/account/3pid` +/// # `GET _matrix/client/v3/account/3pid` /// /// Get a list of third party identifiers associated with this account. /// @@ -418,3 +419,31 @@ pub async fn third_party_route( Ok(get_3pids::v3::Response::new(Vec::new())) } + +/// # `POST /_matrix/client/v3/account/3pid/email/requestToken` +/// +/// "This API should be used to request validation tokens when adding an email address to an account" +/// +/// - 403 signals that The homeserver does not allow the third party identifier as a contact option. +pub async fn request_3pid_management_token_via_email_route( + _body: Ruma, +) -> Result { + Err(Error::BadRequest( + ErrorKind::ThreepidDenied, + "Third party identifier is not allowed", + )) +} + +/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` +/// +/// "This API should be used to request validation tokens when adding an phone number to an account" +/// +/// - 403 signals that The homeserver does not allow the third party identifier as a contact option. +pub async fn request_3pid_management_token_via_msisdn_route( + _body: Ruma, +) -> Result { + Err(Error::BadRequest( + ErrorKind::ThreepidDenied, + "Third party identifier is not allowed", + )) +} diff --git a/src/main.rs b/src/main.rs index 626de3a..38fdfdd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -217,6 +217,8 @@ fn routes() -> Router { .ruma_route(client_server::change_password_route) .ruma_route(client_server::deactivate_route) .ruma_route(client_server::third_party_route) + .ruma_route(client_server::request_3pid_management_token_via_email_route) + .ruma_route(client_server::request_3pid_management_token_via_msisdn_route) .ruma_route(client_server::get_capabilities_route) .ruma_route(client_server::get_pushrules_all_route) .ruma_route(client_server::set_pushrule_route) From 238ebcfcac94973c486653f9e5ad3ca35753c9c5 Mon Sep 17 00:00:00 2001 From: Nabulator Date: Thu, 27 Oct 2022 04:20:56 +0000 Subject: [PATCH 1289/1727] Update nginx configuration to allow for larger uploads. --- DEPLOY.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/DEPLOY.md b/DEPLOY.md index 1c7d1af..a248d81 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -214,6 +214,9 @@ server { server_name your.server.name; # EDIT THIS merge_slashes off; + # Nginx default to only allow 1MB uploads. 
+ client_max_body_size 20M; + location /_matrix/ { proxy_pass http://127.0.0.1:6167$request_uri; proxy_set_header Host $http_host; From 1aff2a54ef5ef571b622d5bb27710c3119b5714d Mon Sep 17 00:00:00 2001 From: Nabulator Date: Thu, 27 Oct 2022 04:23:07 +0000 Subject: [PATCH 1290/1727] comment typo --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index a248d81..51f310e 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -214,7 +214,7 @@ server { server_name your.server.name; # EDIT THIS merge_slashes off; - # Nginx default to only allow 1MB uploads. + # Nginx defaults to only allow 1MB uploads client_max_body_size 20M; location /_matrix/ { From 7c98ba64aa3f64be9aff77f36c33d2d48d9653b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 15 Oct 2022 16:56:08 +0200 Subject: [PATCH 1291/1727] fix: HEAD requests should produce METHOD_NOT_ALLOWED --- src/api/client_server/directory.rs | 11 ++--------- src/api/client_server/sync.rs | 2 +- src/main.rs | 18 ++++++++++++------ 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index a7381d8..f07a225 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -87,10 +87,7 @@ pub async fn set_room_visibility_route( if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room not found", - )); + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } match &body.visibility { @@ -116,13 +113,9 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( body: Ruma, ) -> Result { - if !services().rooms.metadata.exists(&body.room_id)? 
{ // Return 404 if the room doesn't exist - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room not found", - )); + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } Ok(get_room_visibility::v3::Response { diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 828ae19..7de274b 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -905,7 +905,7 @@ async fn sync_helper( let leave_shortstatekey = services() .rooms .short - .get_or_create_shortstatekey(&StateEventType::RoomMember, &sender_user.as_str())?; + .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?; left_state_ids.insert(leave_shortstatekey, left_event_id); diff --git a/src/main.rs b/src/main.rs index 626de3a..a782de0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,10 +24,13 @@ use figment::{ }; use http::{ header::{self, HeaderName}, - Method, Uri, + Method, StatusCode, Uri, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::api::{client::error::ErrorKind, IncomingRequest}; +use ruma::api::{ + client::{error::Error as RumaError, error::ErrorKind, uiaa::UiaaResponse}, + IncomingRequest, +}; use tokio::signal; use tower::ServiceBuilder; use tower_http::{ @@ -191,15 +194,18 @@ async fn run_server() -> io::Result<()> { async fn unrecognized_method( req: axum::http::Request, next: axum::middleware::Next, -) -> std::result::Result { +) -> std::result::Result { let method = req.method().clone(); let uri = req.uri().clone(); let inner = next.run(req).await; if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { warn!("Method not allowed: {method} {uri}"); - return Ok( - Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request").into_response(), - ); + return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { + kind: ErrorKind::Unrecognized, + message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), + status_code: StatusCode::METHOD_NOT_ALLOWED, + })) + .into_response()); } Ok(inner) } From 02dd3d32f2ea7a40fe76402f321009f16579fd6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 30 Oct 2022 20:36:14 +0100 Subject: [PATCH 1292/1727] fix: element android did not reset notification counts --- src/api/client_server/account.rs | 4 ++-- src/api/client_server/directory.rs | 11 ++--------- src/api/client_server/sync.rs | 5 ++--- src/database/key_value/rooms/user.rs | 25 +++++++++++++++++++++++++ src/database/mod.rs | 2 ++ src/service/rooms/user/data.rs | 3 +++ src/service/rooms/user/mod.rs | 4 ++++ 7 files changed, 40 insertions(+), 14 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index c9e3c9b..309a361 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -4,8 +4,8 @@ use ruma::{ api::client::{ account::{ change_password, deactivate, get_3pids, get_username_availability, register, - request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, - ThirdPartyIdRemovalStatus, + request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, + whoami, ThirdPartyIdRemovalStatus, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index a7381d8..f07a225 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -87,10 +87,7 @@ pub async fn set_room_visibility_route( if !services().rooms.metadata.exists(&body.room_id)? 
{ // Return 404 if the room doesn't exist - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room not found", - )); + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } match &body.visibility { @@ -116,13 +113,9 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( body: Ruma, ) -> Result { - if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room not found", - )); + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } Ok(get_room_visibility::v3::Response { diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 828ae19..56f918a 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -282,9 +282,8 @@ async fn sync_helper( let send_notification_counts = !timeline_pdus.is_empty() || services() .rooms - .edus - .read_receipt - .last_privateread_update(&sender_user, &room_id)? + .user + .last_notification_read(&sender_user, &room_id)? > since; let mut timeline_users = HashSet::new(); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 3d8d1c8..4c43572 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -7,12 +7,20 @@ impl service::rooms::user::Data for KeyValueDatabase { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); self.userroomid_notificationcount .insert(&userroom_id, &0_u64.to_be_bytes())?; self.userroomid_highlightcount .insert(&userroom_id, &0_u64.to_be_bytes())?; + self.roomuserid_lastnotificationread.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; + Ok(()) } @@ -44,6 +52,23 @@ impl service::rooms::user::Data for KeyValueDatabase { .unwrap_or(Ok(0)) } + fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastnotificationread + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } + fn associate_token_shortstatehash( &self, room_id: &RoomId, diff --git a/src/database/mod.rs b/src/database/mod.rs index 15ee137..3746efe 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -98,6 +98,7 @@ pub struct KeyValueDatabase { pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 + pub(super) roomuserid_lastnotificationread: Arc, // LastNotificationRead = u64 /// Remember the current state hash of a room. 
pub(super) roomid_shortstatehash: Arc, @@ -317,6 +318,7 @@ impl KeyValueDatabase { userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, + roomuserid_lastnotificationread: builder.open_tree("userroomid_highlightcount")?, statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 43c4c92..4b8a4ec 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -8,6 +8,9 @@ pub trait Data: Send + Sync { fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + // Returns the count at which the last reset_notification_counts was called + fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result; + fn associate_token_shortstatehash( &self, room_id: &RoomId, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index a765cfd..2266d97 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -22,6 +22,10 @@ impl Service { self.db.highlight_count(user_id, room_id) } + pub fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.last_notification_read(user_id, room_id) + } + pub fn associate_token_shortstatehash( &self, room_id: &RoomId, From 5d691f405eb6cb13f40ec420b5fe99d99fe5feb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 30 Oct 2022 21:22:32 +0100 Subject: [PATCH 1293/1727] fix: stuck typing indicators --- src/database/key_value/rooms/edus/typing.rs | 43 +++++++++++++++++++++ src/service/rooms/edus/typing/data.rs | 3 ++ src/service/rooms/edus/typing/mod.rs | 42 ++------------------ 3 files changed, 49 insertions(+), 39 deletions(-) diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 4a2f0f9..d50c000 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::mem; use ruma::{OwnedUserId, RoomId, UserId}; @@ -53,6 +54,48 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { Ok(()) } + fn typings_maintain( + &self, + room_id: &RoomId, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let current_timestamp = utils::millis_since_unix_epoch(); + + let mut found_outdated = false; + + // Find all outdated edus before inserting a new one + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .map(|(key, _)| { + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes( + &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") + })?[0..mem::size_of::()], + ) + .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, + )) + }) + .filter_map(|r| r.ok()) + .take_while(|&(_, timestamp)| timestamp < current_timestamp) + { + // This is an outdated edu (time > timestamp) + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + fn last_typing_update(&self, room_id: &RoomId) -> Result { Ok(self .roomid_lasttypingupdate diff --git a/src/service/rooms/edus/typing/data.rs 
b/src/service/rooms/edus/typing/data.rs index c4ad867..3b1eecf 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -10,6 +10,9 @@ pub trait Data: Send + Sync { /// Removes a user from typing before the timeout is reached. fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + /// Makes sure that typing events with old timestamps get removed. + fn typings_maintain(&self, room_id: &RoomId) -> Result<()>; + /// Returns the count of the last typing update in this room. fn last_typing_update(&self, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index d05ec90..6b066f0 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -21,54 +21,18 @@ impl Service { self.db.typing_remove(user_id, room_id) } - /* TODO: Do this in background thread? /// Makes sure that typing events with old timestamps get removed. fn typings_maintain( &self, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) + self.db.typings_maintain(room_id) } - */ /// Returns the count of the last typing update in this room. pub fn last_typing_update(&self, room_id: &RoomId) -> Result { + self.typings_maintain(room_id)?; + self.db.last_typing_update(room_id) } From 0cf6545116f475db1e42f0c72ae30cdd2663f029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 30 Oct 2022 21:23:10 +0100 Subject: [PATCH 1294/1727] fix: not sending enough state on join --- src/api/client_server/sync.rs | 41 ++++++++++++--------- src/database/key_value/rooms/edus/typing.rs | 11 +++--- src/service/rooms/edus/typing/mod.rs | 5 +-- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5c1c23d..94e4f5b 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -388,13 +388,35 @@ async fn sync_helper( )) }; + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? 
+ .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + let ( heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events, - ) = if since_shortstatehash.is_none() { + ) = if since_shortstatehash.is_none() || joined_since_last_sync { // Probably since = 0, we will do an initial sync let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; @@ -487,23 +509,6 @@ async fn sync_helper( // Incremental /sync let since_shortstatehash = since_shortstatehash.unwrap(); - let since_sender_member: Option = services() - .rooms - .state_accessor - .state_get( - since_shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - )? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index d50c000..d2d4306 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -54,10 +54,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { Ok(()) } - fn typings_maintain( - &self, - room_id: &RoomId, - ) -> Result<()> { + fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -89,8 +86,10 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { } if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.roomid_lasttypingupdate.insert( + room_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; } Ok(()) diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 6b066f0..7d44f7d 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -22,10 +22,7 @@ impl Service { } /// Makes sure that typing events with old timestamps get removed. 
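The maintenance pass introduced above walks the timestamp-ordered `typingid_userid` prefix for the room, removes every entry whose timeout already lies in the past, and bumps `roomid_lasttypingupdate` only when something was actually dropped. A minimal standalone sketch of that idea, assuming an in-memory map from timeout to user id in place of the real byte-prefixed tree:

```rust
use std::collections::BTreeMap;
use std::time::{SystemTime, UNIX_EPOCH};

/// Drop every typing entry whose timeout (in ms) has passed.
/// Returns true if anything was removed, i.e. the room's
/// last-typing-update counter should be bumped.
fn typings_maintain(typing: &mut BTreeMap<u64, String>) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before the unix epoch")
        .as_millis() as u64;

    let before = typing.len();
    typing.retain(|&timeout, _| timeout >= now);
    typing.len() != before
}

fn main() {
    let mut typing = BTreeMap::new();
    typing.insert(1, "@stale:example.org".to_owned()); // long expired
    typing.insert(u64::MAX, "@active:example.org".to_owned()); // far in the future
    assert!(typings_maintain(&mut typing));
    assert_eq!(typing.len(), 1);
}
```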
- fn typings_maintain( - &self, - room_id: &RoomId, - ) -> Result<()> { + fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { self.db.typings_maintain(room_id) } From 00996dd83441abac357e27b4bd325c8ccef62e75 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 31 Oct 2022 09:31:17 +0100 Subject: [PATCH 1295/1727] Cargo Clippy --- src/api/client_server/search.rs | 2 +- src/database/key_value/rooms/edus/typing.rs | 3 +-- src/main.rs | 5 ++++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 1ba9cdf..5b634a4 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -103,7 +103,7 @@ pub async fn search_events_route( .take(limit) .collect(); - let next_batch = if results.len() < limit as usize { + let next_batch = if results.len() < limit { None } else { Some((skip + limit).to_string()) diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index d2d4306..5709192 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,5 +1,4 @@ -use std::collections::HashSet; -use std::mem; +use std::{collections::HashSet, mem}; use ruma::{OwnedUserId, RoomId, UserId}; diff --git a/src/main.rs b/src/main.rs index 695f0be..72c6d51 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,7 +28,10 @@ use http::{ }; use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{ - client::{error::Error as RumaError, error::ErrorKind, uiaa::UiaaResponse}, + client::{ + error::{Error as RumaError, ErrorKind}, + uiaa::UiaaResponse, + }, IncomingRequest, }; use tokio::signal; From 23cf39c525790122a97090b041e16b68e94644de Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 31 Oct 2022 12:08:01 +0100 Subject: [PATCH 1296/1727] Cleanly handle invalid response from trusted server instead of panicking --- src/service/rooms/event_handler/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 477a971..3b41e86 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1464,7 +1464,17 @@ impl Service { .write() .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for k in keys.server_keys { - let k = k.deserialize().unwrap(); + let k = match k.deserialize() { + Ok(key) => key, + Err(e) => { + warn!( + "Received error {} while fetching keys from trusted server {}", + e, server + ); + warn!("{}", k.into_json()); + continue; + } + }; // TODO: Check signature from trusted server? 
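The change above replaces a bare `unwrap()` with a match that logs and skips any key response from the trusted server that Conduit cannot deserialize. A self-contained sketch of the same skip-on-error pattern, with plain JSON strings standing in for the federation key responses:

```rust
use serde_json::Value;

fn main() {
    // Stand-ins for responses from a trusted key server; the second one is malformed.
    let responses = [
        r#"{"server_name": "good.example"}"#,
        r#"{"server_name": }"#,
    ];

    for raw in responses {
        // A failure here used to abort the whole fetch via unwrap();
        // now the bad entry is logged and skipped.
        let key: Value = match serde_json::from_str(raw) {
            Ok(v) => v,
            Err(e) => {
                eprintln!("skipping undeserializable key response: {e}");
                continue;
            }
        };
        println!("accepted keys for {}", key["server_name"]);
    }
}
```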
servers.remove(&k.server_name); From b37876f3b2280cab122b46f15290979eef355ea3 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 29 Oct 2022 14:32:22 +0200 Subject: [PATCH 1297/1727] fix(ci): Only build in (remote host) docker and switch to glibc --- .dockerignore | 2 +- .gitlab-ci.yml | 359 +++++++----------------- .gitlab/setup-buildx-remote-builders.sh | 37 +++ DEPLOY.md | 40 +-- Dockerfile | 56 +++- 5 files changed, 212 insertions(+), 282 deletions(-) create mode 100644 .gitlab/setup-buildx-remote-builders.sh diff --git a/.dockerignore b/.dockerignore index 933b380..c78ddba 100644 --- a/.dockerignore +++ b/.dockerignore @@ -25,4 +25,4 @@ docker-compose* rustfmt.toml # Documentation -*.md +#*.md diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index eb7a96f..91258ea 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,140 +5,10 @@ stages: - upload artifacts variables: + # Make GitLab CI go fast: GIT_SUBMODULE_STRATEGY: recursive FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest - # Docker in Docker - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 - -# --------------------------------------------------------------------- # -# Cargo: Compiling for different architectures # -# --------------------------------------------------------------------- # - -.build-cargo-shared-settings: - stage: "build" - needs: [] - rules: - - if: '$CI_COMMIT_BRANCH == "master"' - - if: '$CI_COMMIT_BRANCH == "next"' - - if: "$CI_COMMIT_TAG" - - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. - interruptible: true - image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093" - tags: ["docker"] - services: ["docker:dind"] - variables: - SHARED_PATH: $CI_PROJECT_DIR/shared - CARGO_PROFILE_RELEASE_LTO: "true" - CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" - CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - before_script: - - 'echo "Building for target $TARGET"' - - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) - - "mkdir -p $SHARED_PATH/cargo" - - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" - - "cp -r $RUSTUP_HOME $SHARED_PATH" - - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. 
- - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi - script: - # cross-compile conduit for target - - 'time cross build --target="$TARGET" --locked --release' - - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' - # print information about linking for debugging - - "file conduit-$TARGET" # print file information - - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked - cache: - # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - key: "cargo-cache-$TARGET" - paths: - - $SHARED_PATH/cargo/registry/index - - $SHARED_PATH/cargo/registry/cache - - $SHARED_PATH/cargo/git/db - artifacts: - expire_in: never - -build:release:cargo:x86_64-unknown-linux-musl-with-debug: - extends: .build-cargo-shared-settings - variables: - CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling - TARGET: "x86_64-unknown-linux-musl" - after_script: - - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug" - artifacts: - name: "conduit-x86_64-unknown-linux-musl-with-debug" - paths: - - "conduit-x86_64-unknown-linux-musl-with-debug" - expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug" - -build:release:cargo:x86_64-unknown-linux-musl: - extends: .build-cargo-shared-settings - variables: - TARGET: "x86_64-unknown-linux-musl" - artifacts: - name: "conduit-x86_64-unknown-linux-musl" - paths: - - "conduit-x86_64-unknown-linux-musl" - expose_as: "Conduit for x86_64-unknown-linux-musl" - -build:release:cargo:arm-unknown-linux-musleabihf: - extends: .build-cargo-shared-settings - variables: - TARGET: "arm-unknown-linux-musleabihf" - artifacts: - name: "conduit-arm-unknown-linux-musleabihf" - paths: - - "conduit-arm-unknown-linux-musleabihf" - expose_as: "Conduit for arm-unknown-linux-musleabihf" - -build:release:cargo:armv7-unknown-linux-musleabihf: - extends: .build-cargo-shared-settings - variables: - TARGET: "armv7-unknown-linux-musleabihf" - artifacts: - name: "conduit-armv7-unknown-linux-musleabihf" - paths: - - "conduit-armv7-unknown-linux-musleabihf" - expose_as: "Conduit for armv7-unknown-linux-musleabihf" - -build:release:cargo:aarch64-unknown-linux-musl: - extends: .build-cargo-shared-settings - variables: - TARGET: "aarch64-unknown-linux-musl" - artifacts: - name: "conduit-aarch64-unknown-linux-musl" - paths: - - "conduit-aarch64-unknown-linux-musl" - expose_as: "Conduit for aarch64-unknown-linux-musl" - -.cargo-debug-shared-settings: - extends: ".build-cargo-shared-settings" - rules: - - when: "always" - cache: - key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" - script: - # cross-compile conduit for target - - 'time time cross build --target="$TARGET" --locked' - - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' - # print information about linking for debugging - - "file conduit-debug-$TARGET" # print file information - - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked - artifacts: - expire_in: 4 weeks - -build:debug:cargo:x86_64-unknown-linux-musl: - extends: ".cargo-debug-shared-settings" - variables: - TARGET: "x86_64-unknown-linux-musl" - artifacts: - name: "conduit-debug-x86_64-unknown-linux-musl" - paths: - - "conduit-debug-x86_64-unknown-linux-musl" - expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" # --------------------------------------------------------------------- # # Create and publish docker image # @@ -146,98 +16,106 @@ build:debug:cargo:x86_64-unknown-linux-musl: 
.docker-shared-settings: stage: "build docker image" - image: jdrouet/docker-with-buildx:stable + image: jdrouet/docker-with-buildx:20.10.21-0.9.1 + needs: [] tags: ["docker"] + variables: + # Docker in Docker: + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 services: - docker:dind - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - - "build:release:cargo:arm-unknown-linux-musleabihf" - - "build:release:cargo:armv7-unknown-linux-musleabihf" - - "build:release:cargo:aarch64-unknown-linux-musl" - variables: - PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" - DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" - cache: - paths: - - docker_cache - key: "$CI_JOB_NAME" - before_script: - - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - # Only log in to Dockerhub if the credentials are given: - - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi script: - # Prepare buildx to build multiarch stuff: - - docker context create 'ci-context' - - docker buildx create --name 'multiarch-builder' --use 'ci-context' - # Copy binaries to their docker arch path - - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 - - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 - - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 - - mv ./conduit-aarch64-unknown-linux-musl linux/arm64 - - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"' - # Build and push image: + - apk add openssh-client + - eval $(ssh-agent -s) + - mkdir -p ~/.ssh && chmod 700 ~/.ssh + - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config + - sh .gitlab/setup-buildx-remote-builders.sh + # Authorize against this project's own image registry: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + # Build multiplatform image and push to temporary tag: - > - docker buildx build + docker buildx build + --platform "linux/arm/v7,linux/arm64,linux/amd64" --pull + --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" --push - --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache - --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache - --build-arg CREATED=$CREATED - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --platform "$PLATFORMS" - --tag "$TAG" - --tag "$TAG-alpine" - --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA" - --file "$DOCKER_FILE" . + --file "Dockerfile" . + # Build multiplatform image to deb stage and extract their .deb files: + - > + docker buildx build + --platform "linux/arm/v7,linux/arm64,linux/amd64" + --target "packager-result" + --output="type=local,dest=/tmp/build-output" + --file "Dockerfile" . + # Build multiplatform image to binary stage and extract their binaries: + - > + docker buildx build + --platform "linux/arm/v7,linux/arm64,linux/amd64" + --target "builder-result" + --output="type=local,dest=/tmp/build-output" + --file "Dockerfile" . 
+ # Copy to GitLab container registry: + - > + docker buildx imagetools create + --tag "$CI_REGISTRY_IMAGE/$TAG" + --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye" + --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" + "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" + # if DockerHub credentials exist, also copy to dockerhub: + - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi + - > + if [ -n "${DOCKER_HUB}" ]; then + docker buildx imagetools create + --tag "$DOCKER_HUB_IMAGE/$TAG" + --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye" + --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" + "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" + ; fi + - mv /tmp/build-output ./ + artifacts: + paths: + - "./build-output/" -docker:next:gitlab: +docker:next: extends: .docker-shared-settings rules: - - if: '$CI_COMMIT_BRANCH == "next"' + - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"' variables: - TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" + TAG: "matrix-conduit:next" -docker:next:dockerhub: +docker:master: extends: .docker-shared-settings rules: - - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB' + - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"' variables: - TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + TAG: "matrix-conduit:latest" -docker:master:gitlab: +docker:tags: extends: .docker-shared-settings rules: - - if: '$CI_COMMIT_BRANCH == "master"' + - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG" variables: - TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" + TAG: "matrix-conduit:$CI_COMMIT_TAG" -docker:master:dockerhub: - extends: .docker-shared-settings - rules: - - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB' - variables: - TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - -docker:tags:gitlab: - extends: .docker-shared-settings - rules: - - if: "$CI_COMMIT_TAG" - variables: - TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG" - -docker:tags:dockerhub: - extends: .docker-shared-settings - rules: - - if: "$CI_COMMIT_TAG && $DOCKER_HUB" - variables: - TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # +cargo check: + stage: test + image: docker.io/rust:1.64.0-bullseye + needs: [] + interruptible: true + before_script: + - "rustup show && rustc --version && cargo --version" # Print version info for debugging + - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb + script: + - cargo check + + .test-shared-settings: stage: "test" needs: [] @@ -250,8 +128,7 @@ docker:tags:dockerhub: test:cargo: extends: .test-shared-settings before_script: - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb script: - rustc --version && cargo --version # Print version info for debugging - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" @@ -260,14 +137,12 @@ test:cargo: reports: junit: report.xml - test:clippy: extends: .test-shared-settings allow_failure: true 
before_script: - rustup component add clippy - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb script: - rustc --version && cargo --version # Print version info for debugging - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" @@ -294,38 +169,6 @@ test:audit: reports: sast: gl-sast-report.json -test:sytest: - stage: "test" - allow_failure: true - needs: - - "build:debug:cargo:x86_64-unknown-linux-musl" - image: - name: "valkum/sytest-conduit:latest" - entrypoint: [""] - tags: ["docker"] - variables: - PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" - interruptible: true - before_script: - - "mkdir -p /app" - - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit" - - "chmod +x /app/conduit" - - "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src" - - "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/" - - "cd /" - script: - - "SYTEST_EXIT_CODE=0" - - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap' - - "exit $SYTEST_EXIT_CODE" - artifacts: - when: always - paths: - - "$CI_PROJECT_DIR/sytest.xml" - - "$CI_PROJECT_DIR/results.tap" - reports: - junit: "$CI_PROJECT_DIR/sytest.xml" - test:dockerlint: stage: "test" needs: [] @@ -338,14 +181,12 @@ test:dockerlint: hadolint --no-fail --verbose ./Dockerfile - ./docker/ci-binaries-packaging.Dockerfile # Then output the results into a json for GitLab to pretty-print this in the MR: - > hadolint --format gitlab_codeclimate --failure-threshold error - ./Dockerfile - ./docker/ci-binaries-packaging.Dockerfile > dockerlint.json + ./Dockerfile > dockerlint.json artifacts: when: always reports: @@ -365,28 +206,26 @@ test:dockerlint: # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # -publish:package: - stage: "upload artifacts" - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - - "build:release:cargo:arm-unknown-linux-musleabihf" - - "build:release:cargo:armv7-unknown-linux-musleabihf" - - "build:release:cargo:aarch64-unknown-linux-musl" - # - "build:cargo-deb:x86_64-unknown-linux-gnu" - rules: - - if: '$CI_COMMIT_BRANCH == "master"' - - if: '$CI_COMMIT_BRANCH == "next"' - - if: "$CI_COMMIT_TAG" - image: curlimages/curl:latest - tags: ["docker"] - variables: - GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts - script: - - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" 
--upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"' +# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME: + +#publish:package: +# stage: "upload artifacts" +# needs: +# - "docker:tags" +# rules: +# - if: "$CI_COMMIT_TAG" +# image: curlimages/curl:latest +# tags: ["docker"] +# variables: +# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts +# script: +# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"' +# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"' # Avoid duplicate pipelines # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines diff --git a/.gitlab/setup-buildx-remote-builders.sh b/.gitlab/setup-buildx-remote-builders.sh new file mode 100644 index 0000000..29d50dd --- /dev/null +++ b/.gitlab/setup-buildx-remote-builders.sh @@ -0,0 +1,37 @@ +#!/bin/sh +set -eux + +# --------------------------------------------------------------------- # +# # +# Configures docker buildx to use a remote server for arm building. # +# Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with # +# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP # +# # +# This is expected to only be used in the official CI/CD pipeline! # +# # +# Requirements: openssh-client, docker buildx # +# Inspired by: https://depot.dev/blog/building-arm-containers # +# # +# --------------------------------------------------------------------- # + +cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add - + +# Test server connections: +ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a" +ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a" + +# Connect remote arm64 server for all arm builds: +docker buildx create \ + --name "multi" \ + --driver "docker-container" \ + --platform "linux/arm64,linux/arm/v7" \ + "ssh://$ARM_SERVER_USER@$ARM_SERVER_IP" + +# Connect remote amd64 server for adm64 builds: +docker buildx create --append \ + --name "multi" \ + --driver "docker-container" \ + --platform "linux/amd64" \ + "ssh://$AMD_SERVER_USER@$AMD_SERVER_IP" + +docker buildx use multi diff --git a/DEPLOY.md b/DEPLOY.md index 1c7d1af..a2f93b1 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -12,21 +12,27 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the right url: -| CPU Architecture | Download stable version | Download development version | -| ------------------------------------------- | ------------------------------ | ---------------------------- | -| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] | -| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] | -| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] | -| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] | +| CPU Architecture | Download stable version | Download development version | +| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- | +| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] | +| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] | +| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] | -[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl -[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf -[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf -[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl -[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl -[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf -[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf -[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +These builds were created on and linked against the glibc version shipped with Debian bullseye. +If you use a system with an older glibc version, you might need to compile Conduit yourself. 
+ +[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master +[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master +[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master +[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next +[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next +[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next +[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master +[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master +[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master +[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next +[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next +[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -43,7 +49,6 @@ $ sudo apt install libclang-dev build-essential $ cargo build --release ``` - If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). ## Adding a Conduit user @@ -189,18 +194,21 @@ $ sudo systemctl reload apache2 ``` ### Caddy + Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name). + ```caddy your.server.name, your.server.name:8448 { reverse_proxy /_matrix/* 127.0.0.1:6167 } ``` + That's it! Just start or enable the service and you're set. 
+ ```bash $ sudo systemctl enable caddy ``` - ### Nginx If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` diff --git a/Dockerfile b/Dockerfile index 3154ebb..2763b12 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.63-bullseye AS builder +FROM docker.io/rust:1.64-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies @@ -27,6 +27,49 @@ COPY src src # Builds conduit and places the binary at /usr/src/conduit/target/release/conduit RUN touch src/main.rs && touch src/lib.rs && cargo build --release + +# ONLY USEFUL FOR CI: target stage to extract build artifacts +FROM scratch AS builder-result +COPY --from=builder /usr/src/conduit/target/release/conduit /conduit + + + +# --------------------------------------------------------------------------------------------------------------- +# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: +# --------------------------------------------------------------------------------------------------------------- +FROM docker.io/rust:1.64-bullseye AS build-cargo-deb + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + dpkg \ + dpkg-dev \ + liblzma-dev + +RUN cargo install cargo-deb +# => binary is in /usr/local/cargo/bin/cargo-deb + + +# --------------------------------------------------------------------------------------------------------------- +# Package conduit build-result into a .deb package: +# --------------------------------------------------------------------------------------------------------------- +FROM builder AS packager +WORKDIR /usr/src/conduit + +COPY ./LICENSE ./LICENSE +COPY ./README.md ./README.md +COPY debian/README.Debian ./debian/ +COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb + +# --no-build makes cargo-deb reuse already compiled project +RUN cargo deb --no-build +# => Package is in /usr/src/conduit/target/debian/__.deb + + +# ONLY USEFUL FOR CI: target stage to extract build artifacts +FROM scratch AS packager-result +COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb + + # --------------------------------------------------------------------------------------------------------------- # Stuff below this line actually ends up in the resulting docker image # --------------------------------------------------------------------------------------------------------------- @@ -45,9 +88,11 @@ ENV CONDUIT_PORT=6167 \ # └─> Set no config file to do all configuration with env vars # Conduit needs: +# dpkg: to install conduit.deb # ca-certificates: for https # iproute2 & wget: for the healthcheck script RUN apt-get update && apt-get -y --no-install-recommends install \ + dpkg \ ca-certificates \ iproute2 \ wget \ @@ -57,8 +102,9 @@ RUN apt-get update && apt-get -y --no-install-recommends install \ COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh -# Copy over the actual Conduit binary from the builder stage -COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit +# Install conduit.deb: +COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/ +RUN dpkg -i /srv/conduit/*.deb # Improve security: Don't run stuff as root, that does not need to run as root # Most distros also use 1000:1000 for the first real user, so this 
should resolve volume mounting problems. @@ -73,7 +119,7 @@ RUN chown -cR conduit:conduit /srv/conduit && \ chmod +x /srv/conduit/healthcheck.sh && \ mkdir -p ${DEFAULT_DB_PATH} && \ chown -cR conduit:conduit ${DEFAULT_DB_PATH} - + # Change user to conduit, no root permissions afterwards: USER conduit # Set container home directory @@ -81,4 +127,4 @@ WORKDIR /srv/conduit # Run Conduit and print backtraces on panics ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/srv/conduit/conduit" ] +ENTRYPOINT [ "/usr/sbin/matrix-conduit" ] From a2d8aec1e3a1578872f1caaee719d3acfa5227bc Mon Sep 17 00:00:00 2001 From: Paul Beziau Date: Thu, 3 Nov 2022 13:12:53 +0000 Subject: [PATCH 1298/1727] Moving the unwraping of a variable Moving the unwraping of the variable "rule" inside the condition instead of the if body, for the migration of the database from version 11 to 12. --- src/database/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index ccc8177..f6a76c6 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -851,8 +851,8 @@ impl KeyValueDatabase { for transformation in underride_rule_transformation { let rule = rules_list.underride.get(transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); + if let Some(rule) = rule { + let mut rule = rule.clone(); rule.rule_id = transformation[1].to_string(); rules_list.underride.remove(transformation[0]); rules_list.underride.insert(rule); From 09015f113ce19280825e54e98d8c5b92b54a03bb Mon Sep 17 00:00:00 2001 From: Ticho 34782694 Date: Tue, 8 Nov 2022 15:56:24 +0000 Subject: [PATCH 1299/1727] Describe a better way to enforce Content-Type in nginx add_header will not override the Content-Type header set by the server, but will instead add another header below, which is obviously not ideal. The proposed change will instead tell nginx to set the correct value for this header straight away. 
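
For illustration only (this is not part of the patch itself), the resulting well-known location block would look roughly like the sketch below, with `your.server.name` standing in for the real delegated hostname. The empty `types { }` block clears nginx's extension-based MIME type mapping for this location, so the `default_type` value is guaranteed to be used for the response:

```nginx
location /.well-known/matrix/server {
    return 200 '{"m.server": "your.server.name:443"}';
    # Set the response Content-Type directly instead of appending a second header:
    types { } default_type "application/json; charset=utf-8";
}
```
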
--- docker/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index 36717c4..c702832 100644 --- a/docker/README.md +++ b/docker/README.md @@ -121,12 +121,12 @@ So...step by step: location /.well-known/matrix/server { return 200 '{"m.server": ".:443"}'; - add_header Content-Type application/json; + types { } default_type "application/json; charset=utf-8"; } location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://."}}'; - add_header Content-Type application/json; + types { } default_type "application/json; charset=utf-8"; add_header "Access-Control-Allow-Origin" *; } From 75402273882d4641771d5edde7ed8b498eafcb07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 9 Nov 2022 18:46:10 +0100 Subject: [PATCH 1300/1727] chore: bump dependencies --- Cargo.lock | 357 +++++++++++++++++++------------ Cargo.toml | 2 +- src/api/client_server/push.rs | 5 +- src/api/server_server.rs | 1 + src/database/key_value/pusher.rs | 51 +++-- src/main.rs | 2 +- src/service/pusher/data.rs | 9 +- src/service/pusher/mod.rs | 156 +++++++------- 8 files changed, 333 insertions(+), 250 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bdadf71..71524af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,7 +14,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "once_cell", "version_check", ] @@ -83,9 +83,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -157,9 +157,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba6170b61f7b086609dabcae68d2e07352539c6ef04a7c82980bdfa01a159d" +checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" dependencies = [ "arc-swap", "bytes", @@ -177,15 +177,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "bincode" @@ -273,15 +273,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "bytemuck" -version = "1.12.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" +checksum = 
"aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f" [[package]] name = "byteorder" @@ -297,9 +297,9 @@ checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" dependencies = [ "jobserver", ] @@ -332,9 +332,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.11" +version = "4.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" +checksum = "91b9970d7505127a162fdaa9b96428d28a479ba78c9ec7550a63a5d9863db682" dependencies = [ "bitflags", "clap_derive", @@ -344,9 +344,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.0.10" +version = "4.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" +checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" dependencies = [ "heck", "proc-macro-error", @@ -712,7 +712,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.5.4", ] [[package]] @@ -748,9 +748,9 @@ checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "futures" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -763,9 +763,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -773,15 +773,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -790,15 +790,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -807,21 +807,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -858,9 +858,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", @@ -885,9 +885,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -1058,9 +1058,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -1167,9 +1167,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" [[package]] name = "itertools" @@ -1242,6 +1242,28 @@ dependencies = [ "simple_asn1", ] +[[package]] +name = "konst" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "330f0e13e6483b8c34885f7e6c9f19b1a7bd449c673fbb948a51c99d66ef74f4" +dependencies = [ + "konst_macro_rules", + "konst_proc_macros", +] + +[[package]] +name = "konst_macro_rules" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" + +[[package]] +name = "konst_proc_macros" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "984e109462d46ad18314f10e392c286c3d47bce203088a09012de1015b45b737" + [[package]] name = "lazy_static" version = "1.4.0" @@ -1256,15 +1278,15 
@@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.134" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -1284,9 +1306,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -1408,15 +1430,24 @@ dependencies = [ ] [[package]] -name = "mio" -version = "0.8.4" +name = "miniz_oxide" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -1482,28 +1513,19 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi", "libc", ] -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "once_cell" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "opaque-debug" @@ -1599,9 +1621,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.3.0" +version = "6.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" +checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9" [[package]] name = "overload" @@ -1631,15 +1653,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -1752,27 
+1774,27 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "png" -version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0e7f4c94ec26ff209cee506314212639d6c91b80afb82984819fafce9df01c" +checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638" dependencies = [ "bitflags", "crc32fast", "flate2", - "miniz_oxide", + "miniz_oxide 0.6.2", ] [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-crate" @@ -1811,9 +1833,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.46" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] @@ -1905,7 +1927,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -1932,16 +1954,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -1959,9 +1981,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "reqwest" @@ -2039,7 +2061,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "assign", "js_int", @@ -2057,8 +2079,9 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ + "js_int", "ruma-common", "serde", "serde_json", @@ -2066,13 +2089,14 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.15.1" -source = 
"git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +version = "0.15.3" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "assign", "bytes", "http", "js_int", + "js_option", "maplit", "percent-encoding", "ruma-common", @@ -2082,8 +2106,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +version = "0.10.5" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "base64", "bytes", @@ -2093,6 +2117,7 @@ dependencies = [ "itoa", "js_int", "js_option", + "konst", "percent-encoding", "rand 0.8.5", "regex", @@ -2110,7 +2135,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", @@ -2121,7 +2146,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "thiserror", @@ -2130,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", @@ -2139,8 +2164,8 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +version = "0.10.5" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "once_cell", "proc-macro-crate", @@ -2155,7 +2180,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", @@ -2166,7 +2191,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "base64", "ed25519-dalek", @@ -2182,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = 
"git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "itertools", "js_int", @@ -2227,9 +2252,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.6" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -2280,7 +2305,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "windows-sys", + "windows-sys 0.36.1", ] [[package]] @@ -2324,18 +2349,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -2344,9 +2369,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", @@ -2367,9 +2392,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8613d593412a0deb7bbd8de9d908efff5a0cb9ccd8f62c641e7b2ed2f57291d1" +checksum = "6d232d893b10de3eb7258ff01974d6ee20663d8e833263c99409d4b13a0209da" dependencies = [ "indexmap", "itoa", @@ -2513,9 +2538,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.102" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -2634,21 +2659,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", - "libc", - "num_threads", + "serde", + "time-core", "time-macros", ] [[package]] -name = "time-macros" -version = "0.2.4" +name = "time-core" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = 
"2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] [[package]] name = "tinyvec" @@ -2720,9 +2754,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6edf2d6bc038a43d31353570e27270603f4648d18f5ed10c0e179abe43255af" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite", @@ -2793,9 +2827,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -3005,7 +3039,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -3180,43 +3214,100 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", ] +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +[[package]] +name = "windows_i686_msvc" +version = 
"0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "winreg" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index db51f4a..588b443 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "fba6f70c2df8294f96567f56464a46e3d237a8e9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "2bd5c131f49b2239750c39ed63b623cd5a01c965", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 2301ddc..dc936a6 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -575,9 +575,10 @@ pub async fn set_pushers_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pusher = body.pusher.clone(); - services().pusher.set_pusher(sender_user, pusher)?; + services() + .pusher + .set_pusher(sender_user, body.action.clone())?; Ok(set_pusher::v3::Response::default()) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 320e396..f66e96c 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1478,6 +1478,7 @@ async fn create_join_event( .filter_map(|(_, id)| 
services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), + event: None, // TODO: handle restricted joins }) } diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 3dfceb6..b203107 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,38 +1,37 @@ use ruma::{ - api::client::push::{get_pushers, set_pusher}, + api::client::push::{set_pusher, Pusher}, UserId, }; use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::pusher::Data for KeyValueDatabase { - fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { + match &pusher { + set_pusher::v3::PusherAction::Post(data) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); + self.senderkey_pusher.insert( + &key, + &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), + )?; + Ok(()) + } + set_pusher::v3::PusherAction::Delete(ids) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(ids.pushkey.as_bytes()); + return self + .senderkey_pusher + .remove(&key) + .map(|_| ()) + .map_err(Into::into); + } } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) } - fn get_pusher( - &self, - sender: &UserId, - pushkey: &str, - ) -> Result> { + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { let mut senderkey = sender.as_bytes().to_vec(); senderkey.push(0xff); senderkey.extend_from_slice(pushkey.as_bytes()); @@ -46,7 +45,7 @@ impl service::pusher::Data for KeyValueDatabase { .transpose() } - fn get_pushers(&self, sender: &UserId) -> Result> { + fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/main.rs b/src/main.rs index 72c6d51..d2183a3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -473,7 +473,7 @@ macro_rules! 
impl_ruma_handler { let meta = Req::METADATA; let method_filter = method_to_filter(meta.method); - for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { + for path in meta.history.all_paths() { let handler = self.clone(); router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index e317121..2062f56 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,16 +1,15 @@ use crate::Result; use ruma::{ - api::client::push::{get_pushers, set_pusher}, + api::client::push::{set_pusher, Pusher}, UserId, }; pub trait Data: Send + Sync { - fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()>; - fn get_pusher(&self, sender: &UserId, pushkey: &str) - -> Result>; + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; - fn get_pushers(&self, sender: &UserId) -> Result>; + fn get_pushers(&self, sender: &UserId) -> Result>; fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 7fee276..cd11d71 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -6,7 +6,7 @@ use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ - client::push::{get_pushers, set_pusher, PusherKind}, + client::push::{set_pusher, Pusher, PusherKind}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, @@ -23,26 +23,22 @@ use ruma::{ }; use std::{fmt::Debug, mem}; -use tracing::{error, info, warn}; +use tracing::{info, warn}; pub struct Service { pub db: &'static dyn Data, } impl Service { - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { + pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { self.db.set_pusher(sender, pusher) } - pub fn get_pusher( - &self, - sender: &UserId, - pushkey: &str, - ) -> Result> { + pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { self.db.get_pusher(sender, pushkey) } - pub fn get_pushers(&self, sender: &UserId) -> Result> { + pub fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } @@ -140,7 +136,7 @@ impl Service { &self, user: &UserId, unread: UInt, - pusher: &get_pushers::v3::Pusher, + pusher: &Pusher, ruleset: Ruleset, pdu: &PduEvent, ) -> Result<()> { @@ -221,91 +217,87 @@ impl Service { async fn send_notice( &self, unread: UInt, - pusher: &get_pushers::v3::Pusher, + pusher: &Pusher, tweaks: Vec, event: &PduEvent, ) -> Result<()> { // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } + match &pusher.kind { + PusherKind::Http(http) => { + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. can pusher/devices have conflicting formats + let event_id_only = http.format == Some(PushFormat::EventIdOnly); - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. 
can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; + let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); + device.data.default_payload = http.default_payload.clone(); + device.data.format = http.format.clone(); - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url.into(); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } + let d = &[device]; + let mut notifi = Notification::new(d); - let d = &[device]; - let mut notifi = Notification::new(d); + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); + if event.kind == RoomEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } + if event_id_only { + self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) + .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); - if event_id_only { - self.send_request(url, send_event_notification::v1::Request::new(notifi)) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); + if event.kind == RoomEventType::RoomMember { + notifi.user_is_target = + event.state_key.as_deref() == Some(event.sender.as_str()); + } - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + let user_name = services().users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + + let room_name = if let Some(room_name_pdu) = services() + .rooms + .state_accessor + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? 
+ .name + } else { + None + }; + + notifi.room_name = room_name.as_deref(); + + self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) + .await?; + } + + Ok(()) } - - let user_name = services().users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = services() - .rooms - .state_accessor - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - self.send_request(url, send_event_notification::v1::Request::new(notifi)) - .await?; + // TODO: Handle email + PusherKind::Email(_) => return Ok(()), + _ => return Ok(()), } - - // TODO: email - - Ok(()) } } From c063700255e0e6bc4637981709ad4835319b75df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 9 Nov 2022 21:14:17 +0100 Subject: [PATCH 1301/1727] fix: invite dendrite users --- src/api/client_server/membership.rs | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 7142b8e..397e13c 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -826,7 +826,7 @@ pub(crate) async fn invite_helper<'a>( is_direct: bool, ) -> Result<()> { if user_id.server_name() != services().globals.server_name() { - let (pdu_json, invite_room_state) = { + let (pdu, pdu_json, invite_room_state) = { let mutex_state = Arc::clone( services() .globals @@ -867,28 +867,16 @@ pub(crate) async fn invite_helper<'a>( drop(state_lock); - (pdu_json, invite_room_state) + (pdu, pdu_json, invite_room_state) }; - // Generate event id - let expected_event_id = format!( - "${}", - ruma::signatures::reference_hash( - &pdu_json, - &services().rooms.state.get_room_version(room_id)? 
- ) - .expect("ruma can calculate reference hashes") - ); - let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - let response = services() .sending .send_federation_request( user_id.server_name(), create_invite::v2::Request { room_id, - event_id: expected_event_id, + event_id: &pdu.event_id, room_version: &services().rooms.state.get_room_version(room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, @@ -910,7 +898,7 @@ pub(crate) async fn invite_helper<'a>( } }; - if expected_event_id != event_id { + if pdu.event_id != event_id { warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } From 3b3c451c83ade05a1259c6666964d96b9eba1f06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 19 Nov 2022 12:52:47 +0100 Subject: [PATCH 1302/1727] fix: unable to leave room --- src/api/client_server/membership.rs | 39 +++++++++++++++++++---------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 397e13c..2267cbf 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1051,19 +1051,32 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { ); let state_lock = mutex_state.lock().await; - let mut event: RoomMemberEventContent = serde_json::from_str( - services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let member_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + )?; + + // Fix for broken rooms + let member_event = match member_event { + None => { + error!("Trying to leave a room you are not a member of."); + + services().rooms.state_cache.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + None, + true, + )?; + return Ok(()); + } + Some(e) => e, + }; + + let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; From a3a1db124d2fdf78999b493e51dceb344683b502 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 21 Nov 2022 21:48:06 +0200 Subject: [PATCH 1303/1727] Clean some noisy logs --- src/api/server_server.rs | 4 ++-- src/service/rooms/event_handler/mod.rs | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f66e96c..b7f8807 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). 
@@ -724,7 +724,7 @@ pub async fn send_transaction_message_route( drop(mutex_lock); let elapsed = start_time.elapsed(); - warn!( + debug!( "Handling transaction of event {} took {}m{}s", event_id, elapsed.as_secs() / 60, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 3b41e86..03f1f93 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -133,7 +133,8 @@ impl Service { .await?; let mut errors = 0; - for prev_id in dbg!(sorted_prev_events) { + debug!(events = ?sorted_prev_events, "Got previous events"); + for prev_id in sorted_prev_events { // Check for disabled again because it might have changed if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( @@ -330,7 +331,7 @@ impl Service { // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); + debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); self.fetch_and_handle_outliers( origin, &incoming_pdu From 6786c44f4df96e7505b68c165b6a275b556f7ab6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 21 Nov 2022 09:39:17 +0100 Subject: [PATCH 1304/1727] chore: Fix MSRV Ruma requires Rust 1.64 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 588b443..ae51945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.4.0-next" -rust-version = "1.63" +rust-version = "1.64" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 66bc41125c244b3a1d17a5590882ba1b71212cc7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 21 Nov 2022 09:51:39 +0100 Subject: [PATCH 1305/1727] refactor: cleanup --- Cargo.lock | 2 +- src/api/client_server/media.rs | 2 +- src/config/mod.rs | 4 ++-- src/database/key_value/pusher.rs | 5 ++--- src/lib.rs | 2 +- src/service/admin/mod.rs | 8 ++++---- src/service/pusher/mod.rs | 6 +++--- src/service/rooms/search/data.rs | 2 +- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/user/mod.rs | 6 +++--- 11 files changed, 20 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71524af..75b1222 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -372,7 +372,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.4.0-next" +version = "0.5.0-alpha.next" dependencies = [ "async-trait", "axum", diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index ae023c9..fa6def0 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -74,7 +74,7 @@ pub async fn get_remote_content( services() .media .create( - mxc.to_string(), + mxc.to_owned(), content_response.content_disposition.as_deref(), content_response.content_type.as_deref(), &content_response.file, diff --git a/src/config/mod.rs b/src/config/mod.rs index 3c3a764..6b862bb 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -183,7 +183,7 @@ impl fmt::Display for Config { ("Turn TTL", 
&self.turn_ttl.to_string()), ("Turn URIs", { let mut lst = vec![]; - for item in self.turn_uris.to_vec().into_iter().enumerate() { + for item in self.turn_uris.iter().cloned().enumerate() { let (_, uri): (usize, String) = item; lst.push(uri); } @@ -191,7 +191,7 @@ impl fmt::Display for Config { }), ]; - let mut msg: String = "Active config values:\n\n".to_string(); + let mut msg: String = "Active config values:\n\n".to_owned(); for line in lines.into_iter().enumerate() { msg += &format!("{}: {}\n", line.1 .0, line.1 .1); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b203107..50a6fac 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -22,11 +22,10 @@ impl service::pusher::Data for KeyValueDatabase { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(ids.pushkey.as_bytes()); - return self - .senderkey_pusher + self.senderkey_pusher .remove(&key) .map(|_| ()) - .map_err(Into::into); + .map_err(Into::into) } } } diff --git a/src/lib.rs b/src/lib.rs index 3d7f7ae..dc6a9d2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,7 +23,7 @@ pub use utils::error::{Error, Result}; pub static SERVICES: RwLock> = RwLock::new(None); -pub fn services<'a>() -> &'static Services { +pub fn services() -> &'static Services { SERVICES .read() .unwrap() diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 5766b2f..e2b2fd8 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -599,7 +599,8 @@ impl Service { } } AdminCommand::CreateUser { username, password } => { - let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + let password = + password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); // Validate user id let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), @@ -732,9 +733,8 @@ impl Service { } for &user_id in &user_ids { - match services().users.deactivate_account(user_id) { - Ok(_) => deactivation_count += 1, - Err(_) => {} + if services().users.deactivate_account(user_id).is_ok() { + deactivation_count += 1 } } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index cd11d71..d3d157c 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -42,7 +42,7 @@ impl Service { self.db.get_pushers(sender) } - pub fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box>> { + pub fn get_pushkeys(&self, sender: &UserId) -> Box>> { self.db.get_pushkeys(sender) } @@ -296,8 +296,8 @@ impl Service { Ok(()) } // TODO: Handle email - PusherKind::Email(_) => return Ok(()), - _ => return Ok(()), + PusherKind::Email(_) => Ok(()), + _ => Ok(()), } } } diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 82c0800..6eef38f 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -2,7 +2,7 @@ use crate::Result; use ruma::RoomId; pub trait Data: Send + Sync { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index f52ea72..96116b0 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -22,7 +22,7 @@ pub trait Data: Send + Sync { fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; /// Replace the forward extremities of the room. 
- fn set_forward_extremities<'a>( + fn set_forward_extremities( &self, room_id: &RoomId, event_ids: Vec, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 0e45032..3072b80 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -343,7 +343,7 @@ impl Service { self.db.get_forward_extremities(room_id) } - pub fn set_forward_extremities<'a>( + pub fn set_forward_extremities( &self, room_id: &RoomId, event_ids: Vec, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 2266d97..672e502 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -40,10 +40,10 @@ impl Service { self.db.get_token_shortstatehash(room_id, token) } - pub fn get_shared_rooms<'a>( - &'a self, + pub fn get_shared_rooms( + &self, users: Vec, - ) -> Result> + 'a> { + ) -> Result>> { self.db.get_shared_rooms(users) } } From b59304a4df970f1d74b8e15b733f6754125b6fec Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 25 Sep 2022 11:46:18 +0200 Subject: [PATCH 1306/1727] Reduce length of generated access tokens and session ids Reduces generated tokens and session ids down to 32 characters (~190 bits of entropy) in length --- src/api/client_server/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 65b7a10..6ed17e7 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -63,6 +63,6 @@ pub use user_directory::*; pub use voip::*; pub const DEVICE_ID_LENGTH: usize = 10; -pub const TOKEN_LENGTH: usize = 256; -pub const SESSION_ID_LENGTH: usize = 256; +pub const TOKEN_LENGTH: usize = 32; +pub const SESSION_ID_LENGTH: usize = 32; pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; From 583aea187b24e3a8d2a582a2cf6aae34bf04a8ce Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Fri, 25 Nov 2022 23:13:58 +0200 Subject: [PATCH 1307/1727] Update Cargo.lock --- Cargo.lock | 114 ++++++++++++++++++++++++++--------------------------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75b1222..db8ee9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,9 +21,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -291,15 +291,15 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "cc" -version = "1.0.76" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" dependencies = [ "jobserver", ] @@ -332,9 +332,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.22" +version = "4.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91b9970d7505127a162fdaa9b96428d28a479ba78c9ec7550a63a5d9863db682" +checksum = 
"0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966" dependencies = [ "bitflags", "clap_derive", @@ -372,7 +372,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.5.0-alpha.next" +version = "0.4.0-next" dependencies = [ "async-trait", "axum", @@ -424,9 +424,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "constant_time_eq" @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", "cfg-if", @@ -533,9 +533,9 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if", "crossbeam-utils", @@ -543,9 +543,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", ] @@ -600,9 +600,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", "crypto-common", @@ -707,12 +707,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", - "miniz_oxide 0.5.4", + "miniz_oxide", ] [[package]] @@ -1002,7 +1002,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -1082,9 +1082,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" dependencies = [ "http", "hyper", @@ -1116,9 +1116,9 @@ dependencies = [ [[package]] name = "image" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd8e4fb07cf672b1642304e731ef8a6a4c7891d67bb4fd4f5ce58cd6ed86803c" +checksum = 
"69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945" dependencies = [ "bytemuck", "byteorder", @@ -1132,9 +1132,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown", @@ -1155,14 +1155,14 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" +checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ "socket2", "widestring", "winapi", - "winreg", + "winreg 0.10.1", ] [[package]] @@ -1197,9 +1197,9 @@ dependencies = [ [[package]] name = "jpeg-decoder" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9478aa10f73e7528198d75109c8be5cd7d15fb530238040148d5f9a22d4c5b3b" +checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" [[package]] name = "js-sys" @@ -1401,9 +1401,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] @@ -1420,15 +1420,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -1621,9 +1612,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.3.1" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "overload" @@ -1787,7 +1778,7 @@ dependencies = [ "bitflags", "crc32fast", "flate2", - "miniz_oxide 0.6.2", + "miniz_oxide", ] [[package]] @@ -2020,7 +2011,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.7.0", ] [[package]] @@ -2369,9 +2360,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "itoa", "ryu", @@ -2411,7 +2402,7 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2422,7 +2413,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - 
"digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2701,9 +2692,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg", "bytes", @@ -3035,9 +3026,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom 0.2.8", ] @@ -3317,6 +3308,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + [[package]] name = "yansi" version = "0.5.1" From bcd522e75f29730109320850cb908204cb1857bb Mon Sep 17 00:00:00 2001 From: Orhideous Date: Sun, 27 Nov 2022 20:15:47 +0000 Subject: [PATCH 1308/1727] Added cross-compilation instructions to DEPLOY.md --- DEPLOY.md | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index c484823..89631f5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -49,7 +49,25 @@ $ sudo apt install libclang-dev build-essential $ cargo build --release ``` -If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). +If you want to cross compile Conduit to another architecture, read the guide below. + +
+Cross compilation
+
+The easiest way to compile Conduit for another platform is [cross-rs](https://github.com/cross-rs/cross), so install it first.
+
+In order to use RocksDB as the storage backend, append `-latomic` to the linker flags.
+
+For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as the compilation
+target.
+
+```bash
+git clone https://gitlab.com/famedly/conduit.git
+cd conduit
+export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
+cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
+```
+
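If the build succeeds, the binary should land under `target/arm-unknown-linux-gnueabihf/release/conduit` (assuming the default cargo/cross output layout and the `conduit` binary name from Cargo.toml). A quick, optional sanity check before copying it to the device is to confirm the architecture with `file`:

```bash
# Optional check, assuming the default target directory layout:
file target/arm-unknown-linux-gnueabihf/release/conduit
# should report something like: ELF 32-bit LSB executable, ARM, EABI5 ...
```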
                ## Adding a Conduit user From 66ad114e1909ddde5a17005df9c332605a9f9d60 Mon Sep 17 00:00:00 2001 From: Vladan Popovic Date: Fri, 25 Nov 2022 21:20:45 +0100 Subject: [PATCH 1309/1727] feat: add systemd feature flag --- Cargo.lock | 7 +++++++ Cargo.toml | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index db8ee9d..6ae1836 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -404,6 +404,7 @@ dependencies = [ "ruma", "rusqlite", "rust-argon2", + "sd-notify", "serde", "serde_json", "serde_yaml", @@ -2315,6 +2316,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sd-notify" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" + [[package]] name = "security-framework" version = "2.7.0" diff --git a/Cargo.toml b/Cargo.toml index ae51945..801d363 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,8 +93,10 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo lazy_static = "1.4.0" async-trait = "0.1.57" +sd-notify = { version = "0.4.1", optional = true } + [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc", "systemd"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] @@ -103,6 +105,7 @@ backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] +systemd = ["sd-notify"] [[bin]] name = "conduit" From 06d3efc4d0c98fbb0f5973929cceede77c0f7e44 Mon Sep 17 00:00:00 2001 From: Vladan Popovic Date: Fri, 25 Nov 2022 21:23:21 +0100 Subject: [PATCH 1310/1727] feat(systemd): call sd-notify after init and before exit --- src/main.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index d2183a3..e754b84 100644 --- a/src/main.rs +++ b/src/main.rs @@ -186,10 +186,20 @@ async fn run_server() -> io::Result<()> { match &config.tls { Some(tls) => { let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; - bind_rustls(addr, conf).handle(handle).serve(app).await?; + let server = bind_rustls(addr, conf).handle(handle).serve(app); + + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + + server.await? } None => { - bind(addr).handle(handle).serve(app).await?; + let server = bind(addr).handle(handle).serve(app); + + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + + server.await? 
} } @@ -197,6 +207,9 @@ async fn run_server() -> io::Result<()> { info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); services().globals.rotate.fire(); + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); + Ok(()) } From b9fd6127e22a72d6563446572577a2e110cca95a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 27 Nov 2022 23:25:42 +0100 Subject: [PATCH 1311/1727] fix: rejoining restricted rooms over federation --- src/api/client_server/membership.rs | 368 ++++++++++++++++++++----- src/api/server_server.rs | 48 ++-- src/service/pdu.rs | 14 +- src/service/rooms/event_handler/mod.rs | 16 +- 4 files changed, 352 insertions(+), 94 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 2267cbf..f6e94e6 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -12,12 +12,15 @@ use ruma::{ }, canonical_json::to_canonical_value, events::{ - room::member::{MembershipState, RoomMemberEventContent}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, RoomEventType, StateEventType, }, serde::Base64, - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -480,33 +483,10 @@ async fn join_room_by_id_helper( .state_cache .server_in_room(services().globals.server_name(), room_id)? { - let mut make_join_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in joining.", - )); + let (make_join_response, remote_server) = + make_join_request(sender_user, room_id, servers).await?; - for remote_server in servers { - let make_join_response = services() - .sending - .send_federation_request( - remote_server, - federation::membership::prepare_join_event::v1::Request { - room_id, - user_id: sender_user, - ver: &services().globals.supported_room_versions(), - }, - ) - .await; - - make_join_response_and_server = make_join_response.map(|r| (r, remote_server)); - - if make_join_response_and_server.is_ok() { - break; - } - } - - let (make_join_response, remote_server) = make_join_response_and_server?; - - let room_version = match make_join_response.room_version { + let room_version_id = match make_join_response.room_version { Some(room_version) if services() .globals @@ -568,14 +548,14 @@ async fn join_room_by_id_helper( services().globals.server_name().as_str(), services().globals.keypair(), &mut join_event_stub, - &room_version, + &room_version_id, ) .expect("event is valid, we just created it"); // Generate event id let event_id = format!( "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version) + ruma::signatures::reference_hash(&join_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") ); let event_id = <&EventId>::try_from(event_id.as_str()) @@ -588,12 +568,12 @@ async fn join_room_by_id_helper( ); // It has enough fields to be called a proper event now - let join_event = join_event_stub; + let mut join_event = join_event_stub; let send_join_response = services() .sending .send_federation_request( - remote_server, + &remote_server, 
federation::membership::create_join_event::v2::Request { room_id, event_id, @@ -602,9 +582,53 @@ async fn join_room_by_id_helper( ) .await?; + if let Some(signed_raw) = &send_join_response.room_state.event { + let (signed_event_id, signed_value) = + match gen_event_id_canonical_json(&signed_raw, &room_version_id) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + if &signed_event_id != event_id { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent event with wrong event id", + )); + } + + if let Ok(signature) = signed_value["signatures"] + .as_object() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent invalid signatures type", + )) + .and_then(|e| { + e.get(remote_server.as_str()).ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Server did not send its signature", + )) + }) + { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + } else { + warn!("Server {} sent invalid sendjoin event", remote_server); + } + } + services().rooms.short.get_or_create_shortroomid(room_id)?; - let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) + let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); @@ -613,14 +637,14 @@ async fn join_room_by_id_helper( services() .rooms .event_handler - .fetch_join_signing_keys(&send_join_response, &room_version, &pub_key_map) + .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) .await?; for result in send_join_response .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -645,31 +669,11 @@ async fn join_room_by_id_helper( } } - let incoming_shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &parsed_pdu.kind.to_string().into(), - parsed_pdu - .state_key - .as_ref() - .expect("Pdu is a membership state event"), - )?; - - state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); - - let create_shortstatekey = services() - .rooms - .short - .get_shortstatekey(&StateEventType::RoomCreate, "")? - .expect("Room exists"); - - if state.get(&create_shortstatekey).is_none() { - return Err(Error::BadServerResponse("State contained no create event.")); - } - for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -682,6 +686,34 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } + if !state_res::event_auth::auth_check( + &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), + &parsed_join_pdu, + None::, // TODO: third party invite + |k, s| { + services() + .rooms + .timeline + .get_pdu( + state.get( + &services() + .rooms + .short + .get_or_create_shortstatekey(&k.to_string().into(), s) + .ok()?, + )?, + ) + .ok()? + }, + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? 
+ { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth check failed", + )); + } + let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, state @@ -705,12 +737,12 @@ async fn join_room_by_id_helper( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehash_after_join = services().rooms.state.append_to_state(&parsed_pdu)?; + let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; services().rooms.timeline.append_pdu( - &parsed_pdu, + &parsed_join_pdu, join_event, - vec![(*parsed_pdu.event_id).to_owned()], + vec![(*parsed_join_pdu.event_id).to_owned()], &state_lock, )?; @@ -732,7 +764,8 @@ async fn join_room_by_id_helper( join_authorized_via_users_server: None, }; - services().rooms.timeline.build_and_append_pdu( + // Try normal join first + let error = match services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -743,14 +776,216 @@ async fn join_room_by_id_helper( sender_user, room_id, &state_lock, + ) { + Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), + Err(e) => e, + }; + + // TODO: Conduit does not implement restricted join rules yet, we always ask over + // federation + let join_rules_event = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomJoinRules, + "", )?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if matches!( + join_rules_event_content, + Some(RoomJoinRulesEventContent { + join_rule: JoinRule::Restricted { .. } + }) | Some(RoomJoinRulesEventContent { + join_rule: JoinRule::KnockRestricted { .. } + }) + ) { + let (make_join_response, remote_server) = + make_join_request(sender_user, room_id, servers).await?; + + let room_version_id = match make_join_response.room_version { + Some(room_version_id) + if services() + .globals + .supported_room_versions() + .contains(&room_version_id) => + { + room_version_id + } + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|_| { + Error::BadServerResponse("Invalid make_join event json received from server.") + })?; + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + // TODO: Is origin needed? 
+ join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server, + }) + .expect("event is valid, we just created it"), + ); + + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + join_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut join_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&join_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + join_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id, + event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + }, + ) + .await?; + + if let Some(signed_raw) = send_join_response.room_state.event { + let (signed_event_id, signed_value) = + match gen_event_id_canonical_json(&signed_raw, &room_version_id) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + if &signed_event_id != event_id { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent event with wrong event id", + )); + } + + drop(state_lock); + let pub_key_map = RwLock::new(BTreeMap::new()); + services() + .rooms + .event_handler + .handle_incoming_pdu( + &remote_server, + &signed_event_id, + room_id, + signed_value, + true, + &pub_key_map, + ) + .await?; + } else { + return Err(error); + } + } else { + return Err(error); + } } - drop(state_lock); - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } +async fn make_join_request( + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<( + federation::membership::prepare_join_event::v1::Response, + OwnedServerName, +)> { + let mut make_join_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in joining.", + )); + + for remote_server in servers { + if remote_server == services().globals.server_name() { + continue; + } + let make_join_response = services() + .sending + .send_federation_request( + remote_server, + 
federation::membership::prepare_join_event::v1::Request { + room_id, + user_id: sender_user, + ver: &services().globals.supported_room_versions(), + }, + ) + .await; + + make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); + + if make_join_response_and_server.is_ok() { + break; + } + } + + make_join_response_and_server +} + fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, @@ -870,6 +1105,8 @@ pub(crate) async fn invite_helper<'a>( (pdu, pdu_json, invite_room_state) }; + let room_version_id = &services().rooms.state.get_room_version(room_id)?; + let response = services() .sending .send_federation_request( @@ -877,7 +1114,7 @@ pub(crate) async fn invite_helper<'a>( create_invite::v2::Request { room_id, event_id: &pdu.event_id, - room_version: &services().rooms.state.get_room_version(room_id)?, + room_version: &room_version_id, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -887,7 +1124,8 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(&response.event) { + let (event_id, value) = match gen_event_id_canonical_json(&response.event, room_version_id) + { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f66e96c..44babe5 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -42,8 +42,8 @@ use ruma::{ }, serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, - CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -664,16 +664,11 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(pdu) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - continue; - } - }; + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; - // 0. 
Check the server is in the room let room_id: OwnedRoomId = match value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) @@ -681,14 +676,26 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert( - event_id, - Err(Error::bad_database("Event needs a valid RoomId.")), - ); continue; } }; + let room_version_id = match services().rooms.state.get_room_version(&room_id) { + Ok(v) => v, + Err(_) => { + continue; + } + }; + + let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + continue; + } + }; + // We do not add the event_id field to the pdu here because of signature and hashes checks + services() .rooms .event_handler @@ -1407,7 +1414,8 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(pdu) { + let room_version_id = services().rooms.state.get_room_version(room_id)?; + let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1610,8 +1618,12 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); - // If the room already exists, the remote server will notify us about the join via /send - if !services().rooms.metadata.exists(&pdu.room_id)? { + // If we are active in the room, the remote server will notify us about the join via /send + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), &body.room_id)? + { services().rooms.state_cache.update_membership( &body.room_id, &invited_user, diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 593a687..554f3be 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{services, Error}; +use crate::Error; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, @@ -7,7 +7,7 @@ use ruma::{ }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, + OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -334,23 +334,17 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, + room_version_id: &RoomVersionId, ) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let room_id = value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; - - let room_version_id = services().rooms.state.get_room_version(&room_id); - let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &room_version_id?) 
+ ruma::signatures::reference_hash(&value, room_version_id) .expect("ruma can calculate reference hashes") ) .try_into() diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 3b41e86..b941aa1 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -76,6 +76,7 @@ impl Service { is_timeline_event: bool, pub_key_map: &'a RwLock>>, ) -> Result>> { + // 0. Check the server is in the room if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, @@ -101,6 +102,13 @@ impl Service { .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; + let room_version_id = &create_event_content.room_version; + let first_pdu_in_room = services() .rooms .timeline @@ -127,6 +135,7 @@ impl Service { origin, &create_event, room_id, + room_version_id, pub_key_map, incoming_pdu.prev_events.clone(), ) @@ -340,6 +349,7 @@ impl Service { .collect::>(), create_event, room_id, + room_version_id, pub_key_map, ) .await; @@ -644,6 +654,7 @@ impl Service { .collect::>(), create_event, room_id, + room_version_id, pub_key_map, ) .await; @@ -1024,6 +1035,7 @@ impl Service { events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, + room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { @@ -1106,7 +1118,7 @@ impl Service { Ok(res) => { info!("Got {} over federation", next_id); let (calculated_event_id, value) = - match pdu::gen_event_id_canonical_json(&res.pdu) { + match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -1179,6 +1191,7 @@ impl Service { origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, + room_version_id: &RoomVersionId, pub_key_map: &RwLock>>, initial_set: Vec>, ) -> Result<( @@ -1204,6 +1217,7 @@ impl Service { &[prev_event_id.clone()], create_event, room_id, + room_version_id, pub_key_map, ) .await From 7fd5b22e3b03cd6f10bb40b1dda05a3815ee25b2 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 16 Dec 2022 10:12:11 +0100 Subject: [PATCH 1312/1727] The procMacro option has long been on by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … and it's good to let people have their own local configs that won't be tracked by git. 
--- .gitignore | 1 - .vscode/settings.json | 3 --- 2 files changed, 4 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.gitignore b/.gitignore index 19f05ce..203fe05 100644 --- a/.gitignore +++ b/.gitignore @@ -31,7 +31,6 @@ modules.xml ### vscode ### .vscode/* -!.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 95294d4..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rust-analyzer.procMacro.enable": true, -} \ No newline at end of file From d39ce1401d773a0b68515b8ae1a434f37abfc650 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 14 Dec 2022 13:09:10 +0100 Subject: [PATCH 1313/1727] WIP: Upgrade Ruma --- Cargo.lock | 22 ++++---- Cargo.toml | 2 +- src/api/client_server/account.rs | 14 ++--- src/api/client_server/alias.rs | 26 +++++---- src/api/client_server/backup.rs | 24 ++++---- src/api/client_server/capabilities.rs | 2 +- src/api/client_server/config.rs | 8 +-- src/api/client_server/context.rs | 2 +- src/api/client_server/device.rs | 8 +-- src/api/client_server/directory.rs | 25 ++++----- src/api/client_server/filter.rs | 4 +- src/api/client_server/keys.rs | 8 +-- src/api/client_server/media.rs | 20 +++---- src/api/client_server/membership.rs | 75 +++++++++++++------------ src/api/client_server/message.rs | 4 +- src/api/client_server/presence.rs | 4 +- src/api/client_server/profile.rs | 20 +++---- src/api/client_server/push.rs | 26 ++++----- src/api/client_server/read_marker.rs | 4 +- src/api/client_server/redact.rs | 2 +- src/api/client_server/report.rs | 2 +- src/api/client_server/room.rs | 8 +-- src/api/client_server/search.rs | 2 +- src/api/client_server/session.rs | 12 ++-- src/api/client_server/state.rs | 10 ++-- src/api/client_server/sync.rs | 18 +++--- src/api/client_server/tag.rs | 8 +-- src/api/client_server/thirdparty.rs | 2 +- src/api/client_server/to_device.rs | 2 +- src/api/client_server/typing.rs | 2 +- src/api/client_server/unversioned.rs | 2 +- src/api/client_server/user_directory.rs | 2 +- src/api/client_server/voip.rs | 2 +- src/api/server_server.rs | 44 +++++++-------- src/database/key_value/users.rs | 10 +--- src/service/rooms/event_handler/mod.rs | 10 ++-- src/service/sending/mod.rs | 8 +-- src/service/uiaa/mod.rs | 12 ++-- src/service/users/data.rs | 10 +--- src/service/users/mod.rs | 10 +--- src/utils/error.rs | 5 +- 41 files changed, 231 insertions(+), 250 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ae1836..a659dec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2053,7 +2053,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "assign", "js_int", @@ -2071,7 +2071,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "js_int", "ruma-common", @@ -2082,7 +2082,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.3" -source = 
"git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "assign", "bytes", @@ -2099,7 +2099,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "base64", "bytes", @@ -2127,7 +2127,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "js_int", "ruma-common", @@ -2138,7 +2138,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "js_int", "thiserror", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "js_int", "ruma-common", @@ -2157,7 +2157,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "once_cell", "proc-macro-crate", @@ -2172,7 +2172,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "js_int", "ruma-common", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "base64", "ed25519-dalek", @@ -2199,7 +2199,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" +source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 
801d363..87102c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "2bd5c131f49b2239750c39ed63b623cd5a01c965", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "af28dc8339773e5cad460289fa3c4e22d9a058cd", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 309a361..50a6a18 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -30,7 +30,7 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( - body: Ruma, + body: Ruma, ) -> Result { // Validate user id let user_id = UserId::parse_with_server_name( @@ -73,9 +73,7 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -pub async fn register_route( - body: Ruma, -) -> Result { +pub async fn register_route(body: Ruma) -> Result { if !services().globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -266,7 +264,7 @@ pub async fn register_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn change_password_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -354,7 +352,7 @@ pub async fn whoami_route(body: Ruma) -> Result, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -426,7 +424,7 @@ pub async fn third_party_route( /// /// - 403 signals that The homeserver does not allow the third party identifier as a contact option. 
pub async fn request_3pid_management_token_via_email_route( - _body: Ruma, + _body: Ruma, ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, @@ -440,7 +438,7 @@ pub async fn request_3pid_management_token_via_email_route( /// /// - 403 signals that The homeserver does not allow the third party identifier as a contact option. pub async fn request_3pid_management_token_via_msisdn_route( - _body: Ruma, + _body: Ruma, ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index b28606c..ab51b50 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -9,14 +9,14 @@ use ruma::{ }, federation, }, - RoomAliasId, + OwnedRoomAliasId, }; /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. pub async fn create_alias_route( - body: Ruma, + body: Ruma, ) -> Result { if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( @@ -49,7 +49,7 @@ pub async fn create_alias_route( /// - TODO: additional access control checks /// - TODO: Update canonical alias event pub async fn delete_alias_route( - body: Ruma, + body: Ruma, ) -> Result { if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( @@ -71,18 +71,22 @@ pub async fn delete_alias_route( /// /// - TODO: Suggest more servers to join via pub async fn get_alias_route( - body: Ruma, + body: Ruma, ) -> Result { - get_alias_helper(&body.room_alias).await + get_alias_helper(body.body.room_alias).await } -pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result { +pub(crate) async fn get_alias_helper( + room_alias: OwnedRoomAliasId, +) -> Result { if room_alias.server_name() != services().globals.server_name() { let response = services() .sending .send_federation_request( room_alias.server_name(), - federation::query::get_room_information::v1::Request { room_alias }, + federation::query::get_room_information::v1::Request { + room_alias: room_alias.to_owned(), + }, ) .await?; @@ -93,7 +97,7 @@ pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result room_id = Some(r), None => { for (_id, registration) in services().appservice.all()? { @@ -115,7 +119,9 @@ pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result Result, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); services() @@ -66,7 +66,7 @@ pub async fn get_latest_backup_info_route( /// /// Get information about an existing backup. 
pub async fn get_backup_info_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = services() @@ -96,7 +96,7 @@ pub async fn get_backup_info_route( /// /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -115,7 +115,7 @@ pub async fn delete_backup_version_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -162,7 +162,7 @@ pub async fn add_backup_keys_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -207,7 +207,7 @@ pub async fn add_backup_keys_for_room_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -246,7 +246,7 @@ pub async fn add_backup_keys_for_session_route( /// /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -259,7 +259,7 @@ pub async fn get_backup_keys_route( /// /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -274,7 +274,7 @@ pub async fn get_backup_keys_for_room_route( /// /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -293,7 +293,7 @@ pub async fn get_backup_keys_for_session_route( /// /// Delete the keys from the backup. pub async fn delete_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -316,7 +316,7 @@ pub async fn delete_backup_keys_route( /// /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -339,7 +339,7 @@ pub async fn delete_backup_keys_for_room_route( /// /// Delete a key from the backup. pub async fn delete_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 31d42d2..233e3c9 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -8,7 +8,7 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. 
pub async fn get_capabilities_route( - _body: Ruma, + _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); for room_version in &services().globals.unstable_room_versions { diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index dbd2b2c..12f9aea 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -17,7 +17,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// /// Sets some account data for the sender user. pub async fn set_global_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -43,7 +43,7 @@ pub async fn set_global_account_data_route( /// /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -69,7 +69,7 @@ pub async fn set_room_account_data_route( /// /// Gets some account data for the sender user. pub async fn get_global_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -89,7 +89,7 @@ pub async fn get_global_account_data_route( /// /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 2e0f257..1e62f91 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -13,7 +13,7 @@ use tracing::error; /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) pub async fn get_context_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index d4c4178..aba061b 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -28,7 +28,7 @@ pub async fn get_devices_route( /// /// Get metadata on a single device of the sender user. pub async fn get_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -44,7 +44,7 @@ pub async fn get_device_route( /// /// Updates the metadata on a given device of the sender user. 
pub async fn update_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -72,7 +72,7 @@ pub async fn update_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -126,7 +126,7 @@ pub async fn delete_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_devices_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index f07a225..645710f 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -11,10 +11,7 @@ use ruma::{ }, federation, }, - directory::{ - Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk, - RoomNetwork, - }, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, events::{ room::{ avatar::RoomAvatarEventContent, @@ -38,7 +35,7 @@ use tracing::{error, info, warn}; /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( - body: Ruma, + body: Ruma, ) -> Result { get_public_rooms_filtered_helper( body.server.as_deref(), @@ -56,14 +53,14 @@ pub async fn get_public_rooms_filtered_route( /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( - body: Ruma, + body: Ruma, ) -> Result { let response = get_public_rooms_filtered_helper( body.server.as_deref(), body.limit, body.since.as_deref(), - &IncomingFilter::default(), - &IncomingRoomNetwork::Matrix, + &Filter::default(), + &RoomNetwork::Matrix, ) .await?; @@ -81,7 +78,7 @@ pub async fn get_public_rooms_route( /// /// - TODO: Access control checks pub async fn set_room_visibility_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -111,7 +108,7 @@ pub async fn set_room_visibility_route( /// /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().rooms.metadata.exists(&body.room_id)? 
{ // Return 404 if the room doesn't exist @@ -131,8 +128,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( server: Option<&ServerName>, limit: Option, since: Option<&str>, - filter: &IncomingFilter, - _network: &IncomingRoomNetwork, + filter: &Filter, + _network: &RoomNetwork, ) -> Result { if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) @@ -143,9 +140,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, - since, + since: since.map(ToOwned::to_owned), filter: Filter { - generic_search_term: filter.generic_search_term.as_deref(), + generic_search_term: filter.generic_search_term.clone(), room_types: filter.room_types.clone(), }, room_network: RoomNetwork::Matrix, diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index a0d5a19..e9a359d 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -10,7 +10,7 @@ use ruma::api::client::{ /// /// - A user can only access their own filters pub async fn get_filter_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match services().users.get_filter(sender_user, &body.filter_id)? { @@ -25,7 +25,7 @@ pub async fn get_filter_route( /// /// Creates a new filter to be used by other endpoints. pub async fn create_filter_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index b649166..ba89ece 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -65,9 +65,7 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. 
/// - The master and self-signing keys contain signatures that the user is allowed to see -pub async fn get_keys_route( - body: Ruma, -) -> Result { +pub async fn get_keys_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = @@ -93,7 +91,7 @@ pub async fn claim_keys_route( /// /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -214,7 +212,7 @@ pub async fn upload_signatures_route( /// /// - TODO: left users pub async fn get_key_changes_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index fa6def0..3410cc0 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -27,7 +27,7 @@ pub async fn get_media_config_route( /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory pub async fn create_content_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", @@ -57,7 +57,7 @@ pub async fn create_content_route( pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, - media_id: &str, + media_id: String, ) -> Result { let content_response = services() .sending @@ -65,7 +65,7 @@ pub async fn get_remote_content( server_name, get_content::v3::Request { allow_remote: false, - server_name, + server_name: server_name.to_owned(), media_id, }, ) @@ -90,7 +90,7 @@ pub async fn get_remote_content( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -108,7 +108,7 @@ pub async fn get_content_route( }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -121,7 +121,7 @@ pub async fn get_content_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -139,7 +139,7 @@ pub async fn get_content_as_filename_route( }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), @@ -158,7 +158,7 @@ pub async fn get_content_as_filename_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -192,8 +192,8 @@ pub async fn get_content_thumbnail_route( height: body.height, width: body.width, method: body.method.clone(), - server_name: &body.server_name, - media_id: 
&body.media_id, + server_name: body.server_name.clone(), + media_id: body.media_id.clone(), }, ) .await?; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index f6e94e6..8674a60 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -5,7 +5,7 @@ use ruma::{ membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, IncomingThirdPartySigned, + unban_user, ThirdPartySigned, }, }, federation::{self, membership::create_invite}, @@ -44,7 +44,7 @@ use super::get_alias_helper; /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -81,7 +81,7 @@ pub async fn join_room_by_id_route( /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -107,7 +107,7 @@ pub async fn join_room_by_id_or_alias_route( (servers, room_id) } Err(room_alias) => { - let response = get_alias_helper(&room_alias).await?; + let response = get_alias_helper(room_alias).await?; (response.servers.into_iter().collect(), response.room_id) } @@ -132,7 +132,7 @@ pub async fn join_room_by_id_or_alias_route( /// /// - This should always work if the user is currently joined. pub async fn leave_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -145,11 +145,11 @@ pub async fn leave_room_route( /// /// Tries to send an invite event into the room. pub async fn invite_user_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, false).await?; Ok(invite_user::v3::Response {}) } else { @@ -161,7 +161,7 @@ pub async fn invite_user_route( /// /// Tries to send a kick event into the room. pub async fn kick_user_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -218,9 +218,7 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -pub async fn ban_user_route( - body: Ruma, -) -> Result { +pub async fn ban_user_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -287,7 +285,7 @@ pub async fn ban_user_route( /// /// Tries to send an unban event into the room. 
pub async fn unban_user_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -349,7 +347,7 @@ pub async fn unban_user_route( /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device pub async fn forget_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -385,7 +383,7 @@ pub async fn joined_rooms_route( /// /// - Only works if the user is currently joined pub async fn get_member_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -421,7 +419,7 @@ pub async fn get_member_events_route( /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -462,7 +460,7 @@ async fn join_room_by_id_helper( sender_user: Option<&UserId>, room_id: &RoomId, servers: &[OwnedServerName], - _third_party_signed: Option<&IncomingThirdPartySigned>, + _third_party_signed: Option<&ThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -575,9 +573,9 @@ async fn join_room_by_id_helper( .send_federation_request( &remote_server, federation::membership::create_join_event::v2::Request { - room_id, - event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + room_id: room_id.to_owned(), + event_id: event_id.to_owned(), + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; @@ -896,9 +894,9 @@ async fn join_room_by_id_helper( .send_federation_request( &remote_server, federation::membership::create_join_event::v2::Request { - room_id, - event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + room_id: room_id.to_owned(), + event_id: event_id.to_owned(), + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; @@ -969,9 +967,9 @@ async fn make_join_request( .send_federation_request( remote_server, federation::membership::prepare_join_event::v1::Request { - room_id, - user_id: sender_user, - ver: &services().globals.supported_room_versions(), + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services().globals.supported_room_versions(), }, ) .await; @@ -1105,18 +1103,18 @@ pub(crate) async fn invite_helper<'a>( (pdu, pdu_json, invite_room_state) }; - let room_version_id = &services().rooms.state.get_room_version(room_id)?; + let room_version_id = services().rooms.state.get_room_version(room_id)?; let response = services() .sending .send_federation_request( user_id.server_name(), create_invite::v2::Request { - room_id, - event_id: &pdu.event_id, - room_version: &room_version_id, - event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), - invite_room_state: &invite_room_state, + room_id: room_id.to_owned(), + event_id: (&*pdu.event_id).to_owned(), + room_version: room_version_id.clone(), + event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), + invite_room_state, }, ) .await?; @@ -1124,7 +1122,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match 
gen_event_id_canonical_json(&response.event, room_version_id) + let (event_id, value) = match gen_event_id_canonical_json(&response.event, &room_version_id) { Ok(t) => t, Err(_) => { @@ -1136,7 +1134,7 @@ pub(crate) async fn invite_helper<'a>( } }; - if pdu.event_id != event_id { + if *pdu.event_id != *event_id { warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } @@ -1363,7 +1361,10 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { .sending .send_federation_request( &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + federation::membership::prepare_leave_event::v1::Request { + room_id: room_id.to_owned(), + user_id: user_id.to_owned(), + }, ) .await; @@ -1440,9 +1441,9 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { .send_federation_request( &remote_server, federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + room_id: room_id.to_owned(), + event_id, + pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), }, ) .await?; diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index b04c262..6ad0751 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -19,7 +19,7 @@ use std::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -105,7 +105,7 @@ pub async fn send_message_event_route( /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) pub async fn get_message_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index dfac3db..ef88d1a 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -6,7 +6,7 @@ use std::time::Duration; /// /// Sets the presence state of the sender user. 
pub async fn set_presence_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -43,7 +43,7 @@ pub async fn set_presence_route( /// /// - Only works if you share a room with the user pub async fn get_presence_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 5ace177..6400e89 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -20,7 +20,7 @@ use std::sync::Arc; /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -121,7 +121,7 @@ pub async fn set_displayname_route( /// /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -129,8 +129,8 @@ pub async fn get_displayname_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: &body.user_id, - field: Some(&ProfileField::DisplayName), + user_id: body.user_id.clone(), + field: Some(ProfileField::DisplayName), }, ) .await?; @@ -151,7 +151,7 @@ pub async fn get_displayname_route( /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -256,7 +256,7 @@ pub async fn set_avatar_url_route( /// /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -264,8 +264,8 @@ pub async fn get_avatar_url_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: &body.user_id, - field: Some(&ProfileField::AvatarUrl), + user_id: body.user_id.clone(), + field: Some(ProfileField::AvatarUrl), }, ) .await?; @@ -288,7 +288,7 @@ pub async fn get_avatar_url_route( /// /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -296,7 +296,7 @@ pub async fn get_profile_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: &body.user_id, + user_id: body.user_id.clone(), field: None, }, ) diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index dc936a6..affd8e8 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -5,7 +5,7 @@ use ruma::{ push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, - set_pushrule_enabled, RuleKind, + set_pushrule_enabled, RuleKind, RuleScope, }, }, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, @@ -45,7 +45,7 @@ pub async fn get_pushrules_all_route( /// /// Retrieves a 
single specified push rule for this user. pub async fn get_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -104,12 +104,12 @@ pub async fn get_pushrule_route( /// /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -208,7 +208,7 @@ pub async fn set_pushrule_route( /// /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -269,11 +269,11 @@ pub async fn get_pushrule_actions_route( /// /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -344,11 +344,11 @@ pub async fn set_pushrule_actions_route( /// /// Gets the enabled status of a single specified push rule for this user. pub async fn get_pushrule_enabled_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -407,11 +407,11 @@ pub async fn get_pushrule_enabled_route( /// /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -487,11 +487,11 @@ pub async fn set_pushrule_enabled_route( /// /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index d529c6a..b12468a 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -89,7 +89,7 @@ pub async fn set_read_marker_route( /// /// Sets private read marker and public read receipt EDU. 
pub async fn create_receipt_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index ab586c0..a29a561 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -14,7 +14,7 @@ use serde_json::value::to_raw_value; /// /// - TODO: Handle txn id pub async fn redact_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index e45820e..ab5027c 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -10,7 +10,7 @@ use ruma::{ /// Reports an inappropriate event to homeserver admins /// pub async fn report_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 097f0e1..c77cfa9 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -46,7 +46,7 @@ use tracing::{info, warn}; /// - Send events implied by `name` and `topic` /// - Send invite events pub async fn create_room_route( - body: Ruma, + body: Ruma, ) -> Result { use create_room::v3::RoomPreset; @@ -421,7 +421,7 @@ pub async fn create_room_route( /// /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -452,7 +452,7 @@ pub async fn get_room_event_route( /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -488,7 +488,7 @@ pub async fn get_room_aliases_route( /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 5b634a4..51255d5 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 7c8c128..64c0072 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -4,7 +4,7 @@ use ruma::{ api::client::{ error::ErrorKind, session::{get_login_types, login, logout, logout_all}, - uiaa::IncomingUserIdentifier, + uiaa::UserIdentifier, }, UserId, }; @@ -22,7 +22,7 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. 
pub async fn get_login_types_route( - _body: Ruma, + _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(Default::default()), @@ -40,15 +40,15 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -pub async fn login_route(body: Ruma) -> Result { +pub async fn login_route(body: Ruma) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword { + login::v3::LoginInfo::Password(login::v3::Password { identifier, password, }) => { - let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier { + let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { user_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); @@ -84,7 +84,7 @@ pub async fn login_route(body: Ruma) -> Result { + login::v3::LoginInfo::Token(login::v3::Token { token }) => { if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 36466b8..d9c1464 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -25,7 +25,7 @@ use ruma::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -50,7 +50,7 @@ pub async fn send_state_event_for_key_route( /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -81,7 +81,7 @@ pub async fn send_state_event_for_empty_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -133,7 +133,7 @@ pub async fn get_state_events_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -188,7 +188,7 @@ pub async fn get_state_events_for_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 94e4f5b..43ca238 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,7 +1,7 @@ use crate::{services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ - filter::{IncomingFilterDefinition, LazyLoadOptions}, + filter::{FilterDefinition, LazyLoadOptions}, sync::sync_events::{self, DeviceLists, 
UnreadNotificationsCount}, uiaa::UiaaResponse, }, @@ -55,7 +55,7 @@ use tracing::error; /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached pub async fn sync_events_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); @@ -124,7 +124,7 @@ pub async fn sync_events_route( async fn sync_helper_wrapper( sender_user: OwnedUserId, sender_device: OwnedDeviceId, - body: sync_events::v3::IncomingRequest, + body: sync_events::v3::Request, tx: Sender>>, ) { let since = body.since.clone(); @@ -157,12 +157,12 @@ async fn sync_helper_wrapper( async fn sync_helper( sender_user: OwnedUserId, sender_device: OwnedDeviceId, - body: sync_events::v3::IncomingRequest, + body: sync_events::v3::Request, // bool = caching allowed ) -> Result<(sync_events::v3::Response, bool), Error> { use sync_events::v3::{ - Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, JoinedRoom, - LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, + Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, + Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, }; // TODO: match body.set_presence { @@ -176,9 +176,9 @@ async fn sync_helper( // Load filter let filter = match body.filter { - None => IncomingFilterDefinition::default(), - Some(IncomingFilter::FilterDefinition(filter)) => filter, - Some(IncomingFilter::FilterId(filter_id)) => services() + None => FilterDefinition::default(), + Some(Filter::FilterDefinition(filter)) => filter, + Some(Filter::FilterId(filter_id)) => services() .users .get_filter(&sender_user, &filter_id)? .unwrap_or_default(), diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index c87e233..16f1600 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -14,7 +14,7 @@ use std::collections::BTreeMap; /// /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -58,7 +58,7 @@ pub async fn update_tag_route( /// /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -98,9 +98,7 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -pub async fn get_tags_route( - body: Ruma, -) -> Result { +pub async fn get_tags_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event = services().account_data.get( diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs index 5665ad6..c2c1adf 100644 --- a/src/api/client_server/thirdparty.rs +++ b/src/api/client_server/thirdparty.rs @@ -7,7 +7,7 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. 
pub async fn get_protocols_route( - _body: Ruma, + _body: Ruma, ) -> Result { // TODO Ok(get_protocols::v3::Response { diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 139b845..26db4e4 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -14,7 +14,7 @@ use ruma::{ /// /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index ecc926f..43217e1 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -5,7 +5,7 @@ use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// /// Sets the typing state of the sender user. pub async fn create_typing_event_route( - body: Ruma, + body: Ruma, ) -> Result { use create_typing_event::v3::Typing; diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 8a5c3d2..526598b 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -15,7 +15,7 @@ use crate::{Result, Ruma}; /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases pub async fn get_supported_versions_route( - _body: Ruma, + _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { versions: vec![ diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index 518daa5..c30bac5 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -14,7 +14,7 @@ use ruma::{ /// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) /// and don't share a room with the sender pub async fn search_users_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 6b1ee40..4990c17 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -10,7 +10,7 @@ type HmacSha1 = Hmac; /// /// TODO: Returns information about the recommended turn server. 
pub async fn turn_server_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 12b255b..2b854a6 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -31,7 +31,7 @@ use ruma::{ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, SendAccessToken, }, - directory::{IncomingFilter, IncomingRoomNetwork}, + directory::{Filter, RoomNetwork}, events::{ receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, room::{ @@ -294,13 +294,7 @@ where } else { Err(Error::FederationError( destination.to_owned(), - RumaError::try_from_http_response(http_response).map_err(|e| { - warn!( - "Invalid {} response from {} on: {} {}", - status, &destination, url, e - ); - Error::BadServerResponse("Server returned bad error response.") - })?, + RumaError::from_http_response(http_response), )) } } @@ -586,7 +580,7 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { /// /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -613,7 +607,7 @@ pub async fn get_public_rooms_filtered_route( /// /// Lists the public rooms on this server. pub async fn get_public_rooms_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -623,8 +617,8 @@ pub async fn get_public_rooms_route( None, body.limit, body.since.as_deref(), - &IncomingFilter::default(), - &IncomingRoomNetwork::Matrix, + &Filter::default(), + &RoomNetwork::Matrix, ) .await?; @@ -640,7 +634,7 @@ pub async fn get_public_rooms_route( /// /// Push EDUs and PDUs to this server. pub async fn send_transaction_message_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -916,7 +910,7 @@ pub async fn send_transaction_message_route( /// /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -963,7 +957,7 @@ pub async fn get_event_route( /// /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1042,7 +1036,7 @@ pub async fn get_missing_events_route( /// /// - This does not include the event itself pub async fn get_event_authorization_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1101,7 +1095,7 @@ pub async fn get_event_authorization_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1181,7 +1175,7 @@ pub async fn get_room_state_route( /// /// Retrieves the current state of the room. 
pub async fn get_room_state_ids_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1242,7 +1236,7 @@ pub async fn get_room_state_ids_route( /// /// Creates a join template. pub async fn create_join_event_template_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1494,7 +1488,7 @@ async fn create_join_event( /// /// Submits a signed join event. pub async fn create_join_event_v1_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -1510,7 +1504,7 @@ pub async fn create_join_event_v1_route( /// /// Submits a signed join event. pub async fn create_join_event_v2_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -1526,7 +1520,7 @@ pub async fn create_join_event_v2_route( /// /// Invites a remote user to a room. pub async fn create_invite_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1643,7 +1637,7 @@ pub async fn create_invite_route( /// /// Gets information on all devices of the user. pub async fn get_devices_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1690,7 +1684,7 @@ pub async fn get_devices_route( /// /// Resolve a room alias to a room id. pub async fn get_room_information_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1715,7 +1709,7 @@ pub async fn get_room_information_route( /// /// Gets information on a profile. pub async fn get_profile_information_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index cd5a535..1cabab0 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, mem::size_of}; use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, @@ -899,7 +899,7 @@ impl service::users::Data for KeyValueDatabase { } /// Creates a new sync filter. Returns the filter id. 
- fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result { + fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { let filter_id = utils::random_string(4); let mut key = user_id.as_bytes().to_vec(); @@ -914,11 +914,7 @@ impl service::users::Data for KeyValueDatabase { Ok(filter_id) } - fn get_filter( - &self, - user_id: &UserId, - filter_id: &str, - ) -> Result> { + fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(filter_id.as_bytes()); diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index b352647..85d21bf 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -638,8 +638,8 @@ impl Service { .send_federation_request( origin, get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, + room_id: room_id.to_owned(), + event_id: (&*incoming_pdu.event_id).to_owned(), }, ) .await @@ -1112,7 +1112,9 @@ impl Service { .sending .send_federation_request( origin, - get_event::v1::Request { event_id: &next_id }, + get_event::v1::Request { + event_id: next_id.into(), + }, ) .await { @@ -1689,7 +1691,7 @@ impl Service { .send_federation_request( server, get_remote_server_keys::v2::Request::new( - origin, + origin.to_owned(), MilliSecondsSinceUnixEpoch::from_system_time( SystemTime::now() .checked_add(Duration::from_secs(3600)) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index afa12fc..1861feb 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -496,7 +496,7 @@ impl Service { ) })?, appservice::event::push_events::v1::Request { - events: &pdu_jsons, + events: pdu_jsons, txn_id: (&*base64::encode_config( calculate_hash( &events @@ -638,9 +638,9 @@ impl Service { let response = server_server::send_request( server, send_transaction_message::v1::Request { - origin: services().globals.server_name(), - pdus: &pdu_jsons, - edus: &edu_jsons, + origin: services().globals.server_name().to_owned(), + pdus: pdu_jsons, + edus: edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), transaction_id: (&*base64::encode_config( calculate_hash( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 672290c..147ce4d 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -5,7 +5,7 @@ pub use data::Data; use ruma::{ api::client::{ error::ErrorKind, - uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, + uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, CanonicalJsonValue, DeviceId, UserId, }; @@ -44,7 +44,7 @@ impl Service { &self, user_id: &UserId, device_id: &DeviceId, - auth: &IncomingAuthData, + auth: &AuthData, uiaainfo: &UiaaInfo, ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth @@ -58,13 +58,13 @@ impl Service { match auth { // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { + AuthData::Password(Password { identifier, password, .. 
}) => { let username = match identifier { - IncomingUserIdentifier::UserIdOrLocalpart(username) => username, + UserIdentifier::UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, @@ -85,7 +85,7 @@ impl Service { argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { + uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { kind: ErrorKind::Forbidden, message: "Invalid username or password.".to_owned(), }); @@ -96,7 +96,7 @@ impl Service { // Password was correct! Let's add it to `completed` uiaainfo.completed.push(AuthType::Password); } - IncomingAuthData::Dummy(_) => { + AuthData::Dummy(_) => { uiaainfo.completed.push(AuthType::Dummy); } k => error!("type not supported: {:?}", k), diff --git a/src/service/users/data.rs b/src/service/users/data.rs index bc1db33..8553210 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,6 +1,6 @@ use crate::Result; use ruma::{ - api::client::{device::Device, filter::IncomingFilterDefinition}, + api::client::{device::Device, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, @@ -191,11 +191,7 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Creates a new sync filter. Returns the filter id. - fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; + fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result; - fn get_filter( - &self, - user_id: &UserId, - filter_id: &str, - ) -> Result>; + fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 9dcfa8b..6be5c89 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -3,7 +3,7 @@ use std::{collections::BTreeMap, mem}; pub use data::Data; use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, @@ -326,11 +326,7 @@ impl Service { } /// Creates a new sync filter. Returns the filter id. 
- pub fn create_filter( - &self, - user_id: &UserId, - filter: &IncomingFilterDefinition, - ) -> Result { + pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { self.db.create_filter(user_id, filter) } @@ -338,7 +334,7 @@ impl Service { &self, user_id: &UserId, filter_id: &str, - ) -> Result> { + ) -> Result> { self.db.get_filter(user_id, filter_id) } } diff --git a/src/utils/error.rs b/src/utils/error.rs index 9c8617f..0ef13ce 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -3,7 +3,7 @@ use std::convert::Infallible; use http::StatusCode; use ruma::{ api::client::{ - error::{Error as RumaError, ErrorKind}, + error::{Error as RumaError, ErrorBody, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, OwnedServerName, @@ -131,8 +131,7 @@ impl Error { warn!("{}: {}", status_code, message); RumaResponse(UiaaResponse::MatrixError(RumaError { - kind, - message, + body: ErrorBody::Standard { kind, message }, status_code, })) } From f1d25746511dfaa16d4316b76d71275bd0a8a35a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 17 Dec 2022 09:21:19 +0100 Subject: [PATCH 1314/1727] finish upgrade ruma --- src/api/client_server/push.rs | 43 +++++++++++++------------- src/main.rs | 8 +++-- src/service/pusher/mod.rs | 18 +++++------ src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 2 +- src/utils/error.rs | 5 ++- 6 files changed, 40 insertions(+), 38 deletions(-) diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index affd8e8..b044138 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -9,7 +9,7 @@ use ruma::{ }, }, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, - push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, + push::{ConditionalPushRuleInit, NewPushRule, PatternedPushRuleInit, SimplePushRuleInit}, }; /// # `GET /_matrix/client/r0/pushrules` @@ -132,66 +132,65 @@ pub async fn set_pushrule_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))?; let global = &mut account_data.content.global; - match body.kind { - RuleKind::Override => { + match body.rule { + NewPushRule::Override(rule) => { global.override_.replace( ConditionalPushRuleInit { - actions: body.actions, + actions: rule.actions, default: false, enabled: true, - rule_id: body.rule_id, - conditions: body.conditions, + rule_id: rule.rule_id, + conditions: rule.conditions, } .into(), ); } - RuleKind::Underride => { + NewPushRule::Underride(rule) => { global.underride.replace( ConditionalPushRuleInit { - actions: body.actions, + actions: rule.actions, default: false, enabled: true, - rule_id: body.rule_id, - conditions: body.conditions, + rule_id: rule.rule_id, + conditions: rule.conditions, } .into(), ); } - RuleKind::Sender => { + NewPushRule::Sender(rule) => { global.sender.replace( SimplePushRuleInit { - actions: body.actions, + actions: rule.actions, default: false, enabled: true, - rule_id: body.rule_id, + rule_id: rule.rule_id, } .into(), ); } - RuleKind::Room => { + NewPushRule::Room(rule) => { global.room.replace( SimplePushRuleInit { - actions: body.actions, + actions: rule.actions, default: false, enabled: true, - rule_id: body.rule_id, + rule_id: rule.rule_id, } .into(), ); } - RuleKind::Content => { + NewPushRule::Content(rule) => { global.content.replace( PatternedPushRuleInit { - actions: body.actions, + actions: rule.actions, default: false, enabled: true, - rule_id: body.rule_id, - pattern: 
body.pattern.unwrap_or_default(), + rule_id: rule.rule_id, + pattern: rule.pattern, } .into(), ); } - _ => {} } services().account_data.update( @@ -212,7 +211,7 @@ pub async fn get_pushrule_actions_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != "global" { + if body.scope != RuleScope::Global { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", diff --git a/src/main.rs b/src/main.rs index e754b84..013c4de 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,7 +29,7 @@ use http::{ use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{ client::{ - error::{Error as RumaError, ErrorKind}, + error::{Error as RumaError, ErrorBody, ErrorKind}, uiaa::UiaaResponse, }, IncomingRequest, @@ -223,8 +223,10 @@ async fn unrecognized_method( if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { warn!("Method not allowed: {method} {uri}"); return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { - kind: ErrorKind::Unrecognized, - message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), + body: ErrorBody::Standard { + kind: ErrorKind::Unrecognized, + message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), + }, status_code: StatusCode::METHOD_NOT_ALLOWED, })) .into_response()); diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index d3d157c..ba096a2 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -239,12 +239,12 @@ impl Service { device.tweaks = tweaks.clone(); } - let d = &[device]; + let d = vec![device]; let mut notifi = Notification::new(d); notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); + notifi.event_id = Some((*event.event_id).to_owned()); + notifi.room_id = Some((*event.room_id).to_owned()); // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); @@ -260,18 +260,16 @@ impl Service { self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); + notifi.sender = Some(event.sender.clone()); + notifi.event_type = Some(event.kind.clone()); + notifi.content = serde_json::value::to_raw_value(&event.content).ok(); if event.kind == RoomEventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let user_name = services().users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); + notifi.sender_display_name = services().users.displayname(&event.sender)?; let room_name = if let Some(room_name_pdu) = services() .rooms @@ -287,7 +285,7 @@ impl Service { None }; - notifi.room_name = room_name.as_deref(); + notifi.room_name = room_name; self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 85d21bf..3c49349 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1113,7 +1113,7 @@ impl Service { .send_federation_request( origin, get_event::v1::Request { - event_id: next_id.into(), + event_id: (*next_id).to_owned(), }, ) .await diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 6c9bed3..32afdd4 100644 --- 
a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -164,7 +164,7 @@ impl Service { .content .ignored_users .iter() - .any(|user| user == sender) + .any(|(user, _details)| user == sender) }); if is_ignored { diff --git a/src/utils/error.rs b/src/utils/error.rs index 0ef13ce..3e0d8ca 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -102,7 +102,10 @@ impl Error { if let Self::FederationError(origin, error) = self { let mut error = error.clone(); - error.message = format!("Answer from {}: {}", origin, error.message); + error.body = ErrorBody::Standard { + kind: Unknown, + message: format!("Answer from {}: {}", origin, error), + }; return RumaResponse(UiaaResponse::MatrixError(error)); } From 6d5e54a66b96ab504eeb6cca03499fb03761dcb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Dec 2022 06:37:03 +0100 Subject: [PATCH 1315/1727] fix: jaeger support --- Cargo.lock | 31 +++++++++++++++++++ Cargo.toml | 1 + src/api/client_server/sync.rs | 2 +- .../key_value/rooms/state_accessor.rs | 9 ++---- src/main.rs | 24 +++++++++++--- src/service/rooms/auth_chain/mod.rs | 1 - src/service/rooms/event_handler/mod.rs | 10 +++--- src/service/rooms/state_accessor/data.rs | 7 ++--- src/service/rooms/state_accessor/mod.rs | 9 ++---- 9 files changed, 65 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a659dec..bb5943a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -419,6 +419,7 @@ dependencies = [ "tower-http", "tracing", "tracing-flame", + "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", ] @@ -574,6 +575,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dashmap" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +dependencies = [ + "cfg-if", + "hashbrown", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" version = "2.3.2" @@ -1573,6 +1587,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" dependencies = [ + "fnv", "futures-channel", "futures-util", "indexmap", @@ -1590,6 +1605,8 @@ checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" dependencies = [ "async-trait", "crossbeam-channel", + "dashmap", + "fnv", "futures-channel", "futures-executor", "futures-util", @@ -2891,6 +2908,20 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" +dependencies = [ + "once_cell", + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "tracing-subscriber" version = "0.3.16" diff --git a/Cargo.toml b/Cargo.toml index 87102c0..737799d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,6 +69,7 @@ tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } tracing-flame = "0.2.0" opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } +tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs 
index 43ca238..568a23c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -873,7 +873,7 @@ async fn sync_helper( let since_state_ids = match since_shortstatehash { Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, - None => BTreeMap::new(), + None => HashMap::new(), }; let left_event_id = match services().rooms.state_accessor.room_state_get_id( diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 70e59ac..0f0c0dc 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; use async_trait::async_trait; @@ -9,7 +6,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { - async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services() .rooms .state_compressor @@ -17,7 +14,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .pop() .expect("there is always one layer") .1; - let mut result = BTreeMap::new(); + let mut result = HashMap::new(); let mut i = 0; for compressed in full_state.into_iter() { let parsed = services() diff --git a/src/main.rs b/src/main.rs index 013c4de..fa33c09 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,7 +26,6 @@ use http::{ header::{self, HeaderName}, Method, StatusCode, Uri, }; -use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{ client::{ error::{Error as RumaError, ErrorBody, ErrorKind}, @@ -93,14 +92,29 @@ async fn main() { if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline() + .with_auto_split_batch(true) + .with_service_name("conduit") .install_batch(opentelemetry::runtime::Tokio) .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let span = tracer.start("conduit"); - start.with_current_context().await; - drop(span); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!( + "It looks like your log config is invalid. 
The following error occurred: {}", + e + ); + EnvFilter::try_new("warn").unwrap() + } + }; - println!("exporting"); + let subscriber = tracing_subscriber::Registry::default() + .with(filter_layer) + .with(telemetry); + tracing::subscriber::set_global_default(subscriber).unwrap(); + start.await; + println!("exporting remaining spans"); opentelemetry::global::shutdown_tracer_provider(); } else { let registry = tracing_subscriber::Registry::default(); diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index d3b6e40..3963604 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -15,7 +15,6 @@ pub struct Service { } impl Service { - #[tracing::instrument(skip(self))] pub fn get_cached_eventid_authchain<'a>( &'a self, key: &[u64], diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 3c49349..0bba61c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -7,7 +7,7 @@ use ruma::{ RoomVersionId, }; use std::{ - collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, + collections::{hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, @@ -553,7 +553,7 @@ impl Service { let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = services() + let mut leaf_state: HashMap<_, _> = services() .rooms .state_accessor .state_full_ids(sstatehash) @@ -660,7 +660,7 @@ impl Service { ) .await; - let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + let mut state: HashMap<_, Arc> = HashMap::new(); for (pdu, _) in state_vec { let state_key = pdu.state_key.clone().ok_or_else(|| { Error::bad_database("Found non-state pdu in state events.") @@ -672,10 +672,10 @@ impl Service { )?; match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { + hash_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); } - btree_map::Entry::Occupied(_) => return Err( + hash_map::Entry::Occupied(_) => return Err( Error::bad_database("State event's type and state_key combination exists multiple times."), ), } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 340b19c..f3ae3c2 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; use ruma::{events::StateEventType, EventId, RoomId}; @@ -12,7 +9,7 @@ use crate::{PduEvent, Result}; pub trait Data: Send + Sync { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
- async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; async fn state_full( &self, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 1a9c4a9..87d9936 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,8 +1,5 @@ mod data; -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; pub use data::Data; use ruma::{events::StateEventType, EventId, RoomId}; @@ -16,7 +13,8 @@ pub struct Service { impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + #[tracing::instrument(skip(self))] + pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { self.db.state_full_ids(shortstatehash).await } @@ -39,7 +37,6 @@ impl Service { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] pub fn state_get( &self, shortstatehash: u64, From 683eefbd0bef1a5a84bd9fbd31fe11b3106f6bc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Dec 2022 06:52:18 +0100 Subject: [PATCH 1316/1727] Update README --- DEPLOY.md | 2 +- README.md | 28 +++++++++++++++++++++------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 89631f5..1d1fc13 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -2,7 +2,7 @@ > ## Getting help > -> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us > in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit diff --git a/README.md b/README.md index ab47176..f73f6aa 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,12 @@ # Conduit - ### A Matrix homeserver written in Rust +#### What is Matrix? +[Matrix](https://matrix.org) is an open network for secure and decentralized +communication. Users from every Matrix homeserver can chat with users from all +other Matrix servers. You can even use bridges (also called Matrix appservices) +to communicate with users outside of Matrix, like a community on Discord. + #### What is the goal? An efficient Matrix homeserver that's easy to set up and just works. You can install @@ -13,9 +18,10 @@ friends or company. Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for example) and registering on the `conduit.rs` homeserver. -It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which -was used in the Samsung Galaxy S5. It joined many big rooms including Matrix -HQ. +*Registration is currently disabled because of scammers. For an account please + message us (see contact section below).* + +Server hosting for conduit.rs is donated by the Matrix.org Foundation. #### What is the current status? @@ -25,8 +31,8 @@ from time to time. 
There are still a few important features missing: -- E2EE verification over federation -- Outgoing read receipts, typing, presence over federation +- E2EE emoji comparison over federation (E2EE chat works) +- Outgoing read receipts, typing, presence over federation (incoming works) Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). @@ -50,13 +56,21 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md] #### Thanks to -Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. +Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust - axum: A modular web framework +#### Contact + +If you run into any question, feel free to +- Ask us in `#conduit:fachschaften.org` on Matrix +- Write an E-Mail to `conduit@koesters.xyz` +- Send an direct message to `timo@fachschaften.org` on Matrix +- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) + #### Donate Liberapay: \ From 2a0515f5289ed712ecd4a68dfc57e7529fe1b57e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Dec 2022 07:47:18 +0100 Subject: [PATCH 1317/1727] Replace println/dbg calls with corresponding macros from tracing crate --- src/database/abstraction/sqlite.rs | 17 +----- src/database/mod.rs | 20 +++---- src/main.rs | 78 +++++++++++++------------- src/service/rooms/auth_chain/mod.rs | 36 ++++++------ src/service/rooms/event_handler/mod.rs | 5 +- 5 files changed, 68 insertions(+), 88 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 4961fd7..af3e192 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -135,7 +135,6 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { - //dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? .query_row([key], |row| row.get(0)) @@ -143,7 +142,6 @@ impl SqliteTable { } fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { - //dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -176,10 +174,7 @@ impl SqliteTable { statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| { - //dbg!(&name); - r.unwrap() - }), + .map(move |r| r.unwrap()), ); Box::new(PreparedStatementIterator { @@ -276,10 +271,7 @@ impl KvTree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| { - //dbg!(&name); - r.unwrap() - }), + .map(move |r| r.unwrap()), ); Box::new(PreparedStatementIterator { iterator, @@ -301,10 +293,7 @@ impl KvTree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| { - //dbg!(&name); - r.unwrap() - }), + .map(move |r| r.unwrap()), ); Box::new(PreparedStatementIterator { diff --git a/src/database/mod.rs b/src/database/mod.rs index 46ffad0..ebaa746 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -258,7 +258,7 @@ impl KeyValueDatabase { }; if config.max_request_size < 1024 { - eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); + error!(?config.max_request_size, "Max request size is less than 1KB. Please increase it."); } let db_raw = Box::new(Self { @@ -483,7 +483,7 @@ impl KeyValueDatabase { for user in services().rooms.state_cache.room_members(&room?) { let user = user?; if user.server_name() != services().globals.server_name() { - println!("Migration: Creating user {}", user); + info!(?user, "Migration: creating user"); services().users.create(&user, None)?; } } @@ -545,7 +545,6 @@ impl KeyValueDatabase { current_state: HashSet<_>, last_roomstates: &mut HashMap<_, _>| { counter += 1; - println!("counter: {}", counter); let last_roomsstatehash = last_roomstates.get(current_room); let states_parents = last_roomsstatehash.map_or_else( @@ -742,15 +741,13 @@ impl KeyValueDatabase { new_key.extend_from_slice(word); new_key.push(0xff); new_key.extend_from_slice(pdu_id_count); - println!("old {:?}", key); - println!("new {:?}", new_key); Some((new_key, Vec::new())) }) .peekable(); while iter.peek().is_some() { db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; - println!("smaller batch done"); + debug!("Inserted smaller batch"); } info!("Deleting starts"); @@ -760,7 +757,6 @@ impl KeyValueDatabase { .iter() .filter_map(|(key, _)| { if key.starts_with(b"!") { - println!("del {:?}", key); Some(key) } else { None @@ -769,7 +765,6 @@ impl KeyValueDatabase { .collect(); for key in batch2 { - println!("del"); db.tokenids.remove(&key)?; } @@ -945,7 +940,6 @@ impl KeyValueDatabase { #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; - use tracing::info; use std::time::{Duration, Instant}; @@ -961,23 +955,23 @@ impl KeyValueDatabase { #[cfg(unix)] tokio::select! { _ = i.tick() => { - info!("cleanup: Timer ticked"); + debug!("cleanup: Timer ticked"); } _ = s.recv() => { - info!("cleanup: Received SIGHUP"); + debug!("cleanup: Received SIGHUP"); } }; #[cfg(not(unix))] { i.tick().await; - info!("cleanup: Timer ticked") + debug!("cleanup: Timer ticked") } let start = Instant::now(); if let Err(e) = services().globals.cleanup() { error!("cleanup: Errored: {}", e); } else { - info!("cleanup: Finished in {:?}", start.elapsed()); + debug!("cleanup: Finished in {:?}", start.elapsed()); } } }); diff --git a/src/main.rs b/src/main.rs index fa33c09..49ac22d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -40,7 +40,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -68,27 +68,16 @@ async fn main() { let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + eprintln!( + "It looks like your config is invalid. The following error occurred: {}", + e + ); std::process::exit(1); } }; config.warn_deprecated(); - if let Err(e) = KeyValueDatabase::load_or_create(config).await { - eprintln!( - "The database couldn't be loaded or created. 
The following error occured: {}", - e - ); - std::process::exit(1); - }; - - let config = &services().globals.config; - - let start = async { - run_server().await.unwrap(); - }; - if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline() @@ -113,35 +102,44 @@ async fn main() { .with(filter_layer) .with(telemetry); tracing::subscriber::set_global_default(subscriber).unwrap(); - start.await; - println!("exporting remaining spans"); - opentelemetry::global::shutdown_tracer_provider(); + } else if config.tracing_flame { + let registry = tracing_subscriber::Registry::default(); + let (flame_layer, _guard) = + tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); + let flame_layer = flame_layer.with_empty_samples(false); + + let filter_layer = EnvFilter::new("trace,h2=off"); + + let subscriber = registry.with(filter_layer).with(flame_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); } else { let registry = tracing_subscriber::Registry::default(); - if config.tracing_flame { - let (flame_layer, _guard) = - tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); - let flame_layer = flame_layer.with_empty_samples(false); + let fmt_layer = tracing_subscriber::fmt::Layer::new(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + EnvFilter::try_new("warn").unwrap() + } + }; - let filter_layer = EnvFilter::new("trace,h2=off"); + let subscriber = registry.with(filter_layer).with(fmt_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); + } - let subscriber = registry.with(filter_layer).with(flame_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); - start.await; - } else { - let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = match EnvFilter::try_new(&config.log) { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your log config is invalid. 
The following error occurred: {}", e); - EnvFilter::try_new("warn").unwrap() - } - }; + info!("Loading database"); + if let Err(error) = KeyValueDatabase::load_or_create(config).await { + error!(?error, "The database couldn't be loaded or created"); - let subscriber = registry.with(filter_layer).with(fmt_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); - start.await; - } + std::process::exit(1); + }; + let config = &services().globals.config; + + info!("Starting server"); + run_server().await.unwrap(); + + if config.allow_jaeger { + opentelemetry::global::shutdown_tracer_provider(); } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 3963604..92fdd0c 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,7 @@ use std::{ pub use data::Data; use ruma::{api::client::error::ErrorKind, EventId, RoomId}; -use tracing::log::warn; +use tracing::{debug, error, warn}; use crate::{services, Error, Result}; @@ -88,10 +88,10 @@ impl Service { .rooms .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() + debug!( + event_id = ?event_id, + chain_length = ?auth_chain.len(), + "Cache missed event" ); chunk_cache.extend(auth_chain.iter()); @@ -101,11 +101,11 @@ impl Service { } }; } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 + debug!( + chunk_cache_length = ?chunk_cache.len(), + hits = ?hits2, + misses = ?misses2, + "Chunk missed", ); let chunk_cache = Arc::new(chunk_cache); services() @@ -115,11 +115,11 @@ impl Service { full_auth_chain.extend(chunk_cache.iter()); } - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses + debug!( + chain_length = ?full_auth_chain.len(), + hits = ?hits, + misses = ?misses, + "Auth chain stats", ); Ok(full_auth_chain @@ -151,10 +151,10 @@ impl Service { } } Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); + warn!(?event_id, "Could not find pdu mentioned in auth events"); } - Err(e) => { - warn!("Could not load event in auth chain: {} {}", event_id, e); + Err(error) => { + error!(?event_id, ?error, "Could not load event in auth chain"); } } } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0bba61c..cf1f370 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -839,8 +839,8 @@ impl Service { info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); - info!("Loading extremities"); - for id in dbg!(&extremities) { + info!(?extremities, "Loading extremities"); + for id in &extremities { match services().rooms.timeline.get_pdu(id)? { Some(leaf_pdu) => { extremity_sstatehashes.insert( @@ -1273,7 +1273,6 @@ impl Service { // This return value is the key used for sorting events, // events are then sorted by power level, time, // and lexically by event_id. 
- println!("{}", event_id); Ok(( int!(0), MilliSecondsSinceUnixEpoch( From 2a04c213f98cfd340fcb55b9d30a0751e796d431 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 18 Dec 2022 09:44:46 +0100 Subject: [PATCH 1318/1727] improvement: handle restricted joins locally --- src/api/client_server/membership.rs | 124 +++++++++++++++++++++------- src/service/globals/mod.rs | 2 +- 2 files changed, 97 insertions(+), 29 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 8674a60..d6a1bd8 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -13,8 +13,9 @@ use ruma::{ canonical_json::to_canonical_value, events::{ room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, + power_levels::RoomPowerLevelsEventContent, }, RoomEventType, StateEventType, }, @@ -751,6 +752,96 @@ async fn join_room_by_id_helper( .state .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { + let join_rules_event = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomJoinRules, + "", + )?; + let power_levels_event = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomPowerLevels, + "", + )?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + let power_levels_event_content: Option = power_levels_event + .as_ref() + .map(|power_levels_event| { + serde_json::from_str(power_levels_event.content.get()).map_err(|e| { + warn!("Invalid power levels event: {}", e); + Error::bad_database("Invalid power levels event in db.") + }) + }) + .transpose()?; + + let restriction_rooms = match join_rules_event_content { + Some(RoomJoinRulesEventContent { + join_rule: JoinRule::Restricted(restricted), + }) + | Some(RoomJoinRulesEventContent { + join_rule: JoinRule::KnockRestricted(restricted), + }) => restricted + .allow + .into_iter() + .filter_map(|a| match a { + AllowRule::RoomMembership(r) => Some(r.room_id), + _ => None, + }) + .collect(), + _ => Vec::new(), + }; + + let authorized_user = restriction_rooms + .iter() + .find_map(|restriction_room_id| { + if !services() + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .ok()? + { + return None; + } + let authorized_user = power_levels_event_content + .as_ref() + .and_then(|c| { + c.users + .iter() + .filter(|(uid, i)| { + uid.server_name() == services().globals.server_name() + && **i > ruma::int!(0) + && services() + .rooms + .state_cache + .is_joined(uid, restriction_room_id) + .unwrap_or(false) + }) + .max_by_key(|(_, i)| *i) + .map(|(u, _)| u.to_owned()) + }) + .or_else(|| { + // TODO: Check here if user is actually allowed to invite. Currently the auth + // check will just fail in this case. 
+ services() + .rooms + .state_cache + .room_members(restriction_room_id) + .filter_map(|r| r.ok()) + .filter(|uid| uid.server_name() == services().globals.server_name()) + .next() + }); + Some(authorized_user) + }) + .flatten(); + let event = RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -759,7 +850,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, reason: None, - join_authorized_via_users_server: None, + join_authorized_via_users_server: authorized_user, }; // Try normal join first @@ -779,32 +870,9 @@ async fn join_room_by_id_helper( Err(e) => e, }; - // TODO: Conduit does not implement restricted join rules yet, we always ask over - // federation - let join_rules_event = services().rooms.state_accessor.room_state_get( - &room_id, - &StateEventType::RoomJoinRules, - "", - )?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - if matches!( - join_rules_event_content, - Some(RoomJoinRulesEventContent { - join_rule: JoinRule::Restricted { .. } - }) | Some(RoomJoinRulesEventContent { - join_rule: JoinRule::KnockRestricted { .. } - }) - ) { + if !restriction_rooms.is_empty() { + // We couldn't do the join locally, maybe federation can help to satisfy the restricted + // join requirements let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index affc051..c0fcb4b 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -168,7 +168,7 @@ impl Service { .supported_room_versions() .contains(&s.config.default_room_version) { - error!("Room version in config isn't supported, falling back to default version"); + error!(config=?s.config.default_room_version, fallback=?crate::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); s.config.default_room_version = crate::config::default_default_room_version(); }; From c86313d4fa5b4d074e5ef40442a8a272d915dd3a Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Wed, 21 Dec 2022 10:42:12 +0100 Subject: [PATCH 1319/1727] chore: code cleanup https://rust-lang.github.io/rust-clippy/master/index.html#op_ref https://rust-lang.github.io/rust-clippy/master/index.html#str_to_string https://rust-lang.github.io/rust-clippy/master/index.html#needless_lifetimes --- src/api/client_server/account.rs | 9 ++-- src/api/client_server/directory.rs | 2 +- src/api/client_server/membership.rs | 15 +++--- src/api/ruma_wrapper/axum.rs | 3 +- src/api/server_server.rs | 15 +++--- src/config/mod.rs | 2 +- src/database/abstraction/sqlite.rs | 2 +- src/database/mod.rs | 4 +- src/main.rs | 12 ++--- src/service/admin/mod.rs | 69 +++++++++----------------- src/service/rooms/auth_chain/mod.rs | 5 +- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/state_cache/data.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- src/utils/error.rs | 4 +- 15 files changed, 56 insertions(+), 92 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 50a6a18..7459254 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -225,8 +225,7 
@@ pub async fn register_route(body: Ruma) -> Result t, Err(_) => { // Event could not be converted to canonical json @@ -594,7 +594,7 @@ async fn join_room_by_id_helper( } }; - if &signed_event_id != event_id { + if signed_event_id != event_id { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Server sent event with wrong event id", @@ -753,12 +753,12 @@ async fn join_room_by_id_helper( .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { let join_rules_event = services().rooms.state_accessor.room_state_get( - &room_id, + room_id, &StateEventType::RoomJoinRules, "", )?; let power_levels_event = services().rooms.state_accessor.room_state_get( - &room_id, + room_id, &StateEventType::RoomPowerLevels, "", )?; @@ -835,8 +835,7 @@ async fn join_room_by_id_helper( .state_cache .room_members(restriction_room_id) .filter_map(|r| r.ok()) - .filter(|uid| uid.server_name() == services().globals.server_name()) - .next() + .find(|uid| uid.server_name() == services().globals.server_name()) }); Some(authorized_user) }) @@ -982,7 +981,7 @@ async fn join_room_by_id_helper( } }; - if &signed_event_id != event_id { + if signed_event_id != event_id { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Server sent event with wrong event id", @@ -1179,7 +1178,7 @@ pub(crate) async fn invite_helper<'a>( user_id.server_name(), create_invite::v2::Request { room_id: room_id.to_owned(), - event_id: (&*pdu.event_id).to_owned(), + event_id: (*pdu.event_id).to_owned(), room_version: room_version_id.clone(), event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state, diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index d056f3f..ed28f9d 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -308,8 +308,7 @@ impl Credentials for XMatrix { fn decode(value: &http::HeaderValue) -> Option { debug_assert!( value.as_bytes().starts_with(b"X-Matrix "), - "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}", - value, + "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}", ); let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 2b854a6..fc3e2c0 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -84,8 +84,8 @@ pub enum FedDest { impl FedDest { fn into_https_string(self) -> String { match self { - Self::Literal(addr) => format!("https://{}", addr), - Self::Named(host, port) => format!("https://{}{}", host, port), + Self::Literal(addr) => format!("https://{addr}"), + Self::Named(host, port) => format!("https://{host}{port}"), } } @@ -385,7 +385,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{}", port)) + FedDest::Named(delegated_hostname, format!(":{port}")) } else { add_port_to_hostname(&delegated_hostname) } @@ -427,7 +427,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{}", port)) + FedDest::Named(hostname.clone(), format!(":{port}")) } else { add_port_to_hostname(&hostname) } @@ -460,7 +460,7 @@ async fn query_srv_record(hostname: &'_ str) -> Option { if let Ok(Some(host_port)) = services() .globals .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{}", hostname)) + .srv_lookup(format!("_matrix._tcp.{hostname}")) .await 
.map(|srv| { srv.iter().next().map(|result| { @@ -482,10 +482,7 @@ async fn request_well_known(destination: &str) -> Option { &services() .globals .default_client() - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) + .get(&format!("https://{destination}/.well-known/matrix/server")) .send() .await .ok()? diff --git a/src/config/mod.rs b/src/config/mod.rs index 6b862bb..8a4b54e 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -197,7 +197,7 @@ impl fmt::Display for Config { msg += &format!("{}: {}\n", line.1 .0, line.1 .1); } - write!(f, "{}", msg) + write!(f, "{msg}") } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index af3e192..b69efb6 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -106,7 +106,7 @@ impl KeyValueDatabaseEngine for Arc { } fn open_tree(&self, name: &str) -> Result> { - self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; + self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), [])?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), diff --git a/src/database/mod.rs b/src/database/mod.rs index ebaa746..78bb358 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -827,7 +827,7 @@ impl KeyValueDatabase { let rule = rules_list.content.get(content_rule_transformation[0]); if rule.is_some() { let mut rule = rule.unwrap().clone(); - rule.rule_id = content_rule_transformation[1].to_string(); + rule.rule_id = content_rule_transformation[1].to_owned(); rules_list.content.remove(content_rule_transformation[0]); rules_list.content.insert(rule); } @@ -850,7 +850,7 @@ impl KeyValueDatabase { let rule = rules_list.underride.get(transformation[0]); if let Some(rule) = rule { let mut rule = rule.clone(); - rule.rule_id = transformation[1].to_string(); + rule.rule_id = transformation[1].to_owned(); rules_list.underride.remove(transformation[0]); rules_list.underride.insert(rule); } diff --git a/src/main.rs b/src/main.rs index 49ac22d..da80507 100644 --- a/src/main.rs +++ b/src/main.rs @@ -68,10 +68,7 @@ async fn main() { let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { - eprintln!( - "It looks like your config is invalid. The following error occurred: {}", - e - ); + eprintln!("It looks like your config is invalid. The following error occurred: {e}"); std::process::exit(1); } }; @@ -91,8 +88,7 @@ async fn main() { Ok(s) => s, Err(e) => { eprintln!( - "It looks like your log config is invalid. The following error occurred: {}", - e + "It looks like your log config is invalid. The following error occurred: {e}" ); EnvFilter::try_new("warn").unwrap() } @@ -118,7 +114,7 @@ async fn main() { let filter_layer = match EnvFilter::try_new(&config.log) { Ok(s) => s, Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + eprintln!("It looks like your config is invalid. 
The following error occured while parsing it: {e}"); EnvFilter::try_new("warn").unwrap() } }; @@ -534,6 +530,6 @@ fn method_to_filter(method: Method) -> MethodFilter { Method::POST => MethodFilter::POST, Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, - m => panic!("Unsupported HTTP method: {:?}", m), + m => panic!("Unsupported HTTP method: {m:?}"), } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index e2b2fd8..77f351a 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -287,13 +287,11 @@ impl Service { Err(error) => { let markdown_message = format!( "Encountered an error while handling the command:\n\ - ```\n{}\n```", - error, + ```\n{error}\n```", ); let html_message = format!( "Encountered an error while handling the command:\n\ -
                <pre>\n{}\n</pre>
                ", - error, +
                <pre>\n{error}\n</pre>
                ", ); RoomMessageEventContent::text_html(markdown_message, html_message) @@ -338,17 +336,14 @@ impl Service { match parsed_config { Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}.", - id + "Appservice registered with ID: {id}." )), Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {}", - e + "Failed to register appservice: {e}" )), }, Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e + "Could not parse appservice config: {e}" )), } } else { @@ -365,8 +360,7 @@ impl Service { { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {}", - e + "Failed to unregister appservice: {e}" )), }, AdminCommand::ListAppservices => { @@ -459,8 +453,7 @@ impl Service { .count(); let elapsed = start.elapsed(); RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed + "Loaded auth chain with length {count} in {elapsed:?}" )) } else { RoomMessageEventContent::text_plain("Event not found.") @@ -474,30 +467,26 @@ impl Service { Ok(value) => { match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { Ok(hash) => { - let event_id = EventId::parse(format!("${}", hash)); + let event_id = EventId::parse(format!("${hash}")); match serde_json::from_value::( serde_json::to_value(value).expect("value is json"), ) { Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\n{:#?}", - event_id, pdu + "EventId: {event_id:?}\n{pdu:#?}" )), Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e + "EventId: {event_id:?}\nCould not parse event: {e}" )), } } Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {:?}", - e + "Could not parse PDU JSON: {e:?}" )), } } Err(e) => RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e + "Invalid json in command body: {e}" )), } } else { @@ -545,8 +534,7 @@ impl Service { AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to get database memory usage: {}", - e + "Failed to get database memory usage: {e}" )), }, AdminCommand::ShowConfig => { @@ -561,8 +549,7 @@ impl Service { Ok(id) => id, Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e + "The supplied username is not a valid username: {e}" ))) } }; @@ -589,12 +576,10 @@ impl Service { .set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {}: {}", - user_id, new_password + "Successfully reset the password for user {user_id}: {new_password}" )), Err(e) => RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {}: {}", - user_id, e + "Couldn't reset the password for user {user_id}: {e}" )), } } @@ -609,8 +594,7 @@ impl Service { Ok(id) => id, Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e + "The supplied username is not a valid username: {e}" ))) } }; @@ -676,8 +660,7 @@ impl 
Service { let user_id = Arc::::from(user_id); if services().users.exists(&user_id)? { RoomMessageEventContent::text_plain(format!( - "Making {} leave all rooms before deactivation...", - user_id + "Making {user_id} leave all rooms before deactivation..." )); services().users.deactivate_account(&user_id)?; @@ -687,13 +670,11 @@ impl Service { } RoomMessageEventContent::text_plain(format!( - "User {} has been deactivated", - user_id + "User {user_id} has been deactivated" )) } else { RoomMessageEventContent::text_plain(format!( - "User {} doesn't exist on this server", - user_id + "User {user_id} doesn't exist on this server" )) } } @@ -709,8 +690,7 @@ impl Service { Ok(user_id) => user_ids.push(user_id), Err(_) => { return Ok(RoomMessageEventContent::text_plain(format!( - "{} is not a valid username", - username + "{username} is not a valid username" ))) } } @@ -746,8 +726,7 @@ impl Service { if admins.is_empty() { RoomMessageEventContent::text_plain(format!( - "Deactivated {} accounts.", - deactivation_count + "Deactivated {deactivation_count} accounts." )) } else { RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) @@ -767,8 +746,8 @@ impl Service { fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` let text = text.replace( - &format!("@conduit:{}:-", server_name), - &format!("@conduit:{}: ", server_name), + &format!("@conduit:{server_name}:-"), + &format!("@conduit:{server_name}: "), ); // For the conduit admin room, subcommands become main commands diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 92fdd0c..da1944e 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -15,10 +15,7 @@ pub struct Service { } impl Service { - pub fn get_cached_eventid_authchain<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { + pub fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { self.db.get_cached_eventid_authchain(key) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index cf1f370..7531674 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -639,7 +639,7 @@ impl Service { origin, get_room_state_ids::v1::Request { room_id: room_id.to_owned(), - event_id: (&*incoming_pdu.event_id).to_owned(), + event_id: (*incoming_pdu.event_id).to_owned(), }, ) .await diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 42de56d..d8bb4a4 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -37,7 +37,7 @@ pub trait Data: Send + Sync { room_id: &RoomId, ) -> Box> + 'a>; - fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; + fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result; /// Returns an iterator of all rooms a server participates in (as far as we know). 
fn server_rooms<'a>( diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 619dca2..34399d4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -378,7 +378,7 @@ impl Service { )?; let server_user = format!("@conduit:{}", services().globals.server_name()); - let to_conduit = body.starts_with(&format!("{}: ", server_user)); + let to_conduit = body.starts_with(&format!("{server_user}: ")); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit diff --git a/src/utils/error.rs b/src/utils/error.rs index 3e0d8ca..4f044ca 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -104,12 +104,12 @@ impl Error { let mut error = error.clone(); error.body = ErrorBody::Standard { kind: Unknown, - message: format!("Answer from {}: {}", origin, error), + message: format!("Answer from {origin}: {error}"), }; return RumaResponse(UiaaResponse::MatrixError(error)); } - let message = format!("{}", self); + let message = format!("{self}"); use ErrorKind::*; let (kind, status_code) = match self { From 7c196f4e00b74bed247a03e701439bdac759914e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 9 Sep 2022 19:17:29 +0200 Subject: [PATCH 1320/1727] feat: Add max prev events config option, allowing adjusting limit for prev_events fetching --- src/config/mod.rs | 6 ++++++ src/service/globals/mod.rs | 4 ++++ src/service/rooms/event_handler/mod.rs | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 8a4b54e..b974fb1 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -40,6 +40,8 @@ pub struct Config { pub max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] pub max_concurrent_requests: u16, + #[serde(default = "default_max_fetch_prev_events")] + pub max_fetch_prev_events: u16, #[serde(default = "false_fn")] pub allow_registration: bool, #[serde(default = "true_fn")] @@ -249,6 +251,10 @@ fn default_max_concurrent_requests() -> u16 { 100 } +fn default_max_fetch_prev_events() -> u16 { + 100_u16 +} + fn default_log() -> String { "warn,state_res=warn,_=off,sled=off".to_owned() } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index c0fcb4b..bb823e2 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -222,6 +222,10 @@ impl Service { self.config.max_request_size } + pub fn max_fetch_prev_events(&self) -> u16 { + self.config.max_fetch_prev_events + } + pub fn allow_registration(&self) -> bool { self.config.allow_registration } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 7531674..5633bc5 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1226,7 +1226,7 @@ impl Service { .await .pop() { - if amount > 100 { + if amount > services().globals.max_fetch_prev_events() { // Max limit reached warn!("Max prev event limit reached!"); graph.insert(prev_event_id.clone(), HashSet::new()); From 7cc346bc18d50d614bd07f4d2dbe0186eb024389 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Wed, 21 Dec 2022 11:45:12 +0100 Subject: [PATCH 1321/1727] feat: Implement membership ban/join/leave/invite reason support --- src/api/client_server/membership.rs | 37 +++++++++++++++++++---------- src/api/client_server/room.rs | 2 +- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/src/api/client_server/membership.rs 
b/src/api/client_server/membership.rs index 87954ed..61c67cb 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -69,6 +69,7 @@ pub async fn join_room_by_id_route( join_room_by_id_helper( body.sender_user.as_deref(), &body.room_id, + body.reason.clone(), &servers, body.third_party_signed.as_ref(), ) @@ -117,6 +118,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( Some(sender_user), &room_id, + body.reason.clone(), &servers, body.third_party_signed.as_ref(), ) @@ -137,7 +139,7 @@ pub async fn leave_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - leave_room(sender_user, &body.room_id).await?; + leave_room(sender_user, &body.room_id, body.reason.clone()).await?; Ok(leave_room::v3::Response::new()) } @@ -151,7 +153,14 @@ pub async fn invite_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - invite_helper(sender_user, user_id, &body.room_id, false).await?; + invite_helper( + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) + .await?; Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -185,7 +194,7 @@ pub async fn kick_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - // TODO: reason + event.reason = body.reason.clone(); let mutex_state = Arc::clone( services() @@ -222,8 +231,6 @@ pub async fn kick_user_route( pub async fn ban_user_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - // TODO: reason - let event = services() .rooms .state_accessor @@ -240,7 +247,7 @@ pub async fn ban_user_route(body: Ruma) -> Result, room_id: &RoomId, + reason: Option, servers: &[OwnedServerName], _third_party_signed: Option<&ThirdPartySigned>, ) -> Result { @@ -533,7 +542,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, - reason: None, + reason, join_authorized_via_users_server, }) .expect("event is valid, we just created it"), @@ -848,7 +857,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, - reason: None, + reason: reason.clone(), join_authorized_via_users_server: authorized_user, }; @@ -920,7 +929,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, - reason: None, + reason, join_authorized_via_users_server, }) .expect("event is valid, we just created it"), @@ -1123,6 +1132,7 @@ pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, + reason: Option, is_direct: bool, ) -> Result<()> { if user_id.server_name() != services().globals.server_name() { @@ -1145,7 +1155,7 @@ pub(crate) async fn invite_helper<'a>( membership: MembershipState::Invite, third_party_invite: None, blurhash: None, - reason: None, + reason, join_authorized_via_users_server: None, }) .expect("member event is valid value"); @@ -1269,7 +1279,7 @@ pub(crate) async fn invite_helper<'a>( is_direct: Some(is_direct), third_party_invite: None, blurhash: services().users.blurhash(user_id)?, - reason: None, + reason, join_authorized_via_users_server: None, }) .expect("event is valid, we 
just created it"), @@ -1308,13 +1318,13 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { Err(_) => continue, }; - let _ = leave_room(user_id, &room_id).await; + let _ = leave_room(user_id, &room_id, None).await; } Ok(()) } -pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { +pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { // Ask a remote server if we don't have this room if !services().rooms.metadata.exists(room_id)? && room_id.server_name() != services().globals.server_name() @@ -1382,6 +1392,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; + event.reason = reason; services().rooms.timeline.build_and_append_pdu( PduBuilder { diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index c77cfa9..830e085 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -398,7 +398,7 @@ pub async fn create_room_route( // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await; + let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await; } // Homeserver specific stuff From 76a82339a2fd8e0d1197cf298bfbaff400b3c34b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Dec 2022 09:56:07 +0100 Subject: [PATCH 1322/1727] tweak default rocksdb settings --- src/config/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index b974fb1..31a586f 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -224,7 +224,7 @@ fn default_database_backend() -> String { } fn default_db_cache_capacity_mb() -> f64 { - 10.0 + 1000.0 } fn default_conduit_cache_capacity_modifier() -> f64 { @@ -232,7 +232,7 @@ fn default_conduit_cache_capacity_modifier() -> f64 { } fn default_rocksdb_max_open_files() -> i32 { - 20 + 1000 } fn default_pdu_cache_capacity() -> u32 { From c7a7c913d4513e0002f85c4d0118ceffc7bbbd8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Dec 2022 13:44:41 +0100 Subject: [PATCH 1323/1727] Bump ruma --- Cargo.lock | 48 +++++++++++++++----------- Cargo.toml | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb5943a..bbfda3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -181,6 +181,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" + [[package]] name = "base64ct" version = "1.5.3" @@ -377,7 +383,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64", + "base64 0.13.1", "bytes", "clap", "crossbeam", @@ -941,7 +947,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", @@ -1249,7 +1255,7 @@ version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ - "base64", + "base64 0.13.1", "pem", "ring", "serde", @@ -1714,7 +1720,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -1999,7 +2005,7 @@ name = "reqwest" version = "0.11.9" source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ - "base64", + "base64 0.13.1", "bytes", "encoding_rs", "futures-core", @@ -2070,7 +2076,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "assign", "js_int", @@ -2088,7 +2094,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "js_int", "ruma-common", @@ -2099,7 +2105,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.3" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "assign", "bytes", @@ -2116,9 +2122,9 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ - "base64", + "base64 0.20.0", "bytes", "form_urlencoded", "http", @@ -2144,7 +2150,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "js_int", "ruma-common", @@ -2155,7 +2161,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "js_int", "thiserror", @@ -2164,7 +2170,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "js_int", "ruma-common", @@ -2174,7 +2180,7 @@ dependencies = [ 
[[package]] name = "ruma-macros" version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "once_cell", "proc-macro-crate", @@ -2189,7 +2195,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "js_int", "ruma-common", @@ -2200,9 +2206,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ - "base64", + "base64 0.20.0", "ed25519-dalek", "pkcs8", "rand 0.7.3", @@ -2216,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=af28dc8339773e5cad460289fa3c4e22d9a058cd#af28dc8339773e5cad460289fa3c4e22d9a058cd" +source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" dependencies = [ "itertools", "js_int", @@ -2247,7 +2253,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ - "base64", + "base64 0.13.1", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -2289,7 +2295,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -2298,7 +2304,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 737799d..6d8af44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "af28dc8339773e5cad460289fa3c4e22d9a058cd", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", 
features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5633bc5..bc67f7a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -313,7 +313,7 @@ impl Service { Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - match ruma::canonical_json::redact(&value, room_version_id) { + match ruma::canonical_json::redact(value, room_version_id, None) { Ok(obj) => obj, Err(_) => { return Err(Error::BadRequest( From f7db3490f668b9cf1b38491728c1c60c5ac49fad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Dec 2022 10:25:54 +0100 Subject: [PATCH 1324/1727] Bump version to v0.5.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bbfda3f..e773892 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -378,7 +378,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.4.0-next" +version = "0.5.0" dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 6d8af44..7dc6448 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.4.0-next" +version = "0.5.0" rust-version = "1.64" edition = "2021" From 2a66ad4329c012e482b80deeaf750eb9360d758d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Dec 2022 16:08:05 +0100 Subject: [PATCH 1325/1727] Bump version to 0.6.0-alpha --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 7dc6448..019f9ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.5.0" +version = "0.6.0-alpha" rust-version = "1.64" edition = "2021" From 19156c7bbf8dab7267db235807ea80255ee91e7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 21 Dec 2022 16:15:56 +0100 Subject: [PATCH 1326/1727] Update Cargo.lock --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index e773892..505c71c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -378,7 +378,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.5.0" +version = "0.6.0-alpha" dependencies = [ "async-trait", "axum", From 0a4e8e59094b5ace727262820d05899ffa1be3bd Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 16 Dec 2022 13:06:59 -0800 Subject: [PATCH 1327/1727] update rust toolchain hash --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 924300c..5db8168 100644 --- a/flake.nix +++ b/flake.nix @@ -34,7 +34,7 @@ # This will need to be updated when `package.rust-version` is changed in # `Cargo.toml` - sha256 = "sha256-KXx+ID0y4mg2B3LHp7IyaiMrdexF6octADnAtFIOjrY="; + 
sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; }; builder = (pkgs.callPackage naersk { From 9f74555c88a7705f393c3175e9bda60aaf438c62 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 16 Dec 2022 13:09:35 -0800 Subject: [PATCH 1328/1727] update flake.lock --- flake.lock | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/flake.lock b/flake.lock index 9217ff2..bfe0a9b 100644 --- a/flake.lock +++ b/flake.lock @@ -8,11 +8,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1665815894, - "narHash": "sha256-Vboo1L4NMGLKZKVLnOPi9OHlae7uoNyfgvyIUm+SVXE=", + "lastModified": 1671776618, + "narHash": "sha256-myjhExbKIzZy+kqqFyqvX59KErqYZVNTPsCfgByTOKo=", "owner": "nix-community", "repo": "fenix", - "rev": "2348450241a5f945f0ba07e44ecbfac2f541d7f4", + "rev": "64d1607710b99e72d9afb2cde11bd1c2cea7cb91", "type": "github" }, "original": { @@ -23,11 +23,11 @@ }, "flake-utils": { "locked": { - "lastModified": 1659877975, - "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", "owner": "numtide", "repo": "flake-utils", - "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", "type": "github" }, "original": { @@ -43,11 +43,11 @@ ] }, "locked": { - "lastModified": 1662220400, - "narHash": "sha256-9o2OGQqu4xyLZP9K6kNe1pTHnyPz0Wr3raGYnr9AIgY=", + "lastModified": 1671096816, + "narHash": "sha256-ezQCsNgmpUHdZANDCILm3RvtO1xH8uujk/+EqNvzIOg=", "owner": "nix-community", "repo": "naersk", - "rev": "6944160c19cb591eb85bbf9b2f2768a935623ed3", + "rev": "d998160d6a076cfe8f9741e56aeec7e267e3e114", "type": "github" }, "original": { @@ -58,11 +58,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1665856037, - "narHash": "sha256-/RvIWnGKdTSoIq5Xc2HwPIL0TzRslzU6Rqk4Img6UNg=", + "lastModified": 1671780662, + "narHash": "sha256-Tsc64sN8LLHa7eqDZVVeubI8CyqIjs9l5tQ5EeRlgvM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c95ebc5125ffffcd431df0ad8620f0926b8125b8", + "rev": "339063a22409514cb2baea677b329e618faa6a08", "type": "github" }, "original": { @@ -82,11 +82,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1665765556, - "narHash": "sha256-w9L5j0TIB5ay4aRwzGCp8mgvGsu5dVJQvbEFutwr6xE=", + "lastModified": 1671750139, + "narHash": "sha256-xbL8BZU87rHfQkF3tuFXduNGPW8fDwFI+0fFmRJx66E=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "018b8429cf3fa9d8aed916704e41dfedeb0f4f78", + "rev": "a06525517b0b69cd97f2c39a4012d96f44bf0776", "type": "github" }, "original": { From 315944968b4f84c31f7f1396a8409b4a13e52709 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 23 Dec 2022 00:20:05 -0800 Subject: [PATCH 1329/1727] remind people to update the hash And offer help since it's pretty easy but impossible if you don't have Nix installed. --- Cargo.toml | 6 +++++- flake.nix | 3 +-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 019f9ce..16c04bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,13 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.6.0-alpha" -rust-version = "1.64" edition = "2021" +# When changing this, change the hash near the text "THE rust-version HASH" in +# `/flake.nix` too. If you don't have Nix installed or otherwise don't know how +# to do this, ping `@charles:computer.surgery` in the matrix room. 
+rust-version = "1.64" + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/flake.nix b/flake.nix index 5db8168..e10e8bb 100644 --- a/flake.nix +++ b/flake.nix @@ -32,8 +32,7 @@ # Use the Rust version defined in `Cargo.toml` channel = cargoToml.package.rust-version; - # This will need to be updated when `package.rust-version` is changed in - # `Cargo.toml` + # THE rust-version HASH sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; }; From 112b76b1c134be7514986e30e72c2be5ebda5323 Mon Sep 17 00:00:00 2001 From: r3g_5z Date: Sun, 8 Jan 2023 02:44:25 -0500 Subject: [PATCH 1330/1727] Add Contributor's Covenant Code of Conduct Signed-off-by: r3g_5z --- CODE_OF_CONDUCT.md | 134 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..1b06035 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,134 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement over email at +coc@koesters.xyz or over Matrix at @timo:conduit.rs. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations + From 391beddaf4034e114020c707198414fd6e4d141a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 8 Jan 2023 12:44:59 -0800 Subject: [PATCH 1331/1727] fix nix docs I made some silly copy paste errors while writing this... 
--- nix/README.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/nix/README.md b/nix/README.md index d92f910..77bad0f 100644 --- a/nix/README.md +++ b/nix/README.md @@ -107,7 +107,7 @@ in recommendedProxySettings = true; virtualHosts = { - "${server_name}" = { + "${matrix_hostname}" = { forceSSL = true; enableACME = true; @@ -124,14 +124,6 @@ in } ]; - extraConfig = '' - merge_slashes off; - ''; - - "${matrix_hostname}" = { - forceSSL = true; - enableACME = true; - locations."/_matrix/" = { proxyPass = "http://backend_conduit$request_uri"; proxyWebsockets = true; @@ -141,6 +133,15 @@ in ''; }; + extraConfig = '' + merge_slashes off; + ''; + }; + + "${server_name}" = { + forceSSL = true; + enableACME = true; + locations."=/.well-known/matrix/server" = { # Use the contents of the derivation built previously alias = "${well_known_server}"; From 844508bc482f0bafab61111b8fad70684368bd8d Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 9 Jan 2023 08:09:41 -0800 Subject: [PATCH 1332/1727] document `trusted_servers` option --- conduit-example.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/conduit-example.toml b/conduit-example.toml index 0549030..6089aa5 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -43,6 +43,10 @@ allow_federation = true # Enable the display name lightning bolt on registration. enable_lightning_bolt = true +# Servers listed here will be used to gather public keys of other servers. +# Generally, copying this exactly should be enough. (Currently, Conduit doesn't +# support batched key requests, so this list should only contain Synapse +# servers.) trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time From 809c9b44817faa4b4c4126a11d9bb9e5486ba3c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 14 Jan 2023 21:20:16 +0100 Subject: [PATCH 1333/1727] Maybe fix room joins This is a workaround for https://github.com/hyperium/hyper/issues/2312 --- src/service/globals/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index bb823e2..cd3be08 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -345,6 +345,7 @@ impl Service { fn reqwest_client_builder(config: &Config) -> Result { let mut reqwest_client_builder = reqwest::Client::builder() + .pool_max_idle_per_host(0) .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)); From 4d589d9788d4c8d038bef243e97035e7ab4aa01b Mon Sep 17 00:00:00 2001 From: digital Date: Wed, 18 Jan 2023 23:21:23 +0100 Subject: [PATCH 1334/1727] feat: support end to bridge encryption by implementing appservice logins --- src/api/client_server/session.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 64c0072..f8fc7f1 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -26,6 +26,7 @@ pub async fn get_login_types_route( ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(Default::default()), + get_login_types::v3::LoginType::ApplicationService(Default::default()), ])) } @@ -103,6 +104,27 @@ pub async fn login_route(body: Ruma) -> Result { + info!("hi"); + if !body.from_appservice { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + // TODO: is this the correct response + "Wrong username or 
password.", + )); + }; + let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { + user_id.to_lowercase() + } else { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); + }; + let user_id = + UserId::parse_with_server_name(username, services().globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + user_id + } _ => { return Err(Error::BadRequest( ErrorKind::Unknown, From f01b96588dae3ffbfca267a1ff51477decba998a Mon Sep 17 00:00:00 2001 From: The one with the braid Date: Thu, 19 Jan 2023 07:21:04 +0100 Subject: [PATCH 1335/1727] fix: adjust CI config to runner requirements - make use of more stable BTRFS driver - set default pull policy to `if-not-present` Signed-off-by: The one with the braid --- .gitlab-ci.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 91258ea..d05bb89 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,14 +16,17 @@ variables: .docker-shared-settings: stage: "build docker image" - image: jdrouet/docker-with-buildx:20.10.21-0.9.1 + image: + name: jdrouet/docker-with-buildx:20.10.21-0.9.1 + pull_policy: if-not-present needs: [] - tags: ["docker"] + tags: [ "docker" ] variables: # Docker in Docker: DOCKER_HOST: tcp://docker:2375/ DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 + # Famedly runners use BTRFS, overlayfs and overlay2 often break jobs + DOCKER_DRIVER: btrfs services: - docker:dind script: From e13dc7c14a0c5b318a57e47b37fddd92203d32a6 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 26 Jan 2023 18:28:33 +0100 Subject: [PATCH 1336/1727] add little readme --- complement/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 complement/README.md diff --git a/complement/README.md b/complement/README.md new file mode 100644 index 0000000..b86aab3 --- /dev/null +++ b/complement/README.md @@ -0,0 +1,13 @@ +# Running Conduit on Complement + +This assumes that you're familiar with complement, if not, please readme +[their readme](https://github.com/matrix-org/complement#running). + +Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker +image. + +To build, `cd` to the base directory of the workspace, and run this: + +`docker build -t complement-conduit:dev -f complement/Dockerfile .` + +Then use `complement-conduit:dev` as a base image for running complement tests. From 5d913f701083e1519a3595190e63193c519fbc6b Mon Sep 17 00:00:00 2001 From: Yusuf Bera Ertan Date: Sat, 28 Jan 2023 00:10:21 +0300 Subject: [PATCH 1337/1727] build(nix): fix flake builds --- Cargo.toml | 2 +- flake.lock | 376 ++++++++++++++++++++++++++++++++++++++++++++++++----- flake.nix | 122 ++++++++--------- 3 files changed, 399 insertions(+), 101 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 16c04bc..e0e3e32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" # When changing this, change the hash near the text "THE rust-version HASH" in # `/flake.nix` too. If you don't have Nix installed or otherwise don't know how # to do this, ping `@charles:computer.surgery` in the matrix room. 
-rust-version = "1.64" +rust-version = "1.64.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/flake.lock b/flake.lock index bfe0a9b..1bb1123 100644 --- a/flake.lock +++ b/flake.lock @@ -1,18 +1,124 @@ { "nodes": { + "alejandra": { + "inputs": { + "fenix": "fenix", + "flakeCompat": "flakeCompat", + "nixpkgs": [ + "d2n", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1658427149, + "narHash": "sha256-ToD/1z/q5VHsLMrS2h96vjJoLho59eNRtknOUd19ey8=", + "owner": "kamadorueda", + "repo": "alejandra", + "rev": "f5a22afd2adfb249b4e68e0b33aa1f0fb73fb1be", + "type": "github" + }, + "original": { + "owner": "kamadorueda", + "repo": "alejandra", + "type": "github" + } + }, + "all-cabal-json": { + "flake": false, + "locked": { + "lastModified": 1665552503, + "narHash": "sha256-r14RmRSwzv5c+bWKUDaze6pXM7nOsiz1H8nvFHJvufc=", + "owner": "nix-community", + "repo": "all-cabal-json", + "rev": "d7c0434eebffb305071404edcf9d5cd99703878e", + "type": "github" + }, + "original": { + "owner": "nix-community", + "ref": "hackage", + "repo": "all-cabal-json", + "type": "github" + } + }, + "crane": { + "flake": false, + "locked": { + "lastModified": 1670900067, + "narHash": "sha256-VXVa+KBfukhmWizaiGiHRVX/fuk66P8dgSFfkVN4/MY=", + "owner": "ipetkov", + "repo": "crane", + "rev": "59b31b41a589c0a65e4a1f86b0e5eac68081468b", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "d2n": { + "inputs": { + "alejandra": "alejandra", + "all-cabal-json": "all-cabal-json", + "crane": "crane", + "devshell": "devshell", + "flake-parts": "flake-parts", + "flake-utils-pre-commit": "flake-utils-pre-commit", + "ghc-utils": "ghc-utils", + "gomod2nix": "gomod2nix", + "mach-nix": "mach-nix", + "nix-pypi-fetcher": "nix-pypi-fetcher", + "nixpkgs": [ + "nixpkgs" + ], + "poetry2nix": "poetry2nix", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1674848374, + "narHash": "sha256-1+xlsmUWzpptK8mLjznwqOLogeicLkxB8tV6XUZbobc=", + "owner": "nix-community", + "repo": "dream2nix", + "rev": "d91e7381fa303be02f70e472207e05b26ce35b41", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "dream2nix", + "type": "github" + } + }, + "devshell": { + "flake": false, + "locked": { + "lastModified": 1663445644, + "narHash": "sha256-+xVlcK60x7VY1vRJbNUEAHi17ZuoQxAIH4S4iUFUGBA=", + "owner": "numtide", + "repo": "devshell", + "rev": "e3dc3e21594fe07bdb24bdf1c8657acaa4cb8f66", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "devshell", + "type": "github" + } + }, "fenix": { "inputs": { "nixpkgs": [ + "d2n", + "alejandra", "nixpkgs" ], "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1671776618, - "narHash": "sha256-myjhExbKIzZy+kqqFyqvX59KErqYZVNTPsCfgByTOKo=", + "lastModified": 1657607339, + "narHash": "sha256-HaqoAwlbVVZH2n4P3jN2FFPMpVuhxDy1poNOR7kzODc=", "owner": "nix-community", "repo": "fenix", - "rev": "64d1607710b99e72d9afb2cde11bd1c2cea7cb91", + "rev": "b814c83d9e6aa5a28d0cf356ecfdafb2505ad37d", "type": "github" }, "original": { @@ -21,13 +127,31 @@ "type": "github" } }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1668450977, + "narHash": "sha256-cfLhMhnvXn6x1vPm+Jow3RiFAUSCw/l1utktCw5rVA4=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "d591857e9d7dd9ddbfba0ea02b43b927c3c0f1fa", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": 
"flake-parts", + "type": "github" + } + }, "flake-utils": { "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1659877975, + "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0", "type": "github" }, "original": { @@ -36,57 +160,228 @@ "type": "github" } }, - "naersk": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, + "flake-utils-pre-commit": { "locked": { - "lastModified": 1671096816, - "narHash": "sha256-ezQCsNgmpUHdZANDCILm3RvtO1xH8uujk/+EqNvzIOg=", - "owner": "nix-community", - "repo": "naersk", - "rev": "d998160d6a076cfe8f9741e56aeec7e267e3e114", + "lastModified": 1644229661, + "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", "type": "github" }, "original": { - "owner": "nix-community", - "repo": "naersk", + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flakeCompat": { + "flake": false, + "locked": { + "lastModified": 1650374568, + "narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "b4a34015c698c7793d592d66adbab377907a2be8", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "ghc-utils": { + "flake": false, + "locked": { + "lastModified": 1662774800, + "narHash": "sha256-1Rd2eohGUw/s1tfvkepeYpg8kCEXiIot0RijapUjAkE=", + "ref": "refs/heads/master", + "rev": "bb3a2d3dc52ff0253fb9c2812bd7aa2da03e0fea", + "revCount": 1072, + "type": "git", + "url": "https://gitlab.haskell.org/bgamari/ghc-utils" + }, + "original": { + "type": "git", + "url": "https://gitlab.haskell.org/bgamari/ghc-utils" + } + }, + "gomod2nix": { + "flake": false, + "locked": { + "lastModified": 1627572165, + "narHash": "sha256-MFpwnkvQpauj799b4QTBJQFEddbD02+Ln5k92QyHOSk=", + "owner": "tweag", + "repo": "gomod2nix", + "rev": "67f22dd738d092c6ba88e420350ada0ed4992ae8", + "type": "github" + }, + "original": { + "owner": "tweag", + "repo": "gomod2nix", + "type": "github" + } + }, + "mach-nix": { + "flake": false, + "locked": { + "lastModified": 1634711045, + "narHash": "sha256-m5A2Ty88NChLyFhXucECj6+AuiMZPHXNbw+9Kcs7F6Y=", + "owner": "DavHau", + "repo": "mach-nix", + "rev": "4433f74a97b94b596fa6cd9b9c0402104aceef5d", + "type": "github" + }, + "original": { + "id": "mach-nix", + "type": "indirect" + } + }, + "nix-pypi-fetcher": { + "flake": false, + "locked": { + "lastModified": 1669065297, + "narHash": "sha256-UStjXjNIuIm7SzMOWvuYWIHBkPUKQ8Id63BMJjnIDoA=", + "owner": "DavHau", + "repo": "nix-pypi-fetcher", + "rev": "a9885ac6a091576b5195d547ac743d45a2a615ac", + "type": "github" + }, + "original": { + "owner": "DavHau", + "repo": "nix-pypi-fetcher", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1671780662, - "narHash": "sha256-Tsc64sN8LLHa7eqDZVVeubI8CyqIjs9l5tQ5EeRlgvM=", - "owner": "NixOS", + "lastModified": 1674641431, + "narHash": "sha256-qfo19qVZBP4qn5M5gXc/h1MDgAtPA5VxJm9s8RUAkVk=", + "owner": "nixos", "repo": "nixpkgs", - "rev": "339063a22409514cb2baea677b329e618faa6a08", + "rev": "9b97ad7b4330aacda9b2343396eb3df8a853b4fc", "type": "github" }, "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { 
+ "dir": "lib", + "lastModified": 1665349835, + "narHash": "sha256-UK4urM3iN80UXQ7EaOappDzcisYIuEURFRoGQ/yPkug=", "owner": "NixOS", "repo": "nixpkgs", + "rev": "34c5293a71ffdb2fe054eb5288adc1882c1eb0b1", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib_2": { + "locked": { + "dir": "lib", + "lastModified": 1672350804, + "narHash": "sha256-jo6zkiCabUBn3ObuKXHGqqORUMH27gYDIFFfLq5P4wg=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "677ed08a50931e38382dbef01cba08a8f7eac8f6", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib_2" + }, + "locked": { + "lastModified": 1674771137, + "narHash": "sha256-Zpk1GbEsYrqKmuIZkx+f+8pU0qcCYJoSUwNz1Zk+R00=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "7c7a8bce3dffe71203dcd4276504d1cb49dfe05f", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "poetry2nix": { + "flake": false, + "locked": { + "lastModified": 1666918719, + "narHash": "sha256-BkK42fjAku+2WgCOv2/1NrPa754eQPV7gPBmoKQBWlc=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "289efb187123656a116b915206e66852f038720e", + "type": "github" + }, + "original": { + "owner": "nix-community", + "ref": "1.36.0", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-utils": [ + "d2n", + "flake-utils-pre-commit" + ], + "nixpkgs": [ + "d2n", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1646153636, + "narHash": "sha256-AlWHMzK+xJ1mG267FdT8dCq/HvLCA6jwmx2ZUy5O8tY=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "b6bc0b21e1617e2b07d8205e7fae7224036dfa4b", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", "type": "github" } }, "root": { "inputs": { - "fenix": "fenix", - "flake-utils": "flake-utils", - "naersk": "naersk", - "nixpkgs": "nixpkgs" + "d2n": "d2n", + "nixpkgs": "nixpkgs", + "parts": "parts", + "rust-overlay": "rust-overlay" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1671750139, - "narHash": "sha256-xbL8BZU87rHfQkF3tuFXduNGPW8fDwFI+0fFmRJx66E=", + "lastModified": 1657557289, + "narHash": "sha256-PRW+nUwuqNTRAEa83SfX+7g+g8nQ+2MMbasQ9nt6+UM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "a06525517b0b69cd97f2c39a4012d96f44bf0776", + "rev": "caf23f29144b371035b864a1017dbc32573ad56d", "type": "github" }, "original": { @@ -95,6 +390,27 @@ "repo": "rust-analyzer", "type": "github" } + }, + "rust-overlay": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1674786480, + "narHash": "sha256-n25V3Ug/dJewbJaxj1gL0cUMBdOonrVkIQCHd9yHHvw=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "296dd673b46aaebe1c8355f1848ceb7c905dda35", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index e10e8bb..f4db253 100644 --- a/flake.nix +++ b/flake.nix @@ -1,74 +1,56 @@ { - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs"; - flake-utils.url = "github:numtide/flake-utils"; + inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; + inputs.d2n.url = "github:nix-community/dream2nix"; + 
inputs.d2n.inputs.nixpkgs.follows = "nixpkgs"; + inputs.parts.url = "github:hercules-ci/flake-parts"; + inputs.rust-overlay.url = "github:oxalica/rust-overlay"; + inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; - fenix = { - url = "github:nix-community/fenix"; - inputs.nixpkgs.follows = "nixpkgs"; + outputs = inp: + inp.parts.lib.mkFlake {inputs = inp;} { + systems = ["x86_64-linux"]; + imports = [inp.d2n.flakeModuleBeta]; + perSystem = { + config, + system, + pkgs, + ... + }: let + cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); + pkgsWithToolchain = pkgs.appendOverlays [inp.rust-overlay.overlays.default]; + + toolchains = pkgsWithToolchain.rust-bin.stable."${cargoToml.package.rust-version}"; + # toolchain to use when building conduit, includes only cargo and rustc to reduce closure size + buildToolchain = toolchains.minimal; + # toolchain to use in development shell + # the "default" component set of toolchain adds rustfmt, clippy etc. + devToolchain = toolchains.default.override { + extensions = ["rust-src"]; + }; + + # flake outputs for conduit project + conduitOutputs = config.dream2nix.outputs.conduit; + in { + dream2nix.inputs.conduit = { + source = inp.self; + projects.conduit = { + name = "conduit"; + subsystem = "rust"; + translator = "cargo-lock"; + }; + packageOverrides = { + "^.*".set-toolchain.overrideRustToolchain = _: { + cargo = buildToolchain; + rustc = buildToolchain; + }; + }; + }; + devShells.conduit = conduitOutputs.devShells.conduit.overrideAttrs (old: { + # export default crate sources for rust-analyzer to read + RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library"; + nativeBuildInputs = (old.nativeBuildInputs or []) ++ [devToolchain]; + }); + devShells.default = config.devShells.conduit; + }; }; - naersk = { - url = "github:nix-community/naersk"; - inputs.nixpkgs.follows = "nixpkgs"; - }; - }; - - outputs = - { self - , nixpkgs - , flake-utils - - , fenix - , naersk - }: flake-utils.lib.eachDefaultSystem (system: - let - pkgs = nixpkgs.legacyPackages.${system}; - - # Nix-accessible `Cargo.toml` - cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); - - # The Rust toolchain to use - toolchain = fenix.packages.${system}.toolchainOf { - # Use the Rust version defined in `Cargo.toml` - channel = cargoToml.package.rust-version; - - # THE rust-version HASH - sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; - }; - - builder = (pkgs.callPackage naersk { - inherit (toolchain) rustc cargo; - }).buildPackage; - in - { - packages.default = builder { - src = ./.; - - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]); - }; - - devShells.default = pkgs.mkShell { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so - RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; - - # Development tools - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]) ++ (with toolchain; [ - cargo - clippy - rust-src - rustc - rustfmt - ]); - }; - - checks = { - packagesDefault = self.packages.${system}.default; - devShellsDefault = self.devShells.${system}.default; - }; - }); } From 11b9cfad5e326e0abbd9b05758e2e72abb3be93c Mon Sep 17 00:00:00 2001 From: Yusuf Bera Ertan Date: Sat, 28 Jan 2023 00:14:58 +0300 Subject: [PATCH 1338/1727] docs: update nix comment for rust-version in Cargo.toml --- Cargo.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 
e0e3e32..36ffb13 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,9 +9,10 @@ readme = "README.md" version = "0.6.0-alpha" edition = "2021" -# When changing this, change the hash near the text "THE rust-version HASH" in -# `/flake.nix` too. If you don't have Nix installed or otherwise don't know how -# to do this, ping `@charles:computer.surgery` in the matrix room. +# When changing this, make sure to update the `flake.lock` file by running +# `nix flake update`. If you don't have Nix installed or otherwise don't know +# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in +# the matrix room. rust-version = "1.64.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From cfcc9086ff4890131d498ed434d7934e23c5f060 Mon Sep 17 00:00:00 2001 From: Moritz Heiber Date: Fri, 27 Jan 2023 22:43:04 +0000 Subject: [PATCH 1339/1727] Add a dynamic address resolution to the Docker healthcheck --- docker/healthcheck.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 42b2e10..62f2f98 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -6,9 +6,14 @@ if [ -z "${CONDUIT_PORT}" ]; then CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi +# If CONDUIT_ADDRESS is not set try to get the address from the process list +if [ -z "${CONDUIT_ADDRESS}" ]; then + CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }') +fi + # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. # TODO: Change this to a single wget call. Do we have a config value that we can check for that? -wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ - wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ +wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ + wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ exit 1 From a4f18f99ad2d02635fb72a0053d551878339ffca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 7 Feb 2023 15:26:34 +0100 Subject: [PATCH 1340/1727] fix: ignore bad user ids --- src/database/mod.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index 78bb358..46ba5b3 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -800,10 +800,17 @@ impl KeyValueDatabase { } if services().globals.database_version()? < 12 { - for username in services().users.list_local_users().unwrap() { - let user = - UserId::parse_with_server_name(username, services().globals.server_name()) - .unwrap(); + for username in services().users.list_local_users()? 
{ + let user = match UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) { + Ok(u) => u, + Err(e) => { + warn!("Invalid username {username}: {e}"); + continue; + } + }; let raw_rules_list = services() .account_data From fc0aff20cfcc30350d87994ee518fd1bec7ce088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 11 Feb 2023 12:43:41 +0100 Subject: [PATCH 1341/1727] fix: allow reactivation of users using reset-password admin command --- src/service/admin/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 77f351a..b6609e1 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -556,7 +556,6 @@ impl Service { // Check if the specified user is valid if !services().users.exists(&user_id)? - || services().users.is_deactivated(&user_id)? || user_id == UserId::parse_with_server_name( "conduit", @@ -565,7 +564,7 @@ impl Service { .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( - "The specified user does not exist or is deactivated!", + "The specified user does not exist!", )); } @@ -600,12 +599,12 @@ impl Service { }; if user_id.is_historical() { return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} is not allowed due to historical" + "Userid {user_id} is not allowed due to historical" ))); } if services().users.exists(&user_id)? { return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} already exists" + "Userid {user_id} already exists" ))); } // Create user From 23b18d71eef78544148746d30d8b9630998a3a22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 18 Feb 2023 13:20:20 +0100 Subject: [PATCH 1342/1727] feat: handle backfill requests Based on https://gitlab.com/famedly/conduit/-/merge_requests/421 --- src/api/server_server.rs | 83 ++++++++++++++++- src/main.rs | 1 + src/service/mod.rs | 7 +- src/service/rooms/state_accessor/mod.rs | 114 +++++++++++++++++++++++- 4 files changed, 199 insertions(+), 6 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index fc3e2c0..11a6cbf 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -12,6 +12,7 @@ use ruma::{ client::error::{Error as RumaError, ErrorKind}, federation::{ authorization::get_event_authorization, + backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, @@ -42,8 +43,9 @@ use ruma::{ }, serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, - OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, + ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -950,6 +952,83 @@ pub async fn get_event_route( }) } +/// # `GET /_matrix/federation/v1/backfill/` +/// +/// Retrieves events from before the sender joined the room, if the room's +/// history visibility allows. 
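/// A rough usage sketch (identifiers such as `room_id` and `event_id` are placeholders):
/// the request this route answers is the same one Conduit itself builds in the later
/// `backfill_if_required` change in this series:
///
///     let request = federation::backfill::get_backfill::v1::Request {
///         room_id: room_id.to_owned(),
///         v: vec![event_id.to_owned()],
///         limit: uint!(100),
///     };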
+pub async fn get_backfill_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + info!("Got backfill request from: {}", sender_servername); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let until = body + .v + .iter() + .map(|eventid| services().rooms.timeline.get_pdu_count(eventid)) + .filter_map(|r| r.ok().flatten()) + .max() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "No known eventid in v", + ))?; + + let limit = body.limit.min(uint!(100)); + + let all_events = services() + .rooms + .timeline + .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? + .take(limit.try_into().unwrap()); + + let events = all_events + .filter_map(|r| r.ok()) + .filter(|(_, e)| { + matches!( + services().rooms.state_accessor.server_can_see_event( + sender_servername, + &e.room_id, + &e.event_id, + ), + Ok(true), + ) + }) + .map(|(pdu_id, _)| services().rooms.timeline.get_pdu_json_from_id(&pdu_id)) + .filter_map(|r| r.ok().flatten()) + .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) + .collect(); + + Ok(get_backfill::v1::Response { + origin: services().globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdus: events, + }) +} + /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. diff --git a/src/main.rs b/src/main.rs index da80507..fe6cfc0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -390,6 +390,7 @@ fn routes() -> Router { .ruma_route(server_server::get_public_rooms_filtered_route) .ruma_route(server_server::send_transaction_message_route) .ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_backfill_route) .ruma_route(server_server::get_missing_events_route) .ruma_route(server_server::get_event_authorization_route) .ruma_route(server_server::get_room_state_route) diff --git a/src/service/mod.rs b/src/service/mod.rs index 385dcc6..07d80a1 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -77,7 +77,12 @@ impl Services { search: rooms::search::Service { db }, short: rooms::short::Service { db }, state: rooms::state::Service { db }, - state_accessor: rooms::state_accessor::Service { db }, + state_accessor: rooms::state_accessor::Service { + db, + server_visibility_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + }, state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { db, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 87d9936..e940ffa 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,13 +1,28 @@ mod data; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; pub use data::Data; -use ruma::{events::StateEventType, EventId, RoomId}; +use lru_cache::LruCache; +use ruma::{ + events::{ + room::{ + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + 
StateEventType, + }, + EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; +use tracing::error; -use crate::{PduEvent, Result}; +use crate::{services, Error, PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, + pub server_visibility_cache: Mutex>, } impl Service { @@ -46,6 +61,99 @@ impl Service { self.db.state_get(shortstatehash, event_type, state_key) } + /// Get membership for given user in state + fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { + self.state_get( + shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + )? + .map_or(Ok(MembershipState::Leave), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomMemberEventContent| c.membership) + .map_err(|_| Error::bad_database("Invalid room membership event in database.")) + }) + } + + /// The user was a joined member at this state (potentially in the past) + fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id) + .map(|s| s == MembershipState::Join) + .unwrap_or_default() // Return sensible default, i.e. false + } + + /// The user was an invited or joined room member at this state (potentially + /// in the past) + fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id) + .map(|s| s == MembershipState::Join || s == MembershipState::Invite) + .unwrap_or_default() // Return sensible default, i.e. false + } + + /// Whether a server is allowed to see an event through federation, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self))] + pub fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { + let shortstatehash = match self.pdu_shortstatehash(event_id)? { + Some(shortstatehash) => shortstatehash, + None => return Ok(false), + }; + + if let Some(visibility) = self + .server_visibility_cache + .lock() + .unwrap() + .get_mut(&(origin.to_owned(), shortstatehash)) + { + return Ok(*visibility); + } + + let history_visibility = self + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|_| { + Error::bad_database("Invalid history visibility event in database.") + }) + })?; + + let mut current_server_members = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|r| r.ok()) + .filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members.any(|member| self.user_was_invited(shortstatehash, &member)) + } + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members.any(|member| self.user_was_joined(shortstatehash, &member)) + } + _ => { + error!("Unknown history visibility {history_visibility}"); + false + } + }; + + self.server_visibility_cache + .lock() + .unwrap() + .insert((origin.to_owned(), shortstatehash), visibility); + + Ok(visibility) + } + /// Returns the state hash for this pdu. 
pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.db.pdu_shortstatehash(event_id) From 7bdd9660aa51b2d3d0d39b35b14e49c3e4d6a23a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Feb 2023 22:59:45 +0100 Subject: [PATCH 1343/1727] feat: ask for backfill --- src/api/client_server/context.rs | 33 ++-- src/api/client_server/membership.rs | 6 +- src/api/client_server/message.rs | 52 +++-- src/api/client_server/read_marker.rs | 60 ++++-- src/api/client_server/sync.rs | 34 ++-- src/api/server_server.rs | 64 +++--- src/database/key_value/rooms/timeline.rs | 237 ++++++++++++++--------- src/database/mod.rs | 11 +- src/service/rooms/lazy_loading/mod.rs | 8 +- src/service/rooms/state_accessor/mod.rs | 2 +- src/service/rooms/timeline/data.rs | 36 ++-- src/service/rooms/timeline/mod.rs | 228 +++++++++++++++++++--- 12 files changed, 502 insertions(+), 269 deletions(-) diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 1e62f91..fa3c754 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -27,25 +27,24 @@ pub async fn get_context_route( let mut lazy_loaded = HashSet::new(); - let base_pdu_id = services() + let base_token = services() .rooms .timeline - .get_pdu_id(&body.event_id)? + .get_pdu_count(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event id not found.", ))?; - let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?; - - let base_event = services() - .rooms - .timeline - .get_pdu_from_id(&base_pdu_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event not found.", - ))?; + let base_event = + services() + .rooms + .timeline + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event not found.", + ))?; let room_id = base_event.room_id.clone(); @@ -97,10 +96,7 @@ pub async fn get_context_route( } } - let start_token = events_before - .last() - .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) - .map(|count| count.to_string()); + let start_token = events_before.last().map(|(count, _)| count.stringify()); let events_before: Vec<_> = events_before .into_iter() @@ -151,10 +147,7 @@ pub async fn get_context_route( .state_full_ids(shortstatehash) .await?; - let end_token = events_after - .last() - .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) - .map(|count| count.to_string()); + let end_token = events_after.last().map(|(count, _)| count.stringify()); let events_after: Vec<_> = events_after .into_iter() diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 61c67cb..cd0cc7a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -714,8 +714,10 @@ async fn join_room_by_id_helper( .ok()? }, ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? - { + .map_err(|e| { + warn!("Auth check failed: {e}"); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") + })? 
{ return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 6ad0751..a0c9571 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,4 +1,7 @@ -use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; +use crate::{ + service::{pdu::PduBuilder, rooms::timeline::PduCount}, + services, utils, Error, Result, Ruma, +}; use ruma::{ api::client::{ error::ErrorKind, @@ -122,17 +125,17 @@ pub async fn get_message_events_route( } let from = match body.from.clone() { - Some(from) => from - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, - + Some(from) => PduCount::try_from_string(&from)?, None => match body.dir { - ruma::api::client::Direction::Forward => 0, - ruma::api::client::Direction::Backward => u64::MAX, + ruma::api::client::Direction::Forward => PduCount::min(), + ruma::api::client::Direction::Backward => PduCount::max(), }, }; - let to = body.to.as_ref().map(|t| t.parse()); + let to = body + .to + .as_ref() + .and_then(|t| PduCount::try_from_string(&t).ok()); services().rooms.lazy_loading.lazy_load_confirm_delivery( sender_user, @@ -158,15 +161,7 @@ pub async fn get_message_events_route( .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|(pdu_id, pdu)| { - services() - .rooms - .timeline - .pdu_count(&pdu_id) - .map(|pdu_count| (pdu_count, pdu)) - .ok() - }) - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` .collect(); for (_, event) in &events_after { @@ -192,26 +187,23 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = from.to_string(); - resp.end = next_token.map(|count| count.to_string()); + resp.start = from.stringify(); + resp.end = next_token.map(|count| count.stringify()); resp.chunk = events_after; } ruma::api::client::Direction::Backward => { + services() + .rooms + .timeline + .backfill_if_required(&body.room_id, from) + .await?; let events_before: Vec<_> = services() .rooms .timeline .pdus_until(sender_user, &body.room_id, from)? 
.take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|(pdu_id, pdu)| { - services() - .rooms - .timeline - .pdu_count(&pdu_id) - .map(|pdu_count| (pdu_count, pdu)) - .ok() - }) - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` .collect(); for (_, event) in &events_before { @@ -237,8 +229,8 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = from.to_string(); - resp.end = next_token.map(|count| count.to_string()); + resp.start = from.stringify(); + resp.end = next_token.map(|count| count.stringify()); resp.chunk = events_before; } } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index b12468a..a5553d2 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{services, Error, Result, Ruma}; +use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::{ @@ -42,18 +42,28 @@ pub async fn set_read_marker_route( } if let Some(event) = &body.private_read_receipt { - services().rooms.edus.read_receipt.private_read_set( - &body.room_id, - sender_user, - services() - .rooms - .timeline - .get_pdu_count(event)? - .ok_or(Error::BadRequest( + let count = services() + .rooms + .timeline + .get_pdu_count(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?; + let count = match count { + PduCount::Backfilled(_) => { + return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - )?; + "Read receipt is in backfilled timeline", + )) + } + PduCount::Normal(c) => c, + }; + services() + .rooms + .edus + .read_receipt + .private_read_set(&body.room_id, sender_user, count)?; } if let Some(event) = &body.read_receipt { @@ -142,17 +152,27 @@ pub async fn create_receipt_route( )?; } create_receipt::v3::ReceiptType::ReadPrivate => { + let count = services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?; + let count = match count { + PduCount::Backfilled(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Read receipt is in backfilled timeline", + )) + } + PduCount::Normal(c) => c, + }; services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services() - .rooms - .timeline - .get_pdu_count(&body.event_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, + count, )?; } _ => return Err(Error::bad_database("Unsupported receipt type")), diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 568a23c..834438c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{services, Error, Result, Ruma, RumaResponse}; +use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ filter::{FilterDefinition, LazyLoadOptions}, @@ -172,6 +172,7 @@ async fn sync_helper( let watcher = services().globals.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; + let next_batchcount = PduCount::Normal(next_batch); let next_batch_string = next_batch.to_string(); // Load filter @@ -197,6 +198,7 @@ async fn sync_helper( .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); + let sincecount = PduCount::Normal(since); let mut presence_updates = HashMap::new(); let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in @@ -241,12 +243,12 @@ async fn sync_helper( .rooms .timeline .last_timeline_count(&sender_user, &room_id)? - > since + > sincecount { let mut non_timeline_pdus = services() .rooms .timeline - .pdus_until(&sender_user, &room_id, u64::MAX)? + .pdus_until(&sender_user, &room_id, PduCount::max())? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -254,13 +256,7 @@ async fn sync_helper( } r.ok() }) - .take_while(|(pduid, _)| { - services() - .rooms - .timeline - .pdu_count(pduid) - .map_or(false, |count| count > since) - }); + .take_while(|(pducount, _)| pducount > &sincecount); // Take the last 10 events for the timeline timeline_pdus = non_timeline_pdus @@ -295,7 +291,7 @@ async fn sync_helper( &sender_user, &sender_device, &room_id, - since, + sincecount, )?; // Database queries: @@ -492,7 +488,7 @@ async fn sync_helper( &sender_device, &room_id, lazy_loaded, - next_batch, + next_batchcount, ); ( @@ -582,7 +578,7 @@ async fn sync_helper( &sender_device, &room_id, lazy_loaded, - next_batch, + next_batchcount, ); let encrypted_room = services() @@ -711,10 +707,14 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() - .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some( - services().rooms.timeline.pdu_count(pdu_id)?.to_string(), - )) + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + })) })?; let room_events: Vec<_> = timeline_pdus diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 11a6cbf..e95a560 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -629,6 +629,37 @@ pub async fn get_public_rooms_route( }) } +pub fn parse_incoming_pdu( + pdu: &RawJsonValue, +) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let room_id: OwnedRoomId = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid room id in pdu", + ))?; + + let room_version_id = services().rooms.state.get_room_version(&room_id)?; + + let (event_id, 
value) = match gen_event_id_canonical_json(&pdu, &room_version_id) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + Ok((event_id, value, room_id)) +} + /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. @@ -657,36 +688,7 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let room_id: OwnedRoomId = match value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - { - Some(id) => id, - None => { - // Event is invalid - continue; - } - }; - - let room_version_id = match services().rooms.state.get_room_version(&room_id) { - Ok(v) => v, - Err(_) => { - continue; - } - }; - - let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - continue; - } - }; + let (event_id, value, room_id) = parse_incoming_pdu(&pdu)?; // We do not add the event_id field to the pdu here because of signature and hashes checks services() @@ -1017,7 +1019,7 @@ pub async fn get_backfill_route( Ok(true), ) }) - .map(|(pdu_id, _)| services().rooms.timeline.get_pdu_json_from_id(&pdu_id)) + .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) .filter_map(|r| r.ok().flatten()) .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) .collect(); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 336317d..9f2c607 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -7,6 +7,8 @@ use tracing::error; use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; +use service::rooms::timeline::PduCount; + impl service::rooms::timeline::Data for KeyValueDatabase { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { let prefix = services() @@ -30,7 +32,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .transpose() } - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() @@ -39,20 +41,18 @@ impl service::rooms::timeline::Data for KeyValueDatabase { { hash_map::Entry::Vacant(v) => { if let Some(last_count) = self - .pdus_until(sender_user, room_id, u64::MAX)? - .filter_map(|r| { + .pdus_until(sender_user, room_id, PduCount::max())? + .find_map(|r| { // Filter out buggy events if r.is_err() { error!("Bad pdu in pdus_since: {:?}", r); } r.ok() }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() { - Ok(*v.insert(last_count?)) + Ok(*v.insert(last_count.0)) } else { - Ok(0) + Ok(PduCount::Normal(0)) } } hash_map::Entry::Occupied(o) => Ok(*o.get()), @@ -60,11 +60,23 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the `count` of this pdu's id. - fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid + fn get_pdu_count(&self, event_id: &EventId) -> Result> { + Ok(self + .eventid_pduid .get(event_id.as_bytes())? 
- .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() + .map(|pdu_id| Ok::<_, Error>(PduCount::Normal(pdu_count(&pdu_id)?))) + .transpose()? + .map_or_else( + || { + Ok::<_, Error>( + self.eventid_backfillpduid + .get(event_id.as_bytes())? + .map(|pdu_id| Ok::<_, Error>(PduCount::Backfilled(pdu_count(&pdu_id)?))) + .transpose()?, + ) + }, + |x| Ok(Some(x)), + )?) } /// Returns the json of a pdu. @@ -182,12 +194,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { }) } - /// Returns the `count` of this pdu's id. - fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - fn append_pdu( &self, pdu_id: &[u8], @@ -203,7 +209,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { self.lasttimelinecount_cache .lock() .unwrap() - .insert(pdu.room_id.clone(), count); + .insert(pdu.room_id.clone(), PduCount::Normal(count)); self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; @@ -211,6 +217,24 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok(()) } + fn prepend_backfill_pdu( + &self, + pdu_id: &[u8], + event_id: &EventId, + json: &CanonicalJsonObject, + ) -> Result<()> { + self.pduid_backfillpdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; + + self.eventid_backfillpduid + .insert(event_id.as_bytes(), pdu_id)?; + self.eventid_outlierpdu.remove(event_id.as_bytes())?; + + Ok(()) + } + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { @@ -227,51 +251,14 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } } - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a>> { - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(Box::new( - self.pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }), - )) - } - /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
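    /// Note on the `PduCount` scheme introduced in this commit: starting from a
    /// `PduCount::Normal` token, the iterator below first walks the live timeline
    /// (`pduid_pdu`) and then chains into the backfilled entries
    /// (`pduid_backfillpdu`), while starting from a `PduCount::Backfilled` token it
    /// only continues through older backfilled events.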
fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a>> { + until: PduCount, + ) -> Result> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -281,34 +268,63 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .to_be_bytes() .to_vec(); - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; + let mut current_backfill = prefix.clone(); + // +1 so we don't send the base event + let backfill_count = match until { + PduCount::Backfilled(x) => x + 1, + PduCount::Normal(_) => 0, + }; + current_backfill.extend_from_slice(&backfill_count.to_be_bytes()); let user_id = user_id.to_owned(); + let user_id2 = user_id.to_owned(); + let prefix2 = prefix.clone(); - Ok(Box::new( - self.pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }), - )) + let backfill_iter = self + .pduid_backfillpdu + .iter_from(¤t_backfill, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + let count = PduCount::Backfilled(pdu_count(&pdu_id)?); + Ok((count, pdu)) + }); + + match until { + PduCount::Backfilled(_) => Ok(Box::new(backfill_iter)), + PduCount::Normal(x) => { + let mut current_normal = prefix2.clone(); + // -1 so we don't send the base event + current_normal.extend_from_slice(&x.saturating_sub(1).to_be_bytes()); + let normal_iter = self + .pduid_pdu + .iter_from(¤t_normal, true) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id2 { + pdu.remove_transaction_id()?; + } + let count = PduCount::Normal(pdu_count(&pdu_id)?); + Ok((count, pdu)) + }); + + Ok(Box::new(normal_iter.chain(backfill_iter))) + } + } } fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a>> { + from: PduCount, + ) -> Result> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -318,26 +334,55 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .to_be_bytes() .to_vec(); - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; + let mut current_normal = prefix.clone(); + // +1 so we don't send the base event + let normal_count = match from { + PduCount::Normal(x) => x + 1, + PduCount::Backfilled(_) => 0, + }; + current_normal.extend_from_slice(&normal_count.to_be_bytes()); let user_id = user_id.to_owned(); + let user_id2 = user_id.to_owned(); + let prefix2 = prefix.clone(); - Ok(Box::new( - self.pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - 
}), - )) + let normal_iter = self + .pduid_pdu + .iter_from(¤t_normal, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + let count = PduCount::Normal(pdu_count(&pdu_id)?); + Ok((count, pdu)) + }); + + match from { + PduCount::Normal(_) => Ok(Box::new(normal_iter)), + PduCount::Backfilled(x) => { + let mut current_backfill = prefix2.clone(); + // -1 so we don't send the base event + current_backfill.extend_from_slice(&x.saturating_sub(1).to_be_bytes()); + let backfill_iter = self + .pduid_backfillpdu + .iter_from(¤t_backfill, true) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id2 { + pdu.remove_transaction_id()?; + } + let count = PduCount::Backfilled(pdu_count(&pdu_id)?); + Ok((count, pdu)) + }); + + Ok(Box::new(backfill_iter.chain(normal_iter))) + } + } } fn increment_notification_counts( @@ -368,3 +413,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok(()) } } + +/// Returns the `count` of this pdu's id. +fn pdu_count(pdu_id: &[u8]) -> Result { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 46ba5b3..f07ad87 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,7 +1,10 @@ pub mod abstraction; pub mod key_value; -use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; +use crate::{ + service::rooms::timeline::PduCount, services, utils, Config, Error, PduEvent, Result, Services, + SERVICES, +}; use abstraction::{KeyValueDatabaseEngine, KvTree}; use directories::ProjectDirs; use lru_cache::LruCache; @@ -71,7 +74,9 @@ pub struct KeyValueDatabase { //pub rooms: rooms::Rooms, pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count + pub(super) pduid_backfillpdu: Arc, // PduId = ShortRoomId + Count pub(super) eventid_pduid: Arc, + pub(super) eventid_backfillpduid: Arc, pub(super) roomid_pduleaves: Arc, pub(super) alias_roomid: Arc, pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count @@ -161,7 +166,7 @@ pub struct KeyValueDatabase { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock>>>, pub(super) appservice_in_room_cache: RwLock>>, - pub(super) lasttimelinecount_cache: Mutex>, + pub(super) lasttimelinecount_cache: Mutex>, } impl KeyValueDatabase { @@ -292,7 +297,9 @@ impl KeyValueDatabase { presenceid_presence: builder.open_tree("presenceid_presence")?, userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, pduid_pdu: builder.open_tree("pduid_pdu")?, + pduid_backfillpdu: builder.open_tree("pduid_backfillpdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, + eventid_backfillpduid: builder.open_tree("eventid_backfillpduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, alias_roomid: builder.open_tree("alias_roomid")?, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 701a734..e6e4f89 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -9,11 +9,13 @@ use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; +use 
super::timeline::PduCount; + pub struct Service { pub db: &'static dyn Data, pub lazy_load_waiting: - Mutex>>, + Mutex>>, } impl Service { @@ -36,7 +38,7 @@ impl Service { device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, - count: u64, + count: PduCount, ) { self.lazy_load_waiting.lock().unwrap().insert( ( @@ -55,7 +57,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: u64, + since: PduCount, ) -> Result<()> { if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( user_id.to_owned(), diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index e940ffa..bd9ef88 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -14,7 +14,7 @@ use ruma::{ }, StateEventType, }, - EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventId, OwnedServerName, RoomId, ServerName, UserId, }; use tracing::error; diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 9377af0..c802105 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -4,12 +4,14 @@ use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; use crate::{PduEvent, Result}; +use super::PduCount; + pub trait Data: Send + Sync { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. - fn get_pdu_count(&self, event_id: &EventId) -> Result>; + fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. fn get_pdu_json(&self, event_id: &EventId) -> Result>; @@ -38,9 +40,6 @@ pub trait Data: Send + Sync { /// Returns the pdu as a `BTreeMap`. fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; - /// Returns the `count` of this pdu's id. - fn pdu_count(&self, pdu_id: &[u8]) -> Result; - /// Adds a new pdu to the timeline fn append_pdu( &self, @@ -50,33 +49,34 @@ pub trait Data: Send + Sync { count: u64, ) -> Result<()>; + // Adds a new pdu to the backfilled timeline + fn prepend_backfill_pdu( + &self, + pdu_id: &[u8], + event_id: &EventId, + json: &CanonicalJsonObject, + ) -> Result<()>; + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a>>; - /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a>>; + until: PduCount, + ) -> Result> + 'a>>; + /// Returns an iterator over all events in a room that happened after the event with id `from` + /// in chronological order. 
fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a>>; + from: PduCount, + ) -> Result> + 'a>>; fn increment_notification_counts( &self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index cc58e6f..b407dfd 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,7 +1,9 @@ mod data; -use std::collections::HashMap; +use std::cmp::Ordering; +use std::collections::{BTreeMap, HashMap}; +use std::sync::RwLock; use std::{ collections::HashSet, sync::{Arc, Mutex}, @@ -9,6 +11,8 @@ use std::{ pub use data::Data; use regex::Regex; +use ruma::api::federation; +use ruma::serde::Base64; use ruma::{ api::client::error::ErrorKind, canonical_json::to_canonical_value, @@ -27,11 +31,13 @@ use ruma::{ uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomAliasId, RoomId, UserId, }; +use ruma::{user_id, ServerName}; use serde::Deserialize; -use serde_json::value::to_raw_value; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tokio::sync::MutexGuard; -use tracing::{error, warn}; +use tracing::{error, info, warn}; +use crate::api::server_server; use crate::{ service::pdu::{EventHash, PduBuilder}, services, utils, Error, PduEvent, Result, @@ -39,10 +45,70 @@ use crate::{ use super::state_compressor::CompressedStateEvent; +#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] +pub enum PduCount { + Backfilled(u64), + Normal(u64), +} + +impl PduCount { + pub fn min() -> Self { + Self::Backfilled(u64::MAX) + } + pub fn max() -> Self { + Self::Normal(u64::MAX) + } + + pub fn try_from_string(token: &str) -> Result { + if token.starts_with('-') { + token[1..].parse().map(PduCount::Backfilled) + } else { + token.parse().map(PduCount::Normal) + } + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token.")) + } + + pub fn stringify(&self) -> String { + match self { + PduCount::Backfilled(x) => format!("-{x}"), + PduCount::Normal(x) => x.to_string(), + } + } +} + +impl PartialOrd for PduCount { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for PduCount { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (PduCount::Normal(s), PduCount::Normal(o)) => s.cmp(o), + (PduCount::Backfilled(s), PduCount::Backfilled(o)) => o.cmp(s), + (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater, + (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less, + } + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn comparisons() { + assert!(PduCount::Normal(1) < PduCount::Normal(2)); + assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); + assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); + assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); + } +} + pub struct Service { pub db: &'static dyn Data, - pub lasttimelinecount_cache: Mutex>, + pub lasttimelinecount_cache: Mutex>, } impl Service { @@ -52,10 +118,15 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { self.db.last_timeline_count(sender_user, room_id) } + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_count(event_id) + } + // TODO Is this the same as the function above? 
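    // Sketch of how the pagination tokens behave (assertions written against the
    // `PduCount` definitions above, for illustration only):
    //
    //     assert_eq!(PduCount::Backfilled(3).stringify(), "-3");
    //     assert_eq!(PduCount::try_from_string("-3").ok(), Some(PduCount::Backfilled(3)));
    //     assert!(PduCount::Backfilled(3) < PduCount::Normal(1));
    //
    // i.e. backfilled history serializes as '-'-prefixed tokens and always sorts
    // before any event in the live timeline.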
/* #[tracing::instrument(skip(self))] @@ -79,11 +150,6 @@ impl Service { } */ - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.db.get_pdu_count(event_id) - } - /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_pdu_json(event_id) @@ -128,11 +194,6 @@ impl Service { self.db.get_pdu_json_from_id(pdu_id) } - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - self.db.pdu_count(pdu_id) - } - /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { @@ -863,19 +924,8 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - self.db.pdus_since(user_id, room_id, since) + ) -> Result> + 'a> { + self.pdus_after(user_id, room_id, PduCount::min()) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -885,8 +935,8 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { + until: PduCount, + ) -> Result> + 'a> { self.db.pdus_until(user_id, room_id, until) } @@ -897,8 +947,8 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { + from: PduCount, + ) -> Result> + 'a> { self.db.pdus_after(user_id, room_id, from) } @@ -915,4 +965,118 @@ impl Service { // If event does not exist, just noop Ok(()) } + + #[tracing::instrument(skip(self, room_id))] + pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { + let first_pdu = self + .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? + .next() + .expect("Room is not empty")?; + + if first_pdu.0 < from { + // No backfill required, there are still events between them + return Ok(()); + } + + let power_levels: RoomPowerLevelsEventContent = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? 
+ .unwrap_or_default(); + let mut admin_servers = power_levels + .users + .iter() + .filter(|(_, level)| **level > power_levels.users_default) + .map(|(user_id, _)| user_id.server_name()) + .collect::>(); + admin_servers.remove(services().globals.server_name()); + + // Request backfill + for backfill_server in admin_servers { + info!("Asking {backfill_server} for backfill"); + let response = services() + .sending + .send_federation_request( + backfill_server, + federation::backfill::get_backfill::v1::Request { + room_id: room_id.to_owned(), + v: vec![first_pdu.1.event_id.as_ref().to_owned()], + limit: uint!(100), + }, + ) + .await; + match response { + Ok(response) => { + let mut pub_key_map = RwLock::new(BTreeMap::new()); + for pdu in response.pdus { + if let Err(e) = self + .backfill_pdu(backfill_server, pdu, &mut pub_key_map) + .await + { + warn!("Failed to add backfilled pdu: {e}"); + } + } + return Ok(()); + } + Err(e) => { + warn!("{backfill_server} could not provide backfill: {e}"); + } + } + } + + info!("No servers could backfill"); + Ok(()) + } + + #[tracing::instrument(skip(self, pdu))] + pub async fn backfill_pdu( + &self, + origin: &ServerName, + pdu: Box, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; + + services() + .rooms + .event_handler + .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map) + .await?; + + let value = self.get_pdu_json(&event_id)?.expect("We just created it"); + + let shortroomid = services() + .rooms + .short + .get_shortroomid(&room_id)? + .expect("room exists"); + + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + + let count = services().globals.next_count()?; + let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // Insert pdu + self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; + + drop(insert_lock); + + info!("Appended incoming pdu"); + Ok(()) + } } From fcfb06ffa65c84de3253dbebae98faa5b6025d51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Feb 2023 00:56:05 +0100 Subject: [PATCH 1344/1727] fix: allow handling create event itself --- src/service/rooms/event_handler/mod.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index bc67f7a..66c6394 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -392,11 +392,12 @@ impl Service { } // The original create event must be in the auth events - if auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { + if !matches!( + auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()), + Some(_) | None + ) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Incoming event refers to wrong create event.", From 17a6431f5f145f1c28977bda344639f6e93b9699 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Feb 2023 00:56:26 +0100 Subject: [PATCH 1345/1727] fix: make backfilled events reachable --- src/database/key_value/rooms/timeline.rs | 137 +++++++++++++---------- src/service/rooms/timeline/data.rs | 1 - src/service/rooms/timeline/mod.rs | 5 +- 3 files changed, 79 insertions(+), 64 deletions(-) diff --git 
a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 9f2c607..7a33e5d 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -10,28 +10,6 @@ use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEven use service::rooms::timeline::PduCount; impl service::rooms::timeline::Data for KeyValueDatabase { - fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache @@ -81,20 +59,18 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the json of a pdu. fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.get_non_outlier_pdu_json(event_id)?.map_or_else( + || { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map(|pdu| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + }, + |x| Ok(Some(x)), + ) } /// Returns the json of a pdu. @@ -107,6 +83,21 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) }) .transpose()? + .map_or_else( + || { + Ok::<_, Error>( + self.eventid_backfillpduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_backfillpdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + }) + }) + .transpose()?, + ) + }, + |x| Ok(Some(x)), + )? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -115,7 +106,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) + Ok(self.eventid_pduid.get(event_id.as_bytes())?.map_or_else( + || self.eventid_backfillpduid.get(event_id.as_bytes()), + |x| Ok(Some(x)), + )?) } /// Returns the pdu. @@ -130,6 +124,21 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) }) .transpose()? + .map_or_else( + || { + Ok::<_, Error>( + self.eventid_backfillpduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_backfillpdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + }) + }) + .transpose()?, + ) + }, + |x| Ok(Some(x)), + )? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -145,22 +154,20 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? + .get_non_outlier_pdu(event_id)? 
.map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) + || { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map(|pdu| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() }, + |x| Ok(Some(x)), )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? + .map(Arc::new) { self.pdu_cache .lock() @@ -176,22 +183,28 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// /// This does __NOT__ check the outliers `Tree`. fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.pduid_pdu + .get(pdu_id)? + .map_or_else(|| self.pduid_backfillpdu.get(pdu_id), |x| Ok(Some(x)))? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) } /// Returns the pdu as a `BTreeMap`. fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.pduid_pdu + .get(pdu_id)? + .map_or_else(|| self.pduid_backfillpdu.get(pdu_id), |x| Ok(Some(x)))? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) } fn append_pdu( diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index c802105..193f384 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -7,7 +7,6 @@ use crate::{PduEvent, Result}; use super::PduCount; pub trait Data: Send + Sync { - fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b407dfd..dcf04be 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -114,7 +114,10 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.db.first_pdu_in_room(room_id) + self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? 
+ .next() + .map(|o| o.map(|(_, p)| Arc::new(p))) + .transpose() } #[tracing::instrument(skip(self))] From eae0989c4048152729a5369ed6cb252d87f8b2a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Feb 2023 16:38:50 +0100 Subject: [PATCH 1346/1727] fix: refactor backfill and add support for search --- src/api/client_server/search.rs | 14 +- src/database/key_value/rooms/search.rs | 16 +- src/database/key_value/rooms/timeline.rs | 282 ++++++++--------------- src/database/mod.rs | 4 - src/service/rooms/timeline/mod.rs | 45 +++- 5 files changed, 157 insertions(+), 204 deletions(-) diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 51255d5..5d760db 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -81,6 +81,14 @@ pub async fn search_events_route( let results: Vec<_> = results .iter() + .filter_map(|result| { + services() + .rooms + .timeline + .get_pdu_from_id(result) + .ok()? + .map(|pdu| pdu.to_room_event()) + }) .map(|result| { Ok::<_, Error>(SearchResult { context: EventContextResult { @@ -91,11 +99,7 @@ pub async fn search_events_route( start: None, }, rank: None, - result: services() - .rooms - .timeline - .get_pdu_from_id(result)? - .map(|pdu| pdu.to_room_event()), + result: Some(result), }) }) .filter_map(|r| r.ok()) diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 19ae57b..ad573f0 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,5 +1,3 @@ -use std::mem::size_of; - use ruma::RoomId; use crate::{database::KeyValueDatabase, service, services, utils, Result}; @@ -15,7 +13,7 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xff); - key.extend_from_slice(pdu_id); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here (key, Vec::new()) }); @@ -34,7 +32,6 @@ impl service::rooms::search::Data for KeyValueDatabase { .expect("room exists") .to_be_bytes() .to_vec(); - let prefix_clone = prefix.clone(); let words: Vec<_> = search_string .split_terminator(|c: char| !c.is_alphanumeric()) @@ -46,6 +43,7 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut prefix2 = prefix.clone(); prefix2.extend_from_slice(word.as_bytes()); prefix2.push(0xff); + let prefix3 = prefix2.clone(); let mut last_possible_id = prefix2.clone(); last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -53,7 +51,7 @@ impl service::rooms::search::Data for KeyValueDatabase { self.tokenids .iter_from(&last_possible_id, true) // Newest pdus first .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) + .map(move |(key, _)| key[prefix3.len()..].to_vec()) }); let common_elements = match utils::common_elements(iterators, |a, b| { @@ -64,12 +62,6 @@ impl service::rooms::search::Data for KeyValueDatabase { None => return Ok(None), }; - let mapped = common_elements.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }); - - Ok(Some((Box::new(mapped), words))) + Ok(Some((Box::new(common_elements), words))) } } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 7a33e5d..d9c4423 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -42,19 +42,8 @@ impl 
service::rooms::timeline::Data for KeyValueDatabase { Ok(self .eventid_pduid .get(event_id.as_bytes())? - .map(|pdu_id| Ok::<_, Error>(PduCount::Normal(pdu_count(&pdu_id)?))) - .transpose()? - .map_or_else( - || { - Ok::<_, Error>( - self.eventid_backfillpduid - .get(event_id.as_bytes())? - .map(|pdu_id| Ok::<_, Error>(PduCount::Backfilled(pdu_count(&pdu_id)?))) - .transpose()?, - ) - }, - |x| Ok(Some(x)), - )?) + .map(|pdu_id| pdu_count(&pdu_id)) + .transpose()?) } /// Returns the json of a pdu. @@ -83,21 +72,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) }) .transpose()? - .map_or_else( - || { - Ok::<_, Error>( - self.eventid_backfillpduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_backfillpdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - }) - }) - .transpose()?, - ) - }, - |x| Ok(Some(x)), - )? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -106,10 +80,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - Ok(self.eventid_pduid.get(event_id.as_bytes())?.map_or_else( - || self.eventid_backfillpduid.get(event_id.as_bytes()), - |x| Ok(Some(x)), - )?) + Ok(self.eventid_pduid.get(event_id.as_bytes())?) } /// Returns the pdu. @@ -124,21 +95,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) }) .transpose()? - .map_or_else( - || { - Ok::<_, Error>( - self.eventid_backfillpduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_backfillpdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - }) - }) - .transpose()?, - ) - }, - |x| Ok(Some(x)), - )? .map(|pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) @@ -183,28 +139,22 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// /// This does __NOT__ check the outliers `Tree`. fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu - .get(pdu_id)? - .map_or_else(|| self.pduid_backfillpdu.get(pdu_id), |x| Ok(Some(x)))? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) } /// Returns the pdu as a `BTreeMap`. fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu - .get(pdu_id)? - .map_or_else(|| self.pduid_backfillpdu.get(pdu_id), |x| Ok(Some(x)))? 
- .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) } fn append_pdu( @@ -236,13 +186,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase { event_id: &EventId, json: &CanonicalJsonObject, ) -> Result<()> { - self.pduid_backfillpdu.insert( + self.pduid_pdu.insert( pdu_id, &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), )?; - self.eventid_backfillpduid - .insert(event_id.as_bytes(), pdu_id)?; + self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?; self.eventid_outlierpdu.remove(event_id.as_bytes())?; Ok(()) @@ -272,64 +221,24 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, until: PduCount, ) -> Result> + 'a>> { - // Create the first part of the full pdu id - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current_backfill = prefix.clone(); - // +1 so we don't send the base event - let backfill_count = match until { - PduCount::Backfilled(x) => x + 1, - PduCount::Normal(_) => 0, - }; - current_backfill.extend_from_slice(&backfill_count.to_be_bytes()); + let (prefix, current) = count_to_id(&room_id, until, 1, true)?; let user_id = user_id.to_owned(); - let user_id2 = user_id.to_owned(); - let prefix2 = prefix.clone(); - let backfill_iter = self - .pduid_backfillpdu - .iter_from(¤t_backfill, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - let count = PduCount::Backfilled(pdu_count(&pdu_id)?); - Ok((count, pdu)) - }); - - match until { - PduCount::Backfilled(_) => Ok(Box::new(backfill_iter)), - PduCount::Normal(x) => { - let mut current_normal = prefix2.clone(); - // -1 so we don't send the base event - current_normal.extend_from_slice(&x.saturating_sub(1).to_be_bytes()); - let normal_iter = self - .pduid_pdu - .iter_from(¤t_normal, true) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id2 { - pdu.remove_transaction_id()?; - } - let count = PduCount::Normal(pdu_count(&pdu_id)?); - Ok((count, pdu)) - }); - - Ok(Box::new(normal_iter.chain(backfill_iter))) - } - } + Ok(Box::new( + self.pduid_pdu + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + let count = pdu_count(&pdu_id)?; + Ok((count, pdu)) + }), + )) } fn pdus_after<'a>( @@ -338,64 +247,24 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, from: PduCount, ) -> Result> + 'a>> { - // Create the first part of the full pdu id - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current_normal = prefix.clone(); - // +1 so we don't send the base event - let normal_count = match from { - PduCount::Normal(x) => x + 1, - PduCount::Backfilled(_) => 0, - }; - current_normal.extend_from_slice(&normal_count.to_be_bytes()); + let (prefix, current) = count_to_id(&room_id, from, 1, false)?; let user_id = user_id.to_owned(); - let user_id2 = user_id.to_owned(); - let prefix2 = prefix.clone(); - let normal_iter = self - .pduid_pdu - .iter_from(¤t_normal, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - let count = PduCount::Normal(pdu_count(&pdu_id)?); - Ok((count, pdu)) - }); - - match from { - PduCount::Normal(_) => Ok(Box::new(normal_iter)), - PduCount::Backfilled(x) => { - let mut current_backfill = prefix2.clone(); - // -1 so we don't send the base event - current_backfill.extend_from_slice(&x.saturating_sub(1).to_be_bytes()); - let backfill_iter = self - .pduid_backfillpdu - .iter_from(¤t_backfill, true) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id2 { - pdu.remove_transaction_id()?; - } - let count = PduCount::Backfilled(pdu_count(&pdu_id)?); - Ok((count, pdu)) - }); - - Ok(Box::new(backfill_iter.chain(normal_iter))) - } - } + Ok(Box::new( + self.pduid_pdu + .iter_from(¤t, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + let count = pdu_count(&pdu_id)?; + Ok((count, pdu)) + }), + )) } fn increment_notification_counts( @@ -428,7 +297,58 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the `count` of this pdu's id. -fn pdu_count(pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) +fn pdu_count(pdu_id: &[u8]) -> Result { + let last_u64 = utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; + let second_last_u64 = utils::u64_from_bytes( + &pdu_id[pdu_id.len() - 2 * size_of::()..pdu_id.len() - size_of::()], + ); + + if matches!(second_last_u64, Ok(0)) { + Ok(PduCount::Backfilled(u64::MAX - last_u64)) + } else { + Ok(PduCount::Normal(last_u64)) + } +} + +fn count_to_id( + room_id: &RoomId, + count: PduCount, + offset: u64, + subtract: bool, +) -> Result<(Vec, Vec)> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); + let mut pdu_id = prefix.clone(); + // +1 so we don't send the base event + let count_raw = match count { + PduCount::Normal(x) => { + if subtract { + x - offset + } else { + x + offset + } + } + PduCount::Backfilled(x) => { + pdu_id.extend_from_slice(&0_u64.to_be_bytes()); + let num = u64::MAX - x; + if subtract { + if num > 0 { + num - offset + } else { + num + } + } else { + num + offset + } + } + }; + pdu_id.extend_from_slice(&count_raw.to_be_bytes()); + + Ok((prefix, pdu_id)) } diff --git a/src/database/mod.rs b/src/database/mod.rs index f07ad87..e05991d 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -74,9 +74,7 @@ pub struct KeyValueDatabase { //pub rooms: rooms::Rooms, pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) pduid_backfillpdu: Arc, // PduId = ShortRoomId + Count pub(super) eventid_pduid: Arc, - pub(super) eventid_backfillpduid: Arc, pub(super) roomid_pduleaves: Arc, pub(super) alias_roomid: Arc, pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count @@ -297,9 +295,7 @@ impl KeyValueDatabase { presenceid_presence: builder.open_tree("presenceid_presence")?, userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, pduid_pdu: builder.open_tree("pduid_pdu")?, - pduid_backfillpdu: builder.open_tree("pduid_backfillpdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, - eventid_backfillpduid: builder.open_tree("eventid_backfillpduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, alias_roomid: builder.open_tree("alias_roomid")?, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dcf04be..47f4c65 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1045,6 +1045,24 @@ impl Service { ) -> Result<()> { let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; + // Lock so we cannot backfill the same pdu twice at the same time + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + + // Skip the PDU if we already have it as a timeline event + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? 
{ + info!("We already know {event_id} at {pdu_id:?}"); + return Ok(()); + } + services() .rooms .event_handler @@ -1052,6 +1070,7 @@ impl Service { .await?; let value = self.get_pdu_json(&event_id)?.expect("We just created it"); + let pdu = self.get_pdu(&event_id)?.expect("We just created it"); let shortroomid = services() .rooms @@ -1072,14 +1091,36 @@ impl Service { let count = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count.to_be_bytes()); + pdu_id.extend_from_slice(&0_u64.to_be_bytes()); + pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes()); // Insert pdu self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; drop(insert_lock); - info!("Appended incoming pdu"); + match pdu.kind { + RoomEventType::RoomMessage => { + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; + } + } + _ => {} + } + drop(mutex_lock); + + info!("Prepended backfill pdu"); Ok(()) } } From d39003ffc084eb61a017827d9b985ce6fdeefad4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Feb 2023 17:43:39 +0100 Subject: [PATCH 1347/1727] Allow backfilling create event itself --- src/service/rooms/state_accessor/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index bd9ef88..154c189 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -101,7 +101,7 @@ impl Service { ) -> Result { let shortstatehash = match self.pdu_shortstatehash(event_id)? 
{ Some(shortstatehash) => shortstatehash, - None => return Ok(false), + None => return Ok(true), }; if let Some(visibility) = self From 2aa0a2474b9f2f7b9c091b70e48ce3b37d52ab85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 09:32:47 +0100 Subject: [PATCH 1348/1727] fix: ignore unparsable pdus in /send --- src/api/server_server.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index e95a560..adf4bc2 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -688,7 +688,14 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { - let (event_id, value, room_id) = parse_incoming_pdu(&pdu)?; + let r = parse_incoming_pdu(&pdu); + let (event_id, value, room_id) = match r { + Ok(t) => t, + Err(e) => { + warn!("Could not parse pdu: {e}"); + continue; + } + }; // We do not add the event_id field to the pdu here because of signature and hashes checks services() From 2a16a5e967ff15052fe03313711dda89d0f95232 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 13:12:19 +0100 Subject: [PATCH 1349/1727] fix: don't send nulls as unsigned content --- src/service/pdu.rs | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 554f3be..5b5cbd0 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -111,9 +111,11 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -133,10 +135,12 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, "room_id": self.room_id, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -155,10 +159,12 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, "room_id": self.room_id, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -171,32 +177,38 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_state_event(&self) -> Raw { - let json = json!({ + let mut json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, "room_id": self.room_id, "state_key": self.state_key, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + serde_json::from_value(json).expect("Raw::from_value always works") } #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let json = json!({ + let mut json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, "state_key": self.state_key, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + 
serde_json::from_value(json).expect("Raw::from_value always works") } @@ -214,18 +226,21 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_member_event(&self) -> Raw> { - let json = json!({ + let mut json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, "redacts": self.redacts, - "unsigned": self.unsigned, "room_id": self.room_id, "state_key": self.state_key, }); + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + serde_json::from_value(json).expect("Raw::from_value always works") } From 10fa686c77637ed2837ff6348ccbdebeff9dcae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 15:49:55 +0100 Subject: [PATCH 1350/1727] feat: respect history visibility --- src/api/client_server/context.rs | 20 +++++- src/api/client_server/message.rs | 25 ++++---- src/api/client_server/room.rs | 23 +++---- src/api/client_server/search.rs | 7 +++ src/api/client_server/state.rs | 81 +++--------------------- src/api/server_server.rs | 21 +++++++ src/service/mod.rs | 3 + src/service/rooms/state_accessor/mod.rs | 84 ++++++++++++++++++++++++- 8 files changed, 166 insertions(+), 98 deletions(-) diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index fa3c754..5a3013b 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -50,12 +50,12 @@ pub async fn get_context_route( if !services() .rooms - .state_cache - .is_joined(sender_user, &room_id)? + .state_accessor + .user_can_see_event(sender_user, &room_id, &body.event_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You don't have permission to view this room.", + "You don't have permission to view this event.", )); } @@ -82,6 +82,13 @@ pub async fn get_context_route( / 2, ) .filter_map(|r| r.ok()) // Remove buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) .collect(); for (_, event) in &events_before { @@ -114,6 +121,13 @@ pub async fn get_context_route( / 2, ) .filter_map(|r| r.ok()) // Remove buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) .collect(); for (_, event) in &events_after { diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index a0c9571..f7c77f6 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -113,17 +113,6 @@ pub async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !services() - .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - let from = match body.from.clone() { Some(from) => PduCount::try_from_string(&from)?, None => match body.dir { @@ -161,6 +150,13 @@ pub async fn get_message_events_route( .pdus_after(sender_user, &body.room_id, from)? 
.take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .unwrap_or(false) + }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` .collect(); @@ -203,6 +199,13 @@ pub async fn get_message_events_route( .pdus_until(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .unwrap_or(false) + }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` .collect(); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 830e085..aa6fa5f 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -425,24 +425,25 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services() + let event = services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - { + .timeline + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + if !services().rooms.state_accessor.user_can_see_event( + sender_user, + &event.room_id, + &body.event_id, + )? { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You don't have permission to view this room.", + "You don't have permission to view this event.", )); } Ok(get_room_event::v3::Response { - event: services() - .rooms - .timeline - .get_pdu(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? - .to_room_event(), + event: event.to_room_event(), }) } diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 5d760db..fe69e7c 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -87,6 +87,13 @@ pub async fn search_events_route( .timeline .get_pdu_from_id(result) .ok()? + .filter(|pdu| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .unwrap_or(false) + }) .map(|pdu| pdu.to_room_event()) }) .map(|result| { diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index d9c1464..e2abe48 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -7,11 +7,7 @@ use ruma::{ state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - }, - AnyStateEventContent, StateEventType, + room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType, }, serde::Raw, EventId, RoomId, UserId, @@ -85,29 +81,10 @@ pub async fn get_state_events_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - #[allow(clippy::blocks_in_if_conditions)] - // Users not in the room should not be able to access the state unless history_visibility is - // WorldReadable - if !services() + if services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - && !matches!( - services() - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
- .map(|event| { - serde_json::from_str(event.content.get()) - .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - }), - Some(Ok(HistoryVisibility::WorldReadable)) - ) + .state_accessor + .user_can_see_state_events(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -137,29 +114,10 @@ pub async fn get_state_events_for_key_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - #[allow(clippy::blocks_in_if_conditions)] - // Users not in the room should not be able to access the state unless history_visibility is - // WorldReadable - if !services() + if services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - && !matches!( - services() - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? - .map(|event| { - serde_json::from_str(event.content.get()) - .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - }), - Some(Ok(HistoryVisibility::WorldReadable)) - ) + .state_accessor + .user_can_see_state_events(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -192,29 +150,10 @@ pub async fn get_state_events_for_empty_key_route( ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - #[allow(clippy::blocks_in_if_conditions)] - // Users not in the room should not be able to access the state unless history_visibility is - // WorldReadable - if !services() + if services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - && !matches!( - services() - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? - .map(|event| { - serde_json::from_str(event.content.get()) - .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - }), - Some(Ok(HistoryVisibility::WorldReadable)) - ) + .state_accessor + .user_can_see_state_events(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index adf4bc2..0247369 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -954,6 +954,17 @@ pub async fn get_event_route( )); } + if !services().rooms.state_accessor.server_can_see_event( + sender_servername, + &room_id, + &body.event_id, + )? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not allowed to see event.", + )); + } + Ok(get_event::v1::Response { origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), @@ -1098,6 +1109,16 @@ pub async fn get_missing_events_route( i += 1; continue; } + + if !services().rooms.state_accessor.server_can_see_event( + sender_servername, + &body.room_id, + &queued_events[i], + )? 
{ + i += 1; + continue; + } + queued_events.extend_from_slice( &serde_json::from_value::>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { diff --git a/src/service/mod.rs b/src/service/mod.rs index 07d80a1..eea397f 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -82,6 +82,9 @@ impl Services { server_visibility_cache: Mutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), + user_visibility_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), }, state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 154c189..a25a8b5 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -14,7 +14,7 @@ use ruma::{ }, StateEventType, }, - EventId, OwnedServerName, RoomId, ServerName, UserId, + EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use tracing::error; @@ -23,6 +23,7 @@ use crate::{services, Error, PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, pub server_visibility_cache: Mutex>, + pub user_visibility_cache: Mutex>, } impl Service { @@ -92,7 +93,7 @@ impl Service { /// Whether a server is allowed to see an event through federation, based on /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self, origin, room_id, event_id))] pub fn server_can_see_event( &self, origin: &ServerName, @@ -154,6 +155,85 @@ impl Service { Ok(visibility) } + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self, user_id, room_id, event_id))] + pub fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { + let shortstatehash = match self.pdu_shortstatehash(event_id)? { + Some(shortstatehash) => shortstatehash, + None => return Ok(true), + }; + + if let Some(visibility) = self + .user_visibility_cache + .lock() + .unwrap() + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + return Ok(*visibility); + } + + let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + + let history_visibility = self + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|_| { + Error::bad_database("Invalid history visibility event in database.") + }) + })?; + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable => true, + HistoryVisibility::Shared => currently_member, + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, &user_id) + } + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, &user_id) + } + _ => { + error!("Unknown history visibility {history_visibility}"); + false + } + }; + + self.user_visibility_cache + .lock() + .unwrap() + .insert((user_id.to_owned(), shortstatehash), visibility); + + Ok(visibility) + } + + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. 
+ #[tracing::instrument(skip(self, user_id, room_id))] + pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + + let history_visibility = self + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|_| { + Error::bad_database("Invalid history visibility event in database.") + }) + })?; + + Ok(currently_member || history_visibility == HistoryVisibility::WorldReadable) + } + /// Returns the state hash for this pdu. pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.db.pdu_shortstatehash(event_id) From 4617ee2b6b763f66bfc6a018471dfdb2170db5da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 21:07:47 +0100 Subject: [PATCH 1351/1727] More logging for remote joins --- src/api/client_server/membership.rs | 31 ++++++++++++++++++++++---- src/service/rooms/event_handler/mod.rs | 8 +++++-- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index cd0cc7a..965c618 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -29,7 +29,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; use crate::{ service::pdu::{gen_event_id_canonical_json, PduBuilder}, @@ -491,9 +491,13 @@ async fn join_room_by_id_helper( .state_cache .server_in_room(services().globals.server_name(), room_id)? { + info!("Joining {room_id} over federation."); + let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?; + info!("make_join finished"); + let room_version_id = match make_join_response.room_version { Some(room_version) if services() @@ -578,6 +582,7 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let mut join_event = join_event_stub; + info!("Asking {remote_server} for send_join"); let send_join_response = services() .sending .send_federation_request( @@ -590,7 +595,10 @@ async fn join_room_by_id_helper( ) .await?; + info!("send_join finished"); + if let Some(signed_raw) = &send_join_response.room_state.event { + info!("There is a signed event. 
This room is probably using restricted joins"); let (signed_event_id, signed_value) = match gen_event_id_canonical_json(signed_raw, &room_version_id) { Ok(t) => t, @@ -630,24 +638,29 @@ async fn join_room_by_id_helper( .expect("we created a valid pdu") .insert(remote_server.to_string(), signature.clone()); } else { - warn!("Server {} sent invalid sendjoin event", remote_server); + warn!( + "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}", + ); } } services().rooms.short.get_or_create_shortroomid(room_id)?; + info!("Parsing join event"); let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); + info!("Fetching join signing keys"); services() .rooms .event_handler .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) .await?; + info!("Going through send_join response room_state"); for result in send_join_response .room_state .state @@ -677,6 +690,7 @@ async fn join_room_by_id_helper( } } + info!("Going through send_join response auth_chain"); for result in send_join_response .room_state .auth_chain @@ -694,6 +708,7 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } + info!("Running send_join auth check"); if !state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &parsed_join_pdu, @@ -724,6 +739,7 @@ async fn join_room_by_id_helper( )); } + info!("Saving state from send_join"); let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, state @@ -743,12 +759,14 @@ async fn join_room_by_id_helper( .force_state(room_id, statehash_before_join, new, removed, &state_lock) .await?; + info!("Updating joined counts for new room"); services().rooms.state_cache.update_joined_count(room_id)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; + info!("Appending new room join event"); services().rooms.timeline.append_pdu( &parsed_join_pdu, join_event, @@ -756,6 +774,7 @@ async fn join_room_by_id_helper( &state_lock, )?; + info!("Setting final room state for new room"); // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist services() @@ -763,6 +782,8 @@ async fn join_room_by_id_helper( .state .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { + info!("We can join locally"); + let join_rules_event = services().rooms.state_accessor.room_state_get( room_id, &StateEventType::RoomJoinRules, @@ -881,8 +902,9 @@ async fn join_room_by_id_helper( }; if !restriction_rooms.is_empty() { - // We couldn't do the join locally, maybe federation can help to satisfy the restricted - // join requirements + info!( + "We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements" + ); let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?; @@ -1040,6 +1062,7 @@ async fn make_join_request( if remote_server == services().globals.server_name() { continue; } + info!("Asking {remote_server} for make_join"); let make_join_response = services() .sending .send_federation_request( diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 66c6394..63301b8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1460,12 +1460,12 @@ impl Service { } if servers.is_empty() { - // We had all keys locally + info!("We had all keys locally"); return Ok(()); } for server in services().globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); + info!("Asking batch signing keys from trusted server {}", server); if let Ok(keys) = services() .sending .send_federation_request( @@ -1508,10 +1508,12 @@ impl Service { } if servers.is_empty() { + info!("Trusted server supplied all signing keys"); return Ok(()); } } + info!("Asking individual servers for signing keys"); let mut futures: FuturesUnordered<_> = servers .into_keys() .map(|server| async move { @@ -1541,6 +1543,8 @@ impl Service { } } + info!("Search for signing keys done"); + Ok(()) } From 8b648d0d3f1e6bb4f9299ddaaa5502b1b5815718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 22:09:15 +0100 Subject: [PATCH 1352/1727] fix: force abort federation requests after 2 minutes --- src/service/sending/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 1861feb..e0e2f54 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -684,7 +684,15 @@ impl Service { T: Debug, { let permit = self.maximum_requests.acquire().await; - let response = server_server::send_request(destination, request).await; + let response = tokio::time::timeout( + Duration::from_secs(2 * 60), + server_server::send_request(destination, request), + ) + .await + .map_err(|_| { + warn!("Timeout waiting for server response of {destination}"); + Error::BadServerResponse("Timeout waiting for server response") + })?; drop(permit); response From bde4880c1d169066e22e4c04288f8bce6c84b32f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 22 Feb 2023 23:49:49 +0100 Subject: [PATCH 1353/1727] fix: 
don't unwrap server keys --- src/service/rooms/event_handler/mod.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 63301b8..e3502e6 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1529,17 +1529,18 @@ impl Service { while let Some(result) = futures.next().await { if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = services() - .globals - .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); + if let Ok(key) = get_keys_response.server_key.deserialize() { + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, key)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } } } From 2316d89048f78043af95a055f8c8cd45fae894f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Feb 2023 11:20:40 +0100 Subject: [PATCH 1354/1727] Even more logging --- src/api/server_server.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0247369..3bdbdef 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -125,6 +125,8 @@ where return Err(Error::bad_config("Federation is disabled.")); } + info!("Preparing to send request to {destination}"); + let mut write_destination_to_cache = false; let cached_result = services() @@ -231,11 +233,13 @@ where let url = reqwest_request.url().clone(); + info!("Sending request to {destination} at {url}"); let response = services() .globals .federation_client() .execute(reqwest_request) .await; + info!("Received response from {destination} at {url}"); match response { Ok(mut response) => { @@ -251,10 +255,12 @@ where .expect("http::response::Builder is usable"), ); + info!("Getting response bytes from {destination}"); let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error {}", e); Vec::new().into() }); // TODO: handle timeout + info!("Got response bytes from {destination}"); if status != 200 { warn!( @@ -273,6 +279,7 @@ where .expect("reqwest body is valid http body"); if status == 200 { + info!("Parsing response bytes from {destination}"); let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { services() @@ -294,6 +301,7 @@ where Error::BadServerResponse("Server returned bad 200 response.") }) } else { + info!("Returning error from {destination}"); Err(Error::FederationError( destination.to_owned(), RumaError::from_http_response(http_response), From b7c99788e46a4e3c0e25f38742dfac67b72a7b9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Feb 2023 11:49:35 +0100 Subject: [PATCH 1355/1727] All the logs --- src/service/rooms/event_handler/mod.rs | 4 +++- src/service/sending/mod.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e3502e6..da40e6f 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1513,7 +1513,7 @@ 
impl Service { } } - info!("Asking individual servers for signing keys"); + info!("Asking individual servers for signing keys: {servers:?}"); let mut futures: FuturesUnordered<_> = servers .into_keys() .map(|server| async move { @@ -1528,6 +1528,7 @@ impl Service { .collect(); while let Some(result) = futures.next().await { + info!("Received new result"); if let (Ok(get_keys_response), origin) = result { if let Ok(key) = get_keys_response.server_key.deserialize() { let result: BTreeMap<_, _> = services() @@ -1542,6 +1543,7 @@ impl Service { .insert(origin.to_string(), result); } } + info!("Done handling result"); } info!("Search for signing keys done"); diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e0e2f54..b0d9b4b 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -40,7 +40,7 @@ use tokio::{ select, sync::{mpsc, Mutex, Semaphore}, }; -use tracing::{error, warn}; +use tracing::{error, info, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { @@ -683,7 +683,9 @@ impl Service { where T: Debug, { + info!("Waiting for permit"); let permit = self.maximum_requests.acquire().await; + info!("Got permit"); let response = tokio::time::timeout( Duration::from_secs(2 * 60), server_server::send_request(destination, request), From cb0ce5b08f8d9e68aad51478d745730e92b2cdba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Feb 2023 12:28:15 +0100 Subject: [PATCH 1356/1727] Logs for server resolution --- src/api/server_server.rs | 50 +++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 3bdbdef..422da47 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -340,36 +340,38 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { + info!("Finding actual destination for {destination}"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { Some(host_port) => { - // 1: IP literal with provided or default port + info!("1: IP literal with provided or default port"); host_port } None => { if let Some(pos) = destination_str.find(':') { - // 2: Hostname with included port + info!("2: Hostname with included port"); let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { + info!("Requesting well known for {destination}"); match request_well_known(destination.as_str()).await { - // 3: A .well-known file is available Some(delegated_hostname) => { + info!("3: A .well-known file is available"); hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { if let Some(pos) = delegated_hostname.find(':') { - // 3.2: Hostname with port in .well-known file + info!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated_hostname.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - // Delegated hostname has no port in this branch + info!("Delegated hostname 
has no port in this branch"); if let Some(hostname_override) = query_srv_record(&delegated_hostname).await { - // 3.3: SRV lookup successful + info!("3.3: SRV lookup successful"); let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -400,18 +402,18 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe add_port_to_hostname(&delegated_hostname) } } else { - // 3.4: No SRV records, just use the hostname from .well-known + info!("3.4: No SRV records, just use the hostname from .well-known"); add_port_to_hostname(&delegated_hostname) } } } } } - // 4: No .well-known or an error occured None => { + info!("4: No .well-known or an error occured"); match query_srv_record(&destination_str).await { - // 4: SRV record found Some(hostname_override) => { + info!("4: SRV record found"); let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -442,14 +444,17 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe add_port_to_hostname(&hostname) } } - // 5: No SRV record found - None => add_port_to_hostname(&destination_str), + None => { + info!("5: No SRV record found"); + add_port_to_hostname(&destination_str) + } } } } } } }; + info!("Actual destination: {actual_destination:?}"); // Can't use get_ip_with_port here because we don't want to add a port // to an IP address if it wasn't specified @@ -488,19 +493,16 @@ async fn query_srv_record(hostname: &'_ str) -> Option { } async fn request_well_known(destination: &str) -> Option { - let body: serde_json::Value = serde_json::from_str( - &services() - .globals - .default_client() - .get(&format!("https://{destination}/.well-known/matrix/server")) - .send() - .await - .ok()? - .text() - .await - .ok()?, - ) - .ok()?; + let response = services() + .globals + .default_client() + .get(&format!("https://{destination}/.well-known/matrix/server")) + .send() + .await; + info!("Got well known response"); + let text = response.ok()?.text().await; + info!("Got well known response text"); + let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; Some(body.get("m.server")?.as_str()?.to_owned()) } From 27f29ba69911fc2de240bafe165024fc8ce5ea5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Feb 2023 16:12:58 +0100 Subject: [PATCH 1357/1727] fix: SRV lookups should end with a period --- src/api/server_server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 422da47..3a6da4f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -472,10 +472,11 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } async fn query_srv_record(hostname: &'_ str) -> Option { + let hostname = hostname.trim_end_matches('.'); if let Ok(Some(host_port)) = services() .globals .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{hostname}")) + .srv_lookup(format!("_matrix._tcp.{hostname}.")) .await .map(|srv| { srv.iter().next().map(|result| { From a1bd348977357e55508fc5510d24c8bdc9115c67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 7 Mar 2023 17:58:55 +0100 Subject: [PATCH 1358/1727] fix: history visibility --- src/api/client_server/account.rs | 2 +- src/api/client_server/membership.rs | 11 +++++------ src/api/client_server/state.rs | 6 +++--- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 
7459254..1d7480a 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -129,7 +129,7 @@ pub async fn register_route(body: Ruma) -> Result Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - // TODO: check history visibility? if !services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? + .state_accessor + .user_can_see_state_events(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -434,12 +433,12 @@ pub async fn joined_members_route( if !services() .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? + .state_accessor + .user_can_see_state_events(&sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You aren't a member of the room.", + "You don't have permission to view this room.", )); } diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index e2abe48..8e4ceaf 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -81,7 +81,7 @@ pub async fn get_state_events_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services() + if !services() .rooms .state_accessor .user_can_see_state_events(&sender_user, &body.room_id)? @@ -114,7 +114,7 @@ pub async fn get_state_events_for_key_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services() + if !services() .rooms .state_accessor .user_can_see_state_events(&sender_user, &body.room_id)? @@ -150,7 +150,7 @@ pub async fn get_state_events_for_empty_key_route( ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services() + if !services() .rooms .state_accessor .user_can_see_state_events(&sender_user, &body.room_id)? 
From 63f787f6357ef1344243d97ffa79cacba4694bb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 13 Mar 2023 08:27:59 +0100 Subject: [PATCH 1359/1727] Reduce logs from info to debug --- src/api/server_server.rs | 44 +++++++++++++------------- src/service/rooms/event_handler/mod.rs | 1 + src/service/sending/mod.rs | 6 ++-- 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 3a6da4f..852e59a 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -125,7 +125,7 @@ where return Err(Error::bad_config("Federation is disabled.")); } - info!("Preparing to send request to {destination}"); + debug!("Preparing to send request to {destination}"); let mut write_destination_to_cache = false; @@ -233,13 +233,13 @@ where let url = reqwest_request.url().clone(); - info!("Sending request to {destination} at {url}"); + debug!("Sending request to {destination} at {url}"); let response = services() .globals .federation_client() .execute(reqwest_request) .await; - info!("Received response from {destination} at {url}"); + debug!("Received response from {destination} at {url}"); match response { Ok(mut response) => { @@ -255,12 +255,12 @@ where .expect("http::response::Builder is usable"), ); - info!("Getting response bytes from {destination}"); + debug!("Getting response bytes from {destination}"); let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error {}", e); Vec::new().into() }); // TODO: handle timeout - info!("Got response bytes from {destination}"); + debug!("Got response bytes from {destination}"); if status != 200 { warn!( @@ -279,7 +279,7 @@ where .expect("reqwest body is valid http body"); if status == 200 { - info!("Parsing response bytes from {destination}"); + debug!("Parsing response bytes from {destination}"); let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { services() @@ -301,7 +301,7 @@ where Error::BadServerResponse("Server returned bad 200 response.") }) } else { - info!("Returning error from {destination}"); + debug!("Returning error from {destination}"); Err(Error::FederationError( destination.to_owned(), RumaError::from_http_response(http_response), @@ -340,38 +340,38 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { - info!("Finding actual destination for {destination}"); + debug!("Finding actual destination for {destination}"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { Some(host_port) => { - info!("1: IP literal with provided or default port"); + debug!("1: IP literal with provided or default port"); host_port } None => { if let Some(pos) = destination_str.find(':') { - info!("2: Hostname with included port"); + debug!("2: Hostname with included port"); let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - info!("Requesting well known for {destination}"); + debug!("Requesting well known for {destination}"); match request_well_known(destination.as_str()).await { Some(delegated_hostname) => { - info!("3: A 
.well-known file is available"); + debug!("3: A .well-known file is available"); hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { if let Some(pos) = delegated_hostname.find(':') { - info!("3.2: Hostname with port in .well-known file"); + debug!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated_hostname.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - info!("Delegated hostname has no port in this branch"); + debug!("Delegated hostname has no port in this branch"); if let Some(hostname_override) = query_srv_record(&delegated_hostname).await { - info!("3.3: SRV lookup successful"); + debug!("3.3: SRV lookup successful"); let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -402,7 +402,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe add_port_to_hostname(&delegated_hostname) } } else { - info!("3.4: No SRV records, just use the hostname from .well-known"); + debug!("3.4: No SRV records, just use the hostname from .well-known"); add_port_to_hostname(&delegated_hostname) } } @@ -410,10 +410,10 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } } None => { - info!("4: No .well-known or an error occured"); + debug!("4: No .well-known or an error occured"); match query_srv_record(&destination_str).await { Some(hostname_override) => { - info!("4: SRV record found"); + debug!("4: SRV record found"); let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -445,7 +445,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } } None => { - info!("5: No SRV record found"); + debug!("5: No SRV record found"); add_port_to_hostname(&destination_str) } } @@ -454,7 +454,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } } }; - info!("Actual destination: {actual_destination:?}"); + debug!("Actual destination: {actual_destination:?}"); // Can't use get_ip_with_port here because we don't want to add a port // to an IP address if it wasn't specified @@ -500,9 +500,9 @@ async fn request_well_known(destination: &str) -> Option { .get(&format!("https://{destination}/.well-known/matrix/server")) .send() .await; - info!("Got well known response"); + debug!("Got well known response"); let text = response.ok()?.text().await; - info!("Got well known response text"); + debug!("Got well known response text"); let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; Some(body.get("m.server")?.as_str()?.to_owned()) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index da40e6f..b01a282 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1530,6 +1530,7 @@ impl Service { while let Some(result) = futures.next().await { info!("Received new result"); if let (Ok(get_keys_response), origin) = result { + info!("Result is from {origin}"); if let Ok(key) = get_keys_response.server_key.deserialize() { let result: BTreeMap<_, _> = services() .globals diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b0d9b4b..14d83be 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -40,7 +40,7 @@ use tokio::{ select, sync::{mpsc, Mutex, Semaphore}, }; -use tracing::{error, info, warn}; +use 
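For context on this patch's info-to-debug change: the demotion only reduces log volume because the tracing subscriber filters by level, so routine per-request messages disappear at the default level and come back with a more verbose filter. A minimal sketch of that filtering, assuming `tracing-subscriber` with its `env-filter` feature; Conduit's actual subscriber setup lives elsewhere and is not shown in this patch:

```rust
use tracing::{debug, info};
use tracing_subscriber::EnvFilter;

fn main() {
    // With the default "info" filter, the per-request debug! lines demoted in
    // this patch are dropped at the subscriber; RUST_LOG=debug brings them back.
    tracing_subscriber::fmt()
        .with_env_filter(
            EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
        )
        .init();

    info!("still emitted at the default level");
    debug!("suppressed unless the filter allows debug");
}
```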
tracing::{debug, error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { @@ -683,9 +683,9 @@ impl Service { where T: Debug, { - info!("Waiting for permit"); + debug!("Waiting for permit"); let permit = self.maximum_requests.acquire().await; - info!("Got permit"); + debug!("Got permit"); let response = tokio::time::timeout( Duration::from_secs(2 * 60), server_server::send_request(destination, request), From 42b12934e33c81b0684347d5c02e874bf3492674 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 13 Mar 2023 10:39:02 +0100 Subject: [PATCH 1360/1727] Don't crash when a room errors --- src/api/client_server/sync.rs | 1231 +++++++++++++++++---------------- 1 file changed, 630 insertions(+), 601 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 834438c..5eb820c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -2,7 +2,14 @@ use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma, R use ruma::{ api::client::{ filter::{FilterDefinition, LazyLoadOptions}, - sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + sync::sync_events::{ + self, + v3::{ + Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, + }, + DeviceLists, UnreadNotificationsCount, + }, uiaa::UiaaResponse, }, events::{ @@ -10,7 +17,7 @@ use ruma::{ RoomEventType, StateEventType, }, serde::Raw, - OwnedDeviceId, OwnedUserId, RoomId, UserId, + DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UserId, }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, @@ -160,11 +167,6 @@ async fn sync_helper( body: sync_events::v3::Request, // bool = caching allowed ) -> Result<(sync_events::v3::Response, bool), Error> { - use sync_events::v3::{ - Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, - Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, - }; - // TODO: match body.set_presence { services().rooms.edus.presence.ping_presence(&sender_user)?; @@ -192,6 +194,8 @@ async fn sync_helper( _ => (false, false), }; + let full_state = body.full_state; + let mut joined_rooms = BTreeMap::new(); let since = body .since @@ -220,605 +224,57 @@ async fn sync_helper( .collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; - - { - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); - } - - let timeline_pdus; - let limited; - if services() - .rooms - .timeline - .last_timeline_count(&sender_user, &room_id)? - > sincecount - { - let mut non_timeline_pdus = services() - .rooms - .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pducount, _)| pducount > &sincecount); - - // Take the last 10 events for the timeline - timeline_pdus = non_timeline_pdus - .by_ref() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect::>(); - - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - limited = non_timeline_pdus.next().is_some(); - } else { - timeline_pdus = Vec::new(); - limited = false; - } - - let send_notification_counts = !timeline_pdus.is_empty() - || services() - .rooms - .user - .last_notification_read(&sender_user, &room_id)? - > since; - - let mut timeline_users = HashSet::new(); - for (_, event) in &timeline_pdus { - timeline_users.insert(event.sender.as_str().to_owned()); - } - - services().rooms.lazy_loading.lazy_load_confirm_delivery( + if let Ok(joined_room) = load_joined_room( &sender_user, &sender_device, &room_id, + since, sincecount, - )?; - - // Database queries: - - let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { - s - } else { - error!("Room {} has no state", room_id); - continue; - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(&room_id, since)?; - - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or(0); - let invited_member_count = services() - .rooms - .state_cache - .room_invited_count(&room_id)? - .unwrap_or(0); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in services() - .rooms - .timeline - .all_pdus(&sender_user, &room_id)? - .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = - serde_json::from_str(pdu.content.get()).map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? - || services() - .rooms - .state_cache - .is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes, - )) - }; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? 
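For context on the timeline window logic being moved in this patch: `pdus_until` yields events newest first, so the code keeps only events newer than the `since` token, takes up to 10 of them, reverses them into chronological order, and marks the room's timeline as limited when anything newer than `since` was left over. A standalone sketch of that iterator logic, with plain integers standing in for PDU counts (illustrative only):

```rust
fn timeline_window(newest_first: Vec<u64>, since: u64) -> (Vec<u64>, bool) {
    // Keep only events newer than `since`, newest first,
    // like `pdus_until(...).take_while(|count| count > &sincecount)`.
    let mut newer = newest_first.into_iter().take_while(|c| *c > since);

    // Take the last 10 events for the timeline, oldest first.
    let window: Vec<u64> = newer
        .by_ref()
        .take(10)
        .collect::<Vec<_>>()
        .into_iter()
        .rev()
        .collect();

    // If anything newer than `since` remains, the /sync timeline is "limited".
    let limited = newer.next().is_some();
    (window, limited)
}

fn main() {
    let (window, limited) = timeline_window((1..=20).rev().collect(), 5);
    assert_eq!(window, (11..=20).collect::<Vec<u64>>());
    assert!(limited);
}
```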
- .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); - - let ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) = if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - let mut i = 0; - for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services() - .rooms - .short - .get_statekey_from_short(shortstatekey)?; - - if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || body.full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || *sender_user == state_key - { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - - // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, - )?; - - // The state_events above should contain all timeline_users, let's mark them as lazy - // loaded. - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - ( - heroes, - joined_member_count, - invited_member_count, - true, - state_events, - ) - } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { - // No state changes - (Vec::new(), None, None, false, Vec::new()) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - if pdu.kind == RoomEventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - } - Err(e) => error!("Invalid state key for member event: {}", e), - } - } - - state_events.push(pdu); - tokio::task::yield_now().await; - } - } - } - - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - if !services().rooms.lazy_loading.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, - &event.sender, - )? || lazy_load_send_redundant - { - if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - )? { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - } - } - - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? - .is_some(); - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = state_events - .iter() - .any(|event| event.kind == RoomEventType::RoomMember); - - if encrypted_room { - for state_event in &state_events { - if state_event.kind != RoomEventType::RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::( - state_event.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { - device_list_updates.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services() - .rooms - .state_cache - .room_members(&room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) - }), - ); - } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? 
- } else { - (None, None, Vec::new()) - }; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) - }; - - // Look for device list updates in this room - device_list_updates.extend( - services() - .users - .keys_changed(room_id.as_ref(), since, None) - .filter_map(|r| r.ok()), - ); - - let notification_count = if send_notification_counts { - Some( - services() - .rooms - .user - .notification_count(&sender_user, &room_id)? - .try_into() - .expect("notification count can't go that high"), - ) - } else { - None - }; - - let highlight_count = if send_notification_counts { - Some( - services() - .rooms - .user - .highlight_count(&sender_user, &room_id)? - .try_into() - .expect("highlight count can't go that high"), - ) - } else { - None - }; - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })?; - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let mut edus: Vec<_> = services() - .rooms - .edus - .read_receipt - .readreceipts_since(&room_id, since) - .filter_map(|r| r.ok()) // Filter out buggy events - .map(|(_, _, v)| v) - .collect(); - - if services().rooms.edus.typing.last_typing_update(&room_id)? > since { - edus.push( - serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - // Save the state after this sync so we can send the correct state diff next sync - services().rooms.user.associate_token_shortstatehash( - &room_id, next_batch, - current_shortstatehash, - )?; - - let joined_room = JoinedRoom { - account_data: RoomAccountData { - events: services() - .account_data - .changes_since(Some(&room_id), &sender_user, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - summary: RoomSummary { - heroes, - joined_member_count: joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: UnreadNotificationsCount { - highlight_count, - notification_count, - }, - timeline: Timeline { - limited: limited || joined_since_last_sync, - prev_batch, - events: room_events, - }, - state: State { - events: state_events - .iter() - .map(|pdu| pdu.to_sync_state_event()) - .collect(), - }, - ephemeral: Ephemeral { events: edus }, - unread_thread_notifications: BTreeMap::new(), - }; - - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); - } - - // Take presence updates from this room - for (user_id, presence) in services() - .rooms - .edus - .presence - .presence_since(&room_id, since)? 
+ next_batchcount, + lazy_load_enabled, + lazy_load_send_redundant, + full_state, + &mut device_list_updates, + &mut left_encrypted_users, + ) + .await { - match presence_updates.entry(user_id) { - Entry::Vacant(v) => { - v.insert(presence); - } - Entry::Occupied(mut o) => { - let p = o.get_mut(); + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } - // Update existing presence event with more info - p.content.presence = presence.content.presence; - if let Some(status_msg) = presence.content.status_msg { - p.content.status_msg = Some(status_msg); + // Take presence updates from this room + for (user_id, presence) in services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? + { + match presence_updates.entry(user_id) { + Entry::Vacant(v) => { + v.insert(presence); } - if let Some(last_active_ago) = presence.content.last_active_ago { - p.content.last_active_ago = Some(last_active_ago); - } - if let Some(displayname) = presence.content.displayname { - p.content.displayname = Some(displayname); - } - if let Some(avatar_url) = presence.content.avatar_url { - p.content.avatar_url = Some(avatar_url); - } - if let Some(currently_active) = presence.content.currently_active { - p.content.currently_active = Some(currently_active); + Entry::Occupied(mut o) => { + let p = o.get_mut(); + + // Update existing presence event with more info + p.content.presence = presence.content.presence; + if let Some(status_msg) = presence.content.status_msg { + p.content.status_msg = Some(status_msg); + } + if let Some(last_active_ago) = presence.content.last_active_ago { + p.content.last_active_ago = Some(last_active_ago); + } + if let Some(displayname) = presence.content.displayname { + p.content.displayname = Some(displayname); + } + if let Some(avatar_url) = presence.content.avatar_url { + p.content.avatar_url = Some(avatar_url); + } + if let Some(currently_active) = presence.content.currently_active { + p.content.currently_active = Some(currently_active); + } } } } @@ -915,13 +371,13 @@ async fn sync_helper( let mut i = 0; for (key, id) in left_state_ids { - if body.full_state || since_state_ids.get(&key) != Some(&id) { + if full_state || since_state_ids.get(&key) != Some(&id) { let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?; if !lazy_load_enabled || event_type != StateEventType::RoomMember - || body.full_state + || full_state // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 || *sender_user == state_key { @@ -1075,7 +531,7 @@ async fn sync_helper( }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !body.full_state + if !full_state && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() @@ -1095,6 +551,579 @@ async fn sync_helper( } } +async fn load_joined_room( + sender_user: &UserId, + sender_device: &DeviceId, + room_id: &RoomId, + since: u64, + sincecount: PduCount, + next_batch: u64, + next_batchcount: PduCount, + lazy_load_enabled: bool, + lazy_load_send_redundant: bool, + full_state: bool, + device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, +) -> Result { + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let insert_lock = 
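For context on the presence handling kept above: per room, the newest presence event for each user is merged into any event already collected, keeping earlier fields unless the newer event actually provides them. A reduced sketch of that entry-based merge with a stripped-down presence type (field names are illustrative, not Ruma's full presence content):

```rust
use std::collections::{btree_map::Entry, BTreeMap};

#[derive(Debug, Default)]
struct Presence {
    status_msg: Option<String>,
    currently_active: Option<bool>,
}

fn merge(updates: &mut BTreeMap<String, Presence>, user: String, new: Presence) {
    match updates.entry(user) {
        Entry::Vacant(v) => {
            v.insert(new);
        }
        Entry::Occupied(mut o) => {
            let p = o.get_mut();
            // Only overwrite fields the newer event actually carries.
            if let Some(msg) = new.status_msg {
                p.status_msg = Some(msg);
            }
            if let Some(active) = new.currently_active {
                p.currently_active = Some(active);
            }
        }
    }
}

fn main() {
    let mut updates = BTreeMap::new();
    merge(
        &mut updates,
        "@a:example.org".to_owned(),
        Presence { status_msg: Some("away".to_owned()), ..Default::default() },
    );
    merge(
        &mut updates,
        "@a:example.org".to_owned(),
        Presence { currently_active: Some(true), ..Default::default() },
    );
    assert_eq!(updates["@a:example.org"].status_msg.as_deref(), Some("away"));
}
```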
mutex_insert.lock().unwrap(); + drop(insert_lock); + } + + let timeline_pdus; + let limited; + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > sincecount + { + let mut non_timeline_pdus = services() + .rooms + .timeline + .pdus_until(&sender_user, &room_id, PduCount::max())? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pducount, _)| pducount > &sincecount); + + // Take the last 10 events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } + + let send_notification_counts = !timeline_pdus.is_empty() + || services() + .rooms + .user + .last_notification_read(&sender_user, &room_id)? + > since; + + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + services().rooms.lazy_loading.lazy_load_confirm_delivery( + &sender_user, + &sender_device, + &room_id, + sincecount, + )?; + + // Database queries: + + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + return Err(Error::BadDatabase("Room has no state")); + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(&sender_user, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) 
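For context on the headline change of this patch ("Don't crash when a room errors"): the per-room sync logic now lives in `load_joined_room`, which returns a `Result`, and the caller above only keeps rooms that load successfully. A single broken room, such as one with no state (now an `Err` instead of the old inline `continue`), no longer aborts the whole `/sync` response. A reduced sketch of that error-isolation pattern, with hypothetical names:

```rust
use std::collections::BTreeMap;

fn load_room(room_id: &str) -> Result<String, String> {
    if room_id == "!broken:example.org" {
        return Err("room has no state".to_owned());
    }
    Ok(format!("summary of {room_id}"))
}

fn main() {
    let mut joined_rooms = BTreeMap::new();
    for room_id in ["!ok:example.org", "!broken:example.org"] {
        // One failing room is skipped instead of failing the whole sync
        // (the real caller uses `if let Ok(...)` and drops the error silently).
        match load_room(room_id) {
            Ok(summary) => {
                joined_rooms.insert(room_id.to_owned(), summary);
            }
            Err(e) => eprintln!("skipping {room_id}: {e}"),
        }
    }
    assert_eq!(joined_rooms.len(), 1);
}
```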
+ { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(|u| u.ok()) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes, + )) + }; + + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = + if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + + // Reset lazy loading because this is an initial sync + services().rooms.lazy_loading.lazy_load_reset( + &sender_user, + &sender_device, + &room_id, + )?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. 
+ services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batchcount, + ); + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == RoomEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + tokio::task::yield_now().await; + } + } + } + + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? { + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } + } + + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batchcount, + ); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = state_events + .iter() + .any(|event| event.kind == RoomEventType::RoomMember); + + if encrypted_room { + for state_event in &state_events { + if state_event.kind != RoomEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::( + state_event.content.get(), + ) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&sender_user, &user_id, &room_id)? 
{ + device_list_updates.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(&room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts()? + } else { + (None, None, Vec::new()) + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + }; + + // Look for device list updates in this room + device_list_updates.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + + let notification_count = if send_notification_counts { + Some( + services() + .rooms + .user + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + services() + .rooms + .user + .highlight_count(&sender_user, &room_id)? + .try_into() + .expect("highlight count can't go that high"), + ) + } else { + None + }; + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + })) + })?; + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let mut edus: Vec<_> = services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + .filter_map(|r| r.ok()) // Filter out buggy events + .map(|(_, _, v)| v) + .collect(); + + if services().rooms.edus.typing.last_typing_update(&room_id)? > since { + edus.push( + serde_json::from_str( + &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + // Save the state after this sync so we can send the correct state diff next sync + services().rooms.user.associate_token_shortstatehash( + &room_id, + next_batch, + current_shortstatehash, + )?; + + Ok(JoinedRoom { + account_data: RoomAccountData { + events: services() + .account_data + .changes_since(Some(&room_id), &sender_user, since)? 
+ .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + summary: RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: UnreadNotificationsCount { + highlight_count, + notification_count, + }, + timeline: Timeline { + limited: limited || joined_since_last_sync, + prev_batch, + events: room_events, + }, + state: State { + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), + }, + ephemeral: Ephemeral { events: edus }, + unread_thread_notifications: BTreeMap::new(), + }) +} + fn share_encrypted_room( sender_user: &UserId, user_id: &UserId, From da3871f39a3a0a7b5b34e2110e41717c33754a73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 17 Mar 2023 22:44:50 +0100 Subject: [PATCH 1361/1727] fix: let requests continue event if client disconnects --- src/main.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/main.rs b/src/main.rs index fe6cfc0..a51416d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -147,6 +147,7 @@ async fn run_server() -> io::Result<()> { let middlewares = ServiceBuilder::new() .sensitive_headers([header::AUTHORIZATION]) + .layer(axum::middleware::from_fn(spawn_task)) .layer( TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { let path = if let Some(path) = request.extensions().get::() { @@ -221,6 +222,15 @@ async fn run_server() -> io::Result<()> { Ok(()) } +async fn spawn_task( + req: axum::http::Request, + next: axum::middleware::Next, +) -> std::result::Result { + tokio::spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) +} + async fn unrecognized_method( req: axum::http::Request, next: axum::middleware::Next, From 2a7c4693b8680ab8f00e990e1c779a8d36967520 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 18 Mar 2023 08:58:20 +0100 Subject: [PATCH 1362/1727] fix: don't accept new requests when shutting down --- src/main.rs | 17 +++++++++-------- src/service/globals/mod.rs | 15 +++++++++++++-- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/main.rs b/src/main.rs index a51416d..59e82a7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,7 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, io, net::SocketAddr, time::Duration}; +use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ extract::{DefaultBodyLimit, FromRequest, MatchedPath}, @@ -212,13 +212,6 @@ async fn run_server() -> io::Result<()> { } } - // On shutdown - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - services().globals.rotate.fire(); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); - Ok(()) } @@ -226,6 +219,9 @@ async fn spawn_task( req: axum::http::Request, next: axum::middleware::Next, ) -> std::result::Result { + if services().globals.shutdown.load(atomic::Ordering::Relaxed) { + return Err(StatusCode::SERVICE_UNAVAILABLE); + } tokio::spawn(next.run(req)) .await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) @@ -452,6 +448,11 @@ async fn shutdown_signal(handle: ServerHandle) { warn!("Received {}, shutting down...", sig); handle.graceful_shutdown(Some(Duration::from_secs(30))); + + 
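For context on the `spawn_task` middleware above (the patch that lets requests continue even if the client disconnects): when a client drops the connection, hyper/axum cancel the future handling that request, but work handed to `tokio::spawn` runs to completion even when its `JoinHandle` is dropped, which is what keeps half-finished request processing alive. A minimal illustration of that property, assuming tokio with the `macros` and `time` features:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async {
        tokio::time::sleep(Duration::from_millis(50)).await;
        println!("work finished even though nobody awaited it");
    });

    // Dropping the JoinHandle detaches the task instead of cancelling it,
    // which is what keeps request handling alive after a client disconnect.
    drop(handle);

    tokio::time::sleep(Duration::from_millis(100)).await;
}
```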
services().globals.shutdown(); + + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); } async fn not_found(uri: Uri) -> impl IntoResponse { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index cd3be08..9206d43 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -6,7 +6,7 @@ use ruma::{ use crate::api::server_server::FedDest; -use crate::{Config, Error, Result}; +use crate::{services, Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -14,6 +14,7 @@ use ruma::{ }, DeviceId, RoomVersionId, ServerName, UserId, }; +use std::sync::atomic::{self, AtomicBool}; use std::{ collections::{BTreeMap, HashMap}, fs, @@ -24,7 +25,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::error; +use tracing::{error, info}; use trust_dns_resolver::TokioAsyncResolver; type WellKnownMap = HashMap; @@ -58,6 +59,8 @@ pub struct Service { pub roomid_federationhandletime: RwLock>, pub stateres_mutex: Arc>, pub rotate: RotationHandler, + + pub shutdown: AtomicBool, } /// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. @@ -160,6 +163,7 @@ impl Service { stateres_mutex: Arc::new(Mutex::new(())), sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), + shutdown: AtomicBool::new(false), }; fs::create_dir_all(s.get_media_folder())?; @@ -341,6 +345,13 @@ impl Service { r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); r } + + pub fn shutdown(&self) { + self.shutdown.store(true, atomic::Ordering::Relaxed); + // On shutdown + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); + } } fn reqwest_client_builder(config: &Config) -> Result { From f53ecaa97db45e8ab749bd65195a3eaf85a28a7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Sun, 26 Feb 2023 16:29:06 +0100 Subject: [PATCH 1363/1727] Bump Ruma --- Cargo.lock | 143 +++++++++++++++++++++------- Cargo.toml | 4 +- src/api/client_server/config.rs | 8 +- src/api/client_server/membership.rs | 18 ++-- src/api/client_server/message.rs | 12 +-- src/api/client_server/profile.rs | 6 +- src/api/client_server/redact.rs | 4 +- src/api/client_server/room.rs | 30 +++--- src/api/client_server/sync.rs | 10 +- src/api/client_server/to_device.rs | 7 +- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 27 ++++-- src/service/admin/mod.rs | 30 +++--- src/service/pdu.rs | 18 ++-- src/service/pusher/mod.rs | 7 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/timeline/mod.rs | 44 ++++----- 17 files changed, 228 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 505c71c..385d8f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -183,9 +183,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" @@ -412,6 +412,7 @@ dependencies = [ "rust-argon2", "sd-notify", "serde", + "serde_html_form", "serde_json", "serde_yaml", "sha-1", @@ -669,7 +670,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2", + "sha2 
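For context on the shutdown handling added above: a single `AtomicBool` is set by the signal handler and checked by the `spawn_task` middleware, so in-flight requests finish while new ones are rejected with 503. `Ordering::Relaxed` is sufficient here because the flag guards no other shared data. A reduced sketch of that flag (the bare status code stands in for `StatusCode::SERVICE_UNAVAILABLE`; illustrative only):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static SHUTDOWN: AtomicBool = AtomicBool::new(false);

fn begin_shutdown() {
    SHUTDOWN.store(true, Ordering::Relaxed);
}

fn accept_new_request() -> Result<(), u16> {
    if SHUTDOWN.load(Ordering::Relaxed) {
        // Mirrors the middleware returning StatusCode::SERVICE_UNAVAILABLE.
        return Err(503);
    }
    Ok(())
}

fn main() {
    assert!(accept_new_request().is_ok());
    begin_shutdown();
    assert_eq!(accept_new_request(), Err(503));
}
```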
0.9.9", "zeroize", ] @@ -721,7 +722,7 @@ dependencies = [ "atomic", "pear", "serde", - "toml", + "toml 0.5.9", "uncased", "version_check", ] @@ -1819,7 +1820,7 @@ checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ "once_cell", "thiserror", - "toml", + "toml 0.5.9", ] [[package]] @@ -2075,8 +2076,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.8.2" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "assign", "js_int", @@ -2093,8 +2094,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.8.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "js_int", "ruma-common", @@ -2104,8 +2105,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.15.3" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.16.2" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "assign", "bytes", @@ -2113,23 +2114,22 @@ dependencies = [ "js_int", "js_option", "maplit", - "percent-encoding", "ruma-common", "serde", + "serde_html_form", "serde_json", ] [[package]] name = "ruma-common" -version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.11.3" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ - "base64 0.20.0", + "base64 0.21.0", "bytes", "form_urlencoded", "http", "indexmap", - "itoa", "js_int", "js_option", "konst", @@ -2139,6 +2139,7 @@ dependencies = [ "ruma-identifiers-validation", "ruma-macros", "serde", + "serde_html_form", "serde_json", "thiserror", "tracing", @@ -2149,8 +2150,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.7.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "js_int", "ruma-common", @@ -2160,8 +2161,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.9.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "js_int", "thiserror", @@ -2169,8 +2170,8 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.7.1" +source = 
"git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "js_int", "ruma-common", @@ -2179,8 +2180,8 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.10.5" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.11.3" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "once_cell", "proc-macro-crate", @@ -2189,13 +2190,13 @@ dependencies = [ "ruma-identifiers-validation", "serde", "syn", - "toml", + "toml 0.7.2", ] [[package]] name = "ruma-push-gateway-api" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.7.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "js_int", "ruma-common", @@ -2205,24 +2206,24 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.13.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ - "base64 0.20.0", + "base64 0.21.0", "ed25519-dalek", "pkcs8", "rand 0.7.3", "ruma-common", "serde_json", - "sha2", + "sha2 0.10.6", "subslice", "thiserror", ] [[package]] name = "ruma-state-res" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26#67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26" +version = "0.9.1" +source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" dependencies = [ "itertools", "js_int", @@ -2388,6 +2389,19 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_html_form" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53192e38d5c88564b924dbe9b60865ecbb71b81d38c4e61c817cffd3e36ef696" +dependencies = [ + "form_urlencoded", + "indexmap", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_json" version = "1.0.89" @@ -2399,6 +2413,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2459,6 +2482,17 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -2807,6 +2841,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -3343,6 +3411,15 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +[[package]] +name = "winnow" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf09497b8f8b5ac5d3bb4d05c0a99be20f26fd3d5f2db7b0716e946d5103658" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index 36ffb13..7c7df7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -83,6 +83,8 @@ num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +# Used for ruma wrapper +serde_html_form = "0.2.0" thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 12f9aea..37279e3 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -75,7 +75,7 @@ pub async fn get_global_account_data_route( let event: Box = services() .account_data - .get(None, sender_user, body.event_type.clone().into())? + .get(None, sender_user, body.event_type.to_string().into())? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; let account_data = serde_json::from_str::(event.get()) @@ -95,11 +95,7 @@ pub async fn get_room_account_data_route( let event: Box = services() .account_data - .get( - Some(&body.room_id), - sender_user, - body.event_type.clone().into(), - )? + .get(Some(&body.room_id), sender_user, body.event_type.clone())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; let account_data = serde_json::from_str::(event.get()) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b9b1756..cd26372 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -17,7 +17,7 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - RoomEventType, StateEventType, + StateEventType, TimelineEventType, }, serde::Base64, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, @@ -209,7 +209,7 @@ pub async fn kick_user_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -273,7 +273,7 @@ pub async fn ban_user_route(body: Ruma) -> Result( let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content, unsigned: None, state_key: Some(user_id.to_string()), @@ -1295,7 +1297,7 @@ pub(crate) async fn invite_helper<'a>( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: services().users.displayname(user_id)?, @@ -1420,7 +1422,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option PduCount::try_from_string(&from)?, None => match body.dir { - ruma::api::client::Direction::Forward => PduCount::min(), - ruma::api::client::Direction::Backward => PduCount::max(), + ruma::api::Direction::Forward => PduCount::min(), + ruma::api::Direction::Backward => PduCount::max(), }, }; @@ -143,7 +143,7 @@ pub async fn get_message_events_route( let mut lazy_loaded = HashSet::new(); match body.dir { - ruma::api::client::Direction::Forward => { + ruma::api::Direction::Forward => { let events_after: Vec<_> = services() .rooms .timeline @@ -187,7 +187,7 @@ pub async fn get_message_events_route( resp.end = next_token.map(|count| count.stringify()); resp.chunk = events_after; } - ruma::api::client::Direction::Backward => { + ruma::api::Direction::Backward => { services() .rooms .timeline diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 6400e89..8fb38b5 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -9,7 +9,7 @@ use ruma::{ }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType}, + events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType}, }; use serde_json::value::to_raw_value; use std::sync::Arc; @@ -37,7 +37,7 @@ pub async fn set_displayname_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: RoomEventType::RoomMember, + 
event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( @@ -172,7 +172,7 @@ pub async fn set_avatar_url_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index a29a561..20f7e91 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ api::client::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, RoomEventType}, + events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, }; use serde_json::value::to_raw_value; @@ -32,7 +32,7 @@ pub async fn redact_event_route( let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomRedaction, + event_type: TimelineEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { reason: body.reason.clone(), }) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index aa6fa5f..8c39b78 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -19,7 +19,7 @@ use ruma::{ tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - RoomEventType, StateEventType, + StateEventType, TimelineEventType, }, int, serde::JsonObject, @@ -175,7 +175,7 @@ pub async fn create_room_route( // 1. The room create event services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCreate, + event_type: TimelineEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -189,7 +189,7 @@ pub async fn create_room_route( // 2. 
Let the room creator join services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -247,7 +247,7 @@ pub async fn create_room_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomPowerLevels, + event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, @@ -263,7 +263,7 @@ pub async fn create_room_route( if let Some(room_alias_id) = &alias { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, + event_type: TimelineEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], @@ -284,7 +284,7 @@ pub async fn create_room_route( // 5.1 Join Rules services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomJoinRules, + event_type: TimelineEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default @@ -303,7 +303,7 @@ pub async fn create_room_route( // 5.2 History Visibility services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, + event_type: TimelineEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -320,7 +320,7 @@ pub async fn create_room_route( // 5.3 Guest Access services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomGuestAccess, + event_type: TimelineEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, @@ -346,7 +346,7 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == RoomEventType::RoomEncryption + if pdu_builder.event_type == TimelineEventType::RoomEncryption && !services().globals.allow_encryption() { continue; @@ -364,7 +364,7 @@ pub async fn create_room_route( if let Some(name) = &body.name { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomName, + event_type: TimelineEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, @@ -380,7 +380,7 @@ pub async fn create_room_route( if let Some(topic) = &body.topic { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomTopic, + event_type: TimelineEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) @@ -526,7 +526,7 @@ pub async fn upgrade_room_route( // Fail if the sender does not have the required permissions let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomTombstone, + event_type: TimelineEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: 
replacement_room.clone(), @@ -608,7 +608,7 @@ pub async fn upgrade_room_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCreate, + event_type: TimelineEventType::RoomCreate, content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -623,7 +623,7 @@ pub async fn upgrade_room_route( // Join the new room services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -716,7 +716,7 @@ pub async fn upgrade_room_route( // Modify the power levels in the old room to prevent sending of events and inviting new users let _ = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomPowerLevels, + event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5eb820c..b4baec1 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -14,7 +14,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - RoomEventType, StateEventType, + StateEventType, TimelineEventType, }, serde::Raw, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UserId, @@ -678,7 +678,7 @@ async fn load_joined_room( .timeline .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) + .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) .map(|(_, pdu)| { let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()) .map_err(|_| { @@ -868,7 +868,7 @@ async fn load_joined_room( } }; - if pdu.kind == RoomEventType::RoomMember { + if pdu.kind == TimelineEventType::RoomMember { match UserId::parse( pdu.state_key .as_ref() @@ -936,11 +936,11 @@ async fn load_joined_room( let send_member_count = state_events .iter() - .any(|event| event.kind == RoomEventType::RoomMember); + .any(|event| event.kind == TimelineEventType::RoomMember); if encrypted_room { for state_event in &state_events { - if state_event.kind != RoomEventType::RoomMember { + if state_event.kind != TimelineEventType::RoomMember { continue; } diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 26db4e4..31590fc 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,4 +1,3 @@ -use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; use crate::{services, Error, Result, Ruma}; @@ -42,7 +41,7 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: ToDeviceEventType::from(&*body.event_type), + ev_type: body.event_type.clone(), message_id: count.to_string().into(), messages, }, @@ -60,7 +59,7 @@ pub async fn send_event_to_device_route( sender_user, target_user_id, target_device_id, - &body.event_type, + &body.event_type.to_string(), event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, @@ -73,7 +72,7 @@ pub async fn send_event_to_device_route( sender_user, target_user_id, &target_device_id?, - &body.event_type, + 
&body.event_type.to_string(), event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 74f506f..2d2af70 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -47,7 +47,7 @@ where let path_params = Path::>::from_request(req).await?; let query = req.uri().query().unwrap_or_default(); - let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { + let query_params: QueryParams = match serde_html_form::from_str(query) { Ok(params) => params, Err(e) => { error!(%query, "Failed to deserialize query parameters: {}", e); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 852e59a..961b658 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -18,11 +18,7 @@ use ruma::{ discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, - membership::{ - create_invite, - create_join_event::{self, RoomState}, - prepare_join_event, - }, + membership::{create_invite, create_join_event, prepare_join_event}, query::{get_profile_information, get_room_information}, transactions::{ edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, @@ -39,7 +35,7 @@ use ruma::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - RoomEventType, StateEventType, + StateEventType, TimelineEventType, }, serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, @@ -1440,7 +1436,7 @@ pub async fn create_join_event_template_route( let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content, unsigned: None, state_key: Some(body.user_id.to_string()), @@ -1465,7 +1461,7 @@ async fn create_join_event( sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, -) -> Result { +) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1587,7 +1583,7 @@ async fn create_join_event( services().sending.send_pdu(servers, &pdu_id)?; - Ok(RoomState { + Ok(create_join_event::v1::RoomState { auth_chain: auth_chain_ids .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) @@ -1628,7 +1624,18 @@ pub async fn create_join_event_v2_route( .as_ref() .expect("server is authenticated"); - let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + let create_join_event::v1::RoomState { + auth_chain, + state, + event, + } = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event::v2::RoomState { + members_omitted: false, + auth_chain, + state, + event, + servers_in_room: None, + }; Ok(create_join_event::v2::Response { room_state }) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b6609e1..d37ec69 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -21,7 +21,7 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - RoomEventType, + TimelineEventType, }, EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -212,7 +212,7 @@ impl Service { .timeline .build_and_append_pdu( 
PduBuilder { - event_type: RoomEventType::RoomMessage, + event_type: TimelineEventType::RoomMessage, content: to_raw_value(&message) .expect("event is valid, we just created it"), unsigned: None, @@ -854,7 +854,7 @@ impl Service { // 1. The room create event services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCreate, + event_type: TimelineEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -868,7 +868,7 @@ impl Service { // 2. Make conduit bot join services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: None, @@ -895,7 +895,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomPowerLevels, + event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -913,7 +913,7 @@ impl Service { // 4.1 Join Rules services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomJoinRules, + event_type: TimelineEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) .expect("event is valid, we just created it"), unsigned: None, @@ -928,7 +928,7 @@ impl Service { // 4.2 History Visibility services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, + event_type: TimelineEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -945,7 +945,7 @@ impl Service { // 4.3 Guest Access services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomGuestAccess, + event_type: TimelineEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) .expect("event is valid, we just created it"), unsigned: None, @@ -961,7 +961,7 @@ impl Service { let room_name = format!("{} Admin Room", services().globals.server_name()); services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomName, + event_type: TimelineEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) .expect("event is valid, we just created it"), unsigned: None, @@ -975,7 +975,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomTopic, + event_type: TimelineEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: format!("Manage {}", services().globals.server_name()), }) @@ -996,7 +996,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, + event_type: TimelineEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), @@ -1053,7 +1053,7 @@ impl Service { // Invite and join the real user services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: None, @@ -1075,7 +1075,7 @@ impl Service { )?; services().rooms.timeline.build_and_append_pdu( PduBuilder { - 
event_type: RoomEventType::RoomMember, + event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: Some(displayname), @@ -1103,7 +1103,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomPowerLevels, + event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -1121,7 +1121,7 @@ impl Service { // Send welcome message services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomMessage, + event_type: TimelineEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), format!("
                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()), diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 5b5cbd0..a497b11 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -3,7 +3,7 @@ use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, - RoomEventType, StateEvent, + StateEvent, TimelineEventType, }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, @@ -31,7 +31,7 @@ pub struct PduEvent { pub sender: OwnedUserId, pub origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: RoomEventType, + pub kind: TimelineEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, @@ -53,10 +53,10 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - RoomEventType::RoomMember => &["join_authorised_via_users_server", "membership"], - RoomEventType::RoomCreate => &["creator"], - RoomEventType::RoomJoinRules => &["join_rule"], - RoomEventType::RoomPowerLevels => &[ + TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"], + TimelineEventType::RoomCreate => &["creator"], + TimelineEventType::RoomJoinRules => &["join_rule"], + TimelineEventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -66,7 +66,7 @@ impl PduEvent { "users", "users_default", ], - RoomEventType::RoomHistoryVisibility => &["history_visibility"], + TimelineEventType::RoomHistoryVisibility => &["history_visibility"], _ => &[], }; @@ -296,7 +296,7 @@ impl state_res::Event for PduEvent { &self.sender } - fn event_type(&self) -> &RoomEventType { + fn event_type(&self) -> &TimelineEventType { &self.kind } @@ -372,7 +372,7 @@ pub(crate) fn gen_event_id_canonical_json( #[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] - pub event_type: RoomEventType, + pub event_type: TimelineEventType, pub content: Box, pub unsigned: Option>, pub state_key: Option, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index ba096a2..5933c03 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -15,7 +15,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - RoomEventType, StateEventType, + StateEventType, TimelineEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, @@ -169,6 +169,7 @@ impl Service { tweaks.push(tweak.clone()); continue; } + _ => false, }; if notify.is_some() { @@ -248,7 +249,7 @@ impl Service { // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); - if event.kind == RoomEventType::RoomEncrypted + if event.kind == TimelineEventType::RoomEncrypted || tweaks .iter() .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) @@ -264,7 +265,7 @@ impl Service { notifi.event_type = Some(event.kind.clone()); notifi.content = serde_json::value::to_raw_value(&event.content).ok(); - if event.kind == RoomEventType::RoomMember { + if event.kind == TimelineEventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 3072b80..21ad2f9 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -8,7 +8,7 @@ pub use data::Data; use ruma::{ events::{ room::{create::RoomCreateEventContent, 
member::MembershipState}, - AnyStrippedStateEvent, RoomEventType, StateEventType, + AnyStrippedStateEvent, StateEventType, TimelineEventType, }, serde::Raw, state_res::{self, StateMap}, @@ -358,7 +358,7 @@ impl Service { pub fn get_auth_events( &self, room_id: &RoomId, - kind: &RoomEventType, + kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 47f4c65..99c5876 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,20 +1,19 @@ mod data; -use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap}; +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap}, +}; -use std::sync::RwLock; use std::{ collections::HashSet, - sync::{Arc, Mutex}, + sync::{Arc, Mutex, RwLock}, }; pub use data::Data; use regex::Regex; -use ruma::api::federation; -use ruma::serde::Base64; use ruma::{ - api::client::error::ErrorKind, + api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, events::{ push_rules::PushRulesEvent, @@ -22,23 +21,22 @@ use ruma::{ create::RoomCreateEventContent, member::MembershipState, power_levels::RoomPowerLevelsEventContent, }, - GlobalAccountDataEventType, RoomEventType, StateEventType, + GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, + serde::Base64, state_res, - state_res::Event, - state_res::RoomVersion, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomAliasId, RoomId, UserId, + state_res::{Event, RoomVersion}, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomAliasId, RoomId, ServerName, UserId, }; -use ruma::{user_id, ServerName}; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tokio::sync::MutexGuard; use tracing::{error, info, warn}; -use crate::api::server_server; use crate::{ + api::server_server, service::pdu::{EventHash, PduBuilder}, services, utils, Error, PduEvent, Result, }; @@ -381,12 +379,12 @@ impl Service { .increment_notification_counts(&pdu.room_id, notifies, highlights)?; match pdu.kind { - RoomEventType::RoomRedaction => { + TimelineEventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { self.redact_pdu(redact_id, pdu)?; } } - RoomEventType::RoomMember => { + TimelineEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { #[derive(Deserialize)] struct ExtractMembership { @@ -420,7 +418,7 @@ impl Service { )?; } } - RoomEventType::RoomMessage => { + TimelineEventType::RoomMessage => { #[derive(Deserialize)] struct ExtractBody { body: Option, @@ -472,7 +470,7 @@ impl Service { // If the RoomMember event has a non-empty state_key, it is targeted at someone. // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { + if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -522,7 +520,7 @@ impl Service { let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember + || pdu.kind == TimelineEventType::RoomMember && pdu .state_key .as_ref() @@ -756,14 +754,14 @@ impl Service { )?; if admin_room.filter(|v| v == room_id).is_some() { match pdu.event_type() { - RoomEventType::RoomEncryption => { + TimelineEventType::RoomEncryption => { warn!("Encryption is not allowed in the admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption is not allowed in the admins room.", )); } - RoomEventType::RoomMember => { + TimelineEventType::RoomMember => { #[derive(Deserialize)] struct ExtractMembership { membership: MembershipState, @@ -862,7 +860,7 @@ impl Service { .collect(); // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { + if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -1100,7 +1098,7 @@ impl Service { drop(insert_lock); match pdu.kind { - RoomEventType::RoomMessage => { + TimelineEventType::RoomMessage => { #[derive(Deserialize)] struct ExtractBody { body: Option, From 4635644e21cfdefd076d011d4f747b37d10da5b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Sun, 26 Feb 2023 16:55:42 +0100 Subject: [PATCH 1364/1727] Use the ruma methods for managing rulesets --- src/api/client_server/push.rs | 316 +++++++++------------------------- 1 file changed, 80 insertions(+), 236 deletions(-) diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index b044138..ab7c686 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -5,11 +5,11 @@ use ruma::{ push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, - set_pushrule_enabled, RuleKind, RuleScope, + set_pushrule_enabled, RuleScope, }, }, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, - push::{ConditionalPushRuleInit, NewPushRule, PatternedPushRuleInit, SimplePushRuleInit}, + push::{InsertPushRuleError, RemovePushRuleError}, }; /// # `GET /_matrix/client/r0/pushrules` @@ -65,30 +65,10 @@ pub async fn get_pushrule_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
.content; - let global = account_data.global; - let rule = match body.kind { - RuleKind::Override => global - .override_ - .get(body.rule_id.as_str()) - .map(|rule| rule.clone().into()), - RuleKind::Underride => global - .underride - .get(body.rule_id.as_str()) - .map(|rule| rule.clone().into()), - RuleKind::Sender => global - .sender - .get(body.rule_id.as_str()) - .map(|rule| rule.clone().into()), - RuleKind::Room => global - .room - .get(body.rule_id.as_str()) - .map(|rule| rule.clone().into()), - RuleKind::Content => global - .content - .get(body.rule_id.as_str()) - .map(|rule| rule.clone().into()), - _ => None, - }; + let rule = account_data + .global + .get(body.kind.clone(), &body.rule_id) + .map(Into::into); if let Some(rule) = rule { Ok(get_pushrule::v3::Response { rule }) @@ -131,66 +111,36 @@ pub async fn set_pushrule_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - let global = &mut account_data.content.global; - match body.rule { - NewPushRule::Override(rule) => { - global.override_.replace( - ConditionalPushRuleInit { - actions: rule.actions, - default: false, - enabled: true, - rule_id: rule.rule_id, - conditions: rule.conditions, - } - .into(), - ); - } - NewPushRule::Underride(rule) => { - global.underride.replace( - ConditionalPushRuleInit { - actions: rule.actions, - default: false, - enabled: true, - rule_id: rule.rule_id, - conditions: rule.conditions, - } - .into(), - ); - } - NewPushRule::Sender(rule) => { - global.sender.replace( - SimplePushRuleInit { - actions: rule.actions, - default: false, - enabled: true, - rule_id: rule.rule_id, - } - .into(), - ); - } - NewPushRule::Room(rule) => { - global.room.replace( - SimplePushRuleInit { - actions: rule.actions, - default: false, - enabled: true, - rule_id: rule.rule_id, - } - .into(), - ); - } - NewPushRule::Content(rule) => { - global.content.replace( - PatternedPushRuleInit { - actions: rule.actions, - default: false, - enabled: true, - rule_id: rule.rule_id, - pattern: rule.pattern, - } - .into(), - ); - } + if let Err(error) = account_data.content.global.insert( + body.rule.clone(), + body.after.as_deref(), + body.before.as_deref(), + ) { + let err = match error { + InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( + ErrorKind::InvalidParam, + "Rule IDs starting with a dot are reserved for server-default rules.", + ), + InsertPushRuleError::InvalidRuleId => Error::BadRequest( + ErrorKind::InvalidParam, + "Rule ID containing invalid characters.", + ), + InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( + ErrorKind::InvalidParam, + "Can't place a push rule relatively to a server-default rule.", + ), + InsertPushRuleError::UnknownRuleId => Error::BadRequest( + ErrorKind::NotFound, + "The before or after rule could not be found.", + ), + InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( + ErrorKind::InvalidParam, + "The before rule has a higher priority than the after rule.", + ), + _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + }; + + return Err(err); } services().account_data.update( @@ -235,29 +185,9 @@ pub async fn get_pushrule_actions_route( .content; let global = account_data.global; - let actions = match body.kind { - RuleKind::Override => global - .override_ - .get(body.rule_id.as_str()) - .map(|rule| rule.actions.clone()), - RuleKind::Underride => global - .underride - .get(body.rule_id.as_str()) - .map(|rule| rule.actions.clone()), - 
RuleKind::Sender => global - .sender - .get(body.rule_id.as_str()) - .map(|rule| rule.actions.clone()), - RuleKind::Room => global - .room - .get(body.rule_id.as_str()) - .map(|rule| rule.actions.clone()), - RuleKind::Content => global - .content - .get(body.rule_id.as_str()) - .map(|rule| rule.actions.clone()), - _ => None, - }; + let actions = global + .get(body.kind.clone(), &body.rule_id) + .map(|rule| rule.actions().to_owned()); Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), @@ -294,40 +224,17 @@ pub async fn set_pushrule_actions_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - let global = &mut account_data.content.global; - match body.kind { - RuleKind::Override => { - if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { - rule.actions = body.actions.clone(); - global.override_.replace(rule); - } - } - RuleKind::Underride => { - if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { - rule.actions = body.actions.clone(); - global.underride.replace(rule); - } - } - RuleKind::Sender => { - if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { - rule.actions = body.actions.clone(); - global.sender.replace(rule); - } - } - RuleKind::Room => { - if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { - rule.actions = body.actions.clone(); - global.room.replace(rule); - } - } - RuleKind::Content => { - if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { - rule.actions = body.actions.clone(); - global.content.replace(rule); - } - } - _ => {} - }; + if account_data + .content + .global + .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) + .is_err() + { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )); + } services().account_data.update( None, @@ -370,34 +277,10 @@ pub async fn get_pushrule_enabled_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))?; let global = account_data.content.global; - let enabled = match body.kind { - RuleKind::Override => global - .override_ - .iter() - .find(|rule| rule.rule_id == body.rule_id) - .map_or(false, |rule| rule.enabled), - RuleKind::Underride => global - .underride - .iter() - .find(|rule| rule.rule_id == body.rule_id) - .map_or(false, |rule| rule.enabled), - RuleKind::Sender => global - .sender - .iter() - .find(|rule| rule.rule_id == body.rule_id) - .map_or(false, |rule| rule.enabled), - RuleKind::Room => global - .room - .iter() - .find(|rule| rule.rule_id == body.rule_id) - .map_or(false, |rule| rule.enabled), - RuleKind::Content => global - .content - .iter() - .find(|rule| rule.rule_id == body.rule_id) - .map_or(false, |rule| rule.enabled), - _ => false, - }; + let enabled = global + .get(body.kind.clone(), &body.rule_id) + .map(|r| r.enabled()) + .unwrap_or_default(); Ok(get_pushrule_enabled::v3::Response { enabled }) } @@ -432,44 +315,16 @@ pub async fn set_pushrule_enabled_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - let global = &mut account_data.content.global; - match body.kind { - RuleKind::Override => { - if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { - global.override_.remove(&rule); - rule.enabled = body.enabled; - global.override_.insert(rule); - } - } - RuleKind::Underride => { - if let 
Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { - global.underride.remove(&rule); - rule.enabled = body.enabled; - global.underride.insert(rule); - } - } - RuleKind::Sender => { - if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { - global.sender.remove(&rule); - rule.enabled = body.enabled; - global.sender.insert(rule); - } - } - RuleKind::Room => { - if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { - global.room.remove(&rule); - rule.enabled = body.enabled; - global.room.insert(rule); - } - } - RuleKind::Content => { - if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { - global.content.remove(&rule); - rule.enabled = body.enabled; - global.content.insert(rule); - } - } - _ => {} + if account_data + .content + .global + .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) + .is_err() + { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )); } services().account_data.update( @@ -512,34 +367,23 @@ pub async fn delete_pushrule_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - let global = &mut account_data.content.global; - match body.kind { - RuleKind::Override => { - if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { - global.override_.remove(&rule); + if let Err(error) = account_data + .content + .global + .remove(body.kind.clone(), &body.rule_id) + { + let err = match error { + RemovePushRuleError::ServerDefault => Error::BadRequest( + ErrorKind::InvalidParam, + "Cannot delete a server-default pushrule.", + ), + RemovePushRuleError::NotFound => { + Error::BadRequest(ErrorKind::NotFound, "Push rule not found.") } - } - RuleKind::Underride => { - if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() { - global.underride.remove(&rule); - } - } - RuleKind::Sender => { - if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() { - global.sender.remove(&rule); - } - } - RuleKind::Room => { - if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() { - global.room.remove(&rule); - } - } - RuleKind::Content => { - if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() { - global.content.remove(&rule); - } - } - _ => {} + _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + }; + + return Err(err); } services().account_data.update( From 88c6bf75954ead182fffc900585b418b018e98e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Sun, 26 Feb 2023 17:49:42 +0100 Subject: [PATCH 1365/1727] Always return an error if a push rule is not found --- src/api/client_server/push.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index ab7c686..7276866 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -187,11 +187,13 @@ pub async fn get_pushrule_actions_route( let global = account_data.global; let actions = global .get(body.kind.clone(), &body.rule_id) - .map(|rule| rule.actions().to_owned()); + .map(|rule| rule.actions().to_owned()) + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + ))?; - Ok(get_pushrule_actions::v3::Response { - actions: actions.unwrap_or_default(), - }) + Ok(get_pushrule_actions::v3::Response { actions }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -280,7 
+282,10 @@ pub async fn get_pushrule_enabled_route( let enabled = global .get(body.kind.clone(), &body.rule_id) .map(|r| r.enabled()) - .unwrap_or_default(); + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + ))?; Ok(get_pushrule_enabled::v3::Response { enabled }) } From 1929ca5d9d8d8b43a2f3fcce7af19e197f93f562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Sun, 26 Feb 2023 17:57:44 +0100 Subject: [PATCH 1366/1727] Add a database migration to fix and update the default pushrules --- src/database/mod.rs | 48 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index e05991d..1415f68 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -411,7 +411,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 12; + let latest_database_version = 13; if services().users.count()? > 0 { // MIGRATIONS @@ -880,6 +880,52 @@ impl KeyValueDatabase { warn!("Migration: 11 -> 12 finished"); } + // This migration can be reused as-is anytime the server-default rules are updated. + if services().globals.database_version()? < 13 { + for username in services().users.list_local_users()? { + let user = match UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) { + Ok(u) => u, + Err(e) => { + warn!("Invalid username {username}: {e}"); + continue; + } + }; + + let raw_rules_list = services() + .account_data + .get( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = + serde_json::from_str::(raw_rules_list.get()).unwrap(); + + let user_default_rules = ruma::push::Ruleset::server_default(&user); + account_data + .content + .global + .update_with_server_default(user_default_rules); + + services().account_data.update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + } + + services().globals.bump_database_version(13)?; + + warn!("Migration: 12 -> 13 finished"); + } + assert_eq!( services().globals.database_version().unwrap(), latest_database_version From c997311beae4becb5f8c11c46bb1c821b0935dbd Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 16:56:51 -0700 Subject: [PATCH 1367/1727] Revert "build(nix): fix flake builds" This reverts commit 5d913f701083e1519a3595190e63193c519fbc6b. Sorry, I don't understand how any of this works, and it seems pretty opaque/difficult to fine-tune. 
--- flake.lock | 374 +++++------------------------------------------------ flake.nix | 122 +++++++++-------- 2 files changed, 99 insertions(+), 397 deletions(-) diff --git a/flake.lock b/flake.lock index 1bb1123..bfe0a9b 100644 --- a/flake.lock +++ b/flake.lock @@ -1,124 +1,18 @@ { "nodes": { - "alejandra": { - "inputs": { - "fenix": "fenix", - "flakeCompat": "flakeCompat", - "nixpkgs": [ - "d2n", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1658427149, - "narHash": "sha256-ToD/1z/q5VHsLMrS2h96vjJoLho59eNRtknOUd19ey8=", - "owner": "kamadorueda", - "repo": "alejandra", - "rev": "f5a22afd2adfb249b4e68e0b33aa1f0fb73fb1be", - "type": "github" - }, - "original": { - "owner": "kamadorueda", - "repo": "alejandra", - "type": "github" - } - }, - "all-cabal-json": { - "flake": false, - "locked": { - "lastModified": 1665552503, - "narHash": "sha256-r14RmRSwzv5c+bWKUDaze6pXM7nOsiz1H8nvFHJvufc=", - "owner": "nix-community", - "repo": "all-cabal-json", - "rev": "d7c0434eebffb305071404edcf9d5cd99703878e", - "type": "github" - }, - "original": { - "owner": "nix-community", - "ref": "hackage", - "repo": "all-cabal-json", - "type": "github" - } - }, - "crane": { - "flake": false, - "locked": { - "lastModified": 1670900067, - "narHash": "sha256-VXVa+KBfukhmWizaiGiHRVX/fuk66P8dgSFfkVN4/MY=", - "owner": "ipetkov", - "repo": "crane", - "rev": "59b31b41a589c0a65e4a1f86b0e5eac68081468b", - "type": "github" - }, - "original": { - "owner": "ipetkov", - "repo": "crane", - "type": "github" - } - }, - "d2n": { - "inputs": { - "alejandra": "alejandra", - "all-cabal-json": "all-cabal-json", - "crane": "crane", - "devshell": "devshell", - "flake-parts": "flake-parts", - "flake-utils-pre-commit": "flake-utils-pre-commit", - "ghc-utils": "ghc-utils", - "gomod2nix": "gomod2nix", - "mach-nix": "mach-nix", - "nix-pypi-fetcher": "nix-pypi-fetcher", - "nixpkgs": [ - "nixpkgs" - ], - "poetry2nix": "poetry2nix", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1674848374, - "narHash": "sha256-1+xlsmUWzpptK8mLjznwqOLogeicLkxB8tV6XUZbobc=", - "owner": "nix-community", - "repo": "dream2nix", - "rev": "d91e7381fa303be02f70e472207e05b26ce35b41", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "dream2nix", - "type": "github" - } - }, - "devshell": { - "flake": false, - "locked": { - "lastModified": 1663445644, - "narHash": "sha256-+xVlcK60x7VY1vRJbNUEAHi17ZuoQxAIH4S4iUFUGBA=", - "owner": "numtide", - "repo": "devshell", - "rev": "e3dc3e21594fe07bdb24bdf1c8657acaa4cb8f66", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "devshell", - "type": "github" - } - }, "fenix": { "inputs": { "nixpkgs": [ - "d2n", - "alejandra", "nixpkgs" ], "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1657607339, - "narHash": "sha256-HaqoAwlbVVZH2n4P3jN2FFPMpVuhxDy1poNOR7kzODc=", + "lastModified": 1671776618, + "narHash": "sha256-myjhExbKIzZy+kqqFyqvX59KErqYZVNTPsCfgByTOKo=", "owner": "nix-community", "repo": "fenix", - "rev": "b814c83d9e6aa5a28d0cf356ecfdafb2505ad37d", + "rev": "64d1607710b99e72d9afb2cde11bd1c2cea7cb91", "type": "github" }, "original": { @@ -127,31 +21,13 @@ "type": "github" } }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1668450977, - "narHash": "sha256-cfLhMhnvXn6x1vPm+Jow3RiFAUSCw/l1utktCw5rVA4=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "d591857e9d7dd9ddbfba0ea02b43b927c3c0f1fa", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - 
"repo": "flake-parts", - "type": "github" - } - }, "flake-utils": { "locked": { - "lastModified": 1659877975, - "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", "owner": "numtide", "repo": "flake-utils", - "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", "type": "github" }, "original": { @@ -160,228 +36,57 @@ "type": "github" } }, - "flake-utils-pre-commit": { + "naersk": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, "locked": { - "lastModified": 1644229661, - "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", + "lastModified": 1671096816, + "narHash": "sha256-ezQCsNgmpUHdZANDCILm3RvtO1xH8uujk/+EqNvzIOg=", + "owner": "nix-community", + "repo": "naersk", + "rev": "d998160d6a076cfe8f9741e56aeec7e267e3e114", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flakeCompat": { - "flake": false, - "locked": { - "lastModified": 1650374568, - "narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "b4a34015c698c7793d592d66adbab377907a2be8", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "ghc-utils": { - "flake": false, - "locked": { - "lastModified": 1662774800, - "narHash": "sha256-1Rd2eohGUw/s1tfvkepeYpg8kCEXiIot0RijapUjAkE=", - "ref": "refs/heads/master", - "rev": "bb3a2d3dc52ff0253fb9c2812bd7aa2da03e0fea", - "revCount": 1072, - "type": "git", - "url": "https://gitlab.haskell.org/bgamari/ghc-utils" - }, - "original": { - "type": "git", - "url": "https://gitlab.haskell.org/bgamari/ghc-utils" - } - }, - "gomod2nix": { - "flake": false, - "locked": { - "lastModified": 1627572165, - "narHash": "sha256-MFpwnkvQpauj799b4QTBJQFEddbD02+Ln5k92QyHOSk=", - "owner": "tweag", - "repo": "gomod2nix", - "rev": "67f22dd738d092c6ba88e420350ada0ed4992ae8", - "type": "github" - }, - "original": { - "owner": "tweag", - "repo": "gomod2nix", - "type": "github" - } - }, - "mach-nix": { - "flake": false, - "locked": { - "lastModified": 1634711045, - "narHash": "sha256-m5A2Ty88NChLyFhXucECj6+AuiMZPHXNbw+9Kcs7F6Y=", - "owner": "DavHau", - "repo": "mach-nix", - "rev": "4433f74a97b94b596fa6cd9b9c0402104aceef5d", - "type": "github" - }, - "original": { - "id": "mach-nix", - "type": "indirect" - } - }, - "nix-pypi-fetcher": { - "flake": false, - "locked": { - "lastModified": 1669065297, - "narHash": "sha256-UStjXjNIuIm7SzMOWvuYWIHBkPUKQ8Id63BMJjnIDoA=", - "owner": "DavHau", - "repo": "nix-pypi-fetcher", - "rev": "a9885ac6a091576b5195d547ac743d45a2a615ac", - "type": "github" - }, - "original": { - "owner": "DavHau", - "repo": "nix-pypi-fetcher", + "owner": "nix-community", + "repo": "naersk", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1674641431, - "narHash": "sha256-qfo19qVZBP4qn5M5gXc/h1MDgAtPA5VxJm9s8RUAkVk=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": "9b97ad7b4330aacda9b2343396eb3df8a853b4fc", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1665349835, - "narHash": "sha256-UK4urM3iN80UXQ7EaOappDzcisYIuEURFRoGQ/yPkug=", + "lastModified": 1671780662, + 
"narHash": "sha256-Tsc64sN8LLHa7eqDZVVeubI8CyqIjs9l5tQ5EeRlgvM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "34c5293a71ffdb2fe054eb5288adc1882c1eb0b1", + "rev": "339063a22409514cb2baea677b329e618faa6a08", "type": "github" }, "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib_2": { - "locked": { - "dir": "lib", - "lastModified": 1672350804, - "narHash": "sha256-jo6zkiCabUBn3ObuKXHGqqORUMH27gYDIFFfLq5P4wg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "677ed08a50931e38382dbef01cba08a8f7eac8f6", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib_2" - }, - "locked": { - "lastModified": 1674771137, - "narHash": "sha256-Zpk1GbEsYrqKmuIZkx+f+8pU0qcCYJoSUwNz1Zk+R00=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "7c7a8bce3dffe71203dcd4276504d1cb49dfe05f", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "poetry2nix": { - "flake": false, - "locked": { - "lastModified": 1666918719, - "narHash": "sha256-BkK42fjAku+2WgCOv2/1NrPa754eQPV7gPBmoKQBWlc=", - "owner": "nix-community", - "repo": "poetry2nix", - "rev": "289efb187123656a116b915206e66852f038720e", - "type": "github" - }, - "original": { - "owner": "nix-community", - "ref": "1.36.0", - "repo": "poetry2nix", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-utils": [ - "d2n", - "flake-utils-pre-commit" - ], - "nixpkgs": [ - "d2n", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1646153636, - "narHash": "sha256-AlWHMzK+xJ1mG267FdT8dCq/HvLCA6jwmx2ZUy5O8tY=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "b6bc0b21e1617e2b07d8205e7fae7224036dfa4b", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", "type": "github" } }, "root": { "inputs": { - "d2n": "d2n", - "nixpkgs": "nixpkgs", - "parts": "parts", - "rust-overlay": "rust-overlay" + "fenix": "fenix", + "flake-utils": "flake-utils", + "naersk": "naersk", + "nixpkgs": "nixpkgs" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1657557289, - "narHash": "sha256-PRW+nUwuqNTRAEa83SfX+7g+g8nQ+2MMbasQ9nt6+UM=", + "lastModified": 1671750139, + "narHash": "sha256-xbL8BZU87rHfQkF3tuFXduNGPW8fDwFI+0fFmRJx66E=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "caf23f29144b371035b864a1017dbc32573ad56d", + "rev": "a06525517b0b69cd97f2c39a4012d96f44bf0776", "type": "github" }, "original": { @@ -390,27 +95,6 @@ "repo": "rust-analyzer", "type": "github" } - }, - "rust-overlay": { - "inputs": { - "flake-utils": "flake-utils", - "nixpkgs": [ - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1674786480, - "narHash": "sha256-n25V3Ug/dJewbJaxj1gL0cUMBdOonrVkIQCHd9yHHvw=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "296dd673b46aaebe1c8355f1848ceb7c905dda35", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index f4db253..e10e8bb 100644 --- a/flake.nix +++ b/flake.nix @@ -1,56 +1,74 @@ { - inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; - inputs.d2n.url = "github:nix-community/dream2nix"; - inputs.d2n.inputs.nixpkgs.follows = "nixpkgs"; - inputs.parts.url = "github:hercules-ci/flake-parts"; - inputs.rust-overlay.url = 
"github:oxalica/rust-overlay"; - inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs"; + flake-utils.url = "github:numtide/flake-utils"; - outputs = inp: - inp.parts.lib.mkFlake {inputs = inp;} { - systems = ["x86_64-linux"]; - imports = [inp.d2n.flakeModuleBeta]; - perSystem = { - config, - system, - pkgs, - ... - }: let - cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); - pkgsWithToolchain = pkgs.appendOverlays [inp.rust-overlay.overlays.default]; - - toolchains = pkgsWithToolchain.rust-bin.stable."${cargoToml.package.rust-version}"; - # toolchain to use when building conduit, includes only cargo and rustc to reduce closure size - buildToolchain = toolchains.minimal; - # toolchain to use in development shell - # the "default" component set of toolchain adds rustfmt, clippy etc. - devToolchain = toolchains.default.override { - extensions = ["rust-src"]; - }; - - # flake outputs for conduit project - conduitOutputs = config.dream2nix.outputs.conduit; - in { - dream2nix.inputs.conduit = { - source = inp.self; - projects.conduit = { - name = "conduit"; - subsystem = "rust"; - translator = "cargo-lock"; - }; - packageOverrides = { - "^.*".set-toolchain.overrideRustToolchain = _: { - cargo = buildToolchain; - rustc = buildToolchain; - }; - }; - }; - devShells.conduit = conduitOutputs.devShells.conduit.overrideAttrs (old: { - # export default crate sources for rust-analyzer to read - RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library"; - nativeBuildInputs = (old.nativeBuildInputs or []) ++ [devToolchain]; - }); - devShells.default = config.devShells.conduit; - }; + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; }; + naersk = { + url = "github:nix-community/naersk"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = + { self + , nixpkgs + , flake-utils + + , fenix + , naersk + }: flake-utils.lib.eachDefaultSystem (system: + let + pkgs = nixpkgs.legacyPackages.${system}; + + # Nix-accessible `Cargo.toml` + cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); + + # The Rust toolchain to use + toolchain = fenix.packages.${system}.toolchainOf { + # Use the Rust version defined in `Cargo.toml` + channel = cargoToml.package.rust-version; + + # THE rust-version HASH + sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; + }; + + builder = (pkgs.callPackage naersk { + inherit (toolchain) rustc cargo; + }).buildPackage; + in + { + packages.default = builder { + src = ./.; + + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]); + }; + + devShells.default = pkgs.mkShell { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so + RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; + + # Development tools + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]) ++ (with toolchain; [ + cargo + clippy + rust-src + rustc + rustfmt + ]); + }; + + checks = { + packagesDefault = self.packages.${system}.default; + devShellsDefault = self.devShells.${system}.default; + }; + }); } From a0c449e570886b67d1558016689dfdf7fef990ff Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 16:58:19 -0700 Subject: [PATCH 1368/1727] update flake.lock --- flake.lock | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/flake.lock b/flake.lock index bfe0a9b..280e1b5 100644 --- a/flake.lock +++ b/flake.lock 
@@ -8,11 +8,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1671776618, - "narHash": "sha256-myjhExbKIzZy+kqqFyqvX59KErqYZVNTPsCfgByTOKo=", + "lastModified": 1680607374, + "narHash": "sha256-U5iiPqbAanr+sQCCZ7zxYhwCXdcDpish8Uy4ELZeXM0=", "owner": "nix-community", "repo": "fenix", - "rev": "64d1607710b99e72d9afb2cde11bd1c2cea7cb91", + "rev": "e70d498e97017daa59363eafa054619d4fa160c3", "type": "github" }, "original": { @@ -23,11 +23,11 @@ }, "flake-utils": { "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1678901627, + "narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6", "type": "github" }, "original": { @@ -43,11 +43,11 @@ ] }, "locked": { - "lastModified": 1671096816, - "narHash": "sha256-ezQCsNgmpUHdZANDCILm3RvtO1xH8uujk/+EqNvzIOg=", + "lastModified": 1679567394, + "narHash": "sha256-ZvLuzPeARDLiQUt6zSZFGOs+HZmE+3g4QURc8mkBsfM=", "owner": "nix-community", "repo": "naersk", - "rev": "d998160d6a076cfe8f9741e56aeec7e267e3e114", + "rev": "88cd22380154a2c36799fe8098888f0f59861a15", "type": "github" }, "original": { @@ -58,11 +58,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1671780662, - "narHash": "sha256-Tsc64sN8LLHa7eqDZVVeubI8CyqIjs9l5tQ5EeRlgvM=", + "lastModified": 1680652733, + "narHash": "sha256-FFG6Nai9M71C0Uc+D8TxyHoAjTplM0/9uWKsl7ALfUs=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "339063a22409514cb2baea677b329e618faa6a08", + "rev": "cc5bde408572508efd1273852862d418bb313443", "type": "github" }, "original": { @@ -82,11 +82,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1671750139, - "narHash": "sha256-xbL8BZU87rHfQkF3tuFXduNGPW8fDwFI+0fFmRJx66E=", + "lastModified": 1680435407, + "narHash": "sha256-IPBtZCOh3BdrR+V77cL7r6WQnclWcZ/85BDYnmq/GnQ=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "a06525517b0b69cd97f2c39a4012d96f44bf0776", + "rev": "236576227a299fd19ba836b1834ab50c948af994", "type": "github" }, "original": { From 2b63e46fc5c4234823a1d71b2a7eac4ee15cd9be Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 16:47:08 -0700 Subject: [PATCH 1369/1727] use system rocksdb This mostly just improves build times. 
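The flake change below points the build at Nix's RocksDB through two environment variables that librocksdb-sys reads at compile time. Outside of Nix, the same effect can likely be had by exporting them by hand — a minimal sketch, assuming a system-wide RocksDB installation (the paths are illustrative and differ per distribution):

```bash
# Link against an existing RocksDB instead of compiling it from source.
# Paths are examples only; point them at wherever your RocksDB headers/libs live.
export ROCKSDB_INCLUDE_DIR=/usr/include
export ROCKSDB_LIB_DIR=/usr/lib

cargo build --release
```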
--- flake.nix | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/flake.nix b/flake.nix index e10e8bb..baa3261 100644 --- a/flake.nix +++ b/flake.nix @@ -36,6 +36,10 @@ sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; }; + # Point to system RocksDB + ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb_6_23}/include"; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb_6_23}/lib"; + builder = (pkgs.callPackage naersk { inherit (toolchain) rustc cargo; }).buildPackage; @@ -44,6 +48,9 @@ packages.default = builder { src = ./.; + # Use system RocksDB + inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; + nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook ]); @@ -54,6 +61,9 @@ # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; + # Use system RocksDB + inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; + # Development tools nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook From 55149e33361b07c87be66a8b6632d9fcb2711548 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 17:30:30 -0700 Subject: [PATCH 1370/1727] use crane instead of naersk I guess naersk still doesn't support git dependencies using workspace inheritance, but crane does. --- flake.lock | 88 +++++++++++++++++++++++++++++++++++++++++------------- flake.nix | 12 ++++---- 2 files changed, 73 insertions(+), 27 deletions(-) diff --git a/flake.lock b/flake.lock index 280e1b5..d76ff59 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,30 @@ { "nodes": { + "crane": { + "inputs": { + "flake-compat": "flake-compat", + "flake-utils": [ + "flake-utils" + ], + "nixpkgs": [ + "nixpkgs" + ], + "rust-overlay": "rust-overlay" + }, + "locked": { + "lastModified": 1680584903, + "narHash": "sha256-uraq+D3jcLzw/UVk0xMHcnfILfIMa0DLrtAEq2nNlxU=", + "owner": "ipetkov", + "repo": "crane", + "rev": "65d3f6a3970cd46bef5eedfd458300f72c56b3c5", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, "fenix": { "inputs": { "nixpkgs": [ @@ -21,6 +46,22 @@ "type": "github" } }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-utils": { "locked": { "lastModified": 1678901627, @@ -36,26 +77,6 @@ "type": "github" } }, - "naersk": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1679567394, - "narHash": "sha256-ZvLuzPeARDLiQUt6zSZFGOs+HZmE+3g4QURc8mkBsfM=", - "owner": "nix-community", - "repo": "naersk", - "rev": "88cd22380154a2c36799fe8098888f0f59861a15", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "naersk", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1680652733, @@ -73,9 +94,9 @@ }, "root": { "inputs": { + "crane": "crane", "fenix": "fenix", "flake-utils": "flake-utils", - "naersk": "naersk", "nixpkgs": "nixpkgs" } }, @@ -95,6 +116,31 @@ "repo": "rust-analyzer", "type": "github" } + }, + "rust-overlay": { + "inputs": { + "flake-utils": [ + "crane", + "flake-utils" + ], + "nixpkgs": [ + "crane", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1680488274, + "narHash": "sha256-0vYMrZDdokVmPQQXtFpnqA2wEgCCUXf5a3dDuDVshn0=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": 
"7ec2ff598a172c6e8584457167575b3a1a5d80d8", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index baa3261..6ed12de 100644 --- a/flake.nix +++ b/flake.nix @@ -7,9 +7,10 @@ url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; }; - naersk = { - url = "github:nix-community/naersk"; + crane = { + url = "github:ipetkov/crane"; inputs.nixpkgs.follows = "nixpkgs"; + inputs.flake-utils.follows = "flake-utils"; }; }; @@ -19,7 +20,7 @@ , flake-utils , fenix - , naersk + , crane }: flake-utils.lib.eachDefaultSystem (system: let pkgs = nixpkgs.legacyPackages.${system}; @@ -40,9 +41,8 @@ ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb_6_23}/include"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb_6_23}/lib"; - builder = (pkgs.callPackage naersk { - inherit (toolchain) rustc cargo; - }).buildPackage; + builder = + ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage; in { packages.default = builder { From 3be32c4dac30fe428eefc007d565fd23fddaae40 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 17:52:15 -0700 Subject: [PATCH 1371/1727] factor out shared things --- flake.nix | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index 6ed12de..5e7d206 100644 --- a/flake.nix +++ b/flake.nix @@ -41,6 +41,11 @@ ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb_6_23}/include"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb_6_23}/lib"; + # Shared between the package and the devShell + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]); + builder = ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage; in @@ -51,9 +56,7 @@ # Use system RocksDB inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]); + inherit nativeBuildInputs; }; devShells.default = pkgs.mkShell { @@ -65,9 +68,7 @@ inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; # Development tools - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]) ++ (with toolchain; [ + nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ cargo clippy rust-src From a6712627e4758a973af4f6f157778e55308b076d Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 19:15:09 -0700 Subject: [PATCH 1372/1727] tiny refactor --- flake.nix | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/flake.nix b/flake.nix index 5e7d206..7162c93 100644 --- a/flake.nix +++ b/flake.nix @@ -37,7 +37,7 @@ sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; }; - # Point to system RocksDB + # The system's RocksDB ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb_6_23}/include"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb_6_23}/lib"; @@ -53,10 +53,10 @@ packages.default = builder { src = ./.; - # Use system RocksDB - inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; - - inherit nativeBuildInputs; + inherit + nativeBuildInputs + ROCKSDB_INCLUDE_DIR + ROCKSDB_LIB_DIR; }; devShells.default = pkgs.mkShell { @@ -64,8 +64,9 @@ # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; - # Use system RocksDB - inherit ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; + inherit + ROCKSDB_INCLUDE_DIR + ROCKSDB_LIB_DIR; # Development tools nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ From eb4323cc0fab563655ddf9ce1471302e657724e4 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 4 Apr 2023 19:11:34 -0700 Subject: [PATCH 1373/1727] 
use mold on linux --- flake.nix | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 7162c93..970e847 100644 --- a/flake.nix +++ b/flake.nix @@ -25,6 +25,12 @@ let pkgs = nixpkgs.legacyPackages.${system}; + # Use mold on Linux + stdenv = if pkgs.stdenv.isLinux then + pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv + else + pkgs.stdenv; + # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); @@ -54,12 +60,13 @@ src = ./.; inherit + stdenv nativeBuildInputs ROCKSDB_INCLUDE_DIR ROCKSDB_LIB_DIR; }; - devShells.default = pkgs.mkShell { + devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; From f5e3b0e2dd1c47d6d7dc3a9c21da892472ab57de Mon Sep 17 00:00:00 2001 From: Jonathan Flueren <9669250-JonOfUs@users.noreply.gitlab.com> Date: Mon, 15 May 2023 19:25:57 +0000 Subject: [PATCH 1374/1727] Recognize admin commands without : after tag Very useful since many Matrix clients don't insert : after user tags --- src/service/rooms/timeline/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 47f4c65..249b2a2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -443,7 +443,8 @@ impl Service { )?; let server_user = format!("@conduit:{}", services().globals.server_name()); - let to_conduit = body.starts_with(&format!("{server_user}: ")); + let to_conduit = body.starts_with(&format!("{server_user}: ")) + || body.starts_with(&format!("{server_user} ")); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit From 921b266d86dd933250879870d9e91bb3c0189484 Mon Sep 17 00:00:00 2001 From: x4u Date: Sun, 21 May 2023 07:04:58 +0000 Subject: [PATCH 1375/1727] X4u/add apache cloudflare deploy info --- DEPLOY.md | 50 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 1d1fc13..c8f4a25 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -19,7 +19,7 @@ You may simply download the binary that fits your machine. Run `uname -m` to see | armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] | These builds were created on and linked against the glibc version shipped with Debian bullseye. -If you use a system with an older glibc version, you might need to compile Conduit yourself. +If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. [x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master [armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master @@ -39,12 +39,16 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself +Alternatively, you may compile the binary yourself. 
First, install any dependencies: ```bash +# Debian $ sudo apt install libclang-dev build-essential -``` +# RHEL +$ sudo dnf install clang +``` +Then, `cd` into the source tree of conduit-next and run: ```bash $ cargo build --release ``` @@ -74,7 +78,7 @@ cross build --release --no-default-features --features conduit_bin,backend_rocks While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian you can use this command to create a Conduit user: +In Debian or RHEL, you can use this command to create a Conduit user: ```bash sudo adduser --system conduit --no-create-home @@ -86,6 +90,19 @@ Conduit uses the ports 443 and 8448 both of which need to be open in the firewal If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. +## Delegation of federation traffic + +If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443: +```apache +# .well-known delegation on Apache + + ErrorDocument 200 '{"m.server": "your.server.name:443"}' + Header always set Content-Type application/json + Header always set Access-Control-Allow-Origin * + +``` +[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/). + ## Setting up a systemd service Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your @@ -101,6 +118,7 @@ After=network.target Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" User=conduit Group=nogroup +# On RHEL: Group=nobody Restart=always ExecStart=/usr/local/bin/matrix-conduit @@ -168,7 +186,7 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re ## Setting the correct file permissions As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on -Debian: +Debian or RHEL: ```bash sudo chown -R root:root /etc/matrix-conduit @@ -180,6 +198,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/matrix-conduit/ sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ +# On RHEL: sudo chown -R conduit:nobody /var/lib/matrix-conduit/ sudo chmod 700 /var/lib/matrix-conduit/ ``` @@ -192,6 +211,11 @@ This depends on whether you use Apache, Caddy, Nginx or another web server. Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: ```apache +# Requires mod_proxy and mod_proxy_http +# +# On Apache instance compiled from source, +# paste into httpd-ssl.conf or httpd.conf + Listen 8448 @@ -208,7 +232,11 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ **You need to make some edits again.** When you are done, run ```bash +# Debian $ sudo systemctl reload apache2 + +# Installed from source +$ sudo apachectl -k graceful ``` ### Caddy @@ -266,11 +294,19 @@ $ sudo systemctl reload nginx If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. 
-The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: +The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this: ```bash +# To use ECC for the private key, +# paste into /etc/letsencrypt/cli.ini: +# key-type = ecdsa +# elliptic-curve = secp384r1 + $ sudo certbot -d your.server.name ``` +[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured. + +If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost. ## You're done! @@ -294,6 +330,8 @@ You can also use these commands as a quick health check. ```bash $ curl https://your.server.name/_matrix/client/versions + +# If using port 8448 $ curl https://your.server.name:8448/_matrix/client/versions ``` From 49b5af6d454f27f543ca080a605ab56bbeeeac37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 21 May 2023 11:51:36 +0200 Subject: [PATCH 1376/1727] chore: bump rocksdb --- Cargo.lock | 997 +++++++++++++++------------- Cargo.toml | 3 +- src/database/abstraction/rocksdb.rs | 8 +- 3 files changed, 556 insertions(+), 452 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 385d8f6..9c86f7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,20 +10,20 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ - "getrandom 0.2.8", + "cfg-if", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -44,16 +44,22 @@ dependencies = [ ] [[package]] -name = "arc-swap" -version = "1.5.1" +name = "anstyle" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" + +[[package]] +name = "arc-swap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -83,23 +89,20 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies 
= [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "atomic" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" -dependencies = [ - "autocfg", -] +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" [[package]] name = "autocfg" @@ -157,9 +160,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" +checksum = "bace45b270e36e3c27a190c65883de6dfc9f1d18c829907c127464815dc67b24" dependencies = [ "arc-swap", "bytes", @@ -169,7 +172,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.1", + "rustls-pemfile 1.0.2", "tokio", "tokio-rustls", "tower-service", @@ -189,9 +192,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bincode" @@ -204,9 +207,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ "bitflags", "cexpr", @@ -214,11 +217,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "syn 2.0.16", ] [[package]] @@ -229,13 +234,13 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2b_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" +checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] @@ -249,9 +254,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -269,9 +274,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.2" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -279,15 +284,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "bytemuck" -version 
= "1.12.3" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" [[package]] name = "byteorder" @@ -297,15 +302,26 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "cc" -version = "1.0.77" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] @@ -327,9 +343,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -338,37 +354,43 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.27" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966" +checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" dependencies = [ - "bitflags", + "clap_builder", "clap_derive", - "clap_lex", "once_cell", ] [[package]] -name = "clap_derive" -version = "4.0.21" +name = "clap_builder" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" +checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" +dependencies = [ + "anstyle", + "bitflags", + "clap_lex", +] + +[[package]] +name = "clap_derive" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" dependencies = [ "heck", - "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "clap_lex" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" -dependencies = [ - "os_str_bytes", -] +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "color_quant" @@ -433,9 +455,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -443,6 +465,12 @@ version = "0.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" + [[package]] name = "core-foundation" version = "0.9.3" @@ -455,33 +483,33 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] [[package]] name = "crc" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "1.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" @@ -508,9 +536,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -518,9 +546,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -529,9 +557,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if", @@ -552,9 +580,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -589,7 +617,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" dependencies = [ "cfg-if", - "hashbrown", + "hashbrown 0.12.3", "lock_api", "once_cell", "parking_lot_core", @@ -597,15 +625,15 @@ dependencies = [ [[package]] name = "data-encoding" 
-version = "2.3.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "der" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", "zeroize", @@ -622,11 +650,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -653,9 +681,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -676,15 +704,15 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] @@ -698,7 +726,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -713,6 +741,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fdeflate" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" +dependencies = [ + "simd-adler32", +] + [[package]] name = "figment" version = "0.10.8" @@ -722,16 +759,16 @@ dependencies = [ "atomic", "pear", "serde", - "toml 0.5.9", + "toml 0.5.11", "uncased", "version_check", ] [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "miniz_oxide", @@ -762,17 +799,11 @@ dependencies = [ "winapi", ] -[[package]] -name = "fs_extra" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" - [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = 
"23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -785,9 +816,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -795,15 +826,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -812,38 +843,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -859,9 +890,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -880,9 +911,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" 
dependencies = [ "cfg-if", "libc", @@ -891,9 +922,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.11.4" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" +checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045" dependencies = [ "color_quant", "weezl", @@ -901,15 +932,15 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.15" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -929,17 +960,23 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ "ahash", ] [[package]] name = "hashlink" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" dependencies = [ - "hashbrown", + "hashbrown 0.13.2", ] [[package]] @@ -969,9 +1006,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "heed" @@ -1011,9 +1048,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] @@ -1024,7 +1061,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -1040,9 +1077,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1080,9 +1117,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" 
dependencies = [ "bytes", "futures-channel", @@ -1104,9 +1141,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -1138,9 +1175,9 @@ dependencies = [ [[package]] name = "image" -version = "0.24.5" +version = "0.24.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945" +checksum = "527909aa81e20ac3a44803521443a765550f09b5130c2c2fa1ea59c2f8f50a3a" dependencies = [ "bytemuck", "byteorder", @@ -1154,12 +1191,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", "serde", ] @@ -1189,9 +1226,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -1204,15 +1241,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -1225,9 +1262,9 @@ checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] @@ -1252,11 +1289,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.1.1" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "pem", "ring", "serde", @@ -1300,9 +1337,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.137" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -1316,14 +1353,18 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = 
"6.20.3" +version = "0.11.0+8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" dependencies = [ "bindgen", + "bzip2-sys", "cc", "glob", "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", ] [[package]] @@ -1337,6 +1378,17 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libz-sys" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -1382,6 +1434,16 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "lz4-sys" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "maplit" version = "1.0.2" @@ -1405,9 +1467,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" @@ -1423,18 +1485,18 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -1444,30 +1506,31 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", + "simd-adler32", ] [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -1526,9 +1589,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ "hermit-abi", "libc", @@ -1536,9 +1599,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -1635,12 +1698,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "os_str_bytes" -version = "6.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" - [[package]] name = "overload" version = "0.1.1" @@ -1669,28 +1726,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "paste" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pear" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" +checksum = "0ec95680a7087503575284e5063e14b694b7a9c0b065e5dceec661e0497127e8" dependencies = [ "inlinable_string", "pear_codegen", @@ -1699,14 +1756,14 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" +checksum = "9661a3a53f93f09f2ea882018e4d7c88f6ff2956d809a276060476fd8c879d3c" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -1717,9 +1774,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ "base64 0.13.1", ] @@ -1732,9 +1789,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "persy" -version = "1.3.4" +version = "1.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5511189f4dbd737283b0dd2ff6715f2e35fd0d3e1ddf953ed6a772e439e1f73f" +checksum = "3712821f12453814409ec149071bd4832a8ec458e648579c104aee30ed70b300" dependencies = [ "crc", "data-encoding", @@ -1748,22 +1805,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = 
"c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -1790,18 +1847,19 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "png" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638" +checksum = "aaeebc51f9e7d2c150d3f3bfeb667f2aa985db5ef1e3d212847bdedb488beeaa" dependencies = [ "bitflags", "crc32fast", + "fdeflate", "flate2", "miniz_oxide", ] @@ -1813,58 +1871,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "proc-macro-crate" -version = "1.2.1" +name = "prettyplease" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "617feabb81566b593beb4886fb8c1f38064169dae4dccad0e3220160c3b37203" +dependencies = [ + "proc-macro2", + "syn 2.0.16", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "thiserror", - "toml 0.5.9", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", + "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] [[package]] name = "proc-macro2-diagnostics" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +checksum = "606c4ba35817e2922a308af55ad51bab3645b59eae5c570d4a6cf07e36bd493b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", "version_check", "yansi", ] @@ -1877,9 +1920,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.21" +version = "1.0.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -1943,7 +1986,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -1970,20 +2013,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -1992,14 +2035,20 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "reqwest" @@ -2066,9 +2115,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.17.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" dependencies = [ "libc", "librocksdb-sys", @@ -2189,8 +2238,8 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn", - "toml 0.7.2", + "syn 1.0.109", + "toml 0.7.4", ] [[package]] @@ -2256,7 +2305,7 @@ checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ "base64 0.13.1", "blake2b_simd", - "constant_time_eq", + "constant_time_eq 0.1.5", "crossbeam-utils", ] @@ -2268,9 +2317,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -2285,7 +2334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.1", + "rustls-pemfile 1.0.2", "schannel", "security-framework", ] @@ -2301,27 +2350,26 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", ] [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -2348,9 +2396,9 @@ checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" [[package]] name = "security-framework" -version = "2.7.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -2361,9 +2409,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -2371,22 +2419,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -2404,9 +2452,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.89" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -2415,9 +2463,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" dependencies = [ "serde", ] @@ -2436,9 +2484,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.14" +version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d232d893b10de3eb7258ff01974d6ee20663d8e833263c99409d4b13a0209da" +checksum = 
"d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" dependencies = [ "indexmap", "itoa", @@ -2449,13 +2497,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -2466,7 +2514,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -2490,7 +2538,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -2510,9 +2558,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -2523,6 +2571,12 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +[[package]] +name = "simd-adler32" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" + [[package]] name = "simple_asn1" version = "0.6.2" @@ -2537,9 +2591,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -2552,9 +2606,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -2587,15 +2641,26 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "1.0.103" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" dependencies = [ "proc-macro2", "quote", @@ -2604,9 +2669,9 @@ dependencies = [ [[package]] name = 
"sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synchronoise" @@ -2617,44 +2682,33 @@ dependencies = [ "crossbeam-queue", ] -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -2693,12 +2747,11 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.2+5.3.0-patched" +version = "0.5.3+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" +checksum = "a678df20055b43e57ef8cddde41cdfda9a3c1a060b67f4c5836dfb1d78543ba8" dependencies = [ "cc", - "fs_extra", "libc", ] @@ -2714,9 +2767,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "serde", @@ -2726,15 +2779,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -2750,38 +2803,37 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.22.0" +version = "1.28.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "1.8.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -2809,9 +2861,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -2820,9 +2872,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -2834,18 +2886,18 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] [[package]] name = "toml" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" dependencies = [ "serde", "serde_spanned", @@ -2855,18 +2907,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.4" +version = "0.19.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" dependencies = [ "indexmap", "serde", @@ -2893,9 +2945,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "async-compression", "bitflags", @@ -2941,20 +2993,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -2998,9 +3050,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -3061,36 +3113,36 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "uncased" -version = "0.9.7" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622" +checksum = "9b9bc53168a4be7402ab86c3aad243a84dd7381d09be0eddc81280c1da95ca68" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -3101,17 +3153,11 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-xid" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" - [[package]] name = "unsafe-libyaml" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e5fa573d8ac5f1a856f8d7be41d390ee973daf97c806b2c1a465e4e1406e68" +checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" [[package]] name = "unsigned-varint" @@ -3138,11 +3184,11 @@ dependencies = [ [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -3187,9 +3233,9 @@ checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3197,24 +3243,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.16", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -3224,9 +3270,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3234,28 +3280,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -3311,111 +3357,158 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - 
"windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" 
[[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf09497b8f8b5ac5d3bb4d05c0a99be20f26fd3d5f2db7b0716e946d5103658" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" dependencies = [ "memchr", ] @@ -3446,23 +3539,22 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.16", ] [[package]] @@ -3473,3 +3565,14 @@ checksum = 
"70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" dependencies = [ "num-traits", ] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index 7c7df7f..5dfb38a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,10 +82,11 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } # Used for ruma wrapper serde_html_form = "0.2.0" +rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } + thread_local = "1.1.3" # used for TURN server authentication hmac = "0.12.1" diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 34d91d2..3e64e8b 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -54,7 +54,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; - let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); + let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes); let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); @@ -161,7 +161,7 @@ impl KvTree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) - //.map(|r| r.unwrap()) + .map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -185,7 +185,7 @@ impl KvTree for RocksDbEngineTree<'_> { }, ), ) - //.map(|r| r.unwrap()) + .map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -226,7 +226,7 @@ impl KvTree for RocksDbEngineTree<'_> { &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) - //.map(|r| r.unwrap()) + .map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))) .take_while(move |(k, _)| k.starts_with(&prefix)), ) From d62cd2ae51449d018c4d0d223b0b5d0d53260af2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 21 May 2023 13:42:59 +0200 Subject: [PATCH 1377/1727] chore: bump dependencies --- Cargo.toml | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5dfb38a..c925bf2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,9 +20,9 @@ rust-version = "1.64.0" [dependencies] # Web framework axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.4.0", features = ["tls-rustls"] } -tower = { version = "0.4.8", features = ["util"] } -tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } +axum-server = { version = "0.4.7", features = ["tls-rustls"] } +tower = { version = "0.4.13", features = ["util"] } +tower-http = { version = "0.3.5", features = ["add-extension", "cors", "compression-full", 
"sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -31,46 +31,46 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "8eea3e05490fa9a318f9ed66c3 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Async runtime and utilities -tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } +tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently #sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.0.0", optional = true, features = ["background_ops"] } +persy = { version = "1.4.4", optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest -bytes = "1.1.0" -http = "0.2.4" +bytes = "1.4.0" +http = "0.2.9" # Used to find data directory for default db path -directories = "4.0.0" +directories = "4.0.1" # Used for ruma wrapper -serde_json = { version = "1.0.68", features = ["raw_value"] } +serde_json = { version = "1.0.96", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.9.13" +serde_yaml = "0.9.21" # Used for pdu definition -serde = { version = "1.0.130", features = ["rc"] } +serde = { version = "1.0.163", features = ["rc"] } # Used for secure identifiers -rand = "0.8.4" +rand = "0.8.5" # Used to hash passwords rust-argon2 = "1.0.0" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type -thiserror = "1.0.29" +thiserror = "1.0.40" # Used to generate thumbnails for images -image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key -base64 = "0.13.0" +base64 = "0.13.1" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers trust-dns-resolver = "0.22.0" # Used to find matching events for appservices -regex = "1.5.4" +regex = "1.8.1" # jwt jsonwebtokens -jsonwebtoken = "8.1.1" +jsonwebtoken = "8.3.0" # Performance measurements -tracing = { version = "0.1.27", features = [] } -tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } +tracing = { version = "0.1.37", features = [] } +tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-flame = "0.2.0" opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } @@ -78,8 +78,8 @@ tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } -crossbeam = { version = "0.8.1", optional = true } -num_cpus = "1.13.0" +crossbeam = { version = "0.8.2", optional = true } 
+num_cpus = "1.15.0"
 threadpool = "1.8.1"
 heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
 # Used for ruma wrapper
@@ -87,20 +87,20 @@ serde_html_form = "0.2.0"
 rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }

-thread_local = "1.1.3"
+thread_local = "1.1.7"
 # used for TURN server authentication
 hmac = "0.12.1"
-sha-1 = "0.10.0"
+sha-1 = "0.10.1"
 # used for conduit's CLI and admin room command parsing
-clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
-futures-util = { version = "0.3.17", default-features = false }
+clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
+futures-util = { version = "0.3.28", default-features = false }
 # Used for reading the configuration from conduit.toml & environment variables
-figment = { version = "0.10.6", features = ["env", "toml"] }
+figment = { version = "0.10.8", features = ["env", "toml"] }
 tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
 tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
 lazy_static = "1.4.0"
-async-trait = "0.1.57"
+async-trait = "0.1.68"
 sd-notify = { version = "0.4.1", optional = true }

From 4e2bbf9d6aec7773d66cfba56c49e0f87003d47a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Sun, 21 May 2023 15:16:23 +0200
Subject: [PATCH 1378/1727] Minor DEPLOY.md changes

---
 DEPLOY.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/DEPLOY.md b/DEPLOY.md
index c8f4a25..75db366 100644
--- a/DEPLOY.md
+++ b/DEPLOY.md
@@ -7,7 +7,7 @@

 ## Installing Conduit

-Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
+Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
 only offer Linux binaries.

 You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
@@ -90,7 +90,7 @@ Conduit uses the ports 443 and 8448 both of which need to be open in the firewal
 If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.

-## Delegation of federation traffic
+## Optional: Avoid port 8448

 If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
 ```apache
@@ -174,6 +174,7 @@ allow_registration = true

 allow_federation = true

+# Server to get public keys from. You probably shouldn't change this
 trusted_servers = ["matrix.org"]

 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time

From a4261aac767758c756512f6cf5c4173b8af80354 Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Sun, 21 May 2023 20:41:08 +0000
Subject: [PATCH 1379/1727] * Fix Debian builds by actually including the whole `debian` directory into deb creation
* Fix CI by explicitly setting hostname of docker in docker service
* Fix Docker build by bumping the Rust version to 1.69
* Fix cargo check in CI by bumping the Rust version to 1.69

---
 .gitlab-ci.yml | 5 +++--
 Dockerfile | 6 +++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d05bb89..24db9f4 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -28,7 +28,8 @@ variables:
     # Famedly runners use BTRFS, overlayfs and overlay2 often break jobs
     DOCKER_DRIVER: btrfs
   services:
-    - docker:dind
+    - name: docker:dind
+      alias: docker
   script:
     - apk add openssh-client
     - eval $(ssh-agent -s)
@@ -109,7 +110,7 @@ docker:tags:

 cargo check:
   stage: test
-  image: docker.io/rust:1.64.0-bullseye
+  image: docker.io/rust:1.69.0-bullseye
   needs: []
   interruptible: true
   before_script:

diff --git a/Dockerfile b/Dockerfile
index 2763b12..32ba81f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 # syntax=docker/dockerfile:1
-FROM docker.io/rust:1.64-bullseye AS builder
+FROM docker.io/rust:1.69-bullseye AS builder
 WORKDIR /usr/src/conduit

 # Install required packages to build Conduit and it's dependencies
@@ -37,7 +37,7 @@ COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
 # ---------------------------------------------------------------------------------------------------------------
 # Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
 # ---------------------------------------------------------------------------------------------------------------
-FROM docker.io/rust:1.64-bullseye AS build-cargo-deb
+FROM docker.io/rust:1.69-bullseye AS build-cargo-deb

 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
@@ -57,7 +57,7 @@ WORKDIR /usr/src/conduit
 COPY ./LICENSE ./LICENSE
 COPY ./README.md ./README.md

-COPY debian/README.Debian ./debian/
+COPY debian ./debian
 COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb

 # --no-build makes cargo-deb reuse already compiled project

From be9196430d422c37c25a7f32a09a9c1fa9c52c43 Mon Sep 17 00:00:00 2001
From: Jade
Date: Thu, 25 May 2023 18:21:01 +0000
Subject: [PATCH 1380/1727] fix nix readme to work with ipv6

---
 nix/README.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/nix/README.md b/nix/README.md
index 77bad0f..f8537d5 100644
--- a/nix/README.md
+++ b/nix/README.md
@@ -118,10 +118,19 @@ in
           ssl = true;
         } {
+          addr = "[::]";
+          port = 443;
+          ssl = true;
+        } {
           addr = "0.0.0.0";
           port = 8448;
           ssl = true;
         }
+        {
+          addr = "[::]";
+          port = 8448;
+          ssl = true;
+        }
       ];

       locations."/_matrix/" = {

From 664d6baace232e929dd248636625f9ae835938c5 Mon Sep 17 00:00:00 2001
From: digital
Date: Fri, 26 May 2023 13:06:28 +0200
Subject: [PATCH 1381/1727] fix: make requested changes

---
 src/api/client_server/session.rs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs
index f8fc7f1..b3328e4 100644
--- a/src/api/client_server/session.rs
+++ b/src/api/client_server/session.rs
@@ -105,12 +105,10 @@ pub async fn login_route(body: Ruma) -> Result {
-            info!("hi");
             if !body.from_appservice {
                 return Err(Error::BadRequest(
                     ErrorKind::Forbidden,
-                    // TODO: is this the correct response
-                    "Wrong username or password.",
+                    "Forbidden login type."
                 ));
             };
             let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {

From 1ea27c4f97bfc717f9753d8bbd91eafa395cdd2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Thu, 8 Jun 2023 20:48:35 +0200
Subject: [PATCH 1382/1727] fix: restricted room error is now FORBIDDEN

---
 src/api/client_server/membership.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs
index cd26372..11e37e6 100644
--- a/src/api/client_server/membership.rs
+++ b/src/api/client_server/membership.rs
@@ -106,6 +106,7 @@ pub async fn join_room_by_id_or_alias_route(
             );
             servers.push(room_id.server_name().to_owned());
+
             (servers, room_id)
         }
         Err(room_alias) => {
@@ -598,7 +599,7 @@ async fn join_room_by_id_helper(
     info!("send_join finished");
     if let Some(signed_raw) = &send_join_response.room_state.event {
-        info!("There is a signed event. This room is probably using restricted joins");
+        info!("There is a signed event. This room is probably using restricted joins. Adding signature to our event");
         let (signed_event_id, signed_value) =
             match gen_event_id_canonical_json(signed_raw, &room_version_id) {
                 Ok(t) => t,
@@ -901,7 +902,13 @@ async fn join_room_by_id_helper(
         Err(e) => e,
     };
-    if !restriction_rooms.is_empty() {
+    if !restriction_rooms.is_empty()
+        && servers
+            .iter()
+            .filter(|s| *s != services().globals.server_name())
+            .count()
+            > 0
+    {
         info!(
             "We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"
         );

From faa9208a3e484756f7e535ca0d51488fbf1e4dbe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Thu, 8 Jun 2023 20:51:34 +0200
Subject: [PATCH 1383/1727] cargo fmt

---
 src/api/client_server/session.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs
index b3328e4..8908fef 100644
--- a/src/api/client_server/session.rs
+++ b/src/api/client_server/session.rs
@@ -108,7 +108,7 @@ pub async fn login_route(body: Ruma) -> Result
Date: Sat, 10 Jun 2023 15:35:22 +0000
Subject: [PATCH 1384/1727] chore(ci): Adjust to rust version bumps

---
 .gitlab-ci.yml | 8 ++------
 Dockerfile | 6 ++++--
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 24db9f4..b7df56f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -110,7 +110,7 @@ docker:tags:

 cargo check:
   stage: test
-  image: docker.io/rust:1.69.0-bullseye
+  image: docker.io/rust:1.70.0-bullseye
   needs: []
   interruptible: true
   before_script:
@@ -135,11 +135,7 @@ test:cargo:
     - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
   script:
     - rustc --version && cargo --version # Print version info for debugging
-    - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
-  artifacts:
-    when: always
-    reports:
-      junit: report.xml
+    - "cargo test --color always --workspace --verbose --locked --no-fail-fast"

 test:clippy:
   extends: .test-shared-settings

diff --git a/Dockerfile b/Dockerfile
index 32ba81f..943f686 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,7 @@
 #
syntax=docker/dockerfile:1 -FROM docker.io/rust:1.69-bullseye AS builder +FROM docker.io/rust:1.70-bullseye AS base + +FROM base AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies @@ -37,7 +39,7 @@ COPY --from=builder /usr/src/conduit/target/release/conduit /conduit # --------------------------------------------------------------------------------------------------------------- # Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: # --------------------------------------------------------------------------------------------------------------- -FROM docker.io/rust:1.69-bullseye AS build-cargo-deb +FROM base AS build-cargo-deb RUN apt-get update && \ apt-get install -y --no-install-recommends \ From 15e60818c908e58d53580e3fd049e452b2133610 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 17 Jun 2023 17:02:10 -0700 Subject: [PATCH 1385/1727] pin nixos-unstable, update flake.lock `nixos-unstable` is the rolling release channel of NixOS. The default is the master branch, which doesn't always have a populated binary cache and so may result in compiling a bunch of stuff unnecessarily. --- flake.lock | 55 ++++++++++++++++++++++++++++++++++++------------------ flake.nix | 2 +- 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/flake.lock b/flake.lock index d76ff59..b69bb8c 100644 --- a/flake.lock +++ b/flake.lock @@ -12,11 +12,11 @@ "rust-overlay": "rust-overlay" }, "locked": { - "lastModified": 1680584903, - "narHash": "sha256-uraq+D3jcLzw/UVk0xMHcnfILfIMa0DLrtAEq2nNlxU=", + "lastModified": 1686621798, + "narHash": "sha256-FUwWszmSiDzUdTk8f69xwMoYlhdPaLvDaIYOE/y6VXc=", "owner": "ipetkov", "repo": "crane", - "rev": "65d3f6a3970cd46bef5eedfd458300f72c56b3c5", + "rev": "75f7d715f8088f741be9981405f6444e2d49efdd", "type": "github" }, "original": { @@ -33,11 +33,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1680607374, - "narHash": "sha256-U5iiPqbAanr+sQCCZ7zxYhwCXdcDpish8Uy4ELZeXM0=", + "lastModified": 1687004852, + "narHash": "sha256-wRSUs+v8xtIJaFlWO5NLFQjkq5+eYhxHHXnZKsZ9DpQ=", "owner": "nix-community", "repo": "fenix", - "rev": "e70d498e97017daa59363eafa054619d4fa160c3", + "rev": "df0a6e4ec44b4a276acfa5a96d2a83cb2dfdc791", "type": "github" }, "original": { @@ -63,12 +63,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1678901627, - "narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -79,15 +82,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1680652733, - "narHash": "sha256-FFG6Nai9M71C0Uc+D8TxyHoAjTplM0/9uWKsl7ALfUs=", + "lastModified": 1686960236, + "narHash": "sha256-AYCC9rXNLpUWzD9hm+askOfpliLEC9kwAo7ITJc4HIw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "cc5bde408572508efd1273852862d418bb313443", + "rev": "04af42f3b31dba0ef742d254456dc4c14eedac86", "type": "github" }, "original": { "owner": "NixOS", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } @@ -103,11 +107,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1680435407, - "narHash": "sha256-IPBtZCOh3BdrR+V77cL7r6WQnclWcZ/85BDYnmq/GnQ=", + "lastModified": 1686936697, + "narHash": 
"sha256-mCoPr1nNWKpsoGMBFaK/sswkLloRCZuoWi2a+OKs3vk=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "236576227a299fd19ba836b1834ab50c948af994", + "rev": "a5a71c75e62a0eaa1b42a376f7cf3d348cb5dec6", "type": "github" }, "original": { @@ -129,11 +133,11 @@ ] }, "locked": { - "lastModified": 1680488274, - "narHash": "sha256-0vYMrZDdokVmPQQXtFpnqA2wEgCCUXf5a3dDuDVshn0=", + "lastModified": 1685759304, + "narHash": "sha256-I3YBH6MS3G5kGzNuc1G0f9uYfTcNY9NYoRc3QsykLk4=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "7ec2ff598a172c6e8584457167575b3a1a5d80d8", + "rev": "c535b4f3327910c96dcf21851bbdd074d0760290", "type": "github" }, "original": { @@ -141,6 +145,21 @@ "repo": "rust-overlay", "type": "github" } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 970e847..669d90a 100644 --- a/flake.nix +++ b/flake.nix @@ -1,6 +1,6 @@ { inputs = { - nixpkgs.url = "github:NixOS/nixpkgs"; + nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; flake-utils.url = "github:numtide/flake-utils"; fenix = { From 4a7d3c7301bc86655299345f264cf151c17faceb Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 17 Jun 2023 17:04:11 -0700 Subject: [PATCH 1386/1727] upgrade rust in Cargo.toml/flake.nix Looks like this should've happened as part of !479. --- Cargo.toml | 2 +- flake.nix | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c925bf2..c50f3d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" # `nix flake update`. If you don't have Nix installed or otherwise don't know # how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in # the matrix room. 
-rust-version = "1.64.0" +rust-version = "1.70.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/flake.nix b/flake.nix index 669d90a..ba8e829 100644 --- a/flake.nix +++ b/flake.nix @@ -40,7 +40,7 @@ channel = cargoToml.package.rust-version; # THE rust-version HASH - sha256 = "sha256-8len3i8oTwJSOJZMosGGXHBL5BVuGQnWOT2St5YAUFU="; + sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; # The system's RocksDB From abd0a014e852d41d25320f6ccd19ac1de4156f96 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 17 Jun 2023 17:04:57 -0700 Subject: [PATCH 1387/1727] nixpkgs' rocksdb is too old :( --- flake.nix | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/flake.nix b/flake.nix index ba8e829..5de5621 100644 --- a/flake.nix +++ b/flake.nix @@ -43,10 +43,6 @@ sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; - # The system's RocksDB - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb_6_23}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb_6_23}/lib"; - # Shared between the package and the devShell nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook @@ -61,9 +57,7 @@ inherit stdenv - nativeBuildInputs - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + nativeBuildInputs; }; devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { @@ -71,10 +65,6 @@ # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; - inherit - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; - # Development tools nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ cargo From c7e0ea525a3c6f66072c3518bb8c533c87f1e3db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 25 Jun 2023 19:31:40 +0200 Subject: [PATCH 1388/1727] feat: WIP relationships and threads --- Cargo.lock | 597 ++++++++++--------- Cargo.toml | 8 +- Cross.toml | 23 - src/api/client_server/context.rs | 19 +- src/api/client_server/media.rs | 6 + src/api/client_server/message.rs | 8 +- src/api/client_server/mod.rs | 2 + src/api/client_server/relations.rs | 10 + src/api/client_server/threads.rs | 49 ++ src/api/client_server/unversioned.rs | 2 + src/api/server_server.rs | 4 + src/database/key_value/rooms/mod.rs | 1 + src/database/key_value/rooms/pdu_metadata.rs | 7 + src/database/key_value/rooms/threads.rs | 78 +++ src/database/key_value/rooms/timeline.rs | 21 +- src/database/mod.rs | 7 + src/main.rs | 1 + src/service/media/mod.rs | 6 +- src/service/mod.rs | 1 + src/service/pdu.rs | 30 +- src/service/pusher/mod.rs | 4 +- src/service/rooms/mod.rs | 3 + src/service/rooms/pdu_metadata/data.rs | 1 + src/service/rooms/pdu_metadata/mod.rs | 9 +- src/service/rooms/threads/data.rs | 15 + src/service/rooms/threads/mod.rs | 119 ++++ src/service/rooms/timeline/data.rs | 7 +- src/service/rooms/timeline/mod.rs | 68 ++- 28 files changed, 766 insertions(+), 340 deletions(-) delete mode 100644 Cross.toml create mode 100644 src/api/client_server/relations.rs create mode 100644 src/api/client_server/threads.rs create mode 100644 src/database/key_value/rooms/threads.rs create mode 100644 src/service/rooms/threads/data.rs create mode 100644 src/service/rooms/threads/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 9c86f7d..ebb47d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,33 +21,24 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.4" +name = "allocator-api2" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] +checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "arc-swap" @@ -63,9 +54,9 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "assign" @@ -79,12 +70,12 @@ version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" dependencies = [ - "brotli", - "flate2", "futures-core", "memchr", "pin-project-lite", "tokio", + "zstd", + "zstd-safe", ] [[package]] @@ -95,7 +86,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -118,7 +109,7 @@ checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "headers", @@ -137,7 +128,7 @@ dependencies = [ "sync_wrapper", "tokio", "tower", - "tower-http", + "tower-http 0.3.5", "tower-layer", "tower-service", ] @@ -160,9 +151,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.7" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bace45b270e36e3c27a190c65883de6dfc9f1d18c829907c127464815dc67b24" +checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" dependencies = [ "arc-swap", "bytes", @@ -171,10 +162,10 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "rustls", + "rustls 0.21.2", "rustls-pemfile 1.0.2", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tower-service", ] @@ -186,9 +177,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -211,7 +202,7 @@ version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ - "bitflags", + "bitflags 
1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -223,7 +214,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -232,6 +223,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" + [[package]] name = "blake2b_simd" version = "1.0.1" @@ -240,7 +237,7 @@ checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq 0.2.5", + "constant_time_eq 0.2.6", ] [[package]] @@ -261,32 +258,11 @@ dependencies = [ "generic-array", ] -[[package]] -name = "brotli" -version = "3.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytemuck" @@ -354,9 +330,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.0" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" +checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" dependencies = [ "clap_builder", "clap_derive", @@ -365,25 +341,25 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.0" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" +checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" dependencies = [ "anstyle", - "bitflags", + "bitflags 1.3.2", "clap_lex", ] [[package]] name = "clap_derive" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -445,7 +421,7 @@ dependencies = [ "tikv-jemallocator", "tokio", "tower", - "tower-http", + "tower-http 0.4.1", "tracing", "tracing-flame", "tracing-opentelemetry", @@ -467,9 +443,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "constant_time_eq" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" +checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" [[package]] name = "core-foundation" @@ -489,9 +465,9 @@ checksum = 
"e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -557,9 +533,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", @@ -580,9 +556,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -729,6 +705,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -752,14 +734,14 @@ dependencies = [ [[package]] name = "figment" -version = "0.10.8" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e56602b469b2201400dec66a66aec5a9b8761ee97cd1b8c96ab2483fcc16cc9" +checksum = "4547e226f4c9ab860571e070a9034192b3175580ecea38da34fcdb53a018c9a5" dependencies = [ "atomic", "pear", "serde", - "toml 0.5.11", + "toml", "uncased", "version_check", ] @@ -782,9 +764,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -855,7 +837,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -911,9 +893,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -948,7 +930,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -963,20 +945,21 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ "ahash", + "allocator-api2", ] [[package]] name = "hashlink" -version = "0.8.2" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" +checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.0", ] [[package]] @@ -986,7 +969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes", "headers-core", "http", @@ -1132,7 +1115,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1147,9 +1130,9 @@ checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "rustls", + "rustls 0.20.8", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", ] [[package]] @@ -1165,9 +1148,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1200,6 +1183,16 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "inlinable_string" version = "0.1.15" @@ -1214,14 +1207,14 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.3", "widestring", - "winapi", - "winreg 0.10.1", + "windows-sys 0.48.0", + "winreg 0.50.0", ] [[package]] @@ -1262,9 +1255,9 @@ checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" [[package]] name = "js-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1293,7 +1286,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "pem", "ring", "serde", @@ -1337,9 +1330,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libloading" @@ -1408,9 +1401,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -1418,12 +1411,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lru-cache" @@ -1485,9 +1475,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -1516,14 +1506,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1599,9 +1588,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -1660,7 +1649,7 @@ dependencies = [ "fnv", "futures-channel", "futures-util", - "indexmap", + "indexmap 1.9.3", "js-sys", "once_cell", "pin-project-lite", @@ -1726,15 +1715,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets", ] [[package]] @@ -1763,7 +1752,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -1783,9 +1772,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "persy" @@ -1820,7 +1809,7 @@ checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -1853,11 +1842,11 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "png" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaeebc51f9e7d2c150d3f3bfeb667f2aa985db5ef1e3d212847bdedb488beeaa" +checksum = 
"59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crc32fast", "fdeflate", "flate2", @@ -1872,12 +1861,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617feabb81566b593beb4886fb8c1f38064169dae4dccad0e3220160c3b37203" +checksum = "9825a04601d60621feed79c4e6b56d65db77cdca55cef43b46b0de1096d1c282" dependencies = [ "proc-macro2", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -1892,9 +1881,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.58" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +checksum = "363a6f739a0c0addeaf6ed75150b95743aa18643a3c6f40409ed7b6db3a6911f" dependencies = [ "unicode-ident", ] @@ -1907,7 +1896,7 @@ checksum = "606c4ba35817e2922a308af55ad51bab3645b59eae5c570d4a6cf07e36bd493b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", "version_check", "yansi", ] @@ -1920,9 +1909,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -1986,7 +1975,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -2004,7 +1993,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", ] [[package]] @@ -2013,20 +2011,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -2046,9 +2044,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" @@ -2072,14 +2070,14 @@ dependencies = [ "mime", 
"percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.20.8", "rustls-native-certs", "rustls-pemfile 0.2.1", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-socks", "url", "wasm-bindgen", @@ -2126,7 +2124,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "assign", "js_int", @@ -2144,7 +2142,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "js_int", "ruma-common", @@ -2155,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "assign", "bytes", @@ -2172,13 +2170,13 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "form_urlencoded", "http", - "indexmap", + "indexmap 1.9.3", "js_int", "js_option", "konst", @@ -2200,7 +2198,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "js_int", "ruma-common", @@ -2211,7 +2209,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "js_int", "thiserror", @@ -2220,7 +2218,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "js_int", "ruma-common", @@ -2230,7 +2228,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "once_cell", 
"proc-macro-crate", @@ -2238,14 +2236,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 1.0.109", - "toml 0.7.4", + "syn 2.0.21", + "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "js_int", "ruma-common", @@ -2256,15 +2254,15 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "ed25519-dalek", "pkcs8", "rand 0.7.3", "ruma-common", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "subslice", "thiserror", ] @@ -2272,7 +2270,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5#8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5" +source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" dependencies = [ "itertools", "js_int", @@ -2289,7 +2287,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2328,10 +2326,22 @@ dependencies = [ ] [[package]] -name = "rustls-native-certs" -version = "0.6.2" +name = "rustls" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile 1.0.2", @@ -2354,7 +2364,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -2400,7 +2420,7 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -2419,22 +2439,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = 
"9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -2444,7 +2464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53192e38d5c88564b924dbe9b60865ecbb71b81d38c4e61c817cffd3e36ef696" dependencies = [ "form_urlencoded", - "indexmap", + "indexmap 1.9.3", "itoa", "ryu", "serde", @@ -2452,9 +2472,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ "itoa", "ryu", @@ -2463,9 +2483,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -2484,11 +2504,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.21" +version = "0.9.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" +checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85" dependencies = [ - "indexmap", + "indexmap 2.0.0", "itoa", "ryu", "serde", @@ -2532,9 +2552,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", @@ -2614,6 +2634,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -2658,9 +2688,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.16" +version = "2.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +checksum = "1182caafaab7018eaea9b404afa8184c0baf42a04d5e10ae4f4843c2029c8aab" dependencies = [ "proc-macro2", "quote", @@ -2699,7 +2729,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -2767,9 +2797,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -2809,9 +2839,9 @@ checksum = 
"1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", @@ -2820,7 +2850,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.48.0", ] @@ -2833,7 +2863,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -2842,11 +2872,21 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls", + "rustls 0.20.8", "tokio", "webpki", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.2", + "tokio", +] + [[package]] name = "tokio-socks" version = "0.5.1" @@ -2886,18 +2926,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.11" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "toml" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" dependencies = [ "serde", "serde_spanned", @@ -2907,20 +2938,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.9" +version = "0.19.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -2948,9 +2979,28 @@ name = "tower-http" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags 1.3.2", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" dependencies = [ "async-compression", - "bitflags", + "bitflags 2.3.2", "bytes", "futures-core", "futures-util", @@ -2993,13 +3043,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -3140,9 +3190,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -3173,22 +3223,22 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] [[package]] name = "uuid" -version = "1.3.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -3211,11 +3261,10 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -3233,9 +3282,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3243,24 +3292,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -3270,9 +3319,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = 
"dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3280,28 +3329,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3325,9 +3374,9 @@ checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" @@ -3372,37 +3421,13 @@ dependencies = [ "windows_x86_64_msvc 0.42.2", ] -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets", ] [[package]] @@ -3506,9 +3531,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -3524,11 +3549,12 @@ dependencies = [ [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] @@ -3554,7 
+3580,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.21", ] [[package]] @@ -3566,6 +3592,25 @@ dependencies = [ "num-traits", ] +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" diff --git a/Cargo.toml b/Cargo.toml index c925bf2..12e9109 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,14 +19,14 @@ rust-version = "1.64.0" [dependencies] # Web framework -axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.4.7", features = ["tls-rustls"] } +axum = { version = "0.5.16", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum-server = { version = "0.5.1", features = ["tls-rustls"] } tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.3.5", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } +tower-http = { version = "0.4.1", features = ["add-extension", "cors", "compression-zstd", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "761771a317460f30590da170115d007892381e85", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/Cross.toml b/Cross.toml deleted file mode 100644 index 5d99a35..0000000 --- a/Cross.toml +++ /dev/null @@ -1,23 +0,0 @@ -[build.env] -# CI uses an S3 endpoint to store sccache artifacts, so their config needs to -# be available in the cross container as well -passthrough = [ - "RUSTC_WRAPPER", - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "SCCACHE_BUCKET", - "SCCACHE_ENDPOINT", - "SCCACHE_S3_USE_SSL", -] - -[target.aarch64-unknown-linux-musl] -image = 
"registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" - -[target.arm-unknown-linux-musleabihf] -image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" - -[target.armv7-unknown-linux-musleabihf] -image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" - -[target.x86_64-unknown-linux-musl] -image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb" diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 5a3013b..a824ea0 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -69,18 +69,18 @@ pub async fn get_context_route( lazy_loaded.insert(base_event.sender.as_str().to_owned()); } + // Use limit with maximum 100 + let limit = usize::try_from(body.limit) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid."))? + .min(100); + let base_event = base_event.to_room_event(); let events_before: Vec<_> = services() .rooms .timeline .pdus_until(sender_user, &room_id, base_token)? - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize - / 2, - ) + .take(limit / 2) .filter_map(|r| r.ok()) // Remove buggy events .filter(|(_, pdu)| { services() @@ -114,12 +114,7 @@ pub async fn get_context_route( .rooms .timeline .pdus_after(sender_user, &room_id, base_token)? - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize - / 2, - ) + .take(limit / 2) .filter_map(|r| r.ok()) // Remove buggy events .filter(|(_, pdu)| { services() diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 3410cc0..75f8e15 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, @@ -67,6 +69,8 @@ pub async fn get_remote_content( allow_remote: false, server_name: server_name.to_owned(), media_id, + timeout_ms: Duration::from_secs(20), + allow_redirect: false, }, ) .await?; @@ -194,6 +198,8 @@ pub async fn get_content_thumbnail_route( method: body.method.clone(), server_name: body.server_name.clone(), media_id: body.media_id.clone(), + timeout_ms: Duration::from_secs(20), + allow_redirect: false, }, ) .await?; diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index faf178d..dc2d994 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -133,8 +133,12 @@ pub async fn get_message_events_route( from, )?; - // Use limit or else 10 - let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .try_into() + .map_or(10_usize, |l: u32| l as usize) + .min(100); let next_token; diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 6ed17e7..4a77f23 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -24,6 +24,7 @@ mod state; mod sync; mod tag; mod thirdparty; +mod threads; mod to_device; mod typing; mod unversioned; @@ -56,6 +57,7 @@ pub use state::*; pub use sync::*; pub use tag::*; pub use thirdparty::*; +pub use threads::*; pub use to_device::*; 
pub use typing::*; pub use unversioned::*; diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs new file mode 100644 index 0000000..4d2af47 --- /dev/null +++ b/src/api/client_server/relations.rs @@ -0,0 +1,10 @@ +use crate::{services, Result, Ruma}; +use std::time::{Duration, SystemTime}; + +/// # `GET /_matrix/client/r0/todo` +pub async fn get_relating_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + todo!(); +} diff --git a/src/api/client_server/threads.rs b/src/api/client_server/threads.rs new file mode 100644 index 0000000..a095b42 --- /dev/null +++ b/src/api/client_server/threads.rs @@ -0,0 +1,49 @@ +use ruma::api::client::{error::ErrorKind, threads::get_threads}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/threads` +pub async fn get_threads_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .and_then(|l| l.try_into().ok()) + .unwrap_or(10) + .min(100); + + let from = if let Some(from) = &body.from { + from.parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))? + } else { + u64::MAX + }; + + let threads = services() + .rooms + .threads + .threads_until(sender_user, &body.room_id, from, &body.include)? + .take(limit) + .filter_map(|r| r.ok()) + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .unwrap_or(false) + }) + .collect::>(); + + let next_batch = threads.last().map(|(count, _)| count.to_string()); + + Ok(get_threads::v1::Response { + chunk: threads + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(), + next_batch, + }) +} diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 526598b..b4f03f4 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -23,6 +23,8 @@ pub async fn get_supported_versions_route( "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned(), + "v1.3".to_owned(), + "v1.4".to_owned(), ], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 961b658..c1c23a5 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,3 +1,4 @@ +#[allow(deprecated)] use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, service::pdu::{gen_event_id_canonical_json, PduBuilder}, @@ -497,6 +498,9 @@ async fn request_well_known(destination: &str) -> Option { .send() .await; debug!("Got well known response"); + if let Err(e) = &response { + error!("Well known error: {e:?}"); + } let text = response.ok()?.text().await; debug!("Got well known response text"); let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 406943e..e7b53d3 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -12,6 +12,7 @@ mod state; mod state_accessor; mod state_cache; mod state_compressor; +mod threads; mod timeline; mod user; diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 76ec734..4b3f810 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ 
b/src/database/key_value/rooms/pdu_metadata.rs @@ -5,6 +5,13 @@ use ruma::{EventId, RoomId}; use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { + fn add_relation(&self, from: u64, to: u64) -> Result<()> { + let mut key = from.to_be_bytes().to_vec(); + key.extend_from_slice(&to.to_be_bytes()); + self.fromto_relation.insert(&key, &[])?; + Ok(()) + } + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs new file mode 100644 index 0000000..4be289b --- /dev/null +++ b/src/database/key_value/rooms/threads.rs @@ -0,0 +1,78 @@ +use std::mem; + +use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; + +impl service::rooms::threads::Data for KeyValueDatabase { + fn threads_until<'a>( + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + until: u64, + include: &'a IncludeThreads, + ) -> Result> + 'a>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(until - 1).to_be_bytes()); + + Ok(Box::new( + self.threadid_userids + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pduid, users)| { + let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) + .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; + let mut pdu = services() + .rooms + .timeline + .get_pdu_from_id(&pduid)? + .ok_or_else(|| { + Error::bad_database("Invalid pduid reference in threadid_userids") + })?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((count, pdu)) + }), + )) + } + + fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { + let users = participants + .iter() + .map(|user| user.as_bytes()) + .collect::>() + .join(&[0xff][..]); + + self.threadid_userids.insert(&root_id, &users)?; + + Ok(()) + } + + fn get_participants(&self, root_id: &[u8]) -> Result>> { + if let Some(users) = self.threadid_userids.get(&root_id)? { + Ok(Some( + users + .split(|b| *b == 0xff) + .map(|bytes| { + UserId::parse(utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Invalid UserId bytes in threadid_userids.") + })?) + .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) + }) + .filter_map(|r| r.ok()) + .collect(), + )) + } else { + Ok(None) + } + } +} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index d9c4423..74e3e5c 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -198,19 +198,30 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Removes a pdu and creates a new one with the same id. 
- fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { + fn replace_pdu( + &self, + pdu_id: &[u8], + pdu_json: &CanonicalJsonObject, + pdu: &PduEvent, + ) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"), + &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"), )?; - Ok(()) } else { - Err(Error::BadRequest( + return Err(Error::BadRequest( ErrorKind::NotFound, "PDU does not exist.", - )) + )); } + + self.pdu_cache + .lock() + .unwrap() + .remove(&(*pdu.event_id).to_owned()); + + Ok(()) } /// Returns an iterator over all events and their tokens in a room that happened before the diff --git a/src/database/mod.rs b/src/database/mod.rs index 1415f68..b864ceb 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -80,6 +80,8 @@ pub struct KeyValueDatabase { pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count pub(super) publicroomids: Arc, + pub(super) threadid_userids: Arc, // ThreadId = RoomId + Count + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount /// Participating servers in a room. @@ -128,6 +130,8 @@ pub struct KeyValueDatabase { pub(super) eventid_outlierpdu: Arc, pub(super) softfailedeventids: Arc, + /// ShortEventId + ShortEventId -> (). + pub(super) fromto_relation: Arc, /// RoomId + EventId -> Parent PDU EventId. pub(super) referencedevents: Arc, @@ -302,6 +306,8 @@ impl KeyValueDatabase { aliasid_alias: builder.open_tree("aliasid_alias")?, publicroomids: builder.open_tree("publicroomids")?, + threadid_userids: builder.open_tree("threadid_userids")?, + tokenids: builder.open_tree("tokenids")?, roomserverids: builder.open_tree("roomserverids")?, @@ -342,6 +348,7 @@ impl KeyValueDatabase { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, softfailedeventids: builder.open_tree("softfailedeventids")?, + fromto_relation: builder.open_tree("fromto_relation")?, referencedevents: builder.open_tree("referencedevents")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, diff --git a/src/main.rs b/src/main.rs index 59e82a7..edb7640 100644 --- a/src/main.rs +++ b/src/main.rs @@ -383,6 +383,7 @@ fn routes() -> Router { .ruma_route(client_server::set_pushers_route) // .ruma_route(client_server::third_party_route) .ruma_route(client_server::upgrade_room_route) + .ruma_route(client_server::get_threads_route) .ruma_route(server_server::get_server_version_route) .route( "/_matrix/key/v2/server", diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 9393753..fc8fa56 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -8,7 +8,7 @@ use image::imageops::FilterType; use tokio::{ fs::File, - io::{AsyncReadExt, AsyncWriteExt}, + io::{AsyncReadExt, AsyncWriteExt, BufReader}, }; pub struct FileMeta { @@ -70,7 +70,9 @@ impl Service { { let path = services().globals.get_media_file(&key); let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; + BufReader::new(File::open(path).await?) 
+ .read_to_end(&mut file) + .await?; Ok(Some(FileMeta { content_disposition, diff --git a/src/service/mod.rs b/src/service/mod.rs index eea397f..3b48810 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -97,6 +97,7 @@ impl Services { db, lasttimelinecount_cache: Mutex::new(HashMap::new()), }, + threads: rooms::threads::Service { db }, user: rooms::user::Service { db }, }, transaction_ids: transaction_ids::Service { db }, diff --git a/src/service/pdu.rs b/src/service/pdu.rs index a497b11..9d284c0 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,9 +1,9 @@ use crate::Error; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, - StateEvent, TimelineEventType, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, + AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, + AnyTimelineEvent, StateEvent, TimelineEventType, }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, @@ -175,6 +175,30 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] + pub fn to_message_like_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + #[tracing::instrument(skip(self))] pub fn to_state_event(&self) -> Raw { let mut json = json!({ diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 5933c03..d4acaa5 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -162,9 +162,7 @@ impl Service { &pdu.room_id, )? 
{ let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, + Action::Notify => true, Action::SetTweak(tweak) => { tweaks.push(tweak.clone()); continue; diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 8956e4d..61304d1 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -13,6 +13,7 @@ pub mod state; pub mod state_accessor; pub mod state_cache; pub mod state_compressor; +pub mod threads; pub mod timeline; pub mod user; @@ -32,6 +33,7 @@ pub trait Data: + state_cache::Data + state_compressor::Data + timeline::Data + + threads::Data + user::Data { } @@ -53,5 +55,6 @@ pub struct Service { pub state_cache: state_cache::Service, pub state_compressor: state_compressor::Service, pub timeline: timeline::Service, + pub threads: threads::Service, pub user: user::Service, } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index b157938..5577b3e 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -4,6 +4,7 @@ use crate::Result; use ruma::{EventId, RoomId}; pub trait Data: Send + Sync { + fn add_relation(&self, from: u64, to: u64) -> Result<()>; fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index b816678..a82b9a6 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -4,13 +4,20 @@ use std::sync::Arc; pub use data::Data; use ruma::{EventId, RoomId}; -use crate::Result; +use crate::{services, Result}; pub struct Service { pub db: &'static dyn Data, } impl Service { + #[tracing::instrument(skip(self, from, to))] + pub fn add_relation(&self, from: &EventId, to: &EventId) -> Result<()> { + let from = services().rooms.short.get_or_create_shorteventid(from)?; + let to = services().rooms.short.get_or_create_shorteventid(to)?; + self.db.add_relation(from, to) + } + #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs new file mode 100644 index 0000000..9221e8e --- /dev/null +++ b/src/service/rooms/threads/data.rs @@ -0,0 +1,15 @@ +use crate::{PduEvent, Result}; +use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; + +pub trait Data: Send + Sync { + fn threads_until<'a>( + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + until: u64, + include: &'a IncludeThreads, + ) -> Result> + 'a>>; + + fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()>; + fn get_participants(&self, root_id: &[u8]) -> Result>>; +} diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs new file mode 100644 index 0000000..241927a --- /dev/null +++ b/src/service/rooms/threads/mod.rs @@ -0,0 +1,119 @@ +mod data; +use std::sync::Arc; + +pub use data::Data; +use ruma::{ + api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, + events::{relation::BundledThread, StateEventType}, + uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, +}; +use serde::Deserialize; 
+use serde_json::json; + +use crate::{services, utils, Error, PduEvent, Result}; + +use super::timeline::PduCount; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn threads_until<'a>( + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + until: u64, + include: &'a IncludeThreads, + ) -> Result> + 'a> { + self.db.threads_until(user_id, room_id, until, include) + } + + pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + let root_id = &services() + .rooms + .timeline + .get_pdu_id(root_event_id)? + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid event id in thread message", + ) + })?; + + let root_pdu = services() + .rooms + .timeline + .get_pdu_from_id(root_id)? + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") + })?; + + let mut root_pdu_json = services() + .rooms + .timeline + .get_pdu_json_from_id(root_id)? + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") + })?; + + if let CanonicalJsonValue::Object(unsigned) = root_pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) + { + if let Some(mut relations) = unsigned + .get("m.relations") + .and_then(|r| r.as_object()) + .and_then(|r| r.get("m.thread")) + .and_then(|relations| { + serde_json::from_value::(relations.clone().into()).ok() + }) + { + // Thread already existed + relations.count += uint!(1); + relations.latest_event = pdu.to_message_like_event(); + + let content = serde_json::to_value(relations).expect("to_value always works"); + + unsigned.insert( + "m.relations".to_owned(), + json!({ "m.thread": content }) + .try_into() + .expect("thread is valid json"), + ); + } else { + // New thread + let relations = BundledThread { + latest_event: pdu.to_message_like_event(), + count: uint!(1), + current_user_participated: true, + }; + + let content = serde_json::to_value(relations).expect("to_value always works"); + + unsigned.insert( + "m.relations".to_owned(), + json!({ "m.thread": content }) + .try_into() + .expect("thread is valid json"), + ); + } + + services() + .rooms + .timeline + .replace_pdu(root_id, &root_pdu_json, &root_pdu)?; + } + + let mut users = Vec::new(); + if let Some(userids) = self.db.get_participants(&root_id)? { + users.extend_from_slice(&userids); + users.push(pdu.sender.clone()); + } else { + users.push(root_pdu.sender); + users.push(pdu.sender.clone()); + } + + self.db.update_participants(root_id, &users) + } +} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 193f384..afa2cfb 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -57,7 +57,12 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Removes a pdu and creates a new one with the same id. - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; + fn replace_pdu( + &self, + pdu_id: &[u8], + pdu_json: &CanonicalJsonObject, + pdu: &PduEvent, + ) -> Result<()>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
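For orientation, the new add_to_thread helper in src/service/rooms/threads/mod.rs above keeps a bundled thread aggregation inside the thread root's unsigned object. A rough, abridged sketch of the shape it maintains (event IDs, sender, and content here are illustrative; the field names follow the BundledThread serialization and to_message_like_event output used above):

    "unsigned": {
      "m.relations": {
        "m.thread": {
          "latest_event": {
            "event_id": "$latest_reply",
            "type": "m.room.message",
            "sender": "@alice:example.org",
            "content": { "body": "latest reply in the thread" }
          },
          "count": 2,
          "current_user_participated": true
        }
      }
    }

Each new reply bumps count, replaces latest_event with the reply serialized via to_message_like_event, and extends the participant list stored under threadid_userids with the reply's sender.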
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2ffd3a6..0547fcf 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -18,13 +18,13 @@ use ruma::{ events::{ push_rules::PushRulesEvent, room::{ - create::RoomCreateEventContent, member::MembershipState, + create::RoomCreateEventContent, encrypted::Relation, member::MembershipState, power_levels::RoomPowerLevelsEventContent, }, GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - serde::Base64, + serde::{Base64, JsonObject}, state_res, state_res::{Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, @@ -197,8 +197,13 @@ impl Service { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu) + pub fn replace_pdu( + &self, + pdu_id: &[u8], + pdu_json: &CanonicalJsonObject, + pdu: &PduEvent, + ) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu_json, pdu) } /// Creates a new persisted data unit and adds it to a room. @@ -352,9 +357,7 @@ impl Service { &pdu.room_id, )? { match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, + Action::Notify => notify = true, Action::SetTweak(Tweak::Highlight(true)) => { highlight = true; } @@ -457,6 +460,50 @@ impl Service { _ => {} } + // Update Relationships + #[derive(Deserialize)] + struct ExtractRelatesTo { + #[serde(rename = "m.relates_to")] + relates_to: Relation, + } + + #[derive(Clone, Debug, Deserialize)] + struct ExtractEventId { + event_id: OwnedEventId, + } + #[derive(Clone, Debug, Deserialize)] + struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractEventId, + } + + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + services() + .rooms + .pdu_metadata + .add_relation(&pdu.event_id, &content.relates_to.event_id)?; + } + + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + match content.relates_to { + Relation::Reply { in_reply_to } => { + // We need to do it again here, because replies don't have + // event_id as a top level field + services() + .rooms + .pdu_metadata + .add_relation(&pdu.event_id, &in_reply_to.event_id)?; + } + Relation::Thread(thread) => { + services() + .rooms + .threads + .add_to_thread(&thread.event_id, pdu)?; + } + _ => {} // TODO: Aggregate other types + } + } + for appservice in services().appservice.all()? { if services() .rooms @@ -957,12 +1004,17 @@ impl Service { /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { + // TODO: Don't reserialize, keep original json if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? 
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; + self.replace_pdu( + &pdu_id, + &utils::to_canonical_object(&pdu).expect("PDU is an object"), + &pdu, + )?; } // If event does not exist, just noop Ok(()) From 15cc801840465ec5bfd309deecd09b484dd27734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 25 Jun 2023 23:43:19 +0200 Subject: [PATCH 1389/1727] Disable compression, see https://en.wikipedia.org/wiki/BREACH --- Cargo.lock | 36 ------------------------------------ Cargo.toml | 2 +- src/main.rs | 1 - 3 files changed, 1 insertion(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebb47d9..b33a7d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,20 +64,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-compression" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" -dependencies = [ - "futures-core", - "memchr", - "pin-project-lite", - "tokio", - "zstd", - "zstd-safe", -] - [[package]] name = "async-trait" version = "0.1.68" @@ -2999,7 +2985,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" dependencies = [ - "async-compression", "bitflags 2.3.2", "bytes", "futures-core", @@ -3008,8 +2993,6 @@ dependencies = [ "http-body", "http-range-header", "pin-project-lite", - "tokio", - "tokio-util", "tower", "tower-layer", "tower-service", @@ -3592,25 +3575,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "zstd" -version = "0.11.2+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "5.0.2+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" -dependencies = [ - "libc", - "zstd-sys", -] - [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" diff --git a/Cargo.toml b/Cargo.toml index 12e9109..6055563 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ rust-version = "1.64.0" axum = { version = "0.5.16", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } axum-server = { version = "0.5.1", features = ["tls-rustls"] } tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.1", features = ["add-extension", "cors", "compression-zstd", "sensitive-headers", "trace", "util"] } +tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/main.rs b/src/main.rs index edb7640..20fab91 100644 --- a/src/main.rs +++ b/src/main.rs @@ -159,7 +159,6 @@ async fn run_server() -> io::Result<()> { tracing::info_span!("http_request", %path) }), ) - .compression() .layer(axum::middleware::from_fn(unrecognized_method)) .layer( 
CorsLayer::new() From 49a0f3a60ddb1b707c97218e5ccc7d747b4f34a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 26 Jun 2023 08:33:31 +0200 Subject: [PATCH 1390/1727] fix: /context for element android. start and end must be set even with limit=0 --- src/api/client_server/context.rs | 14 ++++++++++---- src/api/server_server.rs | 3 ++- src/service/rooms/threads/mod.rs | 11 ++++------- src/service/rooms/timeline/mod.rs | 2 +- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index a824ea0..e70f9f1 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -103,7 +103,10 @@ pub async fn get_context_route( } } - let start_token = events_before.last().map(|(count, _)| count.stringify()); + let start_token = events_before + .last() + .map(|(count, _)| count.stringify()) + .unwrap_or_else(|| base_token.stringify()); let events_before: Vec<_> = events_before .into_iter() @@ -156,7 +159,10 @@ pub async fn get_context_route( .state_full_ids(shortstatehash) .await?; - let end_token = events_after.last().map(|(count, _)| count.stringify()); + let end_token = events_after + .last() + .map(|(count, _)| count.stringify()) + .unwrap_or_else(|| base_token.stringify()); let events_after: Vec<_> = events_after .into_iter() @@ -193,8 +199,8 @@ pub async fn get_context_route( } let resp = get_context::v3::Response { - start: start_token, - end: end_token, + start: Some(start_token), + end: Some(end_token), events_before, event: Some(base_event), events_after, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index c1c23a5..5e218be 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,4 +1,5 @@ -#[allow(deprecated)] +#![allow(deprecated)] + use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, service::pdu::{gen_event_id_canonical_json, PduBuilder}, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 241927a..fb70383 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,18 +1,15 @@ mod data; -use std::sync::Arc; pub use data::Data; use ruma::{ api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, - events::{relation::BundledThread, StateEventType}, - uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, + events::relation::BundledThread, + uint, CanonicalJsonValue, EventId, RoomId, UserId, }; -use serde::Deserialize; + use serde_json::json; -use crate::{services, utils, Error, PduEvent, Result}; - -use super::timeline::PduCount; +use crate::{services, Error, PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 0547fcf..625d346 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -24,7 +24,7 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - serde::{Base64, JsonObject}, + serde::Base64, state_res, state_res::{Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, From db6def8800e12a778dd0c1117c7a0623deb76984 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 26 Jun 2023 09:15:14 +0200 Subject: [PATCH 1391/1727] fix: send correct bearer token to appservices --- src/api/appservice_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
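
One detail worth spelling out from the get_context_route change above: with limit=0 the events_before and events_after vectors are empty, so the response's start and end tokens used to come back absent, which Element Android rejects; the fix always falls back to the base event's own token. A minimal stand-alone sketch of that fallback, with plain u64 counters standing in for Conduit's PduCount/stringify pair:

// Pick the pagination token of the last event in the window, or fall back
// to the base event's token when the window is empty (e.g. limit = 0).
fn token_or_base(events: &[(u64, &str)], base_token: u64) -> String {
    events
        .last()
        .map(|(count, _)| count.to_string())
        .unwrap_or_else(|| base_token.to_string())
}

fn main() {
    let base_token = 42u64;

    let events_before: Vec<(u64, &str)> = Vec::new(); // limit = 0
    assert_eq!(token_or_base(&events_before, base_token), "42");

    let events_after = vec![(43u64, "$later1"), (44u64, "$later2")];
    assert_eq!(token_or_base(&events_after, base_token), "44");

    println!("start and end tokens are always present");
}
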
diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index dc319e2..082a1bc 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -18,7 +18,7 @@ where let mut http_request = request .try_into_http_request::( destination, - SendAccessToken::IfRequired(""), + SendAccessToken::IfRequired(hs_token), &[MatrixVersion::V1_0], ) .unwrap() From 72eb1972c18307d77c9dbfaddc2cb92bd9f38efa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 26 Jun 2023 12:38:51 +0200 Subject: [PATCH 1392/1727] Add relations endpoints, edits and threads work now --- src/api/client_server/mod.rs | 2 + src/api/client_server/relations.rs | 150 ++++++++++++++++- src/database/key_value/rooms/pdu_metadata.rs | 59 ++++++- src/database/mod.rs | 4 +- src/main.rs | 3 + src/service/rooms/pdu_metadata/data.rs | 11 +- src/service/rooms/pdu_metadata/mod.rs | 161 ++++++++++++++++++- src/service/rooms/timeline/mod.rs | 24 ++- 8 files changed, 385 insertions(+), 29 deletions(-) diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 4a77f23..2ab3a98 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -16,6 +16,7 @@ mod profile; mod push; mod read_marker; mod redact; +mod relations; mod report; mod room; mod search; @@ -49,6 +50,7 @@ pub use profile::*; pub use push::*; pub use read_marker::*; pub use redact::*; +pub use relations::*; pub use report::*; pub use room::*; pub use search::*; diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index 4d2af47..a7cea78 100644 --- a/src/api/client_server/relations.rs +++ b/src/api/client_server/relations.rs @@ -1,10 +1,146 @@ -use crate::{services, Result, Ruma}; -use std::time::{Duration, SystemTime}; +use ruma::api::client::relations::{ + get_relating_events, get_relating_events_with_rel_type, + get_relating_events_with_rel_type_and_event_type, +}; -/// # `GET /_matrix/client/r0/todo` -pub async fn get_relating_events_route( - body: Ruma, -) -> Result { +use crate::{service::rooms::timeline::PduCount, services, Result, Ruma}; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` +pub async fn get_relating_events_with_rel_type_and_event_type_route( + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - todo!(); + + let from = match body.from.clone() { + Some(from) => PduCount::try_from_string(&from)?, + None => match ruma::api::Direction::Backward { + // TODO: fix ruma so `body.dir` exists + ruma::api::Direction::Forward => PduCount::min(), + ruma::api::Direction::Backward => PduCount::max(), + }, + }; + + let to = body + .to + .as_ref() + .and_then(|t| PduCount::try_from_string(&t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .and_then(|u| u32::try_from(u).ok()) + .map_or(10_usize, |u| u as usize) + .min(100); + + let res = services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + Some(body.event_type.clone()), + Some(body.rel_type.clone()), + from, + to, + limit, + )?; + + Ok( + get_relating_events_with_rel_type_and_event_type::v1::Response { + chunk: res.chunk, + next_batch: res.next_batch, + prev_batch: res.prev_batch, + }, + ) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` +pub async fn get_relating_events_with_rel_type_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is 
authenticated"); + + let from = match body.from.clone() { + Some(from) => PduCount::try_from_string(&from)?, + None => match ruma::api::Direction::Backward { + // TODO: fix ruma so `body.dir` exists + ruma::api::Direction::Forward => PduCount::min(), + ruma::api::Direction::Backward => PduCount::max(), + }, + }; + + let to = body + .to + .as_ref() + .and_then(|t| PduCount::try_from_string(&t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .and_then(|u| u32::try_from(u).ok()) + .map_or(10_usize, |u| u as usize) + .min(100); + + let res = services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + None, + Some(body.rel_type.clone()), + from, + to, + limit, + )?; + + Ok(get_relating_events_with_rel_type::v1::Response { + chunk: res.chunk, + next_batch: res.next_batch, + prev_batch: res.prev_batch, + }) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` +pub async fn get_relating_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let from = match body.from.clone() { + Some(from) => PduCount::try_from_string(&from)?, + None => match ruma::api::Direction::Backward { + // TODO: fix ruma so `body.dir` exists + ruma::api::Direction::Forward => PduCount::min(), + ruma::api::Direction::Backward => PduCount::max(), + }, + }; + + let to = body + .to + .as_ref() + .and_then(|t| PduCount::try_from_string(&t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .and_then(|u| u32::try_from(u).ok()) + .map_or(10_usize, |u| u as usize) + .min(100); + + services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + None, + None, + from, + to, + limit, + ) } diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 4b3f810..0641f9d 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,17 +1,64 @@ -use std::sync::Arc; +use std::{mem, sync::Arc}; -use ruma::{EventId, RoomId}; +use ruma::{EventId, RoomId, UserId}; -use crate::{database::KeyValueDatabase, service, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::timeline::PduCount}, + services, utils, Error, PduEvent, Result, +}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn add_relation(&self, from: u64, to: u64) -> Result<()> { - let mut key = from.to_be_bytes().to_vec(); - key.extend_from_slice(&to.to_be_bytes()); - self.fromto_relation.insert(&key, &[])?; + let mut key = to.to_be_bytes().to_vec(); + key.extend_from_slice(&from.to_be_bytes()); + self.tofrom_relation.insert(&key, &[])?; Ok(()) } + fn relations_until<'a>( + &'a self, + user_id: &'a UserId, + shortroomid: u64, + target: u64, + until: PduCount, + ) -> Result> + 'a>> { + let prefix = target.to_be_bytes().to_vec(); + let mut current = prefix.clone(); + + let count_raw = match until { + PduCount::Normal(x) => x - 1, + PduCount::Backfilled(x) => { + current.extend_from_slice(&0_u64.to_be_bytes()); + u64::MAX - x - 1 + } + }; + current.extend_from_slice(&count_raw.to_be_bytes()); + + Ok(Box::new( + self.tofrom_relation + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(tofrom, _data)| { + let from = utils::u64_from_bytes(&tofrom[(mem::size_of::())..]) + .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; + 
+ let mut pduid = shortroomid.to_be_bytes().to_vec(); + pduid.extend_from_slice(&from.to_be_bytes()); + + let mut pdu = services() + .rooms + .timeline + .get_pdu_from_id(&pduid)? + .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((PduCount::Normal(from), pdu)) + }), + )) + } + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/mod.rs b/src/database/mod.rs index b864ceb..5d89d4a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -131,7 +131,7 @@ pub struct KeyValueDatabase { pub(super) softfailedeventids: Arc, /// ShortEventId + ShortEventId -> (). - pub(super) fromto_relation: Arc, + pub(super) tofrom_relation: Arc, /// RoomId + EventId -> Parent PDU EventId. pub(super) referencedevents: Arc, @@ -348,7 +348,7 @@ impl KeyValueDatabase { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, softfailedeventids: builder.open_tree("softfailedeventids")?, - fromto_relation: builder.open_tree("fromto_relation")?, + tofrom_relation: builder.open_tree("tofrom_relation")?, referencedevents: builder.open_tree("referencedevents")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, diff --git a/src/main.rs b/src/main.rs index 20fab91..f9f88f4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -383,6 +383,9 @@ fn routes() -> Router { // .ruma_route(client_server::third_party_route) .ruma_route(client_server::upgrade_room_route) .ruma_route(client_server::get_threads_route) + .ruma_route(client_server::get_relating_events_with_rel_type_and_event_type_route) + .ruma_route(client_server::get_relating_events_with_rel_type_route) + .ruma_route(client_server::get_relating_events_route) .ruma_route(server_server::get_server_version_route) .route( "/_matrix/key/v2/server", diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 5577b3e..6c4cb3c 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,10 +1,17 @@ use std::sync::Arc; -use crate::Result; -use ruma::{EventId, RoomId}; +use crate::{service::rooms::timeline::PduCount, PduEvent, Result}; +use ruma::{EventId, RoomId, UserId}; pub trait Data: Send + Sync { fn add_relation(&self, from: u64, to: u64) -> Result<()>; + fn relations_until<'a>( + &'a self, + user_id: &'a UserId, + room_id: u64, + target: u64, + until: PduCount, + ) -> Result> + 'a>>; fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index a82b9a6..9ce74f4 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,20 +2,169 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, RoomId}; +use ruma::{ + api::client::relations::get_relating_events, + events::{relation::RelationType, TimelineEventType}, + EventId, RoomId, UserId, +}; +use serde::Deserialize; -use crate::{services, Result}; +use crate::{services, PduEvent, Result}; + +use super::timeline::PduCount; pub struct Service { pub db: &'static dyn Data, } +#[derive(Clone, Debug, 
Deserialize)] +struct ExtractRelType { + rel_type: RelationType, +} +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractRelType, +} + impl Service { #[tracing::instrument(skip(self, from, to))] - pub fn add_relation(&self, from: &EventId, to: &EventId) -> Result<()> { - let from = services().rooms.short.get_or_create_shorteventid(from)?; - let to = services().rooms.short.get_or_create_shorteventid(to)?; - self.db.add_relation(from, to) + pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> { + match (from, to) { + (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), + _ => { + // TODO: Relations with backfilled pdus + + Ok(()) + } + } + } + + pub fn paginate_relations_with_filter( + &self, + sender_user: &UserId, + room_id: &RoomId, + target: &EventId, + filter_event_type: Option, + filter_rel_type: Option, + from: PduCount, + to: Option, + limit: usize, + ) -> Result { + let next_token; + + //TODO: Fix ruma: match body.dir { + match ruma::api::Direction::Backward { + ruma::api::Direction::Forward => { + let events_after: Vec<_> = services() + .rooms + .pdu_metadata + .relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after + .filter(|r| { + r.as_ref().map_or(true, |(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::( + pdu.content.get(), + ) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } + }) + }) + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .collect(); + + next_token = events_after.last().map(|(count, _)| count).copied(); + + let events_after: Vec<_> = events_after + .into_iter() + .rev() // relations are always most recent first + .map(|(_, pdu)| pdu.to_message_like_event()) + .collect(); + + Ok(get_relating_events::v1::Response { + chunk: events_after, + next_batch: next_token.map(|t| t.stringify()), + prev_batch: Some(from.stringify()), + }) + } + ruma::api::Direction::Backward => { + let events_before: Vec<_> = services() + .rooms + .pdu_metadata + .relations_until(sender_user, &room_id, target, from)? 
+ .filter(|r| { + r.as_ref().map_or(true, |(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::( + pdu.content.get(), + ) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } + }) + }) + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .collect(); + + next_token = events_before.last().map(|(count, _)| count).copied(); + + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_message_like_event()) + .collect(); + + Ok(get_relating_events::v1::Response { + chunk: events_before, + next_batch: next_token.map(|t| t.stringify()), + prev_batch: Some(from.stringify()), + }) + } + } + } + + pub fn relations_until<'a>( + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + target: &'a EventId, + until: PduCount, + ) -> Result> + 'a> { + let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; + let target = match services().rooms.timeline.get_pdu_count(target)? { + Some(PduCount::Normal(c)) => c, + // TODO: Support backfilled relations + _ => 0, // This will result in an empty iterator + }; + self.db.relations_until(user_id, room_id, target, until) } #[tracing::instrument(skip(self, room_id, event_ids))] diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 625d346..2356a00 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -478,10 +478,16 @@ impl Service { } if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - services() + if let Some(related_pducount) = services() .rooms - .pdu_metadata - .add_relation(&pdu.event_id, &content.relates_to.event_id)?; + .timeline + .get_pdu_count(&content.relates_to.event_id)? + { + services() + .rooms + .pdu_metadata + .add_relation(PduCount::Normal(count2), related_pducount)?; + } } if let Ok(content) = serde_json::from_str::(pdu.content.get()) { @@ -489,10 +495,16 @@ impl Service { Relation::Reply { in_reply_to } => { // We need to do it again here, because replies don't have // event_id as a top level field - services() + if let Some(related_pducount) = services() .rooms - .pdu_metadata - .add_relation(&pdu.event_id, &in_reply_to.event_id)?; + .timeline + .get_pdu_count(&in_reply_to.event_id)? 
+ { + services() + .rooms + .pdu_metadata + .add_relation(PduCount::Normal(count2), related_pducount)?; + } } Relation::Thread(thread) => { services() From 26b8605fa0483775acaa1a41dfada24d01cabb6f Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Mon, 26 Jun 2023 22:06:17 +0200 Subject: [PATCH 1393/1727] ci: Adjust to current docker --- .gitlab-ci.yml | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b7df56f..d2da91b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,19 +16,15 @@ variables: .docker-shared-settings: stage: "build docker image" - image: - name: jdrouet/docker-with-buildx:20.10.21-0.9.1 - pull_policy: if-not-present needs: [] tags: [ "docker" ] variables: # Docker in Docker: - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - # Famedly runners use BTRFS, overlayfs and overlay2 often break jobs - DOCKER_DRIVER: btrfs + DOCKER_BUILDKIT: 1 + image: + name: docker.io/docker services: - - name: docker:dind + - name: docker.io/docker:dind alias: docker script: - apk add openssh-client @@ -104,6 +100,13 @@ docker:tags: TAG: "matrix-conduit:$CI_COMMIT_TAG" +docker build debugging: + extends: .docker-shared-settings + rules: + - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/" + variables: + TAG: "matrix-conduit-docker-tests:latest" + # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # From 91180e011db93230aabec51e2504765303e9379a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 26 Jun 2023 23:10:26 +0200 Subject: [PATCH 1394/1727] bump ruma --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b33a7d9..19f63b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "assign", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "js_int", "ruma-common", @@ -2139,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "assign", "bytes", @@ -2156,7 +2156,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "base64 0.21.2", "bytes", @@ -2184,7 +2184,7 @@ dependencies = [ [[package]] name = 
"ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "js_int", "ruma-common", @@ -2195,7 +2195,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "js_int", "thiserror", @@ -2204,7 +2204,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "js_int", "ruma-common", @@ -2214,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "once_cell", "proc-macro-crate", @@ -2229,7 +2229,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "js_int", "ruma-common", @@ -2240,7 +2240,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "base64 0.21.2", "ed25519-dalek", @@ -2256,7 +2256,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=761771a317460f30590da170115d007892381e85#761771a317460f30590da170115d007892381e85" +source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index f024b06..3164842 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "761771a317460f30590da170115d007892381e85", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = 
"https://github.com/ruma/ruma", rev = "de9a5a6ecca197e59623c210bd21f53055f83568", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From 7c6d25dcd165ffa3535ba103ab5ffb4acbe8c558 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 26 Jun 2023 21:57:59 +0200 Subject: [PATCH 1395/1727] Do state res even if the event soft fails --- src/service/rooms/event_handler/mod.rs | 310 ++++++++++--------------- 1 file changed, 129 insertions(+), 181 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index b01a282..800d849 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -38,6 +38,8 @@ use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; +use super::state_compressor::CompressedStateEvent; + pub struct Service; impl Service { @@ -62,9 +64,8 @@ impl Service { /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by /// doing state res where one of the inputs was a previously trusted set of state, don't just /// trust a set of state we got from a remote) - /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" - /// it - /// 14. Use state resolution to find new room state + /// 13. Use state resolution to find new room state + /// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( @@ -304,7 +305,7 @@ impl Service { ) { Err(e) => { // Drop - warn!("Dropping bad event {}: {}", event_id, e); + warn!("Dropping bad event {}: {}", event_id, e,); return Err(Error::BadRequest( ErrorKind::InvalidParam, "Signature verification failed", @@ -735,8 +736,9 @@ impl Service { } info!("Auth check succeeded"); - // We start looking at current room state now, so lets lock the room + // 13. Use state resolution to find new room state + // We start looking at current room state now, so lets lock the room let mutex_state = Arc::clone( services() .globals @@ -782,7 +784,40 @@ impl Service { }) .collect::>()?; - // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + if incoming_pdu.state_key.is_some() { + info!("Preparing for stateres to derive new room state"); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + + let new_room_state = self + .resolve_state(room_id, room_version_id, state_after) + .await?; + + // Set the new room state to the resolved state + info!("Forcing new room state"); + + let (sstatehash, new, removed) = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; + + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; + } + + // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it info!("Starting soft fail auth check"); let auth_events = services().rooms.state.get_auth_events( @@ -823,181 +858,6 @@ impl Service { )); } - if incoming_pdu.state_key.is_some() { - info!("Loading current room state ids"); - let current_sstatehash = services() - .rooms - .state - .get_room_shortstatehash(room_id)? - .expect("every room has state"); - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_sstatehash) - .await?; - - info!("Preparing for stateres to derive new room state"); - let mut extremity_sstatehashes = HashMap::new(); - - info!(?extremities, "Loading extremities"); - for id in &extremities { - match services().rooms.timeline.get_pdu(id)? { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - services() - .rooms - .state_accessor - .pdu_shortstatehash(&leaf_pdu.event_id)? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - Error::bad_database("Found pdu with no statehash in db.") - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err(Error::BadDatabase("Missing state snapshot.")); - } - } - } - - let mut fork_states = Vec::new(); - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - )?; - - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. 
Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - panic!("State is empty"); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - info!("State resolution trivial"); - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(*k, id) - }) - .collect::>()? - } else { - info!("Loading auth chains"); - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - services() - .rooms - .auth_chain - .get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - ) - .await? - .collect(), - ); - } - - info!("Loading fork states"); - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - services() - .rooms - .short - .get_statekey_from_short(k) - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .ok() - }) - .collect::>() - }) - .collect(); - - info!("Resolving state"); - - let lock = services().globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = services().rooms.timeline.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); - } - }; - - drop(lock); - - info!("State resolution done. Compressing state"); - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - )?; - services() - .rooms - .state_compressor - .compress_state_event(shortstatekey, &event_id) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - info!("Forcing new room state"); - let (sstatehash, new, removed) = services() - .rooms - .state_compressor - .save_state(room_id, new_room_state)?; - services() - .rooms - .state - .force_state(room_id, sstatehash, new, removed, &state_lock) - .await?; - } - } - info!("Appending pdu to timeline"); extremities.insert(incoming_pdu.event_id.clone()); @@ -1021,6 +881,94 @@ impl Service { Ok(pdu_id) } + async fn resolve_state( + &self, + room_id: &RoomId, + room_version_id: &RoomVersionId, + incoming_state: HashMap>, + ) -> Result> { + info!("Loading current room state ids"); + let current_sstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? + .expect("every room has state"); + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_sstatehash) + .await?; + + let fork_states = [current_state_ids, incoming_state]; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + services() + .rooms + .auth_chain + .get_auth_chain(room_id, state.iter().map(|(_, id)| id.clone()).collect()) + .await? 
+ .collect(), + ); + } + + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + services() + .rooms + .short + .get_statekey_from_short(k) + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = services().globals.stateres_mutex.lock(); + let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }) { + Ok(new_state) => new_state, + Err(_) => { + return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); + } + }; + + drop(lock); + + info!("State resolution done. Compressing state"); + + let new_room_state = state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &event_id) + }) + .collect::>()?; + + Ok(new_room_state) + } + /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// From be877ef7191a2d6cbe9a3c9b40125f1bc42e6961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 27 Jun 2023 13:06:55 +0200 Subject: [PATCH 1396/1727] Improve sync performance with more caching and wrapping things in Arcs to avoid copies --- Cargo.lock | 22 ++--- Cargo.toml | 2 +- src/api/client_server/membership.rs | 20 ++-- .../key_value/rooms/state_accessor.rs | 6 +- .../key_value/rooms/state_compressor.rs | 10 +- src/database/mod.rs | 4 +- src/service/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 24 ++--- src/service/rooms/state/mod.rs | 18 ++-- src/service/rooms/state_compressor/data.rs | 6 +- src/service/rooms/state_compressor/mod.rs | 94 ++++++++++--------- src/service/rooms/timeline/mod.rs | 2 +- 12 files changed, 112 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19f63b1..9f62c18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "assign", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "js_int", "ruma-common", @@ -2139,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "assign", "bytes", @@ -2156,7 +2156,7 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "base64 0.21.2", "bytes", @@ -2184,7 +2184,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "js_int", "ruma-common", @@ -2195,7 +2195,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "js_int", "thiserror", @@ -2204,7 +2204,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "js_int", "ruma-common", @@ -2214,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "once_cell", "proc-macro-crate", @@ -2229,7 +2229,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "js_int", "ruma-common", @@ -2240,7 +2240,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "base64 0.21.2", "ed25519-dalek", @@ -2256,7 +2256,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568" +source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 3164842..9698caf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", 
"state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "de9a5a6ecca197e59623c210bd21f53055f83568", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "38294bd5206498c02b1001227d65654eb548308b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 11e37e6..ccd8d7a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -743,15 +743,17 @@ async fn join_room_by_id_helper( info!("Saving state from send_join"); let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, - state - .into_iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(k, &id) - }) - .collect::>()?, + Arc::new( + state + .into_iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) + .collect::>()?, + ), )?; services() diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 0f0c0dc..ad08f46 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -16,7 +16,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .1; let mut result = HashMap::new(); let mut i = 0; - for compressed in full_state.into_iter() { + for compressed in full_state.iter() { let parsed = services() .rooms .state_compressor @@ -45,7 +45,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; - for compressed in full_state { + for compressed in full_state.iter() { let (_, eventid) = services() .rooms .state_compressor @@ -95,7 +95,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .expect("there is always one layer") .1; Ok(full_state - .into_iter() + .iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { services() diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index d0a9be4..65ea603 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, mem::size_of}; +use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{ database::KeyValueDatabase, @@ -37,20 +37,20 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { Ok(StateDiff { parent, - added, - removed, + added: Arc::new(added), + removed: Arc::new(removed), }) } fn save_statediff(&self, shortstatehash: u64, 
diff: StateDiff) -> Result<()> { let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); - for new in &diff.added { + for new in diff.added.iter() { value.extend_from_slice(&new[..]); } if !diff.removed.is_empty() { value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &diff.removed { + for removed in diff.removed.iter() { value.extend_from_slice(&removed[..]); } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 5d89d4a..4e7bda6 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -587,8 +587,8 @@ impl KeyValueDatabase { services().rooms.state_compressor.save_state_from_diff( current_sstatehash, - statediffnew, - statediffremoved, + Arc::new(statediffnew), + Arc::new(statediffremoved), 2, // every state change is 2 event changes on average states_parents, )?; diff --git a/src/service/mod.rs b/src/service/mod.rs index 3b48810..7a2bb64 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -90,7 +90,7 @@ impl Services { state_compressor: rooms::state_compressor::Service { db, stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, + (1000.0 * config.conduit_cache_capacity_modifier) as usize, )), }, timeline: rooms::timeline::Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 800d849..066cef4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -774,15 +774,17 @@ impl Service { }); info!("Compressing state at event"); - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - services() - .rooms - .state_compressor - .compress_state_event(*shortstatekey, id) - }) - .collect::>()?; + let state_ids_compressed = Arc::new( + state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*shortstatekey, id) + }) + .collect::>()?, + ); if incoming_pdu.state_key.is_some() { info!("Preparing for stateres to derive new room state"); @@ -886,7 +888,7 @@ impl Service { room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, - ) -> Result> { + ) -> Result>> { info!("Loading current room state ids"); let current_sstatehash = services() .rooms @@ -966,7 +968,7 @@ impl Service { }) .collect::>()?; - Ok(new_room_state) + Ok(Arc::new(new_room_state)) } /// Find the event and auth it. 
Once the event is validated (steps 1 - 8) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 21ad2f9..ca9430f 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -32,11 +32,11 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: HashSet, - _statediffremoved: HashSet, + statediffnew: Arc>, + _statediffremoved: Arc>, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - for event_id in statediffnew.into_iter().filter_map(|new| { + for event_id in statediffnew.iter().filter_map(|new| { services() .rooms .state_compressor @@ -107,7 +107,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - state_ids_compressed: HashSet, + state_ids_compressed: Arc>, ) -> Result { let shorteventid = services() .rooms @@ -152,9 +152,9 @@ impl Service { .copied() .collect(); - (statediffnew, statediffremoved) + (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (state_ids_compressed, HashSet::new()) + (state_ids_compressed, Arc::new(HashSet::new())) }; services().rooms.state_compressor.save_state_from_diff( shortstatehash, @@ -234,8 +234,8 @@ impl Service { services().rooms.state_compressor.save_state_from_diff( shortstatehash, - statediffnew, - statediffremoved, + Arc::new(statediffnew), + Arc::new(statediffremoved), 2, states_parents, )?; @@ -396,7 +396,7 @@ impl Service { .1; Ok(full_state - .into_iter() + .iter() .filter_map(|compressed| { services() .rooms diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index ce164c6..d221d57 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,12 +1,12 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use super::CompressedStateEvent; use crate::Result; pub struct StateDiff { pub parent: Option, - pub added: HashSet, - pub removed: HashSet, + pub added: Arc>, + pub removed: Arc>, } pub trait Data: Send + Sync { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 356f32c..d29b020 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -20,10 +20,10 @@ pub struct Service { LruCache< u64, Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed )>, >, >, @@ -39,10 +39,10 @@ impl Service { shortstatehash: u64, ) -> Result< Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed )>, > { if let Some(r) = self @@ -62,13 +62,19 @@ impl Service { if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); + let mut state = (*response.last().unwrap().1).clone(); state.extend(added.iter().copied()); + let removed = (*removed).clone(); for r in &removed { state.remove(r); } - response.push((shortstatehash, state, added, removed)); + response.push((shortstatehash, Arc::new(state), added, Arc::new(removed))); + + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); Ok(response) } else { @@ -135,14 +141,14 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: 
HashSet, + statediffnew: Arc>, + statediffremoved: Arc>, diff_to_sibling: usize, mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed )>, ) -> Result<()> { let diffsum = statediffnew.len() + statediffremoved.len(); @@ -152,29 +158,29 @@ impl Service { // To many layers, we have to go deeper let parent = parent_states.pop().unwrap(); - let mut parent_new = parent.2; - let mut parent_removed = parent.3; + let mut parent_new = (*parent.2).clone(); + let mut parent_removed = (*parent.3).clone(); - for removed in statediffremoved { - if !parent_new.remove(&removed) { + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed); + parent_removed.insert(removed.clone()); } // Else it was added in the parent and we removed it again. We can forget this change } - for new in statediffnew { - if !parent_removed.remove(&new) { + for new in statediffnew.iter() { + if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new); + parent_new.insert(new.clone()); } // Else it was removed in the parent and we added it again. We can forget this change } self.save_state_from_diff( shortstatehash, - parent_new, - parent_removed, + Arc::new(parent_new), + Arc::new(parent_removed), diffsum, parent_states, )?; @@ -205,29 +211,29 @@ impl Service { if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; + let mut parent_new = (*parent.2).clone(); + let mut parent_removed = (*parent.3).clone(); - for removed in statediffremoved { - if !parent_new.remove(&removed) { + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed); + parent_removed.insert(removed.clone()); } // Else it was added in the parent and we removed it again. We can forget this change } - for new in statediffnew { - if !parent_removed.remove(&new) { + for new in statediffnew.iter() { + if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new); + parent_new.insert(new.clone()); } // Else it was removed in the parent and we added it again. 
We can forget this change } self.save_state_from_diff( shortstatehash, - parent_new, - parent_removed, + Arc::new(parent_new), + Arc::new(parent_removed), diffsum, parent_states, )?; @@ -250,11 +256,11 @@ impl Service { pub fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: HashSet, + new_state_ids_compressed: Arc>, ) -> Result<( u64, - HashSet, - HashSet, + Arc>, + Arc>, )> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; @@ -271,7 +277,11 @@ impl Service { .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok((new_shortstatehash, HashSet::new(), HashSet::new())); + return Ok(( + new_shortstatehash, + Arc::new(HashSet::new()), + Arc::new(HashSet::new()), + )); } let states_parents = previous_shortstatehash @@ -290,9 +300,9 @@ impl Service { .copied() .collect(); - (statediffnew, statediffremoved) + (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (new_state_ids_compressed, HashSet::new()) + (new_state_ids_compressed, Arc::new(HashSet::new())) }; if !already_existed { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2356a00..56769d5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -946,7 +946,7 @@ impl Service { pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: Vec, - state_ids_compressed: HashSet, + state_ids_compressed: Arc>, soft_fail: bool, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result>> { From d64a56d88b72266abcb746d6c9e9691dbfe48301 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 27 Jun 2023 18:48:34 +0200 Subject: [PATCH 1397/1727] Do soft fail check before doing state res to allow leave events --- src/service/rooms/event_handler/mod.rs | 33 +++++++++++++------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 066cef4..ef5616e 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -736,6 +736,23 @@ impl Service { } info!("Auth check succeeded"); + // Soft fail check before doing state res + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + // 13. Use state resolution to find new room state // We start looking at current room state now, so lets lock the room @@ -822,22 +839,6 @@ impl Service { // 14. 
Check if the event passes auth based on the "current state" of the room, if not soft fail it info!("Starting soft fail auth check"); - let auth_events = services().rooms.state.get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - )?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; - if soft_fail { services().rooms.timeline.append_incoming_pdu( &incoming_pdu, From fd1ccbd3ad474e2eac801a01ca71748827dce92b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 28 Jun 2023 17:41:55 +0200 Subject: [PATCH 1398/1727] improvement: randomize server order for alias joins --- src/api/client_server/alias.rs | 9 +++++---- src/api/client_server/membership.rs | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index ab51b50..7660ca2 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,4 +1,5 @@ use crate::{services, Error, Result, Ruma}; +use rand::seq::SliceRandom; use regex::Regex; use ruma::{ api::{ @@ -90,10 +91,10 @@ pub(crate) async fn get_alias_helper( ) .await?; - return Ok(get_alias::v3::Response::new( - response.room_id, - response.servers, - )); + let mut servers = response.servers; + servers.shuffle(&mut rand::thread_rng()); + + return Ok(get_alias::v3::Response::new(response.room_id, servers)); } let mut room_id = None; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index ccd8d7a..c9357b2 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -112,7 +112,7 @@ pub async fn join_room_by_id_or_alias_route( Err(room_alias) => { let response = get_alias_helper(room_alias).await?; - (response.servers.into_iter().collect(), response.room_id) + (response.servers, response.room_id) } }; From 06a1321e5692b240e0ce1e117cce0bd7008908eb Mon Sep 17 00:00:00 2001 From: purplemeteorite Date: Wed, 28 Jun 2023 18:51:44 +0200 Subject: [PATCH 1399/1727] easier-to-read docker setup instructions --- docker/README.md | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/docker/README.md b/docker/README.md index c702832..ef93af2 100644 --- a/docker/README.md +++ b/docker/README.md @@ -4,7 +4,31 @@ ## Docker -### Build & Dockerfile +To run conduit with docker you can either build the image yourself or pull it from a registry. + + +### Use a registry + +The image is available in the following registries: + +| Registry | Image | Size | +| --------------- | --------------------------------------------------------------- | --------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | + +[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit +[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 +[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest + +Use +```bash +docker image pull +``` +to pull it to your machine. + + + +### Build using a dockerfile The Dockerfile provided by Conduit has two stages, each of which creates an image. 
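Stepping back to the alias-resolution change earlier in this series (PATCH 1398): the server list returned for a room alias is now shuffled before the join logic walks it, so one slow or unreachable server no longer dominates every join attempt. A small standalone sketch of the same call pattern, assuming the `rand` crate that the patch itself imports; the helper function here is hypothetical, not Conduit's:

```rust
use rand::seq::SliceRandom;

/// Hypothetical helper: shuffle candidate servers before trying them in order.
fn randomize_servers(mut servers: Vec<String>) -> Vec<String> {
    // In-place shuffle with the thread-local RNG, the same call the patch adds.
    servers.shuffle(&mut rand::thread_rng());
    servers
}

fn main() {
    let servers = vec![
        "matrix.org".to_owned(),
        "example.com".to_owned(),
        "another.server".to_owned(),
    ];
    // Each run may print a different order.
    println!("{:?}", randomize_servers(servers));
}
```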
@@ -19,9 +43,11 @@ docker build --tag matrixconduit/matrix-conduit:latest . which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. + + ### Run -After building the image you can simply run it with +When you have the image you can simply run it with ```bash docker run -d -p 8448:6167 \ @@ -37,16 +63,7 @@ docker run -d -p 8448:6167 \ --name conduit matrixconduit/matrix-conduit:latest ``` -or you can skip the build step and pull the image from one of the following registries: - -| Registry | Image | Size | -| --------------- | --------------------------------------------------------------- | --------------------- | -| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | - -[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit -[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 -[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest +or you can use [docker-compose](#docker-compose). The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need From dc50197a13e2d66608f1ee121b7db34a964031dd Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Thu, 29 Jun 2023 02:42:32 +0000 Subject: [PATCH 1400/1727] update example configurations in DEPLOY.md for Apache and Nginx which include upstream proxy timeouts of 5 minutes to allow for room joins which take a while --- DEPLOY.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 75db366..8a6e48a 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -224,7 +224,8 @@ Listen 8448 ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode -ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon +# joining large rooms can be slow. increase timeout to 600 if you still have issues. +ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ @@ -270,12 +271,15 @@ server { merge_slashes off; # Nginx defaults to only allow 1MB uploads + # Increase this to allow posting large files such as videos client_max_body_size 20M; location /_matrix/ { proxy_pass http://127.0.0.1:6167$request_uri; proxy_set_header Host $http_host; proxy_buffering off; + # joining large rooms can be slow. increase to 10m if you still have issues. 
+ proxy_read_timeout 5m; } ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS From 0ded637b4a6b885fc8d8015baeaaf1534b6b1d29 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 29 Jun 2023 11:20:52 +0200 Subject: [PATCH 1401/1727] Upgrade axum to 0.6 --- Cargo.lock | 54 +++++++------- Cargo.toml | 2 +- src/api/ruma_wrapper/axum.rs | 139 +++++++++++++++++++++++++---------- src/main.rs | 7 +- 4 files changed, 130 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f62c18..4148394 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -89,9 +89,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", @@ -108,22 +108,22 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", + "rustversion", "serde", "serde_json", + "serde_path_to_error", "serde_urlencoded", "sync_wrapper", - "tokio", "tower", - "tower-http 0.3.5", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -131,6 +131,7 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", "tower-layer", "tower-service", ] @@ -407,7 +408,7 @@ dependencies = [ "tikv-jemallocator", "tokio", "tower", - "tower-http 0.4.1", + "tower-http", "tracing", "tracing-flame", "tracing-opentelemetry", @@ -1449,9 +1450,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "memchr" @@ -2363,6 +2364,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustversion" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" + [[package]] name = "ryu" version = "1.0.13" @@ -2467,6 +2474,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" +dependencies = [ + "serde", +] + [[package]] name = "serde_spanned" version = "0.6.3" @@ -2954,31 +2970,11 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", - "tokio", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags 1.3.2", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-http" version = 
"0.4.1" diff --git a/Cargo.toml b/Cargo.toml index 9698caf..424007c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rust-version = "1.70.0" [dependencies] # Web framework -axum = { version = "0.5.16", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } axum-server = { version = "0.5.1", features = ["tls-rustls"] } tower = { version = "0.4.13", features = ["util"] } tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 2d2af70..069e12b 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -3,18 +3,16 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{ - rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader, - }, + extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, headers::{ authorization::{Bearer, Credentials}, Authorization, }, response::{IntoResponse, Response}, - BoxError, + BoxError, RequestExt, RequestPartsExt, }; -use bytes::{BufMut, Bytes, BytesMut}; -use http::StatusCode; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use http::{Request, StatusCode}; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, @@ -26,27 +24,44 @@ use super::{Ruma, RumaResponse}; use crate::{services, Error, Result}; #[async_trait] -impl FromRequest for Ruma +impl FromRequest for Ruma where T: IncomingRequest, - B: HttpBody + Send, + B: HttpBody + Send + 'static, B::Data: Send, B::Error: Into, { type Rejection = Error; - async fn from_request(req: &mut RequestParts) -> Result { + async fn from_request(req: Request, _state: &S) -> Result { #[derive(Deserialize)] struct QueryParams { access_token: Option, user_id: Option, } - let metadata = T::METADATA; - let auth_header = Option::>>::from_request(req).await?; - let path_params = Path::>::from_request(req).await?; + let (mut parts, mut body) = match req.with_limited_body() { + Ok(limited_req) => { + let (parts, body) = limited_req.into_parts(); + let body = to_bytes(body) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + (parts, body) + } + Err(original_req) => { + let (parts, body) = original_req.into_parts(); + let body = to_bytes(body) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + (parts, body) + } + }; - let query = req.uri().query().unwrap_or_default(); + let metadata = T::METADATA; + let auth_header: Option>> = parts.extract().await?; + let path_params: Path> = parts.extract().await?; + + let query = parts.uri.query().unwrap_or_default(); let query_params: QueryParams = match serde_html_form::from_str(query) { Ok(params) => params, Err(e) => { @@ -63,10 +78,6 @@ where None => query_params.access_token.as_deref(), }; - let mut body = Bytes::from_request(req) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - let mut json_body = serde_json::from_slice::(&body).ok(); let appservices = services().appservice.all().unwrap(); @@ -138,24 +149,24 @@ where } } AuthScheme::ServerSignatures => { - let TypedHeader(Authorization(x_matrix)) = 
- TypedHeader::>::from_request(req) - .await - .map_err(|e| { - warn!("Missing or invalid Authorization header: {}", e); + let TypedHeader(Authorization(x_matrix)) = parts + .extract::>>() + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); - let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => { - "Missing Authorization header." - } - TypedHeaderRejectionReason::Error(_) => { - "Invalid X-Matrix signatures." - } - _ => "Unknown header-related error", - }; + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." + } + _ => "Unknown header-related error", + }; - Error::BadRequest(ErrorKind::Forbidden, msg) - })?; + Error::BadRequest(ErrorKind::Forbidden, msg) + })?; let origin_signatures = BTreeMap::from_iter([( x_matrix.key.clone(), @@ -170,11 +181,11 @@ where let mut request_map = BTreeMap::from_iter([ ( "method".to_owned(), - CanonicalJsonValue::String(req.method().to_string()), + CanonicalJsonValue::String(parts.method.to_string()), ), ( "uri".to_owned(), - CanonicalJsonValue::String(req.uri().to_string()), + CanonicalJsonValue::String(parts.uri.to_string()), ), ( "origin".to_owned(), @@ -224,7 +235,7 @@ where x_matrix.origin, e, request_map ); - if req.uri().to_string().contains('@') { + if parts.uri.to_string().contains('@') { warn!( "Request uri contained '@' character. Make sure your \ reverse proxy gives Conduit the raw uri (apache: use \ @@ -243,8 +254,8 @@ where } }; - let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); - *http_request.headers_mut().unwrap() = req.headers().clone(); + let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method); + *http_request.headers_mut().unwrap() = parts.headers; if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { @@ -364,3 +375,55 @@ impl IntoResponse for RumaResponse { } } } + +// copied from hyper under the following license: +// Copyright (c) 2014-2021 Sean McArthur + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +pub(crate) async fn to_bytes(body: T) -> Result +where + T: HttpBody, +{ + futures_util::pin_mut!(body); + + // If there's only 1 chunk, we can just return Buf::to_bytes() + let mut first = if let Some(buf) = body.data().await { + buf? 
+ } else { + return Ok(Bytes::new()); + }; + + let second = if let Some(buf) = body.data().await { + buf? + } else { + return Ok(first.copy_to_bytes(first.remaining())); + }; + + // With more than 1 buf, we gotta flatten into a Vec first. + let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; + let mut vec = Vec::with_capacity(cap); + vec.put(first); + vec.put(second); + + while let Some(buf) = body.data().await { + vec.put(buf?); + } + + Ok(vec.into()) +} diff --git a/src/main.rs b/src/main.rs index f9f88f4..e0f84d9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,8 +10,7 @@ use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ - extract::{DefaultBodyLimit, FromRequest, MatchedPath}, - handler::Handler, + extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -421,7 +420,7 @@ fn routes() -> Router { "/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync), ) - .fallback(not_found.into_service()) + .fallback(not_found) } async fn shutdown_signal(handle: ServerHandle) { @@ -505,7 +504,7 @@ macro_rules! impl_ruma_handler { Fut: Future> + Send, E: IntoResponse, - $( $ty: FromRequest + Send + 'static, )* + $( $ty: FromRequestParts<()> + Send + 'static, )* { fn add_to_router(self, mut router: Router) -> Router { let meta = Req::METADATA; From 2640f67e4b6373c6db0032cdc3786d07d14e4309 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sun, 2 Jul 2023 18:00:30 +0000 Subject: [PATCH 1402/1727] remove comments --- DEPLOY.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 8a6e48a..e5e1530 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -224,7 +224,6 @@ Listen 8448 ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode -# joining large rooms can be slow. increase timeout to 600 if you still have issues. ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ @@ -278,7 +277,6 @@ server { proxy_pass http://127.0.0.1:6167$request_uri; proxy_set_header Host $http_host; proxy_buffering off; - # joining large rooms can be slow. increase to 10m if you still have issues. 
proxy_read_timeout 5m; } From 9d49d599f3a1d5da535b71f2f8e4986c25b997e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 2 Jul 2023 16:06:54 +0200 Subject: [PATCH 1403/1727] feat: space hierarchies --- src/api/client_server/context.rs | 6 +- src/api/client_server/message.rs | 7 +- src/api/client_server/mod.rs | 2 + src/api/client_server/search.rs | 3 +- src/api/client_server/space.rs | 34 +++ src/api/server_server.rs | 2 +- src/main.rs | 4 +- src/service/mod.rs | 5 +- src/service/pdu.rs | 19 +- src/service/rooms/mod.rs | 2 + src/service/rooms/spaces/mod.rs | 436 +++++++++++++++++++++++++++++++ 11 files changed, 503 insertions(+), 17 deletions(-) create mode 100644 src/api/client_server/space.rs create mode 100644 src/service/rooms/spaces/mod.rs diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index e70f9f1..8e193e6 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, }; -use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; use tracing::error; /// # `GET /_matrix/client/r0/rooms/{roomId}/context` @@ -70,9 +70,7 @@ pub async fn get_context_route( } // Use limit with maximum 100 - let limit = usize::try_from(body.limit) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid."))? - .min(100); + let limit = u64::from(body.limit).min(100) as usize; let base_event = base_event.to_room_event(); diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index dc2d994..750e030 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -133,12 +133,7 @@ pub async fn get_message_events_route( from, )?; - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .try_into() - .map_or(10_usize, |l: u32| l as usize) - .min(100); + let limit = u64::from(body.limit).min(100) as usize; let next_token; diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 2ab3a98..54c99aa 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -21,6 +21,7 @@ mod report; mod room; mod search; mod session; +mod space; mod state; mod sync; mod tag; @@ -55,6 +56,7 @@ pub use report::*; pub use room::*; pub use search::*; pub use session::*; +pub use space::*; pub use state::*; pub use sync::*; pub use tag::*; diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index fe69e7c..e9fac36 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -31,7 +31,8 @@ pub async fn search_events_route( .collect() }); - let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); + // Use limit or else 10, with maximum 100 + let limit = filter.limit.map_or(10, u64::from).min(100) as usize; let mut searches = Vec::new(); diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs new file mode 100644 index 0000000..e2ea8c3 --- /dev/null +++ b/src/api/client_server/space.rs @@ -0,0 +1,34 @@ +use crate::{services, Result, Ruma}; +use ruma::api::client::space::get_hierarchy; + +/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`` +/// +/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space. 
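Before the route body below, a standalone sketch of the pagination scheme the new spaces service implements: a stack of child lists is walked depth-first, the first `skip` rooms are dropped (that is what the `from` token encodes), and traversal stops once `limit` results are collected or the depth budget is spent. The types here are simplified stand-ins (plain strings and an in-memory child map); the real service resolves children from `m.space.child` state events or federation `/hierarchy` responses and caches the resulting chunks.

```rust
use std::collections::HashMap;

// Simplified stand-in for a room identifier.
type RoomId = String;

fn paginate_hierarchy(
    root: &RoomId,
    children: &HashMap<RoomId, Vec<RoomId>>,
    skip: usize,
    limit: usize,
    max_depth: usize,
) -> Vec<RoomId> {
    let mut left_to_skip = skip;
    let mut results = Vec::new();
    // Each stack entry holds the still-unvisited children at one depth level.
    let mut stack = vec![vec![root.clone()]];

    while let Some(current) = {
        // Drop exhausted levels, then take the next room from the deepest one.
        while stack.last().map_or(false, |s| s.is_empty()) {
            stack.pop();
        }
        stack.last_mut().and_then(|s| s.pop())
    } {
        if results.len() >= limit {
            break;
        }
        if left_to_skip > 0 {
            // Already returned on a previous page.
            left_to_skip -= 1;
        } else {
            results.push(current.clone());
        }
        // Only descend while the depth budget allows it.
        if stack.len() < max_depth {
            if let Some(kids) = children.get(&current) {
                stack.push(kids.clone());
            }
        }
    }

    results
}

fn main() {
    let mut children = HashMap::new();
    children.insert(
        "space".to_owned(),
        vec!["room_a".to_owned(), "room_b".to_owned()],
    );
    children.insert("room_b".to_owned(), vec!["room_c".to_owned()]);

    // First page, then the next page resuming where the first left off.
    println!("{:?}", paginate_hierarchy(&"space".to_owned(), &children, 0, 2, 3));
    println!("{:?}", paginate_hierarchy(&"space".to_owned(), &children, 2, 2, 3));
}
```

The `next_batch` token the endpoint returns is simply `skip + results.len()`, so a client passes it back as `from` to resume the walk on its next request.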
+pub async fn get_hierarchy_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let skip = body + .from + .as_ref() + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + let limit = body.limit.map_or(10, u64::from).min(100) as usize; + + let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself + + services() + .rooms + .spaces + .get_hierarchy( + sender_user, + &body.room_id, + limit, + skip, + max_depth, + body.suggested_only, + ) + .await +} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 5e218be..adb5f1f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -151,7 +151,7 @@ where .try_into_http_request::>( &actual_destination_str, SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], + &[MatrixVersion::V1_4], ) .map_err(|e| { warn!( diff --git a/src/main.rs b/src/main.rs index f9f88f4..3f14ca8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,8 @@ rust_2018_idioms, unused_qualifications, clippy::cloned_instead_of_copied, - clippy::str_to_string + clippy::str_to_string, + clippy::future_not_send )] #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] @@ -386,6 +387,7 @@ fn routes() -> Router { .ruma_route(client_server::get_relating_events_with_rel_type_and_event_type_route) .ruma_route(client_server::get_relating_events_with_rel_type_route) .ruma_route(client_server::get_relating_events_route) + .ruma_route(client_server::get_hierarchy_route) .ruma_route(server_server::get_server_version_route) .route( "/_matrix/key/v2/server", diff --git a/src/service/mod.rs b/src/service/mod.rs index 7a2bb64..dfdc5a6 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -90,7 +90,7 @@ impl Services { state_compressor: rooms::state_compressor::Service { db, stateinfo_cache: Mutex::new(LruCache::new( - (1000.0 * config.conduit_cache_capacity_modifier) as usize, + (300.0 * config.conduit_cache_capacity_modifier) as usize, )), }, timeline: rooms::timeline::Service { @@ -98,6 +98,9 @@ impl Services { lasttimelinecount_cache: Mutex::new(HashMap::new()), }, threads: rooms::threads::Service { db }, + spaces: rooms::spaces::Service { + roomid_spacechunk_cache: Mutex::new(LruCache::new(200)), + }, user: rooms::user::Service { db }, }, transaction_ids: transaction_ids::Service { db }, diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 9d284c0..d24e174 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,9 +1,9 @@ use crate::Error; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, - AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, - AnyTimelineEvent, StateEvent, TimelineEventType, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, + AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, + AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, @@ -248,6 +248,19 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] + pub fn to_stripped_spacechild_state_event(&self) -> Raw { + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + "origin_server_ts": self.origin_server_ts, + }); + + 
serde_json::from_value(json).expect("Raw::from_value always works") + } + #[tracing::instrument(skip(self))] pub fn to_member_event(&self) -> Raw> { let mut json = json!({ diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 61304d1..f073984 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -9,6 +9,7 @@ pub mod outlier; pub mod pdu_metadata; pub mod search; pub mod short; +pub mod spaces; pub mod state; pub mod state_accessor; pub mod state_cache; @@ -56,5 +57,6 @@ pub struct Service { pub state_compressor: state_compressor::Service, pub timeline: timeline::Service, pub threads: threads::Service, + pub spaces: spaces::Service, pub user: user::Service, } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs new file mode 100644 index 0000000..76ba6c5 --- /dev/null +++ b/src/service/rooms/spaces/mod.rs @@ -0,0 +1,436 @@ +use std::sync::{Arc, Mutex}; + +use lru_cache::LruCache; +use ruma::{ + api::{ + client::{ + error::ErrorKind, + space::{get_hierarchy, SpaceHierarchyRoomsChunk, SpaceRoomJoinRule}, + }, + federation, + }, + directory::PublicRoomJoinRule, + events::{ + room::{ + avatar::RoomAvatarEventContent, + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + name::RoomNameEventContent, + topic::RoomTopicEventContent, + }, + StateEventType, + }, + OwnedRoomId, RoomId, UserId, +}; + +use tracing::{debug, error, warn}; + +use crate::{services, Error, PduEvent, Result}; + +pub struct CachedSpaceChunk { + chunk: SpaceHierarchyRoomsChunk, + children: Vec, + join_rule: JoinRule, +} + +pub struct Service { + pub roomid_spacechunk_cache: Mutex>>, +} + +impl Service { + pub async fn get_hierarchy( + &self, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + skip: usize, + max_depth: usize, + suggested_only: bool, + ) -> Result { + let mut left_to_skip = skip; + + let mut rooms_in_path = Vec::new(); + let mut stack = vec![vec![room_id.to_owned()]]; + let mut results = Vec::new(); + + while let Some(current_room) = { + while stack.last().map_or(false, |s| s.is_empty()) { + stack.pop(); + } + if !stack.is_empty() { + stack.last_mut().and_then(|s| s.pop()) + } else { + None + } + } { + rooms_in_path.push(current_room.clone()); + if results.len() >= limit { + break; + } + + if let Some(cached) = self + .roomid_spacechunk_cache + .lock() + .unwrap() + .get_mut(¤t_room.to_owned()) + .as_ref() + { + if let Some(cached) = cached { + if let Some(_join_rule) = + self.handle_join_rule(&cached.join_rule, sender_user, ¤t_room)? + { + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(cached.chunk.clone()); + } + if rooms_in_path.len() < max_depth { + stack.push(cached.children.clone()); + } + } + } + continue; + } + + if let Some(current_shortstatehash) = services() + .rooms + .state + .get_room_shortstatehash(¤t_room)? 
+ { + let state = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut children_ids = Vec::new(); + let mut children_pdus = Vec::new(); + for (key, id) in state { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(key)?; + if event_type != StateEventType::SpaceChild { + continue; + } + if let Ok(room_id) = OwnedRoomId::try_from(state_key) { + children_ids.push(room_id); + children_pdus.push(services().rooms.timeline.get_pdu(&id)?.ok_or_else( + || Error::bad_database("Event in space state not found"), + )?); + } + } + + // TODO: Sort children + children_ids.reverse(); + + let chunk = self.get_room_chunk(sender_user, ¤t_room, children_pdus); + if let Ok(chunk) = chunk { + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(chunk.clone()); + } + let join_rule = services() + .rooms + .state_accessor + .room_state_get(¤t_room, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| c.join_rule) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .unwrap_or(JoinRule::Invite); + + self.roomid_spacechunk_cache.lock().unwrap().insert( + current_room.clone(), + Some(CachedSpaceChunk { + chunk, + children: children_ids.clone(), + join_rule, + }), + ); + } + + if rooms_in_path.len() < max_depth { + stack.push(children_ids); + } + } else { + let server = current_room.server_name(); + if server == services().globals.server_name() { + continue; + } + if !results.is_empty() { + // Early return so the client can see some data already + break; + } + warn!("Asking {server} for /hierarchy"); + if let Ok(response) = services() + .sending + .send_federation_request( + &server, + federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }, + ) + .await + { + warn!("Got response from {server} for /hierarchy\n{response:?}"); + let join_rule = self.translate_pjoinrule(&response.room.join_rule)?; + let chunk = SpaceHierarchyRoomsChunk { + canonical_alias: response.room.canonical_alias, + name: response.room.name, + num_joined_members: response.room.num_joined_members, + room_id: response.room.room_id, + topic: response.room.topic, + world_readable: response.room.world_readable, + guest_can_join: response.room.guest_can_join, + avatar_url: response.room.avatar_url, + join_rule: self.translate_sjoinrule(&response.room.join_rule)?, + room_type: response.room.room_type, + children_state: response.room.children_state, + }; + let children = response + .children + .iter() + .map(|c| c.room_id.clone()) + .collect::>(); + + if let Some(_join_rule) = + self.handle_join_rule(&join_rule, sender_user, ¤t_room)? 
+ { + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(chunk.clone()); + } + if rooms_in_path.len() < max_depth { + stack.push(children.clone()); + } + } + + self.roomid_spacechunk_cache.lock().unwrap().insert( + current_room.clone(), + Some(CachedSpaceChunk { + chunk, + children, + join_rule, + }), + ); + + /* TODO: + for child in response.children { + roomid_spacechunk_cache.insert( + current_room.clone(), + CachedSpaceChunk { + chunk: child.chunk, + children, + join_rule, + }, + ); + } + */ + } else { + self.roomid_spacechunk_cache + .lock() + .unwrap() + .insert(current_room.clone(), None); + } + } + } + + Ok(get_hierarchy::v1::Response { + next_batch: if results.is_empty() { + None + } else { + Some((skip + results.len()).to_string()) + }, + rooms: results, + }) + } + + fn get_room_chunk( + &self, + sender_user: &UserId, + room_id: &RoomId, + children: Vec>, + ) -> Result { + Ok(SpaceHierarchyRoomsChunk { + canonical_alias: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomCanonicalAliasEventContent| c.alias) + .map_err(|_| { + Error::bad_database("Invalid canonical alias event in database.") + }) + })?, + name: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomName, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomNameEventContent| c.name) + .map_err(|_| Error::bad_database("Invalid room name event in database.")) + })?, + num_joined_members: services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or_else(|| { + warn!("Room {} has no member count", room_id); + 0 + }) + .try_into() + .expect("user count should not be that big"), + room_id: room_id.to_owned(), + topic: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomTopicEventContent| Some(c.topic)) + .map_err(|_| Error::bad_database("Invalid room topic event in database.")) + })?, + world_readable: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + })?, + guest_can_join: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| { + c.guest_access == GuestAccess::CanJoin + }) + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + }) + })?, + avatar_url: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomAvatarEventContent| c.url) + .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + }) + .transpose()? + // url is now an Option so we must flatten + .flatten(), + join_rule: { + let join_rule = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
+ .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| c.join_rule) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .unwrap_or(JoinRule::Invite); + self.handle_join_rule(&join_rule, sender_user, room_id)? + .ok_or_else(|| { + debug!("User is not allowed to see room {room_id}"); + // This error will be caught later + Error::BadRequest( + ErrorKind::Forbidden, + "User is not allowed to see the room", + ) + })? + }, + room_type: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err(|e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }) + }) + .transpose()? + .and_then(|e| e.room_type), + children_state: children + .into_iter() + .map(|pdu| pdu.to_stripped_spacechild_state_event()) + .collect(), + }) + } + + fn translate_pjoinrule(&self, join_rule: &PublicRoomJoinRule) -> Result { + match join_rule { + PublicRoomJoinRule::Knock => Ok(JoinRule::Knock), + PublicRoomJoinRule::Public => Ok(JoinRule::Public), + _ => Err(Error::BadServerResponse("Unknown join rule")), + } + } + + fn translate_sjoinrule(&self, join_rule: &PublicRoomJoinRule) -> Result { + match join_rule { + PublicRoomJoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), + PublicRoomJoinRule::Public => Ok(SpaceRoomJoinRule::Public), + _ => Err(Error::BadServerResponse("Unknown join rule")), + } + } + + fn handle_join_rule( + &self, + join_rule: &JoinRule, + sender_user: &UserId, + room_id: &RoomId, + ) -> Result> { + match join_rule { + JoinRule::Public => Ok::<_, Error>(Some(SpaceRoomJoinRule::Public)), + JoinRule::Knock => Ok(Some(SpaceRoomJoinRule::Knock)), + JoinRule::Invite => { + if services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? 
+ { + Ok(Some(SpaceRoomJoinRule::Invite)) + } else { + Ok(None) + } + } + JoinRule::Restricted(_r) => { + // TODO: Check rules + Ok(None) + } + JoinRule::KnockRestricted(_r) => { + // TODO: Check rules + Ok(None) + } + _ => Ok(None), + } + } +} From bac13d08ae8cb8205725d31b72b09baec2ee9265 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 2 Jul 2023 22:50:50 +0200 Subject: [PATCH 1404/1727] fix: cache invalidation --- src/service/rooms/state/mod.rs | 73 ++++++++++++++++++------------- src/service/rooms/timeline/mod.rs | 11 +++++ 2 files changed, 53 insertions(+), 31 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index ca9430f..d782386 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -49,10 +49,6 @@ impl Service { None => continue, }; - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - let pdu: PduEvent = match serde_json::from_str( &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), ) { @@ -60,34 +56,49 @@ impl Service { Err(_) => continue, }; - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, + match pdu.kind { + TimelineEventType::RoomMember => { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let membership = + match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::parse(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership, + &pdu.sender, + None, + false, + )?; + } + TimelineEventType::SpaceChild => { + services() + .rooms + .spaces + .roomid_spacechunk_cache + .lock() + .unwrap() + .remove(&pdu.room_id); + } + _ => continue, } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - services().rooms.state_cache.update_membership( - room_id, - &user_id, - membership, - &pdu.sender, - None, - false, - )?; } services().rooms.state_cache.update_joined_count(room_id)?; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 56769d5..83c3010 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -387,6 +387,17 @@ impl Service { self.redact_pdu(redact_id, pdu)?; } } + TimelineEventType::SpaceChild => { + if let Some(_state_key) = &pdu.state_key { + services() + .rooms + .spaces + .roomid_spacechunk_cache + .lock() + .unwrap() + .remove(&pdu.room_id); + } + } TimelineEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { #[derive(Deserialize)] From a2c3256cedeeb696b303b6ca9533acd73d110e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 3 Jul 2023 19:37:54 +0200 Subject: [PATCH 1405/1727] improvement: better memory usage and admin commands to analyze it --- Cargo.toml | 2 +- src/api/client_server/sync.rs | 636 ++++++++++++++-------------- src/config/mod.rs | 2 +- src/database/abstraction.rs | 1 + src/database/abstraction/rocksdb.rs | 6 + src/database/key_value/globals.rs | 55 ++- src/service/admin/mod.rs | 32 +- src/service/globals/data.rs | 3 +- 
src/service/globals/mod.rs | 4 - src/service/mod.rs | 107 ++++- 10 files changed, 517 insertions(+), 331 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9698caf..687f4b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,7 +105,7 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc", "systemd"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index b4baec1..dd75347 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -653,213 +653,152 @@ async fn load_joined_room( .user .get_token_shortstatehash(&room_id, since)?; - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or(0); - let invited_member_count = services() - .rooms - .state_cache - .room_invited_count(&room_id)? - .unwrap_or(0); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in services() - .rooms - .timeline - .all_pdus(&sender_user, &room_id)? - .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? - || services() - .rooms - .state_cache - .is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes, - )) - }; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? 
- .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); - let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = - if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - let mut i = 0; - for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services() - .rooms - .short - .get_statekey_from_short(shortstatekey)?; - - if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || *sender_user == state_key - { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - - // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, - )?; - - // The state_events above should contain all timeline_users, let's mark them as lazy - // loaded. - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - ( - heroes, - joined_member_count, - invited_member_count, - true, - state_events, - ) - } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { + if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { // No state changes (Vec::new(), None, None, false, Vec::new()) } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0); - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(&sender_user, &room_id)? 
+ .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = + serde_json::from_str(pdu.content.get()).map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (services() + .rooms + .state_cache + .is_joined(&user_id, &room_id)? + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(|u| u.ok()) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes, + )) + }; + + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = since_sender_member + .map_or(true, |member| member.membership != MembershipState::Join); + + if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - if since_shortstatehash != current_shortstatehash { let current_state_ids = services() .rooms .state_accessor .state_full_ids(current_shortstatehash) .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - for (key, id) in current_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { @@ -868,145 +807,214 @@ async fn load_joined_room( } }; - if pdu.kind == TimelineEventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - } - Err(e) => error!("Invalid state key for member event: {}", e), - } + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); } - state_events.push(pdu); - tokio::task::yield_now().await; + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } } - } - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - if !services().rooms.lazy_loading.lazy_load_was_sent_before( + // Reset lazy loading because this is an initial sync + services().rooms.lazy_loading.lazy_load_reset( &sender_user, &sender_device, &room_id, - &event.sender, - )? || lazy_load_send_redundant - { - if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - )? { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); + )?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batchcount, + ); + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == TimelineEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + tokio::task::yield_now().await; + } } } - } - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
- .is_some(); - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = state_events - .iter() - .any(|event| event.kind == TimelineEventType::RoomMember); - - if encrypted_room { - for state_event in &state_events { - if state_event.kind != TimelineEventType::RoomMember { + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { continue; } - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::( - state_event.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { - device_list_updates.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? { + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); } } } - } - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services() - .rooms - .state_cache - .room_members(&room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) - }), + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batchcount, ); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = state_events + .iter() + .any(|event| event.kind == TimelineEventType::RoomMember); + + if encrypted_room { + for state_event in &state_events { + if state_event.kind != TimelineEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::( + state_event.content.get(), + ) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
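// Both the removed and the re-indented code implement the same rule for encrypted
// rooms: a *join* by someone we do not yet share an encrypted room with means we
// need their device list, and a *leave* is recorded so their devices can be cleaned
// up later. A condensed sketch of that rule, with membership as a plain string and
// user ids as Strings (both simplifications, not the ruma types):

use std::collections::HashSet;

fn track_membership_change(
    membership: &str,
    already_shares_encrypted_room: bool,
    user_id: String,
    device_list_updates: &mut HashSet<String>,
    left_encrypted_users: &mut HashSet<String>,
) {
    match membership {
        "join" if !already_shares_encrypted_room => {
            // We will need this user's device keys for end-to-end encryption.
            device_list_updates.insert(user_id);
        }
        "leave" => {
            // Remember the user; their devices may no longer be relevant to us.
            left_encrypted_users.insert(user_id);
        }
        _ => {}
    }
}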
+ .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + device_list_updates.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(&room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&sender_user, user_id, &room_id) + .unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts()? + } else { + (None, None, Vec::new()) + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? - } else { - (None, None, Vec::new()) - }; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) }; // Look for device list updates in this room diff --git a/src/config/mod.rs b/src/config/mod.rs index 31a586f..f922282 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -224,7 +224,7 @@ fn default_database_backend() -> String { } fn default_db_cache_capacity_mb() -> f64 { - 1000.0 + 300.0 } fn default_conduit_cache_capacity_modifier() -> f64 { diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 93660f9..0a32105 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -38,6 +38,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync { fn memory_usage(&self) -> Result { Ok("Current database engine does not support memory usage reporting.".to_owned()) } + fn clear_caches(&self) {} } pub trait KvTree: Send + Sync { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 3e64e8b..f0b5f2a 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -45,6 +45,10 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); db_opts.optimize_level_style_compaction(10 * 1024 * 1024); + // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning + db_opts.set_max_background_jobs(6); + db_opts.set_bytes_per_sync(1048576); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); db_opts.set_prefix_extractor(prefix_extractor); @@ -121,6 +125,8 @@ impl KeyValueDatabaseEngine for Arc { self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } + + fn clear_caches(&self) {} } impl RocksDbEngineTree<'_> { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 7b7675c..ab3dfe0 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -118,8 +118,59 @@ impl service::globals::Data for KeyValueDatabase { self._db.cleanup() } - fn memory_usage(&self) -> Result { - self._db.memory_usage() + fn memory_usage(&self) -> String { + let pdu_cache = 
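// The two RocksDB options added above follow the upstream tuning guide linked in the
// comment: more background jobs let compactions and flushes run in parallel, and
// bytes_per_sync smooths write I/O by syncing in 1 MiB steps instead of one large
// burst. A minimal sketch of building such options with the `rocksdb` crate (a
// simplified stand-alone function, not the patch's full `db_options`):

fn tuned_db_options() -> rocksdb::Options {
    let mut db_opts = rocksdb::Options::default();
    // Run up to 6 compaction/flush jobs in parallel.
    db_opts.set_max_background_jobs(6);
    // Ask the OS to sync written bytes every 1 MiB to avoid large write stalls.
    db_opts.set_bytes_per_sync(1_048_576);
    db_opts
}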
self.pdu_cache.lock().unwrap().len(); + let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len(); + let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len(); + let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len(); + let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len(); + let our_real_users_cache = self.our_real_users_cache.read().unwrap().len(); + let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len(); + let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len(); + + let mut response = format!( + "\ +pdu_cache: {pdu_cache} +shorteventid_cache: {shorteventid_cache} +auth_chain_cache: {auth_chain_cache} +eventidshort_cache: {eventidshort_cache} +statekeyshort_cache: {statekeyshort_cache} +our_real_users_cache: {our_real_users_cache} +appservice_in_room_cache: {appservice_in_room_cache} +lasttimelinecount_cache: {lasttimelinecount_cache}\n" + ); + if let Ok(db_stats) = self._db.memory_usage() { + response += &db_stats; + } + + response + } + + fn clear_caches(&self, amount: u32) { + if amount > 0 { + self.pdu_cache.lock().unwrap().clear(); + } + if amount > 1 { + self.shorteventid_cache.lock().unwrap().clear(); + } + if amount > 2 { + self.auth_chain_cache.lock().unwrap().clear(); + } + if amount > 3 { + self.eventidshort_cache.lock().unwrap().clear(); + } + if amount > 4 { + self.statekeyshort_cache.lock().unwrap().clear(); + } + if amount > 5 { + self.our_real_users_cache.write().unwrap().clear(); + } + if amount > 6 { + self.appservice_in_room_cache.write().unwrap().clear(); + } + if amount > 7 { + self.lasttimelinecount_cache.lock().unwrap().clear(); + } } fn load_keypair(&self) -> Result { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index d37ec69..9250a3e 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -134,7 +134,13 @@ enum AdminCommand { }, /// Print database memory usage statistics - DatabaseMemoryUsage, + MemoryUsage, + + /// Clears all of Conduit's database caches with index smaller than the amount + ClearDatabaseCaches { amount: u32 }, + + /// Clears all of Conduit's service caches with index smaller than the amount + ClearServiceCaches { amount: u32 }, /// Show configuration values ShowConfig, @@ -531,12 +537,24 @@ impl Service { None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { - Ok(response) => RoomMessageEventContent::text_plain(response), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to get database memory usage: {e}" - )), - }, + AdminCommand::MemoryUsage => { + let response1 = services().memory_usage(); + let response2 = services().globals.db.memory_usage(); + + RoomMessageEventContent::text_plain(format!( + "Services:\n{response1}\n\nDatabase:\n{response2}" + )) + } + AdminCommand::ClearDatabaseCaches { amount } => { + services().globals.db.clear_caches(amount); + + RoomMessageEventContent::text_plain("Done.") + } + AdminCommand::ClearServiceCaches { amount } => { + services().clear_caches(amount); + + RoomMessageEventContent::text_plain("Done.") + } AdminCommand::ShowConfig => { // Construct and send the response RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 04371a0..171b3fe 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -15,7 +15,8 @@ pub trait Data: Send + 
Sync { fn current_count(&self) -> Result; async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; fn cleanup(&self) -> Result<()>; - fn memory_usage(&self) -> Result; + fn memory_usage(&self) -> String; + fn clear_caches(&self, amount: u32); fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; fn add_signing_key( diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 9206d43..e4affde 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -214,10 +214,6 @@ impl Service { self.db.cleanup() } - pub fn memory_usage(&self) -> Result { - self.db.memory_usage() - } - pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } diff --git a/src/service/mod.rs b/src/service/mod.rs index dfdc5a6..56aed7f 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -90,7 +90,7 @@ impl Services { state_compressor: rooms::state_compressor::Service { db, stateinfo_cache: Mutex::new(LruCache::new( - (300.0 * config.conduit_cache_capacity_modifier) as usize, + (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, timeline: rooms::timeline::Service { @@ -115,4 +115,109 @@ impl Services { globals: globals::Service::load(db, config)?, }) } + fn memory_usage(&self) -> String { + let lazy_load_waiting = self + .rooms + .lazy_loading + .lazy_load_waiting + .lock() + .unwrap() + .len(); + let server_visibility_cache = self + .rooms + .state_accessor + .server_visibility_cache + .lock() + .unwrap() + .len(); + let user_visibility_cache = self + .rooms + .state_accessor + .user_visibility_cache + .lock() + .unwrap() + .len(); + let stateinfo_cache = self + .rooms + .state_compressor + .stateinfo_cache + .lock() + .unwrap() + .len(); + let lasttimelinecount_cache = self + .rooms + .timeline + .lasttimelinecount_cache + .lock() + .unwrap() + .len(); + let roomid_spacechunk_cache = self + .rooms + .spaces + .roomid_spacechunk_cache + .lock() + .unwrap() + .len(); + + format!( + "\ +lazy_load_waiting: {lazy_load_waiting} +server_visibility_cache: {server_visibility_cache} +user_visibility_cache: {user_visibility_cache} +stateinfo_cache: {stateinfo_cache} +lasttimelinecount_cache: {lasttimelinecount_cache} +roomid_spacechunk_cache: {roomid_spacechunk_cache}\ + " + ) + } + fn clear_caches(&self, amount: u32) { + if amount > 0 { + self.rooms + .lazy_loading + .lazy_load_waiting + .lock() + .unwrap() + .clear(); + } + if amount > 1 { + self.rooms + .state_accessor + .server_visibility_cache + .lock() + .unwrap() + .clear(); + } + if amount > 2 { + self.rooms + .state_accessor + .user_visibility_cache + .lock() + .unwrap() + .clear(); + } + if amount > 3 { + self.rooms + .state_compressor + .stateinfo_cache + .lock() + .unwrap() + .clear(); + } + if amount > 4 { + self.rooms + .timeline + .lasttimelinecount_cache + .lock() + .unwrap() + .clear(); + } + if amount > 5 { + self.rooms + .spaces + .roomid_spacechunk_cache + .lock() + .unwrap() + .clear(); + } + } } From c4824a6ebcb7bb72528d87b26b7e3fb19c01212d Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 4 Jul 2023 21:13:11 +0000 Subject: [PATCH 1406/1727] ci: Fix "0 B" image size display works around gitlab issue https://gitlab.com/gitlab-org/gitlab/-/issues/388865#workaround --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d2da91b..f5ab424 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -41,6 +41,7 @@ variables: --pull --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" --push + 
--provenance=false --file "Dockerfile" . # Build multiplatform image to deb stage and extract their .deb files: - > @@ -48,6 +49,7 @@ variables: --platform "linux/arm/v7,linux/arm64,linux/amd64" --target "packager-result" --output="type=local,dest=/tmp/build-output" + --provenance=false --file "Dockerfile" . # Build multiplatform image to binary stage and extract their binaries: - > @@ -55,6 +57,7 @@ variables: --platform "linux/arm/v7,linux/arm64,linux/amd64" --target "builder-result" --output="type=local,dest=/tmp/build-output" + --provenance=false --file "Dockerfile" . # Copy to GitLab container registry: - > From e4f769963fb880d6131ae940f89ab9b1193c5d32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 6 Jul 2023 10:32:25 +0200 Subject: [PATCH 1407/1727] feat: very simple sliding sync implementation --- Cargo.lock | 22 +- Cargo.toml | 6 +- README.md | 2 +- src/api/client_server/directory.rs | 12 +- src/api/client_server/sync.rs | 270 ++++++++++++++++++++---- src/api/client_server/unversioned.rs | 20 +- src/config/mod.rs | 1 + src/main.rs | 1 + src/service/globals/mod.rs | 4 + src/service/pusher/mod.rs | 16 +- src/service/rooms/spaces/mod.rs | 10 +- src/service/rooms/state_accessor/mod.rs | 13 ++ 12 files changed, 284 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4148394..487780d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2111,7 +2111,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "assign", "js_int", @@ -2129,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "assign", "bytes", @@ -2157,7 +2157,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "base64 0.21.2", "bytes", @@ -2185,7 +2185,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "js_int", "ruma-common", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = 
"git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "js_int", "thiserror", @@ -2205,7 +2205,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "js_int", "ruma-common", @@ -2215,7 +2215,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "once_cell", "proc-macro-crate", @@ -2230,7 +2230,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "js_int", "ruma-common", @@ -2241,7 +2241,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "base64 0.21.2", "ed25519-dalek", @@ -2257,7 +2257,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b" +source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bc29c5a..a01f410 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,9 +26,9 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "38294bd5206498c02b1001227d65654eb548308b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", 
"unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "38294bd5206498c02b1001227d65654eb548308b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } # Async runtime and utilities tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] } diff --git a/README.md b/README.md index f73f6aa..8fabefd 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Thanks to the contributors to Conduit and all libraries we use, for example: If you run into any question, feel free to - Ask us in `#conduit:fachschaften.org` on Matrix - Write an E-Mail to `conduit@koesters.xyz` -- Send an direct message to `timo@fachschaften.org` on Matrix +- Send an direct message to `timokoesters@fachschaften.org` on Matrix - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) #### Donate diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index e132210..df1ac40 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -203,17 +203,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomName, "")? 
- .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomNameEventContent| c.name) - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - }) - })?, + name: services().rooms.state_accessor.get_name(&room_id)?, num_joined_members: services() .rooms .state_cache diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index dd75347..bc89a4c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,6 @@ -use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma, RumaResponse}; +use crate::{ + service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse, +}; use ruma::{ api::client::{ filter::{FilterDefinition, LazyLoadOptions}, @@ -8,6 +10,7 @@ use ruma::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, }, + v4::SlidingOp, DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, @@ -17,10 +20,10 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::Raw, - DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UserId, + uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, sync::Arc, time::Duration, }; @@ -199,7 +202,7 @@ async fn sync_helper( let mut joined_rooms = BTreeMap::new(); let since = body .since - .clone() + .as_ref() .and_then(|string| string.parse().ok()) .unwrap_or(0); let sincecount = PduCount::Normal(since); @@ -581,43 +584,7 @@ async fn load_joined_room( drop(insert_lock); } - let timeline_pdus; - let limited; - if services() - .rooms - .timeline - .last_timeline_count(&sender_user, &room_id)? - > sincecount - { - let mut non_timeline_pdus = services() - .rooms - .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pducount, _)| pducount > &sincecount); - - // Take the last 10 events for the timeline - timeline_pdus = non_timeline_pdus - .by_ref() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect::>(); - - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - limited = non_timeline_pdus.next().is_some(); - } else { - timeline_pdus = Vec::new(); - limited = false; - } + let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?; let send_notification_counts = !timeline_pdus.is_empty() || services() @@ -1132,6 +1099,52 @@ async fn load_joined_room( }) } +fn load_timeline( + sender_user: &UserId, + room_id: &RoomId, + sincecount: PduCount, + limit: u64, +) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { + let timeline_pdus; + let limited; + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > sincecount + { + let mut non_timeline_pdus = services() + .rooms + .timeline + .pdus_until(&sender_user, &room_id, PduCount::max())? 
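// `load_timeline` factors out the timeline-window logic used by both sync endpoints:
// walk events newest-to-oldest back to the `since` position, keep at most `limit` of
// them (returned oldest-to-newest), and report `limited = true` when more events
// existed than fit in the window. A minimal sketch of that contract, with events
// reduced to plain integers (a simplification of the real PduCount/PduEvent types):

fn take_timeline_window(newest_first: Vec<u64>, limit: usize) -> (Vec<u64>, bool) {
    let mut events = newest_first;
    // Anything beyond `limit` means the response is "limited" and the client
    // should backfill the rest via /messages.
    let limited = events.len() > limit;
    events.truncate(limit);
    events.reverse(); // oldest first, ready to be appended to the timeline
    (events, limited)
}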
+ .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pducount, _)| pducount > &sincecount); + + // Take the last events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(limit as usize) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } + Ok((timeline_pdus, limited)) +} + fn share_encrypted_room( sender_user: &UserId, user_id: &UserId, @@ -1155,3 +1168,178 @@ fn share_encrypted_room( }) .any(|encrypted| encrypted)) } + +pub async fn sync_events_v4_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let body = dbg!(body.body); + + // Setup watchers, so if there's no response, we can wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let next_batch = services().globals.current_count()?; + + let since = body + .pos + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + let sincecount = PduCount::Normal(since); + + let initial = since == 0; + + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .filter_map(|r| r.ok()) + .collect::>(); + + let mut lists = BTreeMap::new(); + let mut todo_rooms = BTreeMap::new(); // and required state + + for (list_id, list) in body.lists { + if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { + continue; + } + + lists.insert( + list_id, + sync_events::v4::SyncList { + ops: list + .ranges + .into_iter() + .map(|mut r| { + r.0 = + r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1)); + r.1 = + r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1)); + let room_ids = all_joined_rooms + [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] + .to_vec(); + todo_rooms.extend(room_ids.iter().cloned().map(|r| { + let limit = list + .room_details + .timeline_limit + .map_or(10, u64::from) + .min(100); + (r, (list.room_details.required_state.clone(), limit)) + })); + sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some(r.clone()), + index: None, + room_ids, + room_id: None, + } + }) + .collect(), + count: UInt::from(all_joined_rooms.len() as u32), + }, + ); + } + + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit)) in todo_rooms { + let (timeline_pdus, limited) = + load_timeline(&sender_user, &room_id, sincecount, timeline_limit)?; + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let required_state = required_state_request + .iter() + .map(|state| { + services() + .rooms + .state_accessor + .room_state_get(&room_id, &state.0, &state.1) + }) + .filter_map(|r| r.ok()) + .filter_map(|o| o) + .map(|state| state.to_sync_state_event()) + .collect(); + + rooms.insert( + room_id.clone(), + sync_events::v4::SlidingSyncRoom { + name: services().rooms.state_accessor.get_name(&room_id)?, + initial: Some(initial), + is_dm: None, + invite_state: None, + unread_notifications: UnreadNotificationsCount { + highlight_count: None, + notification_count: None, + }, + timeline: room_events, + required_state, + prev_batch: None, + limited, + joined_count: 
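// Each sliding-sync list request carries index ranges into the server's list of
// joined rooms; the code above clamps every range to the actual room count before
// slicing, so a client asking for rooms 0..=99 of a 5-room account simply gets all
// five. A sketch of that clamping with plain integers instead of ruma's UInt
// (function name and the empty-list guard are illustrative):

fn clamp_window(start: u64, end: u64, room_count: u64) -> Option<(u64, u64)> {
    if room_count == 0 {
        return None; // nothing to slice
    }
    let max_index = room_count - 1;
    let start = start.min(max_index);
    let end = end.clamp(start, max_index);
    Some((start, end))
}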
Some( + (services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0) as u32) + .into(), + ), + invited_count: Some( + (services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0) as u32) + .into(), + ), + num_live: None, + }, + ); + } + + if rooms + .iter() + .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or(Duration::from_secs(30)); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + let _ = tokio::time::timeout(duration, watcher).await; + } + + Ok(dbg!(sync_events::v4::Response { + initial: initial, + txn_id: body.txn_id.clone(), + pos: next_batch.to_string(), + lists, + rooms, + extensions: sync_events::v4::Extensions { + to_device: None, + e2ee: sync_events::v4::E2EE { + device_lists: DeviceLists { + changed: Vec::new(), + left: Vec::new(), + }, + device_one_time_keys_count: BTreeMap::new(), + device_unused_fallback_key_types: None, + }, + account_data: sync_events::v4::AccountData { + global: Vec::new(), + rooms: BTreeMap::new(), + }, + receipts: sync_events::v4::Receipts { + rooms: BTreeMap::new(), + }, + typing: sync_events::v4::Typing { + rooms: BTreeMap::new(), + }, + }, + delta_token: None, + })) +} diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index b4f03f4..797b952 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -1,8 +1,9 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use ruma::api::client::discovery::get_supported_versions; +use axum::{response::IntoResponse, Json}; +use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind}; -use crate::{Result, Ruma}; +use crate::{services, Error, Result, Ruma}; /// # `GET /_matrix/client/versions` /// @@ -31,3 +32,18 @@ pub async fn get_supported_versions_route( Ok(resp) } + +/// # `GET /.well-known/matrix/client` +pub async fn well_known_client_route( + _body: Ruma, +) -> Result { + let client_url = match services().globals.well_known_client() { + Some(url) => url.clone(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }; + + Ok(Json(serde_json::json!({ + "m.homeserver": {"base_url": client_url}, + "org.matrix.msc3575.proxy": {"url": client_url} + }))) +} diff --git a/src/config/mod.rs b/src/config/mod.rs index f922282..4dad9f7 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -54,6 +54,7 @@ pub struct Config { pub allow_unstable_room_versions: bool, #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, + pub well_known_client: Option, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] diff --git a/src/main.rs b/src/main.rs index ea5572e..579eeb1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -357,6 +357,7 @@ fn routes() -> Router { .put(client_server::send_state_event_for_empty_key_route), ) .ruma_route(client_server::sync_events_route) + .ruma_route(client_server::sync_events_v4_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) .ruma_route(client_server::search_events_route) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index e4affde..5326b7a 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -342,6 +342,10 @@ impl Service { r } + pub fn well_known_client(&self) -> &Option { + 
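// The "hang a few seconds" block above is the long-polling half of sync: if no room
// produced any data, the handler waits on the watcher future created at the top of
// the handler, but never longer than the client's requested timeout capped at 30
// seconds. A stand-alone sketch of that wait (only tokio is assumed; the watcher is
// any future that resolves when new data arrives):

use std::time::Duration;

async fn wait_for_new_data<F: std::future::Future<Output = ()>>(
    watcher: F,
    client_timeout: Option<Duration>,
) {
    let duration = client_timeout
        .unwrap_or(Duration::from_secs(30))
        .min(Duration::from_secs(30));
    // An elapsed timeout is not an error here; it just means "answer with what we have".
    let _ = tokio::time::timeout(duration, watcher).await;
}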
&self.config.well_known_client + } + pub fn shutdown(&self) { self.shutdown.store(true, atomic::Ordering::Relaxed); // On shutdown diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index d4acaa5..5e4281d 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -270,21 +270,7 @@ impl Service { notifi.sender_display_name = services().users.displayname(&event.sender)?; - let room_name = if let Some(room_name_pdu) = services() - .rooms - .state_accessor - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid room name event in database.") - })? - .name - } else { - None - }; - - notifi.room_name = room_name; + notifi.room_name = services().rooms.state_accessor.get_name(&event.room_id)?; self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 76ba6c5..380f86c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -273,15 +273,7 @@ impl Service { Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomName, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomNameEventContent| c.name) - .map_err(|_| Error::bad_database("Invalid room name event in database.")) - })?, + name: services().rooms.state_accessor.get_name(&room_id)?, num_joined_members: services() .rooms .state_cache diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a25a8b5..9d071a5 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -11,6 +11,7 @@ use ruma::{ room::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, }, StateEventType, }, @@ -269,4 +270,16 @@ impl Service { ) -> Result>> { self.db.room_state_get(room_id, event_type, state_key) } + + pub fn get_name(&self, room_id: &RoomId) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomName, "")? 
+ .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomNameEventContent| c.name) + .map_err(|_| Error::bad_database("Invalid room name event in database.")) + }) + } } From 4b7d3e24dd0f8d1d9db2f7ba3ea2103a880a215c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 16:24:57 +0200 Subject: [PATCH 1408/1727] bump ruma --- Cargo.lock | 63 ++++++++++++++++++++++++++++++++++-------------------- Cargo.toml | 4 ++-- 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 487780d..3480c01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -422,6 +422,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const_panic" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -1167,7 +1173,6 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] @@ -1178,6 +1183,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", + "serde", ] [[package]] @@ -1212,9 +1218,9 @@ checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" -version = "0.10.5" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] @@ -1283,25 +1289,30 @@ dependencies = [ [[package]] name = "konst" -version = "0.2.19" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "330f0e13e6483b8c34885f7e6c9f19b1a7bd449c673fbb948a51c99d66ef74f4" +checksum = "1d9a8bb6c7c71d151b25936b03e012a4c00daea99e3a3797c6ead66b0a0d55e2" dependencies = [ - "konst_macro_rules", + "const_panic", + "konst_kernel", "konst_proc_macros", + "typewit", ] [[package]] -name = "konst_macro_rules" -version = "0.2.19" +name = "konst_kernel" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" +checksum = "55d2ab266022e7309df89ed712bddc753e3a3c395c3ced1bb2e4470ec2a8146d" +dependencies = [ + "typewit", +] [[package]] name = "konst_proc_macros" -version = "0.2.11" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "984e109462d46ad18314f10e392c286c3d47bce203088a09012de1015b45b737" +checksum = "4e28ab1dc35e09d60c2b8c90d12a9a8d9666c876c10a3739a3196db0103b6043" [[package]] name = "lazy_static" @@ -2111,7 +2122,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "assign", "js_int", @@ -2129,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = 
"git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2151,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "assign", "bytes", @@ -2157,13 +2168,13 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "base64 0.21.2", "bytes", "form_urlencoded", "http", - "indexmap 1.9.3", + "indexmap 2.0.0", "js_int", "js_option", "konst", @@ -2185,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "js_int", "ruma-common", @@ -2196,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "js_int", "thiserror", @@ -2205,7 +2216,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "js_int", "ruma-common", @@ -2215,7 +2226,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "once_cell", "proc-macro-crate", @@ -2230,7 +2241,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "js_int", "ruma-common", @@ -2241,7 +2252,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = 
"git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "base64 0.21.2", "ed25519-dalek", @@ -2257,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/timokoesters/ruma?rev=4ec9c69bb7e09391add2382b3ebac97b6e8f4c64#4ec9c69bb7e09391add2382b3ebac97b6e8f4c64" +source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" dependencies = [ "itertools", "js_int", @@ -3152,6 +3163,12 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +[[package]] +name = "typewit" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4061a10d4d8f3081a8ccc025182afd8434302d8d4b4503ec6d8510d09df08c2d" + [[package]] name = "uncased" version = "0.9.9" diff --git a/Cargo.toml b/Cargo.toml index a01f410..ae7de59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,8 +26,8 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/ruma/ruma", rev = "38294bd5206498c02b1001227d65654eb548308b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "07bc06038fded40d4e9180637f056d256f9a1fbc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } # Async runtime and utilities From 78e7b711df213559150b5c6e7e7da1967d353e23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 16:25:33 +0200 Subject: [PATCH 1409/1727] fix: better sliding sync --- src/api/client_server/sync.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index bc89a4c..fed4fb7 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs 
@@ -23,7 +23,7 @@ use ruma::{ uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; use std::{ - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, sync::Arc, time::Duration, }; @@ -1246,6 +1246,18 @@ pub async fn sync_events_v4_route( let (timeline_pdus, limited) = load_timeline(&sender_user, &room_id, sincecount, timeline_limit)?; + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + })) + })?; + let room_events: Vec<_> = timeline_pdus .iter() .map(|(_, pdu)| pdu.to_sync_room_event()) @@ -1277,7 +1289,7 @@ pub async fn sync_events_v4_route( }, timeline: room_events, required_state, - prev_batch: None, + prev_batch, limited, joined_count: Some( (services() From c17187777f5f3bb06183ce423d990bc1c1061929 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 16:26:36 +0200 Subject: [PATCH 1410/1727] fix: never try federation with self --- src/api/server_server.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index adb5f1f..0177f2a 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -123,6 +123,12 @@ where return Err(Error::bad_config("Federation is disabled.")); } + if destination == services().globals.server_name() { + return Err(Error::bad_config( + "Won't send federation request to ourselves", + )); + } + debug!("Preparing to send request to {destination}"); let mut write_destination_to_cache = false; From edd4a3733fb6cf842155441eef86435efdd1cc21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 16:27:42 +0200 Subject: [PATCH 1411/1727] fix: actually clear memory in the admin commands --- src/database/key_value/globals.rs | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index ab3dfe0..1e02459 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,7 +1,8 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; +use lru_cache::LruCache; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, @@ -148,28 +149,36 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n" fn clear_caches(&self, amount: u32) { if amount > 0 { - self.pdu_cache.lock().unwrap().clear(); + let c = &mut *self.pdu_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); } if amount > 1 { - self.shorteventid_cache.lock().unwrap().clear(); + let c = &mut *self.shorteventid_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); } if amount > 2 { - self.auth_chain_cache.lock().unwrap().clear(); + let c = &mut *self.auth_chain_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); } if amount > 3 { - self.eventidshort_cache.lock().unwrap().clear(); + let c = &mut *self.eventidshort_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); } if amount > 4 { - self.statekeyshort_cache.lock().unwrap().clear(); + let c = &mut *self.statekeyshort_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); } if amount > 5 { - self.our_real_users_cache.write().unwrap().clear(); 
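// The fix replaces `.clear()` with constructing a brand-new cache of the same
// capacity: dropping the old cache releases its allocation, whereas clearing keeps
// the backing storage around, which is why the commit is titled "actually clear
// memory". A sketch of the pattern for the LruCache case, using the `lru_cache`
// crate that the file imports (the helper function itself is illustrative):

use lru_cache::LruCache;
use std::hash::Hash;

fn reset_cache<K: Eq + Hash, V>(cache: &mut LruCache<K, V>) {
    // Replace the cache wholesale; the old instance (and its memory) is dropped here.
    *cache = LruCache::new(cache.capacity());
}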
+ let c = &mut *self.our_real_users_cache.write().unwrap(); + *c = HashMap::new(); } if amount > 6 { - self.appservice_in_room_cache.write().unwrap().clear(); + let c = &mut *self.appservice_in_room_cache.write().unwrap(); + *c = HashMap::new(); } if amount > 7 { - self.lasttimelinecount_cache.lock().unwrap().clear(); + let c = &mut *self.lasttimelinecount_cache.lock().unwrap(); + *c = HashMap::new(); } } From 0b4e3de9c0135258eb68a0fa3cccdfff45de81d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 16:28:08 +0200 Subject: [PATCH 1412/1727] fix: spaces with restricted rooms --- src/service/rooms/spaces/mod.rs | 125 +++++++++++++++++++------------- 1 file changed, 74 insertions(+), 51 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 380f86c..36fa1fc 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -5,11 +5,10 @@ use ruma::{ api::{ client::{ error::ErrorKind, - space::{get_hierarchy, SpaceHierarchyRoomsChunk, SpaceRoomJoinRule}, + space::{get_hierarchy, SpaceHierarchyRoomsChunk}, }, federation, }, - directory::PublicRoomJoinRule, events::{ room::{ avatar::RoomAvatarEventContent, @@ -18,11 +17,11 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, - name::RoomNameEventContent, topic::RoomTopicEventContent, }, StateEventType, }, + space::SpaceRoomJoinRule, OwnedRoomId, RoomId, UserId, }; @@ -30,10 +29,15 @@ use tracing::{debug, error, warn}; use crate::{services, Error, PduEvent, Result}; +pub enum CachedJoinRule { + Simplified(SpaceRoomJoinRule), + Full(JoinRule), +} + pub struct CachedSpaceChunk { chunk: SpaceHierarchyRoomsChunk, children: Vec, - join_rule: JoinRule, + join_rule: CachedJoinRule, } pub struct Service { @@ -79,9 +83,15 @@ impl Service { .as_ref() { if let Some(cached) = cached { - if let Some(_join_rule) = - self.handle_join_rule(&cached.join_rule, sender_user, ¤t_room)? - { + let allowed = match &cached.join_rule { + CachedJoinRule::Simplified(s) => { + self.handle_simplified_join_rule(s, sender_user, ¤t_room)? + } + CachedJoinRule::Full(f) => { + self.handle_join_rule(f, sender_user, ¤t_room)? + } + }; + if allowed { if left_to_skip > 0 { left_to_skip -= 1; } else { @@ -152,7 +162,7 @@ impl Service { Some(CachedSpaceChunk { chunk, children: children_ids.clone(), - join_rule, + join_rule: CachedJoinRule::Full(join_rule), }), ); } @@ -182,7 +192,6 @@ impl Service { .await { warn!("Got response from {server} for /hierarchy\n{response:?}"); - let join_rule = self.translate_pjoinrule(&response.room.join_rule)?; let chunk = SpaceHierarchyRoomsChunk { canonical_alias: response.room.canonical_alias, name: response.room.name, @@ -192,7 +201,7 @@ impl Service { world_readable: response.room.world_readable, guest_can_join: response.room.guest_can_join, avatar_url: response.room.avatar_url, - join_rule: self.translate_sjoinrule(&response.room.join_rule)?, + join_rule: response.room.join_rule.clone(), room_type: response.room.room_type, children_state: response.room.children_state, }; @@ -202,9 +211,11 @@ impl Service { .map(|c| c.room_id.clone()) .collect::>(); - if let Some(_join_rule) = - self.handle_join_rule(&join_rule, sender_user, ¤t_room)? - { + if self.handle_simplified_join_rule( + &response.room.join_rule, + sender_user, + ¤t_room, + )? 
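// The cache now stores either the full local join rule or only the simplified
// SpaceRoomJoinRule learned from a remote /hierarchy response, and the code above
// dispatches the visibility check on whichever variant was cached. A condensed
// sketch of that check with the rule types reduced to strings (a simplification of
// the ruma enums; the real code translates the full rule to its simplified form first):

enum CachedRule {
    Simplified(String), // e.g. "public", learned over federation
    Full(String),       // full m.room.join_rules content, known locally
}

fn chunk_visible(rule: &CachedRule, viewer_is_joined: bool) -> bool {
    let simplified = match rule {
        CachedRule::Simplified(s) => s.as_str(),
        CachedRule::Full(f) => f.as_str(),
    };
    match simplified {
        // Public and knockable rooms are visible to everyone in the hierarchy.
        "public" | "knock" => true,
        // Invite-only rooms are only shown to users already joined to them.
        "invite" => viewer_is_joined,
        // Restricted rules are not evaluated yet (a TODO in the patch), so hide the room.
        _ => false,
    }
}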
{ if left_to_skip > 0 { left_to_skip -= 1; } else { @@ -220,7 +231,7 @@ impl Service { Some(CachedSpaceChunk { chunk, children, - join_rule, + join_rule: CachedJoinRule::Simplified(response.room.join_rule), }), ); @@ -349,15 +360,17 @@ impl Service { }) .transpose()? .unwrap_or(JoinRule::Invite); - self.handle_join_rule(&join_rule, sender_user, room_id)? - .ok_or_else(|| { - debug!("User is not allowed to see room {room_id}"); - // This error will be caught later - Error::BadRequest( - ErrorKind::Forbidden, - "User is not allowed to see the room", - ) - })? + + if !self.handle_join_rule(&join_rule, sender_user, room_id)? { + debug!("User is not allowed to see room {room_id}"); + // This error will be caught later + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User is not allowed to see the room", + )); + } + + self.translate_joinrule(&join_rule)? }, room_type: services() .rooms @@ -378,20 +391,35 @@ impl Service { }) } - fn translate_pjoinrule(&self, join_rule: &PublicRoomJoinRule) -> Result { + fn translate_joinrule(&self, join_rule: &JoinRule) -> Result { match join_rule { - PublicRoomJoinRule::Knock => Ok(JoinRule::Knock), - PublicRoomJoinRule::Public => Ok(JoinRule::Public), + JoinRule::Invite => Ok(SpaceRoomJoinRule::Invite), + JoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), + JoinRule::Private => Ok(SpaceRoomJoinRule::Private), + JoinRule::Restricted(_) => Ok(SpaceRoomJoinRule::Restricted), + JoinRule::KnockRestricted(_) => Ok(SpaceRoomJoinRule::KnockRestricted), + JoinRule::Public => Ok(SpaceRoomJoinRule::Public), _ => Err(Error::BadServerResponse("Unknown join rule")), } } - fn translate_sjoinrule(&self, join_rule: &PublicRoomJoinRule) -> Result { - match join_rule { - PublicRoomJoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), - PublicRoomJoinRule::Public => Ok(SpaceRoomJoinRule::Public), - _ => Err(Error::BadServerResponse("Unknown join rule")), - } + fn handle_simplified_join_rule( + &self, + join_rule: &SpaceRoomJoinRule, + sender_user: &UserId, + room_id: &RoomId, + ) -> Result { + let allowed = match join_rule { + SpaceRoomJoinRule::Public => true, + SpaceRoomJoinRule::Knock => true, + SpaceRoomJoinRule::Invite => services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)?, + _ => false, + }; + + Ok(allowed) } fn handle_join_rule( @@ -399,30 +427,25 @@ impl Service { join_rule: &JoinRule, sender_user: &UserId, room_id: &RoomId, - ) -> Result> { + ) -> Result { + if self.handle_simplified_join_rule( + &self.translate_joinrule(join_rule)?, + sender_user, + room_id, + )? { + return Ok(true); + } + match join_rule { - JoinRule::Public => Ok::<_, Error>(Some(SpaceRoomJoinRule::Public)), - JoinRule::Knock => Ok(Some(SpaceRoomJoinRule::Knock)), - JoinRule::Invite => { - if services() - .rooms - .state_cache - .is_joined(sender_user, &room_id)? - { - Ok(Some(SpaceRoomJoinRule::Invite)) - } else { - Ok(None) - } - } - JoinRule::Restricted(_r) => { + JoinRule::Restricted(_) => { // TODO: Check rules - Ok(None) + Ok(false) } - JoinRule::KnockRestricted(_r) => { + JoinRule::KnockRestricted(_) => { // TODO: Check rules - Ok(None) + Ok(false) } - _ => Ok(None), + _ => Ok(false), } } } From 56f0f3dfa44834478a4157cb91f0c860f844953f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 10 Jul 2023 11:03:20 -0700 Subject: [PATCH 1413/1727] only use musl on x86_64 Since that's all I've tested it on. Apparently this caused issues on aarch64 even though it allegedly shouldn't. 
--- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 5de5621..ef1a04b 100644 --- a/flake.nix +++ b/flake.nix @@ -25,8 +25,8 @@ let pkgs = nixpkgs.legacyPackages.${system}; - # Use mold on Linux - stdenv = if pkgs.stdenv.isLinux then + # Use mold where possible + stdenv = if pkgs.stdenv.isLinux && pkgs.stdenv.isx86_64 then pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv else pkgs.stdenv; From c3966f501c5dbb495ca4ef8c044fac7049645594 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jul 2023 23:10:27 +0200 Subject: [PATCH 1414/1727] fix: nheko e2ee verification bug --- src/api/client_server/keys.rs | 12 ------------ src/database/key_value/users.rs | 1 - 2 files changed, 13 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index ba89ece..21f71b6 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -151,18 +151,6 @@ pub async fn upload_signatures_route( let key = serde_json::to_value(key) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; - let is_signed_key = match key.get("usage") { - Some(usage) => usage - .as_array() - .map(|usage| !usage.contains(&json!("master"))) - .unwrap_or(false), - None => true, - }; - - if !is_signed_key { - continue; - } - for signature in key .get("signatures") .ok_or(Error::BadRequest( diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 1cabab0..359a072 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -592,7 +592,6 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), )?; - // TODO: Should we notify about this change? self.mark_device_key_update(target_id)?; Ok(()) From 17180a3e0892f164b0f255c563e8d4fb13e2b2aa Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 13 Jul 2023 16:54:56 +0000 Subject: [PATCH 1415/1727] capitalize names --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index ef93af2..1b9e046 100644 --- a/docker/README.md +++ b/docker/README.md @@ -4,7 +4,7 @@ ## Docker -To run conduit with docker you can either build the image yourself or pull it from a registry. +To run Conduit with Docker you can either build the image yourself or pull it from a registry. 
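As context for the nheko verification fix a little further up: the block removed from `upload_signatures_route` inspected a key's `usage` array and skipped anything flagged as a master key. Here is a standalone sketch of that kind of JSON inspection, using serde_json directly rather than the Conduit types (the helper name is hypothetical).

```rust
use serde_json::{json, Value};

/// Returns true when a cross-signing key's `usage` array marks it as a
/// master key, which is the property the removed filter keyed on.
fn is_master_key(key: &Value) -> bool {
    key.get("usage")
        .and_then(Value::as_array)
        .map(|usage| usage.contains(&json!("master")))
        .unwrap_or(false)
}

fn main() {
    let master = json!({ "usage": ["master"], "keys": {} });
    let self_signing = json!({ "usage": ["self_signing"], "keys": {} });
    assert!(is_master_key(&master));
    assert!(!is_master_key(&self_signing));
    println!("usage inspection behaves as expected");
}
```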
### Use a registry From 24402312c58855679ccfe58b0d3d27ae041b4336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 15 Jul 2023 23:43:25 +0200 Subject: [PATCH 1416/1727] fix: could not verify own events --- src/service/globals/mod.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 5326b7a..7d61829 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::serde::Base64; use ruma::{ OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, }; @@ -316,7 +317,19 @@ impl Service { &self, origin: &ServerName, ) -> Result> { - self.db.signing_keys_for(origin) + let mut keys = self.db.signing_keys_for(origin)?; + if origin == self.server_name() { + keys.insert( + format!("ed25519:{}", services().globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(self.keypair.public_key().to_vec()), + }, + ); + } + + Ok(keys) } pub fn database_version(&self) -> Result { From a9ba067e7758207d1411b24c537cf755608632e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 16 Jul 2023 16:50:03 +0200 Subject: [PATCH 1417/1727] fix: e2ee over federation --- src/api/client_server/directory.rs | 1 - src/api/client_server/keys.rs | 34 ++++++++-- src/api/server_server.rs | 10 +-- src/database/key_value/users.rs | 100 +++++++++++++++++------------ src/service/pusher/mod.rs | 5 +- src/service/users/data.rs | 16 +++++ src/service/users/mod.rs | 34 ++++++++-- 7 files changed, 137 insertions(+), 63 deletions(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index df1ac40..a812dbc 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -20,7 +20,6 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, - name::RoomNameEventContent, topic::RoomTopicEventContent, }, StateEventType, diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 21f71b6..3e03221 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -311,15 +311,17 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = services() - .users - .get_master_key(user_id, &allowed_signatures)? + if let Some(master_key) = + services() + .users + .get_master_key(sender_user, user_id, &allowed_signatures)? { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = services() - .users - .get_self_signing_key(user_id, &allowed_signatures)? + if let Some(self_signing_key) = + services() + .users + .get_self_signing_key(sender_user, user_id, &allowed_signatures)? { self_signing_keys.insert(user_id.to_owned(), self_signing_key); } @@ -357,7 +359,25 @@ pub(crate) async fn get_keys_helper bool>( while let Some((server, response)) = futures.next().await { match response { Ok(response) => { - master_keys.extend(response.master_keys); + for (user, masterkey) in response.master_keys { + let (master_key_id, mut master_key) = + services().users.parse_master_key(&user, &masterkey)?; + + if let Some(our_master_key) = services().users.get_key( + &master_key_id, + sender_user, + &user, + &allowed_signatures, + )? 
{ + let (_, our_master_key) = + services().users.parse_master_key(&user, &our_master_key)?; + master_key.signatures.extend(our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + master_keys.insert(user, raw); + } + self_signing_keys.extend(response.self_signing_keys); device_keys.extend(response.device_keys); } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0177f2a..2179b16 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1806,12 +1806,14 @@ pub async fn get_devices_route( }) }) .collect(), - master_key: services() - .users - .get_master_key(&body.user_id, &|u| u.server_name() == sender_servername)?, + master_key: services().users.get_master_key(None, &body.user_id, &|u| { + u.server_name() == sender_servername + })?, self_signing_key: services() .users - .get_self_signing_key(&body.user_id, &|u| u.server_name() == sender_servername)?, + .get_self_signing_key(None, &body.user_id, &|u| { + u.server_name() == sender_servername + })?, }) } diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 359a072..0301cda 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -451,31 +451,10 @@ impl service::users::Data for KeyValueDatabase { user_signing_key: &Option>, ) -> Result<()> { // TODO: Check signatures - let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - // Master key - let mut master_key_ids = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? - .keys - .into_values(); - - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); + let (master_key_key, _) = self.parse_master_key(user_id, master_key)?; self.keyid_key .insert(&master_key_key, master_key.json().get().as_bytes())?; @@ -690,45 +669,80 @@ impl service::users::Data for KeyValueDatabase { }) } + fn parse_master_key( + &self, + user_id: &UserId, + master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + let master_key = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; + let mut master_key_ids = master_key.keys.values(); + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained no key.", + ))?; + if master_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained more than one key.", + )); + } + let mut master_key_key = prefix.clone(); + master_key_key.extend_from_slice(master_key_id.as_bytes()); + Ok((master_key_key, master_key)) + } + + fn get_key( + &self, + key: &[u8], + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures( + &mut cross_signing_key, + sender_user, + 
user_id, + allowed_signatures, + )?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) + }) + } + fn get_master_key( &self, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) + self.get_key(&key, sender_user, user_id, allowed_signatures) }) } fn get_self_signing_key( &self, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) + self.get_key(&key, sender_user, user_id, allowed_signatures) }) } @@ -929,6 +943,8 @@ impl service::users::Data for KeyValueDatabase { } } +impl KeyValueDatabase {} + /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) 
returns an error that username will be skipped diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 5e4281d..315c5ef 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -13,10 +13,7 @@ use ruma::{ }, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - StateEventType, TimelineEventType, - }, + events::{room::power_levels::RoomPowerLevelsEventContent, StateEventType, TimelineEventType}, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, uint, RoomId, UInt, UserId, diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 8553210..d01e070 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -136,14 +136,30 @@ pub trait Data: Send + Sync { device_id: &DeviceId, ) -> Result>>; + fn parse_master_key( + &self, + user_id: &UserId, + master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)>; + + fn get_key( + &self, + key: &[u8], + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + fn get_master_key( &self, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; fn get_self_signing_key( &self, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6be5c89..2311c30 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -226,20 +226,43 @@ impl Service { self.db.get_device_keys(user_id, device_id) } - pub fn get_master_key( + pub fn parse_master_key( &self, user_id: &UserId, + master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)> { + self.db.parse_master_key(user_id, master_key) + } + + pub fn get_key( + &self, + key: &[u8], + sender_user: Option<&UserId>, + user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { - self.db.get_master_key(user_id, allowed_signatures) + self.db + .get_key(key, sender_user, user_id, allowed_signatures) + } + + pub fn get_master_key( + &self, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db + .get_master_key(sender_user, user_id, allowed_signatures) } pub fn get_self_signing_key( &self, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { - self.db.get_self_signing_key(user_id, allowed_signatures) + self.db + .get_self_signing_key(sender_user, user_id, allowed_signatures) } pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { @@ -342,6 +365,7 @@ impl Service { /// Ensure that a user only sees signatures from themselves and the target user pub fn clean_signatures bool>( cross_signing_key: &mut serde_json::Value, + sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: F, ) -> Result<(), Error> { @@ -355,9 +379,9 @@ pub fn clean_signatures bool>( for (user, signature) in mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) { - let id = <&UserId>::try_from(user.as_str()) + let sid = <&UserId>::try_from(user.as_str()) .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if id == user_id || allowed_signatures(id) { + if sender_user == Some(user_id) || sid == user_id || allowed_signatures(sid) { signatures.insert(user, signature); } } From fa3b1fd9bd330169f91934257d3d5ca1a4398bf5 Mon 
Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 16 Jul 2023 13:37:40 -0700 Subject: [PATCH 1418/1727] update flake.lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'crane': 'github:ipetkov/crane/75f7d715f8088f741be9981405f6444e2d49efdd' (2023-06-13) → 'github:ipetkov/crane/8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e' (2023-07-07) • Updated input 'crane/rust-overlay': 'github:oxalica/rust-overlay/c535b4f3327910c96dcf21851bbdd074d0760290' (2023-06-03) → 'github:oxalica/rust-overlay/f9b92316727af9e6c7fee4a761242f7f46880329' (2023-07-03) • Updated input 'fenix': 'github:nix-community/fenix/df0a6e4ec44b4a276acfa5a96d2a83cb2dfdc791' (2023-06-17) → 'github:nix-community/fenix/39096fe3f379036ff4a5fa198950b8e79defe939' (2023-07-16) • Updated input 'fenix/rust-analyzer-src': 'github:rust-lang/rust-analyzer/a5a71c75e62a0eaa1b42a376f7cf3d348cb5dec6' (2023-06-16) → 'github:rust-lang/rust-analyzer/996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8' (2023-07-15) • Updated input 'flake-utils': 'github:numtide/flake-utils/a1720a10a6cfe8234c0e93907ffe81be440f4cef' (2023-05-31) → 'github:numtide/flake-utils/919d646de7be200f3bf08cb76ae1f09402b6f9b4' (2023-07-11) • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/04af42f3b31dba0ef742d254456dc4c14eedac86' (2023-06-17) → 'github:NixOS/nixpkgs/8acef304efe70152463a6399f73e636bcc363813' (2023-07-15) --- flake.lock | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/flake.lock b/flake.lock index b69bb8c..0065525 100644 --- a/flake.lock +++ b/flake.lock @@ -12,11 +12,11 @@ "rust-overlay": "rust-overlay" }, "locked": { - "lastModified": 1686621798, - "narHash": "sha256-FUwWszmSiDzUdTk8f69xwMoYlhdPaLvDaIYOE/y6VXc=", + "lastModified": 1688772518, + "narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=", "owner": "ipetkov", "repo": "crane", - "rev": "75f7d715f8088f741be9981405f6444e2d49efdd", + "rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e", "type": "github" }, "original": { @@ -33,11 +33,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1687004852, - "narHash": "sha256-wRSUs+v8xtIJaFlWO5NLFQjkq5+eYhxHHXnZKsZ9DpQ=", + "lastModified": 1689488573, + "narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=", "owner": "nix-community", "repo": "fenix", - "rev": "df0a6e4ec44b4a276acfa5a96d2a83cb2dfdc791", + "rev": "39096fe3f379036ff4a5fa198950b8e79defe939", "type": "github" }, "original": { @@ -67,11 +67,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", "type": "github" }, "original": { @@ -82,11 +82,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1686960236, - "narHash": "sha256-AYCC9rXNLpUWzD9hm+askOfpliLEC9kwAo7ITJc4HIw=", + "lastModified": 1689444953, + "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "04af42f3b31dba0ef742d254456dc4c14eedac86", + "rev": "8acef304efe70152463a6399f73e636bcc363813", "type": "github" }, "original": { @@ -107,11 +107,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1686936697, - "narHash": "sha256-mCoPr1nNWKpsoGMBFaK/sswkLloRCZuoWi2a+OKs3vk=", + 
"lastModified": 1689441253, + "narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "a5a71c75e62a0eaa1b42a376f7cf3d348cb5dec6", + "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8", "type": "github" }, "original": { @@ -133,11 +133,11 @@ ] }, "locked": { - "lastModified": 1685759304, - "narHash": "sha256-I3YBH6MS3G5kGzNuc1G0f9uYfTcNY9NYoRc3QsykLk4=", + "lastModified": 1688351637, + "narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "c535b4f3327910c96dcf21851bbdd074d0760290", + "rev": "f9b92316727af9e6c7fee4a761242f7f46880329", "type": "github" }, "original": { From abd8e1bf54ecd0f385d472914aff4ba2a5eeaf8c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 16 Jul 2023 13:38:33 -0700 Subject: [PATCH 1419/1727] nixpkgs' rocksdb is now new enough :) This reverts commit abd0a014e852d41d25320f6ccd19ac1de4156f96. --- flake.nix | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index ef1a04b..369759f 100644 --- a/flake.nix +++ b/flake.nix @@ -43,6 +43,10 @@ sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; + # The system's RocksDB + ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + # Shared between the package and the devShell nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook @@ -57,7 +61,9 @@ inherit stdenv - nativeBuildInputs; + nativeBuildInputs + ROCKSDB_INCLUDE_DIR + ROCKSDB_LIB_DIR; }; devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { @@ -65,6 +71,10 @@ # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; + inherit + ROCKSDB_INCLUDE_DIR + ROCKSDB_LIB_DIR; + # Development tools nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ cargo From 742331e054b3570e1f02f02162ee667afa1f8281 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 16 Jul 2023 13:39:13 -0700 Subject: [PATCH 1420/1727] Revert "only use musl on x86_64" This reverts commit 56f0f3dfa44834478a4157cb91f0c860f844953f. This shouldn't be needed anymore since [this][0] reached nixos-unstable. [0]: https://github.com/NixOS/nixpkgs/pull/242889 --- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 369759f..eb3a31c 100644 --- a/flake.nix +++ b/flake.nix @@ -25,8 +25,8 @@ let pkgs = nixpkgs.legacyPackages.${system}; - # Use mold where possible - stdenv = if pkgs.stdenv.isLinux && pkgs.stdenv.isx86_64 then + # Use mold on Linux + stdenv = if pkgs.stdenv.isLinux then pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv else pkgs.stdenv; From bd8fec3836158053714ebc4e9ed61a4451cf831b Mon Sep 17 00:00:00 2001 From: purplemeteorite Date: Fri, 21 Jul 2023 20:33:32 +0200 Subject: [PATCH 1421/1727] changed registry options 1. Recommended GitLab's own registry over Docker Hub. (Reason: https://gitlab.com/famedly/conduit/-/merge_requests/492#note_1457220261) 2. Added the development image :next to the list of options. 3. Displayed text for Docker Hub now contains "docker.io" as part of the link for easier copy-paste for podman users. Clicking on the link still takes to the website. 
--- docker/README.md | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/docker/README.md b/docker/README.md index 1b9e046..cd41291 100644 --- a/docker/README.md +++ b/docker/README.md @@ -9,16 +9,21 @@ To run Conduit with Docker you can either build the image yourself or pull it fr ### Use a registry -The image is available in the following registries: +OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry. + +| Registry | Image | Size | Notes | +| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. | +| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. | +| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. | -| Registry | Image | Size | -| --------------- | --------------------------------------------------------------- | --------------------- | -| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | [dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit [gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 -[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest +[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest +[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next + Use ```bash @@ -60,7 +65,7 @@ docker run -d -p 8448:6167 \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ - --name conduit matrixconduit/matrix-conduit:latest + --name conduit ``` or you can use [docker-compose](#docker-compose). 
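The `CONDUIT_*` variables in the `docker run` example above configure the server through the environment. A tiny, generic sketch of reading such a variable with a fallback default follows; it is an illustration only, not Conduit's actual configuration loader.

```rust
use std::env;

/// Read a setting from the environment, falling back to a default when the
/// variable is unset (hypothetical helper, for illustration).
fn setting(env_key: &str, default: &str) -> String {
    env::var(env_key).unwrap_or_else(|_| default.to_owned())
}

fn main() {
    // With `-e CONDUIT_MAX_CONCURRENT_REQUESTS="100"` as in the example
    // above, the environment value is used; otherwise the default applies.
    let max_requests: u32 = setting("CONDUIT_MAX_CONCURRENT_REQUESTS", "100")
        .parse()
        .unwrap_or(100);
    let log_filter = setting("CONDUIT_LOG", "warn");
    println!("max_concurrent_requests={max_requests}, log={log_filter}");
}
```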
From 6ae5143ff5b5d009beaff58c2559027269e4792c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 21 Jul 2023 12:12:37 -0700 Subject: [PATCH 1422/1727] only listen on IPv6 since that's what conduit does --- nix/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/README.md b/nix/README.md index f8537d5..bd6f096 100644 --- a/nix/README.md +++ b/nix/README.md @@ -179,7 +179,7 @@ in upstreams = { "backend_conduit" = { servers = { - "localhost:${toString config.services.matrix-conduit.settings.global.port}" = { }; + "[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { }; }; }; }; From 82f31d6b721d6ca979cd9dc98277b251b6d4c3f3 Mon Sep 17 00:00:00 2001 From: x4u <14617923-x4u@users.noreply.gitlab.com> Date: Sun, 23 Jul 2023 14:21:36 +0800 Subject: [PATCH 1423/1727] Replace nogroup with dedicated user group --- DEPLOY.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index e5e1530..b743338 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -117,8 +117,7 @@ After=network.target [Service] Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" User=conduit -Group=nogroup -# On RHEL: Group=nobody +Group=conduit Restart=always ExecStart=/usr/local/bin/matrix-conduit @@ -198,8 +197,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/matrix-conduit/ -sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ -# On RHEL: sudo chown -R conduit:nobody /var/lib/matrix-conduit/ +sudo chown -R conduit:conduit /var/lib/matrix-conduit/ sudo chmod 700 /var/lib/matrix-conduit/ ``` From 8cf408e96689afc3a03afd1209cdc9cdc68330d3 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 23 Jul 2023 12:14:59 +0200 Subject: [PATCH 1424/1727] Fix up permissions of the database path Also apply the database creation and ownership change on every installation and upgrade. --- debian/postinst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/debian/postinst b/debian/postinst index 73e554b..dfa599d 100644 --- a/debian/postinst +++ b/debian/postinst @@ -19,11 +19,11 @@ case "$1" in _matrix-conduit fi - # Create the database path if it does not exist yet. - if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then - mkdir -p "$CONDUIT_DATABASE_PATH" - chown _matrix-conduit "$CONDUIT_DATABASE_PATH" - fi + # Create the database path if it does not exist yet and fix up ownership + # and permissions. + mkdir -p "$CONDUIT_DATABASE_PATH" + chown _matrix-conduit "$CONDUIT_DATABASE_PATH" + chmod 700 "$CONDUIT_DATABASE_PATH" if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then # Write the debconf values in the config. From 433dad6ac2d7cba9146a403cddafdacfef6ceacc Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 23 Jul 2023 12:24:37 +0200 Subject: [PATCH 1425/1727] Turn README.Debian into a markdown file It is common to have a markdown file per deployment subdirectory. Still install it as `README.Debian` to `/usr/share/doc/matrix-conduit` as per Debian policy. Also update the link in the main `README.md` file. 
--- Cargo.toml | 2 +- README.md | 2 +- debian/{README.Debian => README.md} | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) rename debian/{README.Debian => README.md} (73%) diff --git a/Cargo.toml b/Cargo.toml index ae7de59..9196cf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -137,7 +137,7 @@ instead of a server that has high scalability.""" section = "net" priority = "optional" assets = [ - ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"], + ["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] diff --git a/README.md b/README.md index 8fabefd..52ea3c1 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit #### How can I deploy my own? - Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) -- Debian package: [debian/README.Debian](debian/README.Debian) +- Debian package: [debian/README.md](debian/README.md) - Nix/NixOS: [nix/README.md](nix/README.md) - Docker: [docker/README.md](docker/README.md) diff --git a/debian/README.Debian b/debian/README.md similarity index 73% rename from debian/README.Debian rename to debian/README.md index 5f63b5c..b0f8658 100644 --- a/debian/README.Debian +++ b/debian/README.md @@ -6,23 +6,23 @@ Configuration When installed, Debconf generates the configuration of the homeserver (host)name, the address and port it listens on. This configuration ends up in -/etc/matrix-conduit/conduit.toml. +`/etc/matrix-conduit/conduit.toml`. You can tweak more detailed settings by uncommenting and setting the variables -in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum +in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum file size for download/upload, enabling federation, etc. Running ------- -The package uses the matrix-conduit.service systemd unit file to start and +The package uses the `matrix-conduit.service` systemd unit file to start and stop Conduit. It loads the configuration file mentioned above to set up the environment before running the server. This package assumes by default that Conduit will be placed behind a reverse proxy such as Apache or nginx. This default deployment entails just listening -on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL -http://localhost:6167. +on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL +. At a later stage this packaging may support also setting up TLS and running stand-alone. In this case, however, you need to set up some certificates and From 3cd3d0e0ff30524c7531f0e35bb50604770c5403 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 23 Jul 2023 12:34:48 +0200 Subject: [PATCH 1426/1727] Add section about how to download/install/deploy This refers to `DEPLOY.md` as to not duplicate the information. --- debian/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/debian/README.md b/debian/README.md index b0f8658..443be76 100644 --- a/debian/README.md +++ b/debian/README.md @@ -1,6 +1,14 @@ Conduit for Debian ================== +Installation +------------ + +Information about downloading, building and deploying the Debian package, see +the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md). +All following sections until "Setting up the Reverse Proxy" be ignored because +this is handled automatically by the packaging. 
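On the database-path handling in the `debian/postinst` change a few patches back (`mkdir -p`, `chown`, `chmod 700`): the same create-then-restrict pattern is easy to express in Rust. The sketch below covers the directory part only; ownership changes need root and are left to the packaging scripts.

```rust
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

/// Create a directory if missing and make it readable by its owner only,
/// mirroring the `mkdir -p` plus `chmod 700` done in debian/postinst.
fn ensure_private_dir(path: &Path) -> std::io::Result<()> {
    fs::create_dir_all(path)?; // like `mkdir -p`
    let mut perms = fs::metadata(path)?.permissions();
    perms.set_mode(0o700); // like `chmod 700`
    fs::set_permissions(path, perms)
}

fn main() -> std::io::Result<()> {
    // Hypothetical path; the Debian package uses /var/lib/matrix-conduit/.
    ensure_private_dir(Path::new("/tmp/conduit-db-example"))?;
    println!("database directory created with owner-only permissions");
    Ok(())
}
```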
+ Configuration ------------- From b1a591a06ce40ba63da45bbdc7432ba63a005171 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 23 Jul 2023 12:37:47 +0200 Subject: [PATCH 1427/1727] Also create the conduit (system) group The `chown` command mentioned later in `DEPLOY.md` needs this group to exist. Also make sure this account cannot be used to login with by disabling its password and its shell. This is similar to how the Debian `postinst` script does this. --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index b743338..ec7dd46 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -81,7 +81,7 @@ you to make sure that the file permissions are correctly set up. In Debian or RHEL, you can use this command to create a Conduit user: ```bash -sudo adduser --system conduit --no-create-home +sudo adduser --system conduit --group --disable-login --no-create-home ``` ## Forwarding ports in the firewall or the router From caddc656fba15ef3e13f65f21a7e6f43eb42e786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 23 Jul 2023 21:57:11 +0200 Subject: [PATCH 1428/1727] slightly better sliding sync --- src/api/client_server/sync.rs | 120 ++++++++++++++++++--- src/service/mod.rs | 7 +- src/service/rooms/state_accessor/mod.rs | 15 +++ src/service/users/mod.rs | 137 +++++++++++++++++++++++- 4 files changed, 260 insertions(+), 19 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index fed4fb7..8883c16 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -23,7 +23,7 @@ use ruma::{ uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, sync::Arc, time::Duration, }; @@ -1174,8 +1174,7 @@ pub async fn sync_events_v4_route( ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); - let body = dbg!(body.body); - + let mut body = dbg!(body.body); // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); @@ -1188,7 +1187,21 @@ pub async fn sync_events_v4_route( .unwrap_or(0); let sincecount = PduCount::Normal(since); - let initial = since == 0; + if since == 0 { + if let Some(conn_id) = &body.conn_id { + services().users.forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ) + } + } + + let known_rooms = services().users.update_sync_request_with_cache( + sender_user.clone(), + sender_device.clone(), + &mut body, + ); let all_joined_rooms = services() .rooms @@ -1205,8 +1218,10 @@ pub async fn sync_events_v4_route( continue; } + let mut new_known_rooms = BTreeMap::new(); + lists.insert( - list_id, + list_id.clone(), sync_events::v4::SyncList { ops: list .ranges @@ -1219,14 +1234,27 @@ pub async fn sync_events_v4_route( let room_ids = all_joined_rooms [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] .to_vec(); - todo_rooms.extend(room_ids.iter().cloned().map(|r| { + new_known_rooms.extend(room_ids.iter().cloned().map(|r| (r, true))); + for room_id in &room_ids { + let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( + BTreeSet::new(), + 0, + true, + )); let limit = list .room_details .timeline_limit .map_or(10, u64::from) .min(100); - (r, (list.room_details.required_state.clone(), limit)) - 
})); + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + todo_room.1 = todo_room.1.min(limit); + if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true) + { + todo_room.2 = false; + } + } sync_events::v4::SyncOp { op: SlidingOp::Sync, range: Some(r.clone()), @@ -1239,12 +1267,36 @@ pub async fn sync_events_v4_route( count: UInt::from(all_joined_rooms.len() as u32), }, ); + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id, + new_known_rooms, + ); + } + } + + for (room_id, room) in body.room_subscriptions { + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, true)); + let limit = room.timeline_limit.map_or(10, u64::from).min(100); + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.min(limit); + todo_room.2 = false; } let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit)) in todo_rooms { + for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms { let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, sincecount, timeline_limit)?; + load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?; + + if *known && timeline_pdus.is_empty() { + continue; + } let prev_batch = timeline_pdus .first() @@ -1256,7 +1308,14 @@ pub async fn sync_events_v4_route( } PduCount::Normal(c) => c.to_string(), })) - })?; + })? + .or_else(|| { + if since != 0 { + Some(since.to_string()) + } else { + None + } + }); let room_events: Vec<_> = timeline_pdus .iter() @@ -1279,8 +1338,41 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services().rooms.state_accessor.get_name(&room_id)?, - initial: Some(initial), + name: services() + .rooms + .state_accessor + .get_name(&room_id)? + .or_else(|| { + // Heroes + let mut names = services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|r| r.ok()) + .filter(|member| member != &sender_user) + .map(|member| { + Ok::<_, Error>( + services() + .rooms + .state_accessor + .get_member(&room_id, &member)? 
+ .and_then(|memberevent| memberevent.displayname) + .unwrap_or(member.to_string()), + ) + }) + .filter_map(|r| r.ok()) + .take(5) + .collect::>(); + if names.len() > 1 { + let last = names.pop().unwrap(); + Some(names.join(", ") + " and " + &last) + } else if names.len() == 1 { + Some(names.pop().unwrap()) + } else { + None + } + }), + initial: Some(*known), is_dm: None, invite_state: None, unread_notifications: UnreadNotificationsCount { @@ -1326,7 +1418,7 @@ pub async fn sync_events_v4_route( } Ok(dbg!(sync_events::v4::Response { - initial: initial, + initial: since == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), lists, diff --git a/src/service/mod.rs b/src/service/mod.rs index 56aed7f..f85da78 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, sync::{Arc, Mutex}, }; @@ -105,7 +105,10 @@ impl Services { }, transaction_ids: transaction_ids::Service { db }, uiaa: uiaa::Service { db }, - users: users::Service { db }, + users: users::Service { + db, + connections: Mutex::new(BTreeMap::new()), + }, account_data: account_data::Service { db }, admin: admin::Service::build(), key_backups: key_backups::Service { db }, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 9d071a5..435f4df 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -282,4 +282,19 @@ impl Service { .map_err(|_| Error::bad_database("Invalid room name event in database.")) }) } + + pub fn get_member( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map_err(|_| Error::bad_database("Invalid room member event in database.")) + }) + } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6be5c89..63ab9b7 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,20 +1,36 @@ mod data; -use std::{collections::BTreeMap, mem}; +use std::{ + collections::BTreeMap, + mem, + sync::{Arc, Mutex}, +}; pub use data::Data; use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, + api::client::{ + device::Device, + error::ErrorKind, + filter::FilterDefinition, + sync::sync_events::{self, v4::SyncRequestList}, + }, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, - OwnedUserId, RoomAliasId, UInt, UserId, + OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; +pub struct SlidingSyncCache { + lists: BTreeMap, + known_rooms: BTreeMap>, +} + pub struct Service { pub db: &'static dyn Data, + pub connections: + Mutex>>>, } impl Service { @@ -23,6 +39,121 @@ impl Service { self.db.exists(user_id) } + pub fn forget_sync_request_connection( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + ) { + self.connections + .lock() + .unwrap() + .remove(&(user_id, device_id, conn_id)); + } + + pub fn update_sync_request_with_cache( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + request: &mut sync_events::v4::Request, + ) -> BTreeMap> { + let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; + + let cache = &mut self.connections.lock().unwrap(); + let 
cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + known_rooms: BTreeMap::new(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.remove(list_id) { + if list.sort.is_empty() { + list.sort = cached_list.sort; + }; + if list.room_details.required_state.is_empty() { + list.room_details.required_state = cached_list.room_details.required_state; + }; + list.room_details.timeline_limit = list + .room_details + .timeline_limit + .or(cached_list.room_details.timeline_limit); + list.include_old_rooms = list + .include_old_rooms + .clone() + .or(cached_list.include_old_rooms); + match (&mut list.filters, cached_list.filters) { + (Some(list_filters), Some(cached_filters)) => { + list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); + if list_filters.spaces.is_empty() { + list_filters.spaces = cached_filters.spaces; + } + list_filters.is_encrypted = + list_filters.is_encrypted.or(cached_filters.is_encrypted); + list_filters.is_invite = + list_filters.is_invite.or(cached_filters.is_invite); + if list_filters.room_types.is_empty() { + list_filters.room_types = cached_filters.room_types; + } + if list_filters.not_room_types.is_empty() { + list_filters.not_room_types = cached_filters.not_room_types; + } + list_filters.room_name_like = list_filters + .room_name_like + .clone() + .or(cached_filters.room_name_like); + if list_filters.tags.is_empty() { + list_filters.tags = cached_filters.tags; + } + if list_filters.not_tags.is_empty() { + list_filters.not_tags = cached_filters.not_tags; + } + } + (_, Some(cached_filters)) => list.filters = Some(cached_filters), + (_, _) => {} + } + if list.bump_event_types.is_empty() { + list.bump_event_types = cached_list.bump_event_types; + }; + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached.known_rooms.clone() + } + + pub fn update_sync_known_rooms( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeMap, + ) { + let cache = &mut self.connections.lock().unwrap(); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + known_rooms: BTreeMap::new(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + cached.known_rooms.insert(list_id, new_cached_rooms); + } + /// Check if account is deactivated pub fn is_deactivated(&self, user_id: &UserId) -> Result { self.db.is_deactivated(user_id) From d220641d6453b8be767197f58c0bb32f255e2a5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 24 Jul 2023 10:41:50 +0200 Subject: [PATCH 1429/1727] Sliding sync subscriptions, e2ee, to_device messages --- src/api/client_server/sync.rs | 294 ++++++++++++++++++++++++++++++++-- src/service/users/mod.rs | 89 +++++++++- 2 files changed, 361 insertions(+), 22 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 8883c16..527625a 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -463,7 +463,7 @@ async fn sync_helper( } for user_id in left_encrypted_users { - let still_share_encrypted_room = services() + let dont_share_encrypted_room = services() .rooms .user .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
@@ -481,7 +481,7 @@ async fn sync_helper( .all(|encrypted| !encrypted); // If the user doesn't share an encrypted room with the target anymore, we need to tell // them - if still_share_encrypted_room { + if dont_share_encrypted_room { device_list_left.insert(user_id); } } @@ -1197,6 +1197,7 @@ pub async fn sync_events_v4_route( } } + // Get sticky parameters from cache let known_rooms = services().users.update_sync_request_with_cache( sender_user.clone(), sender_device.clone(), @@ -1210,6 +1211,195 @@ pub async fn sync_events_v4_route( .filter_map(|r| r.ok()) .collect::>(); + if body.extensions.to_device.enabled.unwrap_or(false) { + services() + .users + .remove_to_device_events(&sender_user, &sender_device, since)?; + } + + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_changes = HashSet::new(); + let mut device_list_left = HashSet::new(); + + if body.extensions.e2ee.enabled.unwrap_or(false) { + // Look for device list updates of this account + device_list_changes.extend( + services() + .users + .keys_changed(sender_user.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + + for room_id in &all_joined_rooms { + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + let joined_since_last_sync = since_sender_member + .map_or(true, |member| member.membership != MembershipState::Join); + + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + if encrypted_room { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if since_state_ids.get(&key) != Some(&id) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = + UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::< + RoomMemberEventContent, + >( + pdu.content.get() + ) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
+ .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room( + &sender_user, + &user_id, + &room_id, + )? { + device_list_changes.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_changes.extend( + services() + .rooms + .state_cache + .room_members(&room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&sender_user, user_id, &room_id) + .unwrap_or(false) + }), + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + } + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .filter_map(|r| r.ok()) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, we need to tell + // them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + } + let mut lists = BTreeMap::new(); let mut todo_rooms = BTreeMap::new(); // and required state @@ -1249,7 +1439,7 @@ pub async fn sync_events_v4_route( todo_room .0 .extend(list.room_details.required_state.iter().cloned()); - todo_room.1 = todo_room.1.min(limit); + todo_room.1 = todo_room.1.max(limit); if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true) { todo_room.2 = false; @@ -1279,18 +1469,51 @@ pub async fn sync_events_v4_route( } } - for (room_id, room) in body.room_subscriptions { + let mut known_subscription_rooms = BTreeMap::new(); + for (room_id, room) in dbg!(&body.room_subscriptions) { let todo_room = todo_rooms .entry(room_id.clone()) .or_insert((BTreeSet::new(), 0, true)); let limit = room.timeline_limit.map_or(10, u64::from).min(100); todo_room.0.extend(room.required_state.iter().cloned()); - todo_room.1 = todo_room.1.min(limit); - todo_room.2 = false; + todo_room.1 = todo_room.1.max(limit); + if known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + != Some(&true) + { + todo_room.2 = false; + } + known_subscription_rooms.insert(room_id.clone(), true); + } + + for r in body.unsubscribe_rooms { + known_subscription_rooms.remove(&r); + body.room_subscriptions.remove(&r); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + ); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ); } let mut rooms = BTreeMap::new(); for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms { + // TODO: per-room sync tokens let (timeline_pdus, limited) = 
load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?; @@ -1372,12 +1595,26 @@ pub async fn sync_events_v4_route( None } }), - initial: Some(*known), + initial: Some(!known), is_dm: None, invite_state: None, unread_notifications: UnreadNotificationsCount { - highlight_count: None, - notification_count: None, + highlight_count: Some( + services() + .rooms + .user + .highlight_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services() + .rooms + .user + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ), }, timeline: room_events, required_state, @@ -1399,7 +1636,7 @@ pub async fn sync_events_v4_route( .unwrap_or(0) as u32) .into(), ), - num_live: None, + num_live: None, // Count events in timeline greater than global sync counter }, ); } @@ -1424,17 +1661,44 @@ pub async fn sync_events_v4_route( lists, rooms, extensions: sync_events::v4::Extensions { - to_device: None, + to_device: if body.extensions.to_device.enabled.unwrap_or(false) { + Some(sync_events::v4::ToDevice { + events: services() + .users + .get_to_device_events(&sender_user, &sender_device)?, + next_batch: next_batch.to_string(), + }) + } else { + None + }, e2ee: sync_events::v4::E2EE { device_lists: DeviceLists { - changed: Vec::new(), - left: Vec::new(), + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: BTreeMap::new(), + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + // Fallback keys are not yet supported device_unused_fallback_key_types: None, }, account_data: sync_events::v4::AccountData { - global: Vec::new(), + global: if body.extensions.account_data.enabled.unwrap_or(false) { + services() + .account_data + .changes_since(None, &sender_user, since)? 
+ .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| { + Error::bad_database("Invalid account event in database.") + }) + .ok() + }) + .collect() + } else { + Vec::new() + }, rooms: BTreeMap::new(), }, receipts: sync_events::v4::Receipts { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 63ab9b7..786b42e 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -11,7 +11,10 @@ use ruma::{ device::Device, error::ErrorKind, filter::FilterDefinition, - sync::sync_events::{self, v4::SyncRequestList}, + sync::sync_events::{ + self, + v4::{ExtensionsConfig, SyncRequestList}, + }, }, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, @@ -24,7 +27,9 @@ use crate::{services, Error, Result}; pub struct SlidingSyncCache { lists: BTreeMap, + subscriptions: BTreeMap, known_rooms: BTreeMap>, + extensions: ExtensionsConfig, } pub struct Service { @@ -66,7 +71,9 @@ impl Service { .or_insert_with(|| { Arc::new(Mutex::new(SlidingSyncCache { lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), })) }), ); @@ -74,12 +81,13 @@ impl Service { drop(cache); for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.remove(list_id) { + if let Some(cached_list) = cached.lists.get(list_id) { if list.sort.is_empty() { - list.sort = cached_list.sort; + list.sort = cached_list.sort.clone(); }; if list.room_details.required_state.is_empty() { - list.room_details.required_state = cached_list.room_details.required_state; + list.room_details.required_state = + cached_list.room_details.required_state.clone(); }; list.room_details.timeline_limit = list .room_details @@ -88,8 +96,8 @@ impl Service { list.include_old_rooms = list .include_old_rooms .clone() - .or(cached_list.include_old_rooms); - match (&mut list.filters, cached_list.filters) { + .or(cached_list.include_old_rooms.clone()); + match (&mut list.filters, cached_list.filters.clone()) { (Some(list_filters), Some(cached_filters)) => { list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); if list_filters.spaces.is_empty() { @@ -120,15 +128,80 @@ impl Service { (_, _) => {} } if list.bump_event_types.is_empty() { - list.bump_event_types = cached_list.bump_event_types; + list.bump_event_types = cached_list.bump_event_types.clone(); }; } cached.lists.insert(list_id.clone(), list.clone()); } + cached + .subscriptions + .extend(request.room_subscriptions.clone().into_iter()); + request + .room_subscriptions + .extend(cached.subscriptions.clone().into_iter()); + + request.extensions.e2ee.enabled = request + .extensions + .e2ee + .enabled + .or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or(cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or(cached.extensions.account_data.rooms.clone()); + + cached.extensions = request.extensions.clone(); + cached.known_rooms.clone() } + pub fn update_sync_subscriptions( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + 
subscriptions: BTreeMap, + ) { + let cache = &mut self.connections.lock().unwrap(); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + cached.subscriptions = subscriptions; + } + pub fn update_sync_known_rooms( &self, user_id: OwnedUserId, @@ -144,7 +217,9 @@ impl Service { .or_insert_with(|| { Arc::new(Mutex::new(SlidingSyncCache { lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), })) }), ); From bf46829595450b69a83da8cada99786470a492b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 26 Jul 2023 08:33:27 +0200 Subject: [PATCH 1430/1727] fix: spaces with restricted rooms --- src/service/rooms/spaces/mod.rs | 63 ++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 13 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 36fa1fc..e92fc07 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -16,7 +16,7 @@ use ruma::{ create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, + join_rules::{self, AllowRule, JoinRule, RoomJoinRulesEventContent}, topic::RoomTopicEventContent, }, StateEventType, @@ -30,7 +30,7 @@ use tracing::{debug, error, warn}; use crate::{services, Error, PduEvent, Result}; pub enum CachedJoinRule { - Simplified(SpaceRoomJoinRule), + //Simplified(SpaceRoomJoinRule), Full(JoinRule), } @@ -84,9 +84,9 @@ impl Service { { if let Some(cached) = cached { let allowed = match &cached.join_rule { - CachedJoinRule::Simplified(s) => { - self.handle_simplified_join_rule(s, sender_user, ¤t_room)? - } + //CachedJoinRule::Simplified(s) => { + //self.handle_simplified_join_rule(s, sender_user, ¤t_room)? + //} CachedJoinRule::Full(f) => { self.handle_join_rule(f, sender_user, ¤t_room)? } @@ -211,11 +211,34 @@ impl Service { .map(|c| c.room_id.clone()) .collect::>(); - if self.handle_simplified_join_rule( - &response.room.join_rule, - sender_user, - ¤t_room, - )? { + let join_rule = match response.room.join_rule { + SpaceRoomJoinRule::Invite => JoinRule::Invite, + SpaceRoomJoinRule::Knock => JoinRule::Knock, + SpaceRoomJoinRule::Private => JoinRule::Private, + SpaceRoomJoinRule::Restricted => { + JoinRule::Restricted(join_rules::Restricted { + allow: response + .room + .allowed_room_ids + .into_iter() + .map(|room| AllowRule::room_membership(room)) + .collect(), + }) + } + SpaceRoomJoinRule::KnockRestricted => { + JoinRule::KnockRestricted(join_rules::Restricted { + allow: response + .room + .allowed_room_ids + .into_iter() + .map(|room| AllowRule::room_membership(room)) + .collect(), + }) + } + SpaceRoomJoinRule::Public => JoinRule::Public, + _ => return Err(Error::BadServerResponse("Unknown join rule")), + }; + if self.handle_join_rule(&join_rule, sender_user, ¤t_room)? 
{ if left_to_skip > 0 { left_to_skip -= 1; } else { @@ -231,7 +254,7 @@ impl Service { Some(CachedSpaceChunk { chunk, children, - join_rule: CachedJoinRule::Simplified(response.room.join_rule), + join_rule: CachedJoinRule::Full(join_rule), }), ); @@ -437,8 +460,22 @@ impl Service { } match join_rule { - JoinRule::Restricted(_) => { - // TODO: Check rules + JoinRule::Restricted(r) => { + for rule in &r.allow { + match rule { + join_rules::AllowRule::RoomMembership(rm) => { + if let Ok(true) = services() + .rooms + .state_cache + .is_joined(sender_user, &rm.room_id) + { + return Ok(true); + } + } + _ => {} + } + } + Ok(false) } JoinRule::KnockRestricted(_) => { From 54a115caf337c339f1dc44aaa298257038c6e9f6 Mon Sep 17 00:00:00 2001 From: uak <4626956-uak@users.noreply.gitlab.com> Date: Wed, 26 Jul 2023 18:53:19 +0000 Subject: [PATCH 1431/1727] Change link from docker-compose.override.traefik.yml to docker-compose.override.yml in README.md --- docker/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index c702832..aba101e 100644 --- a/docker/README.md +++ b/docker/README.md @@ -95,7 +95,7 @@ As a container user, you probably know about Traefik. It is a easy to use revers containerized app and services available through the web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and -[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy +[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to @@ -106,7 +106,7 @@ With the service `well-known` we use a single `nginx` container that will serve So...step by step: -1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames. +1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.traefik` from the filename. 2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. 3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 
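The restricted-rooms fix above reduces to a membership check over the join rule's allow list: the federation response only carries a simplified `SpaceRoomJoinRule`, so the patch rebuilds a full `JoinRule` from `allowed_room_ids` and then asks whether the sender is already joined to any room named in a `RoomMembership` allow rule. Below is a minimal, self-contained sketch of that check using simplified stand-in types; the real code goes through ruma's `join_rules` types and Conduit's `state_cache.is_joined`, so the names here are illustrative only.

```rust
use std::collections::HashSet;

/// Simplified stand-ins for ruma's join rule types (illustrative only).
enum AllowRule {
    RoomMembership { room_id: String },
    Other,
}

enum JoinRule {
    Public,
    Invite,
    Restricted { allow: Vec<AllowRule> },
}

/// A restricted room is joinable (and shown in the space hierarchy) if the
/// user is already joined to at least one room named in the allow list.
fn can_join_restricted(joined_rooms: &HashSet<String>, rule: &JoinRule) -> bool {
    match rule {
        JoinRule::Public => true,
        JoinRule::Invite => false,
        JoinRule::Restricted { allow } => allow.iter().any(|r| match r {
            AllowRule::RoomMembership { room_id } => joined_rooms.contains(room_id),
            AllowRule::Other => false,
        }),
    }
}

fn main() {
    let joined: HashSet<String> = ["!parent:example.org".to_owned()].into_iter().collect();
    let restricted = JoinRule::Restricted {
        allow: vec![AllowRule::RoomMembership {
            room_id: "!parent:example.org".to_owned(),
        }],
    };
    assert!(can_join_restricted(&joined, &restricted));
}
```

This also explains why the cached `Simplified(SpaceRoomJoinRule)` variant was commented out: the simplified rule has no allow list, so the full `JoinRule` must be reconstructed and cached instead.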
From 291290db92f2720a298390c229bea81f3b56e3e9 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 26 Jul 2023 13:24:44 -0700 Subject: [PATCH 1432/1727] maximize fd limit --- Cargo.lock | 32 +++++++++++++++++++++++++++++++- Cargo.toml | 3 +++ src/main.rs | 21 +++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 3480c01..7ddf487 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -382,6 +382,7 @@ dependencies = [ "jsonwebtoken", "lazy_static", "lru-cache", + "nix", "num_cpus", "opentelemetry", "opentelemetry-jaeger", @@ -533,7 +534,7 @@ dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset", + "memoffset 0.9.0", "scopeguard", ] @@ -1471,6 +1472,15 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" version = "0.9.0" @@ -1513,6 +1523,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "nix" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.7.1", + "pin-utils", + "static_assertions", +] + [[package]] name = "nom" version = "7.1.3" @@ -2673,6 +2697,12 @@ dependencies = [ "der", ] +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "subslice" version = "0.2.3" diff --git a/Cargo.toml b/Cargo.toml index 9196cf4..7a157f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,6 +104,9 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } +[target.'cfg(unix)'.dependencies] +nix = { version = "0.26.2", features = ["resource"] } + [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] #backend_sled = ["sled"] diff --git a/src/main.rs b/src/main.rs index 579eeb1..eb7e833 100644 --- a/src/main.rs +++ b/src/main.rs @@ -54,6 +54,16 @@ static GLOBAL: Jemalloc = Jemalloc; #[tokio::main] async fn main() { + // This is needed for opening lots of file descriptors, which tends to + // happen more often when using RocksDB and making lots of federation + // connections at startup. The soft limit is usually 1024, and the hard + // limit is usually 512000; I've personally seen it hit >2000. 
+ // + // * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 + // * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 + #[cfg(unix)] + maximize_fd_limit().expect("should be able to increase the soft limit to the hard limit"); + // Initialize DB let raw_config = Figment::new() @@ -550,3 +560,14 @@ fn method_to_filter(method: Method) -> MethodFilter { m => panic!("Unsupported HTTP method: {m:?}"), } } + +#[cfg(unix)] +fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { + use nix::sys::resource::{getrlimit, setrlimit, Resource}; + + let res = Resource::RLIMIT_NOFILE; + + let (_, hard_limit) = getrlimit(res)?; + + setrlimit(res, hard_limit, hard_limit) +} From 9fb849806783c82934dc74d32849e194e224ed90 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 26 Jul 2023 15:32:36 -0700 Subject: [PATCH 1433/1727] relax recovery mode --- src/database/abstraction/rocksdb.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index f0b5f2a..b40c439 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -49,6 +49,13 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O db_opts.set_max_background_jobs(6); db_opts.set_bytes_per_sync(1048576); + // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords + // + // Unclean shutdowns of a Matrix homeserver are likely to be fine when + // recovered in this manner as it's likely any lost information will be + // restored via federation. + db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); db_opts.set_prefix_extractor(prefix_extractor); From 7990822f724c3a727e62ce8476f8da05ddfcd6fe Mon Sep 17 00:00:00 2001 From: Tobias Tom Date: Fri, 28 Jul 2023 16:26:40 +0100 Subject: [PATCH 1434/1727] It's ok not being able to find a .well-known response. 
--- src/api/server_server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 2179b16..9a1b680 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -506,7 +506,8 @@ async fn request_well_known(destination: &str) -> Option { .await; debug!("Got well known response"); if let Err(e) = &response { - error!("Well known error: {e:?}"); + debug!("Well known error: {e:?}"); + return None; } let text = response.ok()?.text().await; debug!("Got well known response text"); From 7489e2c4f68cb35b3cb94f5a955940d3a565be36 Mon Sep 17 00:00:00 2001 From: purplemeteorite Date: Sat, 29 Jul 2023 08:14:38 +0200 Subject: [PATCH 1435/1727] moved docker-compose.yml into the docker folder --- docker-compose.yml => docker/docker-compose.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docker-compose.yml => docker/docker-compose.yml (100%) diff --git a/docker-compose.yml b/docker/docker-compose.yml similarity index 100% rename from docker-compose.yml rename to docker/docker-compose.yml From 081cc66edac5b1a2a6af92e4b735ea02b1848c39 Mon Sep 17 00:00:00 2001 From: purplemeteorite Date: Sat, 29 Jul 2023 08:26:34 +0200 Subject: [PATCH 1436/1727] fixed broken traefik links in docker README --- docker/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index cd41291..43f2996 100644 --- a/docker/README.md +++ b/docker/README.md @@ -117,7 +117,7 @@ As a container user, you probably know about Traefik. It is a easy to use revers containerized app and services available through the web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and -[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy +[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to @@ -128,7 +128,8 @@ With the service `well-known` we use a single `nginx` container that will serve So...step by step: -1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames. +1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or +[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.traefik.yml`](docker-compose.override.yml) from the repository and remove `.traefik` from the filenames. 2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. 3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 
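Both the Traefik notes above and the server_server.rs change in this series treat `.well-known` federation delegation as optional: many servers never publish one, so a failed lookup is logged at debug level and simply yields no delegation. A hedged sketch of that lookup pattern follows — `lookup_well_known` is a hypothetical helper, not Conduit's actual `request_well_known`, and it assumes `reqwest`, `serde_json`, `tokio`, and `tracing` as used elsewhere in the project; `"matrix.org"` is only an example destination.

```rust
use serde_json::Value;
use tracing::debug;

/// Try to discover the delegated federation host for `destination`.
/// Missing or malformed well-known files are expected, so every failure
/// path returns `None` and is only logged at debug level.
async fn lookup_well_known(client: &reqwest::Client, destination: &str) -> Option<String> {
    let url = format!("https://{destination}/.well-known/matrix/server");
    let response = match client.get(url).send().await {
        Ok(r) => r,
        Err(e) => {
            debug!("Well-known request to {destination} failed: {e:?}");
            return None;
        }
    };
    let text = response.text().await.ok()?;
    let body: Value = serde_json::from_str(&text).ok()?;
    // The delegated host, e.g. "matrix.example.org:443".
    body.get("m.server")?.as_str().map(ToOwned::to_owned)
}

#[tokio::main]
async fn main() {
    let client = reqwest::Client::new();
    match lookup_well_known(&client, "matrix.org").await {
        Some(delegated) => println!("delegated to {delegated}"),
        None => println!("no delegation advertised"),
    }
}
```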
From 1f867a2c867c2ce84558e11e3dc8fa49060c21b4 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Thu, 27 Jul 2023 16:23:24 +0000 Subject: [PATCH 1437/1727] Only print raw malformed JSON body in debug level Signed-off-by: girlbossceo --- src/api/ruma_wrapper/axum.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 069e12b..bbd4861 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -292,10 +292,8 @@ where debug!("{:?}", http_request); let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!( - "try_from_http_request failed: {:?}\nJSON body: {:?}", - e, json_body - ); + warn!("try_from_http_request failed: {:?}", e); + debug!("JSON body: {:?}", json_body); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; From a0148a9996e4a3f031e80abed31f81e7b8d4a1f4 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Thu, 27 Jul 2023 16:24:04 +0000 Subject: [PATCH 1438/1727] Print relevant room ID and ACL'd server in informational level These are room ACLs, not server ACLs. Causes confusion where people think their Conduit homeserver was ACL'd. Print where these are coming from in informational level. Signed-off-by: girlbossceo --- src/service/rooms/event_handler/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ef5616e..89ac72e 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1526,9 +1526,13 @@ impl Service { if acl_event_content.is_allowed(server_name) { Ok(()) } else { + info!( + "Server {} was denied by room ACL in {}", + server_name, room_id + ); Err(Error::BadRequest( ErrorKind::Forbidden, - "Server was denied by ACL", + "Server was denied by room ACL", )) } } From 863103450c876711e1d2dfead103f2bfa358a2aa Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Thu, 27 Jul 2023 17:02:57 +0000 Subject: [PATCH 1439/1727] Log the unknown login type in warning level Signed-off-by: girlbossceo --- src/api/client_server/session.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 8908fef..5ce62af 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -9,7 +9,7 @@ use ruma::{ UserId, }; use serde::Deserialize; -use tracing::info; +use tracing::{info, warn}; #[derive(Debug, Deserialize)] struct Claims { @@ -52,6 +52,7 @@ pub async fn login_route(body: Ruma) -> Result) -> Result { + warn!("Unsupported or unknown login type: {:?}", &body.login_info); return Err(Error::BadRequest( ErrorKind::Unknown, "Unsupported login type.", From cc5dcceacc38483209502d120b56407db9ad3055 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Fri, 28 Jul 2023 23:40:10 +0000 Subject: [PATCH 1440/1727] Log the room ID, event ID, PDU, and event type where possible Signed-off-by: girlbossceo --- src/api/client_server/room.rs | 4 +++- src/api/client_server/state.rs | 16 +++++++++------- src/api/server_server.rs | 11 ++++++++--- src/service/rooms/state/mod.rs | 4 +++- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 8c39b78..7bdccae 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -429,7 +429,9 @@ pub async fn get_room_event_route( .rooms .timeline .get_pdu(&body.event_id)? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + .ok_or({ + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; if !services().rooms.state_accessor.user_can_see_event( sender_user, diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 8e4ceaf..5ea7e99 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -12,6 +12,7 @@ use ruma::{ serde::Raw, EventId, RoomId, UserId, }; +use tracing::log::warn; /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// @@ -129,10 +130,11 @@ pub async fn get_state_events_for_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, &body.state_key)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, + .ok_or({ + warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); + Error::BadRequest(ErrorKind::NotFound, "State event not found.", - ))?; + )})?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) @@ -165,10 +167,10 @@ pub async fn get_state_events_for_empty_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, "")? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "State event not found.", - ))?; + .ok_or({ + warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); + Error::BadRequest(ErrorKind::NotFound, + "State event not found.",)})?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9a1b680..95716e7 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -711,7 +711,8 @@ pub async fn send_transaction_message_route( let (event_id, value, room_id) = match r { Ok(t) => t, Err(e) => { - warn!("Could not parse pdu: {e}"); + warn!("Could not parse PDU: {e}"); + warn!("Full PDU: {:?}", &pdu); continue; } }; @@ -952,7 +953,9 @@ pub async fn get_event_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + .ok_or({ + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; let room_id_str = event .get("room_id") @@ -1192,7 +1195,9 @@ pub async fn get_event_authorization_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + .ok_or({ + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; let room_id_str = event .get("room_id") diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index d782386..48e3d79 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -342,7 +342,9 @@ impl Service { .transpose()?; let room_version = create_event_content .map(|create_event| create_event.room_version) - .ok_or(Error::BadDatabase("Invalid room version"))?; + .ok_or({ + warn!("Invalid room version for room {room_id}"); + Error::BadDatabase("Invalid room version")})?; Ok(room_version) } From 3494d7759e0c371ce436ed34dcc87bd03a10ca70 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Fri, 28 Jul 2023 23:47:00 +0000 Subject: [PATCH 1441/1727] Return "Hello from Conduit!" on the / route akin to Synapes's "It works!" 
page, removing an unnecessary warning about / route being unknown Signed-off-by: girlbossceo --- src/main.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/main.rs b/src/main.rs index eb7e833..9b7528c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -433,6 +433,7 @@ fn routes() -> Router { "/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync), ) + .route("/", get(it_works)) .fallback(not_found) } @@ -482,6 +483,10 @@ async fn initial_sync(_uri: Uri) -> impl IntoResponse { ) } +async fn it_works() -> &'static str { + "Hello from Conduit!" +} + trait RouterExt { fn ruma_route(self, handler: H) -> Self where From d7061e69841526f16d33b3c6739ef8c853d777b9 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Sat, 29 Jul 2023 14:30:48 +0000 Subject: [PATCH 1442/1727] cargo fmt Signed-off-by: girlbossceo --- src/api/client_server/room.rs | 11 ++++------- src/api/client_server/state.rs | 19 ++++++++++++------- src/api/server_server.rs | 6 ++++-- src/service/globals/mod.rs | 10 ++++++---- src/service/rooms/state/mod.rs | 3 ++- src/service/users/mod.rs | 4 +++- 6 files changed, 31 insertions(+), 22 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 7bdccae..5c98028 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -425,13 +425,10 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services() - .rooms - .timeline - .get_pdu(&body.event_id)? - .ok_or({ - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; + let event = services().rooms.timeline.get_pdu(&body.event_id)?.ok_or({ + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; if !services().rooms.state_accessor.user_can_see_event( sender_user, diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 5ea7e99..6a37489 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -131,10 +131,12 @@ pub async fn get_state_events_for_key_route( .state_accessor .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or({ - warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); - Error::BadRequest(ErrorKind::NotFound, - "State event not found.", - )})?; + warn!( + "State event {:?} not found in room {:?}", + &body.event_type, &body.room_id + ); + Error::BadRequest(ErrorKind::NotFound, "State event not found.") + })?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) @@ -168,9 +170,12 @@ pub async fn get_state_events_for_empty_key_route( .state_accessor .room_state_get(&body.room_id, &body.event_type, "")? .ok_or({ - warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); - Error::BadRequest(ErrorKind::NotFound, - "State event not found.",)})?; + warn!( + "State event {:?} not found in room {:?}", + &body.event_type, &body.room_id + ); + Error::BadRequest(ErrorKind::NotFound, "State event not found.") + })?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 95716e7..8c54e92 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -955,7 +955,8 @@ pub async fn get_event_route( .get_pdu_json(&body.event_id)? 
.ok_or({ warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; let room_id_str = event .get("room_id") @@ -1197,7 +1198,8 @@ pub async fn get_event_authorization_route( .get_pdu_json(&body.event_id)? .ok_or({ warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.")})?; + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; let room_id_str = event .get("room_id") diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 7d61829..44235b3 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,8 +1,8 @@ mod data; pub use data::Data; -use ruma::serde::Base64; use ruma::{ - OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, + serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedServerSigningKeyId, OwnedUserId, }; use crate::api::server_server::FedDest; @@ -15,14 +15,16 @@ use ruma::{ }, DeviceId, RoomVersionId, ServerName, UserId, }; -use std::sync::atomic::{self, AtomicBool}; use std::{ collections::{BTreeMap, HashMap}, fs, future::Future, net::{IpAddr, SocketAddr}, path::PathBuf, - sync::{Arc, Mutex, RwLock}, + sync::{ + atomic::{self, AtomicBool}, + Arc, Mutex, RwLock, + }, time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 48e3d79..9790695 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -344,7 +344,8 @@ impl Service { .map(|create_event| create_event.room_version) .ok_or({ warn!("Invalid room version for room {room_id}"); - Error::BadDatabase("Invalid room version")})?; + Error::BadDatabase("Invalid room version") + })?; Ok(room_version) } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f5b914f..38aca80 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -62,7 +62,9 @@ impl Service { device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, ) -> BTreeMap> { - let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; + let Some(conn_id) = request.conn_id.clone() else { + return BTreeMap::new(); + }; let cache = &mut self.connections.lock().unwrap(); let cached = Arc::clone( From da907451e77648b8fce022f0d7fa536a6f678267 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 23 Jul 2023 14:52:11 +0200 Subject: [PATCH 1443/1727] Admin commands to sign and verify jsons --- src/service/admin/mod.rs | 70 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 9250a3e..6c3e3d0 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,7 +1,7 @@ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, - sync::Arc, + sync::{Arc, RwLock}, time::Instant, }; @@ -163,6 +163,20 @@ enum AdminCommand { DisableRoom { room_id: Box }, /// Enables incoming federation handling for a room again. 
EnableRoom { room_id: Box }, + + /// Verify json signatures + /// [commandbody] + /// # ``` + /// # json here + /// # ``` + SignJson, + + /// Verify json signatures + /// [commandbody] + /// # ``` + /// # json here + /// # ``` + VerifyJson, } #[derive(Debug)] @@ -754,6 +768,60 @@ impl Service { ) } } + AdminCommand::SignJson => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(mut value) => { + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + ) + .expect("our request json is what ruma expects"); + let json_text = serde_json::to_string_pretty(&value) + .expect("canonical json is valid json"); + RoomMessageEventContent::text_plain(json_text) + } + Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } + AdminCommand::VerifyJson => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let pub_key_map = RwLock::new(BTreeMap::new()); + + services() + .rooms + .event_handler + .fetch_required_signing_keys(&value, &pub_key_map) + .await?; + + let pub_key_map = pub_key_map.read().unwrap(); + match ruma::signatures::verify_json(&pub_key_map, &value) { + Ok(_) => RoomMessageEventContent::text_plain("Signature correct"), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Signature verification failed: {e}" + )), + } + } + Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } }; Ok(reply_message_content) From e2c914cc11f268bb6212e68e6b71b74255b24b97 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Sat, 29 Jul 2023 19:17:12 +0000 Subject: [PATCH 1444/1727] fix: s/ok_or/ok_or_else in relevant places Signed-off-by: girlbossceo --- src/api/client_server/room.rs | 12 ++++++++---- src/api/client_server/state.rs | 4 ++-- src/api/server_server.rs | 4 ++-- src/service/rooms/state/mod.rs | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 5c98028..56bdf03 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -425,10 +425,14 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services().rooms.timeline.get_pdu(&body.event_id)?.ok_or({ - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + let event = services() + .rooms + .timeline + .get_pdu(&body.event_id)? 
+ .ok_or_else(|| { + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; if !services().rooms.state_accessor.user_can_see_event( sender_user, diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 6a37489..d6d3939 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -130,7 +130,7 @@ pub async fn get_state_events_for_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, &body.state_key)? - .ok_or({ + .ok_or_else(|| { warn!( "State event {:?} not found in room {:?}", &body.event_type, &body.room_id @@ -169,7 +169,7 @@ pub async fn get_state_events_for_empty_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, "")? - .ok_or({ + .ok_or_else(|| { warn!( "State event {:?} not found in room {:?}", &body.event_type, &body.room_id diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 8c54e92..ca5b69d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -953,7 +953,7 @@ pub async fn get_event_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or({ + .ok_or_else(|| { warn!("Event not found, event ID: {:?}", &body.event_id); Error::BadRequest(ErrorKind::NotFound, "Event not found.") })?; @@ -1196,7 +1196,7 @@ pub async fn get_event_authorization_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or({ + .ok_or_else(|| { warn!("Event not found, event ID: {:?}", &body.event_id); Error::BadRequest(ErrorKind::NotFound, "Event not found.") })?; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 9790695..16e0a04 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -342,7 +342,7 @@ impl Service { .transpose()?; let room_version = create_event_content .map(|create_event| create_event.room_version) - .ok_or({ + .ok_or_else(|| { warn!("Invalid room version for room {room_id}"); Error::BadDatabase("Invalid room version") })?; From b8c164dc6027844d665158dc8906dd5c89f9238b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 29 Jul 2023 20:01:38 +0200 Subject: [PATCH 1445/1727] feat: version checker --- DEPLOY.md | 1 + complement/Dockerfile | 1 + conduit-example.toml | 1 + debian/postinst | 1 + docker-compose.yml | 1 + docker/docker-compose.for-traefik.yml | 1 + docker/docker-compose.with-traefik.yml | 5 ++- src/config/mod.rs | 2 + src/database/key_value/globals.rs | 18 ++++++++ src/database/mod.rs | 62 +++++++++++++++++++++++++- src/service/globals/data.rs | 2 + src/service/globals/mod.rs | 14 ++++++ 12 files changed, 105 insertions(+), 4 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index ec7dd46..4605a98 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -172,6 +172,7 @@ max_request_size = 20_000_000 # in bytes allow_registration = true allow_federation = true +allow_check_for_updates = true # Server to get public keys from. 
You probably shouldn't change this trusted_servers = ["matrix.org"] diff --git a/complement/Dockerfile b/complement/Dockerfile index 43416fa..50173a1 100644 --- a/complement/Dockerfile +++ b/complement/Dockerfile @@ -30,6 +30,7 @@ ENV CONDUIT_CONFIG=/workdir/conduit.toml RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN echo "allow_federation = true" >> conduit.toml +RUN echo "allow_check_for_updates = true" >> conduit.toml RUN echo "allow_encryption = true" >> conduit.toml RUN echo "allow_registration = true" >> conduit.toml RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml diff --git a/conduit-example.toml b/conduit-example.toml index 6089aa5..836db65 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -39,6 +39,7 @@ max_request_size = 20_000_000 # in bytes allow_registration = true allow_federation = true +allow_check_for_updates = true # Enable the display name lightning bolt on registration. enable_lightning_bolt = true diff --git a/debian/postinst b/debian/postinst index dfa599d..69a766a 100644 --- a/debian/postinst +++ b/debian/postinst @@ -73,6 +73,7 @@ max_request_size = 20_000_000 # in bytes allow_registration = true allow_federation = true +allow_check_for_updates = true trusted_servers = ["matrix.org"] diff --git a/docker-compose.yml b/docker-compose.yml index d9c32b5..5bcf84f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -29,6 +29,7 @@ services: CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml index 474299f..bed734f 100644 --- a/docker/docker-compose.for-traefik.yml +++ b/docker/docker-compose.for-traefik.yml @@ -29,6 +29,7 @@ services: CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml index 79ebef4..fda942b 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -35,8 +35,9 @@ services: # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'true' + # CONDUIT_ALLOW_FEDERATION: 'true' + # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit # CONDUIT_WORKERS: 10 # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB diff --git a/src/config/mod.rs b/src/config/mod.rs index 4dad9f7..e2c2ff1 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -28,6 +28,8 @@ pub struct Config { pub db_cache_capacity_mb: f64, #[serde(default = "true_fn")] pub enable_lightning_bolt: bool, + #[serde(default = "true_fn")] + pub allow_check_for_updates: bool, #[serde(default = "default_conduit_cache_capacity_modifier")] pub conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] diff --git 
a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 1e02459..11aa064 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -12,6 +12,7 @@ use ruma::{ use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; pub const COUNTER: &[u8] = b"c"; +pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; #[async_trait] impl service::globals::Data for KeyValueDatabase { @@ -27,6 +28,23 @@ impl service::globals::Data for KeyValueDatabase { }) } + fn last_check_for_updates_id(&self) -> Result { + self.global + .get(LAST_CHECK_FOR_UPDATES_COUNT)? + .map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("last check for updates count has invalid bytes.") + }) + }) + } + + fn update_check_for_updates_id(&self, id: u64) -> Result<()> { + self.global + .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?; + + Ok(()) + } + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); diff --git a/src/database/mod.rs b/src/database/mod.rs index 4e7bda6..b36347d 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -18,6 +18,7 @@ use ruma::{ CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; +use serde::Deserialize; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -25,7 +26,9 @@ use std::{ mem::size_of, path::Path, sync::{Arc, Mutex, RwLock}, + time::Duration, }; +use tokio::time::interval; use tracing::{debug, error, info, warn}; @@ -982,6 +985,9 @@ impl KeyValueDatabase { services().sending.start_handler(); Self::start_cleanup_task().await; + if services().globals.allow_check_for_updates() { + Self::start_check_for_updates_task(); + } Ok(()) } @@ -998,9 +1004,61 @@ impl KeyValueDatabase { } #[tracing::instrument] - pub async fn start_cleanup_task() { - use tokio::time::interval; + pub fn start_check_for_updates_task() { + tokio::spawn(async move { + let timer_interval = Duration::from_secs(60 * 60); + let mut i = interval(timer_interval); + loop { + i.tick().await; + let _ = Self::try_handle_updates().await; + } + }); + } + async fn try_handle_updates() -> Result<()> { + let response = services() + .globals + .default_client() + .get("https://conduit.rs/check-for-updates/stable") + .send() + .await?; + + #[derive(Deserialize)] + struct CheckForUpdatesResponseEntry { + id: u64, + date: String, + message: String, + } + #[derive(Deserialize)] + struct CheckForUpdatesResponse { + updates: Vec, + } + + let response = serde_json::from_str::(&response.text().await?) + .map_err(|_| Error::BadServerResponse("Bad version check response"))?; + + let mut last_update_id = services().globals.last_check_for_updates_id()?; + for update in response.updates { + last_update_id = last_update_id.max(update.id); + if update.id > services().globals.last_check_for_updates_id()? { + println!("{}", update.message); + services() + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "@room: The following is a message from the Conduit developers. 
It was sent on '{}':\n\n{}", + update.date, update.message + ))) + } + } + services() + .globals + .update_check_for_updates_id(last_update_id)?; + + Ok(()) + } + + #[tracing::instrument] + pub async fn start_cleanup_task() { #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 171b3fe..8a66751 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -13,6 +13,8 @@ use crate::Result; pub trait Data: Send + Sync { fn next_count(&self) -> Result; fn current_count(&self) -> Result; + fn last_check_for_updates_id(&self) -> Result; + fn update_check_for_updates_id(&self, id: u64) -> Result<()>; async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; fn cleanup(&self) -> Result<()>; fn memory_usage(&self) -> String; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 44235b3..875a457 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -209,6 +209,16 @@ impl Service { self.db.current_count() } + #[tracing::instrument(skip(self))] + pub fn last_check_for_updates_id(&self) -> Result { + self.db.last_check_for_updates_id() + } + + #[tracing::instrument(skip(self))] + pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> { + self.db.update_check_for_updates_id(id) + } + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { self.db.watch(user_id, device_id).await } @@ -257,6 +267,10 @@ impl Service { self.config.enable_lightning_bolt } + pub fn allow_check_for_updates(&self) -> bool { + self.config.allow_check_for_updates + } + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } From 83805c66e509b39b5d17d1a8d5033d9593711e84 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Sun, 30 Jul 2023 17:30:16 +0000 Subject: [PATCH 1446/1727] sanitise potentially sensitive errors prevents errors like DB or I/O errors from leaking filesystem paths Co-authored-by: infamous Signed-off-by: girlbossceo --- src/api/server_server.rs | 2 +- src/utils/error.rs | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index ca5b69d..6d2da07 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -927,7 +927,7 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map .into_iter() - .map(|(e, r)| (e, r.map_err(|e| e.to_string()))) + .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error()))) .collect(), }) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 4f044ca..7fafea1 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -138,6 +138,28 @@ impl Error { status_code, })) } + + /// Sanitizes public-facing errors that can leak sensitive information. + pub fn sanitized_error(&self) -> String { + let db_error = String::from("Database or I/O error occurred."); + + match self { + #[cfg(feature = "sled")] + Self::SledError { .. } => db_error, + #[cfg(feature = "sqlite")] + Self::SqliteError { .. } => db_error, + #[cfg(feature = "persy")] + Self::PersyError { .. } => db_error, + #[cfg(feature = "heed")] + Self::HeedError => db_error, + #[cfg(feature = "rocksdb")] + Self::RocksDbError { .. } => db_error, + Self::IoError { .. } => db_error, + Self::BadConfig { .. } => db_error, + Self::BadDatabase { .. 
} => db_error, + _ => self.to_string(), + } + } } #[cfg(feature = "persy")] From acfe381dd3064512272f8f47ea4dd388c04f1c39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 31 Jul 2023 16:18:23 +0200 Subject: [PATCH 1447/1727] fix: threads get updated properly Workaround for element web while waiting for https://github.com/matrix-org/matrix-js-sdk/pull/3635 --- src/api/client_server/membership.rs | 2 +- src/api/client_server/room.rs | 3 ++ src/api/client_server/sync.rs | 3 +- src/api/server_server.rs | 4 +-- src/database/key_value/rooms/timeline.rs | 2 ++ src/main.rs | 6 ++-- src/service/pdu.rs | 13 ++++++++ src/service/rooms/event_handler/mod.rs | 42 ++++++++++++------------ src/utils/error.rs | 4 +-- 9 files changed, 50 insertions(+), 29 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index c9357b2..4a1f374 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -674,7 +674,7 @@ async fn join_room_by_id_helper( }; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - warn!("{:?}: {}", value, e); + warn!("Invalid PDU in send_join response: {} {:?}", e, value); Error::BadServerResponse("Invalid PDU in send_join response.") })?; diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 56bdf03..420dd50 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -445,6 +445,9 @@ pub async fn get_room_event_route( )); } + let mut event = (*event).clone(); + event.add_age()?; + Ok(get_room_event::v3::Response { event: event.to_room_event(), }) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 527625a..7c6002e 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -20,8 +20,9 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::Raw, - uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, OwnedDeviceId, OwnedEventId, OwnedUserId, RoomId, UInt, UserId, }; +use serde::Deserialize; use std::{ collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, sync::Arc, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index ca5b69d..2220c4d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -813,7 +813,7 @@ pub async fn send_transaction_message_route( .readreceipt_update(&user_id, &room_id, event)?; } else { // TODO fetch missing events - info!("No known event ids in read receipt: {:?}", user_updates); + debug!("No known event ids in read receipt: {:?}", user_updates); } } } @@ -1011,7 +1011,7 @@ pub async fn get_backfill_route( .as_ref() .expect("server is authenticated"); - info!("Got backfill request from: {}", sender_servername); + debug!("Got backfill request from: {}", sender_servername); if !services() .rooms diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 74e3e5c..5ce2136 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -246,6 +246,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if pdu.sender != user_id { pdu.remove_transaction_id()?; } + pdu.add_age()?; let count = pdu_count(&pdu_id)?; Ok((count, pdu)) }), @@ -272,6 +273,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if pdu.sender != user_id { pdu.remove_transaction_id()?; } + pdu.add_age()?; let count = pdu_count(&pdu_id)?; Ok((count, pdu)) }), diff --git a/src/main.rs b/src/main.rs index 
9b7528c..1975038 100644 --- a/src/main.rs +++ b/src/main.rs @@ -85,6 +85,8 @@ async fn main() { config.warn_deprecated(); + let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log); + if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline() @@ -94,7 +96,7 @@ async fn main() { .unwrap(); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let filter_layer = match EnvFilter::try_new(&config.log) { + let filter_layer = match EnvFilter::try_new(&log) { Ok(s) => s, Err(e) => { eprintln!( @@ -121,7 +123,7 @@ async fn main() { } else { let registry = tracing_subscriber::Registry::default(); let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = match EnvFilter::try_new(&config.log) { + let filter_layer = match EnvFilter::try_new(&log) { Ok(s) => s, Err(e) => { eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}"); diff --git a/src/service/pdu.rs b/src/service/pdu.rs index d24e174..4a170bc 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -103,6 +103,19 @@ impl PduEvent { Ok(()) } + pub fn add_age(&mut self) -> crate::Result<()> { + let mut unsigned: BTreeMap> = self + .unsigned + .as_ref() + .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + + unsigned.insert("age".to_owned(), to_raw_value(&1).unwrap()); + self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + + Ok(()) + } + #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 89ac72e..c6e433c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -357,7 +357,7 @@ impl Service { .await; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events - info!( + debug!( "Auth check for {} based on auth events", incoming_pdu.event_id ); @@ -419,7 +419,7 @@ impl Service { )); } - info!("Validation successful."); + debug!("Validation successful."); // 7. Persist the event as an outlier. services() @@ -427,7 +427,7 @@ impl Service { .outlier .add_pdu_outlier(&incoming_pdu.event_id, &val)?; - info!("Added pdu as outlier."); + debug!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) }) @@ -476,7 +476,7 @@ impl Service { // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - info!("Requesting state at event"); + debug!("Requesting state at event"); let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { @@ -499,7 +499,7 @@ impl Service { }; if let Some(Ok(mut state)) = state { - info!("Using cached state"); + debug!("Using cached state"); let prev_pdu = services() .rooms .timeline @@ -523,7 +523,7 @@ impl Service { state_at_incoming_event = Some(state); } } else { - info!("Calculating state at event using state res"); + debug!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::new(); let mut okay = true; @@ -632,7 +632,7 @@ impl Service { } if state_at_incoming_event.is_none() { - info!("Calling /state_ids"); + debug!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match services() @@ -647,7 +647,7 @@ impl Service { .await { Ok(res) => { - info!("Fetching state events at event."); + debug!("Fetching state events at event."); let state_vec = self .fetch_and_handle_outliers( origin, @@ -710,7 +710,7 @@ impl Service { let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); - info!("Starting auth check"); + debug!("Starting auth check"); // 11. Check the auth of the event passes based on the state of the event let check_result = state_res::event_auth::auth_check( &room_version, @@ -734,7 +734,7 @@ impl Service { "Event has failed auth check with state at the event.", )); } - info!("Auth check succeeded"); + debug!("Auth check succeeded"); // Soft fail check before doing state res let auth_events = services().rooms.state.get_auth_events( @@ -769,7 +769,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) - info!("Calculating extremities"); + debug!("Calculating extremities"); let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events @@ -790,7 +790,7 @@ impl Service { ) }); - info!("Compressing state at event"); + debug!("Compressing state at event"); let state_ids_compressed = Arc::new( state_at_incoming_event .iter() @@ -804,7 +804,7 @@ impl Service { ); if incoming_pdu.state_key.is_some() { - info!("Preparing for stateres to derive new room state"); + debug!("Preparing for stateres to derive new room state"); // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); @@ -822,7 +822,7 @@ impl Service { .await?; // Set the new room state to the resolved state - info!("Forcing new room state"); + debug!("Forcing new room state"); let (sstatehash, new, removed) = services() .rooms @@ -837,7 +837,7 @@ impl Service { } // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it - info!("Starting soft fail auth check"); + debug!("Starting soft fail auth check"); if soft_fail { services().rooms.timeline.append_incoming_pdu( @@ -861,7 +861,7 @@ impl Service { )); } - info!("Appending pdu to timeline"); + debug!("Appending pdu to timeline"); extremities.insert(incoming_pdu.event_id.clone()); // Now that the event has passed all auth it is added into the timeline. @@ -877,7 +877,7 @@ impl Service { &state_lock, )?; - info!("Appended incoming pdu"); + debug!("Appended incoming pdu"); // Event has passed all auth/stateres checks drop(state_lock); @@ -890,7 +890,7 @@ impl Service { room_version_id: &RoomVersionId, incoming_state: HashMap>, ) -> Result>> { - info!("Loading current room state ids"); + debug!("Loading current room state ids"); let current_sstatehash = services() .rooms .state @@ -917,7 +917,7 @@ impl Service { ); } - info!("Loading fork states"); + debug!("Loading fork states"); let fork_states: Vec<_> = fork_states .into_iter() @@ -935,7 +935,7 @@ impl Service { }) .collect(); - info!("Resolving state"); + debug!("Resolving state"); let lock = services().globals.stateres_mutex.lock(); let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { @@ -953,7 +953,7 @@ impl Service { drop(lock); - info!("State resolution done. 
Compressing state"); + debug!("State resolution done. Compressing state"); let new_room_state = state .into_iter() diff --git a/src/utils/error.rs b/src/utils/error.rs index 4f044ca..5ffb38c 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -9,7 +9,7 @@ use ruma::{ OwnedServerName, }; use thiserror::Error; -use tracing::{error, warn}; +use tracing::{error, info}; #[cfg(feature = "persy")] use persy::PersyError; @@ -131,7 +131,7 @@ impl Error { _ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR), }; - warn!("{}: {}", status_code, message); + info!("Returning an error: {}: {}", status_code, message); RumaResponse(UiaaResponse::MatrixError(RumaError { body: ErrorBody::Standard { kind, message }, From 9ce1cad98319b9257f60b7cd0a3122c4e6f884a8 Mon Sep 17 00:00:00 2001 From: Maarten Steenhagen Date: Tue, 1 Aug 2023 10:58:07 +0000 Subject: [PATCH 1448/1727] Changed 'right' to 'appropriate' to avoid ambiguity (original could be read as right-hand-side) --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 4605a98..0d13160 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -10,7 +10,7 @@ Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore only offer Linux binaries. -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url: | CPU Architecture | Download stable version | Download development version | | ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- | From 3a6eee7019af07c10467b882dac01f84874f2e1c Mon Sep 17 00:00:00 2001 From: Maarten Steenhagen Date: Tue, 1 Aug 2023 11:03:31 +0000 Subject: [PATCH 1449/1727] Correct option error adduser in DEPLOY.md --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 4605a98..36b1e39 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -81,7 +81,7 @@ you to make sure that the file permissions are correctly set up. 
In Debian or RHEL, you can use this command to create a Conduit user: ```bash -sudo adduser --system conduit --group --disable-login --no-create-home +sudo adduser --system conduit --group --disabled-login --no-create-home ``` ## Forwarding ports in the firewall or the router From 5a7bade476d9fa91c7c59f858d9481097b819bf9 Mon Sep 17 00:00:00 2001 From: June Date: Tue, 1 Aug 2023 14:48:50 -1000 Subject: [PATCH 1450/1727] update base64 to 0.21.2 Signed-off-by: June --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/api/client_server/voip.rs | 3 ++- src/service/globals/mod.rs | 4 +++- src/service/sending/mod.rs | 24 +++++++++++------------- 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ddf487..cdf1821 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -368,7 +368,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.13.1", + "base64 0.21.2", "bytes", "clap", "crossbeam", diff --git a/Cargo.toml b/Cargo.toml index 7a157f4..9828162 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,7 +59,7 @@ thiserror = "1.0.40" # Used to generate thumbnails for images image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key -base64 = "0.13.1" +base64 = "0.21.2" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 4990c17..f0d91f7 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,4 +1,5 @@ use crate::{services, Result, Ruma}; +use base64::{engine::general_purpose, Engine as _}; use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; @@ -28,7 +29,7 @@ pub async fn turn_server_route( .expect("HMAC can take key of any size"); mac.update(username.as_bytes()); - let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD); + let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); (username, password) } else { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 875a457..e9bd0da 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -31,6 +31,8 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::{error, info}; use trust_dns_resolver::TokioAsyncResolver; +use base64::{engine::general_purpose, Engine as _}; + type WellKnownMap = HashMap; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries @@ -367,7 +369,7 @@ impl Service { let mut r = PathBuf::new(); r.push(self.config.database_path.clone()); r.push("media"); - r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); + r.push(general_purpose::URL_SAFE_NO_PAD.encode(key)); r } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 14d83be..b441144 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -18,6 +18,8 @@ use crate::{ use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; +use base64::{engine::general_purpose, Engine as _}; + use ruma::{ api::{ appservice, @@ -497,17 +499,14 @@ impl Service { })?, appservice::event::push_events::v1::Request { events: pdu_jsons, - txn_id: (&*base64::encode_config( - calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) 
=> &**b, - }) - .collect::>(), - ), - base64::URL_SAFE_NO_PAD, - )) + txn_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ))) .into(), }, ) @@ -642,7 +641,7 @@ impl Service { pdus: pdu_jsons, edus: edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: (&*base64::encode_config( + transaction_id: (&*general_purpose::URL_SAFE_NO_PAD.encode( calculate_hash( &events .iter() @@ -651,7 +650,6 @@ impl Service { }) .collect::>(), ), - base64::URL_SAFE_NO_PAD, )) .into(), }, From fbb256dd91f8f6795f4273bcfcd98adaf04e7f2f Mon Sep 17 00:00:00 2001 From: June Date: Tue, 1 Aug 2023 15:09:55 -1000 Subject: [PATCH 1451/1727] bump rusqlite to 0.29.0 Signed-off-by: June --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ddf487..a234b79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1361,9 +1361,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -2305,11 +2305,11 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", diff --git a/Cargo.toml b/Cargo.toml index 7a157f4..e137494 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" -rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } +rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } crossbeam = { version = "0.8.2", optional = true } num_cpus = "1.15.0" From fbd8090b0bf47ba1a4ed818c84e374b50d39c94a Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Mon, 31 Jul 2023 01:02:52 +0000 Subject: [PATCH 1452/1727] log room ID for invalid room topic event errors Signed-off-by: girlbossceo --- src/api/client_server/directory.rs | 1 + src/service/rooms/spaces/mod.rs | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index a812dbc..50ae9f1 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -221,6 +221,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) .map_err(|_| { + error!("Invalid room topic event in database for room {}", room_id); Error::bad_database("Invalid room topic event in database.") }) })?, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index e92fc07..9b57d53 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -326,7 +326,10 @@ impl Service { 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) - .map_err(|_| Error::bad_database("Invalid room topic event in database.")) + .map_err(|_| { + error!("Invalid room topic event in database for room {}", room_id); + Error::bad_database("Invalid room topic event in database.") + }) })?, world_readable: services() .rooms From 06fccbc3405c943ed4ecd2f1ab7528101f2a891c Mon Sep 17 00:00:00 2001 From: June Date: Thu, 3 Aug 2023 14:51:39 -1000 Subject: [PATCH 1453/1727] debug log before and after nofile soft limit increases Signed-off-by: June --- src/main.rs | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/src/main.rs b/src/main.rs index 1975038..c74d6dd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -40,7 +40,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -54,17 +54,7 @@ static GLOBAL: Jemalloc = Jemalloc; #[tokio::main] async fn main() { - // This is needed for opening lots of file descriptors, which tends to - // happen more often when using RocksDB and making lots of federation - // connections at startup. The soft limit is usually 1024, and the hard - // limit is usually 512000; I've personally seen it hit >2000. - // - // * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 - // * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 - #[cfg(unix)] - maximize_fd_limit().expect("should be able to increase the soft limit to the hard limit"); - - // Initialize DB + // Initialize config let raw_config = Figment::new() .merge( @@ -135,6 +125,16 @@ async fn main() { tracing::subscriber::set_global_default(subscriber).unwrap(); } + // This is needed for opening lots of file descriptors, which tends to + // happen more often when using RocksDB and making lots of federation + // connections at startup. The soft limit is usually 1024, and the hard + // limit is usually 512000; I've personally seen it hit >2000. 
+ // + // * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 + // * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 + #[cfg(unix)] + maximize_fd_limit().expect("should be able to increase the soft limit to the hard limit"); + info!("Loading database"); if let Err(error) = KeyValueDatabase::load_or_create(config).await { error!(?error, "The database couldn't be loaded or created"); @@ -569,12 +569,19 @@ fn method_to_filter(method: Method) -> MethodFilter { } #[cfg(unix)] +#[tracing::instrument(err)] fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { use nix::sys::resource::{getrlimit, setrlimit, Resource}; let res = Resource::RLIMIT_NOFILE; - let (_, hard_limit) = getrlimit(res)?; + let (soft_limit, hard_limit) = getrlimit(res)?; - setrlimit(res, hard_limit, hard_limit) + debug!("Current nofile soft limit: {soft_limit}"); + + setrlimit(res, hard_limit, hard_limit)?; + + debug!("Increased nofile soft limit to {hard_limit}"); + + Ok(()) } From c1e2ffc0cdcf11a2d9763ab85af27e1a6f6f909a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 7 Aug 2023 13:55:44 +0200 Subject: [PATCH 1454/1727] improvement: maybe cross signing really works now --- src/api/client_server/keys.rs | 5 +++++ src/api/client_server/sync.rs | 3 +-- src/api/server_server.rs | 3 ++- src/database/key_value/users.rs | 5 ++++- src/service/rooms/state_cache/mod.rs | 17 +++++++++++------ src/service/users/data.rs | 1 + src/service/users/mod.rs | 16 +++++++++++----- 7 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 3e03221..b847301 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -132,6 +132,7 @@ pub async fn upload_signing_keys_route( master_key, &body.self_signing_key, &body.user_signing_key, + true, // notify so that other users see the new keys )?; } @@ -375,6 +376,10 @@ pub(crate) async fn get_keys_helper bool>( } let json = serde_json::to_value(master_key).expect("to_value always works"); let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services().users.add_cross_signing_keys( + &user, &raw, &None, &None, + false, // Dont notify. A notification would trigger another key request resulting in an endless loop + )?; master_keys.insert(user, raw); } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 7c6002e..527625a 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -20,9 +20,8 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::Raw, - uint, DeviceId, OwnedDeviceId, OwnedEventId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; -use serde::Deserialize; use std::{ collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, sync::Arc, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 2220c4d..554361f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). 
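The `notify` flag threaded through `add_cross_signing_keys` above is the core of this cross-signing fix: keys uploaded by a local user must mark that user's device keys as changed, while keys merely cached from a federation `/keys/query` response must not, otherwise the notification triggers another key request and the loop never ends. A minimal, self-contained sketch of that gating (hypothetical in-memory store, not the actual `users` service):

```rust
use std::collections::{BTreeMap, BTreeSet};

/// Hypothetical in-memory stand-in for the users store, used only to
/// illustrate how the new `notify` flag gates the "device keys changed" marker.
#[derive(Default)]
struct CrossSigningStore {
    master_keys: BTreeMap<String, String>,
    keys_changed: BTreeSet<String>,
}

impl CrossSigningStore {
    fn add_cross_signing_keys(&mut self, user_id: &str, master_key: &str, notify: bool) {
        self.master_keys.insert(user_id.to_owned(), master_key.to_owned());
        // Only a genuine upload should wake other devices and servers; keys that
        // were just fetched over federation are cached silently so the write does
        // not trigger another key request.
        if notify {
            self.keys_changed.insert(user_id.to_owned());
        }
    }
}

fn main() {
    let mut store = CrossSigningStore::default();
    store.add_cross_signing_keys("@alice:example.com", "<master key>", true); // local upload
    store.add_cross_signing_keys("@bob:remote.org", "<master key>", false); // federation cache
    assert_eq!(store.keys_changed.len(), 1);
}
```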
@@ -917,6 +917,7 @@ pub async fn send_transaction_message_route( &master_key, &self_signing_key, &None, + true, )?; } } diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 0301cda..2b09d68 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -449,6 +449,7 @@ impl service::users::Data for KeyValueDatabase { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, + notify: bool, ) -> Result<()> { // TODO: Check signatures let mut prefix = user_id.as_bytes().to_vec(); @@ -530,7 +531,9 @@ impl service::users::Data for KeyValueDatabase { .insert(user_id.as_bytes(), &user_signing_key_key)?; } - self.mark_device_key_update(user_id)?; + if notify { + self.mark_device_key_update(user_id)?; + } Ok(()) } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 32afdd4..ef1ad61 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -14,6 +14,7 @@ use ruma::{ serde::Raw, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; +use tracing::warn; use crate::{services, Error, Result}; @@ -88,8 +89,9 @@ impl Service { RoomAccountDataEventType::Tag, )? .map(|event| { - serde_json::from_str(event.get()).map_err(|_| { - Error::bad_database("Invalid account data event in db.") + serde_json::from_str(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") }) }) { @@ -113,8 +115,9 @@ impl Service { GlobalAccountDataEventType::Direct.to_string().into(), )? .map(|event| { - serde_json::from_str::(event.get()).map_err(|_| { - Error::bad_database("Invalid account data event in db.") + serde_json::from_str::(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") }) }) { @@ -155,8 +158,10 @@ impl Service { .into(), )? .map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) + serde_json::from_str::(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") + }) }) .transpose()? 
.map_or(false, |ignored| { diff --git a/src/service/users/data.rs b/src/service/users/data.rs index d01e070..ddf941e 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -111,6 +111,7 @@ pub trait Data: Send + Sync { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, + notify: bool, ) -> Result<()>; fn sign_key( diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 38aca80..c345e56 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -66,7 +66,7 @@ impl Service { return BTreeMap::new(); }; - let cache = &mut self.connections.lock().unwrap(); + let mut cache = self.connections.lock().unwrap(); let cached = Arc::clone( cache .entry((user_id, device_id, conn_id)) @@ -185,7 +185,7 @@ impl Service { conn_id: String, subscriptions: BTreeMap, ) { - let cache = &mut self.connections.lock().unwrap(); + let mut cache = self.connections.lock().unwrap(); let cached = Arc::clone( cache .entry((user_id, device_id, conn_id)) @@ -212,7 +212,7 @@ impl Service { list_id: String, new_cached_rooms: BTreeMap, ) { - let cache = &mut self.connections.lock().unwrap(); + let mut cache = self.connections.lock().unwrap(); let cached = Arc::clone( cache .entry((user_id, device_id, conn_id)) @@ -398,9 +398,15 @@ impl Service { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, + notify: bool, ) -> Result<()> { - self.db - .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) + self.db.add_cross_signing_keys( + user_id, + master_key, + self_signing_key, + user_signing_key, + notify, + ) } pub fn sign_key( From d82c26f0a9a92d0193924ddc3d81b7b0a152862b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 7 Aug 2023 17:54:08 +0200 Subject: [PATCH 1455/1727] Avatars for sliding sync DMs --- Cargo.lock | 29 ++++----- Cargo.toml | 2 +- src/api/client_server/sync.rs | 86 ++++++++++++++++--------- src/service/rooms/state_accessor/mod.rs | 12 ++++ 4 files changed, 80 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ddf487..b357bb9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1296,7 +1296,6 @@ checksum = "1d9a8bb6c7c71d151b25936b03e012a4c00daea99e3a3797c6ead66b0a0d55e2" dependencies = [ "const_panic", "konst_kernel", - "konst_proc_macros", "typewit", ] @@ -1309,12 +1308,6 @@ dependencies = [ "typewit", ] -[[package]] -name = "konst_proc_macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e28ab1dc35e09d60c2b8c90d12a9a8d9666c876c10a3739a3196db0103b6043" - [[package]] name = "lazy_static" version = "1.4.0" @@ -2146,7 +2139,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "assign", "js_int", @@ -2164,7 +2157,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "js_int", "ruma-common", @@ -2175,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = 
"git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "assign", "bytes", @@ -2192,7 +2185,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "base64 0.21.2", "bytes", @@ -2220,7 +2213,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "js_int", "ruma-common", @@ -2231,7 +2224,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "js_int", "thiserror", @@ -2240,7 +2233,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "js_int", "ruma-common", @@ -2250,7 +2243,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "once_cell", "proc-macro-crate", @@ -2265,7 +2258,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "js_int", "ruma-common", @@ -2276,7 +2269,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "base64 0.21.2", "ed25519-dalek", @@ -2292,7 +2285,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=07bc06038fded40d4e9180637f056d256f9a1fbc#07bc06038fded40d4e9180637f056d256f9a1fbc" +source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 
7a157f4..9d1e9cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "07bc06038fded40d4e9180637f056d256f9a1fbc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "3bd58e3c899457c2d55c45268dcb8a65ae682d54", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 527625a..a0edc43 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1558,6 +1558,56 @@ pub async fn sync_events_v4_route( .map(|state| state.to_sync_state_event()) .collect(); + // Heroes + let heroes = services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|r| r.ok()) + .filter(|member| member != &sender_user) + .map(|member| { + Ok::<_, Error>( + services() + .rooms + .state_accessor + .get_member(&room_id, &member)? + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }), + ) + }) + .filter_map(|r| r.ok()) + .filter_map(|o| o) + .take(5) + .collect::>(); + let name = if heroes.len() > 1 { + let last = heroes[0].0.clone(); + Some( + heroes[1..] + .iter() + .map(|h| h.0.clone()) + .collect::>() + .join(", ") + + " and " + + &last, + ) + } else if heroes.len() == 1 { + Some(heroes[0].0.clone()) + } else { + None + }; + + let avatar = if heroes.len() == 1 { + heroes[0].1.clone() + } else { + None + }; + rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { @@ -1565,36 +1615,12 @@ pub async fn sync_events_v4_route( .rooms .state_accessor .get_name(&room_id)? - .or_else(|| { - // Heroes - let mut names = services() - .rooms - .state_cache - .room_members(&room_id) - .filter_map(|r| r.ok()) - .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services() - .rooms - .state_accessor - .get_member(&room_id, &member)? 
- .and_then(|memberevent| memberevent.displayname) - .unwrap_or(member.to_string()), - ) - }) - .filter_map(|r| r.ok()) - .take(5) - .collect::>(); - if names.len() > 1 { - let last = names.pop().unwrap(); - Some(names.join(", ") + " and " + &last) - } else if names.len() == 1 { - Some(names.pop().unwrap()) - } else { - None - } - }), + .or_else(|| name), + avatar: services() + .rooms + .state_accessor + .get_avatar(&room_id)? + .map_or(avatar, |a| a.url), initial: Some(!known), is_dm: None, invite_state: None, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 435f4df..a4a62fe 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,6 +9,7 @@ use lru_cache::LruCache; use ruma::{ events::{ room::{ + avatar::RoomAvatarEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, name::RoomNameEventContent, @@ -283,6 +284,17 @@ impl Service { }) } + pub fn get_avatar(&self, room_id: &RoomId) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + }) + } + pub fn get_member( &self, room_id: &RoomId, From c028e0553c541d954b50f82f8ea058c43d8b7d22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 9 Aug 2023 18:27:30 +0200 Subject: [PATCH 1456/1727] feat: registration tokens --- README.md | 5 +---- src/api/client_server/account.rs | 23 ++++++++++++++++------- src/api/client_server/sync.rs | 8 ++++---- src/config/mod.rs | 1 + src/database/mod.rs | 4 ++++ src/service/uiaa/mod.rs | 11 +++++++++++ 6 files changed, 37 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 52ea3c1..5e01c8c 100644 --- a/README.md +++ b/README.md @@ -16,10 +16,7 @@ friends or company. #### Can I try it out? Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for -example) and registering on the `conduit.rs` homeserver. - -*Registration is currently disabled because of scammers. For an account please - message us (see contact section below).* +example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Server hosting for conduit.rs is donated by the Matrix.org Foundation. 
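The heroes fallback introduced for sliding-sync DMs above is easier to read outside the diff. Here is a minimal sketch of the same name/avatar derivation with simplified types (plain `String`s instead of `OwnedMxcUri`, and no service calls):

```rust
/// Minimal sketch of the heroes fallback for sliding-sync DM rooms:
/// `heroes` holds (displayname, avatar_url) pairs for up to five members
/// other than the requesting user. Simplified types, not the service code.
fn dm_name_and_avatar(heroes: &[(String, Option<String>)]) -> (Option<String>, Option<String>) {
    let name = match heroes {
        [] => None,
        [(only, _)] => Some(only.clone()),
        // Mirrors the diff: everyone after the first hero is joined with ", ",
        // and the first hero is appended after " and ".
        [(first, _), rest @ ..] => Some(
            rest.iter()
                .map(|(n, _)| n.clone())
                .collect::<Vec<_>>()
                .join(", ")
                + " and "
                + first,
        ),
    };
    // Only a single-hero room (a 1:1 DM) borrows that member's avatar.
    let avatar = match heroes {
        [(_, avatar)] => avatar.clone(),
        _ => None,
    };
    (name, avatar)
}

fn main() {
    let heroes = vec![
        ("Alice".to_owned(), Some("mxc://example.com/alice".to_owned())),
        ("Bob".to_owned(), None),
    ];
    assert_eq!(dm_name_and_avatar(&heroes).0.as_deref(), Some("Bob and Alice"));
    assert_eq!(dm_name_and_avatar(&heroes).1, None);
}
```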
diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 1d7480a..4655130 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -74,7 +74,10 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() && !body.from_appservice { + if !services().globals.allow_registration() + && !body.from_appservice + && services().globals.config.registration_token.is_none() + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", @@ -121,7 +124,11 @@ pub async fn register_route(body: Ruma) -> Result) -> Result Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); - let mut body = dbg!(body.body); + let mut body = body.body; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); @@ -1470,7 +1470,7 @@ pub async fn sync_events_v4_route( } let mut known_subscription_rooms = BTreeMap::new(); - for (room_id, room) in dbg!(&body.room_subscriptions) { + for (room_id, room) in &body.room_subscriptions { let todo_room = todo_rooms .entry(room_id.clone()) .or_insert((BTreeSet::new(), 0, true)); @@ -1680,7 +1680,7 @@ pub async fn sync_events_v4_route( let _ = tokio::time::timeout(duration, watcher).await; } - Ok(dbg!(sync_events::v4::Response { + Ok(sync_events::v4::Response { initial: since == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), @@ -1735,5 +1735,5 @@ pub async fn sync_events_v4_route( }, }, delta_token: None, - })) + }) } diff --git a/src/config/mod.rs b/src/config/mod.rs index e2c2ff1..9128c52 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -46,6 +46,7 @@ pub struct Config { pub max_fetch_prev_events: u16, #[serde(default = "false_fn")] pub allow_registration: bool, + pub registration_token: Option, #[serde(default = "true_fn")] pub allow_encryption: bool, #[serde(default = "false_fn")] diff --git a/src/database/mod.rs b/src/database/mod.rs index b36347d..e247d9f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -267,6 +267,10 @@ impl KeyValueDatabase { } }; + if config.registration_token == Some(String::new()) { + return Err(Error::bad_config("Registration token is empty")); + } + if config.max_request_size < 1024 { error!(?config.max_request_size, "Max request size is less than 1KB. Please increase it."); } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 147ce4d..ed39af9 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -96,6 +96,17 @@ impl Service { // Password was correct! 
Let's add it to `completed` uiaainfo.completed.push(AuthType::Password); } + AuthData::RegistrationToken(t) => { + if Some(t.token.trim()) == services().globals.config.registration_token.as_deref() { + uiaainfo.completed.push(AuthType::RegistrationToken); + } else { + uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { + kind: ErrorKind::Forbidden, + message: "Invalid registration token.".to_owned(), + }); + return Ok((false, uiaainfo)); + } + } AuthData::Dummy(_) => { uiaainfo.completed.push(AuthType::Dummy); } From 183558150d1c2a022b9be60b22295f78d2326b27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 9 Aug 2023 22:21:21 +0200 Subject: [PATCH 1457/1727] fix: don't show removed rooms in space --- src/service/rooms/spaces/mod.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 9b57d53..53232f4 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -19,6 +19,7 @@ use ruma::{ join_rules::{self, AllowRule, JoinRule, RoomJoinRulesEventContent}, topic::RoomTopicEventContent, }, + space::child::SpaceChildEventContent, StateEventType, }, space::SpaceRoomJoinRule, @@ -124,11 +125,24 @@ impl Service { if event_type != StateEventType::SpaceChild { continue; } + + let pdu = services() + .rooms + .timeline + .get_pdu(&id)? + .ok_or_else(|| Error::bad_database("Event in space state not found"))?; + + if serde_json::from_str::(pdu.content.get()) + .ok() + .and_then(|c| c.via) + .map_or(true, |v| v.is_empty()) + { + continue; + } + if let Ok(room_id) = OwnedRoomId::try_from(state_key) { children_ids.push(room_id); - children_pdus.push(services().rooms.timeline.get_pdu(&id)?.ok_or_else( - || Error::bad_database("Event in space state not found"), - )?); + children_pdus.push(pdu); } } From fd9e52a559303989740cb64deb273eefea9d3958 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Aug 2023 11:45:31 +0200 Subject: [PATCH 1458/1727] More sanity checks --- src/service/rooms/event_handler/mod.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index c6e433c..899f035 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -119,6 +119,7 @@ impl Service { let (incoming_pdu, val) = self .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) .await?; + self.check_room_id(room_id, &incoming_pdu)?; // 8. if not timeline event: stop if !is_timeline_event { @@ -338,6 +339,8 @@ impl Service { ) .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + self.check_room_id(room_id, &incoming_pdu)?; + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. 
Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often @@ -373,6 +376,8 @@ impl Service { } }; + self.check_room_id(room_id, &auth_event)?; + match auth_events.entry(( auth_event.kind.to_string().into(), auth_event @@ -1178,6 +1183,8 @@ impl Service { .await .pop() { + self.check_room_id(room_id, &pdu)?; + if amount > services().globals.max_fetch_prev_events() { // Max limit reached warn!("Max prev event limit reached!"); @@ -1702,4 +1709,15 @@ impl Service { "Failed to find public key for server", )) } + + fn check_room_id(&self, room_id: &RoomId, pdu: &PduEvent) -> Result<()> { + if pdu.room_id != room_id { + warn!("Found event from room {} in room {}", pdu.room_id, room_id); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has wrong room id", + )); + } + Ok(()) + } } From 606b25b9e73b467f44912bf10d2d4c299e9dbd2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Aug 2023 17:26:55 +0200 Subject: [PATCH 1459/1727] improvement: more forgiving admin command syntax --- src/service/admin/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 6c3e3d0..0fe5edf 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -287,7 +287,7 @@ impl Service { // Parse and process a message from the admin room async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { - let mut lines = room_message.lines(); + let mut lines = room_message.lines().filter(|l| !l.trim().is_empty()); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 83c3010..25e1c54 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -456,7 +456,9 @@ impl Service { let server_user = format!("@conduit:{}", services().globals.server_name()); let to_conduit = body.starts_with(&format!("{server_user}: ")) - || body.starts_with(&format!("{server_user} ")); + || body.starts_with(&format!("{server_user} ")) + || body == format!("{server_user}:") + || body == format!("{server_user}"); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit From 19bfee1835ba89246ec9e3e703ceb5c2f8dc6711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Aug 2023 17:45:58 +0200 Subject: [PATCH 1460/1727] improvement: matrix.org is default trusted server if unspecified --- src/config/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 9128c52..a4d7cca 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -65,7 +65,7 @@ pub struct Config { #[serde(default)] pub proxy: ProxyConfig, pub jwt_secret: Option, - #[serde(default = "Vec::new")] + #[serde(default = "default_trusted_servers")] pub trusted_servers: Vec, #[serde(default = "default_log")] pub log: String, @@ -259,6 +259,10 @@ fn default_max_fetch_prev_events() -> u16 { 100_u16 } +fn default_trusted_servers() -> Vec { + vec![OwnedServerName::try_from("matrix.org").unwrap()] +} + fn default_log() -> String { "warn,state_res=warn,_=off,sled=off".to_owned() } From 5d16948030d72b6123890f3e1daf0a0b044efeaf Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Aug 2023 18:57:25 +0200 Subject: [PATCH 1461/1727] Bump version to v0.6.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9d4be9..35a4208 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -363,7 +363,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.6.0-alpha" +version = "0.6.0" dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index ecaa35c..15dc1da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.6.0-alpha" +version = "0.6.0" edition = "2021" # When changing this, make sure to update the `flake.lock` file by running From ce2017a10e92141881e0ec438a848a6ab1382a94 Mon Sep 17 00:00:00 2001 From: girlbossceo Date: Thu, 10 Aug 2023 23:12:37 +0000 Subject: [PATCH 1462/1727] log handling previous event time as debug Signed-off-by: girlbossceo --- src/service/rooms/event_handler/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 899f035..cad0a45 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -226,7 +226,7 @@ impl Service { .write() .unwrap() .remove(&room_id.to_owned()); - warn!( + debug!( "Handling prev event {} took {}m{}s", prev_id, elapsed.as_secs() / 60, From 11103a92ed98e89ae535af4ed185ebd570a2ce81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 11 Aug 2023 10:48:48 +0200 Subject: [PATCH 1463/1727] Do not show "Invalid room version" errors when server is not in room --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/api/server_server.rs | 19 ++++++++++++++++++- src/service/rooms/state/mod.rs | 15 ++++++--------- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35a4208..26b7201 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -363,7 +363,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.6.0" +version = "0.7.0-alpha" dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 15dc1da..0138a2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.6.0" +version = "0.7.0-alpha" edition = "2021" # When changing this, make sure to update the `flake.lock` file by running diff --git a/src/api/server_server.rs b/src/api/server_server.rs index bb92405..9bc9687 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{debug, error, warn}; +use tracing::{debug, error, trace, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). 
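The hunk that follows adds a cheap guard to `send_transaction_message_route`: before doing any real work on an incoming PDU, its `room_id` is pulled out of the raw JSON and the event is silently skipped when this server has no state for that room, so such events no longer surface as "Invalid room version" errors. A condensed, standalone sketch of the idea (simplified types, `serde_json::Value` instead of `CanonicalJsonObject`):

```rust
use serde_json::Value;

/// Condensed sketch of the per-PDU guard: events for unknown rooms are skipped
/// instead of being reported as errors. Simplified types, not the handler itself.
fn should_handle_pdu(raw_pdu: &str, known_rooms: &[&str]) -> bool {
    let value: Value = match serde_json::from_str(raw_pdu) {
        Ok(v) => v,
        // Unparsable PDUs are rejected (the real code logs a warning here).
        Err(_) => return false,
    };
    match value.get("room_id").and_then(Value::as_str) {
        Some(room_id) => known_rooms.contains(&room_id),
        None => false,
    }
}

fn main() {
    let pdu = r#"{"room_id":"!abc:example.com","type":"m.room.message"}"#;
    assert!(should_handle_pdu(pdu, &["!abc:example.com"]));
    assert!(!should_handle_pdu(pdu, &[])); // not in the room: skip quietly
}
```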
@@ -707,6 +707,23 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + let room_id: OwnedRoomId = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid room id in pdu", + ))?; + + if services().rooms.state.get_room_version(&room_id).is_err() { + debug!("Server is not in room {room_id}"); + continue; + } + let r = parse_incoming_pdu(&pdu); let (event_id, value, room_id) = match r { Ok(t) => t, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 16e0a04..48c6020 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,6 +6,7 @@ use std::{ pub use data::Data; use ruma::{ + api::client::error::ErrorKind, events::{ room::{create::RoomCreateEventContent, member::MembershipState}, AnyStrippedStateEvent, StateEventType, TimelineEventType, @@ -331,7 +332,7 @@ impl Service { "", )?; - let create_event_content: Option = create_event + let create_event_content: RoomCreateEventContent = create_event .as_ref() .map(|create_event| { serde_json::from_str(create_event.content.get()).map_err(|e| { @@ -339,14 +340,10 @@ impl Service { Error::bad_database("Invalid create event in db.") }) }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| { - warn!("Invalid room version for room {room_id}"); - Error::BadDatabase("Invalid room version") - })?; - Ok(room_version) + .transpose()? + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; + + Ok(create_event_content.room_version) } pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { From f73a657a23be2d3d5690069bd7cc32a4897ee881 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 11 Aug 2023 20:29:22 +0200 Subject: [PATCH 1464/1727] fix: ACL error shouldn't break the whole request --- src/api/server_server.rs | 5 ----- src/service/rooms/event_handler/mod.rs | 2 ++ 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9bc9687..f29de32 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -735,11 +735,6 @@ pub async fn send_transaction_message_route( }; // We do not add the event_id field to the pdu here because of signature and hashes checks - services() - .rooms - .event_handler - .acl_check(sender_servername, &room_id)?; - let mutex = Arc::clone( services() .globals diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 899f035..1fa78b6 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -92,6 +92,8 @@ impl Service { )); } + services().rooms.event_handler.acl_check(origin, &room_id)?; + // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? 
{ return Ok(Some(pdu_id.to_vec())); From 9b55ce933a5cf62f445a061bdcd6b17e7bb58857 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Aug 2023 09:53:32 +0200 Subject: [PATCH 1465/1727] Back off from more events, don't retry auth events --- src/service/rooms/event_handler/mod.rs | 110 ++++++++++++++++--------- 1 file changed, 71 insertions(+), 39 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 1fa78b6..e93feaa 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -119,7 +119,15 @@ impl Service { .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) + .handle_outlier_pdu( + origin, + &create_event, + event_id, + room_id, + value, + false, + pub_key_map, + ) .await?; self.check_room_id(room_id, &incoming_pdu)?; @@ -276,6 +284,7 @@ impl Service { event_id: &'a EventId, room_id: &'a RoomId, mut value: BTreeMap, + auth_events_known: bool, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { @@ -343,23 +352,25 @@ impl Service { self.check_room_id(room_id, &incoming_pdu)?; - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); - self.fetch_and_handle_outliers( - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - room_version_id, - pub_key_map, - ) - .await; + if !auth_events_known { + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); + self.fetch_and_handle_outliers( + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + room_version_id, + pub_key_map, + ) + .await; + } // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events debug!( @@ -1015,26 +1026,6 @@ impl Service { let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&**id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } - // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) @@ -1052,6 +1043,26 @@ impl Service { let mut events_all = HashSet::new(); let mut i = 0; while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } + } + if events_all.contains(&next_id) { continue; } @@ -1062,7 +1073,7 @@ impl Service { } if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { - trace!("Found {} in db", id); + trace!("Found {} in db", next_id); continue; } @@ -1121,6 +1132,26 @@ impl Service { } for (next_id, value) in events_in_reverse_order.iter().rev() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&**next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } + } + match self .handle_outlier_pdu( origin, @@ -1128,6 +1159,7 @@ impl Service { next_id, room_id, value.clone(), + true, pub_key_map, ) .await From 38d6426b0eed35ed0adbdfc414bedb48eb6e2ac8 Mon Sep 17 00:00:00 2001 From: purplemeteorite Date: Tue, 4 Jul 2023 18:41:05 +0200 Subject: [PATCH 1466/1727] coturn setup instructions for docker --- docker/README.md | 55 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index c702832..4ed7083 100644 --- a/docker/README.md +++ b/docker/README.md @@ -54,7 +54,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. -## Docker-compose +### Docker-compose If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. @@ -138,3 +138,56 @@ So...step by step: 6. Run `docker-compose up -d` 7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. + + + + +## Voice communication + +In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place. + +### Configuration + +Create a configuration file called `coturn.conf` containing: + +```conf +use-auth-secret +static-auth-secret= +realm= +``` +These same values need to be set in conduit. 
You can either modify conduit.toml to include these lines: +``` +turn_uris = ["turn:?transport=udp", "turn:?transport=tcp"] +turn_secret = "" +``` +or append the following to the docker environment variables dependig on which configuration method you used earlier: +```yml +- CONDUIT_TURN_URIS=["turn:?transport=udp", "turn:?transport=tcp"] +- CONDUIT_TURN_SECRET=" +``` +Restart Conduit to apply these changes. + +### Run +Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using +```bash +docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn +``` + +or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml` +and run `docker-compose up -d` in the same directory. + +```yml +version: 3 +services: + turn: + container_name: coturn-server + image: docker.io/coturn/coturn + restart: unless-stopped + network_mode: "host" + volumes: + - ./coturn.conf:/etc/coturn/turnserver.conf +``` + +To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md. +For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration). + From 20924a44f1a5214df22f0547ce0e686f962ea649 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 10 Aug 2023 14:42:04 +0000 Subject: [PATCH 1467/1727] Suggestion on how to generate a secure key --- docker/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/README.md b/docker/README.md index 4ed7083..dccdc35 100644 --- a/docker/README.md +++ b/docker/README.md @@ -155,6 +155,8 @@ use-auth-secret static-auth-secret= realm= ``` +A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. + These same values need to be set in conduit. You can either modify conduit.toml to include these lines: ``` turn_uris = ["turn:?transport=udp", "turn:?transport=tcp"] From 9b3664aeeb66950d0e74104b872f8e9c739eb01d Mon Sep 17 00:00:00 2001 From: Luke McCarthy Date: Sun, 27 Aug 2023 02:14:03 +0000 Subject: [PATCH 1468/1727] Update README.md to fix typo & fix compatibility with new versions of docker compose --- docker/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index 9fe7ed0..b34f9d8 100644 --- a/docker/README.md +++ b/docker/README.md @@ -187,8 +187,8 @@ turn_secret = "" ``` or append the following to the docker environment variables dependig on which configuration method you used earlier: ```yml -- CONDUIT_TURN_URIS=["turn:?transport=udp", "turn:?transport=tcp"] -- CONDUIT_TURN_SECRET=" +CONDUIT_TURN_URIS: '["turn:?transport=udp", "turn:?transport=tcp"]' +CONDUIT_TURN_SECRET: "" ``` Restart Conduit to apply these changes. 
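For context on how that shared secret is used: with `use-auth-secret`, coturn expects the TURN password to be an HMAC-SHA1 of the username keyed by the secret, which is what the `voip.rs` hunk in the base64 patch earlier in this series computes. A small standalone sketch, assuming the `hmac`, `sha1` and `base64` crates; the `<shared secret>` value and the `timestamp:user` username shape are placeholders, not values from this repository:

```rust
use base64::{engine::general_purpose, Engine as _};
use hmac::{Hmac, Mac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

/// Derive the ephemeral TURN password for `username` from coturn's
/// static-auth-secret. Sketch only; conduit does this in turn_server_route.
fn turn_password(secret: &str, username: &str) -> String {
    let mut mac =
        HmacSha1::new_from_slice(secret.as_bytes()).expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    // base64 0.21 Engine API, as migrated to earlier in this series.
    general_purpose::STANDARD.encode(mac.finalize().into_bytes())
}

fn main() {
    // Placeholder values: an expiry timestamp plus a user ID, following the common
    // "<unix expiry>:<mxid>" username convention for TURN REST credentials.
    let password = turn_password("<shared secret>", "1700000000:@alice:example.com");
    println!("{password}");
}
```

Both sides must see the same secret: coturn reads it from `static-auth-secret` in `coturn.conf`, and Conduit reads it from `turn_secret` (or `CONDUIT_TURN_SECRET`), so a mismatch makes every call fail authentication at the TURN server.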
From 75c80df27133deb5a8fb65b1828e190925256e46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 13 Sep 2023 20:54:53 +0200 Subject: [PATCH 1469/1727] Sliding sync improvements and redaction fixes --- Cargo.lock | 288 ++++++++++------------- Cargo.toml | 2 +- src/api/client_server/keys.rs | 58 ++++- src/api/client_server/redact.rs | 1 + src/api/client_server/room.rs | 3 +- src/api/client_server/sync.rs | 89 +++---- src/database/key_value/rooms/timeline.rs | 2 - src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 2 + src/service/rooms/event_handler/mod.rs | 17 +- src/service/users/mod.rs | 25 +- 11 files changed, 270 insertions(+), 219 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26b7201..9c8596a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,6 +58,12 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "as_variant" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" + [[package]] name = "assign" version = "1.1.1" @@ -227,15 +233,6 @@ dependencies = [ "constant_time_eq 0.2.6", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -388,7 +385,7 @@ dependencies = [ "opentelemetry-jaeger", "parking_lot", "persy", - "rand 0.8.5", + "rand", "regex", "reqwest", "ring", @@ -569,17 +566,32 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "platforms", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.21", +] + [[package]] name = "dashmap" version = "5.4.0" @@ -601,30 +613,21 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "der" -version = "0.6.1" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "zeroize", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "crypto-common", "subtle", ] @@ -651,24 
+654,25 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ + "pkcs8", "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ "curve25519-dalek", "ed25519", - "rand 0.7.3", + "rand_core", "serde", - "sha2 0.9.9", + "sha2", "zeroize", ] @@ -726,6 +730,12 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "fiat-crypto" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" + [[package]] name = "figment" version = "0.10.10" @@ -874,17 +884,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.10" @@ -893,7 +892,7 @@ checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -1038,7 +1037,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -1512,7 +1511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -1607,12 +1606,6 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - [[package]] name = "openssl-probe" version = "0.1.5" @@ -1687,7 +1680,7 @@ dependencies = [ "once_cell", "opentelemetry_api", "percent-encoding", - "rand 0.8.5", + "rand", "thiserror", "tokio", "tokio-stream", @@ -1801,7 +1794,7 @@ dependencies = [ "data-encoding", "fs2", "linked-hash-map", - "rand 0.8.5", + "rand", "thiserror", "unsigned-varint", "zigzag", @@ -1841,9 +1834,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.9.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", "spki", @@ -1855,6 +1848,12 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] 
+name = "platforms" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" + [[package]] name = "png" version = "0.17.9" @@ -1931,19 +1930,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -1951,18 +1937,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -1972,16 +1948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -1990,16 +1957,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] @@ -2026,7 +1984,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom", "redox_syscall 0.2.16", "thiserror", ] @@ -2139,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "assign", "js_int", @@ -2147,6 +2105,7 @@ dependencies = [ "ruma-appservice-api", "ruma-client-api", "ruma-common", + "ruma-events", "ruma-federation-api", "ruma-identity-service-api", "ruma-push-gateway-api", @@ -2157,10 +2116,11 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "js_int", "ruma-common", + "ruma-events", "serde", "serde_json", ] @@ -2168,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.16.2" -source = 
"git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "assign", "bytes", @@ -2177,6 +2137,7 @@ dependencies = [ "js_option", "maplit", "ruma-common", + "ruma-events", "serde", "serde_html_form", "serde_json", @@ -2185,18 +2146,18 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ + "as_variant", "base64 0.21.2", "bytes", "form_urlencoded", "http", "indexmap 2.0.0", "js_int", - "js_option", "konst", "percent-encoding", - "rand 0.8.5", + "rand", "regex", "ruma-identifiers-validation", "ruma-macros", @@ -2210,13 +2171,36 @@ dependencies = [ "wildmatch", ] +[[package]] +name = "ruma-events" +version = "0.26.0" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +dependencies = [ + "as_variant", + "indexmap 2.0.0", + "js_int", + "js_option", + "percent-encoding", + "regex", + "ruma-common", + "ruma-identifiers-validation", + "ruma-macros", + "serde", + "serde_json", + "thiserror", + "tracing", + "url", + "wildmatch", +] + [[package]] name = "ruma-federation-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "js_int", "ruma-common", + "ruma-events", "serde", "serde_json", ] @@ -2224,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "js_int", "thiserror", @@ -2233,7 +2217,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "js_int", "ruma-common", @@ -2243,7 +2227,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "once_cell", "proc-macro-crate", @@ -2258,10 +2242,11 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "js_int", "ruma-common", 
+ "ruma-events", "serde", "serde_json", ] @@ -2269,15 +2254,15 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "base64 0.21.2", "ed25519-dalek", "pkcs8", - "rand 0.7.3", + "rand", "ruma-common", "serde_json", - "sha2 0.10.7", + "sha2", "subslice", "thiserror", ] @@ -2285,11 +2270,12 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=3bd58e3c899457c2d55c45268dcb8a65ae682d54#3bd58e3c899457c2d55c45268dcb8a65ae682d54" +source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "itertools", "js_int", "ruma-common", + "ruma-events", "serde", "serde_json", "thiserror", @@ -2328,6 +2314,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.20.8" @@ -2458,6 +2453,12 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" + [[package]] name = "serde" version = "1.0.164" @@ -2553,7 +2554,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -2564,20 +2565,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -2588,7 +2576,7 @@ checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -2617,9 +2605,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.6.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" [[package]] name = "simd-adler32" @@ -2682,9 +2670,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", "der", @@ -3145,7 +3133,7 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "rand 0.8.5", + "rand", 
"smallvec", "thiserror", "tinyvec", @@ -3257,7 +3245,7 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.10", + "getrandom", ] [[package]] @@ -3287,12 +3275,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3587,20 +3569,6 @@ name = "zeroize" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.21", -] [[package]] name = "zigzag" diff --git a/Cargo.toml b/Cargo.toml index 0138a2d..ff1785e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "3bd58e3c899457c2d55c45268dcb8a65ae682d54", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index b847301..7dbe040 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -17,7 +17,11 @@ use ruma::{ DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::{ + collections::{hash_map, BTreeMap, HashMap, HashSet}, + time::{Duration, Instant}, +}; +use tracing::debug; /// # `POST /_matrix/client/r0/keys/upload` /// @@ -335,31 +339,68 @@ pub(crate) async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); + let back_off = |id| match services() + .globals + 
.bad_query_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { + if let Some((time, tries)) = services() + .globals + .bad_query_ratelimiter + .read() + .unwrap() + .get(&*server) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off query from {:?}", server); + return ( + server, + Err(Error::BadServerResponse("bad query, still backing off")), + ); + } + } + let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } ( server, - services() - .sending - .send_federation_request( + tokio::time::timeout( + Duration::from_secs(25), + services().sending.send_federation_request( server, federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, }, - ) - .await, + ), + ) + .await + .map_err(|e| Error::BadServerResponse("Query took too long")), ) }) .collect(); while let Some((server, response)) = futures.next().await { match response { - Ok(response) => { + Ok(Ok(response)) => { for (user, masterkey) in response.master_keys { let (master_key_id, mut master_key) = services().users.parse_master_key(&user, &masterkey)?; @@ -386,7 +427,8 @@ pub(crate) async fn get_keys_helper bool>( self_signing_keys.extend(response.self_signing_keys); device_keys.extend(response.device_keys); } - Err(_e) => { + _ => { + back_off(server.to_owned()); failures.insert(server.to_string(), json!({})); } } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 20f7e91..21da222 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -34,6 +34,7 @@ pub async fn redact_event_route( PduBuilder { event_type: TimelineEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { + redacts: Some(body.event_id.clone()), reason: body.reason.clone(), }) .expect("event is valid, we just created it"), diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 420dd50..0e2d932 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -142,8 +142,9 @@ pub async fn create_room_route( content } None => { + // TODO: Add correct value for v11 let mut content = serde_json::from_str::( - to_raw_value(&RoomCreateEventContent::new(sender_user.clone())) + to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone())) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? .get(), ) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index dd815b5..a275b06 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1102,7 +1102,7 @@ async fn load_joined_room( fn load_timeline( sender_user: &UserId, room_id: &RoomId, - sincecount: PduCount, + roomsincecount: PduCount, limit: u64, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let timeline_pdus; @@ -1111,7 +1111,7 @@ fn load_timeline( .rooms .timeline .last_timeline_count(&sender_user, &room_id)? 
- > sincecount + > roomsincecount { let mut non_timeline_pdus = services() .rooms @@ -1124,7 +1124,7 @@ fn load_timeline( } r.ok() }) - .take_while(|(pducount, _)| pducount > &sincecount); + .take_while(|(pducount, _)| pducount > &roomsincecount); // Take the last events for the timeline timeline_pdus = non_timeline_pdus @@ -1172,22 +1172,22 @@ fn share_encrypted_room( pub async fn sync_events_v4_route( body: Ruma, ) -> Result> { + dbg!(&body.body); let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); - let next_batch = services().globals.current_count()?; + let next_batch = services().globals.next_count()?; - let since = body + let globalsince = body .pos .as_ref() .and_then(|string| string.parse().ok()) .unwrap_or(0); - let sincecount = PduCount::Normal(since); - if since == 0 { + if globalsince == 0 { if let Some(conn_id) = &body.conn_id { services().users.forget_sync_request_connection( sender_user.clone(), @@ -1214,7 +1214,7 @@ pub async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services() .users - .remove_to_device_events(&sender_user, &sender_device, since)?; + .remove_to_device_events(&sender_user, &sender_device, globalsince)?; } let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in @@ -1226,7 +1226,7 @@ pub async fn sync_events_v4_route( device_list_changes.extend( services() .users - .keys_changed(sender_user.as_ref(), since, None) + .keys_changed(sender_user.as_ref(), globalsince, None) .filter_map(|r| r.ok()), ); @@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, since)?; + .get_token_shortstatehash(&room_id, globalsince)?; let since_sender_member: Option = since_shortstatehash .and_then(|shortstatehash| { @@ -1371,7 +1371,7 @@ pub async fn sync_events_v4_route( device_list_changes.extend( services() .users - .keys_changed(room_id.as_ref(), since, None) + .keys_changed(room_id.as_ref(), globalsince, None) .filter_map(|r| r.ok()), ); } @@ -1408,7 +1408,7 @@ pub async fn sync_events_v4_route( continue; } - let mut new_known_rooms = BTreeMap::new(); + let mut new_known_rooms = BTreeSet::new(); lists.insert( list_id.clone(), @@ -1424,12 +1424,12 @@ pub async fn sync_events_v4_route( let room_ids = all_joined_rooms [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] .to_vec(); - new_known_rooms.extend(room_ids.iter().cloned().map(|r| (r, true))); + new_known_rooms.extend(room_ids.iter().cloned()); for room_id in &room_ids { let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( BTreeSet::new(), 0, - true, + u64::MAX, )); let limit = list .room_details @@ -1440,10 +1440,14 @@ pub async fn sync_events_v4_route( .0 .extend(list.room_details.required_state.iter().cloned()); todo_room.1 = todo_room.1.max(limit); - if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true) - { - todo_room.2 = false; - } + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(&list_id) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); } sync_events::v4::SyncOp { op: SlidingOp::Sync, @@ -1465,26 +1469,28 @@ pub async fn sync_events_v4_route( conn_id.clone(), list_id, new_known_rooms, + 
globalsince, ); } } - let mut known_subscription_rooms = BTreeMap::new(); + let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { let todo_room = todo_rooms .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, true)); + .or_insert((BTreeSet::new(), 0, u64::MAX)); let limit = room.timeline_limit.map_or(10, u64::from).min(100); todo_room.0.extend(room.required_state.iter().cloned()); todo_room.1 = todo_room.1.max(limit); - if known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - != Some(&true) - { - todo_room.2 = false; - } - known_subscription_rooms.insert(room_id.clone(), true); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); } for r in body.unsubscribe_rooms { @@ -1499,6 +1505,7 @@ pub async fn sync_events_v4_route( conn_id.clone(), "subscriptions".to_owned(), known_subscription_rooms, + globalsince, ); } @@ -1512,12 +1519,13 @@ pub async fn sync_events_v4_route( } let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms { - // TODO: per-room sync tokens - let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?; + for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { + let roomsincecount = PduCount::Normal(*roomsince); - if *known && timeline_pdus.is_empty() { + let (timeline_pdus, limited) = + load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?; + + if roomsince != &0 && timeline_pdus.is_empty() { continue; } @@ -1533,8 +1541,8 @@ pub async fn sync_events_v4_route( })) })? .or_else(|| { - if since != 0 { - Some(since.to_string()) + if roomsince != &0 { + Some(roomsince.to_string()) } else { None } @@ -1621,7 +1629,7 @@ pub async fn sync_events_v4_route( .state_accessor .get_avatar(&room_id)? .map_or(avatar, |a| a.url), - initial: Some(!known), + initial: Some(roomsince == &0), is_dm: None, invite_state: None, unread_notifications: UnreadNotificationsCount { @@ -1663,6 +1671,7 @@ pub async fn sync_events_v4_route( .into(), ), num_live: None, // Count events in timeline greater than global sync counter + timestamp: None, }, ); } @@ -1680,8 +1689,8 @@ pub async fn sync_events_v4_route( let _ = tokio::time::timeout(duration, watcher).await; } - Ok(sync_events::v4::Response { - initial: since == 0, + Ok(dbg!(sync_events::v4::Response { + initial: globalsince == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), lists, @@ -1712,7 +1721,7 @@ pub async fn sync_events_v4_route( global: if body.extensions.account_data.enabled.unwrap_or(false) { services() .account_data - .changes_since(None, &sender_user, since)? + .changes_since(None, &sender_user, globalsince)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -1735,5 +1744,5 @@ pub async fn sync_events_v4_route( }, }, delta_token: None, - }) + })) } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 5ce2136..ba1e85e 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -84,8 +84,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0fe5edf..b22f8ed 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -932,7 +932,7 @@ impl Service { services().users.create(&conduit_user, None)?; - let mut content = RoomCreateEventContent::new(conduit_user.clone()); + let mut content = RoomCreateEventContent::new_v1(conduit_user.clone()); content.federate = true; content.predecessor = None; content.room_version = services().globals.default_room_version(); diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index e9bd0da..9bce8a2 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -56,6 +56,7 @@ pub struct Service { pub unstable_room_versions: Vec, pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, + pub bad_query_ratelimiter: Arc>>, pub servername_ratelimiter: Arc>>>, pub sync_receivers: RwLock>, pub roomid_mutex_insert: RwLock>>>, @@ -160,6 +161,7 @@ impl Service { unstable_room_versions, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), roomid_mutex_state: RwLock::new(HashMap::new()), roomid_mutex_insert: RwLock::new(HashMap::new()), diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e93feaa..aa1f5fb 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -326,7 +326,7 @@ impl Service { Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - match ruma::canonical_json::redact(value, room_version_id, None) { + let obj = match ruma::canonical_json::redact(value, room_version_id, None) { Ok(obj) => obj, Err(_) => { return Err(Error::BadRequest( @@ -334,7 +334,17 @@ impl Service { "Redaction failed", )) } + }; + + // Skip the PDU if it is redacted and we already have it as an outlier event + if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event was redacted and we already knew about it", + )); } + + obj } Ok(ruma::signatures::Verified::All) => value, }; @@ -1564,6 +1574,11 @@ impl Service { } }; + if acl_event_content.allow.is_empty() { + // Ignore broken acl events + return Ok(()); + } + if acl_event_content.is_allowed(server_name) { Ok(()) } else { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index c345e56..6faa6d8 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,6 +1,6 @@ mod data; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, mem, sync::{Arc, Mutex}, }; @@ -28,7 +28,7 @@ use crate::{services, Error, Result}; pub struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, - known_rooms: BTreeMap>, + known_rooms: BTreeMap>, // For every room, the roomsince number extensions: ExtensionsConfig, } @@ -61,7 +61,7 @@ impl Service { user_id: OwnedUserId, device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, - ) -> BTreeMap> { + ) -> BTreeMap> { let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; @@ -127,6 +127,7 @@ impl Service { } } (_, Some(cached_filters)) => list.filters = Some(cached_filters), + (Some(list_filters), _) 
=> list.filters = Some(list_filters.clone()), (_, _) => {} } if list.bump_event_types.is_empty() { @@ -210,7 +211,8 @@ impl Service { device_id: OwnedDeviceId, conn_id: String, list_id: String, - new_cached_rooms: BTreeMap, + new_cached_rooms: BTreeSet, + globalsince: u64, ) { let mut cache = self.connections.lock().unwrap(); let cached = Arc::clone( @@ -228,7 +230,20 @@ impl Service { let cached = &mut cached.lock().unwrap(); drop(cache); - cached.known_rooms.insert(list_id, new_cached_rooms); + for (roomid, lastsince) in cached + .known_rooms + .entry(list_id.clone()) + .or_default() + .iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } } /// Check if account is deactivated From f3b6b3e222caa3c01a6a301d38ff15e98f8b973f Mon Sep 17 00:00:00 2001 From: AndSDev Date: Tue, 7 Nov 2023 12:46:53 +0000 Subject: [PATCH 1470/1727] feat: send push notification on invite to invited user and etc --- src/service/rooms/timeline/mod.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 25e1c54..e419980 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -320,12 +320,25 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in services() + let mut push_target = services() .rooms .state_cache - .get_our_real_users(&pdu.room_id)? - .iter() - { + .get_our_real_users(&pdu.room_id)?; + + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let target_user_id = UserId::parse(state_key.clone()) + .expect("This state_key was previously validated"); + + if !push_target.contains(&target_user_id) { + let mut target = push_target.as_ref().clone(); + target.insert(target_user_id); + push_target = Arc::new(target); + } + } + } + + for user in push_target.iter() { // Don't notify the user of their own events if user == &pdu.sender { continue; From d39d30008a81a0149279eef01e2e2969d1a10078 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 15:11:26 +0100 Subject: [PATCH 1471/1727] Remove "creator" property from rooms >= v11 --- src/api/client_server/room.rs | 71 +++++++++++++++++++++++++------ src/service/admin/mod.rs | 17 +++++++- src/service/pdu.rs | 20 ++++++++- src/service/rooms/timeline/mod.rs | 51 ++++++++++++---------- 4 files changed, 119 insertions(+), 40 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 0e2d932..6b82404 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -23,7 +23,7 @@ use ruma::{ }, int, serde::JsonObject, - CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, + CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -127,12 +127,28 @@ pub async fn create_room_route( let mut content = content .deserialize_as::() .expect("Invalid creation content"); - content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, - ); + + match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | 
RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, + ); + } + _ => {} // V11 removed the "creator" key + } + content.insert( "room_version".into(), json!(room_version.as_str()).try_into().map_err(|_| { @@ -143,8 +159,21 @@ pub async fn create_room_route( } None => { // TODO: Add correct value for v11 + let content = match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()), + _ => RoomCreateEventContent::new_v11(), + }; let mut content = serde_json::from_str::( - to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone())) + to_raw_value(&content) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? .get(), ) @@ -580,12 +609,26 @@ pub async fn upgrade_room_route( )); // Send a m.room.create event containing a predecessor field and the applicable room_version - create_event_content.insert( - "creator".into(), - json!(&sender_user) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, - ); + match body.new_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + create_event_content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") + })?, + ); + } + _ => {} // V11 removed the "creator" key + } create_event_content.insert( "room_version".into(), json!(&body.new_version) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b22f8ed..292943a 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -932,10 +932,23 @@ impl Service { services().users.create(&conduit_user, None)?; - let mut content = RoomCreateEventContent::new_v1(conduit_user.clone()); + let room_version = services().globals.default_room_version(); + let mut content = match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), + _ => RoomCreateEventContent::new_v11(), + }; content.federate = true; content.predecessor = None; - content.room_version = services().globals.default_room_version(); + content.room_version = room_version; // 1. 
The room create event services().rooms.timeline.build_and_append_pdu( diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 4a170bc..9e7f6d1 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -49,12 +49,28 @@ pub struct PduEvent { impl PduEvent { #[tracing::instrument(skip(self))] - pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { + pub fn redact( + &mut self, + room_version_id: RoomVersionId, + reason: &PduEvent, + ) -> crate::Result<()> { self.unsigned = None; let allowed: &[&str] = match self.kind { TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"], - TimelineEventType::RoomCreate => &["creator"], + TimelineEventType::RoomCreate => match room_version_id { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => &["creator"], + _ => &[], // V11 removed the creator key + }, TimelineEventType::RoomJoinRules => &["join_rule"], TimelineEventType::RoomPowerLevels => &[ "ban", diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 25e1c54..4f3b0e5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -28,7 +28,7 @@ use ruma::{ state_res, state_res::{Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomAliasId, RoomId, ServerName, UserId, + OwnedServerName, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -128,6 +128,27 @@ impl Service { self.db.get_pdu_count(event_id) } + /// Returns the version of a room, if known + pub fn get_room_version(&self, room_id: &RoomId) -> Result> { + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + Ok(create_event_content.map(|content| content.room_version)) + } + // TODO Is this the same as the function above? /* #[tracing::instrument(skip(self))] @@ -645,28 +666,11 @@ impl Service { .take(20) .collect(); - let create_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomCreate, - "", - )?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - // If there was no create event yet, assume we are creating a room with the default // version right now - let room_version_id = create_event_content - .map_or(services().globals.default_room_version(), |create_event| { - create_event.room_version - }); + let room_version_id = self + .get_room_version(room_id)? + .unwrap_or_else(|| services().globals.default_room_version()); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = services().rooms.state.get_auth_events( @@ -1034,7 +1038,10 @@ impl Service { let mut pdu = self .get_pdu_from_id(&pdu_id)? 
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; + let room_version_id = self.get_room_version(&pdu.room_id)?.ok_or_else(|| { + Error::bad_database("Trying to redact PDU in in room of unknown version") + })?; + pdu.redact(room_version_id, reason)?; self.replace_pdu( &pdu_id, &utils::to_canonical_object(&pdu).expect("PDU is an object"), From a3b8eea9b4885bccdfad7279f809c472ddd89a44 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 17:46:50 +0100 Subject: [PATCH 1472/1727] Move "redacts" key to "content" in redaction events in v11 rooms --- src/service/rooms/timeline/mod.rs | 35 ++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4f3b0e5..4b06acb 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -404,9 +404,38 @@ impl Service { match pdu.kind { TimelineEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } + let room_version_id = self + .get_room_version(&pdu.room_id)? + .expect("Got RoomRedaction in a room of unknown version"); + match room_version_id { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + if let Some(redact_id) = &pdu.redacts { + self.redact_pdu(redact_id, pdu)?; + } + } + _ => { + #[derive(Deserialize)] + struct Redaction { + redacts: Option, + } + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in redaction pdu.") + })?; + if let Some(redact_id) = &content.redacts { + self.redact_pdu(redact_id, pdu)?; + } + } + }; } TimelineEventType::SpaceChild => { if let Some(_state_key) = &pdu.state_key { From 18bfd79ef2d4a0bb18622bb25b3da23e05a04481 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 18:15:20 +0100 Subject: [PATCH 1473/1727] Remove "creator" key when upgrading rooms to v11 --- src/api/client_server/room.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 6b82404..5f62e9f 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -627,7 +627,10 @@ pub async fn upgrade_room_route( })?, ); } - _ => {} // V11 removed the "creator" key + _ => { + // "creator" key no longer exists in V11 rooms + create_event_content.remove("creator"); + } } create_event_content.insert( "room_version".into(), From fac995036ab02fe30fca6a2dfc682612c4cf43d7 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 18:15:57 +0100 Subject: [PATCH 1474/1727] create_hash_and_sign_event: Use actual version of RoomCreate events, instead of the default --- src/service/rooms/timeline/mod.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4b06acb..a6e9f87 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -695,11 +695,20 @@ impl Service { .take(20) .collect(); - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = self - .get_room_version(room_id)? 
- .unwrap_or_else(|| services().globals.default_room_version()); + // If there was no create event yet, assume we are creating a room + let room_version_id = self.get_room_version(room_id)?.unwrap_or_else(|| { + if event_type != TimelineEventType::RoomCreate { + panic!("non-create event for room of unknown version"); + } + #[derive(Deserialize)] + struct RoomCreate { + room_version: RoomVersionId, + } + let content = serde_json::from_str::(content.get()) + .expect("Invalid content in RoomCreate pdu."); + content.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = services().rooms.state.get_auth_events( From 9646439a94f35a7ab46274c1465ee1192215724a Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 18:16:18 +0100 Subject: [PATCH 1475/1727] Enable support for room v11 --- src/service/globals/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 9bce8a2..1aa123f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -139,7 +139,12 @@ impl Service { RoomVersionId::V10, ]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + let unstable_room_versions = vec![ + RoomVersionId::V3, + RoomVersionId::V4, + RoomVersionId::V5, + RoomVersionId::V11, + ]; let mut s = Self { db, From 520806d41385cddee20ec8d8a83bc1c491a61280 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Fri, 1 Dec 2023 19:08:24 +0100 Subject: [PATCH 1476/1727] Use Ruma's redact_content_in_place instead of custom implementation --- src/service/pdu.rs | 48 ++++++---------------------------------------- src/utils/error.rs | 2 ++ 2 files changed, 8 insertions(+), 42 deletions(-) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 9e7f6d1..c3e8d59 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,5 +1,6 @@ use crate::Error; use ruma::{ + canonical_json::redact_content_in_place, events::{ room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, @@ -56,53 +57,16 @@ impl PduEvent { ) -> crate::Result<()> { self.unsigned = None; - let allowed: &[&str] = match self.kind { - TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"], - TimelineEventType::RoomCreate => match room_version_id { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => &["creator"], - _ => &[], // V11 removed the creator key - }, - TimelineEventType::RoomJoinRules => &["join_rule"], - TimelineEventType::RoomPowerLevels => &[ - "ban", - "events", - "events_default", - "kick", - "redact", - "state_default", - "users", - "users_default", - ], - TimelineEventType::RoomHistoryVisibility => &["history_visibility"], - _ => &[], - }; - - let mut old_content: BTreeMap = - serde_json::from_str(self.content.get()) - .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; - - let mut new_content = serde_json::Map::new(); - - for key in allowed { - if let Some(value) = old_content.remove(*key) { - new_content.insert((*key).to_owned(), value); - } - } + let mut content = serde_json::from_str(self.content.get()) + .map_err(|_| Error::bad_database("PDU in db has invalid 
content."))?; + redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) + .map_err(|e| Error::RedactionError(self.sender.server_name().to_owned(), e))?; self.unsigned = Some(to_raw_value(&json!({ "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") })).expect("to string always works")); - self.content = to_raw_value(&new_content).expect("to string always works"); + self.content = to_raw_value(&content).expect("to string always works"); Ok(()) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 6e88cf5..83ef309 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -80,6 +80,8 @@ pub enum Error { #[cfg(feature = "conduit_bin")] #[error("{0}")] PathError(#[from] axum::extract::rejection::PathRejection), + #[error("from {0}: {1}")] + RedactionError(OwnedServerName, ruma::canonical_json::RedactionError), } impl Error { From 5a7bb1e8f104b66e65a16e447d930b478640383a Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Sat, 2 Dec 2023 17:51:19 +0100 Subject: [PATCH 1477/1727] Return error instead of panic when first event is not m.room.create --- src/service/rooms/timeline/mod.rs | 33 ++++++++++++++++++++----------- src/utils/error.rs | 2 ++ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a6e9f87..06deb06 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -696,18 +696,27 @@ impl Service { .collect(); // If there was no create event yet, assume we are creating a room - let room_version_id = self.get_room_version(room_id)?.unwrap_or_else(|| { - if event_type != TimelineEventType::RoomCreate { - panic!("non-create event for room of unknown version"); - } - #[derive(Deserialize)] - struct RoomCreate { - room_version: RoomVersionId, - } - let content = serde_json::from_str::(content.get()) - .expect("Invalid content in RoomCreate pdu."); - content.room_version - }); + let room_version_id = self + .get_room_version(room_id)? 
+ .or_else(|| { + if event_type == TimelineEventType::RoomCreate { + #[derive(Deserialize)] + struct RoomCreate { + room_version: RoomVersionId, + } + let content = serde_json::from_str::(content.get()) + .expect("Invalid content in RoomCreate pdu."); + Some(content.room_version) + } else { + None + } + }) + .ok_or_else(|| { + Error::InconsistentRoomState( + "non-create event for room of unknown version", + room_id.to_owned(), + ) + })?; let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); diff --git a/src/utils/error.rs b/src/utils/error.rs index 83ef309..568ad8a 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -82,6 +82,8 @@ pub enum Error { PathError(#[from] axum::extract::rejection::PathRejection), #[error("from {0}: {1}")] RedactionError(OwnedServerName, ruma::canonical_json::RedactionError), + #[error("{0} in {1}")] + InconsistentRoomState(&'static str, ruma::OwnedRoomId), } impl Error { From 98e81c62174a587c54ed111e0eca617c1d423efd Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Sun, 3 Dec 2023 19:38:09 +0100 Subject: [PATCH 1478/1727] Log underlying error when rejecting sendjoin response --- src/api/client_server/membership.rs | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 4a1f374..346f257 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -619,7 +619,7 @@ async fn join_room_by_id_helper( )); } - if let Ok(signature) = signed_value["signatures"] + match signed_value["signatures"] .as_object() .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -630,18 +630,20 @@ async fn join_room_by_id_helper( ErrorKind::InvalidParam, "Server did not send its signature", )) - }) - { - join_event - .get_mut("signatures") - .expect("we created a valid pdu") - .as_object_mut() - .expect("we created a valid pdu") - .insert(remote_server.to_string(), signature.clone()); - } else { - warn!( - "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}", - ); + }) { + Ok(signature) => { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + } + Err(e) => { + warn!( + "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}", + ); + } } } From 9d7f7b871b9783dbdce15e0491b0eaa9d948b21f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 21:00:00 -0800 Subject: [PATCH 1479/1727] don't panic on missing presence status for a user --- src/api/client_server/presence.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index ef88d1a..e5cd1b8 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,5 +1,8 @@ -use crate::{services, utils, Result, Ruma}; -use ruma::api::client::presence::{get_presence, set_presence}; +use crate::{services, utils, Error, Result, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + presence::{get_presence, set_presence}, +}; use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` @@ -79,6 +82,9 @@ pub async fn get_presence_route( presence: presence.content.presence, }) } else { - todo!(); + Err(Error::BadRequest( + ErrorKind::NotFound, + "Presence state for this user was not 
found", + )) } } From 8f3f5c01f924820994f8bf5fe20abc5a3e6b8866 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 22:02:24 -0800 Subject: [PATCH 1480/1727] add shebang to .envrc All this really does is make syntax highlighting and shellcheck work by default in more editors. --- .envrc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.envrc b/.envrc index 3550a30..b9238c3 100644 --- a/.envrc +++ b/.envrc @@ -1 +1,3 @@ +#!/usr/bin/env bash + use flake From eb7ac91cd5a8c62fc5dbd96527712b9750394a70 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Sun, 24 Dec 2023 19:02:03 +0100 Subject: [PATCH 1481/1727] Reuse existing get_room_version --- src/service/rooms/timeline/mod.rs | 53 +++++++++---------------------- 1 file changed, 15 insertions(+), 38 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 06deb06..b0f6cb5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -128,27 +128,6 @@ impl Service { self.db.get_pdu_count(event_id) } - /// Returns the version of a room, if known - pub fn get_room_version(&self, room_id: &RoomId) -> Result> { - let create_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomCreate, - "", - )?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - Ok(create_event_content.map(|content| content.room_version)) - } - // TODO Is this the same as the function above? /* #[tracing::instrument(skip(self))] @@ -404,9 +383,7 @@ impl Service { match pdu.kind { TimelineEventType::RoomRedaction => { - let room_version_id = self - .get_room_version(&pdu.room_id)? - .expect("Got RoomRedaction in a room of unknown version"); + let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; match room_version_id { RoomVersionId::V1 | RoomVersionId::V2 @@ -696,9 +673,11 @@ impl Service { .collect(); // If there was no create event yet, assume we are creating a room - let room_version_id = self - .get_room_version(room_id)? - .or_else(|| { + let room_version_id = services() + .rooms + .state + .get_room_version(room_id) + .or_else(|_| { if event_type == TimelineEventType::RoomCreate { #[derive(Deserialize)] struct RoomCreate { @@ -706,16 +685,13 @@ impl Service { } let content = serde_json::from_str::(content.get()) .expect("Invalid content in RoomCreate pdu."); - Some(content.room_version) + Ok(content.room_version) } else { - None + Err(Error::InconsistentRoomState( + "non-create event for room of unknown version", + room_id.to_owned(), + )) } - }) - .ok_or_else(|| { - Error::InconsistentRoomState( - "non-create event for room of unknown version", - room_id.to_owned(), - ) })?; let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); @@ -1085,9 +1061,10 @@ impl Service { let mut pdu = self .get_pdu_from_id(&pdu_id)? 
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - let room_version_id = self.get_room_version(&pdu.room_id)?.ok_or_else(|| { - Error::bad_database("Trying to redact PDU in in room of unknown version") - })?; + let room_version_id = services() + .rooms + .state + .get_room_version(&pdu.room_id)?; pdu.redact(room_version_id, reason)?; self.replace_pdu( &pdu_id, From 8175bc124629bff5808270c496d7e1f32ee1cdcb Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Sun, 24 Dec 2023 19:04:48 +0100 Subject: [PATCH 1482/1727] Explicitly match RoomVersionId::V11 --- src/api/client_server/room.rs | 9 ++++++--- src/service/admin/mod.rs | 3 ++- src/service/rooms/timeline/mod.rs | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 5f62e9f..128f84f 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -146,7 +146,8 @@ pub async fn create_room_route( })?, ); } - _ => {} // V11 removed the "creator" key + RoomVersionId::V11 => {} // V11 removed the "creator" key + _ => panic!("Unexpected room version {}", room_version), } content.insert( @@ -170,7 +171,8 @@ pub async fn create_room_route( | RoomVersionId::V8 | RoomVersionId::V9 | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()), - _ => RoomCreateEventContent::new_v11(), + RoomVersionId::V11 => RoomCreateEventContent::new_v11(), + _ => panic!("Unexpected room version {}", room_version), }; let mut content = serde_json::from_str::( to_raw_value(&content) @@ -627,10 +629,11 @@ pub async fn upgrade_room_route( })?, ); } - _ => { + RoomVersionId::V11 => { // "creator" key no longer exists in V11 rooms create_event_content.remove("creator"); } + _ => panic!("Unexpected room version {}", body.new_version) } create_event_content.insert( "room_version".into(), diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 292943a..08b6e62 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -944,7 +944,8 @@ impl Service { | RoomVersionId::V8 | RoomVersionId::V9 | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), - _ => RoomCreateEventContent::new_v11(), + RoomVersionId::V11 => RoomCreateEventContent::new_v11(), + _ => panic!("Unexpected room version {}", room_version), }; content.federate = true; content.predecessor = None; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b0f6cb5..d0943c9 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -399,7 +399,7 @@ impl Service { self.redact_pdu(redact_id, pdu)?; } } - _ => { + RoomVersionId::V11 => { #[derive(Deserialize)] struct Redaction { redacts: Option, @@ -412,6 +412,7 @@ impl Service { self.redact_pdu(redact_id, pdu)?; } } + _ => panic!("Unexpected room version {}", room_version_id) }; } TimelineEventType::SpaceChild => { From 835f4ad8cfa8a222a56e21f37646a86c1acd90a7 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 16 Jan 2024 13:52:56 -0800 Subject: [PATCH 1483/1727] declare 1.5 support --- src/api/client_server/unversioned.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 797b952..70e260e 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -26,6 +26,7 @@ pub async fn get_supported_versions_route( "v1.2".to_owned(), "v1.3".to_owned(), "v1.4".to_owned(), + "v1.5".to_owned(), ], unstable_features: 
BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; From dc2f53e77393cf49ae9f6da9bb1db20abdf59230 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 20:40:56 -0800 Subject: [PATCH 1484/1727] comment out heed backend things The code in conduit doesn't compile. --- Cargo.lock | 136 +---------------------------------------------------- Cargo.toml | 6 +-- 2 files changed, 4 insertions(+), 138 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c8596a..b4bcdc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,15 +180,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.65.1" @@ -368,11 +359,9 @@ dependencies = [ "base64 0.21.2", "bytes", "clap", - "crossbeam", "directories", "figment", "futures-util", - "heed", "hmac", "http", "image", @@ -487,20 +476,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - "cfg-if", - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - [[package]] name = "crossbeam-channel" version = "0.5.8" @@ -511,40 +486,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "crossbeam-deque" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - [[package]] name = "crossbeam-utils" version = "0.8.16" @@ -986,42 +927,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "heed" -version = "0.10.6" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" -dependencies = [ - "bytemuck", - "byteorder", - "heed-traits", - "heed-types", - "libc", - "lmdb-rkv-sys", - "once_cell", - "page_size", - "serde", - "synchronoise", - "url", -] - -[[package]] -name = "heed-traits" -version = "0.7.0" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" - -[[package]] -name = "heed-types" -version = "0.7.2" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" -dependencies = [ - 
"bincode", - "bytemuck", - "byteorder", - "heed-traits", - "serde", - "serde_json", -] - [[package]] name = "hermit-abi" version = "0.2.6" @@ -1379,17 +1284,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "lmdb-rkv-sys" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "lock_api" version = "0.4.10" @@ -1473,15 +1367,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.17" @@ -1524,7 +1409,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.7.1", + "memoffset", "pin-utils", "static_assertions", ] @@ -1701,16 +1586,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "page_size" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "parking_lot" version = "0.12.1" @@ -2727,15 +2602,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "synchronoise" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" -dependencies = [ - "crossbeam-queue", -] - [[package]] name = "thiserror" version = "1.0.40" diff --git a/Cargo.toml b/Cargo.toml index ff1785e..6cc6650 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,10 +78,10 @@ tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } -crossbeam = { version = "0.8.2", optional = true } +# crossbeam = { version = "0.8.2", optional = true } num_cpus = "1.15.0" threadpool = "1.8.1" -heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } # Used for ruma wrapper serde_html_form = "0.2.0" @@ -112,7 +112,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam"] +#backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] From 92c5b6b86cb40704da2a638847a4cc37dfe61212 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 19:34:27 -0800 Subject: [PATCH 1485/1727] fix `cargo check` lints --- src/api/client_server/keys.rs | 4 ++-- src/api/client_server/session.rs | 2 ++ src/api/server_server.rs | 2 +- 
src/database/key_value/rooms/threads.rs | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 7dbe040..9fd0089 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -360,7 +360,7 @@ pub(crate) async fn get_keys_helper bool>( .bad_query_ratelimiter .read() .unwrap() - .get(&*server) + .get(server) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); @@ -393,7 +393,7 @@ pub(crate) async fn get_keys_helper bool>( ), ) .await - .map_err(|e| Error::BadServerResponse("Query took too long")), + .map_err(|_e| Error::BadServerResponse("Query took too long")), ) }) .collect(); diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 5ce62af..5ffd813 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -163,6 +163,8 @@ pub async fn login_route(body: Ruma) -> Result Result> + 'a>> { let prefix = services() .rooms @@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase { self.threadid_userids .iter_from(¤t, true) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pduid, users)| { + .map(move |(pduid, _users)| { let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; let mut pdu = services() From ab1fff2642be961f052dde06b2c03c588d904d51 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 19:48:14 -0800 Subject: [PATCH 1486/1727] fix `cargo clippy` lints --- src/api/client_server/membership.rs | 10 +- src/api/client_server/message.rs | 2 +- src/api/client_server/relations.rs | 6 +- src/api/client_server/session.rs | 10 +- src/api/client_server/state.rs | 6 +- src/api/client_server/sync.rs | 175 +++++++++--------- src/api/server_server.rs | 10 +- src/config/proxy.rs | 7 +- src/database/abstraction/watchers.rs | 1 + .../key_value/rooms/state_accessor.rs | 6 +- src/database/key_value/rooms/state_cache.rs | 2 + src/database/key_value/rooms/threads.rs | 4 +- src/database/key_value/rooms/timeline.rs | 11 +- src/main.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 1 + src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 1 + src/service/rooms/pdu_metadata/data.rs | 1 + src/service/rooms/pdu_metadata/mod.rs | 7 +- src/service/rooms/search/data.rs | 1 + src/service/rooms/spaces/mod.rs | 43 ++--- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 16 +- src/service/rooms/state_cache/data.rs | 2 + src/service/rooms/state_compressor/mod.rs | 12 +- src/service/rooms/threads/data.rs | 1 + src/service/rooms/threads/mod.rs | 4 +- src/service/rooms/timeline/data.rs | 2 + src/service/rooms/timeline/mod.rs | 57 +++--- src/service/sending/data.rs | 1 + src/service/users/mod.rs | 1 + 31 files changed, 205 insertions(+), 205 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 346f257..ed59691 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -400,7 +400,7 @@ pub async fn get_member_events_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -435,7 +435,7 @@ pub async fn joined_members_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -712,7 +712,7 @@ async fn join_room_by_id_helper( } info!("Running send_join auth check"); - if !state_res::event_auth::auth_check( + let authenticated = state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &parsed_join_pdu, None::, // TODO: third party invite @@ -735,7 +735,9 @@ async fn join_room_by_id_helper( .map_err(|e| { warn!("Auth check failed: {e}"); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? { + })?; + + if !authenticated { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 750e030..0952092 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -124,7 +124,7 @@ pub async fn get_message_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); services().rooms.lazy_loading.lazy_load_confirm_delivery( sender_user, diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index a7cea78..124f131 100644 --- a/src/api/client_server/relations.rs +++ b/src/api/client_server/relations.rs @@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -121,7 +121,7 @@ pub async fn get_relating_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 5ffd813..c17bd99 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -117,12 +117,10 @@ pub async fn login_route(body: Ruma) -> Result { warn!("Unsupported or unknown login type: {:?}", &body.login_info); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index d6d3939..174282a 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -85,7 +85,7 @@ pub async fn get_state_events_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index a275b06..5757228 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -554,6 +554,7 @@ async fn sync_helper( } } +#[allow(clippy::too_many_arguments)] async fn load_joined_room( sender_user: &UserId, sender_device: &DeviceId, @@ -590,7 +591,7 @@ async fn load_joined_room( || services() .rooms .user - .last_notification_read(&sender_user, &room_id)? + .last_notification_read(sender_user, room_id)? > since; let mut timeline_users = HashSet::new(); @@ -599,16 +600,16 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_confirm_delivery( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, sincecount, )?; // Database queries: let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { s } else { error!("Room {} has no state", room_id); @@ -618,7 +619,7 @@ async fn load_joined_room( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, since)?; + .get_token_shortstatehash(room_id, since)?; let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { @@ -630,12 +631,12 @@ async fn load_joined_room( let joined_member_count = services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or(0); let invited_member_count = services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0); // Recalculate heroes (first 5 members) @@ -648,7 +649,7 @@ async fn load_joined_room( for hero in services() .rooms .timeline - .all_pdus(&sender_user, &room_id)? + .all_pdus(sender_user, room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) .map(|(_, pdu)| { @@ -669,11 +670,11 @@ async fn load_joined_room( ) && (services() .rooms .state_cache - .is_joined(&user_id, &room_id)? + .is_joined(&user_id, room_id)? || services() .rooms .state_cache - .is_invited(&user_id, &room_id)?) + .is_invited(&user_id, room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -789,17 +790,17 @@ async fn load_joined_room( // Reset lazy loading because this is an initial sync services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, )?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -866,14 +867,14 @@ async fn load_joined_room( } if !services().rooms.lazy_loading.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, &event.sender, )? 
|| lazy_load_send_redundant { if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, + room_id, &StateEventType::RoomMember, event.sender.as_str(), )? { @@ -884,9 +885,9 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -934,7 +935,7 @@ async fn load_joined_room( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + if !share_encrypted_room(sender_user, &user_id, room_id)? { device_list_updates.insert(user_id); } } @@ -954,15 +955,15 @@ async fn load_joined_room( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender - &sender_user != user_id + sender_user != user_id }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -997,7 +998,7 @@ async fn load_joined_room( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ) @@ -1010,7 +1011,7 @@ async fn load_joined_room( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(sender_user, room_id)? .try_into() .expect("highlight count can't go that high"), ) @@ -1039,15 +1040,15 @@ async fn load_joined_room( .rooms .edus .read_receipt - .readreceipts_since(&room_id, since) + .readreceipts_since(room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.typing.last_typing_update(&room_id)? > since { + if services().rooms.edus.typing.last_typing_update(room_id)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -1056,7 +1057,7 @@ async fn load_joined_room( // Save the state after this sync so we can send the correct state diff next sync services().rooms.user.associate_token_shortstatehash( - &room_id, + room_id, next_batch, current_shortstatehash, )?; @@ -1065,7 +1066,7 @@ async fn load_joined_room( account_data: RoomAccountData { events: services() .account_data - .changes_since(Some(&room_id), &sender_user, since)? + .changes_since(Some(room_id), sender_user, since)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -1110,13 +1111,13 @@ fn load_timeline( if services() .rooms .timeline - .last_timeline_count(&sender_user, &room_id)? + .last_timeline_count(sender_user, room_id)? > roomsincecount { let mut non_timeline_pdus = services() .rooms .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? + .pdus_until(sender_user, room_id, PduCount::max())? 
.filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -1172,7 +1173,6 @@ fn share_encrypted_room( pub async fn sync_events_v4_route( body: Ruma, ) -> Result> { - dbg!(&body.body); let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; @@ -1232,7 +1232,7 @@ pub async fn sync_events_v4_route( for room_id in &all_joined_rooms { let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { s } else { error!("Room {} has no state", room_id); @@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, globalsince)?; + .get_token_shortstatehash(room_id, globalsince)?; let since_sender_member: Option = since_shortstatehash .and_then(|shortstatehash| { @@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route( if !share_encrypted_room( &sender_user, &user_id, - &room_id, + room_id, )? { device_list_changes.insert(user_id); } @@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender @@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(&sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -1451,7 +1451,7 @@ pub async fn sync_events_v4_route( } sync_events::v4::SyncOp { op: SlidingOp::Sync, - range: Some(r.clone()), + range: Some(r), index: None, room_ids, room_id: None, @@ -1523,7 +1523,7 @@ pub async fn sync_events_v4_route( let roomsincecount = PduCount::Normal(*roomsince); let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?; + load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; if roomsince != &0 && timeline_pdus.is_empty() { continue; @@ -1555,63 +1555,58 @@ pub async fn sync_events_v4_route( let required_state = required_state_request .iter() - .map(|state| { + .flat_map(|state| { services() .rooms .state_accessor - .room_state_get(&room_id, &state.0, &state.1) + .room_state_get(room_id, &state.0, &state.1) + .ok() + .flatten() + .map(|state| state.to_sync_state_event()) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) - .map(|state| state.to_sync_state_event()) .collect(); // Heroes let heroes = services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .filter_map(|r| r.ok()) .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services() - .rooms - .state_accessor - .get_member(&room_id, &member)? - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }), - ) + .flat_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) .take(5) .collect::>(); - let name = if heroes.len() > 1 { - let last = heroes[0].0.clone(); - Some( - heroes[1..] + let name = match &heroes[..] 
{ + [] => None, + [only] => Some(only.0.clone()), + [firsts @ .., last] => Some( + firsts .iter() .map(|h| h.0.clone()) .collect::>() .join(", ") + " and " - + &last, - ) - } else if heroes.len() == 1 { - Some(heroes[0].0.clone()) - } else { - None + + &last.0, + ), }; - let avatar = if heroes.len() == 1 { - heroes[0].1.clone() + let avatar = if let [only] = &heroes[..] { + only.1.clone() } else { None }; @@ -1619,15 +1614,11 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services() - .rooms - .state_accessor - .get_name(&room_id)? - .or_else(|| name), + name: services().rooms.state_accessor.get_name(room_id)?.or(name), avatar: services() .rooms .state_accessor - .get_avatar(&room_id)? + .get_avatar(room_id)? .map_or(avatar, |a| a.url), initial: Some(roomsince == &0), is_dm: None, @@ -1637,7 +1628,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(&sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ), @@ -1645,7 +1636,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(&sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ), @@ -1658,7 +1649,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1666,7 +1657,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1689,7 +1680,7 @@ pub async fn sync_events_v4_route( let _ = tokio::time::timeout(duration, watcher).await; } - Ok(dbg!(sync_events::v4::Response { + Ok(sync_events::v4::Response { initial: globalsince == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), @@ -1744,5 +1735,5 @@ pub async fn sync_events_v4_route( }, }, delta_token: None, - })) + }) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index a11857d..79f921e 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -666,7 +666,7 @@ pub fn parse_incoming_pdu( let room_version_id = services().rooms.state.get_room_version(&room_id)?; - let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) { + let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -724,7 +724,7 @@ pub async fn send_transaction_message_route( continue; } - let r = parse_incoming_pdu(&pdu); + let r = parse_incoming_pdu(pdu); let (event_id, value, room_id) = match r { Ok(t) => t, Err(e) => { @@ -992,7 +992,7 @@ pub async fn get_event_route( if !services().rooms.state_accessor.server_can_see_event( sender_servername, - &room_id, + room_id, &body.event_id, )? { return Err(Error::BadRequest( @@ -1058,7 +1058,7 @@ pub async fn get_backfill_route( let all_events = services() .rooms .timeline - .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? + .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? 
.take(limit.try_into().unwrap()); let events = all_events @@ -1075,7 +1075,7 @@ pub async fn get_backfill_route( }) .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) .filter_map(|r| r.ok().flatten()) - .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) + .map(PduEvent::convert_to_outgoing_federation_event) .collect(); Ok(get_backfill::v1::Response { diff --git a/src/config/proxy.rs b/src/config/proxy.rs index dcf304e..c03463e 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -29,7 +29,9 @@ use crate::Result; /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. #[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "snake_case")] +#[derive(Default)] pub enum ProxyConfig { + #[default] None, Global { #[serde(deserialize_with = "crate::utils::deserialize_from_str")] @@ -48,11 +50,6 @@ impl ProxyConfig { }) } } -impl Default for ProxyConfig { - fn default() -> Self { - ProxyConfig::None - } -} #[derive(Clone, Debug, Deserialize)] pub struct PartialProxyConfig { diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 55cb60b..01156ab 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -8,6 +8,7 @@ use tokio::sync::watch; #[derive(Default)] pub(super) struct Watchers { + #[allow(clippy::type_complexity)] watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, } diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ad08f46..fe40b93 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let parsed = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let (_, eventid) = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( @@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { services() .rooms .state_compressor - .parse_compressed_state_event(&compressed) + .parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index d0ea0c2..3dcaf4a 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -471,6 +471,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user was invited to. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_invited<'a>( &'a self, @@ -549,6 +550,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user left. 
+ #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_left<'a>( &'a self, diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs index 35c4e6e..5e3dc97 100644 --- a/src/database/key_value/rooms/threads.rs +++ b/src/database/key_value/rooms/threads.rs @@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase { .collect::>() .join(&[0xff][..]); - self.threadid_userids.insert(&root_id, &users)?; + self.threadid_userids.insert(root_id, &users)?; Ok(()) } fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(&root_id)? { + if let Some(users) = self.threadid_userids.get(root_id)? { Ok(Some( users .split(|b| *b == 0xff) diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index ba1e85e..f322d43 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the `count` of this pdu's id. fn get_pdu_count(&self, event_id: &EventId) -> Result> { - Ok(self - .eventid_pduid + self.eventid_pduid .get(event_id.as_bytes())? .map(|pdu_id| pdu_count(&pdu_id)) - .transpose()?) + .transpose() } /// Returns the json of a pdu. @@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - Ok(self.eventid_pduid.get(event_id.as_bytes())?) + self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. @@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, until: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, until, 1, true)?; + let (prefix, current) = count_to_id(room_id, until, 1, true)?; let user_id = user_id.to_owned(); @@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, from: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, from, 1, false)?; + let (prefix, current) = count_to_id(room_id, from, 1, false)?; let user_id = user_id.to_owned(); diff --git a/src/main.rs b/src/main.rs index c74d6dd..683e091 100644 --- a/src/main.rs +++ b/src/main.rs @@ -238,7 +238,7 @@ async fn spawn_task( .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -async fn unrecognized_method( +async fn unrecognized_method( req: axum::http::Request, next: axum::middleware::Next, ) -> std::result::Result { diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index a183d19..044dad8 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -11,6 +11,7 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[allow(clippy::type_complexity)] fn readreceipts_since<'a>( &'a self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 2919978..e7db6f7 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -92,7 +92,7 @@ impl Service { )); } - services().rooms.event_handler.acl_check(origin, &room_id)?; + services().rooms.event_handler.acl_check(origin, room_id)?; // 1. 
Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { @@ -276,6 +276,7 @@ impl Service { r } + #[allow(clippy::type_complexity, clippy::too_many_arguments)] #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( &'a self, @@ -1009,6 +1010,7 @@ impl Service { /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? + #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( &'a self, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index e6e4f89..c51a57e 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -14,6 +14,7 @@ use super::timeline::PduCount; pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub lazy_load_waiting: Mutex>>, } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 6c4cb3c..a4df34c 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId}; pub trait Data: Send + Sync { fn add_relation(&self, from: u64, to: u64) -> Result<()>; + #[allow(clippy::type_complexity)] fn relations_until<'a>( &'a self, user_id: &'a UserId, diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 9ce74f4..411f4f5 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -40,6 +40,7 @@ impl Service { } } + #[allow(clippy::too_many_arguments)] pub fn paginate_relations_with_filter( &self, sender_user: &UserId, @@ -82,7 +83,7 @@ impl Service { services() .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` @@ -106,7 +107,7 @@ impl Service { let events_before: Vec<_> = services() .rooms .pdu_metadata - .relations_until(sender_user, &room_id, target, from)? + .relations_until(sender_user, room_id, target, from)? 
.filter(|r| { r.as_ref().map_or(true, |(_, pdu)| { filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) @@ -129,7 +130,7 @@ impl Service { services() .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 6eef38f..7ea7e3d 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -4,6 +4,7 @@ use ruma::RoomId; pub trait Data: Send + Sync { fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + #[allow(clippy::type_complexity)] fn search_pdus<'a>( &'a self, room_id: &RoomId, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 53232f4..615e9ca 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -197,7 +197,7 @@ impl Service { if let Ok(response) = services() .sending .send_federation_request( - &server, + server, federation::space::get_hierarchy::v1::Request { room_id: current_room.to_owned(), suggested_only, @@ -235,7 +235,7 @@ impl Service { .room .allowed_room_ids .into_iter() - .map(|room| AllowRule::room_membership(room)) + .map(AllowRule::room_membership) .collect(), }) } @@ -245,7 +245,7 @@ impl Service { .room .allowed_room_ids .into_iter() - .map(|room| AllowRule::room_membership(room)) + .map(AllowRule::room_membership) .collect(), }) } @@ -313,7 +313,7 @@ impl Service { canonical_alias: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? + .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) @@ -321,11 +321,11 @@ impl Service { Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: services().rooms.state_accessor.get_name(&room_id)?, + name: services().rooms.state_accessor.get_name(room_id)?, num_joined_members: services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or_else(|| { warn!("Room {} has no member count", room_id); 0 @@ -336,7 +336,7 @@ impl Service { topic: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomTopic, "")? + .room_state_get(room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) @@ -348,7 +348,7 @@ impl Service { world_readable: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { @@ -363,7 +363,7 @@ impl Service { guest_can_join: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? + .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomGuestAccessEventContent| { @@ -376,7 +376,7 @@ impl Service { avatar_url: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? 
.map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) @@ -389,7 +389,7 @@ impl Service { let join_rule = services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| c.join_rule) @@ -415,7 +415,7 @@ impl Service { room_type: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .room_state_get(room_id, &StateEventType::RoomCreate, "")? .map(|s| { serde_json::from_str::(s.content.get()).map_err(|e| { error!("Invalid room create event in database: {}", e); @@ -455,7 +455,7 @@ impl Service { SpaceRoomJoinRule::Invite => services() .rooms .state_cache - .is_joined(sender_user, &room_id)?, + .is_joined(sender_user, room_id)?, _ => false, }; @@ -479,17 +479,14 @@ impl Service { match join_rule { JoinRule::Restricted(r) => { for rule in &r.allow { - match rule { - join_rules::AllowRule::RoomMembership(rm) => { - if let Ok(true) = services() - .rooms - .state_cache - .is_joined(sender_user, &rm.room_id) - { - return Ok(true); - } + if let join_rules::AllowRule::RoomMembership(rm) = rule { + if let Ok(true) = services() + .rooms + .state_cache + .is_joined(sender_user, &rm.room_id) + { + return Ok(true); } - _ => {} } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 48c6020..c209eb5 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -41,7 +41,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(&new) + .parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { @@ -412,7 +412,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(&compressed) + .parse_compressed_state_event(compressed) .ok() }) .filter_map(|(shortstatekey, event_id)| { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a4a62fe..b00dc58 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -180,7 +180,7 @@ impl Service { return Ok(*visibility); } - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let history_visibility = self .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? @@ -197,11 +197,11 @@ impl Service { HistoryVisibility::Shared => currently_member, HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, &user_id) + self.user_was_invited(shortstatehash, user_id) } HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, &user_id) + self.user_was_joined(shortstatehash, user_id) } _ => { error!("Unknown history visibility {history_visibility}"); @@ -221,10 +221,10 @@ impl Service { /// the room's history_visibility at that event's state. 
#[tracing::instrument(skip(self, user_id, room_id))] pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let history_visibility = self - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(HistoryVisibility::Shared), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) @@ -276,7 +276,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomName, "")? + .room_state_get(room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomNameEventContent| c.name) @@ -288,7 +288,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) @@ -303,7 +303,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())? + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room member event in database.")) diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index d8bb4a4..8921909 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -78,6 +78,7 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Returns an iterator over all rooms a user was invited to. + #[allow(clippy::type_complexity)] fn rooms_invited<'a>( &'a self, user_id: &UserId, @@ -96,6 +97,7 @@ pub trait Data: Send + Sync { ) -> Result>>>; /// Returns an iterator over all rooms a user left. + #[allow(clippy::type_complexity)] fn rooms_left<'a>( &'a self, user_id: &UserId, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index d29b020..6118e06 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -16,6 +16,7 @@ use self::data::StateDiff; pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub stateinfo_cache: Mutex< LruCache< u64, @@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( &self, @@ -131,6 +133,7 @@ impl Service { /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer + #[allow(clippy::type_complexity)] #[tracing::instrument(skip( self, statediffnew, @@ -164,7 +167,7 @@ impl Service { for removed in statediffremoved.iter() { if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(*removed); } // Else it was added in the parent and we removed it again. We can forget this change } @@ -172,7 +175,7 @@ impl Service { for new in statediffnew.iter() { if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(*new); } // Else it was removed in the parent and we added it again. We can forget this change } @@ -217,7 +220,7 @@ impl Service { for removed in statediffremoved.iter() { if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(*removed); } // Else it was added in the parent and we removed it again. We can forget this change } @@ -225,7 +228,7 @@ impl Service { for new in statediffnew.iter() { if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(*new); } // Else it was removed in the parent and we added it again. We can forget this change } @@ -253,6 +256,7 @@ impl Service { } /// Returns the new shortstatehash, and the state diff from the previous room state + #[allow(clippy::type_complexity)] pub fn save_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs index 9221e8e..e7159de 100644 --- a/src/service/rooms/threads/data.rs +++ b/src/service/rooms/threads/data.rs @@ -2,6 +2,7 @@ use crate::{PduEvent, Result}; use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { + #[allow(clippy::type_complexity)] fn threads_until<'a>( &'a self, user_id: &'a UserId, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index fb70383..c6193bc 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -26,7 +26,7 @@ impl Service { self.db.threads_until(user_id, room_id, until, include) } - pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { let root_id = &services() .rooms .timeline @@ -103,7 +103,7 @@ impl Service { } let mut users = Vec::new(); - if let Some(userids) = self.db.get_participants(&root_id)? { + if let Some(userids) = self.db.get_participants(root_id)? { users.extend_from_slice(&userids); users.push(pdu.sender.clone()); } else { diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index afa2cfb..6290b8c 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -66,6 +66,7 @@ pub trait Data: Send + Sync { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
+ #[allow(clippy::type_complexity)] fn pdus_until<'a>( &'a self, user_id: &UserId, @@ -75,6 +76,7 @@ pub trait Data: Send + Sync { /// Returns an iterator over all events in a room that happened after the event with id `from` /// in chronological order. + #[allow(clippy::type_complexity)] fn pdus_after<'a>( &'a self, user_id: &UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 25e1c54..61b7337 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -58,8 +58,8 @@ impl PduCount { } pub fn try_from_string(token: &str) -> Result { - if token.starts_with('-') { - token[1..].parse().map(PduCount::Backfilled) + if let Some(stripped) = token.strip_prefix('-') { + stripped.parse().map(PduCount::Backfilled) } else { token.parse().map(PduCount::Normal) } @@ -112,7 +112,7 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? + self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? .next() .map(|o| o.map(|(_, p)| Arc::new(p))) .transpose() @@ -458,7 +458,7 @@ impl Service { let to_conduit = body.starts_with(&format!("{server_user}: ")) || body.starts_with(&format!("{server_user} ")) || body == format!("{server_user}:") - || body == format!("{server_user}"); + || body == server_user; // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit @@ -842,7 +842,7 @@ impl Service { let target = pdu .state_key() - .filter(|v| v.starts_with("@")) + .filter(|v| v.starts_with('@')) .unwrap_or(sender.as_str()); let server_name = services().globals.server_name(); let server_user = format!("@conduit:{}", server_name); @@ -850,7 +850,7 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if content.membership == MembershipState::Leave { - if target == &server_user { + if target == server_user { warn!("Conduit user cannot leave from admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -876,7 +876,7 @@ impl Service { } if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == &server_user { + if target == server_user { warn!("Conduit user cannot be banned in admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -1048,7 +1048,7 @@ impl Service { #[tracing::instrument(skip(self, room_id))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { let first_pdu = self - .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? + .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? .next() .expect("Room is not empty")?; @@ -1060,7 +1060,7 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
.map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -1091,11 +1091,9 @@ impl Service { .await; match response { Ok(response) => { - let mut pub_key_map = RwLock::new(BTreeMap::new()); + let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self - .backfill_pdu(backfill_server, pdu, &mut pub_key_map) - .await + if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await { warn!("Failed to add backfilled pdu: {e}"); } @@ -1142,7 +1140,7 @@ impl Service { services() .rooms .event_handler - .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map) + .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map) .await?; let value = self.get_pdu_json(&event_id)?.expect("We just created it"); @@ -1175,24 +1173,21 @@ impl Service { drop(insert_lock); - match pdu.kind { - TimelineEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody { - body: Option, - } - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - services() - .rooms - .search - .index_pdu(shortroomid, &pdu_id, &body)?; - } + if pdu.kind == TimelineEventType::RoomMessage { + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; } - _ => {} } drop(mutex_lock); diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 2e574e2..8b4d236 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -5,6 +5,7 @@ use crate::Result; use super::{OutgoingKind, SendingEventType}; pub trait Data: Send + Sync { + #[allow(clippy::type_complexity)] fn active_requests<'a>( &'a self, ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6faa6d8..dc34d53 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -34,6 +34,7 @@ pub struct SlidingSyncCache { pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub connections: Mutex>>>, } From 0d17aedae50e58226bc91f10253679cfc280b9ff Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 20:30:31 -0800 Subject: [PATCH 1487/1727] fix `cargo doc` lints Rustdoc (rightfully) thought the `[commandbody]` "tags" were broken links, so I've just made them links to nothing instead. 
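A minimal sketch (not part of the original change) of what rustdoc objects to here: with RUSTDOCFLAGS="-D warnings", rustdoc's broken_intra_doc_links lint treats a bare [commandbody] inside a doc comment as a shortcut link to an item named `commandbody`, which doesn't exist, whereas [commandbody]() is an ordinary Markdown link with an empty target that rustdoc does not try to resolve. The item below is invented purely for illustration:

    /// Example admin command (hypothetical item, not from this patch).
    ///
    /// Rustdoc reads the next line as an intra-doc link to a nonexistent
    /// item named `commandbody`, so `-D warnings` fails the doc build:
    ///
    /// [commandbody]
    ///
    /// Giving the reference an explicit empty target keeps it as plain
    /// Markdown, which is what this patch switches the admin commands to:
    ///
    /// [commandbody]()
    /// # ```
    /// # yaml content here
    /// # ```
    pub struct ExampleCommand;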
--- src/api/server_server.rs | 2 +- src/service/admin/mod.rs | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 79f921e..db17d58 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { } /// Returns: actual_destination, host header -/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { debug!("Finding actual destination for {destination}"); diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b22f8ed..64958fc 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -50,7 +50,7 @@ enum AdminCommand { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. /// - /// [commandbody] + /// [commandbody]() /// # ``` /// # yaml content here /// # ``` @@ -96,7 +96,7 @@ enum AdminCommand { /// Removing a mass amount of users from a room may cause a significant amount of leave events. /// The time to leave rooms may depend significantly on joined rooms and servers. /// - /// [commandbody] + /// [commandbody]() /// # ``` /// # User list here /// # ``` @@ -121,7 +121,7 @@ enum AdminCommand { /// The PDU event is only checked for validity and is not added to the /// database. /// - /// [commandbody] + /// [commandbody]() /// # ``` /// # PDU json content here /// # ``` @@ -165,14 +165,14 @@ enum AdminCommand { EnableRoom { room_id: Box }, /// Verify json signatures - /// [commandbody] + /// [commandbody]() /// # ``` /// # json here /// # ``` SignJson, /// Verify json signatures - /// [commandbody] + /// [commandbody]() /// # ``` /// # json here /// # ``` @@ -858,12 +858,15 @@ impl Service { .expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1: $4"); - // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // Look for a `[commandbody]()` tag. If it exists, use all lines below it that // start with a `#` in the USAGE section. let mut text_lines: Vec<&str> = text.lines().collect(); let mut command_body = String::new(); - if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + if let Some(line_index) = text_lines + .iter() + .position(|line| *line == "[commandbody]()") + { text_lines.remove(line_index); while text_lines From e8ac881b2f38a8de776f1638704ed91b05cdf18c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 20 Dec 2023 15:29:09 -0800 Subject: [PATCH 1488/1727] add an engage file See for info. 
--- engage.toml | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++ flake.nix | 2 ++ 2 files changed, 66 insertions(+) create mode 100644 engage.toml diff --git a/engage.toml b/engage.toml new file mode 100644 index 0000000..3e8884e --- /dev/null +++ b/engage.toml @@ -0,0 +1,64 @@ +interpreter = ["bash", "-euo", "pipefail", "-c"] + +[[task]] +name = "engage" +group = "versions" +script = "engage --version" + +[[task]] +name = "rustc" +group = "versions" +script = "rustc --version" + +[[task]] +name = "cargo" +group = "versions" +script = "cargo --version" + +[[task]] +name = "cargo-fmt" +group = "versions" +script = "cargo fmt --version" + +[[task]] +name = "rustdoc" +group = "versions" +script = "rustdoc --version" + +[[task]] +name = "cargo-clippy" +group = "versions" +script = "cargo clippy -- --version" + +[[task]] +name = "cargo-fmt" +group = "lints" +script = "cargo fmt --check -- --color=always" + +[[task]] +name = "cargo-doc" +group = "lints" +script = """ +RUSTDOCFLAGS="-D warnings" cargo doc \ + --workspace \ + --no-deps \ + --document-private-items \ + --color always +""" + +[[task]] +name = "cargo-clippy" +group = "lints" +script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" + +[[task]] +name = "cargo" +group = "tests" +script = """ +cargo test \ + --workspace \ + --all-targets \ + --color=always \ + -- \ + --color=always +""" diff --git a/flake.nix b/flake.nix index eb3a31c..c1eee4a 100644 --- a/flake.nix +++ b/flake.nix @@ -82,6 +82,8 @@ rust-src rustc rustfmt + ]) ++ (with pkgs; [ + engage ]); }; From 6f052fff985c0cd559fff32c11fd32445b8110ed Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 21:12:42 -0800 Subject: [PATCH 1489/1727] improve nix flake Also fix the comment in `Cargo.toml` about the rust-version stuff. --- Cargo.toml | 8 +++---- flake.nix | 67 +++++++++++++++++++++++++++++++----------------------- 2 files changed, 43 insertions(+), 32 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6cc6650..81661f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,10 +9,10 @@ readme = "README.md" version = "0.7.0-alpha" edition = "2021" -# When changing this, make sure to update the `flake.lock` file by running -# `nix flake update`. If you don't have Nix installed or otherwise don't know -# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in -# the matrix room. +# When changing this, make sure to update the hash near the text "THE +# rust-version HASH" in `flake.nix`. If you don't have Nix installed or +# otherwise don't know how to do this, ping `@charles:computer.surgery` or +# `@dusk:gaze.systems` in the matrix room. 
rust-version = "1.70.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/flake.nix b/flake.nix index c1eee4a..ed545d7 100644 --- a/flake.nix +++ b/flake.nix @@ -43,53 +43,64 @@ sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; - # The system's RocksDB - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + mkToolchain = fenix.packages.${system}.combine; + + buildToolchain = mkToolchain (with toolchain; [ + cargo + rustc + ]); + + devToolchain = mkToolchain (with toolchain; [ + cargo + clippy + rust-src + rustc + + # Always use nightly rustfmt because most of its options are unstable + fenix.packages.${system}.latest.rustfmt + ]); + + builder = + ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage; - # Shared between the package and the devShell nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook ]); - builder = - ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage; + env = { + ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + }; in { packages.default = builder { src = ./.; + # This is redundant with CI + doCheck = false; + inherit - stdenv + env nativeBuildInputs - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + stdenv; + + meta.mainProgram = cargoToml.package.name; }; devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so - RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; - - inherit - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + env = env // { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so. The + # `rust-src` component is required in order for this to work. + RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library"; + }; # Development tools - nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ - cargo - clippy - rust-src - rustc - rustfmt - ]) ++ (with pkgs; [ + nativeBuildInputs = nativeBuildInputs ++ [ + devToolchain + ] ++ (with pkgs; [ engage ]); }; - - checks = { - packagesDefault = self.packages.${system}.default; - devShellsDefault = self.devShells.${system}.default; - }; }); } From 25ceb5ebd8e42f24020cac01ca5c29a1b1cb2b7a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 11:22:51 -0800 Subject: [PATCH 1490/1727] remove commented out ci step If you want it back, just look at the git history. 
--- .gitlab-ci.yml | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f5ab424..bacc7fd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,6 @@ stages: - build - build docker image - test - - upload artifacts variables: # Make GitLab CI go fast: @@ -208,31 +207,6 @@ test:dockerlint: - if: '$CI_COMMIT_REF_NAME == "master"' - if: '$CI_COMMIT_REF_NAME == "next"' -# --------------------------------------------------------------------- # -# Store binaries as package so they have download urls # -# --------------------------------------------------------------------- # - -# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME: - -#publish:package: -# stage: "upload artifacts" -# needs: -# - "docker:tags" -# rules: -# - if: "$CI_COMMIT_TAG" -# image: curlimages/curl:latest -# tags: ["docker"] -# variables: -# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts -# script: -# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"' - # Avoid duplicate pipelines # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines workflow: From 9d592d60d2d0f62fea7eec9db0cfb044e6354c24 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 11:23:41 -0800 Subject: [PATCH 1491/1727] remove dockerlint step because it does nothing It's configured to let the pipeline pass even if there are warnings or errors, i.e. it's pointless. 
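For reference, these are the hadolint invocations from the job removed below; `--no-fail` means the first pass never exits non-zero, and `--failure-threshold error` makes the second pass ignore anything short of an error-severity finding:

    hadolint --no-fail --verbose ./Dockerfile
    hadolint --format gitlab_codeclimate --failure-threshold error ./Dockerfile > dockerlint.json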
--- .gitlab-ci.yml | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bacc7fd..5393367 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -174,39 +174,6 @@ test:audit: reports: sast: gl-sast-report.json -test:dockerlint: - stage: "test" - needs: [] - image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine - interruptible: true - script: - - hadolint --version - # First pass: Print for CI log: - - > - hadolint - --no-fail --verbose - ./Dockerfile - # Then output the results into a json for GitLab to pretty-print this in the MR: - - > - hadolint - --format gitlab_codeclimate - --failure-threshold error - ./Dockerfile > dockerlint.json - artifacts: - when: always - reports: - codequality: dockerlint.json - paths: - - dockerlint.json - rules: - - if: '$CI_COMMIT_REF_NAME != "master"' - changes: - - docker/*Dockerfile - - Dockerfile - - .gitlab-ci.yml - - if: '$CI_COMMIT_REF_NAME == "master"' - - if: '$CI_COMMIT_REF_NAME == "next"' - # Avoid duplicate pipelines # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines workflow: From ffd03a256b3ff497957dd15729fe02ac4b79f0c8 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 11:35:28 -0800 Subject: [PATCH 1492/1727] remove workflow rules I don't think these are actually necessary? At least in my own testing, I haven't seen duplicate pipelines for a single commit when converting from just a branch to a merge request. --- .gitlab-ci.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5393367..0722c14 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -173,13 +173,3 @@ test:audit: when: always reports: sast: gl-sast-report.json - -# Avoid duplicate pipelines -# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines -workflow: - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' - - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" - when: never - - if: "$CI_COMMIT_BRANCH" - - if: "$CI_COMMIT_TAG" From 7e66d2e2c062e28f2bf16668fb41e53699dee56c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 11:36:51 -0800 Subject: [PATCH 1493/1727] use nix and engage to manage ci --- .gitignore | 3 + .gitlab-ci.yml | 194 +++++++------------------------------------------ 2 files changed, 29 insertions(+), 168 deletions(-) diff --git a/.gitignore b/.gitignore index 1b5d37b..a34d70a 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,6 @@ cached_target # Direnv cache /.direnv + +# Gitlab CI cache +/.gitlab-ci.d diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0722c14..889cf26 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,175 +1,33 @@ stages: - - build - - build docker image - - test + - ci variables: - # Make GitLab CI go fast: - GIT_SUBMODULE_STRATEGY: recursive - FF_USE_FASTZIP: 1 - CACHE_COMPRESSION_LEVEL: fastest + # Makes some things print in color + TERM: ansi -# --------------------------------------------------------------------- # -# Create and publish docker image # -# --------------------------------------------------------------------- # +before_script: + # Enable nix-command and flakes + - echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf -.docker-shared-settings: - stage: "build docker image" - needs: [] - tags: [ "docker" ] - variables: - # Docker in Docker: - 
DOCKER_BUILDKIT: 1 - image: - name: docker.io/docker - services: - - name: docker.io/docker:dind - alias: docker + # Add nix-community binary cache + - echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf + - echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf + + # Install direnv and nix-direnv + - nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv + + # Allow .envrc + - direnv allow + + # Set CARGO_HOME to a cacheable path + - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" + +ci: + stage: ci + image: nixos/nix:2.19.2 script: - - apk add openssh-client - - eval $(ssh-agent -s) - - mkdir -p ~/.ssh && chmod 700 ~/.ssh - - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config - - sh .gitlab/setup-buildx-remote-builders.sh - # Authorize against this project's own image registry: - - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - # Build multiplatform image and push to temporary tag: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --pull - --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - --push - --provenance=false - --file "Dockerfile" . - # Build multiplatform image to deb stage and extract their .deb files: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --target "packager-result" - --output="type=local,dest=/tmp/build-output" - --provenance=false - --file "Dockerfile" . - # Build multiplatform image to binary stage and extract their binaries: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --target "builder-result" - --output="type=local,dest=/tmp/build-output" - --provenance=false - --file "Dockerfile" . - # Copy to GitLab container registry: - - > - docker buildx imagetools create - --tag "$CI_REGISTRY_IMAGE/$TAG" - --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye" - --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" - "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - # if DockerHub credentials exist, also copy to dockerhub: - - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi - - > - if [ -n "${DOCKER_HUB}" ]; then - docker buildx imagetools create - --tag "$DOCKER_HUB_IMAGE/$TAG" - --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye" - --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" - "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - ; fi - - mv /tmp/build-output ./ - artifacts: + - direnv exec . 
engage + cache: paths: - - "./build-output/" - -docker:next: - extends: .docker-shared-settings - rules: - - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"' - variables: - TAG: "matrix-conduit:next" - -docker:master: - extends: .docker-shared-settings - rules: - - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"' - variables: - TAG: "matrix-conduit:latest" - -docker:tags: - extends: .docker-shared-settings - rules: - - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG" - variables: - TAG: "matrix-conduit:$CI_COMMIT_TAG" - - -docker build debugging: - extends: .docker-shared-settings - rules: - - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/" - variables: - TAG: "matrix-conduit-docker-tests:latest" - -# --------------------------------------------------------------------- # -# Run tests # -# --------------------------------------------------------------------- # - -cargo check: - stage: test - image: docker.io/rust:1.70.0-bullseye - needs: [] - interruptible: true - before_script: - - "rustup show && rustc --version && cargo --version" # Print version info for debugging - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb - script: - - cargo check - - -.test-shared-settings: - stage: "test" - needs: [] - image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" - tags: ["docker"] - variables: - CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - interruptible: true - -test:cargo: - extends: .test-shared-settings - before_script: - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb - script: - - rustc --version && cargo --version # Print version info for debugging - - "cargo test --color always --workspace --verbose --locked --no-fail-fast" - -test:clippy: - extends: .test-shared-settings - allow_failure: true - before_script: - - rustup component add clippy - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb - script: - - rustc --version && cargo --version # Print version info for debugging - - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" - artifacts: - when: always - reports: - codequality: gl-code-quality-report.json - -test:format: - extends: .test-shared-settings - before_script: - - rustup component add rustfmt - script: - - cargo fmt --all -- --check - -test:audit: - extends: .test-shared-settings - allow_failure: true - script: - - cargo audit --color always || true - - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json - artifacts: - when: always - reports: - sast: gl-sast-report.json + - target + - .gitlab-ci.d From f8bdfd82b02dbbd46cdc19f2c862378b127b2b0d Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 12:14:03 -0800 Subject: [PATCH 1494/1727] update flake.lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'crane': 'github:ipetkov/crane/8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e' (2023-07-07) → 'github:ipetkov/crane/742170d82cd65c925dcddc5c3d6185699fbbad08' (2024-01-18) • Removed input 'crane/flake-compat' • Removed input 'crane/flake-utils' • Removed input 'crane/rust-overlay' • Removed input 'crane/rust-overlay/flake-utils' • Removed input 'crane/rust-overlay/nixpkgs' • Updated input 'fenix': 
'github:nix-community/fenix/39096fe3f379036ff4a5fa198950b8e79defe939' (2023-07-16) → 'github:nix-community/fenix/e132ea0eb0c799a2109a91688e499d7bf4962801' (2024-01-18) • Updated input 'fenix/rust-analyzer-src': 'github:rust-lang/rust-analyzer/996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8' (2023-07-15) → 'github:rust-lang/rust-analyzer/9d9b34354d2f13e33568c9c55b226dd014a146a0' (2024-01-17) • Updated input 'flake-utils': 'github:numtide/flake-utils/919d646de7be200f3bf08cb76ae1f09402b6f9b4' (2023-07-11) → 'github:numtide/flake-utils/1ef2e671c3b0c19053962c07dbda38332dcebf26' (2024-01-15) • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/8acef304efe70152463a6399f73e636bcc363813' (2023-07-15) → 'github:NixOS/nixpkgs/842d9d80cfd4560648c785f8a4e6f3b096790e19' (2024-01-17) --- flake.lock | 78 +++++++++++------------------------------------------- flake.nix | 1 - 2 files changed, 16 insertions(+), 63 deletions(-) diff --git a/flake.lock b/flake.lock index 0065525..7f361dd 100644 --- a/flake.lock +++ b/flake.lock @@ -2,21 +2,16 @@ "nodes": { "crane": { "inputs": { - "flake-compat": "flake-compat", - "flake-utils": [ - "flake-utils" - ], "nixpkgs": [ "nixpkgs" - ], - "rust-overlay": "rust-overlay" + ] }, "locked": { - "lastModified": 1688772518, - "narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=", + "lastModified": 1705597458, + "narHash": "sha256-vJ8Ib9ruxbaBxGEcA0d7dHqxpc6Z+SGR2XIxVeSMuLM=", "owner": "ipetkov", "repo": "crane", - "rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e", + "rev": "742170d82cd65c925dcddc5c3d6185699fbbad08", "type": "github" }, "original": { @@ -33,11 +28,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1689488573, - "narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=", + "lastModified": 1705559032, + "narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=", "owner": "nix-community", "repo": "fenix", - "rev": "39096fe3f379036ff4a5fa198950b8e79defe939", + "rev": "e132ea0eb0c799a2109a91688e499d7bf4962801", "type": "github" }, "original": { @@ -46,32 +41,16 @@ "type": "github" } }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, "flake-utils": { "inputs": { "systems": "systems" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", "owner": "numtide", "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", "type": "github" }, "original": { @@ -82,11 +61,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1689444953, - "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=", + "lastModified": 1705496572, + "narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8acef304efe70152463a6399f73e636bcc363813", + "rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19", "type": "github" }, "original": { @@ -107,11 +86,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1689441253, - "narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=", + "lastModified": 1705523001, + "narHash": 
"sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8", + "rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0", "type": "github" }, "original": { @@ -121,31 +100,6 @@ "type": "github" } }, - "rust-overlay": { - "inputs": { - "flake-utils": [ - "crane", - "flake-utils" - ], - "nixpkgs": [ - "crane", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688351637, - "narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "f9b92316727af9e6c7fee4a761242f7f46880329", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, "systems": { "locked": { "lastModified": 1681028828, diff --git a/flake.nix b/flake.nix index ed545d7..65cfc30 100644 --- a/flake.nix +++ b/flake.nix @@ -10,7 +10,6 @@ crane = { url = "github:ipetkov/crane"; inputs.nixpkgs.follows = "nixpkgs"; - inputs.flake-utils.follows = "flake-utils"; }; }; From 02781e4f9b494baf9cd238b6614f217c9474c3e2 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 13:05:55 -0800 Subject: [PATCH 1495/1727] use nix-filter to filter sources This prevents nix from rebuilding conduit when files that don't actually effect the build are changed. --- flake.lock | 16 ++++++++++++++++ flake.nix | 11 ++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/flake.lock b/flake.lock index 7f361dd..128539b 100644 --- a/flake.lock +++ b/flake.lock @@ -59,6 +59,21 @@ "type": "github" } }, + "nix-filter": { + "locked": { + "lastModified": 1705332318, + "narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=", + "owner": "numtide", + "repo": "nix-filter", + "rev": "3449dc925982ad46246cfc36469baf66e1b64f17", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "nix-filter", + "type": "github" + } + }, "nixpkgs": { "locked": { "lastModified": 1705496572, @@ -80,6 +95,7 @@ "crane": "crane", "fenix": "fenix", "flake-utils": "flake-utils", + "nix-filter": "nix-filter", "nixpkgs": "nixpkgs" } }, diff --git a/flake.nix b/flake.nix index 65cfc30..62ba828 100644 --- a/flake.nix +++ b/flake.nix @@ -2,6 +2,7 @@ inputs = { nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; flake-utils.url = "github:numtide/flake-utils"; + nix-filter.url = "github:numtide/nix-filter"; fenix = { url = "github:nix-community/fenix"; @@ -17,6 +18,7 @@ { self , nixpkgs , flake-utils + , nix-filter , fenix , crane @@ -73,7 +75,14 @@ in { packages.default = builder { - src = ./.; + src = nix-filter { + root = ./.; + include = [ + "src" + "Cargo.toml" + "Cargo.lock" + ]; + }; # This is redundant with CI doCheck = false; From 4de54db3054c95a7931bb7be7f9f04578d06024f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 18 Jan 2024 13:31:46 -0800 Subject: [PATCH 1496/1727] redo docker image and build it in ci --- .gitlab-ci.yml | 13 +++++ Dockerfile | 132 ------------------------------------------------- flake.nix | 20 ++++++++ 3 files changed, 33 insertions(+), 132 deletions(-) delete mode 100644 Dockerfile diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 889cf26..ac1495f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,5 +1,6 @@ stages: - ci + - artifacts variables: # Makes some things print in color @@ -31,3 +32,15 @@ ci: paths: - target - .gitlab-ci.d + +docker: + stage: artifacts + image: nixos/nix:2.19.2 + script: + - nix build .#oci-image + + # Make the output less difficult to find + - cp result 
docker-image.tar.gz + artifacts: + paths: + - docker-image.tar.gz diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 943f686..0000000 --- a/Dockerfile +++ /dev/null @@ -1,132 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM docker.io/rust:1.70-bullseye AS base - -FROM base AS builder -WORKDIR /usr/src/conduit - -# Install required packages to build Conduit and it's dependencies -RUN apt-get update && \ - apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 - -# == Build dependencies without our own code separately for caching == -# -# Need a fake main.rs since Cargo refuses to build anything otherwise. -# -# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature -# request that would allow just dependencies to be compiled, presumably -# regardless of whether source files are available. -RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs -COPY Cargo.toml Cargo.lock ./ -RUN cargo build --release && rm -r src - -# Copy over actual Conduit sources -COPY src src - -# main.rs and lib.rs need their timestamp updated for this to work correctly since -# otherwise the build with the fake main.rs from above is newer than the -# source files (COPY preserves timestamps). -# -# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit -RUN touch src/main.rs && touch src/lib.rs && cargo build --release - - -# ONLY USEFUL FOR CI: target stage to extract build artifacts -FROM scratch AS builder-result -COPY --from=builder /usr/src/conduit/target/release/conduit /conduit - - - -# --------------------------------------------------------------------------------------------------------------- -# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: -# --------------------------------------------------------------------------------------------------------------- -FROM base AS build-cargo-deb - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - dpkg \ - dpkg-dev \ - liblzma-dev - -RUN cargo install cargo-deb -# => binary is in /usr/local/cargo/bin/cargo-deb - - -# --------------------------------------------------------------------------------------------------------------- -# Package conduit build-result into a .deb package: -# --------------------------------------------------------------------------------------------------------------- -FROM builder AS packager -WORKDIR /usr/src/conduit - -COPY ./LICENSE ./LICENSE -COPY ./README.md ./README.md -COPY debian ./debian -COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb - -# --no-build makes cargo-deb reuse already compiled project -RUN cargo deb --no-build -# => Package is in /usr/src/conduit/target/debian/__.deb - - -# ONLY USEFUL FOR CI: target stage to extract build artifacts -FROM scratch AS packager-result -COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb - - -# --------------------------------------------------------------------------------------------------------------- -# Stuff below this line actually ends up in the resulting docker image -# --------------------------------------------------------------------------------------------------------------- -FROM docker.io/debian:bullseye-slim AS runner - -# Standard port on which Conduit launches. -# You still need to map the port when using the docker command or docker-compose. 
-EXPOSE 6167 - -ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit - -ENV CONDUIT_PORT=6167 \ - CONDUIT_ADDRESS="0.0.0.0" \ - CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ - CONDUIT_CONFIG='' -# └─> Set no config file to do all configuration with env vars - -# Conduit needs: -# dpkg: to install conduit.deb -# ca-certificates: for https -# iproute2 & wget: for the healthcheck script -RUN apt-get update && apt-get -y --no-install-recommends install \ - dpkg \ - ca-certificates \ - iproute2 \ - wget \ - && rm -rf /var/lib/apt/lists/* - -# Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh -HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh - -# Install conduit.deb: -COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/ -RUN dpkg -i /srv/conduit/*.deb - -# Improve security: Don't run stuff as root, that does not need to run as root -# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. -ARG USER_ID=1000 -ARG GROUP_ID=1000 -RUN set -x ; \ - groupadd -r -g ${GROUP_ID} conduit ; \ - useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 - -# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: -RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh && \ - mkdir -p ${DEFAULT_DB_PATH} && \ - chown -cR conduit:conduit ${DEFAULT_DB_PATH} - -# Change user to conduit, no root permissions afterwards: -USER conduit -# Set container home directory -WORKDIR /srv/conduit - -# Run Conduit and print backtraces on panics -ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/usr/sbin/matrix-conduit" ] diff --git a/flake.nix b/flake.nix index 62ba828..b4a32d0 100644 --- a/flake.nix +++ b/flake.nix @@ -95,6 +95,26 @@ meta.mainProgram = cargoToml.package.name; }; + packages.oci-image = + let + package = self.packages.${system}.default; + in + pkgs.dockerTools.buildImage { + name = package.pname; + tag = "latest"; + config = { + # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) are + # handled as expected + Entrypoint = [ + "${pkgs.lib.getExe' pkgs.tini "tini"}" + "--" + ]; + Cmd = [ + "${pkgs.lib.getExe package}" + ]; + }; + }; + devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { env = env // { # Rust Analyzer needs to be able to find the path to default crate From 0b7ed5adc9ad17811eb8ef43b6b1e94c5a17d6b1 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 19 Jan 2024 12:10:23 -0800 Subject: [PATCH 1497/1727] add debian package building in ci This uses a separate step and docker image, which I'm not a huge fan of. At least I could get this to work for now, but I won't be shocked when it breaks later. I know, I know, fixing this kind of problem is the exact reason I bothered to do this, but I was really struggling to do better here. Maybe I can take a second pass at this later. Also, this explicitly names the caches, because without this, various things related to linking will break. 
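Concretely, each job gets its own `cache.key`, so the Nix-built and Debian-built `target/` directories never end up in the same cache entry (presumably the source of the linking breakage). A sketch of the shape this takes, see the full diff below:

    ci:
      cache:
        key: nix
        paths:
          - target
          - .gitlab-ci.d

    debian:
      cache:
        key: debian
        paths:
          - target
          - .gitlab-ci.d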
--- .gitlab-ci.yml | 30 +++++++++++++++++++++++++----- Cargo.toml | 2 ++ 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ac1495f..48fe624 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,17 +8,17 @@ variables: before_script: # Enable nix-command and flakes - - echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf + - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi # Add nix-community binary cache - - echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf - - echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf + - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi # Install direnv and nix-direnv - - nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv + - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi # Allow .envrc - - direnv allow + - if command -v nix > /dev/null; then direnv allow; fi # Set CARGO_HOME to a cacheable path - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" @@ -29,6 +29,7 @@ ci: script: - direnv exec . engage cache: + key: nix paths: - target - .gitlab-ci.d @@ -44,3 +45,22 @@ docker: artifacts: paths: - docker-image.tar.gz + +debian: + stage: artifacts + image: rust:1.70.0 + script: + - apt-get update && apt-get install -y --no-install-recommends libclang-dev + - cargo install cargo-deb + - cargo deb + + # Make the output less difficult to find + - mv target/debian/*.deb . + artifacts: + paths: + - "*.deb" + cache: + key: debian + paths: + - target + - .gitlab-ci.d diff --git a/Cargo.toml b/Cargo.toml index 81661f0..8c13915 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,8 @@ edition = "2021" # rust-version HASH" in `flake.nix`. If you don't have Nix installed or # otherwise don't know how to do this, ping `@charles:computer.surgery` or # `@dusk:gaze.systems` in the matrix room. +# +# Also make sure to update the docker image tags in `.gitlab-ci.yml`. rust-version = "1.70.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 69d0003222e6fcf7abe6305a08aff6a8eba74659 Mon Sep 17 00:00:00 2001 From: Tobias Bucher Date: Wed, 2 Aug 2023 01:00:06 +0200 Subject: [PATCH 1498/1727] Use upstream `reqwest` instead of vendored one This uses the `ClientBuilder::dns_resolver` function that was added in reqwest 0.11.13, instead of the homebrew `ClientBuilder::resolve_fn`. 
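For context, the upstream hook is a trait object rather than a closure. A minimal sketch of the `reqwest::dns::Resolve` shape this builds on (a toy resolver that pins every hostname to one fixed address; not Conduit's actual resolver, which as shown below also falls back to `GaiResolver`):

    use std::{future, iter, net::SocketAddr};

    use hyper::client::connect::dns::Name;
    use reqwest::dns::{Addrs, Resolve, Resolving};

    struct Pinned(SocketAddr);

    impl Resolve for Pinned {
        fn resolve(&self, _name: Name) -> Resolving {
            // Hand reqwest a ready future yielding a single-address iterator
            let addrs: Addrs = Box::new(iter::once(self.0));
            Box::pin(future::ready(Ok(addrs)))
        }
    }

    // let client = reqwest::Client::builder()
    //     .dns_resolver(std::sync::Arc::new(Pinned(addr)))
    //     .build()?;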
--- Cargo.lock | 84 +++++++++++--------------------------- Cargo.toml | 3 +- src/service/globals/mod.rs | 45 ++++++++++++++++++-- 3 files changed, 66 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4bcdc0..d5e1d16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,10 +155,10 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "rustls 0.21.2", - "rustls-pemfile 1.0.2", + "rustls", + "rustls-pemfile", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tower-service", ] @@ -364,6 +364,7 @@ dependencies = [ "futures-util", "hmac", "http", + "hyper", "image", "jsonwebtoken", "lazy_static", @@ -1022,15 +1023,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", - "rustls 0.20.8", + "rustls", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", ] [[package]] @@ -1898,10 +1900,11 @@ checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" -version = "0.11.9" -source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" +version = "0.11.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -1913,25 +1916,26 @@ dependencies = [ "hyper-rustls", "ipnet", "js-sys", - "lazy_static", "log", "mime", + "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.20.8", + "rustls", "rustls-native-certs", - "rustls-pemfile 0.2.1", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", "tokio-socks", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.7.0", + "winreg 0.10.1", ] [[package]] @@ -2198,18 +2202,6 @@ dependencies = [ "semver", ] -[[package]] -name = "rustls" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" -dependencies = [ - "log", - "ring", - "sct", - "webpki", -] - [[package]] name = "rustls" version = "0.21.2" @@ -2229,20 +2221,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.2", + "rustls-pemfile", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.2" @@ -2756,24 +2739,13 @@ dependencies = [ "syn 2.0.21", ] -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.8", - "tokio", - "webpki", -] - [[package]] name = "tokio-rustls" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.2", + "rustls", "tokio", ] @@ -3223,16 +3195,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "weezl" version = "0.1.7" @@ -3407,9 +3369,9 @@ dependencies = [ [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] diff --git a/Cargo.toml b/Cargo.toml index 8c13915..a35cf23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,8 @@ rand = "0.8.5" # Used to hash passwords rust-argon2 = "1.0.0" # Used to send requests -reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } +hyper = "0.14.26" +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] } # Used for conduit::Error type thiserror = "1.0.40" # Used to generate thumbnails for images diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 9bce8a2..96b2954 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -8,6 +8,12 @@ use ruma::{ use crate::api::server_server::FedDest; use crate::{services, Config, Error, Result}; +use futures_util::FutureExt; +use hyper::{ + client::connect::dns::{GaiResolver, Name}, + service::Service as HyperService, +}; +use reqwest::dns::{Addrs, Resolve, Resolving}; use ruma::{ api::{ client::sync::sync_events, @@ -17,8 +23,10 @@ use ruma::{ }; use std::{ collections::{BTreeMap, HashMap}, + error::Error as StdError, fs, - future::Future, + future::{self, Future}, + iter, net::{IpAddr, SocketAddr}, path::PathBuf, sync::{ @@ -99,6 +107,35 @@ impl Default for RotationHandler { } } +pub struct Resolver { + inner: GaiResolver, + overrides: Box Option + Send + Sync>, +} + +impl Resolver { + pub fn new(overrides: Box Option + Send + Sync>) -> Resolver { + Resolver { + inner: GaiResolver::new(), + overrides, + } + } +} + +impl Resolve for Resolver { + fn resolve(&self, name: Name) -> Resolving { + if let Some(addr) = (self.overrides)(name.as_str()) { + let once: Box + Send> = Box::new(iter::once(addr)); + return Box::pin(future::ready(Ok(once))); + } + let this = &mut self.inner.clone(); + Box::pin(HyperService::::call(this, name).map(|result| { + result + .map(|addrs| -> Addrs { Box::new(addrs) }) + .map_err(|err| -> Box { Box::new(err) }) + })) + } +} + impl Service { pub fn load(db: &'static dyn Data, config: Config) -> Result { let keypair = db.load_keypair(); @@ -122,12 +159,12 @@ impl Service { let default_client = reqwest_client_builder(&config)?.build()?; let name_override = Arc::clone(&tls_name_override); let federation_client = reqwest_client_builder(&config)? 
- .resolve_fn(move |domain| { + .dns_resolver(Arc::new(Resolver::new(Box::new(move |domain| { let read_guard = name_override.read().unwrap(); - let (override_name, port) = read_guard.get(&domain)?; + let (override_name, port) = read_guard.get(domain)?; let first_name = override_name.get(0)?; Some(SocketAddr::new(*first_name, *port)) - }) + })))) .build()?; // Supported and stable room versions From fe86d28428a0cae53ac9ca5f5efe7783cb00022c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 24 Jan 2024 15:11:17 -0800 Subject: [PATCH 1499/1727] move resolver logic into the resolver Honestly not sure why it wasn't done like this before. This code is much less awkward to follow and more compartmentalized. These changes were mainly motivated by a clippy lint triggering on the original code, which then made me wonder if I could get rid of some of the `Box`ing. Turns out I could, and this is the result of that. --- src/service/globals/mod.rs | 42 +++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 96b2954..f034415 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -109,11 +109,11 @@ impl Default for RotationHandler { pub struct Resolver { inner: GaiResolver, - overrides: Box Option + Send + Sync>, + overrides: Arc>, } impl Resolver { - pub fn new(overrides: Box Option + Send + Sync>) -> Resolver { + pub fn new(overrides: Arc>) -> Self { Resolver { inner: GaiResolver::new(), overrides, @@ -123,16 +123,26 @@ impl Resolver { impl Resolve for Resolver { fn resolve(&self, name: Name) -> Resolving { - if let Some(addr) = (self.overrides)(name.as_str()) { - let once: Box + Send> = Box::new(iter::once(addr)); - return Box::pin(future::ready(Ok(once))); - } - let this = &mut self.inner.clone(); - Box::pin(HyperService::::call(this, name).map(|result| { - result - .map(|addrs| -> Addrs { Box::new(addrs) }) - .map_err(|err| -> Box { Box::new(err) }) - })) + self.overrides + .read() + .expect("lock should not be poisoned") + .get(name.as_str()) + .and_then(|(override_name, port)| { + override_name.get(0).map(|first_name| { + let x: Box + Send> = + Box::new(iter::once(SocketAddr::new(*first_name, *port))); + let x: Resolving = Box::pin(future::ready(Ok(x))); + x + }) + }) + .unwrap_or_else(|| { + let this = &mut self.inner.clone(); + Box::pin(HyperService::::call(this, name).map(|result| { + result + .map(|addrs| -> Addrs { Box::new(addrs) }) + .map_err(|err| -> Box { Box::new(err) }) + })) + }) } } @@ -157,14 +167,8 @@ impl Service { .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); let default_client = reqwest_client_builder(&config)?.build()?; - let name_override = Arc::clone(&tls_name_override); let federation_client = reqwest_client_builder(&config)? - .dns_resolver(Arc::new(Resolver::new(Box::new(move |domain| { - let read_guard = name_override.read().unwrap(); - let (override_name, port) = read_guard.get(domain)?; - let first_name = override_name.get(0)?; - Some(SocketAddr::new(*first_name, *port)) - })))) + .dns_resolver(Arc::new(Resolver::new(tls_name_override.clone()))) .build()?; // Supported and stable room versions From f7cc4fb3bb8ec1bfc39ccb89e770c809d5708343 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 24 Jan 2024 22:24:34 -0800 Subject: [PATCH 1500/1727] state artifacts' targets and rename artifacts This will make it more obvious what's what and be more internally consistent. 
--- .gitlab-ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 48fe624..53fd73f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -34,19 +34,19 @@ ci: - target - .gitlab-ci.d -docker: +oci-image:x86_64-unknown-linux-gnu: stage: artifacts image: nixos/nix:2.19.2 script: - nix build .#oci-image # Make the output less difficult to find - - cp result docker-image.tar.gz + - cp result oci-image.tar.gz artifacts: paths: - - docker-image.tar.gz + - oci-image.tar.gz -debian: +debian:x86_64-unknown-linux-gnu: stage: artifacts image: rust:1.70.0 script: @@ -55,10 +55,10 @@ debian: - cargo deb # Make the output less difficult to find - - mv target/debian/*.deb . + - mv target/debian/*.deb conduit.deb artifacts: paths: - - "*.deb" + - conduit.deb cache: key: debian paths: From bd2b146d5da633eb3ec80033a317feea87d23c64 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 24 Jan 2024 22:16:29 -0800 Subject: [PATCH 1501/1727] add crane binary cache This way we don't need to build e.g. crane-utils every time. --- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 53fd73f..deacb81 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,6 +10,10 @@ before_script: # Enable nix-command and flakes - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi + # Add crane binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi + # Add nix-community binary cache - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi From 6ae776218cc2da4e46fb587bd08b5164b24c41ce Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 24 Jan 2024 22:20:48 -0800 Subject: [PATCH 1502/1727] add our own binary cache The machine I'm hosting this on doesn't have incredible upload speeds but it should be good enough? 
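Anyone deploying Conduit with Nix can point at the same cache; the consuming side is just the mirror image of the CI change below (values copied verbatim from it):

    # /etc/nix/nix.conf (or the equivalent nixConfig settings)
    extra-substituters = https://nix.computer.surgery/conduit
    extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=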
--- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index deacb81..746641c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,6 +10,10 @@ before_script: # Enable nix-command and flakes - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi + # Add our own binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi + # Add crane binary cache - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi From bdc46f63929f19ddc124f0ed4a78885b1514343c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 11:34:46 -0800 Subject: [PATCH 1503/1727] add script to build and push to binary cache This is even useful for local development, as you can pre-populate the binary cache before running CI (assuming you have the token). Also, it being in a script makes it easier to test. We've added attic as a flake input even though the flake itself doesn't use it so that we can use `--inputs-from .` in Nix commands to reference a locked version of attic. This helps with reproducibility and caching, and to makes it easy to update attic because it's part of the normal flake lifecycle. --- .envrc | 2 + flake.lock | 114 +++++++++++++++++++++++++++++++++++++++++++++++++++-- flake.nix | 2 + 3 files changed, 115 insertions(+), 3 deletions(-) diff --git a/.envrc b/.envrc index b9238c3..403a9bd 100644 --- a/.envrc +++ b/.envrc @@ -1,3 +1,5 @@ #!/usr/bin/env bash use flake + +PATH_add bin diff --git a/flake.lock b/flake.lock index 128539b..acad901 100644 --- a/flake.lock +++ b/flake.lock @@ -1,6 +1,50 @@ { "nodes": { + "attic": { + "inputs": { + "crane": "crane", + "flake-compat": "flake-compat", + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1705617092, + "narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=", + "owner": "zhaofengli", + "repo": "attic", + "rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b", + "type": "github" + }, + "original": { + "owner": "zhaofengli", + "ref": "main", + "repo": "attic", + "type": "github" + } + }, "crane": { + "inputs": { + "nixpkgs": [ + "attic", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1702918879, + "narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=", + "owner": "ipetkov", + "repo": "crane", + "rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "crane_2": { "inputs": { "nixpkgs": [ "nixpkgs" @@ -41,7 +85,38 @@ "type": "github" } }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-utils": { + "locked": { + "lastModified": 1667395993, + "narHash": 
"sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { "inputs": { "systems": "systems" }, @@ -75,6 +150,38 @@ } }, "nixpkgs": { + "locked": { + "lastModified": 1702539185, + "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1702780907, + "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { "locked": { "lastModified": 1705496572, "narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=", @@ -92,11 +199,12 @@ }, "root": { "inputs": { - "crane": "crane", + "attic": "attic", + "crane": "crane_2", "fenix": "fenix", - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "nix-filter": "nix-filter", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs_2" } }, "rust-analyzer-src": { diff --git a/flake.nix b/flake.nix index b4a32d0..51148ba 100644 --- a/flake.nix +++ b/flake.nix @@ -12,6 +12,7 @@ url = "github:ipetkov/crane"; inputs.nixpkgs.follows = "nixpkgs"; }; + attic.url = "github:zhaofengli/attic?ref=main"; }; outputs = @@ -22,6 +23,7 @@ , fenix , crane + , ... }: flake-utils.lib.eachDefaultSystem (system: let pkgs = nixpkgs.legacyPackages.${system}; From 5cc53c9e14ef5319f2dd8f88aa58d6e966cfdb20 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 11:37:35 -0800 Subject: [PATCH 1504/1727] push oci image and x86_64-*-gnu build to bin cache This will allow most Nix users to use the `default` package and without having to build from source. And also allows any weirdos to get the OCI image from the Nix binary cache if they want. No idea why that would be desireable though lol --- .gitlab-ci.yml | 8 +++++++- bin/nix-build-and-cache | 31 +++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100755 bin/nix-build-and-cache diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 746641c..199429a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -46,7 +46,13 @@ oci-image:x86_64-unknown-linux-gnu: stage: artifacts image: nixos/nix:2.19.2 script: - - nix build .#oci-image + # Push artifacts and build requirements to binary cache + # + # Since the OCI image package is based on the binary package, this has the + # fun side effect of uploading the normal binary too. Conduit users who are + # deploying with Nix can leverage this fact by adding our binary cache to + # their systems. + - ./bin/nix-build-and-cache .#oci-image # Make the output less difficult to find - cp result oci-image.tar.gz diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache new file mode 100755 index 0000000..b37ebd8 --- /dev/null +++ b/bin/nix-build-and-cache @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# The first argument must be the desired installable +INSTALLABLE="$1" + +# Build the installable and forward any other arguments too +nix build "$@" + +if [ ! 
-z ${ATTIC_TOKEN+x} ]; then + +nix run --inputs-from . attic -- login \ + conduit \ + https://nix.computer.surgery/conduit \ + "$ATTIC_TOKEN" + + push_args=( + # Attic and its build dependencies + "$(nix path-info --inputs-from . attic)" + "$(nix path-info --inputs-from . attic --derivation)" + + # The target installable and its build dependencies + "$(nix path-info "$INSTALLABLE" --derivation)" + "$(nix path-info "$INSTALLABLE")" + ) + + nix run --inputs-from . attic -- push conduit "${push_args[@]}" +else + echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" +fi From 249fc7769d14e4b4eb9d7f6abc645972c4fc27b8 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 22:24:04 -0800 Subject: [PATCH 1505/1727] don't bother with mold For now, at least. I suspect it will make cross compilation more difficult. --- flake.nix | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/flake.nix b/flake.nix index 51148ba..d4be7d9 100644 --- a/flake.nix +++ b/flake.nix @@ -28,12 +28,6 @@ let pkgs = nixpkgs.legacyPackages.${system}; - # Use mold on Linux - stdenv = if pkgs.stdenv.isLinux then - pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv - else - pkgs.stdenv; - # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); @@ -91,8 +85,7 @@ inherit env - nativeBuildInputs - stdenv; + nativeBuildInputs; meta.mainProgram = cargoToml.package.name; }; @@ -117,7 +110,7 @@ }; }; - devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { + devShells.default = pkgs.mkShell { env = env // { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so. The From 9cef03127bcc0e076286721f6f7d5564e0af36c8 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 23 Jan 2024 10:45:02 -0800 Subject: [PATCH 1506/1727] remove `with` for `nativeBuildInputs` It's going to get more involved and that `with` was too specific. --- flake.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index d4be7d9..15538ef 100644 --- a/flake.nix +++ b/flake.nix @@ -60,9 +60,9 @@ builder = ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage; - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]); + nativeBuildInputs = [ + pkgs.rustPlatform.bindgenHook + ]; env = { ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; From cf4015b8307249ccf3f7c036306a1dc69f0af795 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 22:25:08 -0800 Subject: [PATCH 1507/1727] rename pkgs to pkgsHost This will make organizing cross compilation easier. --- flake.nix | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flake.nix b/flake.nix index 15538ef..039bf0b 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ , ... 
}: flake-utils.lib.eachDefaultSystem (system: let - pkgs = nixpkgs.legacyPackages.${system}; + pkgsHost = nixpkgs.legacyPackages.${system}; # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); @@ -58,15 +58,15 @@ ]); builder = - ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage; + ((crane.mkLib pkgsHost).overrideToolchain buildToolchain).buildPackage; nativeBuildInputs = [ - pkgs.rustPlatform.bindgenHook + pkgsHost.rustPlatform.bindgenHook ]; env = { - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + ROCKSDB_INCLUDE_DIR = "${pkgsHost.rocksdb}/include"; + ROCKSDB_LIB_DIR = "${pkgsHost.rocksdb}/lib"; }; in { @@ -94,23 +94,23 @@ let package = self.packages.${system}.default; in - pkgs.dockerTools.buildImage { + pkgsHost.dockerTools.buildImage { name = package.pname; tag = "latest"; config = { # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) are # handled as expected Entrypoint = [ - "${pkgs.lib.getExe' pkgs.tini "tini"}" + "${pkgsHost.lib.getExe' pkgsHost.tini "tini"}" "--" ]; Cmd = [ - "${pkgs.lib.getExe package}" + "${pkgsHost.lib.getExe package}" ]; }; }; - devShells.default = pkgs.mkShell { + devShells.default = pkgsHost.mkShell { env = env // { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so. The @@ -121,7 +121,7 @@ # Development tools nativeBuildInputs = nativeBuildInputs ++ [ devToolchain - ] ++ (with pkgs; [ + ] ++ (with pkgsHost; [ engage ]); }; From 692a31620d747a9a640cb3464221c3024c546e09 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 22:26:16 -0800 Subject: [PATCH 1508/1727] make let bindings take pkgs as an argument Again, will make cross compilation easier to set up. --- flake.nix | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/flake.nix b/flake.nix index 039bf0b..76f8bb2 100644 --- a/flake.nix +++ b/flake.nix @@ -57,20 +57,20 @@ fenix.packages.${system}.latest.rustfmt ]); - builder = - ((crane.mkLib pkgsHost).overrideToolchain buildToolchain).buildPackage; + builder = pkgs: + ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage; - nativeBuildInputs = [ - pkgsHost.rustPlatform.bindgenHook + nativeBuildInputs = pkgs: [ + pkgs.rustPlatform.bindgenHook ]; - env = { - ROCKSDB_INCLUDE_DIR = "${pkgsHost.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgsHost.rocksdb}/lib"; + env = pkgs: { + ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; }; in { - packages.default = builder { + packages.default = builder pkgsHost { src = nix-filter { root = ./.; include = [ @@ -83,9 +83,8 @@ # This is redundant with CI doCheck = false; - inherit - env - nativeBuildInputs; + env = env pkgsHost; + nativeBuildInputs = nativeBuildInputs pkgsHost; meta.mainProgram = cargoToml.package.name; }; @@ -111,7 +110,7 @@ }; devShells.default = pkgsHost.mkShell { - env = env // { + env = env pkgsHost // { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so. The # `rust-src` component is required in order for this to work. 
@@ -119,7 +118,7 @@ }; # Development tools - nativeBuildInputs = nativeBuildInputs ++ [ + nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ devToolchain ] ++ (with pkgsHost; [ engage From 52954f7a11e795211bbecc54df456724773251b6 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 22:48:58 -0800 Subject: [PATCH 1509/1727] use fromToolchainFile I *think* this will make it easier to pull in extra rustc targets. --- .gitlab-ci.yml | 1 + Cargo.toml | 7 +------ flake.nix | 36 ++++++++++++------------------------ rust-toolchain.toml | 17 +++++++++++++++++ 4 files changed, 31 insertions(+), 30 deletions(-) create mode 100644 rust-toolchain.toml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 199429a..1f6dab3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -62,6 +62,7 @@ oci-image:x86_64-unknown-linux-gnu: debian:x86_64-unknown-linux-gnu: stage: artifacts + # See also `rust-toolchain.toml` image: rust:1.70.0 script: - apt-get update && apt-get install -y --no-install-recommends libclang-dev diff --git a/Cargo.toml b/Cargo.toml index a35cf23..4091959 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,12 +9,7 @@ readme = "README.md" version = "0.7.0-alpha" edition = "2021" -# When changing this, make sure to update the hash near the text "THE -# rust-version HASH" in `flake.nix`. If you don't have Nix installed or -# otherwise don't know how to do this, ping `@charles:computer.surgery` or -# `@dusk:gaze.systems` in the matrix room. -# -# Also make sure to update the docker image tags in `.gitlab-ci.yml`. +# See also `rust-toolchain.toml` rust-version = "1.70.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/flake.nix b/flake.nix index 76f8bb2..89a4eb4 100644 --- a/flake.nix +++ b/flake.nix @@ -32,33 +32,15 @@ cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); # The Rust toolchain to use - toolchain = fenix.packages.${system}.toolchainOf { - # Use the Rust version defined in `Cargo.toml` - channel = cargoToml.package.rust-version; + toolchain = fenix.packages.${system}.fromToolchainFile { + file = ./rust-toolchain.toml; - # THE rust-version HASH + # See also `rust-toolchain.toml` sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; - mkToolchain = fenix.packages.${system}.combine; - - buildToolchain = mkToolchain (with toolchain; [ - cargo - rustc - ]); - - devToolchain = mkToolchain (with toolchain; [ - cargo - clippy - rust-src - rustc - - # Always use nightly rustfmt because most of its options are unstable - fenix.packages.${system}.latest.rustfmt - ]); - builder = pkgs: - ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage; + ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; nativeBuildInputs = pkgs: [ pkgs.rustPlatform.bindgenHook @@ -114,12 +96,18 @@ # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so. The # `rust-src` component is required in order for this to work. - RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library"; + RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; }; # Development tools nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ - devToolchain + # Always use nightly rustfmt because most of its options are unstable + # + # This needs to come before `toolchain` in this list, otherwise + # `$PATH` will have stable rustfmt instead. 
+ fenix.packages.${system}.latest.rustfmt + + toolchain ] ++ (with pkgsHost; [ engage ]); diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..cc3e254 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,17 @@ +# This is the authoritiative configuration of this project's Rust toolchain. +# +# Other files that need upkeep when this changes: +# +# * `.gitlab-ci.yml` +# * `Cargo.toml` +# * `flake.nix` +# +# Search in those files for `rust-toolchain.toml` to find the relevant places. +# If you're having trouble making the relevant changes, bug a maintainer. + +[toolchain] +channel = "1.70.0" +components = [ + # For rust-analyzer + "rust-src", +] From 3ac9be5a789f34cd980ad68b6a3824fc3ba3c7a0 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 23 Jan 2024 11:10:23 -0800 Subject: [PATCH 1510/1727] add x86_64-unknown-linux-gnu This is probably the most common target and usually doesn't involve cross compilation. --- rust-toolchain.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index cc3e254..8d153e8 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -15,3 +15,6 @@ components = [ # For rust-analyzer "rust-src", ] +targets = [ + "x86_64-unknown-linux-gnu", +] From 67d280dd2e71dca6e412b94ce63d328f408f3490 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 22:56:00 -0800 Subject: [PATCH 1511/1727] factor package expression into a function We'll need to call it repeatedly to make packages for cross. --- flake.nix | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/flake.nix b/flake.nix index 89a4eb4..a86422c 100644 --- a/flake.nix +++ b/flake.nix @@ -50,9 +50,8 @@ ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; }; - in - { - packages.default = builder pkgsHost { + + package = pkgs: builder pkgs { src = nix-filter { root = ./.; include = [ @@ -65,11 +64,14 @@ # This is redundant with CI doCheck = false; - env = env pkgsHost; - nativeBuildInputs = nativeBuildInputs pkgsHost; + env = env pkgs; + nativeBuildInputs = nativeBuildInputs pkgs; meta.mainProgram = cargoToml.package.name; }; + in + { + packages.default = package pkgsHost; packages.oci-image = let From d29591d47d6de9c82539f18994a268f5215a4ee6 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 22 Jan 2024 23:07:30 -0800 Subject: [PATCH 1512/1727] group packages in attrset literal This will make generating packages for cross possible. --- flake.nix | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/flake.nix b/flake.nix index a86422c..9d73ba9 100644 --- a/flake.nix +++ b/flake.nix @@ -71,25 +71,27 @@ }; in { - packages.default = package pkgsHost; + packages = { + default = package pkgsHost; - packages.oci-image = - let - package = self.packages.${system}.default; - in - pkgsHost.dockerTools.buildImage { - name = package.pname; - tag = "latest"; - config = { - # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) are - # handled as expected - Entrypoint = [ - "${pkgsHost.lib.getExe' pkgsHost.tini "tini"}" - "--" - ]; - Cmd = [ - "${pkgsHost.lib.getExe package}" - ]; + oci-image = + let + package = self.packages.${system}.default; + in + pkgsHost.dockerTools.buildImage { + name = package.pname; + tag = "latest"; + config = { + # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT) + # are handled as expected + Entrypoint = [ + "${pkgsHost.lib.getExe' pkgsHost.tini "tini"}" + "--" + ]; + Cmd = [ + "${pkgsHost.lib.getExe package}" + ]; + }; }; }; From 3a3cafe912b16d4ce786bedd37247ea200f125b9 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 23 Jan 2024 21:29:03 -0800 Subject: [PATCH 1513/1727] preempt cross problems by using my crane fork I imagine this will get fixed/merged upstream in the near future. --- flake.lock | 11 ++++++----- flake.nix | 5 ++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index acad901..4508f7c 100644 --- a/flake.lock +++ b/flake.lock @@ -51,15 +51,16 @@ ] }, "locked": { - "lastModified": 1705597458, - "narHash": "sha256-vJ8Ib9ruxbaBxGEcA0d7dHqxpc6Z+SGR2XIxVeSMuLM=", - "owner": "ipetkov", + "lastModified": 1706070683, + "narHash": "sha256-iQoXIFviH5QbSKDkWyFJKKUtTcSiCvl1tW18ax+pKfI=", + "owner": "CobaltCause", "repo": "crane", - "rev": "742170d82cd65c925dcddc5c3d6185699fbbad08", + "rev": "8060686afabc5b042e8982200cb2cc3fd22cf416", "type": "github" }, "original": { - "owner": "ipetkov", + "owner": "CobaltCause", + "ref": "crimes-for-cross", "repo": "crane", "type": "github" } diff --git a/flake.nix b/flake.nix index 9d73ba9..ba57712 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,10 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - url = "github:ipetkov/crane"; + # TODO: Switch back to upstream after [this issue][0] is fixed + # + # [0]: https://github.com/ipetkov/crane/issues/497 + url = "github:CobaltCause/crane?ref=crimes-for-cross"; inputs.nixpkgs.follows = "nixpkgs"; }; attic.url = "github:zhaofengli/attic?ref=main"; From 81ae579b2546e9fe6d9eda4ed33507f4f159c67c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 23 Jan 2024 11:22:18 -0800 Subject: [PATCH 1514/1727] add static cross to x86_64-unknown-linux-musl --- .gitlab-ci.yml | 13 +++++++ flake.nix | 89 +++++++++++++++++++++++++++++++++++++++++++-- rust-toolchain.toml | 1 + 3 files changed, 100 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1f6dab3..95953e2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -42,6 +42,19 @@ ci: - target - .gitlab-ci.d +static:x86_64-unknown-linux-musl: + stage: artifacts + image: nixos/nix:2.19.2 + script: + # Push artifacts and build requirements to binary cache + - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl + + # Make the output less difficult to find + - cp result/bin/conduit conduit + artifacts: + paths: + - conduit + oci-image:x86_64-unknown-linux-gnu: stage: artifacts image: nixos/nix:2.19.2 diff --git a/flake.nix b/flake.nix index ba57712..c30c948 100644 --- a/flake.nix +++ b/flake.nix @@ -46,13 +46,82 @@ ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; nativeBuildInputs = pkgs: [ - pkgs.rustPlatform.bindgenHook + # bindgen needs the build platform's libclang. Apparently due to + # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't + # quite do the right thing here. + pkgs.buildPackages.rustPlatform.bindgenHook ]; env = pkgs: { ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; - }; + } + // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { + ROCKSDB_STATIC = ""; + } + // { + CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in + lib.concatStringsSep " " ([] + ++ lib.optionals + # This disables PIE for static builds, which isn't great in terms + # of security. 
Unfortunately, my hand is forced because nixpkgs' + # `libstdc++.a` is built without `-fPIE`, which precludes us from + # leaving PIE enabled. + stdenv.hostPlatform.isStatic + ["-C" "relocation-model=static"] + ++ lib.optionals + (stdenv.buildPlatform.config != pkgs.stdenv.hostPlatform.config) + ["-l" "c"] + ); + } + + # What follows is stolen from [here][0]. Its purpose is to properly + # configure compilers and linkers for various stages of the build, and + # even covers the case of build scripts that need native code compiled and + # run on the build platform (I think). + # + # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78 + // ( + let + inherit (pkgs.rust.lib) envVars; + in + pkgs.lib.optionalAttrs + (pkgs.stdenv.targetPlatform.rust.rustcTarget + != pkgs.stdenv.hostPlatform.rust.rustcTarget) + ( + let + inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = + envVars.linkerForTarget; + } + ) + // ( + let + inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForHost; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; + CARGO_BUILD_TARGET = rustcTarget; + } + ) + // ( + let + inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; + HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc"; + HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++"; + } + )); package = pkgs: builder pkgs { src = nix-filter { @@ -96,7 +165,21 @@ ]; }; }; - }; + } // builtins.listToAttrs ( + builtins.map + (crossSystem: { + name = "static-${crossSystem}"; + value = package (import nixpkgs { + inherit system; + crossSystem = { + config = crossSystem; + }; + }).pkgsStatic; + }) + [ + "x86_64-unknown-linux-musl" + ] + ); devShells.default = pkgsHost.mkShell { env = env pkgsHost // { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8d153e8..9a21cdb 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -17,4 +17,5 @@ components = [ ] targets = [ "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", ] From 0e8e4f10833aa4ee7104a599e5fe56f2fa027a9b Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 23 Jan 2024 11:49:52 -0800 Subject: [PATCH 1515/1727] add static cross to aarch64-unknown-linux-musl --- .gitlab-ci.yml | 13 +++++++++++++ flake.nix | 19 +++++++++++++++++++ rust-toolchain.toml | 1 + 3 files changed, 33 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 95953e2..6695e43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -55,6 +55,19 @@ static:x86_64-unknown-linux-musl: paths: - conduit +static:aarch64-unknown-linux-musl: + stage: artifacts + image: nixos/nix:2.19.2 + script: + # Push artifacts and build requirements to binary cache + - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl + + # Make the output less difficult to find + - cp result/bin/conduit conduit + artifacts: + paths: + - conduit + oci-image:x86_64-unknown-linux-gnu: stage: artifacts image: nixos/nix:2.19.2 diff --git a/flake.nix b/flake.nix index c30c948..e7b8cdb 100644 --- a/flake.nix +++ b/flake.nix @@ -72,6 +72,24 @@ ++ 
lib.optionals (stdenv.buildPlatform.config != pkgs.stdenv.hostPlatform.config) ["-l" "c"] + ++ lib.optionals + # This check has to match the one [here][0]. We only need to set + # these flags when using a different linker. Don't ask me why, + # though, because I don't know. All I know is it breaks otherwise. + # + # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39 + ( + pkgs.stdenv.hostPlatform.isAarch64 + && pkgs.stdenv.hostPlatform.isStatic + && !pkgs.stdenv.isDarwin + && !pkgs.stdenv.cc.bintools.isLLVM + ) + [ + "-l" + "stdc++" + "-L" + "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" + ] ); } @@ -178,6 +196,7 @@ }) [ "x86_64-unknown-linux-musl" + "aarch64-unknown-linux-musl" ] ); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 9a21cdb..8ef64c9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -18,4 +18,5 @@ components = [ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", + "aarch64-unknown-linux-musl", ] From 2a04a361e090bcd44939b5c7680ee4c445516790 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 19:36:17 -0800 Subject: [PATCH 1516/1727] break oci image builder into a function Now it can be reused for different `pkgs` and `package`s. --- flake.nix | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/flake.nix b/flake.nix index e7b8cdb..5f4df1e 100644 --- a/flake.nix +++ b/flake.nix @@ -159,30 +159,29 @@ meta.mainProgram = cargoToml.package.name; }; - in - { - packages = { - default = package pkgsHost; - oci-image = - let - package = self.packages.${system}.default; - in - pkgsHost.dockerTools.buildImage { + mkOciImage = pkgs: package: + pkgs.dockerTools.buildImage { name = package.pname; tag = "latest"; config = { # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) # are handled as expected Entrypoint = [ - "${pkgsHost.lib.getExe' pkgsHost.tini "tini"}" + "${pkgs.lib.getExe' pkgs.tini "tini"}" "--" ]; Cmd = [ - "${pkgsHost.lib.getExe package}" + "${pkgs.lib.getExe package}" ]; }; }; + in + { + packages = { + default = package pkgsHost; + + oci-image = mkOciImage pkgsHost self.packages.${system}.default; } // builtins.listToAttrs ( builtins.map (crossSystem: { From 7c1a3e41d94dbc4d5b395dae300bd3d42b937c50 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 19:38:25 -0800 Subject: [PATCH 1517/1727] add package to build an aarch64 oci image And build it as an artifact in CI. 
--- .gitlab-ci.yml | 17 +++++++++++++++++ flake.nix | 10 ++++++++++ 2 files changed, 27 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6695e43..a28db4d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -86,6 +86,23 @@ oci-image:x86_64-unknown-linux-gnu: paths: - oci-image.tar.gz +oci-image:aarch64-unknown-linux-musl: + stage: artifacts + needs: + # Wait for the static binary job to finish before starting so we don't have + # to build that twice for no reason + - static:aarch64-unknown-linux-musl + image: nixos/nix:2.19.2 + script: + # Push artifacts and build requirements to binary cache + - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl + + # Make the output less difficult to find + - cp result oci-image.tar.gz + artifacts: + paths: + - oci-image.tar.gz + debian:x86_64-unknown-linux-gnu: stage: artifacts # See also `rust-toolchain.toml` diff --git a/flake.nix b/flake.nix index 5f4df1e..1bc5c14 100644 --- a/flake.nix +++ b/flake.nix @@ -182,6 +182,16 @@ default = package pkgsHost; oci-image = mkOciImage pkgsHost self.packages.${system}.default; + + # Build an OCI image from the musl aarch64 build so we don't have to + # build for aarch64 twice (to make a gnu version specifically for the + # OCI image) + oci-image-aarch64-unknown-linux-musl = mkOciImage + pkgsHost + self.packages.${system}.static-aarch64-unknown-linux-musl; + + # Don't build a musl x86_64 OCI image because that would be pointless. + # Just use the gnu one (i.e. `self.packages."x86_64-linux".oci-image`). } // builtins.listToAttrs ( builtins.map (crossSystem: { From 9453dbc7408c2ad043c5801bd0f25edd2f1769b7 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 20:33:15 -0800 Subject: [PATCH 1518/1727] update rust toolchain It comes with a bunch of new lints (yay!) so I fixed them all so CI will keep working. Also apparently something about linking changed because I had to change the checks for deciding the linker flags for static x86_64 builds to keep working. 
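Among the fixes below is the `PartialOrd` impl for `PduEvent`: when a type also implements `Ord`, the newer clippy (I believe) flags `partial_cmp` bodies that don't simply defer to `cmp`, since a hand-rolled comparison can drift out of sync with the total ordering. A minimal sketch of the canonical pattern, on an illustrative type rather than Conduit's own:

```rust
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct Count(u64);

impl Ord for Count {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

impl PartialOrd for Count {
    // Deferring to `Ord` keeps the two orderings consistent by construction;
    // comparing fields directly here could silently disagree with `cmp` if
    // either impl changes later.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
```
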
--- .gitlab-ci.yml | 2 +- Cargo.toml | 2 +- flake.nix | 8 ++++++-- rust-toolchain.toml | 2 +- src/api/client_server/media.rs | 2 +- src/database/abstraction/persy.rs | 6 +++--- src/database/abstraction/sqlite.rs | 2 +- src/database/key_value/account_data.rs | 5 ++--- src/database/key_value/globals.rs | 4 ++-- src/database/key_value/rooms/short.rs | 5 ++--- src/database/key_value/users.rs | 5 ++--- src/service/globals/mod.rs | 2 +- src/service/pdu.rs | 2 +- src/service/rooms/timeline/mod.rs | 25 +++++++++++++------------ src/service/sending/mod.rs | 2 +- src/service/users/mod.rs | 18 ++++++++++++------ 16 files changed, 50 insertions(+), 42 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a28db4d..f2438c8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -106,7 +106,7 @@ oci-image:aarch64-unknown-linux-musl: debian:x86_64-unknown-linux-gnu: stage: artifacts # See also `rust-toolchain.toml` - image: rust:1.70.0 + image: rust:1.75.0 script: - apt-get update && apt-get install -y --no-install-recommends libclang-dev - cargo install cargo-deb diff --git a/Cargo.toml b/Cargo.toml index 4091959..156de3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ version = "0.7.0-alpha" edition = "2021" # See also `rust-toolchain.toml` -rust-version = "1.70.0" +rust-version = "1.75.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/flake.nix b/flake.nix index 1bc5c14..e10ea4e 100644 --- a/flake.nix +++ b/flake.nix @@ -39,7 +39,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; + sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; }; builder = pkgs: @@ -79,7 +79,11 @@ # # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39 ( - pkgs.stdenv.hostPlatform.isAarch64 + (pkgs.stdenv.hostPlatform.isAarch64 + # Nixpkgs doesn't check for x86_64 here but we do, because I + # observed a failure building statically for x86_64 without + # including it here. Linkers are weird. + || pkgs.stdenv.hostPlatform.isx86_64) && pkgs.stdenv.hostPlatform.isStatic && !pkgs.stdenv.isDarwin && !pkgs.stdenv.cc.bintools.isLLVM diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8ef64c9..f7a9434 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -10,7 +10,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.70.0" +channel = "1.75.0" components = [ # For rust-analyzer "rust-src", diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 75f8e15..7fc65c2 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -51,7 +51,7 @@ pub async fn create_content_route( .await?; Ok(create_content::v3::Response { - content_uri: mxc.try_into().expect("Invalid mxc:// URI"), + content_uri: mxc.into(), blurhash: None, }) } diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 1fa7a0d..da7d4cf 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -116,7 +116,7 @@ impl KvTree for PersyTree { match iter { Ok(iter) => Box::new(iter.filter_map(|(k, v)| { v.into_iter() - .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .map(|val| ((*k).to_owned(), (*val).to_owned())) .next() })), Err(e) => { @@ -142,7 +142,7 @@ impl KvTree for PersyTree { Ok(iter) => { let map = iter.filter_map(|(k, v)| { v.into_iter() - .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .map(|val| ((*k).to_owned(), (*val).to_owned())) .next() }); if backwards { @@ -179,7 +179,7 @@ impl KvTree for PersyTree { iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) .filter_map(|(k, v)| { v.into_iter() - .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .map(|val| ((*k).to_owned(), (*val).to_owned())) .next() }), ) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index b69efb6..222a843 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> { struct NonAliasingBox(*mut T); impl Drop for NonAliasingBox { fn drop(&mut self) { - unsafe { Box::from_raw(self.0) }; + drop(unsafe { Box::from_raw(self.0) }); } } diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index e1eef96..970b36b 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -123,13 +123,12 @@ impl service::account_data::Data for KeyValueDatabase { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - RoomAccountDataEventType::try_from( + RoomAccountDataEventType::from( utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( || Error::bad_database("RoomUserData ID in db is invalid."), )?) .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, + ), serde_json::from_slice::>(&v).map_err(|_| { Error::bad_database("Database contains invalid account data.") })?, diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 11aa064..7a70af1 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -256,8 +256,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n" .. 
} = new_keys; - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); + keys.verify_keys.extend(verify_keys); + keys.old_verify_keys.extend(old_verify_keys); self.server_signingkeys.insert( origin.as_bytes(), diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index c022317..98cfa48 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -157,10 +157,9 @@ impl service::rooms::short::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; + })?); let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 2b09d68..0e6db83 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -146,10 +146,9 @@ impl service::users::Data for KeyValueDatabase { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - s.try_into() + utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) + .map(Into::into) }) .transpose() } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f034415..f782294 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -128,7 +128,7 @@ impl Resolve for Resolver { .expect("lock should not be poisoned") .get(name.as_str()) .and_then(|(override_name, port)| { - override_name.get(0).map(|first_name| { + override_name.first().map(|first_name| { let x: Box + Send> = Box::new(iter::once(SocketAddr::new(*first_name, *port))); let x: Resolving = Box::pin(future::ready(Ok(x))); diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 4a170bc..c8d7860 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -385,7 +385,7 @@ impl PartialEq for PduEvent { } impl PartialOrd for PduEvent { fn partial_cmp(&self, other: &Self) -> Option { - self.event_id.partial_cmp(&other.event_id) + Some(self.cmp(other)) } } impl Ord for PduEvent { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 06a076e..ef09d06 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -90,18 +90,6 @@ impl Ord for PduCount { } } } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn comparisons() { - assert!(PduCount::Normal(1) < PduCount::Normal(2)); - assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); - assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); - assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); - } -} pub struct Service { pub db: &'static dyn Data, @@ -1208,3 +1196,16 @@ impl Service { Ok(()) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn comparisons() { + assert!(PduCount::Normal(1) < PduCount::Normal(2)); + assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); + assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); + 
assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); + } +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b441144..f80c4f0 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -131,7 +131,7 @@ impl Service { for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { let entry = initial_transactions .entry(outgoing_kind.clone()) - .or_insert_with(Vec::new); + .or_default(); if entry.len() > 30 { warn!( diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index dc34d53..fb983a4 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -138,12 +138,18 @@ impl Service { cached.lists.insert(list_id.clone(), list.clone()); } - cached - .subscriptions - .extend(request.room_subscriptions.clone().into_iter()); - request - .room_subscriptions - .extend(cached.subscriptions.clone().into_iter()); + cached.subscriptions.extend( + request + .room_subscriptions + .iter() + .map(|(k, v)| (k.clone(), v.clone())), + ); + request.room_subscriptions.extend( + cached + .subscriptions + .iter() + .map(|(k, v)| (k.clone(), v.clone())), + ); request.extensions.e2ee.enabled = request .extensions From a7892a28ec9395fbd5fd99aa09ed8de1206434be Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 25 Jan 2024 22:00:32 -0800 Subject: [PATCH 1519/1727] refer directly to stdenv since it's in scope --- flake.nix | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/flake.nix b/flake.nix index e10ea4e..c9ef791 100644 --- a/flake.nix +++ b/flake.nix @@ -70,7 +70,7 @@ stdenv.hostPlatform.isStatic ["-C" "relocation-model=static"] ++ lib.optionals - (stdenv.buildPlatform.config != pkgs.stdenv.hostPlatform.config) + (stdenv.buildPlatform.config != stdenv.hostPlatform.config) ["-l" "c"] ++ lib.optionals # This check has to match the one [here][0]. We only need to set @@ -79,14 +79,13 @@ # # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39 ( - (pkgs.stdenv.hostPlatform.isAarch64 - # Nixpkgs doesn't check for x86_64 here but we do, because I - # observed a failure building statically for x86_64 without - # including it here. Linkers are weird. - || pkgs.stdenv.hostPlatform.isx86_64) - && pkgs.stdenv.hostPlatform.isStatic - && !pkgs.stdenv.isDarwin - && !pkgs.stdenv.cc.bintools.isLLVM + # Nixpkgs doesn't check for x86_64 here but we do, because I + # observed a failure building statically for x86_64 without + # including it here. Linkers are weird. + (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) + && stdenv.hostPlatform.isStatic + && !stdenv.isDarwin + && !stdenv.cc.bintools.isLLVM ) [ "-l" From 3e389256f5f191d6a26eecc622ec0e70661745c0 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 26 Jan 2024 00:11:14 -0800 Subject: [PATCH 1520/1727] switch lint config to `manifest-lint` feature I removed some lint configuration in the process: * `#[allow(clippy::suspicious_else_formatting)]` because nothing is currently triggering it. * `#[warn(clippy::future_not_send)]` because some stuff under `src/lib.rs` is. And also like, auto-trait leakage generally means this isn't a problem, and if things really need to be `Send`, then you'll probably know to mark it manually. * `#[warn(rust_2018_idioms)]` and replaced it with `explicit-outlives-requirements = "warn"` which is the most useful lint in that group that isn't enabled by default. 
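For reference, a rough sketch of the kind of code the kept clippy lints are aimed at (illustrative snippets, not taken from Conduit):

```rust
fn lint_targets(ids: &[u64], name: &str) -> (Vec<u64>, String) {
    // `cloned_instead_of_copied`: for `Copy` element types, `.copied()` states
    // the intent more precisely than `.cloned()` (which would be flagged here).
    let ids: Vec<u64> = ids.iter().copied().collect();

    // `str_to_string`: on a `&str`, `.to_owned()` is preferred over
    // `.to_string()` to make the conversion explicit.
    let name = name.to_owned();

    // `dbg_macro`: a leftover `dbg!(&ids)` call would be flagged as well.
    (ids, name)
}
```
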
--- Cargo.toml | 14 ++++++++++++++ src/lib.rs | 9 --------- src/main.rs | 10 ---------- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 156de3c..5292399 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,14 @@ +# Keep alphabetically sorted +[workspace.lints.rust] +explicit_outlives_requirements = "warn" +unused_qualifications = "warn" + +# Keep alphabetically sorted +[workspace.lints.clippy] +cloned_instead_of_copied = "warn" +dbg_macro = "warn" +str_to_string = "warn" + [package] name = "conduit" description = "A Matrix homeserver written in Rust" @@ -14,6 +25,9 @@ rust-version = "1.75.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lints] +workspace = true + [dependencies] # Web framework axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } diff --git a/src/lib.rs b/src/lib.rs index dc6a9d2..66d0c57 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,12 +1,3 @@ -#![warn( - rust_2018_idioms, - unused_qualifications, - clippy::cloned_instead_of_copied, - clippy::str_to_string -)] -#![allow(clippy::suspicious_else_formatting)] -#![deny(clippy::dbg_macro)] - pub mod api; mod config; mod database; diff --git a/src/main.rs b/src/main.rs index 683e091..524f688 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,13 +1,3 @@ -#![warn( - rust_2018_idioms, - unused_qualifications, - clippy::cloned_instead_of_copied, - clippy::str_to_string, - clippy::future_not_send -)] -#![allow(clippy::suspicious_else_formatting)] -#![deny(clippy::dbg_macro)] - use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ From 0df5d18fd6eb08b8398453b287b17c8bc2bae621 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Fri, 26 Jan 2024 21:38:13 -0800 Subject: [PATCH 1521/1727] change docker tag back to `next` I misunderstood what the general meaning of the `latest` tag was. --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index c9ef791..a16ea58 100644 --- a/flake.nix +++ b/flake.nix @@ -166,7 +166,7 @@ mkOciImage = pkgs: package: pkgs.dockerTools.buildImage { name = package.pname; - tag = "latest"; + tag = "next"; config = { # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) # are handled as expected From dffd771e7c9f48a0a63f9bc945ce1ee3c9afaa69 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 27 Jan 2024 12:23:57 -0800 Subject: [PATCH 1522/1727] add ca certificates to the OCI image Without this, checking the authority of TLS certificates fails, making Conduit (rightly) refuse to connect to anything. --- flake.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake.nix b/flake.nix index a16ea58..31a8413 100644 --- a/flake.nix +++ b/flake.nix @@ -167,6 +167,9 @@ pkgs.dockerTools.buildImage { name = package.pname; tag = "next"; + copyToRoot = [ + pkgs.dockerTools.caCertificates + ]; config = { # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) # are handled as expected From cf8f1f25467c02f86433a5b0ef49e639e9b1eb45 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 24 Dec 2023 00:51:26 -0800 Subject: [PATCH 1523/1727] make a bunch of changes so complement works again Well, kinda. It crashed on me after 10 minutes because the tests timed out like in . Sounds like this means it's a them problem though. 
I want to use Nix to build this image instead in the future but this will at least make it work for now and give me a reference for while I'm porting it. I also want to make Conduit natively understand Complement's requirements instead of `sed`ing a bunch of stuff and needing a reverse proxy in the container. Should be more reliable that way. I'm not making this run in CI until the above stuff is addressed and until I can decide on a way to pin the revision of Complement being tested against. --- bin/complement | 37 +++++++++++++++++++++++++++++++++++ complement/Dockerfile | 45 ++++++++++++++++++++----------------------- complement/README.md | 16 +++++++-------- flake.nix | 7 +++++++ 4 files changed, 72 insertions(+), 33 deletions(-) create mode 100755 bin/complement diff --git a/bin/complement b/bin/complement new file mode 100755 index 0000000..291953d --- /dev/null +++ b/bin/complement @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Path to Complement's source code +COMPLEMENT_SRC="$1" + +# A `.jsonl` file to write test logs to +LOG_FILE="$2" + +# A `.jsonl` file to write test results to +RESULTS_FILE="$3" + +OCI_IMAGE="complement-conduit:dev" + +env \ + -C "$(git rev-parse --show-toplevel)" \ + docker build \ + --tag "$OCI_IMAGE" \ + --file complement/Dockerfile \ + . + +# It's okay (likely, even) that `go test` exits nonzero +set +o pipefail +env \ + -C "$COMPLEMENT_SRC" \ + COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + go test -json ./tests | tee "$LOG_FILE" +set -o pipefail + +# Post-process the results into an easy-to-compare format +cat "$LOG_FILE" | jq -c ' + select( + (.Action == "pass" or .Action == "fail" or .Action == "skip") + and .Test != null + ) | {Action: .Action, Test: .Test} + ' | sort > "$RESULTS_FILE" diff --git a/complement/Dockerfile b/complement/Dockerfile index 50173a1..813af10 100644 --- a/complement/Dockerfile +++ b/complement/Dockerfile @@ -1,26 +1,30 @@ -# For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit -FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder -#FROM rust:latest as builder +FROM rust:1.75.0 WORKDIR /workdir -ARG RUSTC_WRAPPER -ARG AWS_ACCESS_KEY_ID -ARG AWS_SECRET_ACCESS_KEY -ARG SCCACHE_BUCKET -ARG SCCACHE_ENDPOINT -ARG SCCACHE_S3_USE_SSL +RUN apt-get update && apt-get install -y --no-install-recommends \ + libclang-dev -COPY . . 
-RUN mkdir -p target/release -RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release - -## Actual image -FROM debian:bullseye -WORKDIR /workdir +COPY Cargo.toml Cargo.toml +COPY Cargo.lock Cargo.lock +COPY src src +RUN cargo build --release \ + && mv target/release/conduit conduit \ + && rm -rf target # Install caddy -RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy +RUN apt-get update \ + && apt-get install -y \ + debian-keyring \ + debian-archive-keyring \ + apt-transport-https \ + curl \ + && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \ + | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \ + && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \ + | tee /etc/apt/sources.list.d/caddy-testing.list \ + && apt-get update \ + && apt-get install -y caddy COPY conduit-example.toml conduit.toml COPY complement/caddy.json caddy.json @@ -29,16 +33,9 @@ ENV SERVER_NAME=localhost ENV CONDUIT_CONFIG=/workdir/conduit.toml RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml -RUN echo "allow_federation = true" >> conduit.toml -RUN echo "allow_check_for_updates = true" >> conduit.toml -RUN echo "allow_encryption = true" >> conduit.toml -RUN echo "allow_registration = true" >> conduit.toml RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml -COPY --from=builder /workdir/target/release/conduit /workdir/conduit -RUN chmod +x /workdir/conduit - EXPOSE 8008 8448 CMD uname -a && \ diff --git a/complement/README.md b/complement/README.md index b86aab3..185b251 100644 --- a/complement/README.md +++ b/complement/README.md @@ -1,13 +1,11 @@ -# Running Conduit on Complement +# Complement -This assumes that you're familiar with complement, if not, please readme -[their readme](https://github.com/matrix-org/complement#running). +## What's that? -Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker -image. +Have a look at [its repository](https://github.com/matrix-org/complement). -To build, `cd` to the base directory of the workspace, and run this: +## How do I use it with Conduit? -`docker build -t complement-conduit:dev -f complement/Dockerfile .` - -Then use `complement-conduit:dev` as a base image for running complement tests. +The script at [`../bin/complement`](../bin/complement) has automation for this. +It takes a few command line arguments, you can read the script to find out what +those are. diff --git a/flake.nix b/flake.nix index 31a8413..bf50e58 100644 --- a/flake.nix +++ b/flake.nix @@ -234,6 +234,13 @@ toolchain ] ++ (with pkgsHost; [ engage + + # Needed for Complement + go + olm + + # Needed for our script for Complement + jq ]); }; }); From c167f7a6ad32d27a390ea5017572f7461a4271c4 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 28 Jan 2024 13:31:03 -0800 Subject: [PATCH 1524/1727] switch crane input back to upstream Thanks to the crane maintainer to fixing my issue in a way that doesn't suck, unlike my attempt in the fork we were briefly using. 
--- flake.lock | 12 ++++++------ flake.nix | 5 +---- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 4508f7c..85b0ab5 100644 --- a/flake.lock +++ b/flake.lock @@ -51,16 +51,16 @@ ] }, "locked": { - "lastModified": 1706070683, - "narHash": "sha256-iQoXIFviH5QbSKDkWyFJKKUtTcSiCvl1tW18ax+pKfI=", - "owner": "CobaltCause", + "lastModified": 1706473964, + "narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=", + "owner": "ipetkov", "repo": "crane", - "rev": "8060686afabc5b042e8982200cb2cc3fd22cf416", + "rev": "c798790eabec3e3da48190ae3698ac227aab770c", "type": "github" }, "original": { - "owner": "CobaltCause", - "ref": "crimes-for-cross", + "owner": "ipetkov", + "ref": "master", "repo": "crane", "type": "github" } diff --git a/flake.nix b/flake.nix index bf50e58..9425968 100644 --- a/flake.nix +++ b/flake.nix @@ -9,10 +9,7 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - # TODO: Switch back to upstream after [this issue][0] is fixed - # - # [0]: https://github.com/ipetkov/crane/issues/497 - url = "github:CobaltCause/crane?ref=crimes-for-cross"; + url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; }; attic.url = "github:zhaofengli/attic?ref=main"; From 2d47710b551616786dfa935d6a769f34c30b4161 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 29 Jan 2024 14:50:30 -0800 Subject: [PATCH 1525/1727] update DEPLOY.md with new build links --- DEPLOY.md | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index cb318ee..dcb1777 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -12,11 +12,13 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url: -| CPU Architecture | Download stable version | Download development version | -| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- | -| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] | -| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] | -| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] | +**Stable versions:** + +| CPU Architecture | Download stable version | +| ------------------------------------------- | --------------------------------------------------------------- | +| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | +| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | +| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | These builds were created on and linked against the glibc version shipped with Debian bullseye. If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. @@ -24,15 +26,19 @@ If you use a system with an older glibc version (e.g. 
RHEL8), you might need to [x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master [armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master [armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master -[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next -[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next -[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next [x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master [armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master [armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master -[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next -[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next -[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next + +**Latest versions:** + +| Target | Type | Download | +|-|-|-| +| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) | +| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) | +| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) | +| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | +| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image.tar.gz?job=oci-image:aarch64-unknown-linux-musl) | ```bash $ sudo wget -O /usr/local/bin/matrix-conduit From a43bde69fa6ae1517e5475f9bd4d228bad3b0fea Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 29 Jan 2024 15:34:16 -0800 Subject: [PATCH 1526/1727] pass pkgsCrossStatic to mkOciImage, not pkgsHost This fixes a bug where the aarch64 OCI image had metadata saying it was an x86_64 OCI image. On top of that, I think the metadata was actually right (aside from Conduit's binary): since all other packages were being pulled from `pkgsHost`, an OCI image cross compiled for aarch64 from a different architecture would result in unexecutable binaries (e.g. tini) since they were compiled for the completely wrong architecture. 
--- .gitlab-ci.yml | 3 +++ flake.nix | 63 +++++++++++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 26 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f2438c8..d4e49ce 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -68,6 +68,9 @@ static:aarch64-unknown-linux-musl: paths: - conduit +# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output, +# we don't build it because it would be largely redundant to this one since it's +# all containerized anyway. oci-image:x86_64-unknown-linux-gnu: stage: artifacts image: nixos/nix:2.19.2 diff --git a/flake.nix b/flake.nix index 9425968..e0bc6fb 100644 --- a/flake.nix +++ b/flake.nix @@ -183,34 +183,45 @@ { packages = { default = package pkgsHost; - oci-image = mkOciImage pkgsHost self.packages.${system}.default; + } + // + builtins.listToAttrs + (builtins.concatLists + (builtins.map + (crossSystem: + let + binaryName = "static-${crossSystem}"; + pkgsCrossStatic = + (import nixpkgs { + inherit system; + crossSystem = { + config = crossSystem; + }; + }).pkgsStatic; + in + [ + # An output for a statically-linked binary + { + name = binaryName; + value = package pkgsCrossStatic; + } - # Build an OCI image from the musl aarch64 build so we don't have to - # build for aarch64 twice (to make a gnu version specifically for the - # OCI image) - oci-image-aarch64-unknown-linux-musl = mkOciImage - pkgsHost - self.packages.${system}.static-aarch64-unknown-linux-musl; - - # Don't build a musl x86_64 OCI image because that would be pointless. - # Just use the gnu one (i.e. `self.packages."x86_64-linux".oci-image`). - } // builtins.listToAttrs ( - builtins.map - (crossSystem: { - name = "static-${crossSystem}"; - value = package (import nixpkgs { - inherit system; - crossSystem = { - config = crossSystem; - }; - }).pkgsStatic; - }) - [ - "x86_64-unknown-linux-musl" - "aarch64-unknown-linux-musl" - ] - ); + # An output for an OCI image based on that binary + { + name = "oci-image-${crossSystem}"; + value = mkOciImage + pkgsCrossStatic + self.packages.${system}.${binaryName}; + } + ] + ) + [ + "x86_64-unknown-linux-musl" + "aarch64-unknown-linux-musl" + ] + ) + ); devShells.default = pkgsHost.mkShell { env = env pkgsHost // { From 1274b48ebb09ac1f1576cbb9e3417a4ca2256273 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 29 Jan 2024 15:54:31 -0800 Subject: [PATCH 1527/1727] run `cargo update` `IndexMap::remove` was deprecated in favor of explicitly named methods. I assume that we actually needed to be using `shift_remove`, otherwise we probably wouldn't be bothering with `indexmap` here in the first place. I wonder if this fixes any bugs lol --- Cargo.lock | 1250 ++++++++++++++++++++++++------------------- src/database/mod.rs | 6 +- 2 files changed, 705 insertions(+), 551 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5e1d16..81b270d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
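For context on the `shift_remove` choice: `indexmap` keeps entries in insertion order, and the two explicitly named removal methods differ in whether they preserve it. A minimal sketch (standalone example, not Conduit code):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<&str, u32> = IndexMap::new();
    map.insert("a", 1);
    map.insert("b", 2);
    map.insert("c", 3);
    map.insert("d", 4);

    // `swap_remove` is O(1) but moves the last entry into the vacated slot,
    // so iteration order becomes a, d, c.
    let mut swapped = map.clone();
    swapped.swap_remove("b");
    assert_eq!(swapped.keys().copied().collect::<Vec<_>>(), ["a", "d", "c"]);

    // `shift_remove` is O(n) but keeps the remaining entries in insertion
    // order: a, c, d. If insertion order matters (the usual reason to use
    // `indexmap` at all), this is the method to reach for.
    let mut shifted = map.clone();
    shifted.shift_remove("b");
    assert_eq!(shifted.keys().copied().collect::<Vec<_>>(), ["a", "c", "d"]);
}
```
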
version = 3 +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -10,35 +19,36 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" [[package]] name = "arc-swap" @@ -72,20 +82,23 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "atomic" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] [[package]] name = "autocfg" @@ -95,9 +108,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", @@ -162,6 +175,21 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + [[package]] name = "base64" version = "0.13.1" @@ -170,9 +198,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -198,7 +226,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] @@ -209,19 +237,19 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.2" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq 0.2.6", + "constant_time_eq", ] [[package]] @@ -235,27 +263,27 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bzip2-sys" @@ -270,11 +298,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", + "libc", ] [[package]] @@ -294,9 +323,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -305,43 +334,41 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.8" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" +checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" dependencies = [ "clap_builder", 
"clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.3.8" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" +checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" dependencies = [ "anstyle", - "bitflags 1.3.2", "clap_lex", ] [[package]] name = "clap_derive" -version = "4.3.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "color_quant" @@ -356,7 +383,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.21.2", + "base64 0.21.7", "bytes", "clap", "directories", @@ -378,7 +405,7 @@ dependencies = [ "rand", "regex", "reqwest", - "ring", + "ring 0.16.20", "rocksdb", "ruma", "rusqlite", @@ -406,9 +433,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_panic" @@ -418,21 +445,15 @@ checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" [[package]] name = "constant_time_eq" -version = "0.1.5" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "constant_time_eq" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" +checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -440,15 +461,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.8" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -464,9 +485,9 @@ dependencies = [ [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" @@ -479,22 +500,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -508,9 +525,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", "cpufeatures", @@ -525,23 +542,23 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "dashmap" -version = "5.4.0" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.12.3", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core", @@ -549,9 +566,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "der" @@ -563,6 +580,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] + [[package]] name = "digest" version = "0.10.7" @@ -596,9 +622,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", "signature", @@ -606,29 +632,30 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ 
"curve25519-dalek", "ed25519", "rand_core", "serde", "sha2", + "subtle", "zeroize", ] [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] @@ -647,9 +674,9 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "fallible-iterator" @@ -665,38 +692,38 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fdeflate" -version = "0.3.0" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" +checksum = "4f9bfee30e4dedf0ab8b422f03af778d9612b63f502710fc500a334ebe2de645" dependencies = [ "simd-adler32", ] [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "figment" -version = "0.10.10" +version = "0.10.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4547e226f4c9ab860571e070a9034192b3175580ecea38da34fcdb53a018c9a5" +checksum = "2b6e5bc7bd59d60d0d45a6ccab6cf0f4ce28698fb4e81e750ddf229c9b824026" dependencies = [ "atomic", "pear", "serde", - "toml", + "toml 0.8.8", "uncased", "version_check", ] [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -710,9 +737,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -729,9 +756,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -744,9 +771,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -754,15 +781,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -771,38 +798,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -828,9 +855,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -847,6 +874,12 @@ dependencies = [ "weezl", ] +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + [[package]] name = "glob" version = "0.3.1" @@ -855,9 +888,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.19" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = 
"bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", @@ -865,7 +898,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.2.1", "slab", "tokio", "tokio-util", @@ -880,9 +913,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -890,21 +923,20 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] name = "headers" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64 0.21.7", "bytes", "headers-core", "http", @@ -930,12 +962,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.2.6" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hmac" @@ -959,9 +988,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -970,9 +999,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -981,9 +1010,9 @@ dependencies = [ [[package]] name = "http-range-header" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" @@ -993,15 +1022,15 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -1014,7 +1043,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -1023,9 +1052,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", @@ -1048,9 +1077,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1058,16 +1087,15 @@ dependencies = [ [[package]] name = "image" -version = "0.24.6" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527909aa81e20ac3a44803521443a765550f09b5130c2c2fa1ea59c2f8f50a3a" +checksum = "034bbe799d1909622a74d1193aa50147769440040ff36cb2baa947609b0a4e23" dependencies = [ "bytemuck", "byteorder", "color_quant", "gif", "jpeg-decoder", - "num-rational", "num-traits", "png", ] @@ -1084,12 +1112,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.3", "serde", ] @@ -1111,17 +1139,17 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" @@ -1134,30 +1162,30 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] [[package]] name = "jpeg-decoder" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" [[package]] name 
= "js-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -1186,9 +1214,9 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.2", + "base64 0.21.7", "pem", - "ring", + "ring 0.16.20", "serde", "serde_json", "simple_asn1", @@ -1196,9 +1224,9 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9a8bb6c7c71d151b25936b03e012a4c00daea99e3a3797c6ead66b0a0d55e2" +checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" dependencies = [ "const_panic", "konst_kernel", @@ -1207,9 +1235,9 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55d2ab266022e7309df89ed712bddc753e3a3c395c3ced1bb2e4470ec2a8146d" +checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" dependencies = [ "typewit", ] @@ -1228,18 +1256,29 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "winapi", + "windows-sys 0.48.0", +] + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.2", + "libc", + "redox_syscall", ] [[package]] @@ -1271,9 +1310,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" dependencies = [ "cc", "pkg-config", @@ -1288,9 +1327,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -1298,9 +1337,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru-cache" @@ 
-1339,7 +1378,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -1350,15 +1389,15 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -1393,9 +1432,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", @@ -1404,16 +1443,15 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", "memoffset", "pin-utils", - "static_assertions", ] [[package]] @@ -1438,9 +1476,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -1457,41 +1495,39 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] -name = "once_cell" -version = "1.18.0" +name = "object" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl-probe" @@ -1600,28 +1636,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] name = "paste" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pear" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ec95680a7087503575284e5063e14b694b7a9c0b065e5dceec661e0497127e8" +checksum = "4ccca0f6c17acc81df8e242ed473ec144cbf5c98037e69aa6d144780aad103c8" dependencies = [ "inlinable_string", "pear_codegen", @@ -1630,14 +1666,14 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9661a3a53f93f09f2ea882018e4d7c88f6ff2956d809a276060476fd8c879d3c" +checksum = "2e22670e8eb757cff11d6c199ca7b987f352f0346e0be4dd23869ec72cb53c77" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] @@ -1657,15 +1693,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "persy" -version = "1.4.4" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3712821f12453814409ec149071bd4832a8ec458e648579c104aee30ed70b300" +checksum = "3cd38c602b23c2f451842d89f27cd5e0d4b292176daf40feeda859c658dcdc76" dependencies = [ "crc", "data-encoding", @@ -1679,29 +1715,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -1721,21 +1757,21 @@ dependencies = [ [[package]] name = 
"pkg-config" -version = "0.3.27" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "platforms" -version = "3.1.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "png" -version = "0.17.9" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11" +checksum = "1f6c3c3e617595665b8ea2ff95a86066be38fb121ff920a9c0eb282abcd1da5a" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -1744,6 +1780,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1752,12 +1794,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.9" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9825a04601d60621feed79c4e6b56d65db77cdca55cef43b46b0de1096d1c282" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] @@ -1767,27 +1809,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] name = "proc-macro2" -version = "1.0.61" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a6f739a0c0addeaf6ed75150b95743aa18643a3c6f40409ed7b6db3a6911f" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] [[package]] name = "proc-macro2-diagnostics" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "606c4ba35817e2922a308af55ad51bab3645b59eae5c570d4a6cf07e36bd493b" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", "version_check", "yansi", ] @@ -1800,9 +1842,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.28" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -1839,42 +1881,34 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.8.4" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.2", + "regex-automata 0.4.5", + "regex-syntax 0.8.2", ] [[package]] @@ -1886,6 +1920,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.2", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -1894,17 +1939,17 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.2" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.2", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -1927,6 +1972,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-rustls", "tokio-socks", @@ -1935,7 +1981,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -1957,12 +2003,26 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -2028,11 +2088,11 @@ version = "0.11.3" source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "as_variant", - "base64 0.21.2", + "base64 0.21.7", "bytes", "form_urlencoded", "http", - "indexmap 2.0.0", + "indexmap 2.2.1", "js_int", "konst", "percent-encoding", @@ -2056,7 +2116,7 @@ version = "0.26.0" source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "as_variant", - "indexmap 2.0.0", + "indexmap 
2.2.1", "js_int", "js_option", "percent-encoding", @@ -2114,8 +2174,8 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.21", - "toml", + "syn 2.0.48", + "toml 0.7.8", ] [[package]] @@ -2135,7 +2195,7 @@ name = "ruma-signatures" version = "0.13.1" source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ - "base64 0.21.2", + "base64 0.21.7", "ed25519-dalek", "pkcs8", "rand", @@ -2167,7 +2227,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.4.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2177,16 +2237,22 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" +checksum = "a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" dependencies = [ - "base64 0.13.1", + "base64 0.21.7", "blake2b_simd", - "constant_time_eq 0.1.5", + "constant_time_eq", "crossbeam-utils", ] +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -2204,12 +2270,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.2" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring", + "ring 0.17.7", "rustls-webpki", "sct", ] @@ -2228,58 +2294,58 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.2", + "base64 0.21.7", ] [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = 
"fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.52.0", ] [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -2290,9 +2356,9 @@ checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" [[package]] name = "security-framework" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2303,9 +2369,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -2313,38 +2379,38 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "serde" -version = "1.0.164" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "serde_html_form" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53192e38d5c88564b924dbe9b60865ecbb71b81d38c4e61c817cffd3e36ef696" +checksum = "20e1066e1cfa6692a722cf40386a2caec36da5ddc4a2c16df592f0f609677e8c" dependencies = [ "form_urlencoded", - "indexmap 1.9.3", + "indexmap 2.2.1", "itoa", "ryu", "serde", @@ -2352,9 +2418,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.99" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -2363,18 +2429,19 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.11" 
+version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ + "itoa", "serde", ] [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -2393,11 +2460,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.22" +version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85" +checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.2.1", "itoa", "ryu", "serde", @@ -2417,9 +2484,9 @@ dependencies = [ [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -2428,9 +2495,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -2439,18 +2506,18 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" @@ -2463,15 +2530,18 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core", +] [[package]] name = "simd-adler32" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "simple_asn1" @@ -2487,34 +2557,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" 
-version = "1.10.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" -version = "0.4.9" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -2527,21 +2587,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "spki" -version = "0.7.2" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", ] -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "subslice" version = "0.2.3" @@ -2570,9 +2630,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.21" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1182caafaab7018eaea9b404afa8184c0baf42a04d5e10ae4f4843c2029c8aab" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -2586,23 +2646,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "thiserror" -version = "1.0.40" +name = "system-configuration" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "thiserror" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] @@ -2639,9 +2720,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" +checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" dependencies = [ "libc", "paste", @@ -2650,9 +2731,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.3+5.3.0-patched" +version = "0.5.4+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a678df20055b43e57ef8cddde41cdfda9a3c1a060b67f4c5836dfb1d78543ba8" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" dependencies = [ "cc", "libc", @@ -2660,9 +2741,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" +checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -2670,11 +2751,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.22" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ + "deranged", "itoa", + "powerfmt", "serde", "time-core", "time-macros", @@ -2682,15 +2765,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -2712,31 +2795,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.2" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", "mio", "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] @@ -2774,9 +2857,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" 
+checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -2788,32 +2871,57 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.5" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.11" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.2.1", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.2.1", "serde", "serde_spanned", "toml_datetime", @@ -2837,11 +2945,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.4.2", "bytes", "futures-core", "futures-util", @@ -2869,11 +2977,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -2882,20 +2989,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -2914,12 +3021,23 @@ 
dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", "tracing-core", ] @@ -2933,15 +3051,15 @@ dependencies = [ "opentelemetry", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.1.4", "tracing-subscriber", ] [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -2952,7 +3070,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.2.0", ] [[package]] @@ -3002,42 +3120,51 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typewit" -version = "1.4.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4061a10d4d8f3081a8ccc025182afd8434302d8d4b4503ec6d8510d09df08c2d" +checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +dependencies = [ + "typewit_proc_macros", +] + +[[package]] +name = "typewit_proc_macros" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" [[package]] name = "uncased" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b9bc53168a4be7402ab86c3aad243a84dd7381d09be0eddc81280c1da95ca68" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -3050,15 +3177,15 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" [[package]] name = "untrusted" @@ -3067,21 +3194,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] -name = "url" -version = "2.4.0" +name = "untrusted" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] [[package]] name = "uuid" -version = "1.3.4" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "getrandom", ] @@ -3121,9 +3254,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3131,24 +3264,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" dependencies = [ "cfg-if", "js-sys", @@ -3158,9 +3291,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3168,28 +3301,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" 
+checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -3197,9 +3330,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" +checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" [[package]] name = "widestring" @@ -3209,9 +3342,9 @@ checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" -version = "2.1.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" +checksum = "495ec47bf3c1345005f40724f0269362c8556cbc43aed0526ed44cae1d35fceb" [[package]] name = "winapi" @@ -3235,147 +3368,147 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + 
+[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.2" 
+version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.4.7" +version = "0.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" +checksum = "1931d78a9c73861da0134f453bb1f790ce49b2e30eba8410b4b79bac72b46a2d" dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.50.0" @@ -3388,15 +3521,35 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377" + +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" [[package]] name = "zigzag" @@ -3409,11 +3562,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/src/database/mod.rs b/src/database/mod.rs index e247d9f..425ef4e 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -852,7 +852,9 @@ impl KeyValueDatabase { if rule.is_some() { let mut rule = 
rule.unwrap().clone(); rule.rule_id = content_rule_transformation[1].to_owned(); - rules_list.content.remove(content_rule_transformation[0]); + rules_list + .content + .shift_remove(content_rule_transformation[0]); rules_list.content.insert(rule); } } @@ -875,7 +877,7 @@ impl KeyValueDatabase { if let Some(rule) = rule { let mut rule = rule.clone(); rule.rule_id = transformation[1].to_owned(); - rules_list.underride.remove(transformation[0]); + rules_list.underride.shift_remove(transformation[0]); rules_list.underride.insert(rule); } } From 7fb9e99649786afbed97d31d2a26e68374e2b15d Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 29 Jan 2024 16:08:29 -0800 Subject: [PATCH 1528/1727] update ring and jsonwebtoken to remove ring ^0.16 --- Cargo.lock | 81 ++++++++++++++++++------------------------------------ Cargo.toml | 4 +-- 2 files changed, 28 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81b270d..94a0c5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -190,12 +190,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" version = "0.21.7" @@ -383,7 +377,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.21.7", + "base64", "bytes", "clap", "directories", @@ -405,7 +399,7 @@ dependencies = [ "rand", "regex", "reqwest", - "ring 0.16.20", + "ring", "rocksdb", "ruma", "rusqlite", @@ -860,8 +854,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -936,7 +932,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.7", + "base64", "bytes", "headers-core", "http", @@ -1210,13 +1206,14 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "9.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" dependencies = [ - "base64 0.21.7", + "base64", + "js-sys", "pem", - "ring 0.16.20", + "ring", "serde", "serde_json", "simple_asn1", @@ -1684,11 +1681,12 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.1" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64 0.13.1", + "base64", + "serde", ] [[package]] @@ -1949,7 +1947,7 @@ version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.7", + "base64", "bytes", "encoding_rs", "futures-core", @@ -1994,21 +1992,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - 
"libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.7" @@ -2018,8 +2001,8 @@ dependencies = [ "cc", "getrandom", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.48.0", ] @@ -2088,7 +2071,7 @@ version = "0.11.3" source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ "as_variant", - "base64 0.21.7", + "base64", "bytes", "form_urlencoded", "http", @@ -2195,7 +2178,7 @@ name = "ruma-signatures" version = "0.13.1" source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" dependencies = [ - "base64 0.21.7", + "base64", "ed25519-dalek", "pkcs8", "rand", @@ -2241,7 +2224,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" dependencies = [ - "base64 0.21.7", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -2275,7 +2258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", + "ring", "rustls-webpki", "sct", ] @@ -2298,7 +2281,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.7", + "base64", ] [[package]] @@ -2307,8 +2290,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -2344,8 +2327,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -2580,12 +2563,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -3187,12 +3164,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index 5292399..c00d56a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,13 +73,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png # Used to encode server public key base64 = "0.21.2" # Used when hashing the state -ring = "0.16.20" +ring = "0.17.7" # Used when querying the SRV record of other servers trust-dns-resolver = "0.22.0" # Used to find matching events for appservices regex = "1.8.1" # jwt jsonwebtokens -jsonwebtoken = "8.3.0" +jsonwebtoken = "9.2.0" # Performance measurements tracing = { version = "0.1.37", features = [] } tracing-subscriber = { version = "0.3.17", 
features = ["env-filter"] } From d6c57f9b2e55c16218728aaf52b6ceae7908a3aa Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Tue, 30 Jan 2024 17:04:47 +0000 Subject: [PATCH 1529/1727] Publish oci image to the gitlab registry --- .gitlab-ci.yml | 50 ++++++++++++++++++++++++++++++++++++++++++++++---- DEPLOY.md | 4 ++-- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d4e49ce..639319c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ stages: - ci - artifacts + - publish variables: # Makes some things print in color @@ -84,10 +85,10 @@ oci-image:x86_64-unknown-linux-gnu: - ./bin/nix-build-and-cache .#oci-image # Make the output less difficult to find - - cp result oci-image.tar.gz + - cp result oci-image-amd64.tar.gz artifacts: paths: - - oci-image.tar.gz + - oci-image-amd64.tar.gz oci-image:aarch64-unknown-linux-musl: stage: artifacts @@ -101,10 +102,10 @@ oci-image:aarch64-unknown-linux-musl: - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl # Make the output less difficult to find - - cp result oci-image.tar.gz + - cp result oci-image-arm64v8.tar.gz artifacts: paths: - - oci-image.tar.gz + - oci-image-arm64v8.tar.gz debian:x86_64-unknown-linux-gnu: stage: artifacts @@ -125,3 +126,44 @@ debian:x86_64-unknown-linux-gnu: paths: - target - .gitlab-ci.d + +docker-publish: + stage: publish + image: docker:25.0.0 + services: + - docker:25.0.0-dind + variables: + IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit + IMAGE_SUFFIX_AMD64: amd64 + IMAGE_SUFFIX_ARM64V8: arm64v8 + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - docker load -i oci-image-amd64.tar.gz + - IMAGE_ID_AMD64=$(docker images -q conduit:next) + - docker load -i oci-image-arm64v8.tar.gz + - IMAGE_ID_ARM64V8=$(docker images -q conduit:next) + # Tag and push the architecture specific images + - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 + - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 + - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + # Tag the multi-arch image + - docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA + # Tag and push the git ref + - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME + # Tag git tags as 'latest' + - | + if [[ -n "$CI_COMMIT_TAG" ]]; then + docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:latest + fi + dependencies: + - oci-image:x86_64-unknown-linux-gnu + - oci-image:aarch64-unknown-linux-musl + only: + - next + - master + - tags diff --git a/DEPLOY.md b/DEPLOY.md index dcb1777..c74fb3f 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -37,8 +37,8 @@ If you use a system with an older glibc version (e.g. 
RHEL8), you might need to | `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) | -| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | -| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image.tar.gz?job=oci-image:aarch64-unknown-linux-musl) | +| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | +| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) | ```bash $ sudo wget -O /usr/local/bin/matrix-conduit From 3a63f9dfb66f60fd9cc755de1aaea7b05687af26 Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Fri, 2 Feb 2024 03:50:49 +0100 Subject: [PATCH 1530/1727] feat: support non-flake users This uses flakes-compat to read the `flake.nix` and expose it to non-flake users. Signed-off-by: Raito Bezarius --- default.nix | 10 ++++++++++ flake.lock | 17 +++++++++++++++++ flake.nix | 4 ++++ 3 files changed, 31 insertions(+) create mode 100644 default.nix diff --git a/default.nix b/default.nix new file mode 100644 index 0000000..f620865 --- /dev/null +++ b/default.nix @@ -0,0 +1,10 @@ +(import + ( + let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in + fetchTarball { + url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; + sha256 = lock.nodes.flake-compat.locked.narHash; + } + ) + { src = ./.; } +).defaultNix diff --git a/flake.lock b/flake.lock index 85b0ab5..1c97e5c 100644 --- a/flake.lock +++ b/flake.lock @@ -102,6 +102,22 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-utils": { "locked": { "lastModified": 1667395993, @@ -203,6 +219,7 @@ "attic": "attic", "crane": "crane_2", "fenix": "fenix", + "flake-compat": "flake-compat_2", "flake-utils": "flake-utils_2", "nix-filter": "nix-filter", "nixpkgs": "nixpkgs_2" diff --git a/flake.nix b/flake.nix index e0bc6fb..6ca4c72 100644 --- a/flake.nix +++ b/flake.nix @@ -3,6 +3,10 @@ nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; flake-utils.url = "github:numtide/flake-utils"; nix-filter.url = "github:numtide/nix-filter"; + flake-compat = { + url = "github:edolstra/flake-compat"; + flake = false; + }; fenix = { url = "github:nix-community/fenix"; From a2ac491c5460da736c8872747aa3417a65c81a4e 
Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 25 Nov 2023 12:42:05 -0500 Subject: [PATCH 1531/1727] bump ruma, add wrong room keys error code, tiny logging change can't update ruma to very latest commit because of the weird JsOption thing for syncv4 that i can't wrap my head around how to use, not important anyways Signed-off-by: strawberry --- Cargo.toml | 5 ++++- src/api/client_server/membership.rs | 6 +++--- src/api/client_server/room.rs | 2 +- src/service/admin/mod.rs | 2 +- src/service/rooms/spaces/mod.rs | 6 +++--- src/service/rooms/state_accessor/mod.rs | 10 ++++++++-- src/utils/error.rs | 8 +++++--- 7 files changed, 25 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c00d56a..0fa6914 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "5446ea979b314b90da1734f20efaff443d64f73d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } @@ -116,6 +116,9 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } +# stupid ruma using JsOption instead of Option +js_option = "0.1" + [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", features = ["resource"] } diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index ed59691..09f70a0 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -64,7 +64,7 @@ pub async fn join_room_by_id_route( .map(|user| user.server_name().to_owned()), ); - servers.push(body.room_id.server_name().to_owned()); + servers.push(body.room_id.server_name().unwrap().into()); join_room_by_id_helper( body.sender_user.as_deref(), @@ -105,7 +105,7 @@ pub async fn join_room_by_id_or_alias_route( .map(|user| user.server_name().to_owned()), ); - servers.push(room_id.server_name().to_owned()); + servers.push(room_id.server_name().unwrap().into()); (servers, room_id) } @@ -1366,7 +1366,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { // Ask a remote server if we don't have 
this room if !services().rooms.metadata.exists(room_id)? - && room_id.server_name() != services().globals.server_name() + && room_id.server_name() != Some(services().globals.server_name()) { if let Err(e) = remote_leave_room(user_id, room_id).await { warn!("Failed to leave room {} remotely: {}", user_id, e); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 0e2d932..20409a2 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -366,7 +366,7 @@ pub async fn create_room_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) + content: to_raw_value(&RoomNameEventContent::new(name.clone())) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 64958fc..bd68255 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1051,7 +1051,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + content: to_raw_value(&RoomNameEventContent::new(room_name)) .expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 615e9ca..0e84221 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -134,7 +134,7 @@ impl Service { if serde_json::from_str::(pdu.content.get()) .ok() - .and_then(|c| c.via) + .and_then(|c| Some(c.via)) .map_or(true, |v| v.is_empty()) { continue; @@ -185,7 +185,7 @@ impl Service { stack.push(children_ids); } } else { - let server = current_room.server_name(); + let server = current_room.server_name().unwrap(); if server == services().globals.server_name() { continue; } @@ -193,7 +193,7 @@ impl Service { // Early return so the client can see some data already break; } - warn!("Asking {server} for /hierarchy"); + debug!("Asking {server} for /hierarchy"); if let Ok(response) = services() .sending .send_federation_request( diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b00dc58..14c3bae 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -279,8 +279,14 @@ impl Service { .room_state_get(room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) - .map(|c: RoomNameEventContent| c.name) - .map_err(|_| Error::bad_database("Invalid room name event in database.")) + .map(|c: RoomNameEventContent| Some(c.name)) + .map_err(|e| { + error!( + "Invalid room name event in database for room {}. {}", + room_id, e + ); + Error::bad_database("Invalid room name event in database.") + }) }) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 6e88cf5..765a31b 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -116,9 +116,11 @@ impl Error { Self::BadRequest(kind, _) => ( kind.clone(), match kind { - Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => { - StatusCode::FORBIDDEN - } + WrongRoomKeysVersion { .. } + | Forbidden + | GuestAccessForbidden + | ThreepidAuthFailed + | ThreepidDenied => StatusCode::FORBIDDEN, Unauthorized | UnknownToken { .. 
} | MissingToken => StatusCode::UNAUTHORIZED, NotFound | Unrecognized => StatusCode::NOT_FOUND, LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS, From 6dcc8b6cf1dd8554c54e435132ee90a9a7353ca8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 7 Jan 2024 21:21:13 -0500 Subject: [PATCH 1532/1727] bump ruma to latest commit (syncv3 JsOption and push optional power levels) Signed-off-by: strawberry --- Cargo.toml | 2 +- src/api/client_server/sync.rs | 18 ++++++++++++++---- src/service/pusher/mod.rs | 12 ++++++++---- src/service/rooms/state_accessor/mod.rs | 5 +++-- 4 files changed, 26 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0fa6914..03f0cd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "5446ea979b314b90da1734f20efaff443d64f73d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "9a5142052c808275f47613d4b66cb6c9fc286079", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5757228..5e67529 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1614,12 +1614,22 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services().rooms.state_accessor.get_name(room_id)?.or(name), - avatar: services() + name: services() .rooms .state_accessor - .get_avatar(room_id)? - .map_or(avatar, |a| a.url), + .get_name(room_id)? + .or_else(|| name), + avatar: if let Some(avatar) = avatar { + ruma::JsOption::Some(avatar) + } else { + match services().rooms.state_accessor.get_avatar(room_id)? 
{ + ruma::JsOption::Some(avatar) => { + js_option::JsOption::Some(avatar.url.unwrap()) + } + ruma::JsOption::Null => ruma::JsOption::Null, + ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } + }, initial: Some(roomsince == &0), is_dm: None, invite_state: None, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 315c5ef..418b7a8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,6 +1,6 @@ mod data; pub use data::Data; -use ruma::events::AnySyncTimelineEvent; +use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx}; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; @@ -193,6 +193,12 @@ impl Service { pdu: &Raw, room_id: &RoomId, ) -> Result<&'a [Action]> { + let power_levels = PushConditionPowerLevelsCtx { + users: power_levels.users.clone(), + users_default: power_levels.users_default, + notifications: power_levels.notifications.clone(), + }; + let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently @@ -201,9 +207,7 @@ impl Service { .users .displayname(user)? .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), + power_levels: Some(power_levels), }; Ok(ruleset.get_actions(pdu, &ctx)) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 14c3bae..f49d8a3 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -5,6 +5,7 @@ use std::{ }; pub use data::Data; +use js_option::JsOption; use lru_cache::LruCache; use ruma::{ events::{ @@ -290,12 +291,12 @@ impl Service { }) } - pub fn get_avatar(&self, room_id: &RoomId) -> Result> { + pub fn get_avatar(&self, room_id: &RoomId) -> Result> { services() .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomAvatar, "")? 
- .map_or(Ok(None), |s| { + .map_or(Ok(JsOption::Undefined), |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) }) From e7070843456bf259e395b98c60f9b185eb75e86b Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 16 Feb 2024 20:52:07 +0000 Subject: [PATCH 1533/1727] chore: bump ruma to latest commit (as of 2024-02-16) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 03f0cd6..0cf062f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "9a5142052c808275f47613d4b66cb6c9fc286079", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f1a8497ecb0cc24b0159f301336a3bf3896d476e", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } From d841b81c56ab24210e966201258c6f7b9438340a Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 16 Feb 2024 20:52:19 +0000 Subject: [PATCH 1534/1727] chore: update Cargo.lock --- Cargo.lock | 79 +++++++++++++++++++++++------------------------------- 1 file changed, 33 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94a0c5e..fc17fda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -387,6 +387,7 @@ dependencies = [ "http", "hyper", "image", + "js_option", "jsonwebtoken", "lazy_static", "lru-cache", @@ -708,7 +709,7 @@ dependencies = [ "atomic", "pear", "serde", - "toml 0.8.8", + "toml", "uncased", "version_check", ] @@ -1802,12 +1803,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "once_cell", - "toml_edit 0.19.15", + "toml_edit 0.20.7", ] [[package]] @@ -2018,8 +2018,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.9.4" +source = 
"git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "assign", "js_int", @@ -2037,8 +2037,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "js_int", "ruma-common", @@ -2049,9 +2049,10 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.17.4" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ + "as_variant", "assign", "bytes", "http", @@ -2067,8 +2068,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.12.1" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "as_variant", "base64", @@ -2095,8 +2096,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.27.11" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "as_variant", "indexmap 2.2.1", @@ -2117,8 +2118,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "js_int", "ruma-common", @@ -2129,8 +2130,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.9.3" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "js_int", "thiserror", @@ -2138,8 +2139,8 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "js_int", "ruma-common", @@ -2148,8 +2149,8 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.12.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "once_cell", 
"proc-macro-crate", @@ -2158,13 +2159,13 @@ dependencies = [ "ruma-identifiers-validation", "serde", "syn 2.0.48", - "toml 0.7.8", + "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "js_int", "ruma-common", @@ -2175,8 +2176,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.14.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "base64", "ed25519-dalek", @@ -2191,8 +2192,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.10.0" +source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" dependencies = [ "itertools", "js_int", @@ -2846,18 +2847,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "toml" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.19.15", -] - [[package]] name = "toml" version = "0.8.8" @@ -2881,13 +2870,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ "indexmap 2.2.1", - "serde", - "serde_spanned", "toml_datetime", "winnow", ] From 4c06f329c4d2f5128d6293de9bb0342559422746 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 16 Feb 2024 21:13:59 +0000 Subject: [PATCH 1535/1727] refactor: appease clippy --- src/api/client_server/sync.rs | 2 +- src/service/rooms/event_handler/mod.rs | 13 ++++++++++--- src/service/rooms/spaces/mod.rs | 3 +-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5e67529..490a947 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1618,7 +1618,7 @@ pub async fn sync_events_v4_route( .rooms .state_accessor .get_name(room_id)? 
- .or_else(|| name), + .or(name), avatar: if let Some(avatar) = avatar { ruma::JsOption::Some(avatar) } else { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e7db6f7..375d1ff 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -966,14 +966,21 @@ impl Service { debug!("Resolving state"); - let lock = services().globals.stateres_mutex.lock(); - let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let fetch_event = |id: &_| { let res = services().rooms.timeline.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } res.ok().flatten() - }) { + }; + + let lock = services().globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + fetch_event, + ) { Ok(new_state) => new_state, Err(_) => { return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 0e84221..9052366 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -133,8 +133,7 @@ impl Service { .ok_or_else(|| Error::bad_database("Event in space state not found"))?; if serde_json::from_str::(pdu.content.get()) - .ok() - .and_then(|c| Some(c.via)) + .ok().map(|c| c.via) .map_or(true, |v| v.is_empty()) { continue; From 976a73a0e5e9aa7fc880a39f6478ec13943e2d3c Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 16 Feb 2024 21:19:40 +0000 Subject: [PATCH 1536/1727] style: appease rustfmt --- src/api/client_server/sync.rs | 6 +----- src/service/rooms/spaces/mod.rs | 3 ++- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 490a947..86b3b24 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1614,11 +1614,7 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services() - .rooms - .state_accessor - .get_name(room_id)? 
- .or(name), + name: services().rooms.state_accessor.get_name(room_id)?.or(name), avatar: if let Some(avatar) = avatar { ruma::JsOption::Some(avatar) } else { diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 9052366..07d576a 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -133,7 +133,8 @@ impl Service { .ok_or_else(|| Error::bad_database("Event in space state not found"))?; if serde_json::from_str::(pdu.content.get()) - .ok().map(|c| c.via) + .ok() + .map(|c| c.via) .map_or(true, |v| v.is_empty()) { continue; From 1c6a4b1b24c92e8fc4132c453fffab66d922dc71 Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Sun, 18 Feb 2024 01:36:50 +0000 Subject: [PATCH 1537/1727] feat(ci): push oci-image to docker hub --- .gitlab-ci.yml | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 639319c..4f4aa69 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -127,17 +127,14 @@ debian:x86_64-unknown-linux-gnu: - target - .gitlab-ci.d -docker-publish: +.push-oci-image: stage: publish image: docker:25.0.0 services: - docker:25.0.0-dind variables: - IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit IMAGE_SUFFIX_AMD64: amd64 IMAGE_SUFFIX_ARM64V8: arm64v8 - before_script: - - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY script: - docker load -i oci-image-amd64.tar.gz - IMAGE_ID_AMD64=$(docker images -q conduit:next) @@ -167,3 +164,17 @@ docker-publish: - next - master - tags + +oci-image:push-gitlab: + extends: .push-oci-image + variables: + IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + +oci-image:push-dockerhub: + extends: .push-oci-image + variables: + IMAGE_NAME: matrixconduit/matrix-conduit + before_script: + - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD \ No newline at end of file From ace9637bc299df4c59efbeb3e1c432b239b7a165 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 23 Feb 2024 19:39:30 +0000 Subject: [PATCH 1538/1727] replace unwraps with expects --- src/api/client_server/membership.rs | 14 ++++++++++++-- src/api/client_server/sync.rs | 2 +- src/service/rooms/spaces/mod.rs | 4 +++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 09f70a0..5c78a1c 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -64,7 +64,12 @@ pub async fn join_room_by_id_route( .map(|user| user.server_name().to_owned()), ); - servers.push(body.room_id.server_name().unwrap().into()); + servers.push( + body.room_id + .server_name() + .expect("Room IDs should always have a server name") + .into(), + ); join_room_by_id_helper( body.sender_user.as_deref(), @@ -105,7 +110,12 @@ pub async fn join_room_by_id_or_alias_route( .map(|user| user.server_name().to_owned()), ); - servers.push(room_id.server_name().unwrap().into()); + servers.push( + room_id + .server_name() + .expect("Room IDs should always have a server name") + .into(), + ); (servers, room_id) } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 86b3b24..600a973 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1620,7 +1620,7 @@ pub async fn sync_events_v4_route( } else { match services().rooms.state_accessor.get_avatar(room_id)? 
{ ruma::JsOption::Some(avatar) => { - js_option::JsOption::Some(avatar.url.unwrap()) + js_option::JsOption::from_option(avatar.url) } ruma::JsOption::Null => ruma::JsOption::Null, ruma::JsOption::Undefined => ruma::JsOption::Undefined, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 07d576a..b0a9ed2 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -185,7 +185,9 @@ impl Service { stack.push(children_ids); } } else { - let server = current_room.server_name().unwrap(); + let server = current_room + .server_name() + .expect("Room IDs should always have a server name"); if server == services().globals.server_name() { continue; } From 8aa915acb90552398530cc20efa1c71f788da9f6 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 23 Feb 2024 20:29:17 +0000 Subject: [PATCH 1539/1727] bump ruma, support deprecated user login field --- Cargo.lock | 24 ++++++++--------- Cargo.toml | 2 +- src/api/client_server/session.rs | 45 ++++++++++++++++++++------------ 3 files changed, 42 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc17fda..e20c3f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2019,7 +2019,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "assign", "js_int", @@ -2038,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "js_int", "ruma-common", @@ -2050,7 +2050,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "as_variant", "assign", @@ -2069,7 +2069,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "as_variant", "base64", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "as_variant", "indexmap 2.2.1", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ 
"js_int", "ruma-common", @@ -2131,7 +2131,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "js_int", "thiserror", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "js_int", "ruma-common", @@ -2150,7 +2150,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "once_cell", "proc-macro-crate", @@ -2165,7 +2165,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "js_int", "ruma-common", @@ -2177,7 +2177,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "base64", "ed25519-dalek", @@ -2193,7 +2193,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=f1a8497ecb0cc24b0159f301336a3bf3896d476e#f1a8497ecb0cc24b0159f301336a3bf3896d476e" +source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 0cf062f..4bc9ad5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f1a8497ecb0cc24b0159f301336a3bf3896d476e", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = 
{ git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index c17bd99..23f0b45 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -42,24 +42,31 @@ pub async fn get_login_types_route( /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. pub async fn login_route(body: Ruma) -> Result { + // To allow deprecated login methods + #![allow(deprecated)] // Validate login method // TODO: Other login methods let user_id = match &body.login_info { login::v3::LoginInfo::Password(login::v3::Password { identifier, password, + user, + address: _, + medium: _, }) => { - let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { - user_id.to_lowercase() + let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services().globals.server_name(), + ) + } else if let Some(user) = user { + UserId::parse(user) } else { warn!("Bad login type: {:?}", &body.login_info); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; - let user_id = - UserId::parse_with_server_name(username, services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + } + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let hash = services() .users .password_hash(&user_id)? @@ -105,22 +112,28 @@ pub async fn login_route(body: Ruma) -> Result { + login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { + identifier, + user, + }) => { if !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Forbidden login type.", )); }; - let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { - user_id.to_lowercase() + if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services().globals.server_name(), + ) + } else if let Some(user) = user { + UserId::parse(user) } else { + warn!("Bad login type: {:?}", &body.login_info); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; - - UserId::parse_with_server_name(username, services().globals.server_name()).map_err( - |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), - )? + } + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))? 
} _ => { warn!("Unsupported or unknown login type: {:?}", &body.login_info); From b27e9ea95cdddce51eca99f0fd003de131c23556 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 25 Feb 2024 08:49:20 +0000 Subject: [PATCH 1540/1727] chore: bump ruma to latest commit (as of 2024-02-25) --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e20c3f0..78e8526 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2019,7 +2019,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "assign", "js_int", @@ -2038,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "js_int", "ruma-common", @@ -2050,7 +2050,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "as_variant", "assign", @@ -2069,7 +2069,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "as_variant", "base64", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "as_variant", "indexmap 2.2.1", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "js_int", "ruma-common", @@ -2131,7 +2131,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "js_int", "thiserror", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = 
"git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "js_int", "ruma-common", @@ -2150,7 +2150,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "once_cell", "proc-macro-crate", @@ -2165,7 +2165,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "js_int", "ruma-common", @@ -2177,7 +2177,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "base64", "ed25519-dalek", @@ -2193,7 +2193,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3#abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3" +source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 4bc9ad5..4f873cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "abebde0cf1cbf3eb1cf8295b240c5d3f926da3a3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } From 21a5fa3ef0ccb1ad9ae9ea6328146e6dd69e4d78 Mon Sep 17 00:00:00 2001 From: 
Matthias Ahouansou Date: Sun, 25 Feb 2024 10:30:30 +0000 Subject: [PATCH 1541/1727] refactor: use re-exported JsOption from ruma rather than directly adding it as a dependency --- Cargo.lock | 1 - Cargo.toml | 3 --- src/api/client_server/sync.rs | 12 +++++------- src/service/rooms/state_accessor/mod.rs | 3 +-- 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78e8526..e99928e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -387,7 +387,6 @@ dependencies = [ "http", "hyper", "image", - "js_option", "jsonwebtoken", "lazy_static", "lru-cache", diff --git a/Cargo.toml b/Cargo.toml index 4f873cf..e8c1c8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,9 +116,6 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } -# stupid ruma using JsOption instead of Option -js_option = "0.1" - [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", features = ["resource"] } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 600a973..04fdb2b 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -20,7 +20,7 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::Raw, - uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; use std::{ collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -1616,14 +1616,12 @@ pub async fn sync_events_v4_route( sync_events::v4::SlidingSyncRoom { name: services().rooms.state_accessor.get_name(room_id)?.or(name), avatar: if let Some(avatar) = avatar { - ruma::JsOption::Some(avatar) + JsOption::Some(avatar) } else { match services().rooms.state_accessor.get_avatar(room_id)? { - ruma::JsOption::Some(avatar) => { - js_option::JsOption::from_option(avatar.url) - } - ruma::JsOption::Null => ruma::JsOption::Null, - ruma::JsOption::Undefined => ruma::JsOption::Undefined, + JsOption::Some(avatar) => JsOption::from_option(avatar.url), + JsOption::Null => JsOption::Null, + JsOption::Undefined => JsOption::Undefined, } }, initial: Some(roomsince == &0), diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index f49d8a3..903ad47 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -5,7 +5,6 @@ use std::{ }; pub use data::Data; -use js_option::JsOption; use lru_cache::LruCache; use ruma::{ events::{ @@ -17,7 +16,7 @@ use ruma::{ }, StateEventType, }, - EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use tracing::error; From f4e57fdb225e8bf87608dce62b759755b74c8393 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 1 Feb 2024 12:05:59 +0100 Subject: [PATCH 1542/1727] Avoid federation when it is not necessary --- src/api/server_server.rs | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index db17d58..1ba2edc 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1799,6 +1799,13 @@ pub async fn get_devices_route( return Err(Error::bad_config("Federation is disabled.")); } + if body.user_id.server_name() != services().globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to access user from other server.", + )); + } + let sender_servername = body .sender_servername .as_ref() @@ -1873,6 +1880,13 @@ pub 
async fn get_profile_information_route( return Err(Error::bad_config("Federation is disabled.")); } + if body.user_id.server_name() != services().globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to access user from other server.", + )); + } + let mut displayname = None; let mut avatar_url = None; let mut blurhash = None; @@ -1909,6 +1923,17 @@ pub async fn get_keys_route(body: Ruma) -> Result Date: Wed, 28 Feb 2024 16:24:26 +0100 Subject: [PATCH 1543/1727] fix: avoid panic when client is confused about rooms --- src/api/client_server/sync.rs | 3 +++ src/database/key_value/rooms/timeline.rs | 2 +- src/service/rooms/event_handler/mod.rs | 17 ++++++++++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5757228..eefdffa 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1476,6 +1476,9 @@ pub async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { + if !services().rooms.metadata.exists(room_id)? { + continue; + } let todo_room = todo_rooms .entry(room_id.clone()) .or_insert((BTreeSet::new(), 0, u64::MAX)); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index f322d43..0331a62 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -331,7 +331,7 @@ fn count_to_id( .rooms .short .get_shortroomid(room_id)? - .expect("room exists") + .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))? .to_be_bytes() .to_vec(); let mut pdu_id = prefix.clone(); diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e7db6f7..4debc60 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -184,7 +184,22 @@ impl Service { } if errors >= 5 { - break; + // Timeout other events + match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + continue; } if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { From 726b6f0fa64bfbb13953366256e52052912cab96 Mon Sep 17 00:00:00 2001 From: olly1240 Date: Tue, 24 Oct 2023 11:56:49 +0000 Subject: [PATCH 1544/1727] Fixed nginx proxy_pass directive --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index c74fb3f..dcc41bd 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -279,7 +279,7 @@ server { client_max_body_size 20M; location /_matrix/ { - proxy_pass http://127.0.0.1:6167$request_uri; + proxy_pass http://127.0.0.1:6167; proxy_set_header Host $http_host; proxy_buffering off; proxy_read_timeout 5m; From 62dda7a43f2797200d08f72078267892887fd135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 29 Feb 2024 10:28:06 +0100 Subject: [PATCH 1545/1727] improvement: delete old rocksdb LOG files --- src/database/abstraction/rocksdb.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b40c439..7a93d78 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -49,6 +49,9 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> 
rocksdb::O db_opts.set_max_background_jobs(6); db_opts.set_bytes_per_sync(1048576); + // https://github.com/facebook/rocksdb/issues/849 + db_opts.set_keep_log_file_num(100); + // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords // // Unclean shutdowns of a Matrix homeserver are likely to be fine when From a159fff08aa07d9aa865bb03acbcdaec8cde5c6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 29 Feb 2024 10:28:38 +0100 Subject: [PATCH 1546/1727] improvement: deactivate old presence code because it slows down sync The problem is that for each sync, it creates a new rocksdb iterator for each room, and creating iterators is somewhat expensive --- src/service/rooms/edus/presence/mod.rs | 37 ++++++++++++++------------ 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 860aea1..4b929d2 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -17,29 +17,32 @@ impl Service { /// make sure users outside these rooms can't see them. pub fn update_presence( &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, + _user_id: &UserId, + _room_id: &RoomId, + _presence: PresenceEvent, ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) + // self.db.update_presence(user_id, room_id, presence) + Ok(()) } /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) + pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> { + // self.db.ping_presence(user_id) + Ok(()) } pub fn get_last_presence_event( &self, - user_id: &UserId, - room_id: &RoomId, + _user_id: &UserId, + _room_id: &RoomId, ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; + // let last_update = match self.db.last_presence_update(user_id)? { + // Some(last) => last, + // None => return Ok(None), + // }; - self.db.get_presence_event(room_id, user_id, last_update) + // self.db.get_presence_event(room_id, user_id, last_update) + Ok(None) } /* TODO @@ -111,12 +114,12 @@ impl Service { }*/ /// Returns the most recent presence updates that happened after the event with id `since`. 
- #[tracing::instrument(skip(self, since, room_id))] pub fn presence_since( &self, - room_id: &RoomId, - since: u64, + _room_id: &RoomId, + _since: u64, ) -> Result> { - self.db.presence_since(room_id, since) + // self.db.presence_since(room_id, since) + Ok(HashMap::new()) } } From e06e15d4ec614d284d494bac795299f05e5711fb Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 3 Mar 2024 11:26:18 +0000 Subject: [PATCH 1547/1727] fix(accounts): don't give guests admin --- src/api/client_server/account.rs | 29 +++++++++++++-------- src/service/admin/mod.rs | 42 +++++++++++++++---------------- src/service/rooms/timeline/mod.rs | 20 ++++----------- 3 files changed, 44 insertions(+), 47 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 4655130..3a79121 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -149,17 +149,18 @@ pub async fn register_route(body: Ruma) -> Result) -> Result| { services() @@ -1105,6 +1095,24 @@ impl Service { Ok(()) } + /// Gets the room ID of the admin room + /// + /// If the room does not exist, this function panics, since it should have been created on first run + // ^ was the case before this function when the following code was re-used in multiple places + pub(crate) fn get_admin_room(&self) -> OwnedRoomId { + let admin_room_alias: Box = + format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias) + .expect("Room ID should be valid unicode, since this server created it") + .expect("Admin room must exist") + } + /// Invite the user to the conduit admin room. /// /// In conduit, this is equivalent to granting admin privileges. @@ -1113,15 +1121,7 @@ impl Service { user_id: &UserId, displayname: String, ) -> Result<()> { - let admin_room_alias: Box = - format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - let room_id = services() - .rooms - .alias - .resolve_local_alias(&admin_room_alias)? 
- .expect("Admin room must exist"); + let room_id = services().admin.get_admin_room(); let mutex_state = Arc::clone( services() diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index ef09d06..13193e5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -28,7 +28,7 @@ use ruma::{ state_res, state_res::{Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomAliasId, RoomId, ServerName, UserId, + OwnedServerName, RoomId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -448,12 +448,7 @@ impl Service { .search .index_pdu(shortroomid, &pdu_id, &body)?; - let admin_room = services().rooms.alias.resolve_local_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", services().globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; + let admin_room = services().admin.get_admin_room(); let server_user = format!("@conduit:{}", services().globals.server_name()); let to_conduit = body.starts_with(&format!("{server_user}: ")) @@ -466,7 +461,7 @@ impl Service { let from_conduit = pdu.sender == server_user && services().globals.emergency_password().is_none(); - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { + if to_conduit && !from_conduit && admin_room == pdu.room_id { services().admin.process_message(body); } } @@ -820,13 +815,8 @@ impl Service { let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; - let admin_room = services().rooms.alias.resolve_local_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", services().globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - if admin_room.filter(|v| v == room_id).is_some() { + let admin_room = services().admin.get_admin_room(); + if admin_room == room_id { match pdu.event_type() { TimelineEventType::RoomEncryption => { warn!("Encryption is not allowed in the admins room"); From 56a57d5489978a6b7a0a016a52e0a4a3347e687f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 3 Mar 2024 15:55:34 +0100 Subject: [PATCH 1548/1727] docs: small fixes for the README --- README.md | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 5e01c8c..bf7bde5 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,6 @@ There are still a few important features missing: - E2EE emoji comparison over federation (E2EE chat works) - Outgoing read receipts, typing, presence over federation (incoming works) -Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). - #### How can I deploy my own? - Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) @@ -44,13 +42,21 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md] #### How can I contribute? -1. Look for an issue you would like to work on and make sure it's not assigned - to other users -2. Ask someone to assign the issue to you (comment on the issue or chat in - [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)) -3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :) +1. Look for an issue you would like to work on and make sure no one else is currently working on it. +2. 
Tell us that you are working on the issue (comment on the issue or chat in + [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions. +3. Fork the repo, create a new branch and push commits. 4. Submit a MR +#### Contact + +If you have any questions, feel free to +- Ask in `#conduit:fachschaften.org` on Matrix +- Write an E-Mail to `conduit@koesters.xyz` +- Send an direct message to `@timokoesters:fachschaften.org` on Matrix +- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) + + #### Thanks to Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. @@ -60,14 +66,6 @@ Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust - axum: A modular web framework -#### Contact - -If you run into any question, feel free to -- Ask us in `#conduit:fachschaften.org` on Matrix -- Write an E-Mail to `conduit@koesters.xyz` -- Send an direct message to `timokoesters@fachschaften.org` on Matrix -- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) - #### Donate Liberapay: \ From da5975d7271a717b2f788931771b879d0ea78bd9 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 3 Mar 2024 22:42:24 +0000 Subject: [PATCH 1549/1727] fix: avoid panics when admin room is not available --- src/api/client_server/account.rs | 48 +++--- src/service/admin/mod.rs | 256 +++++++++++++++--------------- src/service/rooms/timeline/mod.rs | 162 +++++++++---------- 3 files changed, 235 insertions(+), 231 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 3a79121..d4529a4 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -149,18 +149,17 @@ pub async fn register_route(body: Ruma) -> Result) -> Result| { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, + ) + .unwrap(); + }; - let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; + loop { + tokio::select! { + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await + }; - loop { - tokio::select! 
{ - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await - }; + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.to_owned()) + .or_default(), + ); - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.to_owned()) - .or_default(), - ); + let state_lock = mutex_state.lock().await; - let state_lock = mutex_state.lock().await; + send_message(message_content, &state_lock); - send_message(message_content, &state_lock); - - drop(state_lock); + drop(state_lock); + } } } } @@ -1097,9 +1098,8 @@ impl Service { /// Gets the room ID of the admin room /// - /// If the room does not exist, this function panics, since it should have been created on first run - // ^ was the case before this function when the following code was re-used in multiple places - pub(crate) fn get_admin_room(&self) -> OwnedRoomId { + /// Errors are propagated from the database, and will have None if there is no admin room + pub(crate) fn get_admin_room(&self) -> Result> { let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) .try_into() @@ -1109,8 +1109,6 @@ impl Service { .rooms .alias .resolve_local_alias(&admin_room_alias) - .expect("Room ID should be valid unicode, since this server created it") - .expect("Admin room must exist") } /// Invite the user to the conduit admin room. @@ -1121,94 +1119,93 @@ impl Service { user_id: &UserId, displayname: String, ) -> Result<()> { - let room_id = services().admin.get_admin_room(); + if let Some(room_id) = services().admin.get_admin_room()? 
{ + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + // Use the server user to grant the new admin's power level + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); - // Use the server user to grant the new admin's power level - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + // Invite and join the real user + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + &room_id, + &state_lock, + )?; - // Invite and join the real user - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - &room_id, - &state_lock, - )?; + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); - // Set power level - let mut users = BTreeMap::new(); - users.insert(conduit_user.to_owned(), 100.into()); - users.insert(user_id.to_owned(), 100.into()); + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + 
users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // Send welcome message - services().rooms.timeline.build_and_append_pdu( + // Send welcome message + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( @@ -1225,7 +1222,10 @@ impl Service { &state_lock, )?; - Ok(()) + Ok(()) + } else { + Ok(()) + } } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 13193e5..b66fc64 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -448,7 +448,6 @@ impl Service { .search .index_pdu(shortroomid, &pdu_id, &body)?; - let admin_room = services().admin.get_admin_room(); let server_user = format!("@conduit:{}", services().globals.server_name()); let to_conduit = body.starts_with(&format!("{server_user}: ")) @@ -461,8 +460,10 @@ impl Service { let from_conduit = pdu.sender == server_user && services().globals.emergency_password().is_none(); - if to_conduit && !from_conduit && admin_room == pdu.room_id { - services().admin.process_message(body); + if let Some(admin_room) = services().admin.get_admin_room()? { + if to_conduit && !from_conduit && admin_room == pdu.room_id { + services().admin.process_message(body); + } } } } @@ -815,84 +816,85 @@ impl Service { let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; - let admin_room = services().admin.get_admin_room(); - if admin_room == room_id { - match pdu.event_type() { - TimelineEventType::RoomEncryption => { - warn!("Encryption is not allowed in the admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Encryption is not allowed in the admins room.", - )); + if let Some(admin_room) = services().admin.get_admin_room()? 
{ + if admin_room == room_id { + match pdu.event_type() { + TimelineEventType::RoomEncryption => { + warn!("Encryption is not allowed in the admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption is not allowed in the admins room.", + )); + } + TimelineEventType::RoomMember => { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + let server_name = services().globals.server_name(); + let server_user = format!("@conduit:{}", server_name); + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if content.membership == MembershipState::Leave { + if target == server_user { + warn!("Conduit user cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Conduit user cannot leave from admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|m| m.ok()) + .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) + .count(); + if count < 2 { + warn!("Last admin cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Last admin cannot leave from admins room.", + )); + } + } + + if content.membership == MembershipState::Ban && pdu.state_key().is_some() { + if target == server_user { + warn!("Conduit user cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Conduit user cannot be banned in admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(|m| m.ok()) + .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) + .count(); + if count < 2 { + warn!("Last admin cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Last admin cannot be banned in admins room.", + )); + } + } + } + _ => {} } - TimelineEventType::RoomMember => { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let target = pdu - .state_key() - .filter(|v| v.starts_with('@')) - .unwrap_or(sender.as_str()); - let server_name = services().globals.server_name(); - let server_user = format!("@conduit:{}", server_name); - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if content.membership == MembershipState::Leave { - if target == server_user { - warn!("Conduit user cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot leave from admins room.", - )); - } - - let count = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == server_name) - .filter(|m| m != target) - .count(); - if count < 2 { - warn!("Last admin cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot leave from admins room.", - )); - } - } - - if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == server_user { - warn!("Conduit user cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot be banned in admins room.", - )); - } - - let count = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == 
server_name) - .filter(|m| m != target) - .count(); - if count < 2 { - warn!("Last admin cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot be banned in admins room.", - )); - } - } - } - _ => {} } } From 4934020ee705a5ca28839583281f257f4ee1ec24 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 4 Mar 2024 09:32:21 +0000 Subject: [PATCH 1550/1727] style: remove unnecessary else block --- src/service/admin/mod.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 44c7934..f6e52f7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1221,11 +1221,8 @@ impl Service { &room_id, &state_lock, )?; - - Ok(()) - } else { - Ok(()) } + Ok(()) } } From becaad677f22e3fb9f4a4e076f576110daae9393 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 14:22:54 +0000 Subject: [PATCH 1551/1727] refactor: use async-aware RwLocks and Mutexes where possible --- Cargo.lock | 12 + Cargo.toml | 1 + src/api/client_server/keys.rs | 27 +- src/api/client_server/membership.rs | 263 +++++++------ src/api/client_server/message.rs | 43 ++- src/api/client_server/profile.rs | 26 +- src/api/client_server/redact.rs | 38 +- src/api/client_server/room.rs | 493 +++++++++++++----------- src/api/client_server/state.rs | 30 +- src/api/client_server/sync.rs | 130 ++++--- src/api/server_server.rs | 13 +- src/lib.rs | 3 + src/service/admin/mod.rs | 503 +++++++++++++------------ src/service/globals/mod.rs | 18 +- src/service/mod.rs | 39 +- src/service/rooms/event_handler/mod.rs | 449 +++++++++++----------- src/service/rooms/lazy_loading/mod.rs | 14 +- src/service/rooms/spaces/mod.rs | 11 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 56 ++- src/service/users/mod.rs | 6 +- 21 files changed, 1171 insertions(+), 1006 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e99928e..426661b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -80,6 +80,17 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-recursion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "async-trait" version = "0.1.77" @@ -374,6 +385,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.7.0-alpha" dependencies = [ + "async-recursion", "async-trait", "axum", "axum-server", diff --git a/Cargo.toml b/Cargo.toml index e8c1c8b..93ff2f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,6 +115,7 @@ lazy_static = "1.4.0" async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } +async-recursion = "1.0.5" [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", features = ["resource"] } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 9fd0089..4af8890 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -339,17 +339,19 @@ pub(crate) async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); - let back_off = |id| match services() - .globals - .bad_query_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); + let back_off = 
|id| async { + match services() + .globals + .bad_query_ratelimiter + .write() + .await + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; let mut futures: FuturesUnordered<_> = get_over_federation @@ -359,7 +361,7 @@ pub(crate) async fn get_keys_helper bool>( .globals .bad_query_ratelimiter .read() - .unwrap() + .await .get(server) { // Exponential backoff @@ -428,7 +430,8 @@ pub(crate) async fn get_keys_helper bool>( device_keys.extend(response.device_keys); } _ => { - back_off(server.to_owned()); + back_off(server.to_owned()).await; + failures.insert(server.to_string(), json!({})); } } diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 5c78a1c..bc84b26 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -26,9 +26,10 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - sync::{Arc, RwLock}, + sync::Arc, time::{Duration, Instant}, }; +use tokio::sync::RwLock; use tracing::{debug, error, info, warn}; use crate::{ @@ -212,24 +213,28 @@ pub async fn kick_user_route( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(body.room_id.clone()) .or_default(), ); let state_lock = mutex_state.lock().await; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); @@ -276,24 +281,28 @@ pub async fn ban_user_route(body: Ruma) -> Result t, Err(_) => continue, }; @@ -710,7 +723,7 @@ async fn join_room_by_id_helper( .iter() .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) { - let (event_id, value) = match result { + let (event_id, value) = match result.await { Ok(t) => t, Err(_) => continue, }; @@ -784,12 +797,16 @@ async fn join_room_by_id_helper( let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; info!("Appending new room join event"); - services().rooms.timeline.append_pdu( - &parsed_join_pdu, - join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], - &state_lock, - )?; + services() + .rooms + .timeline + .append_pdu( + &parsed_join_pdu, + join_event, + vec![(*parsed_join_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; info!("Setting final room state for new room"); // We set the room state after inserting the pdu, so that we never have a moment in time @@ -902,18 +919,23 @@ async fn join_room_by_id_helper( }; // Try normal join first - let error = match services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - 
redacts: None, - }, - sender_user, - room_id, - &state_lock, - ) { + let error = match services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await + { Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), Err(e) => e, }; @@ -1109,7 +1131,7 @@ async fn make_join_request( make_join_response_and_server } -fn validate_and_add_event_id( +async fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, @@ -1125,24 +1147,26 @@ fn validate_and_add_event_id( )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match services() - .globals - .bad_event_ratelimiter - .write() - .unwrap() - .entry(id) - { - Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); + let back_off = |id| async { + match services() + .globals + .bad_event_ratelimiter + .write() + .await + .entry(id) + { + Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), } - Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() - .unwrap() + .await .get(&event_id) { // Exponential backoff @@ -1157,15 +1181,10 @@ fn validate_and_add_event_id( } } - if let Err(e) = ruma::signatures::verify_event( - &*pub_key_map - .read() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?, - &value, - room_version, - ) { + if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) + { warn!("Event {} failed verification {:?} {}", event_id, pdu, e); - back_off(event_id); + back_off(event_id).await; return Err(Error::BadServerResponse("Event failed verification.")); } @@ -1191,7 +1210,7 @@ pub(crate) async fn invite_helper<'a>( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); @@ -1312,34 +1331,38 @@ pub(crate) async fn invite_helper<'a>( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: services().users.displayname(user_id)?, - avatar_url: services().users.avatar_url(user_id)?, - is_direct: Some(is_direct), - third_party_invite: None, - blurhash: services().users.blurhash(user_id)?, - reason, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, + is_direct: Some(is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(user_id)?, + reason, + join_authorized_via_users_server: None, + }) + .expect("event is 
valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; drop(state_lock); @@ -1407,7 +1430,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option JoinRule::Public, - // according to spec "invite" is the default - _ => JoinRule::Invite, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { + RoomPreset::PublicChat => JoinRule::Public, + // according to spec "invite" is the default + _ => JoinRule::Invite, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 5.2 History Visibility - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 5.3 Guest Access - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - RoomPreset::PublicChat => GuestAccess::Forbidden, - _ => GuestAccess::CanJoin, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { + RoomPreset::PublicChat => GuestAccess::Forbidden, + _ => GuestAccess::CanJoin, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 6. Events listed in initial_state for event in &body.initial_state { @@ -353,47 +381,54 @@ pub async fn create_room_route( continue; } - services().rooms.timeline.build_and_append_pdu( - pdu_builder, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await?; } // 7. 
Events implied by name and topic if let Some(name) = &body.name { - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(name.clone())) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(name.clone())) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; } if let Some(topic) = &body.topic { - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; } // 8. Events implied by invite (and TODO: invite_3pid) @@ -523,7 +558,7 @@ pub async fn upgrade_room_route( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(body.room_id.clone()) .or_default(), ); @@ -531,22 +566,26 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTombstone, - content: to_raw_value(&RoomTombstoneEventContent { - body: "This room has been replaced".to_owned(), - replacement_room: replacement_room.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let tombstone_event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTombstone, + content: to_raw_value(&RoomTombstoneEventContent { + body: "This room has been replaced".to_owned(), + replacement_room: replacement_room.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; // Change lock to replacement room drop(state_lock); @@ -555,7 +594,7 @@ pub async fn upgrade_room_route( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(replacement_room.clone()) .or_default(), ); @@ -613,43 +652,51 @@ pub async fn upgrade_room_route( )); } - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + 
services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&create_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; // Join the new room - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: None, - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ @@ -676,18 +723,22 @@ pub async fn upgrade_room_route( None => continue, // Skipping missing events. 
}; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: event_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: event_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; } // Moves any local aliases to the new room @@ -721,19 +772,23 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let _ = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&power_levels_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 174282a..e62aa01 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -227,24 +227,28 @@ async fn send_state_event_for_key_helper( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: serde_json::from_str(json.json().get()).expect("content is valid json"), - unsigned: None, - state_key: Some(state_key), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - )?; + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: serde_json::from_str(json.json().get()).expect("content is valid json"), + unsigned: None, + state_key: Some(state_key), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; Ok(event_id) } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 76b48d1..5ac00c6 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,6 +1,7 @@ use crate::{ service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse, }; + use ruma::{ api::client::{ filter::{FilterDefinition, LazyLoadOptions}, @@ -75,7 +76,7 @@ pub async fn sync_events_route( .globals .sync_receivers .write() - .unwrap() + .await .entry((sender_user.clone(), sender_device.clone())) { Entry::Vacant(v) => { @@ -147,7 +148,7 @@ async fn sync_helper_wrapper( .globals .sync_receivers .write() - .unwrap() + .await .entry((sender_user, sender_device)) { Entry::Occupied(o) => { @@ -302,11 +303,11 @@ async fn sync_helper( .globals .roomid_mutex_insert .write() - .unwrap() + .await .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = 
mutex_insert.lock().await; drop(insert_lock); } @@ -434,11 +435,11 @@ async fn sync_helper( .globals .roomid_mutex_insert .write() - .unwrap() + .await .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; drop(insert_lock); } @@ -577,11 +578,11 @@ async fn load_joined_room( .globals .roomid_mutex_insert .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; drop(insert_lock); } @@ -599,12 +600,11 @@ async fn load_joined_room( timeline_users.insert(event.sender.as_str().to_owned()); } - services().rooms.lazy_loading.lazy_load_confirm_delivery( - sender_user, - sender_device, - room_id, - sincecount, - )?; + services() + .rooms + .lazy_loading + .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount) + .await?; // Database queries: @@ -797,13 +797,17 @@ async fn load_joined_room( // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. - services().rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); + services() + .rooms + .lazy_loading + .lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ) + .await; ( heroes, @@ -884,13 +888,17 @@ async fn load_joined_room( } } - services().rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); + services() + .rooms + .lazy_loading + .lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ) + .await; let encrypted_room = services() .rooms @@ -1189,11 +1197,14 @@ pub async fn sync_events_v4_route( if globalsince == 0 { if let Some(conn_id) = &body.conn_id { - services().users.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) + services() + .users + .forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ) + .await } } @@ -1463,14 +1474,17 @@ pub async fn sync_events_v4_route( ); if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - list_id, - new_known_rooms, - globalsince, - ); + services() + .users + .update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id, + new_known_rooms, + globalsince, + ) + .await; } } @@ -1502,23 +1516,29 @@ pub async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); + services() + .users + .update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ) + .await; } if let Some(conn_id) = &body.conn_id { - services().users.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + services() + .users + .update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ) + .await; } let mut rooms = BTreeMap::new(); diff --git a/src/api/server_server.rs b/src/api/server_server.rs 
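In the sync handler the per-room insert mutex is taken and immediately dropped; nothing happens while it is held, so it appears to act purely as a barrier that waits for any in-flight timeline insert to finish before counts are read. With tokio's Mutex this becomes `lock().await` instead of `lock().unwrap()`. A sketch, again with simplified types:

    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::{Mutex, RwLock};

    type InsertLocks = RwLock<HashMap<String, Arc<Mutex<()>>>>;

    // Wait until no timeline insert for `room_id` is in flight, then continue.
    // Nothing is done while the lock is held; acquiring and dropping it only
    // synchronizes with a concurrent writer.
    async fn wait_for_pending_inserts(locks: &InsertLocks, room_id: &str) {
        let mutex_insert = Arc::clone(
            locks
                .write()
                .await
                .entry(room_id.to_owned())
                .or_default(),
        );
        let insert_lock = mutex_insert.lock().await;
        drop(insert_lock);
    }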
index 1ba2edc..f946fea 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -51,9 +51,10 @@ use std::{ fmt::Debug, mem, net::{IpAddr, SocketAddr}, - sync::{Arc, RwLock}, + sync::Arc, time::{Duration, Instant, SystemTime}, }; +use tokio::sync::RwLock; use tracing::{debug, error, warn}; @@ -137,7 +138,7 @@ where .globals .actual_destination_cache .read() - .unwrap() + .await .get(destination) .cloned(); @@ -290,7 +291,7 @@ where .globals .actual_destination_cache .write() - .unwrap() + .await .insert( OwnedServerName::from(destination), (actual_destination, host), @@ -740,7 +741,7 @@ pub async fn send_transaction_message_route( .globals .roomid_mutex_federation .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); @@ -1409,7 +1410,7 @@ pub async fn create_join_event_template_route( .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(body.room_id.to_owned()) .or_default(), ); @@ -1579,7 +1580,7 @@ async fn create_join_event( .globals .roomid_mutex_federation .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); diff --git a/src/lib.rs b/src/lib.rs index 66d0c57..9c1170d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,6 +4,9 @@ mod database; mod service; mod utils; +// Not async due to services() being used in many closures, and async colsures are not stable as of writing +// This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where +// the current maintainer (Timo) as asked to not modify those use std::sync::RwLock; pub use api::ruma_wrapper::{Ruma, RumaResponse}; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index f6e52f7..d99be87 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,7 +1,7 @@ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, - sync::{Arc, RwLock}, + sync::Arc, time::Instant, }; @@ -26,7 +26,7 @@ use ruma::{ EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, Mutex, MutexGuard}; +use tokio::sync::{mpsc, Mutex, RwLock}; use crate::{ api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, @@ -215,27 +215,6 @@ impl Service { .expect("@conduit:server_name is valid"); if let Ok(Some(conduit_room)) = services().admin.get_admin_room() { - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; - loop { tokio::select! 
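The comment added to src/lib.rs explains the exceptions: std::sync locks survive only where they are used from synchronous closures (and in the database layer). Everywhere else the driving rule is that a std::sync guard should not be held across an `.await`: the future stops being Send and a contended lock can stall an executor thread. A compact sketch of the cache pattern from server_server.rs under the async lock (find_destination and discover are made-up names; actual_destination_cache is the field the diff touches):

    use std::collections::HashMap;
    use tokio::sync::RwLock;

    struct Federation {
        // Read and written on either side of `.await` points, so it uses the
        // async lock; with std::sync::RwLock the guard would have to be dropped
        // before every await.
        actual_destination_cache: RwLock<HashMap<String, String>>,
    }

    impl Federation {
        async fn find_destination(&self, server: &str) -> String {
            let cached = self
                .actual_destination_cache
                .read()
                .await
                .get(server)
                .cloned();
            if let Some(found) = cached {
                return found;
            }
            // The read guard above was a temporary; nothing is held across this await.
            let actual = discover(server).await;
            self.actual_destination_cache
                .write()
                .await
                .insert(server.to_owned(), actual.clone());
            actual
        }
    }

    async fn discover(server: &str) -> String {
        format!("{server}:8448") // stand-in for the real server discovery steps
    }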
{ Some(event) = receiver.recv() => { @@ -248,16 +227,30 @@ impl Service { services().globals .roomid_mutex_state .write() - .unwrap() + .await .entry(conduit_room.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - send_message(message_content, &state_lock); - - drop(state_lock); + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&message_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &state_lock, + ) + .await.unwrap(); } } } @@ -425,11 +418,7 @@ impl Service { Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, AdminCommand::IncomingFederation => { - let map = services() - .globals - .roomid_federationhandletime - .read() - .unwrap(); + let map = services().globals.roomid_federationhandletime.read().await; let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { @@ -543,7 +532,7 @@ impl Service { } } AdminCommand::MemoryUsage => { - let response1 = services().memory_usage(); + let response1 = services().memory_usage().await; let response2 = services().globals.db.memory_usage(); RoomMessageEventContent::text_plain(format!( @@ -556,7 +545,7 @@ impl Service { RoomMessageEventContent::text_plain("Done.") } AdminCommand::ClearServiceCaches { amount } => { - services().clear_caches(amount); + services().clear_caches(amount).await; RoomMessageEventContent::text_plain("Done.") } @@ -797,7 +786,7 @@ impl Service { .fetch_required_signing_keys(&value, &pub_key_map) .await?; - let pub_key_map = pub_key_map.read().unwrap(); + let pub_key_map = pub_key_map.read().await; match ruma::signatures::verify_json(&pub_key_map, &value) { Ok(_) => RoomMessageEventContent::text_plain("Signature correct"), Err(e) => RoomMessageEventContent::text_plain(format!( @@ -913,7 +902,7 @@ impl Service { .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.clone()) .or_default(), ); @@ -932,164 +921,202 @@ impl Service { content.room_version = services().globals.default_room_version(); // 1. The room create event - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 2. 
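The admin service previously routed messages through a small synchronous send_message closure; once build_and_append_pdu is async, a plain closure cannot await it, so the call is inlined into the select! branch. A minimal sketch of that shape, with post_message standing in for the real lock-and-append sequence:

    use tokio::sync::mpsc;

    // Hypothetical stand-in for taking the admin room's state lock and
    // appending the message PDU.
    async fn post_message(text: String) {
        println!("admin room <- {text}");
    }

    async fn admin_loop(mut receiver: mpsc::Receiver<String>) {
        loop {
            tokio::select! {
                Some(event) = receiver.recv() => {
                    // Previously went through the sync `send_message` closure;
                    // now the async call is awaited directly in the branch.
                    post_message(event).await;
                }
                else => break, // channel closed
            }
        }
    }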
Make conduit bot join - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 3. Power levels let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.1 Join Rules - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.2 History Visibility - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + 
&conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.3 Guest Access - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new( + GuestAccess::Forbidden, + )) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 5. Events implied by name and topic let room_name = format!("{} Admin Room", services().globals.server_name()); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(room_name)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(room_name)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", services().globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 6. 
Room alias let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; services().rooms.alias.set_alias(&alias, &room_id)?; @@ -1125,7 +1152,7 @@ impl Service { .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.clone()) .or_default(), ); @@ -1137,72 +1164,84 @@ impl Service { .expect("@conduit:server_name is valid"); // Invite and join the real user - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + &room_id, + &state_lock, + ) + .await?; // Set power 
level let mut users = BTreeMap::new(); users.insert(conduit_user.to_owned(), 100.into()); users.insert(user_id.to_owned(), 100.into()); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // Send welcome message services().rooms.timeline.build_and_append_pdu( @@ -1220,7 +1259,7 @@ impl Service { &conduit_user, &room_id, &state_lock, - )?; + ).await?; } Ok(()) } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f782294..c3e02ad 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -31,11 +31,11 @@ use std::{ path::PathBuf, sync::{ atomic::{self, AtomicBool}, - Arc, Mutex, RwLock, + Arc, RwLock as SyncRwLock, }, time::{Duration, Instant}, }; -use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; +use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; use tracing::{error, info}; use trust_dns_resolver::TokioAsyncResolver; @@ -53,7 +53,7 @@ pub struct Service { pub db: &'static dyn Data, pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, + pub tls_name_override: Arc>, pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, @@ -68,8 +68,8 @@ pub struct Service { pub servername_ratelimiter: Arc>>>, pub sync_receivers: RwLock>, pub roomid_mutex_insert: RwLock>>>, - pub roomid_mutex_state: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_mutex_state: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub roomid_federationhandletime: RwLock>, pub stateres_mutex: Arc>, pub rotate: RotationHandler, @@ -109,11 +109,11 @@ impl Default for RotationHandler { pub struct Resolver { inner: GaiResolver, - overrides: Arc>, + overrides: Arc>, } impl Resolver { - pub fn new(overrides: Arc>) -> Self { + pub fn new(overrides: Arc>) -> Self { Resolver { inner: GaiResolver::new(), overrides, @@ -125,7 +125,7 @@ impl Resolve for Resolver { fn resolve(&self, name: Name) -> Resolving { self.overrides .read() - .expect("lock should not be poisoned") + .unwrap() .get(name.as_str()) .and_then(|(override_name, port)| { override_name.first().map(|first_name| { @@ -159,7 +159,7 @@ impl Service { } }; - let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); + let tls_name_override = Arc::new(SyncRwLock::new(TlsNameMap::new())); let jwt_decoding_key = config .jwt_secret diff --git a/src/service/mod.rs b/src/service/mod.rs index f85da78..c1616c4 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,9 +1,10 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex}, + sync::{Arc, Mutex as SyncMutex}, }; use lru_cache::LruCache; +use tokio::sync::Mutex; use crate::{Config, Result}; @@ -79,17 +80,17 @@ impl Services { state: rooms::state::Service { db }, 
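Note the deliberate exception in globals: tls_name_override stays behind std::sync::RwLock (imported as SyncRwLock) because the resolver hook that reads it is a synchronous trait method and cannot await. A sketch of that constraint, using a stand-in trait rather than the real reqwest Resolve trait:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock as SyncRwLock};

    // A synchronous callback trait, similar in shape to a blocking resolver
    // hook: its method is not async, so nothing inside it can be awaited.
    trait NameOverride {
        fn port_for(&self, name: &str) -> Option<u16>;
    }

    struct Resolver {
        overrides: Arc<SyncRwLock<HashMap<String, u16>>>,
    }

    impl NameOverride for Resolver {
        fn port_for(&self, name: &str) -> Option<u16> {
            // A tokio::sync::RwLock would need `.read().await`, which is
            // impossible in a sync fn, so the std lock stays (unwrapping the
            // poison error, as the patch now does).
            self.overrides.read().unwrap().get(name).copied()
        }
    }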
state_accessor: rooms::state_accessor::Service { db, - server_visibility_cache: Mutex::new(LruCache::new( + server_visibility_cache: SyncMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), - user_visibility_cache: Mutex::new(LruCache::new( + user_visibility_cache: SyncMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { db, - stateinfo_cache: Mutex::new(LruCache::new( + stateinfo_cache: SyncMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, @@ -107,7 +108,7 @@ impl Services { uiaa: uiaa::Service { db }, users: users::Service { db, - connections: Mutex::new(BTreeMap::new()), + connections: SyncMutex::new(BTreeMap::new()), }, account_data: account_data::Service { db }, admin: admin::Service::build(), @@ -118,14 +119,8 @@ impl Services { globals: globals::Service::load(db, config)?, }) } - fn memory_usage(&self) -> String { - let lazy_load_waiting = self - .rooms - .lazy_loading - .lazy_load_waiting - .lock() - .unwrap() - .len(); + async fn memory_usage(&self) -> String { + let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len(); let server_visibility_cache = self .rooms .state_accessor @@ -152,15 +147,9 @@ impl Services { .timeline .lasttimelinecount_cache .lock() - .unwrap() - .len(); - let roomid_spacechunk_cache = self - .rooms - .spaces - .roomid_spacechunk_cache - .lock() - .unwrap() + .await .len(); + let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len(); format!( "\ @@ -173,13 +162,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\ " ) } - fn clear_caches(&self, amount: u32) { + async fn clear_caches(&self, amount: u32) { if amount > 0 { self.rooms .lazy_loading .lazy_load_waiting .lock() - .unwrap() + .await .clear(); } if amount > 1 { @@ -211,7 +200,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\ .timeline .lasttimelinecount_cache .lock() - .unwrap() + .await .clear(); } if amount > 5 { @@ -219,7 +208,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\ .spaces .roomid_spacechunk_cache .lock() - .unwrap() + .await .clear(); } } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 99fc2cb..7cc662e 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,25 +1,24 @@ /// An async function that can recursively call itself. 
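service/mod.rs ends up with two kinds of caches: those only used from synchronous paths keep a std Mutex (imported as SyncMutex), while those touched from async code move to tokio's Mutex, which is why memory_usage and clear_caches become async at all. A trimmed-down sketch of that split with illustrative field names:

    use std::collections::HashMap;
    use std::sync::Mutex as SyncMutex;
    use tokio::sync::Mutex;

    struct Caches {
        // Only accessed from synchronous code paths: the cheap std lock stays.
        visibility: SyncMutex<HashMap<String, bool>>,
        // Accessed from async code, near await points: use the tokio lock.
        lazy_load_waiting: Mutex<HashMap<String, Vec<String>>>,
    }

    impl Caches {
        // Becomes `async` purely because reading the tokio-guarded cache needs `.await`.
        async fn memory_usage(&self) -> String {
            let visibility = self.visibility.lock().unwrap().len();
            let lazy_load_waiting = self.lazy_load_waiting.lock().await.len();
            format!("visibility: {visibility}\nlazy_load_waiting: {lazy_load_waiting}")
        }

        async fn clear_caches(&self) {
            self.visibility.lock().unwrap().clear();
            self.lazy_load_waiting.lock().await.clear();
        }
    }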
type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -use ruma::{ - api::federation::discovery::{get_remote_server_keys, get_server_keys}, - CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId, - RoomVersionId, -}; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, + sync::Arc, time::{Duration, Instant, SystemTime}, }; -use tokio::sync::Semaphore; +use async_recursion::async_recursion; use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, federation::{ - discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria}, + discovery::{ + get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, event::{get_event, get_room_state_ids}, membership::create_join_event, }, @@ -31,9 +30,11 @@ use ruma::{ int, serde::Base64, state_res::{self, RoomVersion, StateMap}, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; +use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore}; use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; @@ -168,7 +169,7 @@ impl Service { .globals .bad_event_ratelimiter .read() - .unwrap() + .await .get(&*prev_id) { // Exponential backoff @@ -189,7 +190,7 @@ impl Service { .globals .bad_event_ratelimiter .write() - .unwrap() + .await .entry((*prev_id).to_owned()) { hash_map::Entry::Vacant(e) => { @@ -213,7 +214,7 @@ impl Service { .globals .roomid_federationhandletime .write() - .unwrap() + .await .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); if let Err(e) = self @@ -233,7 +234,7 @@ impl Service { .globals .bad_event_ratelimiter .write() - .unwrap() + .await .entry((*prev_id).to_owned()) { hash_map::Entry::Vacant(e) => { @@ -249,7 +250,7 @@ impl Service { .globals .roomid_federationhandletime .write() - .unwrap() + .await .remove(&room_id.to_owned()); debug!( "Handling prev event {} took {}m{}s", @@ -267,7 +268,7 @@ impl Service { .globals .roomid_federationhandletime .write() - .unwrap() + .await .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); let r = services() .rooms @@ -285,7 +286,7 @@ impl Service { .globals .roomid_federationhandletime .write() - .unwrap() + .await .remove(&room_id.to_owned()); r @@ -326,11 +327,8 @@ impl Service { let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - let mut val = match ruma::signatures::verify_event( - &pub_key_map.read().expect("RwLock is poisoned."), - &value, - room_version_id, - ) { + let guard = pub_key_map.read().await; + let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) { Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e,); @@ -365,6 +363,8 @@ impl Service { Ok(ruma::signatures::Verified::All) => value, }; + drop(guard); + // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type val.insert( @@ -692,13 +692,15 @@ impl Service { { Ok(res) => { debug!("Fetching state events at event."); + let collect = res + .pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(); let state_vec = self .fetch_and_handle_outliers( origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - 
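A small but useful detail above: the verification read guard is bound to a variable and explicitly dropped once the synchronous signature check is done, so the pub_key_map lock is not held across the awaits that follow. Sketched:

    use std::collections::BTreeMap;
    use tokio::sync::RwLock;

    async fn verify_then_continue(pub_key_map: &RwLock<BTreeMap<String, String>>) {
        let guard = pub_key_map.read().await;
        // ... purely synchronous signature checks against `guard` happen here ...
        let _key_count = guard.len();
        drop(guard); // release the read lock before any further awaits

        // Writers (key fetches) are no longer blocked while the rest of the
        // event handling continues.
        tokio::task::yield_now().await;
    }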
.collect::>(), + &collect, create_event, room_id, room_version_id, @@ -805,7 +807,7 @@ impl Service { .globals .roomid_mutex_state .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); @@ -884,14 +886,18 @@ impl Service { debug!("Starting soft fail auth check"); if soft_fail { - services().rooms.timeline.append_incoming_pdu( - &incoming_pdu, - val, - extremities.iter().map(|e| (**e).to_owned()).collect(), - state_ids_compressed, - soft_fail, - &state_lock, - )?; + services() + .rooms + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); @@ -912,14 +918,18 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let pdu_id = services().rooms.timeline.append_incoming_pdu( - &incoming_pdu, - val, - extremities.iter().map(|e| (**e).to_owned()).collect(), - state_ids_compressed, - soft_fail, - &state_lock, - )?; + let pdu_id = services() + .rooms + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; debug!("Appended incoming pdu"); @@ -1034,7 +1044,8 @@ impl Service { /// d. TODO: Ask other servers over federation? #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] - pub(crate) fn fetch_and_handle_outliers<'a>( + #[async_recursion] + pub(crate) async fn fetch_and_handle_outliers<'a>( &'a self, origin: &'a ServerName, events: &'a [Arc], @@ -1042,176 +1053,175 @@ impl Service { room_id: &'a RoomId, room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> - { - Box::pin(async move { - let back_off = |id| match services() + ) -> Vec<(Arc, Option>)> { + let back_off = |id| async move { + match services() .globals .bad_event_ratelimiter .write() - .unwrap() + .await .entry(id) { hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; + } + }; - let mut pdus = vec![]; - for id in events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); + let mut pdus = vec![]; + for id in events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
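fetch_and_handle_outliers used to return the hand-rolled AsyncRecursiveType (a pinned, boxed future) so that it could call itself; this commit switches to the async_recursion crate, whose attribute generates the same boxing. A toy example of the attribute form (a later commit in this series reverts to the manual spelling):

    use async_recursion::async_recursion;

    // With the attribute the function reads like a normal async fn even though
    // it awaits itself; the macro inserts the boxing that recursion requires.
    #[async_recursion]
    async fn countdown(n: u32) -> u32 {
        if n == 0 {
            0
        } else {
            countdown(n - 1).await + 1
        }
    }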
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&*next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } + } + + if events_all.contains(&next_id) { continue; } - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*next_id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); - continue; - } - } - - if events_all.contains(&next_id) { - continue; - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { - trace!("Found {} in db", next_id); - continue; - } - - info!("Fetching {} over federation.", next_id); - match services() - .sending - .send_federation_request( - origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - }, - ) - .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()); - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()); - } - } + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; } - for (next_id, value) in events_in_reverse_order.iter().rev() { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&**next_id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { + trace!("Found {} in db", next_id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match services() + .sending + .send_federation_request( + origin, + get_event::v1::Request { + event_id: (*next_id).to_owned(), + }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()).await; + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); } - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); - continue; - } - } - - match self - .handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - true, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } } + } else { + warn!("Auth event list invalid"); } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); - } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()).await; } } } - pdus - }) + + for (next_id, value) in events_in_reverse_order.iter().rev() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&**next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } + } + + match self + .handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + true, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()).await; + } + } + } + } + pdus } async fn fetch_unknown_prev_events( @@ -1360,7 +1370,7 @@ impl Service { pub_key_map .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .await .insert(signature_server.clone(), keys); } @@ -1369,7 +1379,7 @@ impl Service { // Gets a list of servers for which we don't have the signing key yet. We go over // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
- fn get_server_keys_from_cache( + async fn get_server_keys_from_cache( &self, pdu: &RawJsonValue, servers: &mut BTreeMap>, @@ -1393,7 +1403,7 @@ impl Service { .globals .bad_event_ratelimiter .read() - .unwrap() + .await .get(event_id) { // Exponential backoff @@ -1469,17 +1479,19 @@ impl Service { > = BTreeMap::new(); { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + let mut pkm = pub_key_map.write().await; // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + let _ = self + .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) + .await; } for pdu in &event.room_state.auth_chain { - let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + let _ = self + .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) + .await; } drop(pkm); @@ -1503,9 +1515,7 @@ impl Service { .await { trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + let mut pkm = pub_key_map.write().await; for k in keys.server_keys { let k = match k.deserialize() { Ok(key) => key, @@ -1564,10 +1574,7 @@ impl Service { .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); + pub_key_map.write().await.insert(origin.to_string(), result); } } info!("Done handling result"); @@ -1632,14 +1639,14 @@ impl Service { .globals .servername_ratelimiter .read() - .unwrap() + .await .get(origin) .map(|s| Arc::clone(s).acquire_owned()); let permit = match permit { Some(p) => p, None => { - let mut write = services().globals.servername_ratelimiter.write().unwrap(); + let mut write = services().globals.servername_ratelimiter.write().await; let s = Arc::clone( write .entry(origin.to_owned()) @@ -1651,24 +1658,26 @@ impl Service { } .await; - let back_off = |id| match services() - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); + let back_off = |id| async { + match services() + .globals + .bad_signature_ratelimiter + .write() + .await + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; if let Some((time, tries)) = services() .globals .bad_signature_ratelimiter .read() - .unwrap() + .await .get(&signature_ids) { // Exponential backoff @@ -1775,7 +1784,7 @@ impl Service { drop(permit); - back_off(signature_ids); + back_off(signature_ids).await; warn!("Failed to find public key for server: {}", origin); Err(Error::BadServerResponse( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index c51a57e..e2594a0 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,11 +1,9 @@ mod data; -use std::{ - collections::{HashMap, HashSet}, - sync::Mutex, -}; +use std::collections::{HashMap, HashSet}; pub use data::Data; use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use tokio::sync::Mutex; use crate::Result; @@ -33,7 +31,7 @@ impl Service 
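The signing-key fetch path also keeps its per-server Semaphore (servername_ratelimiter), now read through the async RwLock: concurrent key requests to the same origin queue behind a single permit instead of hammering the server. A simplified sketch of that limiter (acquire_for and the plain String keys are illustrative):

    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::{RwLock, Semaphore};

    type PerServerLimiter = RwLock<HashMap<String, Arc<Semaphore>>>;

    // One permit per origin: callers hold the permit for the duration of the request.
    async fn acquire_for(
        limiter: &PerServerLimiter,
        origin: &str,
    ) -> tokio::sync::OwnedSemaphorePermit {
        // Fast path: the semaphore already exists; clone it under the read lock.
        let existing = limiter
            .read()
            .await
            .get(origin)
            .map(|s| Arc::clone(s).acquire_owned());

        match existing {
            Some(fut) => fut.await.expect("semaphore is never closed"),
            None => {
                // Slow path: create the per-server semaphore, then wait on it.
                let mut write = limiter.write().await;
                let s = Arc::clone(
                    write
                        .entry(origin.to_owned())
                        .or_insert_with(|| Arc::new(Semaphore::new(1))),
                );
                drop(write); // don't hold the map lock while waiting for a permit
                s.acquire_owned().await.expect("semaphore is never closed")
            }
        }
    }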
{ } #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( + pub async fn lazy_load_mark_sent( &self, user_id: &UserId, device_id: &DeviceId, @@ -41,7 +39,7 @@ impl Service { lazy_load: HashSet, count: PduCount, ) { - self.lazy_load_waiting.lock().unwrap().insert( + self.lazy_load_waiting.lock().await.insert( ( user_id.to_owned(), device_id.to_owned(), @@ -53,14 +51,14 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( + pub async fn lazy_load_confirm_delivery( &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, since: PduCount, ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&( user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index b0a9ed2..981d4a3 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,4 +1,4 @@ -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use lru_cache::LruCache; use ruma::{ @@ -25,6 +25,7 @@ use ruma::{ space::SpaceRoomJoinRule, OwnedRoomId, RoomId, UserId, }; +use tokio::sync::Mutex; use tracing::{debug, error, warn}; @@ -79,7 +80,7 @@ impl Service { if let Some(cached) = self .roomid_spacechunk_cache .lock() - .unwrap() + .await .get_mut(¤t_room.to_owned()) .as_ref() { @@ -171,7 +172,7 @@ impl Service { .transpose()? .unwrap_or(JoinRule::Invite); - self.roomid_spacechunk_cache.lock().unwrap().insert( + self.roomid_spacechunk_cache.lock().await.insert( current_room.clone(), Some(CachedSpaceChunk { chunk, @@ -265,7 +266,7 @@ impl Service { } } - self.roomid_spacechunk_cache.lock().unwrap().insert( + self.roomid_spacechunk_cache.lock().await.insert( current_room.clone(), Some(CachedSpaceChunk { chunk, @@ -289,7 +290,7 @@ impl Service { } else { self.roomid_spacechunk_cache .lock() - .unwrap() + .await .insert(current_room.clone(), None); } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index c209eb5..f6581bb 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -95,7 +95,7 @@ impl Service { .spaces .roomid_spacechunk_cache .lock() - .unwrap() + .await .remove(&pdu.room_id); } _ => continue, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b66fc64..097cc82 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -2,12 +2,8 @@ mod data; use std::{ cmp::Ordering, - collections::{BTreeMap, HashMap}, -}; - -use std::{ - collections::HashSet, - sync::{Arc, Mutex, RwLock}, + collections::{BTreeMap, HashMap, HashSet}, + sync::Arc, }; pub use data::Data; @@ -32,7 +28,7 @@ use ruma::{ }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tokio::sync::MutexGuard; +use tokio::sync::{Mutex, MutexGuard, RwLock}; use tracing::{error, info, warn}; use crate::{ @@ -201,7 +197,7 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] - pub fn append_pdu<'a>( + pub async fn append_pdu<'a>( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, @@ -263,11 +259,11 @@ impl Service { .globals .roomid_mutex_insert .write() - .unwrap() + .await .entry(pdu.room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; let count1 = services().globals.next_count()?; // Mark as read first so the sending 
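In the spaces service the LruCache itself stays synchronous; only the Mutex around roomid_spacechunk_cache moves from std to tokio, so lookups become `.lock().await`. A self-contained sketch of the lookup-then-insert flow, including the negative-result caching the real code does (chunk_for and the plain String types are placeholders):

    use lru_cache::LruCache;
    use tokio::sync::Mutex;

    struct SpaceCache {
        // The LRU itself stays synchronous; only the lock around it is async now.
        roomid_spacechunk_cache: Mutex<LruCache<String, Option<String>>>,
    }

    impl SpaceCache {
        fn new(capacity: usize) -> Self {
            Self {
                roomid_spacechunk_cache: Mutex::new(LruCache::new(capacity)),
            }
        }

        async fn chunk_for(&self, room_id: &str) -> Option<String> {
            // get_mut (not get) so the entry's recency is updated on a hit;
            // a cached `None` is a negative result and is returned as-is.
            if let Some(cached) = self
                .roomid_spacechunk_cache
                .lock()
                .await
                .get_mut(&room_id.to_owned())
            {
                return cached.clone();
            }
            let computed = Some(format!("chunk for {room_id}")); // stand-in for the real work
            self.roomid_spacechunk_cache
                .lock()
                .await
                .insert(room_id.to_owned(), computed.clone());
            computed
        }
    }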
client doesn't get a notification even if appending @@ -395,7 +391,7 @@ impl Service { .spaces .roomid_spacechunk_cache .lock() - .unwrap() + .await .remove(&pdu.room_id); } } @@ -806,7 +802,7 @@ impl Service { /// Creates a new persisted data unit and adds it to a room. This function takes a /// roomid_mutex_state, meaning that only this function is able to mutate the room state. #[tracing::instrument(skip(self, state_lock))] - pub fn build_and_append_pdu( + pub async fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, @@ -902,14 +898,16 @@ impl Service { // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = services().rooms.state.append_to_state(&pdu)?; - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - vec![(*pdu.event_id).to_owned()], - state_lock, - )?; + let pdu_id = self + .append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + vec![(*pdu.event_id).to_owned()], + state_lock, + ) + .await?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -947,7 +945,7 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. #[tracing::instrument(skip_all)] - pub fn append_incoming_pdu<'a>( + pub async fn append_incoming_pdu<'a>( &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, @@ -977,11 +975,11 @@ impl Service { return Ok(None); } - let pdu_id = - services() - .rooms - .timeline - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; + let pdu_id = services() + .rooms + .timeline + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .await?; Ok(Some(pdu_id)) } @@ -1118,7 +1116,7 @@ impl Service { .globals .roomid_mutex_federation .write() - .unwrap() + .await .entry(room_id.to_owned()) .or_default(), ); @@ -1150,11 +1148,11 @@ impl Service { .globals .roomid_mutex_insert .write() - .unwrap() + .await .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; let count = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fb983a4..c83463e 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -45,7 +45,7 @@ impl Service { self.db.exists(user_id) } - pub fn forget_sync_request_connection( + pub async fn forget_sync_request_connection( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, @@ -186,7 +186,7 @@ impl Service { cached.known_rooms.clone() } - pub fn update_sync_subscriptions( + pub async fn update_sync_subscriptions( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, @@ -212,7 +212,7 @@ impl Service { cached.subscriptions = subscriptions; } - pub fn update_sync_known_rooms( + pub async fn update_sync_known_rooms( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, From c58af8485de8eb5e01ad0a2c1f2669102efa7754 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 19:58:39 +0000 Subject: [PATCH 1552/1727] revert: remove dependency on async_recursion --- Cargo.lock | 12 - Cargo.toml | 1 - src/service/rooms/event_handler/mod.rs | 315 +++++++++++++------------ 3 files changed, 159 insertions(+), 169 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 426661b..e99928e 100644 
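Before the event_handler diff itself, a note on the technique this revert switches to (an editor's sketch with made-up names, not part of the patch): without the async_recursion macro, the recursive function is no longer an async fn; it becomes a plain fn returning a pinned, boxed future (the AsyncRecursiveType alias used further down), so the recursive call only contributes a pointer-sized Box and the compiler sees a future of known size.

    use std::future::Future;
    use std::pin::Pin;

    // Illustrative alias; the patch names its equivalent AsyncRecursiveType.
    type BoxedFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

    // A plain (non-async) fn returning a boxed future may call itself:
    // boxing the recursive call breaks the otherwise infinitely sized future.
    fn count_down<'a>(label: &'a str, n: u32) -> BoxedFuture<'a, u32> {
        Box::pin(async move {
            if n == 0 {
                return 0;
            }
            println!("{label}: {n}");
            count_down(label, n - 1).await + 1
        })
    }

Apart from being wrapped in Box::pin(async move { ... }), the body of fetch_and_handle_outliers is unchanged, which is why most of the diff that follows is re-indentation.
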
--- a/Cargo.lock +++ b/Cargo.lock @@ -80,17 +80,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-recursion" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.48", -] - [[package]] name = "async-trait" version = "0.1.77" @@ -385,7 +374,6 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.7.0-alpha" dependencies = [ - "async-recursion", "async-trait", "axum", "axum-server", diff --git a/Cargo.toml b/Cargo.toml index 93ff2f3..e8c1c8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,7 +115,6 @@ lazy_static = "1.4.0" async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } -async-recursion = "1.0.5" [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", features = ["resource"] } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 7cc662e..1547d40 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -8,7 +8,6 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use async_recursion::async_recursion; use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use ruma::{ api::{ @@ -1044,8 +1043,7 @@ impl Service { /// d. TODO: Ask other servers over federation? #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] - #[async_recursion] - pub(crate) async fn fetch_and_handle_outliers<'a>( + pub(crate) fn fetch_and_handle_outliers<'a>( &'a self, origin: &'a ServerName, events: &'a [Arc], @@ -1053,175 +1051,180 @@ impl Service { room_id: &'a RoomId, room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, - ) -> Vec<(Arc, Option>)> { - let back_off = |id| async move { - match services() - .globals - .bad_event_ratelimiter - .write() - .await - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - } - }; - - let mut pdus = vec![]; - for id in events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - continue; - } - - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. 
- let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .await - .get(&*next_id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); - continue; - } - } - - if events_all.contains(&next_id) { - continue; - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { - trace!("Found {} in db", next_id); - continue; - } - - info!("Fetching {} over federation.", next_id); + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> + { + Box::pin(async move { + let back_off = |id| async move { match services() - .sending - .send_federation_request( - origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - }, - ) - .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()).await; - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()).await; - } - } - } - - for (next_id, value) in events_in_reverse_order.iter().rev() { - if let Some((time, tries)) = services() .globals .bad_event_ratelimiter - .read() + .write() .await - .get(&**next_id) + .entry(id) { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + }; + + let mut pdus = vec![]; + for id in events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&*next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } } - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); + if events_all.contains(&next_id) { continue; } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { + trace!("Found {} in db", next_id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match services() + .sending + .send_federation_request( + origin, + get_event::v1::Request { + event_id: (*next_id).to_owned(), + }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()).await; + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()).await; + } + } } - match self - .handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - true, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); + for (next_id, value) in events_in_reverse_order.iter().rev() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&**next_id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; } } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()).await; + + match self + .handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + true, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()).await; + } } } } - } - pdus + pdus + }) } async fn fetch_unknown_prev_events( From e33d8430d31d022933d37ad74bacdc482590c018 Mon Sep 
17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 20:00:22 +0000 Subject: [PATCH 1553/1727] typo: colsures -> closures --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 9c1170d..13bda4c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,7 @@ mod database; mod service; mod utils; -// Not async due to services() being used in many closures, and async colsures are not stable as of writing +// Not async due to services() being used in many closures, and async closures are not stable as of writing // This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where // the current maintainer (Timo) as asked to not modify those use std::sync::RwLock; From 17dd8cb918b4592fb45ebbaa297414100ffe939c Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 20:15:11 +0000 Subject: [PATCH 1554/1727] style: rename Sync(Mutex|RwLock) to Std(Mutex|RwLock) --- src/service/globals/mod.rs | 10 +++++----- src/service/mod.rs | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index c3e02ad..22dc695 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -31,7 +31,7 @@ use std::{ path::PathBuf, sync::{ atomic::{self, AtomicBool}, - Arc, RwLock as SyncRwLock, + Arc, RwLock as StdRwLock, }, time::{Duration, Instant}, }; @@ -53,7 +53,7 @@ pub struct Service { pub db: &'static dyn Data, pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, + pub tls_name_override: Arc>, pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, @@ -109,11 +109,11 @@ impl Default for RotationHandler { pub struct Resolver { inner: GaiResolver, - overrides: Arc>, + overrides: Arc>, } impl Resolver { - pub fn new(overrides: Arc>) -> Self { + pub fn new(overrides: Arc>) -> Self { Resolver { inner: GaiResolver::new(), overrides, @@ -159,7 +159,7 @@ impl Service { } }; - let tls_name_override = Arc::new(SyncRwLock::new(TlsNameMap::new())); + let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new())); let jwt_decoding_key = config .jwt_secret diff --git a/src/service/mod.rs b/src/service/mod.rs index c1616c4..8f9fb0a 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,6 +1,6 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex as SyncMutex}, + sync::{Arc, Mutex as StdMutex}, }; use lru_cache::LruCache; @@ -80,17 +80,17 @@ impl Services { state: rooms::state::Service { db }, state_accessor: rooms::state_accessor::Service { db, - server_visibility_cache: SyncMutex::new(LruCache::new( + server_visibility_cache: StdMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), - user_visibility_cache: SyncMutex::new(LruCache::new( + user_visibility_cache: StdMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { db, - stateinfo_cache: SyncMutex::new(LruCache::new( + stateinfo_cache: StdMutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, @@ -108,7 +108,7 @@ impl Services { uiaa: uiaa::Service { db }, users: users::Service { db, - connections: SyncMutex::new(BTreeMap::new()), + connections: StdMutex::new(BTreeMap::new()), }, account_data: account_data::Service { db }, admin: admin::Service::build(), From 
07bb369c5cee46fba463f94dfb95151f46d2c8f9 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 20:20:19 +0000 Subject: [PATCH 1555/1727] perf: remove unnecessary async --- src/api/client_server/sync.rs | 66 ++++++++++++++--------------------- src/service/users/mod.rs | 6 ++-- 2 files changed, 30 insertions(+), 42 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 5ac00c6..c510f5f 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1197,14 +1197,11 @@ pub async fn sync_events_v4_route( if globalsince == 0 { if let Some(conn_id) = &body.conn_id { - services() - .users - .forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) - .await + services().users.forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ) } } @@ -1474,17 +1471,14 @@ pub async fn sync_events_v4_route( ); if let Some(conn_id) = &body.conn_id { - services() - .users - .update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - list_id, - new_known_rooms, - globalsince, - ) - .await; + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id, + new_known_rooms, + globalsince, + ); } } @@ -1516,29 +1510,23 @@ pub async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { - services() - .users - .update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ) - .await; + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); } if let Some(conn_id) = &body.conn_id { - services() - .users - .update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ) - .await; + services().users.update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ); } let mut rooms = BTreeMap::new(); diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index c83463e..fb983a4 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -45,7 +45,7 @@ impl Service { self.db.exists(user_id) } - pub async fn forget_sync_request_connection( + pub fn forget_sync_request_connection( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, @@ -186,7 +186,7 @@ impl Service { cached.known_rooms.clone() } - pub async fn update_sync_subscriptions( + pub fn update_sync_subscriptions( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, @@ -212,7 +212,7 @@ impl Service { cached.subscriptions = subscriptions; } - pub async fn update_sync_known_rooms( + pub fn update_sync_known_rooms( &self, user_id: OwnedUserId, device_id: OwnedDeviceId, From ee7efdd403ec514c84d20c51ad7536c4b38f3349 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 5 Mar 2024 20:31:40 +0000 Subject: [PATCH 1556/1727] typo: as -> has --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 13bda4c..70c6f37 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,7 @@ mod utils; // Not async due to services() being used in many closures, and async closures are not stable as of writing // This is the case for every other occurence of sync Mutex/RwLock, except for database 
related ones, where -// the current maintainer (Timo) as asked to not modify those +// the current maintainer (Timo) has asked to not modify those use std::sync::RwLock; pub use api::ruma_wrapper::{Ruma, RumaResponse}; From 161ad8f9a4dc56695656cd3c4a1a750bd6f93514 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 22:37:23 -0800 Subject: [PATCH 1557/1727] update to latest crane before a regression MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Once these issues are fixed, or at least just the one against crane, we can go back to `ref=master`. Flake lock file updates: • Updated input 'crane': 'github:ipetkov/crane/c798790eabec3e3da48190ae3698ac227aab770c' (2024-01-28) → 'github:ipetkov/crane/2c653e4478476a52c6aa3ac0495e4dea7449ea0e' (2024-02-11) --- flake.lock | 8 ++++---- flake.nix | 7 ++++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 1c97e5c..e874892 100644 --- a/flake.lock +++ b/flake.lock @@ -51,17 +51,17 @@ ] }, "locked": { - "lastModified": 1706473964, - "narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=", + "lastModified": 1707685877, + "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=", "owner": "ipetkov", "repo": "crane", - "rev": "c798790eabec3e3da48190ae3698ac227aab770c", + "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" }, "original": { "owner": "ipetkov", - "ref": "master", "repo": "crane", + "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" } }, diff --git a/flake.nix b/flake.nix index 6ca4c72..d1a947a 100644 --- a/flake.nix +++ b/flake.nix @@ -13,7 +13,12 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - url = "github:ipetkov/crane?ref=master"; + # Pin latest crane that's not affected by the following bugs: + # + # * + # * + # * + url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e"; inputs.nixpkgs.follows = "nixpkgs"; }; attic.url = "github:zhaofengli/attic?ref=main"; From e70f33741c04781ece5032c10a9261a1a72d3780 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 22:40:47 -0800 Subject: [PATCH 1558/1727] update flake.lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also switch names to match the newer upstream nixpkgs code. 
Flake lock file updates: • Updated input 'attic': 'github:zhaofengli/attic/fbe252a5c21febbe920c025560cbd63b20e24f3b' (2024-01-18) → 'github:zhaofengli/attic/6eabc3f02fae3683bffab483e614bebfcd476b21' (2024-02-14) • Updated input 'fenix': 'github:nix-community/fenix/e132ea0eb0c799a2109a91688e499d7bf4962801' (2024-01-18) → 'github:nix-community/fenix/c8943ea9e98d41325ff57d4ec14736d330b321b2' (2024-03-05) • Updated input 'fenix/rust-analyzer-src': 'github:rust-lang/rust-analyzer/9d9b34354d2f13e33568c9c55b226dd014a146a0' (2024-01-17) → 'github:rust-lang/rust-analyzer/9f14343f9ee24f53f17492c5f9b653427e2ad15e' (2024-03-04) • Updated input 'flake-utils': 'github:numtide/flake-utils/1ef2e671c3b0c19053962c07dbda38332dcebf26' (2024-01-15) → 'github:numtide/flake-utils/d465f4819400de7c8d874d50b982301f28a84605' (2024-02-28) • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/842d9d80cfd4560648c785f8a4e6f3b096790e19' (2024-01-17) → 'github:NixOS/nixpkgs/b8697e57f10292a6165a20f03d2f42920dfaf973' (2024-03-03) --- flake.lock | 30 +++++++++++++++--------------- flake.nix | 10 +++++----- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/flake.lock b/flake.lock index e874892..1c2142f 100644 --- a/flake.lock +++ b/flake.lock @@ -9,11 +9,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1705617092, - "narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=", + "lastModified": 1707922053, + "narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=", "owner": "zhaofengli", "repo": "attic", - "rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b", + "rev": "6eabc3f02fae3683bffab483e614bebfcd476b21", "type": "github" }, "original": { @@ -73,11 +73,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1705559032, - "narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=", + "lastModified": 1709619709, + "narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=", "owner": "nix-community", "repo": "fenix", - "rev": "e132ea0eb0c799a2109a91688e499d7bf4962801", + "rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2", "type": "github" }, "original": { @@ -138,11 +138,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1709126324, + "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "d465f4819400de7c8d874d50b982301f28a84605", "type": "github" }, "original": { @@ -200,11 +200,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1705496572, - "narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=", + "lastModified": 1709479366, + "narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19", + "rev": "b8697e57f10292a6165a20f03d2f42920dfaf973", "type": "github" }, "original": { @@ -228,11 +228,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1705523001, - "narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=", + "lastModified": 1709571018, + "narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0", + "rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index d1a947a..460bffc 100644 --- a/flake.nix +++ b/flake.nix @@ 
-55,7 +55,7 @@ # bindgen needs the build platform's libclang. Apparently due to # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't # quite do the right thing here. - pkgs.buildPackages.rustPlatform.bindgenHook + pkgs.pkgsBuildHost.rustPlatform.bindgenHook ]; env = pkgs: { @@ -83,7 +83,7 @@ # these flags when using a different linker. Don't ask me why, # though, because I don't know. All I know is it breaks otherwise. # - # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39 + # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 ( # Nixpkgs doesn't check for x86_64 here but we do, because I # observed a failure building statically for x86_64 without @@ -107,7 +107,7 @@ # even covers the case of build scripts that need native code compiled and # run on the build platform (I think). # - # [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78 + # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 // ( let inherit (pkgs.rust.lib) envVars; @@ -145,8 +145,8 @@ "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; - HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc"; - HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++"; + HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc"; + HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++"; } )); From 10b7b174b6bac0921f704ab768799128bf298ef1 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 20:49:41 -0800 Subject: [PATCH 1559/1727] fix documented target triple Even though it doesn't really matter because it's containerized anyway. --- DEPLOY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index dcc41bd..1a6f483 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -37,7 +37,7 @@ If you use a system with an older glibc version (e.g. 
RHEL8), you might need to | `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) | -| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | +| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | | `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) | ```bash From 4f352a711af865ca7e75e772a09aa96fc3219af4 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 20:42:58 -0800 Subject: [PATCH 1560/1727] add trailing newline to file Please fix your editor configuration... --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4f4aa69..59de64f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -177,4 +177,4 @@ oci-image:push-dockerhub: variables: IMAGE_NAME: matrixconduit/matrix-conduit before_script: - - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD \ No newline at end of file + - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD From 6281c64c331d9dd70b018eda1a124a0f12164f82 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 20:44:15 -0800 Subject: [PATCH 1561/1727] upgrade nixos/nix image --- .gitlab-ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 59de64f..2fe90bf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -34,7 +34,7 @@ before_script: ci: stage: ci - image: nixos/nix:2.19.2 + image: nixos/nix:2.20.4 script: - direnv exec . engage cache: @@ -45,7 +45,7 @@ ci: static:x86_64-unknown-linux-musl: stage: artifacts - image: nixos/nix:2.19.2 + image: nixos/nix:2.20.4 script: # Push artifacts and build requirements to binary cache - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl @@ -58,7 +58,7 @@ static:x86_64-unknown-linux-musl: static:aarch64-unknown-linux-musl: stage: artifacts - image: nixos/nix:2.19.2 + image: nixos/nix:2.20.4 script: # Push artifacts and build requirements to binary cache - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl @@ -74,7 +74,7 @@ static:aarch64-unknown-linux-musl: # all containerized anyway. 
oci-image:x86_64-unknown-linux-gnu: stage: artifacts - image: nixos/nix:2.19.2 + image: nixos/nix:2.20.4 script: # Push artifacts and build requirements to binary cache # @@ -96,7 +96,7 @@ oci-image:aarch64-unknown-linux-musl: # Wait for the static binary job to finish before starting so we don't have # to build that twice for no reason - static:aarch64-unknown-linux-musl - image: nixos/nix:2.19.2 + image: nixos/nix:2.20.4 script: # Push artifacts and build requirements to binary cache - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl From 4e09c9e58a9de466887db659a6f9e02999a2f035 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 20:38:57 -0800 Subject: [PATCH 1562/1727] build all nix-based artifacts in a single job This will reduce the amount of full builds that need to be done by runs that don't have write access to the nix binary cache. --- .gitlab-ci.yml | 61 +++++++++++--------------------------------------- DEPLOY.md | 8 +++---- 2 files changed, 17 insertions(+), 52 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2fe90bf..b24481b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -43,68 +43,34 @@ ci: - target - .gitlab-ci.d -static:x86_64-unknown-linux-musl: +nix:artifacts: stage: artifacts image: nixos/nix:2.20.4 script: - # Push artifacts and build requirements to binary cache - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl + - cp result/bin/conduit x86_64-unknown-linux-musl - # Make the output less difficult to find - - cp result/bin/conduit conduit - artifacts: - paths: - - conduit - -static:aarch64-unknown-linux-musl: - stage: artifacts - image: nixos/nix:2.20.4 - script: - # Push artifacts and build requirements to binary cache - - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl - - # Make the output less difficult to find - - cp result/bin/conduit conduit - artifacts: - paths: - - conduit - -# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output, -# we don't build it because it would be largely redundant to this one since it's -# all containerized anyway. -oci-image:x86_64-unknown-linux-gnu: - stage: artifacts - image: nixos/nix:2.20.4 - script: - # Push artifacts and build requirements to binary cache - # # Since the OCI image package is based on the binary package, this has the # fun side effect of uploading the normal binary too. Conduit users who are # deploying with Nix can leverage this fact by adding our binary cache to # their systems. + # + # Note that although we have an `oci-image-x86_64-unknown-linux-musl` + # output, we don't build it because it would be largely redundant to this + # one since it's all containerized anyway. 
- ./bin/nix-build-and-cache .#oci-image - - # Make the output less difficult to find - cp result oci-image-amd64.tar.gz - artifacts: - paths: - - oci-image-amd64.tar.gz -oci-image:aarch64-unknown-linux-musl: - stage: artifacts - needs: - # Wait for the static binary job to finish before starting so we don't have - # to build that twice for no reason - - static:aarch64-unknown-linux-musl - image: nixos/nix:2.20.4 - script: - # Push artifacts and build requirements to binary cache + - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl + - cp result/bin/conduit aarch64-unknown-linux-musl + - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl - - # Make the output less difficult to find - cp result oci-image-arm64v8.tar.gz artifacts: paths: + - x86_64-unknown-linux-musl + - aarch64-unknown-linux-musl + - oci-image-amd64.tar.gz - oci-image-arm64v8.tar.gz debian:x86_64-unknown-linux-gnu: @@ -158,8 +124,7 @@ debian:x86_64-unknown-linux-gnu: docker manifest push $IMAGE_NAME:latest fi dependencies: - - oci-image:x86_64-unknown-linux-gnu - - oci-image:aarch64-unknown-linux-musl + - nix:artifacts only: - next - master diff --git a/DEPLOY.md b/DEPLOY.md index 1a6f483..ccf52c3 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -35,10 +35,10 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to | Target | Type | Download | |-|-|-| | `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) | -| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) | -| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) | -| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) | -| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) | +| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=nix:artifacts) | +| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=nix:artifacts) | +| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=nix:artifacts) | +| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=nix:artifacts) | ```bash $ sudo wget -O /usr/local/bin/matrix-conduit From d5a9c6ac32e15ca11a0f684274be9ca8356cdfcc Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 23:12:17 -0800 Subject: [PATCH 1563/1727] use nix-built binary to produce debian package Currently just for `x86_64-unknown-linux-musl`. Theoretically, we can use this same mechanism for `aarch64-unknown-linux-musl`. Practically, I'm not sure just this will even work. 
--- .gitlab-ci.yml | 30 ++++++++---------------------- DEPLOY.md | 10 +++++----- flake.nix | 3 +++ 3 files changed, 16 insertions(+), 27 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b24481b..866ef11 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -43,13 +43,18 @@ ci: - target - .gitlab-ci.d -nix:artifacts: +artifacts: stage: artifacts image: nixos/nix:2.20.4 script: - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl - cp result/bin/conduit x86_64-unknown-linux-musl + - mkdir -p target/release + - cp result/bin/conduit target/release + - direnv exec . cargo deb --no-build + - mv target/debian/*.deb x86_64-unknown-linux-musl.deb + # Since the OCI image package is based on the binary package, this has the # fun side effect of uploading the normal binary too. Conduit users who are # deploying with Nix can leverage this fact by adding our binary cache to @@ -70,29 +75,10 @@ nix:artifacts: paths: - x86_64-unknown-linux-musl - aarch64-unknown-linux-musl + - x86_64-unknown-linux-musl.deb - oci-image-amd64.tar.gz - oci-image-arm64v8.tar.gz -debian:x86_64-unknown-linux-gnu: - stage: artifacts - # See also `rust-toolchain.toml` - image: rust:1.75.0 - script: - - apt-get update && apt-get install -y --no-install-recommends libclang-dev - - cargo install cargo-deb - - cargo deb - - # Make the output less difficult to find - - mv target/debian/*.deb conduit.deb - artifacts: - paths: - - conduit.deb - cache: - key: debian - paths: - - target - - .gitlab-ci.d - .push-oci-image: stage: publish image: docker:25.0.0 @@ -124,7 +110,7 @@ debian:x86_64-unknown-linux-gnu: docker manifest push $IMAGE_NAME:latest fi dependencies: - - nix:artifacts + - artifacts only: - next - master diff --git a/DEPLOY.md b/DEPLOY.md index ccf52c3..dd27968 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -34,11 +34,11 @@ If you use a system with an older glibc version (e.g. 
RHEL8), you might need to | Target | Type | Download | |-|-|-| -| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) | -| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=nix:artifacts) | -| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=nix:artifacts) | -| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=nix:artifacts) | -| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=nix:artifacts) | +| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | +| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) | +| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) | +| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) | +| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) | ```bash $ sudo wget -O /usr/local/bin/matrix-conduit diff --git a/flake.nix b/flake.nix index 460bffc..cc95aec 100644 --- a/flake.nix +++ b/flake.nix @@ -252,6 +252,9 @@ ] ++ (with pkgsHost; [ engage + # Needed for producing Debian packages + cargo-deb + # Needed for Complement go olm From d411e9037c860368e433f9e413fc28b31f3e0e38 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 5 Mar 2024 12:05:50 -0800 Subject: [PATCH 1564/1727] upload all devshell inputs to the cache This will also include attic, so we don't need to explicitly do this in `./bin/nix-build-and-cache` anymore, which is good because that script gets called a good number of times and doing that repeatedly was a bit of a waste. --- .gitlab-ci.yml | 3 +++ bin/nix-build-and-cache | 15 +++++---------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 866ef11..e0ed08e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -36,6 +36,9 @@ ci: stage: ci image: nixos/nix:2.20.4 script: + # Cache the inputs required for the devShell + - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation + - direnv exec . engage cache: key: nix diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index b37ebd8..0eb816c 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -15,17 +15,12 @@ nix run --inputs-from . attic -- login \ https://nix.computer.surgery/conduit \ "$ATTIC_TOKEN" - push_args=( - # Attic and its build dependencies - "$(nix path-info --inputs-from . attic)" - "$(nix path-info --inputs-from . 
attic --derivation)" - - # The target installable and its build dependencies - "$(nix path-info "$INSTALLABLE" --derivation)" + # Push the target installable and its build dependencies + nix run --inputs-from . attic -- \ + push \ + conduit \ + "$(nix path-info "$INSTALLABLE" --derivation)" \ "$(nix path-info "$INSTALLABLE")" - ) - - nix run --inputs-from . attic -- push conduit "${push_args[@]}" else echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" fi From ae69da635b4745468896d43efd1f429d5312ca80 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 4 Mar 2024 23:47:46 -0800 Subject: [PATCH 1565/1727] allow overriding the attic endpoint And also the public key so that pulling from the new endpoint will work. This allows other people to host their own attic instances and configure their (CI) environment to override the default endpoint so e.g. they can take advantage of a binary cache without having write access to the official one. I didn't actually test this change but I think it should work. Also why'd I format the script like that, ew lol --- .gitlab-ci.yml | 4 ++++ bin/nix-build-and-cache | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e0ed08e..0e56598 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -15,6 +15,10 @@ before_script: - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi + # Add alternate binary cache + - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi + # Add crane binary cache - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 0eb816c..350e171 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -9,10 +9,10 @@ INSTALLABLE="$1" nix build "$@" if [ ! -z ${ATTIC_TOKEN+x} ]; then - -nix run --inputs-from . attic -- login \ - conduit \ - https://nix.computer.surgery/conduit \ + nix run --inputs-from . 
attic -- \ + login \ + conduit \ + "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \ "$ATTIC_TOKEN" # Push the target installable and its build dependencies From 0d2f1348daa573b3f0e577ffccfe605c88fab8b5 Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Wed, 6 Mar 2024 11:47:15 +0100 Subject: [PATCH 1566/1727] feat: run ci on demand to prevent unnecessary job executions --- .gitlab-ci.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0e56598..348e7b1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,6 +49,13 @@ ci: paths: - target - .gitlab-ci.d + rules: + # CI on upstream runners (only available for maintainers) + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" + # Manual CI on forks + - if: $IS_UPSTREAM_CI != "true" + when: manual + - if: $CI artifacts: stage: artifacts @@ -85,6 +92,14 @@ artifacts: - x86_64-unknown-linux-musl.deb - oci-image-amd64.tar.gz - oci-image-arm64v8.tar.gz + rules: + # CI required for all MRs + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + # Optional CI on forks + - if: $IS_UPSTREAM_CI != "true" + when: manual + allow_failure: true + - if: $CI .push-oci-image: stage: publish From fa930182aea942380f8db19b5e18152e17d9e634 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 27 Dec 2023 13:22:21 +0000 Subject: [PATCH 1567/1727] fix(appservices): don't panic on empty registration url perf(appservices): cache regex for namespaces --- src/api/appservice_server.rs | 166 ++++++++++---------- src/api/client_server/alias.rs | 34 ++-- src/api/ruma_wrapper/axum.rs | 15 +- src/database/key_value/appservice.rs | 11 +- src/database/key_value/rooms/state_cache.rs | 47 ++---- src/database/mod.rs | 20 ++- src/service/admin/mod.rs | 7 +- src/service/appservice/data.rs | 8 +- src/service/appservice/mod.rs | 114 +++++++++++++- src/service/mod.rs | 7 +- src/service/rooms/state_cache/data.rs | 8 +- src/service/rooms/state_cache/mod.rs | 4 +- src/service/rooms/timeline/mod.rs | 114 ++++++-------- src/service/sending/mod.rs | 19 ++- src/utils/error.rs | 5 + 15 files changed, 335 insertions(+), 244 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 082a1bc..ab4da79 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,105 +1,111 @@ use crate::{services, utils, Error, Result}; use bytes::BytesMut; -use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; +use ruma::api::{ + appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, +}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; +/// Sends a request to an appservice +/// +/// Only returns None if there is no url specified in the appservice registration file #[tracing::instrument(skip(request))] pub(crate) async fn send_request( - registration: serde_yaml::Value, + registration: Registration, request: T, -) -> Result +) -> Option> where T: Debug, { - let destination = registration.get("url").unwrap().as_str().unwrap(); - let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); + if let Some(destination) = registration.url { + let hs_token = registration.hs_token.as_str(); - let mut http_request = request - .try_into_http_request::( - destination, - SendAccessToken::IfRequired(hs_token), - &[MatrixVersion::V1_0], - ) - .unwrap() - .map(|body| body.freeze()); + let mut http_request = request + .try_into_http_request::( + &destination, + 
SendAccessToken::IfRequired(hs_token), + &[MatrixVersion::V1_0], + ) + .unwrap() + .map(|body| body.freeze()); - let mut parts = http_request.uri().clone().into_parts(); - let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains('?') { - "&" - } else { - "?" - }; + let mut parts = http_request.uri().clone().into_parts(); + let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); + let symbol = if old_path_and_query.contains('?') { + "&" + } else { + "?" + }; - parts.path_and_query = Some( - (old_path_and_query + symbol + "access_token=" + hs_token) - .parse() - .unwrap(), - ); - *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); + parts.path_and_query = Some( + (old_path_and_query + symbol + "access_token=" + hs_token) + .parse() + .unwrap(), + ); + *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); - let mut reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); + let mut reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); - *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); + *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); - let url = reqwest_request.url().clone(); - let mut response = match services() - .globals - .default_client() - .execute(reqwest_request) - .await - { - Ok(r) => r, - Err(e) => { + let url = reqwest_request.url().clone(); + let mut response = match services() + .globals + .default_client() + .execute(reqwest_request) + .await + { + Ok(r) => r, + Err(e) => { + warn!( + "Could not send request to appservice {:?} at {}: {}", + registration.id, destination, e + ); + return Some(Err(e.into())); + } + }; + + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { warn!( - "Could not send request to appservice {:?} at {}: {}", - registration.get("id"), + "Appservice returned bad response {} {}\n{}\n{:?}", destination, - e + status, + url, + utils::string_from_bytes(&body) ); - return Err(e.into()); } - }; - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error: {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "Appservice returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) + let response = T::IncomingResponse::try_from_http_response( + http_response_builder + .body(body) + .expect("reqwest body is valid http body"), ); + Some(response.map_err(|_| { + warn!( + "Appservice returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Server returned bad response.") + 
})) + } else { + None } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - warn!( - "Appservice returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Server returned bad response.") - }) } diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7660ca2..d3a6e39 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,6 +1,5 @@ use crate::{services, Error, Result, Ruma}; use rand::seq::SliceRandom; -use regex::Regex; use ruma::{ api::{ appservice, @@ -101,31 +100,28 @@ pub(crate) async fn get_alias_helper( match services().rooms.alias.resolve_local_alias(&room_alias)? { Some(r) => room_id = Some(r), None => { - for (_id, registration) in services().appservice.all()? { - let aliases = registration - .get("namespaces") - .and_then(|ns| ns.get("aliases")) - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - if aliases - .iter() - .any(|aliases| aliases.is_match(room_alias.as_str())) - && services() + for appservice in services() + .appservice + .registration_info + .read() + .await + .values() + { + if appservice.aliases.is_match(room_alias.as_str()) + && if let Some(opt_result) = services() .sending .send_appservice_request( - registration, + appservice.registration.clone(), appservice::query::query_room_alias::v1::Request { room_alias: room_alias.clone(), }, ) .await - .is_ok() + { + opt_result.is_ok() + } else { + false + } { room_id = Some( services() diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index bbd4861..e841f13 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -81,12 +81,9 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); let appservices = services().appservice.all().unwrap(); - let appservice_registration = appservices.iter().find(|(_id, registration)| { - registration - .get("as_token") - .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token == Some(as_token)) - }); + let appservice_registration = appservices + .iter() + .find(|(_id, registration)| Some(registration.as_token.as_str()) == token); let (sender_user, sender_device, sender_servername, from_appservice) = if let Some((_id, registration)) = appservice_registration { @@ -95,11 +92,7 @@ where let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( - registration - .get("sender_localpart") - .unwrap() - .as_str() - .unwrap(), + registration.sender_localpart.as_str(), services().globals.server_name(), ) .unwrap() diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index 9a821a6..3243183 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,10 +1,11 @@ +use ruma::api::appservice::Registration; + use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller - fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { - // TODO: Rumaify - let id = yaml.get("id").unwrap().as_str().unwrap(); + fn register_appservice(&self, yaml: Registration) -> Result { + let id = yaml.id.as_str(); self.id_appserviceregistrations.insert( 
id.as_bytes(), serde_yaml::to_string(&yaml).unwrap().as_bytes(), @@ -32,7 +33,7 @@ impl service::appservice::Data for KeyValueDatabase { Ok(()) } - fn get_registration(&self, id: &str) -> Result> { + fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() .unwrap() @@ -64,7 +65,7 @@ impl service::appservice::Data for KeyValueDatabase { ))) } - fn all(&self) -> Result> { + fn all(&self) -> Result> { self.iter_ids()? .filter_map(|id| id.ok()) .map(move |id| { diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 3dcaf4a..49e3842 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,13 +1,16 @@ use std::{collections::HashSet, sync::Arc}; -use regex::Regex; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, appservice::RegistrationInfo}, + services, utils, Error, Result, +}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -184,46 +187,28 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } #[tracing::instrument(skip(self, room_id, appservice))] - fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - ) -> Result { + fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result { let maybe = self .appservice_in_room_cache .read() .unwrap() .get(room_id) - .and_then(|map| map.get(&appservice.0)) + .and_then(|map| map.get(&appservice.registration.id)) .copied(); if let Some(b) = maybe { Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()).ok() - }); + } else { + let bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + services().globals.server_name(), + ) + .ok(); let in_room = bridge_user_id .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) + userid.map_or(false, |userid| appservice.users.is_match(userid.as_str())) }); self.appservice_in_room_cache @@ -231,11 +216,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { .unwrap() .entry(room_id.to_owned()) .or_default() - .insert(appservice.0.clone(), in_room); + .insert(appservice.registration.id.clone(), in_room); Ok(in_room) - } else { - Ok(false) } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 425ef4e..5b8588c 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -8,7 +8,9 @@ use crate::{ use abstraction::{KeyValueDatabaseEngine, KvTree}; use directories::ProjectDirs; use lru_cache::LruCache; + use ruma::{ + api::appservice::Registration, events::{ push_rules::{PushRulesEvent, PushRulesEventContent}, 
room::message::RoomMessageEventContent, @@ -162,7 +164,7 @@ pub struct KeyValueDatabase { //pub pusher: pusher::PushData, pub(super) senderkey_pusher: Arc, - pub(super) cached_registrations: Arc>>, + pub(super) cached_registrations: Arc>>, pub(super) pdu_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, @@ -967,6 +969,22 @@ impl KeyValueDatabase { ); } + // Inserting registrations into cache + for appservice in services().appservice.all()? { + services() + .appservice + .registration_info + .write() + .await + .insert( + appservice.0, + appservice + .1 + .try_into() + .expect("Should be validated on registration"), + ); + } + // This data is probably outdated db.presenceid_presence.clear()?; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index d99be87..12bc1cf 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -8,6 +8,7 @@ use std::{ use clap::Parser; use regex::Regex; use ruma::{ + api::appservice::Registration, events::{ room::{ canonical_alias::RoomCanonicalAliasEventContent, @@ -335,10 +336,9 @@ impl Service { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = - serde_yaml::from_str::(&appservice_config); + let parsed_config = serde_yaml::from_str::(&appservice_config); match parsed_config { - Ok(yaml) => match services().appservice.register_appservice(yaml) { + Ok(yaml) => match services().appservice.register_appservice(yaml).await { Ok(id) => RoomMessageEventContent::text_plain(format!( "Appservice registered with ID: {id}." )), @@ -361,6 +361,7 @@ impl Service { } => match services() .appservice .unregister_appservice(&appservice_identifier) + .await { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index 744f0f9..ab19a50 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,8 +1,10 @@ +use ruma::api::appservice::Registration; + use crate::Result; pub trait Data: Send + Sync { /// Registers an appservice and returns the ID to the caller - fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + fn register_appservice(&self, yaml: Registration) -> Result; /// Remove an appservice registration /// @@ -11,9 +13,9 @@ pub trait Data: Send + Sync { /// * `service_name` - the name you send to register the service previously fn unregister_appservice(&self, service_name: &str) -> Result<()>; - fn get_registration(&self, id: &str) -> Result>; + fn get_registration(&self, id: &str) -> Result>; fn iter_ids<'a>(&'a self) -> Result> + 'a>>; - fn all(&self) -> Result>; + fn all(&self) -> Result>; } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 3052964..40fa3ee 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,16 +1,113 @@ mod data; +use std::collections::HashMap; + pub use data::Data; -use crate::Result; +use regex::RegexSet; +use ruma::api::appservice::{Namespace, Registration}; +use tokio::sync::RwLock; + +use crate::{services, Result}; + +/// Compiled regular expressions for a namespace +pub struct NamespaceRegex { + pub exclusive: Option, + pub non_exclusive: Option, +} + +impl NamespaceRegex { + /// Checks if this namespace has rights to a namespace + pub fn is_match(&self, heystack: &str) -> bool { + if 
self.is_exclusive_match(heystack) { + return true; + } + + if let Some(non_exclusive) = &self.non_exclusive { + if non_exclusive.is_match(heystack) { + return true; + } + } + false + } + + /// Checks if this namespace has exlusive rights to a namespace + pub fn is_exclusive_match(&self, heystack: &str) -> bool { + if let Some(exclusive) = &self.exclusive { + if exclusive.is_match(heystack) { + return true; + } + } + false + } +} + +impl TryFrom> for NamespaceRegex { + fn try_from(value: Vec) -> Result { + let mut exclusive = vec![]; + let mut non_exclusive = vec![]; + + for namespace in value { + if namespace.exclusive { + exclusive.push(namespace.regex); + } else { + non_exclusive.push(namespace.regex); + } + } + + Ok(NamespaceRegex { + exclusive: if exclusive.is_empty() { + None + } else { + Some(RegexSet::new(exclusive)?) + }, + non_exclusive: if non_exclusive.is_empty() { + None + } else { + Some(RegexSet::new(non_exclusive)?) + }, + }) + } + + type Error = regex::Error; +} + +/// Compiled regular expressions for an appservice +pub struct RegistrationInfo { + pub registration: Registration, + pub users: NamespaceRegex, + pub aliases: NamespaceRegex, + pub rooms: NamespaceRegex, +} + +impl TryFrom for RegistrationInfo { + fn try_from(value: Registration) -> Result { + Ok(RegistrationInfo { + users: value.namespaces.users.clone().try_into()?, + aliases: value.namespaces.aliases.clone().try_into()?, + rooms: value.namespaces.rooms.clone().try_into()?, + registration: value, + }) + } + + type Error = regex::Error; +} pub struct Service { pub db: &'static dyn Data, + pub registration_info: RwLock>, } impl Service { /// Registers an appservice and returns the ID to the caller - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + pub async fn register_appservice(&self, yaml: Registration) -> Result { + services() + .appservice + .registration_info + .write() + .await + .insert(yaml.id.clone(), yaml.clone().try_into()?); + self.db.register_appservice(yaml) } @@ -19,11 +116,18 @@ impl Service { /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> { + services() + .appservice + .registration_info + .write() + .await + .remove(service_name); + self.db.unregister_appservice(service_name) } - pub fn get_registration(&self, id: &str) -> Result> { + pub fn get_registration(&self, id: &str) -> Result> { self.db.get_registration(id) } @@ -31,7 +135,7 @@ impl Service { self.db.iter_ids() } - pub fn all(&self) -> Result> { + pub fn all(&self) -> Result> { self.db.all() } } diff --git a/src/service/mod.rs b/src/service/mod.rs index 8f9fb0a..045ccd1 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use lru_cache::LruCache; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, RwLock}; use crate::{Config, Result}; @@ -56,7 +56,10 @@ impl Services { config: Config, ) -> Result { Ok(Self { - appservice: appservice::Service { db }, + appservice: appservice::Service { + db, + registration_info: RwLock::new(HashMap::new()), + }, pusher: pusher::Service { db }, rooms: rooms::Service { alias: rooms::alias::Service { db }, diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 8921909..b511919 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,6 +1,6 @@ use 
std::{collections::HashSet, sync::Arc}; -use crate::Result; +use crate::{service::appservice::RegistrationInfo, Result}; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, @@ -22,11 +22,7 @@ pub trait Data: Send + Sync { fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; - fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - ) -> Result; + fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result; /// Makes a user forget a room. fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index ef1ad61..c108695 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -16,7 +16,7 @@ use ruma::{ }; use tracing::warn; -use crate::{services, Error, Result}; +use crate::{service::appservice::RegistrationInfo, services, Error, Result}; pub struct Service { pub db: &'static dyn Data, @@ -205,7 +205,7 @@ impl Service { pub fn appservice_in_room( &self, room_id: &RoomId, - appservice: &(String, serde_yaml::Value), + appservice: &RegistrationInfo, ) -> Result { self.db.appservice_in_room(room_id, appservice) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 097cc82..1df1db5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -7,7 +7,7 @@ use std::{ }; pub use data::Data; -use regex::Regex; + use ruma::{ api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, @@ -21,8 +21,7 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, serde::Base64, - state_res, - state_res::{Event, RoomVersion}, + state_res::{self, Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; @@ -33,7 +32,10 @@ use tracing::{error, info, warn}; use crate::{ api::server_server, - service::pdu::{EventHash, PduBuilder}, + service::{ + appservice::NamespaceRegex, + pdu::{EventHash, PduBuilder}, + }, services, utils, Error, PduEvent, Result, }; @@ -522,15 +524,21 @@ impl Service { } } - for appservice in services().appservice.all()? { + for appservice in services() + .appservice + .registration_info + .read() + .await + .values() + { if services() .rooms .state_cache - .appservice_in_room(&pdu.room_id, &appservice)? + .appservice_in_room(&pdu.room_id, appservice)? 
{ services() .sending - .send_pdu_appservice(appservice.0, pdu_id.clone())?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; continue; } @@ -542,73 +550,41 @@ impl Service { .as_ref() .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()) - .ok() - }) - { - if state_key_uid == &appservice_uid { - services() - .sending - .send_pdu_appservice(appservice.0, pdu_id.clone())?; - continue; - } + let appservice_uid = appservice.registration.sender_localpart.as_str(); + if state_key_uid == appservice_uid { + services().sending.send_pdu_appservice( + appservice.registration.id.clone(), + pdu_id.clone(), + )?; + continue; } } } - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); + let matching_users = |users: &NamespaceRegex| { + appservice.users.is_match(pdu.sender.as_str()) + || pdu.kind == TimelineEventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: &NamespaceRegex| { + services() + .rooms + .alias + .local_aliases_for_room(&pdu.room_id) + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }; - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == TimelineEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - services() - .rooms - .alias - .local_aliases_for_room(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) - { - services() - .sending - .send_pdu_appservice(appservice.0, pdu_id.clone())?; - } + if matching_aliases(&appservice.aliases) + || appservice.rooms.is_match(pdu.room_id.as_str()) + || matching_users(&appservice.users) + { + services() + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index f80c4f0..bbacfde 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -22,7 +22,7 @@ use base64::{engine::general_purpose, Engine as _}; use ruma::{ api::{ - appservice, + appservice::{self, Registration}, federation::{ self, transactions::edu::{ @@ -484,7 +484,7 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; - let response = appservice_server::send_request( + let response = match appservice_server::send_request( services() .appservice .get_registration(id) @@ -511,8 +511,12 @@ impl Service { }, ) .await - .map(|_response| kind.clone()) - 
.map_err(|e| (kind, e)); + { + None => Ok(kind.clone()), + Some(op_resp) => op_resp + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)), + }; drop(permit); @@ -698,12 +702,15 @@ impl Service { response } + /// Sends a request to an appservice + /// + /// Only returns None if there is no url specified in the appservice registration file #[tracing::instrument(skip(self, registration, request))] pub async fn send_appservice_request( &self, - registration: serde_yaml::Value, + registration: Registration, request: T, - ) -> Result + ) -> Option> where T: Debug, { diff --git a/src/utils/error.rs b/src/utils/error.rs index 765a31b..0439028 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -54,6 +54,11 @@ pub enum Error { #[from] source: reqwest::Error, }, + #[error("Could build regular expression: {source}")] + RegexError { + #[from] + source: regex::Error, + }, #[error("{0}")] FederationError(OwnedServerName, RumaError), #[error("Could not do this io: {source}")] From 10f3f9da494bf41899c83bdef62ebb83281ac363 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 28 Jan 2024 15:47:37 -0800 Subject: [PATCH 1568/1727] switch/update rocksdb crate This fork was created because the original seems de-facto unmaintained. --- Cargo.lock | 71 +++++++++++++++++++++++------------------------------- Cargo.toml | 12 ++++++--- flake.nix | 18 ++++++++++++-- 3 files changed, 55 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e99928e..8821700 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -204,17 +204,16 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bindgen" -version = "0.65.1" +version = "0.69.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", - "prettyplease", "proc-macro2", "quote", "regex", @@ -400,10 +399,10 @@ dependencies = [ "regex", "reqwest", "ring", - "rocksdb", "ruma", "rusqlite", "rust-argon2", + "rust-rocksdb", "sd-notify", "serde", "serde_html_form", @@ -1278,22 +1277,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "librocksdb-sys" -version = "0.11.0+8.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsqlite3-sys" version = "0.26.0" @@ -1790,16 +1773,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "prettyplease" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" -dependencies = [ - "proc-macro2", - "syn 2.0.48", -] - [[package]] name = "proc-macro-crate" version = "2.0.0" @@ -2005,16 +1978,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "rocksdb" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = 
"ruma" version = "0.9.4" @@ -2230,6 +2193,32 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rust-librocksdb-sys" +version = "0.18.1+8.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef22f434855ceb8daf99073fcf909d957ad8705f5b530154e47978ae68e867c" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", +] + +[[package]] +name = "rust-rocksdb" +version = "0.22.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62223f035e676bb69da2ab9487e6f710da82be89473c658c51ed3b1a60c4b4a6" +dependencies = [ + "libc", + "rust-librocksdb-sys", +] + [[package]] name = "rustc-demangle" version = "0.1.23" diff --git a/Cargo.toml b/Cargo.toml index e8c1c8b..7bb3b5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -97,8 +97,6 @@ threadpool = "1.8.1" # Used for ruma wrapper serde_html_form = "0.2.0" -rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } - thread_local = "1.1.7" # used for TURN server authentication hmac = "0.12.1" @@ -116,6 +114,14 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } +[dependencies.rust-rocksdb] +version = "0.22.7" +optional = true +features = [ + "multi-threaded-cf", + "zstd", +] + [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", features = ["resource"] } @@ -125,7 +131,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] #backend_heed = ["heed", "crossbeam"] -backend_rocksdb = ["rocksdb"] +backend_rocksdb = ["rust-rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] diff --git a/flake.nix b/flake.nix index cc95aec..cdfe91f 100644 --- a/flake.nix +++ b/flake.nix @@ -58,9 +58,23 @@ pkgs.pkgsBuildHost.rustPlatform.bindgenHook ]; + rocksdb' = pkgs: + let + version = "8.11.3"; + in + pkgs.rocksdb.overrideAttrs (old: { + inherit version; + src = pkgs.fetchFromGitHub { + owner = "facebook"; + repo = "rocksdb"; + rev = "v${version}"; + hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs="; + }; + }); + env = pkgs: { - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; + ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; } // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { ROCKSDB_STATIC = ""; From daed4cdddfcb84e9b648b15bcfcd89c4bbde834b Mon Sep 17 00:00:00 2001 From: tezlm Date: Sat, 14 Oct 2023 06:20:29 -0700 Subject: [PATCH 1569/1727] Remove log config modification --- src/main.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index 524f688..aaa3434 100644 --- a/src/main.rs +++ b/src/main.rs @@ -65,8 +65,6 @@ async fn main() { config.warn_deprecated(); - let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log); - if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline() @@ -76,7 +74,7 @@ async fn main() { .unwrap(); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let filter_layer = match EnvFilter::try_new(&log) { + let filter_layer = match EnvFilter::try_new(&config.log) { Ok(s) => s, Err(e) => { eprintln!( @@ -103,7 +101,7 @@ async fn main() { } else { let registry = 
tracing_subscriber::Registry::default(); let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = match EnvFilter::try_new(&log) { + let filter_layer = match EnvFilter::try_new(&config.log) { Ok(s) => s, Err(e) => { eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}"); From 9ff9e85ebe2b6358dd1e554a8ba5ce34f4faedf0 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 22:33:54 -0700 Subject: [PATCH 1570/1727] add newline to end of file Please, people. --- docker/docker-compose.with-traefik.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml index fda942b..e3289ba 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -95,4 +95,4 @@ volumes: acme: networks: - proxy: \ No newline at end of file + proxy: From 5f053a93574e6bc2a36bd5b9a1c70c4bf6f5c8fa Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 22:34:25 -0700 Subject: [PATCH 1571/1727] link to example config instead of copying it DRY FTW --- DEPLOY.md | 53 ++++------------------------------------------------- 1 file changed, 4 insertions(+), 49 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index dd27968..e87fca3 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -139,57 +139,12 @@ $ sudo systemctl daemon-reload ## Creating the Conduit configuration file -Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment -to read it. You need to change at least the server name.** +Now we need to create the Conduit's config file in +`/etc/matrix-conduit/conduit.toml`. Paste in the contents of +[`conduit-example.toml`](./conduit-example.toml) **and take a moment to read it. +You need to change at least the server name.** You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. -```toml -[global] -# The server_name is the pretty name of this server. It is used as a suffix for user -# and room ids. Examples: matrix.org, conduit.rs - -# The Conduit server needs all /_matrix/ requests to be reachable at -# https://your.server.name/ on port 443 (client-server) and 8448 (federation). - -# If that's not possible for you, you can create /.well-known files to redirect -# requests. See -# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and -# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information - -# YOU NEED TO EDIT THIS -#server_name = "your.server.name" - -# This is the only directory where Conduit will save its data -database_path = "/var/lib/matrix-conduit/" -database_backend = "rocksdb" - -# The port Conduit will be running on. You need to set up a reverse proxy in -# your web server (e.g. apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port -# Docker users: Don't change this, you'll need to map an external port to this. -port = 6167 - -# Max size for uploads -max_request_size = 20_000_000 # in bytes - -# Enables registration. If set to false, no users can register on this server. -allow_registration = true - -allow_federation = true -allow_check_for_updates = true - -# Server to get public keys from. 
You probably shouldn't change this -trusted_servers = ["matrix.org"] - -#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "warn,state_res=warn,rocket=off,_=off,sled=off" - -address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy -#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. -``` - ## Setting the correct file permissions As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on From ed5bd2325596ae73a7e81e13aafdf774cc403449 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 22:36:48 -0700 Subject: [PATCH 1572/1727] remove explicit references to log config They're all stale. Sled was yote long ago. --- debian/postinst | 1 - docker/README.md | 1 - docker/docker-compose.for-traefik.yml | 1 - docker/docker-compose.with-traefik.yml | 1 - docker/docker-compose.yml | 1 - 5 files changed, 5 deletions(-) diff --git a/debian/postinst b/debian/postinst index 69a766a..110f22d 100644 --- a/debian/postinst +++ b/debian/postinst @@ -78,7 +78,6 @@ allow_check_for_updates = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "warn,state_res=warn,rocket=off,_=off,sled=off" EOF fi ;; diff --git a/docker/README.md b/docker/README.md index b34f9d8..2448e64 100644 --- a/docker/README.md +++ b/docker/README.md @@ -64,7 +64,6 @@ docker run -d -p 8448:6167 \ -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ - -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ --name conduit ``` diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml index bed734f..82bb55b 100644 --- a/docker/docker-compose.for-traefik.yml +++ b/docker/docker-compose.for-traefik.yml @@ -32,7 +32,6 @@ services: CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml index e3289ba..5860327 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -33,7 +33,6 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'true' # CONDUIT_ALLOW_FEDERATION: 'true' diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 5bcf84f..97f91da 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -32,7 +32,6 @@ services: CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this # From 516876f8ef5f88c42b44ef1c485dd9e69cb9659b Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 22:37:22 -0700 Subject: [PATCH 1573/1727] remove final reference to sled in log config --- 
src/config/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index a4d7cca..4605855 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -264,7 +264,7 @@ fn default_trusted_servers() -> Vec { } fn default_log() -> String { - "warn,state_res=warn,_=off,sled=off".to_owned() + "warn,state_res=warn,_=off".to_owned() } fn default_turn_ttl() -> u64 { From dc89fbed3ad6296e72c68f4209116cf05870fc27 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 22:38:02 -0700 Subject: [PATCH 1574/1727] document log config syntax, don't give example Because the old one was stale. Shocking! --- conduit-example.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/conduit-example.toml b/conduit-example.toml index 836db65..a52121a 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -51,7 +51,11 @@ enable_lightning_bolt = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "warn,state_res=warn,rocket=off,_=off,sled=off" + +# Controls the log verbosity. See also [here][0]. +# +# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives +#log = "..." address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. From d1bc7fcfd2493b77a4f701e333d3eb0faf5e5109 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 10 Mar 2024 23:39:02 -0700 Subject: [PATCH 1575/1727] rename the `rust-rocksdb` crate to just `rocksdb` This way the old `cfg`s still work and we don't need to constantly remind ourselves what programming language we're using in `use` statements. Also fixes a problem where RocksDB users couldn't start Conduit because the old `cfg`s were using the original crate's name instead of the `backend_rocksdb` feature name for some reason. Maybe that should be changed, but I'm not sure. 
--- Cargo.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7bb3b5d..08188cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,7 +114,8 @@ async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } -[dependencies.rust-rocksdb] +[dependencies.rocksdb] +package = "rust-rocksdb" version = "0.22.7" optional = true features = [ @@ -131,7 +132,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] #backend_heed = ["heed", "crossbeam"] -backend_rocksdb = ["rust-rocksdb"] +backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] From 4af691d737b29f6fa7e98d302279f5a85a2c879a Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Mon, 11 Mar 2024 11:43:05 +0100 Subject: [PATCH 1576/1727] fix(ci): avoid duplicate pipelines --- .gitlab-ci.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 348e7b1..3b7016f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -7,6 +7,15 @@ variables: # Makes some things print in color TERM: ansi +# Avoid duplicate pipelines +# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS + when: never + - if: $CI + before_script: # Enable nix-command and flakes - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi @@ -52,10 +61,14 @@ ci: rules: # CI on upstream runners (only available for maintainers) - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" + # Manual CI on unprotected branches that are not MRs + - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" + when: manual # Manual CI on forks - if: $IS_UPSTREAM_CI != "true" when: manual - if: $CI + interruptible: true artifacts: stage: artifacts @@ -100,6 +113,7 @@ artifacts: when: manual allow_failure: true - if: $CI + interruptible: true .push-oci-image: stage: publish From 9a81a49c6ae1217928658dd1d6742868ec40931e Mon Sep 17 00:00:00 2001 From: Max Cohen Date: Sun, 25 Sep 2022 10:59:19 +0200 Subject: [PATCH 1577/1727] Add argument parser for the conduit executable Allow fetching the version with `conduit --version`. Fixes #285. --- src/clap.rs | 13 +++++++++++++ src/lib.rs | 1 + src/main.rs | 2 ++ 3 files changed, 16 insertions(+) create mode 100644 src/clap.rs diff --git a/src/clap.rs b/src/clap.rs new file mode 100644 index 0000000..444f21b --- /dev/null +++ b/src/clap.rs @@ -0,0 +1,13 @@ +//! 
Integration with `clap` + +use clap::Parser; + +/// Command line arguments +#[derive(Parser)] +#[clap(about, version)] +pub struct Args {} + +/// Parse command line arguments into structured data +pub fn parse() -> Args { + Args::parse() +} diff --git a/src/lib.rs b/src/lib.rs index 70c6f37..5a89f80 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,5 @@ pub mod api; +pub mod clap; mod config; mod database; mod service; diff --git a/src/main.rs b/src/main.rs index 524f688..e586184 100644 --- a/src/main.rs +++ b/src/main.rs @@ -44,6 +44,8 @@ static GLOBAL: Jemalloc = Jemalloc; #[tokio::main] async fn main() { + clap::parse(); + // Initialize config let raw_config = Figment::new() From ac22b1bed13c298b566acf503cba47e7e9bc072a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Mon, 11 Mar 2024 20:11:59 -0700 Subject: [PATCH 1578/1727] allow including extra info in `--version` output --- Cargo.toml | 2 +- flake.nix | 1 + src/clap.rs | 16 +++++++++++++++- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 08188cb..796791b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,7 @@ thread_local = "1.1.7" hmac = "0.12.1" sha-1 = "0.10.1" # used for conduit's CLI and admin room command parsing -clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } +clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] } futures-util = { version = "0.3.28", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.8", features = ["env", "toml"] } diff --git a/flake.nix b/flake.nix index cdfe91f..97f6835 100644 --- a/flake.nix +++ b/flake.nix @@ -73,6 +73,7 @@ }); env = pkgs: { + CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev; ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; } diff --git a/src/clap.rs b/src/clap.rs index 444f21b..170d2a1 100644 --- a/src/clap.rs +++ b/src/clap.rs @@ -2,9 +2,23 @@ use clap::Parser; +/// Returns the current version of the crate with extra info if supplied +/// +/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to +/// include it in parenthesis after the SemVer version. A common value are git +/// commit hashes. 
+fn version() -> String { + let cargo_pkg_version = env!("CARGO_PKG_VERSION"); + + match option_env!("CONDUIT_VERSION_EXTRA") { + Some(x) => format!("{} ({})", cargo_pkg_version, x), + None => cargo_pkg_version.to_owned(), + } +} + /// Command line arguments #[derive(Parser)] -#[clap(about, version)] +#[clap(about, version = version())] pub struct Args {} /// Parse command line arguments into structured data From 425660472cdbb66fd1bfea30f387598b680ed64a Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Wed, 13 Mar 2024 18:01:41 +0100 Subject: [PATCH 1579/1727] docs: build docs using mdBook --- .gitignore | 3 ++ .gitlab/route-map.yml | 3 ++ README.md | 31 ++++++++++--------- book.toml | 18 +++++++++++ debian/README.md | 2 +- docs/SUMMARY.md | 12 +++++++ APPSERVICES.md => docs/appservices.md | 0 docs/configuration.md | 5 +++ docs/deploying.md | 8 +++++ docs/deploying/debian.md | 1 + .../deploying}/docker-compose.for-traefik.yml | 0 .../deploying}/docker-compose.override.yml | 0 .../docker-compose.with-traefik.yml | 0 {docker => docs/deploying}/docker-compose.yml | 0 docker/README.md => docs/deploying/docker.md | 7 ++--- nix/README.md => docs/deploying/nix.md | 0 DEPLOY.md => docs/deploying/simple.md | 10 +++--- docs/introduction.md | 13 ++++++++ TURN.md => docs/turn.md | 2 +- 19 files changed, 90 insertions(+), 25 deletions(-) create mode 100644 .gitlab/route-map.yml create mode 100644 book.toml create mode 100644 docs/SUMMARY.md rename APPSERVICES.md => docs/appservices.md (100%) create mode 100644 docs/configuration.md create mode 100644 docs/deploying.md create mode 100644 docs/deploying/debian.md rename {docker => docs/deploying}/docker-compose.for-traefik.yml (100%) rename {docker => docs/deploying}/docker-compose.override.yml (100%) rename {docker => docs/deploying}/docker-compose.with-traefik.yml (100%) rename {docker => docs/deploying}/docker-compose.yml (100%) rename docker/README.md => docs/deploying/docker.md (97%) rename nix/README.md => docs/deploying/nix.md (100%) rename DEPLOY.md => docs/deploying/simple.md (97%) create mode 100644 docs/introduction.md rename TURN.md => docs/turn.md (97%) diff --git a/.gitignore b/.gitignore index a34d70a..73ce2e1 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,6 @@ cached_target # Gitlab CI cache /.gitlab-ci.d + +# mdbook output +public/ \ No newline at end of file diff --git a/.gitlab/route-map.yml b/.gitlab/route-map.yml new file mode 100644 index 0000000..2c23079 --- /dev/null +++ b/.gitlab/route-map.yml @@ -0,0 +1,3 @@ +# Docs: Map markdown to html files +- source: /docs/(.+)\.md/ + public: '\1.html' \ No newline at end of file diff --git a/README.md b/README.md index bf7bde5..0026f07 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,15 @@ # Conduit -### A Matrix homeserver written in Rust + +### A Matrix homeserver written in Rust + + +Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information. +Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository. + + #### What is Matrix? + [Matrix](https://matrix.org) is an open network for secure and decentralized communication. Users from every Matrix homeserver can chat with users from all other Matrix servers. You can even use bridges (also called Matrix appservices) @@ -30,16 +38,9 @@ There are still a few important features missing: - E2EE emoji comparison over federation (E2EE chat works) - Outgoing read receipts, typing, presence over federation (incoming works) + -#### How can I deploy my own? 
- -- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) -- Debian package: [debian/README.md](debian/README.md) -- Nix/NixOS: [nix/README.md](nix/README.md) -- Docker: [docker/README.md](docker/README.md) - -If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). - + #### How can I contribute? 1. Look for an issue you would like to work on and make sure no one else is currently working on it. @@ -56,7 +57,6 @@ If you have any questions, feel free to - Send an direct message to `@timokoesters:fachschaften.org` on Matrix - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) - #### Thanks to Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. @@ -68,10 +68,11 @@ Thanks to the contributors to Conduit and all libraries we use, for example: #### Donate -Liberapay: \ -Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` +- Liberapay: +- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` #### Logo -Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \ -Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md +- Lightning Bolt Logo: +- Logo License: + diff --git a/book.toml b/book.toml new file mode 100644 index 0000000..e25746c --- /dev/null +++ b/book.toml @@ -0,0 +1,18 @@ +[book] +title = "Conduit" +description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol" +language = "en" +multilingual = false +src = "docs" + +[build] +build-dir = "public" +create-missing = true + +[output.html] +git-repository-url = "https://gitlab.com/famedly/conduit" +edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}" +git-repository-icon = "fa-git-square" + +[output.html.search] +limit-results = 15 diff --git a/debian/README.md b/debian/README.md index 443be76..4ddb614 100644 --- a/debian/README.md +++ b/debian/README.md @@ -5,7 +5,7 @@ Installation ------------ Information about downloading, building and deploying the Debian package, see -the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md). +the "Installing Conduit" section in the Deploying docs. All following sections until "Setting up the Reverse Proxy" be ignored because this is handled automatically by the packaging. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md new file mode 100644 index 0000000..e7ed05d --- /dev/null +++ b/docs/SUMMARY.md @@ -0,0 +1,12 @@ +# Summary + +- [Introduction](introduction.md) + +- [Example configuration](configuration.md) +- [Deployment options](deploying.md) + - [Simple (Recommended)](deploying/simple.md) + - [Debian](deploying/debian.md) + - [Docker](deploying/docker.md) + - [Nix](deploying/nix.md) +- [TURN](turn.md) +- [Appservices](appservices.md) diff --git a/APPSERVICES.md b/docs/appservices.md similarity index 100% rename from APPSERVICES.md rename to docs/appservices.md diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 0000000..a47e5ff --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,5 @@ +# Example configuration + +``` toml +{{#include ../conduit-example.toml}} +``` diff --git a/docs/deploying.md b/docs/deploying.md new file mode 100644 index 0000000..3694f6b --- /dev/null +++ b/docs/deploying.md @@ -0,0 +1,8 @@ +# Deployment options + +There are several ways to deploy a Conduit server. + +- [Simple (Recommended)](deploying/simple.md) - This is the recommended way to set up Conduit. 
+- [Debian](deploying/debian.md) - If you're using a debian-based system, you may find the `deb` package useful. +- [Docker](deploying/docker.md) - We provide multi-arch OCI images for Conduit. +- [Nix](deploying/nix.md) - Nix flake based setup. diff --git a/docs/deploying/debian.md b/docs/deploying/debian.md new file mode 100644 index 0000000..2e8a544 --- /dev/null +++ b/docs/deploying/debian.md @@ -0,0 +1 @@ +{{#include ../../debian/README.md}} diff --git a/docker/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml similarity index 100% rename from docker/docker-compose.for-traefik.yml rename to docs/deploying/docker-compose.for-traefik.yml diff --git a/docker/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml similarity index 100% rename from docker/docker-compose.override.yml rename to docs/deploying/docker-compose.override.yml diff --git a/docker/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml similarity index 100% rename from docker/docker-compose.with-traefik.yml rename to docs/deploying/docker-compose.with-traefik.yml diff --git a/docker/docker-compose.yml b/docs/deploying/docker-compose.yml similarity index 100% rename from docker/docker-compose.yml rename to docs/deploying/docker-compose.yml diff --git a/docker/README.md b/docs/deploying/docker.md similarity index 97% rename from docker/README.md rename to docs/deploying/docker.md index 2448e64..4a38b30 100644 --- a/docker/README.md +++ b/docs/deploying/docker.md @@ -69,7 +69,7 @@ docker run -d -p 8448:6167 \ or you can use [docker-compose](#docker-compose). -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. @@ -87,8 +87,7 @@ Depending on your proxy setup, you can use one of the following files; When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want for your server. - -Additional info about deploying Conduit can be found [here](../DEPLOY.md). +Additional info about deploying Conduit can be found [here](simple.md). ### Build @@ -130,7 +129,7 @@ So...step by step: 1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename. 2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. -3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. +3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. 
Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 5. Create the files needed by the `well-known` service. diff --git a/nix/README.md b/docs/deploying/nix.md similarity index 100% rename from nix/README.md rename to docs/deploying/nix.md diff --git a/DEPLOY.md b/docs/deploying/simple.md similarity index 97% rename from DEPLOY.md rename to docs/deploying/simple.md index e87fca3..9542bf1 100644 --- a/DEPLOY.md +++ b/docs/deploying/simple.md @@ -1,4 +1,6 @@ -# Deploying Conduit +# Simple setup + +This is the recommended way to set up Conduit. It is the easiest way to get started and is suitable for most use cases. > ## Getting help > @@ -141,7 +143,7 @@ $ sudo systemctl daemon-reload Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste in the contents of -[`conduit-example.toml`](./conduit-example.toml) **and take a moment to read it. +[`conduit-example.toml`](../configuration.md) **and take a moment to read it. You need to change at least the server name.** You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. @@ -305,8 +307,8 @@ $ curl https://your.server.name:8448/_matrix/client/versions ## Audio/Video calls -For Audio/Video call functionality see the [TURN Guide](TURN.md). +For Audio/Video call functionality see the [TURN Guide](../turn.md). ## Appservices -If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). +If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md). diff --git a/docs/introduction.md b/docs/introduction.md new file mode 100644 index 0000000..da34ab6 --- /dev/null +++ b/docs/introduction.md @@ -0,0 +1,13 @@ +# Conduit + +{{#include ../README.md:catchphrase}} + +{{#include ../README.md:body}} + +#### How can I deploy my own? + +- [Deployment options](deploying.md) + +If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md). + +{{#include ../README.md:footer}} diff --git a/TURN.md b/docs/turn.md similarity index 97% rename from TURN.md rename to docs/turn.md index 63c1e99..a61f1b1 100644 --- a/TURN.md +++ b/docs/turn.md @@ -22,4 +22,4 @@ turn_secret = "ADD SECRET HERE" ## Apply settings -Restart Conduit. \ No newline at end of file +Restart Conduit. 
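A note on the mdBook setup added in the patch above: the book can be previewed locally before relying on the CI job or the Nix packaging introduced in the following patches. This is a minimal sketch, assuming the `mdbook` CLI is installed on the host (it is not vendored by this repository); per `book.toml`, the rendered output is written to `public/`:

```sh
# Render the book from the docs/ sources into public/ (build-dir set in book.toml)
mdbook build

# Or serve it with live reload while editing; --open launches a browser
mdbook serve --open
```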
From 4f8d3953b36e8a67deba44ef524e4e032453c575 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 14 Mar 2024 20:19:02 -0700 Subject: [PATCH 1580/1727] add nix output for the book --- flake.nix | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/flake.nix b/flake.nix index cdfe91f..0e699e1 100644 --- a/flake.nix +++ b/flake.nix @@ -207,6 +207,35 @@ packages = { default = package pkgsHost; oci-image = mkOciImage pkgsHost self.packages.${system}.default; + + book = + let + package = self.packages.${system}.default; + in + pkgsHost.stdenv.mkDerivation { + pname = "${package.pname}-book"; + version = package.version; + + src = nix-filter { + root = ./.; + include = [ + "book.toml" + "conduit-example.toml" + "README.md" + "debian/README.md" + "docs" + ]; + }; + + nativeBuildInputs = (with pkgsHost; [ + mdbook + ]); + + buildPhase = '' + mdbook build + mv public $out + ''; + }; } // builtins.listToAttrs From 6800e5fd185095ec84bdd692b2bd45502f94891a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 14 Mar 2024 20:27:44 -0700 Subject: [PATCH 1581/1727] build book in ci, deploy it to gitlab pages --- .gitlab-ci.yml | 17 +++++++++++++++++ engage.toml | 10 ++++++++++ flake.nix | 3 +++ 3 files changed, 30 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3b7016f..8c880b9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -98,6 +98,10 @@ artifacts: - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl - cp result oci-image-arm64v8.tar.gz + + - ./bin/nix-build-and-cache .#book + # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 + - cp -r --dereference result public artifacts: paths: - x86_64-unknown-linux-musl @@ -105,6 +109,7 @@ artifacts: - x86_64-unknown-linux-musl.deb - oci-image-amd64.tar.gz - oci-image-arm64v8.tar.gz + - public rules: # CI required for all MRs - if: $CI_PIPELINE_SOURCE == "merge_request_event" @@ -165,3 +170,15 @@ oci-image:push-dockerhub: IMAGE_NAME: matrixconduit/matrix-conduit before_script: - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD + +pages: + stage: publish + dependencies: + - artifacts + only: + - next + script: + - "true" + artifacts: + paths: + - public diff --git a/engage.toml b/engage.toml index 3e8884e..cb28416 100644 --- a/engage.toml +++ b/engage.toml @@ -30,6 +30,11 @@ name = "cargo-clippy" group = "versions" script = "cargo clippy -- --version" +[[task]] +name = "lychee" +group = "versions" +script = "lychee --version" + [[task]] name = "cargo-fmt" group = "lints" @@ -51,6 +56,11 @@ name = "cargo-clippy" group = "lints" script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" +[[task]] +name = "lychee" +group = "lints" +script = "lychee --offline docs" + [[task]] name = "cargo" group = "tests" diff --git a/flake.nix b/flake.nix index 0e699e1..9cd826b 100644 --- a/flake.nix +++ b/flake.nix @@ -304,6 +304,9 @@ # Needed for our script for Complement jq + + # Needed for finding broken markdown links + lychee ]); }; }); From a3968725b4938fec2fcbd24e655f8b5ba2d53490 Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Fri, 15 Mar 2024 12:23:09 +0100 Subject: [PATCH 1582/1727] chore: add EditorConfig --- .editorconfig | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..a4e9e43 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,15 @@ +# EditorConfig is awesome: https://EditorConfig.org + +root = true + 
+[*] +charset = utf-8 +end_of_line = lf +tab_width = 4 +indent_size = 4 +indent_style = space +insert_final_newline = true +max_line_length = 120 + +[*.nix] +indent_size = 2 From 120035685b225a318844f8aed9b4088644fe9ff9 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 17 Mar 2024 17:13:34 +0000 Subject: [PATCH 1583/1727] docs: point people to the matrix client list instead of element --- README.md | 3 +-- docs/deploying/simple.md | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0026f07..474a524 100644 --- a/README.md +++ b/README.md @@ -23,8 +23,7 @@ friends or company. #### Can I try it out? -Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for -example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. +Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login. Server hosting for conduit.rs is donated by the Matrix.org Foundation. diff --git a/docs/deploying/simple.md b/docs/deploying/simple.md index 9542bf1..ea12a28 100644 --- a/docs/deploying/simple.md +++ b/docs/deploying/simple.md @@ -289,7 +289,7 @@ $ sudo systemctl enable conduit ## How do I know it works? -You can open , enter your homeserver and try to register. +You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature. You can also use these commands as a quick health check. 
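The "quick health check" referred to at the end of the hunk above is not shown in this diff; elsewhere in the deployment guide it amounts to querying the client API versions endpoint. A minimal sketch, with `your.server.name` as a placeholder, covering both the reverse-proxied port and the federation port 8448 that the guide expects to be reachable:

```sh
# Client-server API through the reverse proxy (port 443)
curl https://your.server.name/_matrix/client/versions

# Same endpoint over the federation port
curl https://your.server.name:8448/_matrix/client/versions
```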
From 5f0bea69614de0bc0dac3b440fcb070274ccb9cb Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 16 Mar 2024 19:04:54 +0000 Subject: [PATCH 1584/1727] refactor: check if federation is disabled inside the authcheck where possible --- src/api/ruma_wrapper/axum.rs | 4 +++ src/api/server_server.rs | 68 ------------------------------------ 2 files changed, 4 insertions(+), 68 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index bbd4861..c6a1957 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -149,6 +149,10 @@ where } } AuthScheme::ServerSignatures => { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + let TypedHeader(Authorization(x_matrix)) = parts .extract::>>() .await diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f946fea..e033549 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -601,10 +601,6 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { pub async fn get_public_rooms_filtered_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let response = client_server::get_public_rooms_filtered_helper( None, body.limit, @@ -628,10 +624,6 @@ pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let response = client_server::get_public_rooms_filtered_helper( None, body.limit, @@ -686,10 +678,6 @@ pub fn parse_incoming_pdu( pub async fn send_transaction_message_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -954,10 +942,6 @@ pub async fn send_transaction_message_route( pub async fn get_event_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1016,10 +1000,6 @@ pub async fn get_event_route( pub async fn get_backfill_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1092,10 +1072,6 @@ pub async fn get_backfill_route( pub async fn get_missing_events_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1181,10 +1157,6 @@ pub async fn get_missing_events_route( pub async fn get_event_authorization_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1243,10 +1215,6 @@ pub async fn get_event_authorization_route( pub async fn get_room_state_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1323,10 +1291,6 @@ pub async fn get_room_state_route( pub async fn get_room_state_ids_route( body: Ruma, ) -> Result { - if 
!services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1384,10 +1348,6 @@ pub async fn get_room_state_ids_route( pub async fn create_join_event_template_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if !services().rooms.metadata.exists(&body.room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, @@ -1495,10 +1455,6 @@ async fn create_join_event( room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, @@ -1679,10 +1635,6 @@ pub async fn create_join_event_v2_route( pub async fn create_invite_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let sender_servername = body .sender_servername .as_ref() @@ -1796,10 +1748,6 @@ pub async fn create_invite_route( pub async fn get_devices_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if body.user_id.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1852,10 +1800,6 @@ pub async fn get_devices_route( pub async fn get_room_information_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let room_id = services() .rooms .alias @@ -1877,10 +1821,6 @@ pub async fn get_room_information_route( pub async fn get_profile_information_route( body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if body.user_id.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1920,10 +1860,6 @@ pub async fn get_profile_information_route( /// /// Gets devices and identity keys for the given users. 
pub async fn get_keys_route(body: Ruma) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if body .device_keys .iter() @@ -1953,10 +1889,6 @@ pub async fn get_keys_route(body: Ruma) -> Result, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - if body .one_time_keys .iter() From 879a8b969d4a6b664c2f00c219d8c18ca3d821da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 13 Mar 2024 12:52:11 +0100 Subject: [PATCH 1585/1727] improvement: use simpler rocksdb config --- Cargo.lock | 436 ++++++++++++------------ Cargo.toml | 1 + src/api/client_server/sync.rs | 4 +- src/api/client_server/user_directory.rs | 31 +- src/database/abstraction/rocksdb.rs | 73 ++-- 5 files changed, 289 insertions(+), 256 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8821700..4d294c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -46,15 +46,15 @@ checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "anstyle" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" [[package]] name = "arrayref" @@ -88,7 +88,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -204,22 +204,22 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bindgen" -version = "0.69.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ "bitflags 2.4.2", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -256,15 +256,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" [[package]] name = "bytemuck" -version = "1.14.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" 
[[package]] name = "byteorder" @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" dependencies = [ "jobserver", "libc", @@ -327,9 +327,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" dependencies = [ "clap_builder", "clap_derive", @@ -337,9 +337,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstyle", "clap_lex", @@ -347,21 +347,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "color_quant" @@ -484,18 +484,18 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -518,9 +518,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -541,7 +541,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -625,9 +625,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ 
"curve25519-dalek", "ed25519", @@ -640,9 +640,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "encoding_rs" @@ -694,9 +694,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" [[package]] name = "figment" @@ -803,7 +803,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -861,9 +861,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.12.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045" +checksum = "3fb2d69b19215e18bb912fa30f7ce15846e301408695e44e0ef719f1da9e19f2" dependencies = [ "color_quant", "weezl", @@ -893,7 +893,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.1", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -957,9 +957,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hmac" @@ -983,9 +983,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1082,9 +1082,9 @@ dependencies = [ [[package]] name = "image" -version = "0.24.8" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034bbe799d1909622a74d1193aa50147769440040ff36cb2baa947609b0a4e23" +checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" dependencies = [ "bytemuck", "byteorder", @@ -1107,9 +1107,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.1" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1155,6 +1155,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.10" @@ -1163,9 +1172,9 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" dependencies = [ "libc", ] @@ -1178,9 +1187,9 @@ checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1252,18 +1261,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -1317,9 +1326,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru-cache" @@ -1402,9 +1411,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", "simd-adler32", @@ -1412,9 +1421,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -1466,20 +1475,25 @@ dependencies = [ ] [[package]] -name = "num-integer" -version = "0.1.45" +name = "num-conv" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -1653,15 +1667,9 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", 
"quote", - "syn 2.0.48", + "syn 2.0.52", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "3.0.3" @@ -1680,9 +1688,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "persy" -version = "1.4.7" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd38c602b23c2f451842d89f27cd5e0d4b292176daf40feeda859c658dcdc76" +checksum = "9ef4b7250ab3a90ded0e284b2633469c23ef01ea868fe7cbb64e2f0a7d6f6d02" dependencies = [ "crc", "data-encoding", @@ -1696,22 +1704,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1738,9 +1746,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" @@ -1750,9 +1758,9 @@ checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "png" -version = "0.17.11" +version = "0.17.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f6c3c3e617595665b8ea2ff95a86066be38fb121ff920a9c0eb282abcd1da5a" +checksum = "06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -1775,18 +1783,19 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" dependencies = [ - "toml_edit 0.20.7", + "toml_datetime", + "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -1799,7 +1808,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "version_check", "yansi", ] @@ -1877,7 +1886,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -1892,9 +1901,9 @@ 
dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -1915,9 +1924,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" dependencies = [ "base64", "bytes", @@ -1942,6 +1951,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-rustls", @@ -1966,16 +1976,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2038,7 +2049,7 @@ dependencies = [ "bytes", "form_urlencoded", "http", - "indexmap 2.2.1", + "indexmap 2.2.5", "js_int", "konst", "percent-encoding", @@ -2062,7 +2073,7 @@ version = "0.27.11" source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ "as_variant", - "indexmap 2.2.1", + "indexmap 2.2.5", "js_int", "js_option", "percent-encoding", @@ -2120,7 +2131,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.48", + "syn 2.0.52", "toml", ] @@ -2157,7 +2168,7 @@ name = "ruma-state-res" version = "0.10.0" source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" dependencies = [ - "itertools", + "itertools 0.11.0", "js_int", "ruma-common", "ruma-events", @@ -2291,9 +2302,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "schannel" @@ -2351,38 +2362,38 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "serde_html_form" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20e1066e1cfa6692a722cf40386a2caec36da5ddc4a2c16df592f0f609677e8c" +checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" dependencies = [ "form_urlencoded", - "indexmap 2.2.1", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -2390,9 +2401,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -2401,9 +2412,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -2432,11 +2443,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ - "indexmap 2.2.1", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -2544,12 +2555,12 @@ checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2596,9 +2607,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -2634,29 +2645,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -2717,12 +2728,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -2737,10 +2749,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -2761,9 +2774,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -2785,7 +2798,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2837,43 +2850,32 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.8" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.0", + "toml_edit", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.20.7" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.2.1", - "toml_datetime", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" -dependencies = [ - "indexmap 2.2.1", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -2947,7 +2949,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3120,9 +3122,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -3135,9 +3137,9 @@ checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "unsigned-varint" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" [[package]] name = "untrusted" @@ -3200,9 +3202,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3210,24 +3212,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -3237,9 +3239,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3247,28 +3249,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -3288,9 +3290,9 @@ checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" -version = "2.3.0" +version = "2.3.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495ec47bf3c1345005f40724f0269362c8556cbc43aed0526ed44cae1d35fceb" +checksum = "017f0a8ed8331210d91b7a4c30d4edef8f21a65c02f2540496e2e79725f6d8a8" [[package]] name = "winapi" @@ -3329,7 +3331,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -3349,17 +3351,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -3370,9 +3372,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -3382,9 +3384,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -3394,9 +3396,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -3406,9 +3408,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -3418,9 +3420,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = 
"windows_x86_64_gnullvm" @@ -3430,9 +3432,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -3442,15 +3444,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" -version = "0.5.35" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1931d78a9c73861da0134f453bb1f790ce49b2e30eba8410b4b79bac72b46a2d" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] @@ -3467,9 +3469,9 @@ dependencies = [ [[package]] name = "yansi" -version = "1.0.0-rc.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" @@ -3488,7 +3490,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 08188cb..ac33472 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,6 +121,7 @@ optional = true features = [ "multi-threaded-cf", "zstd", + "lz4", ] [target.'cfg(unix)'.dependencies] diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index c510f5f..103daeb 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -29,7 +29,7 @@ use std::{ time::Duration, }; use tokio::sync::watch::Sender; -use tracing::error; +use tracing::{error, info}; /// # `GET /_matrix/client/r0/sync` /// @@ -99,6 +99,8 @@ pub async fn sync_events_route( o.insert((body.since.clone(), rx.clone())); + info!("Sync started for {sender_user}"); + tokio::spawn(sync_helper_wrapper( sender_user.clone(), sender_device.clone(), diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index c30bac5..b4d1180 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -48,6 +48,9 @@ pub async fn search_users_route( return None; } + // It's a matching user, but is the sender allowed to see them? + let mut user_visible = false; + let user_is_in_public_rooms = services() .rooms .state_cache @@ -69,22 +72,26 @@ pub async fn search_users_route( }); if user_is_in_public_rooms { - return Some(user); + user_visible = true; + } else { + let user_is_in_shared_rooms = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id]) + .ok()? + .next() + .is_some(); + + if user_is_in_shared_rooms { + user_visible = true; + } } - let user_is_in_shared_rooms = services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id]) - .ok()? 
- .next() - .is_some(); - - if user_is_in_shared_rooms { - return Some(user); + if !user_visible { + return None; } - None + Some(user) }); let results = users.by_ref().take(limit).collect(); diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 7a93d78..447ee03 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -23,29 +23,23 @@ pub struct RocksDbEngineTree<'a> { fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); - - // "Difference of spinning disk" - // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html + block_based_options.set_bloom_filter(10.0, false); block_based_options.set_block_size(4 * 1024); block_based_options.set_cache_index_and_filter_blocks(true); + block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true); + block_based_options.set_optimize_filters_for_memory(true); let mut db_opts = rocksdb::Options::default(); db_opts.set_block_based_table_factory(&block_based_options); - db_opts.set_optimize_filters_for_hits(true); - db_opts.set_skip_stats_update_on_db_open(true); - db_opts.set_level_compaction_dynamic_level_bytes(true); - db_opts.set_target_file_size_base(256 * 1024 * 1024); - //db_opts.set_compaction_readahead_size(2 * 1024 * 1024); - //db_opts.set_use_direct_reads(true); - //db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); db_opts.set_max_open_files(max_open_files); - db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4); + db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd); db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.optimize_level_style_compaction(10 * 1024 * 1024); // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning + db_opts.set_level_compaction_dynamic_level_bytes(true); db_opts.set_max_background_jobs(6); db_opts.set_bytes_per_sync(1048576); @@ -59,9 +53,6 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O // restored via federation. db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); - db_opts.set_prefix_extractor(prefix_extractor); - db_opts } @@ -147,12 +138,17 @@ impl RocksDbEngineTree<'_> { impl KvTree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { - Ok(self.db.rocks.get_cf(&self.cf(), key)?) + let readoptions = rocksdb::ReadOptions::default(); + + Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?) 
} fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let writeoptions = rocksdb::WriteOptions::default(); let lock = self.write_lock.read().unwrap(); - self.db.rocks.put_cf(&self.cf(), key, value)?; + self.db + .rocks + .put_cf_opt(&self.cf(), key, value, &writeoptions)?; drop(lock); self.watchers.wake(key); @@ -161,22 +157,31 @@ impl KvTree for RocksDbEngineTree<'_> { } fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let writeoptions = rocksdb::WriteOptions::default(); for (key, value) in iter { - self.db.rocks.put_cf(&self.cf(), key, value)?; + self.db + .rocks + .put_cf_opt(&self.cf(), key, value, &writeoptions)?; } Ok(()) } fn remove(&self, key: &[u8]) -> Result<()> { - Ok(self.db.rocks.delete_cf(&self.cf(), key)?) + let writeoptions = rocksdb::WriteOptions::default(); + Ok(self + .db + .rocks + .delete_cf_opt(&self.cf(), key, &writeoptions)?) } fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + let readoptions = rocksdb::ReadOptions::default(); + Box::new( self.db .rocks - .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) + .iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start) .map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) @@ -187,11 +192,14 @@ impl KvTree for RocksDbEngineTree<'_> { from: &[u8], backwards: bool, ) -> Box, Vec)> + 'a> { + let readoptions = rocksdb::ReadOptions::default(); + Box::new( self.db .rocks - .iterator_cf( + .iterator_cf_opt( &self.cf(), + readoptions, rocksdb::IteratorMode::From( from, if backwards { @@ -207,23 +215,33 @@ impl KvTree for RocksDbEngineTree<'_> { } fn increment(&self, key: &[u8]) -> Result> { + let readoptions = rocksdb::ReadOptions::default(); + let writeoptions = rocksdb::WriteOptions::default(); + let lock = self.write_lock.write().unwrap(); - let old = self.db.rocks.get_cf(&self.cf(), key)?; + let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(&self.cf(), key, &new)?; + self.db + .rocks + .put_cf_opt(&self.cf(), key, &new, &writeoptions)?; drop(lock); Ok(new) } fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let readoptions = rocksdb::ReadOptions::default(); + let writeoptions = rocksdb::WriteOptions::default(); + let lock = self.write_lock.write().unwrap(); for key in iter { - let old = self.db.rocks.get_cf(&self.cf(), &key)?; + let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(&self.cf(), key, new)?; + self.db + .rocks + .put_cf_opt(&self.cf(), key, new, &writeoptions)?; } drop(lock); @@ -235,11 +253,14 @@ impl KvTree for RocksDbEngineTree<'_> { &'a self, prefix: Vec, ) -> Box, Vec)> + 'a> { + let readoptions = rocksdb::ReadOptions::default(); + Box::new( self.db .rocks - .iterator_cf( + .iterator_cf_opt( &self.cf(), + readoptions, rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) .map(|r| r.unwrap()) From 68a33862b3a0298f3e478d94a0255bd95cb54326 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:40:10 -0700 Subject: [PATCH 1586/1727] add mdbook to the devshell --- flake.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake.nix b/flake.nix index 9cd826b..fdaa0b6 100644 --- a/flake.nix +++ b/flake.nix @@ -307,6 +307,9 @@ # Needed for finding broken markdown links lychee + + # Useful for editing the book locally + mdbook ]); }; }); From 0a790686c57c087103ccbbaf9096d0eba6a2d753 Mon Sep 17 00:00:00 2001 From: 
Charles Hall Date: Thu, 21 Mar 2024 15:31:57 -0700 Subject: [PATCH 1587/1727] avoid duplicating links in documentation Because one might forget to update them. I did, initially, which is why I'm making this change. --- docs/deploying.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/deploying.md b/docs/deploying.md index 3694f6b..4c4b154 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,8 +1,3 @@ # Deployment options -There are several ways to deploy a Conduit server. - -- [Simple (Recommended)](deploying/simple.md) - This is the recommended way to set up Conduit. -- [Debian](deploying/debian.md) - If you're using a debian-based system, you may find the `deb` package useful. -- [Docker](deploying/docker.md) - We provide multi-arch OCI images for Conduit. -- [Nix](deploying/nix.md) - Nix flake based setup. +This chapter describes various ways to deploy Conduit. From 2022efd2792034ab45a9e27db514744876009c93 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:36:08 -0700 Subject: [PATCH 1588/1727] remove section about cross compilation It is very stale. Please just use Nix. Trying to do it outside of Nix will be an exercise in frustration, I guarantee it. --- docs/deploying/simple.md | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/docs/deploying/simple.md b/docs/deploying/simple.md index ea12a28..ef5c15d 100644 --- a/docs/deploying/simple.md +++ b/docs/deploying/simple.md @@ -61,26 +61,6 @@ Then, `cd` into the source tree of conduit-next and run: $ cargo build --release ``` -If you want to cross compile Conduit to another architecture, read the guide below. - -
                -Cross compilation - -As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first. - -In order to use RockDB as storage backend append `-latomic` to linker flags. - -For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation -target. - -```bash -git clone https://gitlab.com/famedly/conduit.git -cd conduit -export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc' -cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf -``` -
                - ## Adding a Conduit user While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows From f56abba21651f64bf1b4f970ad0ca97c807256e8 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:22:53 -0700 Subject: [PATCH 1589/1727] rename "simple" deployment to "generic" The main thing this section is really useful for is explaining how to configure various reverse proxies, which applies to basically anything. Also, remove all the language about this being "recommended", because nothing in this documentation is actually tested in CI. --- docs/SUMMARY.md | 2 +- docs/deploying/docker.md | 2 +- docs/deploying/{simple.md => generic.md} | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) rename docs/deploying/{simple.md => generic.md} (98%) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index e7ed05d..342d695 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -4,7 +4,7 @@ - [Example configuration](configuration.md) - [Deployment options](deploying.md) - - [Simple (Recommended)](deploying/simple.md) + - [Generic](deploying/generic.md) - [Debian](deploying/debian.md) - [Docker](deploying/docker.md) - [Nix](deploying/nix.md) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index 4a38b30..20cd15f 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -87,7 +87,7 @@ Depending on your proxy setup, you can use one of the following files; When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want for your server. -Additional info about deploying Conduit can be found [here](simple.md). +Additional info about deploying Conduit can be found [here](generic.md). ### Build diff --git a/docs/deploying/simple.md b/docs/deploying/generic.md similarity index 98% rename from docs/deploying/simple.md rename to docs/deploying/generic.md index ef5c15d..307de35 100644 --- a/docs/deploying/simple.md +++ b/docs/deploying/generic.md @@ -1,6 +1,4 @@ -# Simple setup - -This is the recommended way to set up Conduit. It is the easiest way to get started and is suitable for most use cases. +# Generic deployment documentation > ## Getting help > From f6bfba70146ea01ad00c526a022e6cad1c057e6c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:28:19 -0700 Subject: [PATCH 1590/1727] normalize headers to "Conduit for X" --- docs/deploying/docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index 20cd15f..c19ef51 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,4 +1,4 @@ -# Deploy using Docker +# Conduit for Docker > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. From 3dd21456efe8c38aed4206f05042eddba1cbbc94 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:25:34 -0700 Subject: [PATCH 1591/1727] reduce scope of nixos documentation There are so many ways to do this we realistically shouldn't bother describing any of them, especially because people should be learning all the options and choosing the one that suits them best anyway. 
--- docs/SUMMARY.md | 2 +- docs/deploying/nix.md | 198 ---------------------------------------- docs/deploying/nixos.md | 18 ++++ 3 files changed, 19 insertions(+), 199 deletions(-) delete mode 100644 docs/deploying/nix.md create mode 100644 docs/deploying/nixos.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 342d695..89f620e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -7,6 +7,6 @@ - [Generic](deploying/generic.md) - [Debian](deploying/debian.md) - [Docker](deploying/docker.md) - - [Nix](deploying/nix.md) + - [NixOS](deploying/nixos.md) - [TURN](turn.md) - [Appservices](appservices.md) diff --git a/docs/deploying/nix.md b/docs/deploying/nix.md deleted file mode 100644 index bd6f096..0000000 --- a/docs/deploying/nix.md +++ /dev/null @@ -1,198 +0,0 @@ -# Conduit for Nix/NixOS - -This guide assumes you have a recent version of Nix (^2.4) installed. - -Since Conduit ships as a Nix flake, you'll first need to [enable -flakes][enable_flakes]. - -You can now use the usual Nix commands to interact with Conduit's flake. For -example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need -to provide configuration and such manually as usual). - -If your NixOS configuration is defined as a flake, you can depend on this flake -to provide a more up-to-date version than provided by `nixpkgs`. In your flake, -add the following to your `inputs`: - -```nix -conduit = { - url = "gitlab:famedly/conduit"; - - # Assuming you have an input for nixpkgs called `nixpkgs`. If you experience - # build failures while using this, try commenting/deleting this line. This - # will probably also require you to always build from source. - inputs.nixpkgs.follows = "nixpkgs"; -}; -``` - -Next, make sure you're passing your flake inputs to the `specialArgs` argument -of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will -assume you've named the group `flake-inputs`. - -Now you can configure Conduit and a reverse proxy for it. Add the following to -a new Nix file and include it in your configuration: - -```nix -{ config -, pkgs -, flake-inputs -, ... -}: - -let - # You'll need to edit these values - - # The hostname that will appear in your user and room IDs - server_name = "example.com"; - - # The hostname that Conduit actually runs on - # - # This can be the same as `server_name` if you want. This is only necessary - # when Conduit is running on a different machine than the one hosting your - # root domain. This configuration also assumes this is all running on a single - # machine, some tweaks will need to be made if this is not the case. - matrix_hostname = "matrix.${server_name}"; - - # An admin email for TLS certificate notifications - admin_email = "admin@${server_name}"; - - # These ones you can leave alone - - # Build a dervation that stores the content of `${server_name}/.well-known/matrix/server` - well_known_server = pkgs.writeText "well-known-matrix-server" '' - { - "m.server": "${matrix_hostname}" - } - ''; - - # Build a dervation that stores the content of `${server_name}/.well-known/matrix/client` - well_known_client = pkgs.writeText "well-known-matrix-client" '' - { - "m.homeserver": { - "base_url": "https://${matrix_hostname}" - } - } - ''; -in - -{ - # Configure Conduit itself - services.matrix-conduit = { - enable = true; - - # This causes NixOS to use the flake defined in this repository instead of - # the build of Conduit built into nixpkgs. 
- package = flake-inputs.conduit.packages.${pkgs.system}.default; - - settings.global = { - inherit server_name; - }; - }; - - # Configure automated TLS acquisition/renewal - security.acme = { - acceptTerms = true; - defaults = { - email = admin_email; - }; - }; - - # ACME data must be readable by the NGINX user - users.users.nginx.extraGroups = [ - "acme" - ]; - - # Configure NGINX as a reverse proxy - services.nginx = { - enable = true; - recommendedProxySettings = true; - - virtualHosts = { - "${matrix_hostname}" = { - forceSSL = true; - enableACME = true; - - listen = [ - { - addr = "0.0.0.0"; - port = 443; - ssl = true; - } - { - addr = "[::]"; - port = 443; - ssl = true; - } { - addr = "0.0.0.0"; - port = 8448; - ssl = true; - } - { - addr = "[::]"; - port = 8448; - ssl = true; - } - ]; - - locations."/_matrix/" = { - proxyPass = "http://backend_conduit$request_uri"; - proxyWebsockets = true; - extraConfig = '' - proxy_set_header Host $host; - proxy_buffering off; - ''; - }; - - extraConfig = '' - merge_slashes off; - ''; - }; - - "${server_name}" = { - forceSSL = true; - enableACME = true; - - locations."=/.well-known/matrix/server" = { - # Use the contents of the derivation built previously - alias = "${well_known_server}"; - - extraConfig = '' - # Set the header since by default NGINX thinks it's just bytes - default_type application/json; - ''; - }; - - locations."=/.well-known/matrix/client" = { - # Use the contents of the derivation built previously - alias = "${well_known_client}"; - - extraConfig = '' - # Set the header since by default NGINX thinks it's just bytes - default_type application/json; - - # https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients - add_header Access-Control-Allow-Origin "*"; - ''; - }; - }; - }; - - upstreams = { - "backend_conduit" = { - servers = { - "[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { }; - }; - }; - }; - }; - - # Open firewall ports for HTTP, HTTPS, and Matrix federation - networking.firewall.allowedTCPPorts = [ 80 443 8448 ]; - networking.firewall.allowedUDPPorts = [ 80 443 8448 ]; -} -``` - -Now you can rebuild your system configuration and you should be good to go! - -[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes - -[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md new file mode 100644 index 0000000..bf9b1a1 --- /dev/null +++ b/docs/deploying/nixos.md @@ -0,0 +1,18 @@ +# Conduit for NixOS + +Conduit can be acquired by Nix from various places: + +* The `flake.nix` at the root of the repo +* The `default.nix` at the root of the repo +* From Nixpkgs + +The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so +(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to +configure Conduit. + +If you want to run the latest code, you should get Conduit from the `flake.nix` +or `default.nix` and set [`services.matrix-conduit.package`][package] +appropriately. + +[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit +[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package From 5a4ee9808abeee0eedb932e4c02568d192ebdfca Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 21 Mar 2024 15:42:36 -0700 Subject: [PATCH 1592/1727] make chapter name reflect file name Personally I think this makes more sense anyway. 
--- docs/SUMMARY.md | 2 +- docs/deploying.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 89f620e..30316e2 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -3,7 +3,7 @@ - [Introduction](introduction.md) - [Example configuration](configuration.md) -- [Deployment options](deploying.md) +- [Deploying](deploying.md) - [Generic](deploying/generic.md) - [Debian](deploying/debian.md) - [Docker](deploying/docker.md) diff --git a/docs/deploying.md b/docs/deploying.md index 4c4b154..136e653 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,3 +1,3 @@ -# Deployment options +# Deploying This chapter describes various ways to deploy Conduit. From 6bd7ff4917978856613c464f813fff8cf2b7e6a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 5 Mar 2024 23:48:36 +0100 Subject: [PATCH 1593/1727] improvement: do not save typing edus in db --- src/api/client_server/sync.rs | 4 +- src/api/client_server/typing.rs | 4 +- src/api/server_server.rs | 4 +- src/service/mod.rs | 3 +- src/service/rooms/edus/typing/mod.rs | 57 ++++++++++++++++++++-------- 5 files changed, 49 insertions(+), 23 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 103daeb..ba2ceef 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1055,10 +1055,10 @@ async fn load_joined_room( .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.typing.last_typing_update(room_id)? > since { + if services().rooms.edus.typing.last_typing_update(room_id).await? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?) 
.expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index 43217e1..eff8405 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -27,13 +27,13 @@ pub async fn create_typing_event_route( sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), - )?; + ).await?; } else { services() .rooms .edus .typing - .typing_remove(sender_user, &body.room_id)?; + .typing_remove(sender_user, &body.room_id).await?; } Ok(create_typing_event::v3::Response {}) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index e033549..59724b0 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -830,13 +830,13 @@ pub async fn send_transaction_message_route( &typing.user_id, &typing.room_id, 3000 + utils::millis_since_unix_epoch(), - )?; + ).await?; } else { services() .rooms .edus .typing - .typing_remove(&typing.user_id, &typing.room_id)?; + .typing_remove(&typing.user_id, &typing.room_id).await?; } } } diff --git a/src/service/mod.rs b/src/service/mod.rs index 8f9fb0a..8c97cdd 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -7,6 +7,7 @@ use lru_cache::LruCache; use tokio::sync::Mutex; use crate::{Config, Result}; +use tokio::sync::RwLock; pub mod account_data; pub mod admin; @@ -65,7 +66,7 @@ impl Services { edus: rooms::edus::Service { presence: rooms::edus::presence::Service { db }, read_receipt: rooms::edus::read_receipt::Service { db }, - typing: rooms::edus::typing::Service { db }, + typing: rooms::edus::typing::Service { db, typing: RwLock::new(BTreeMap::new()), last_typing_update: RwLock::new(BTreeMap::new()) }, }, event_handler: rooms::event_handler::Service, lazy_loading: rooms::lazy_loading::Service { diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 7d44f7d..f343674 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,48 +1,73 @@ mod data; pub use data::Data; -use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; +use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId, OwnedRoomId, OwnedUserId}; +use tokio::sync::RwLock; +use std::collections::BTreeMap; -use crate::Result; +use crate::{utils, services, Result}; pub struct Service { pub db: &'static dyn Data, + pub typing: RwLock>>, // u64 is unix timestamp of timeout + pub last_typing_update: RwLock>, // timestamp of the last change to typing users } impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. - pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) + pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + self.typing.write().await.entry(room_id.to_owned()).or_default().insert(user_id.to_owned(), timeout); + self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + Ok(()) } /// Removes a user from typing before the timeout is reached. 
- pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.typing_remove(user_id, room_id) + pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + self.typing.write().await.entry(room_id.to_owned()).or_default().remove(user_id); + self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + Ok(()) } /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { - self.db.typings_maintain(room_id) + async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { + let current_timestamp = utils::millis_since_unix_epoch(); + let mut removable = Vec::new(); + { + let typing = self.typing.read().await; + let Some(room) = typing.get(room_id) else { return Ok(()); }; + for (user, timeout) in room { + if *timeout < current_timestamp { + removable.push(user.clone()); + } + } + drop(typing); + } + if !removable.is_empty() { + let typing = &mut self.typing.write().await; + let room = typing.entry(room_id.to_owned()).or_default(); + for user in removable { + room.remove(&user); + } + self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + } + Ok(()) } /// Returns the count of the last typing update in this room. - pub fn last_typing_update(&self, room_id: &RoomId) -> Result { - self.typings_maintain(room_id)?; - - self.db.last_typing_update(room_id) + pub async fn last_typing_update(&self, room_id: &RoomId) -> Result { + self.typings_maintain(room_id).await?; + Ok(self.last_typing_update.read().await.get(room_id).copied().unwrap_or(0)) } /// Returns a new typing EDU. - pub fn typings_all( + pub async fn typings_all( &self, room_id: &RoomId, ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), + user_ids: self.typing.read().await.get(room_id).map(|m| m.keys().cloned().collect()).unwrap_or_default(), }, }) } From d2817679e59494a7944e1141e65dc236bcf7b061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 22 Mar 2024 08:22:15 +0100 Subject: [PATCH 1594/1727] refactor: remove previous typing implementation and add sync wakeup for new one --- src/api/client_server/sync.rs | 9 +- src/api/client_server/typing.rs | 18 ++- src/api/server_server.rs | 18 ++- src/database/key_value/globals.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 1 - src/database/key_value/rooms/edus/typing.rs | 127 -------------------- src/database/mod.rs | 4 - src/service/mod.rs | 8 +- src/service/rooms/edus/mod.rs | 2 +- src/service/rooms/edus/typing/data.rs | 21 ---- src/service/rooms/edus/typing/mod.rs | 74 +++++++++--- 11 files changed, 101 insertions(+), 185 deletions(-) delete mode 100644 src/database/key_value/rooms/edus/typing.rs delete mode 100644 src/service/rooms/edus/typing/data.rs diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index ba2ceef..da603dc 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1055,7 +1055,14 @@ async fn load_joined_room( .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.typing.last_typing_update(room_id).await? > since { + if services() + .rooms + .edus + .typing + .last_typing_update(room_id) + .await? + > since + { edus.push( serde_json::from_str( &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?) 
diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index eff8405..e9e9370 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -23,17 +23,23 @@ pub async fn create_typing_event_route( } if let Typing::Yes(duration) = body.state { - services().rooms.edus.typing.typing_add( - sender_user, - &body.room_id, - duration.as_millis() as u64 + utils::millis_since_unix_epoch(), - ).await?; + services() + .rooms + .edus + .typing + .typing_add( + sender_user, + &body.room_id, + duration.as_millis() as u64 + utils::millis_since_unix_epoch(), + ) + .await?; } else { services() .rooms .edus .typing - .typing_remove(sender_user, &body.room_id).await?; + .typing_remove(sender_user, &body.room_id) + .await?; } Ok(create_typing_event::v3::Response {}) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 59724b0..0fdf22f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -826,17 +826,23 @@ pub async fn send_transaction_message_route( .is_joined(&typing.user_id, &typing.room_id)? { if typing.typing { - services().rooms.edus.typing.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - ).await?; + services() + .rooms + .edus + .typing + .typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + ) + .await?; } else { services() .rooms .edus .typing - .typing_remove(&typing.user_id, &typing.room_id).await?; + .typing_remove(&typing.user_id, &typing.room_id) + .await?; } } } diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 7a70af1..2851ce5 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -94,7 +94,9 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs - futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes)); + futures.push(Box::into_pin(Box::new(async move { + let _result = services().rooms.edus.typing.wait_for_update(&room_id).await; + }))); futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index 6c65291..7abf946 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -1,6 +1,5 @@ mod presence; mod read_receipt; -mod typing; use crate::{database::KeyValueDatabase, service}; diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs deleted file mode 100644 index 5709192..0000000 --- a/src/database/key_value/rooms/edus/typing.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::{collections::HashSet, mem}; - -use ruma::{OwnedUserId, RoomId, UserId}; - -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; - -impl service::rooms::edus::typing::Data for KeyValueDatabase { - fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = services().globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove(&self, user_id: 
&UserId, room_id: &RoomId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate.insert( - room_id.as_bytes(), - &services().globals.next_count()?.to_be_bytes(), - )?; - } - - Ok(()) - } - - fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate.insert( - room_id.as_bytes(), - &services().globals.next_count()?.to_be_bytes(), - )?; - } - - Ok(()) - } - - fn last_typing_update(&self, room_id: &RoomId) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all(&self, room_id: &RoomId) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} diff --git a/src/database/mod.rs b/src/database/mod.rs index 425ef4e..0960dc9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -70,8 +70,6 @@ pub struct KeyValueDatabase { pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count @@ -301,8 +299,6 @@ impl KeyValueDatabase { roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt roomuserid_lastprivatereadupdate: builder .open_tree("roomuserid_lastprivatereadupdate")?, - typingid_userid: builder.open_tree("typingid_userid")?, - roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, presenceid_presence: builder.open_tree("presenceid_presence")?, userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, pduid_pdu: builder.open_tree("pduid_pdu")?, diff --git a/src/service/mod.rs b/src/service/mod.rs index 8c97cdd..ba1ae1c 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use lru_cache::LruCache; -use tokio::sync::Mutex; +use tokio::sync::{broadcast, Mutex}; use crate::{Config, Result}; use tokio::sync::RwLock; @@ -66,7 +66,11 @@ impl Services { edus: rooms::edus::Service { presence: rooms::edus::presence::Service { db }, read_receipt: rooms::edus::read_receipt::Service { db }, - typing: rooms::edus::typing::Service { db, typing: RwLock::new(BTreeMap::new()), last_typing_update: RwLock::new(BTreeMap::new()) }, + typing: rooms::edus::typing::Service { + typing: RwLock::new(BTreeMap::new()), + last_typing_update: RwLock::new(BTreeMap::new()), + typing_update_sender: broadcast::channel(100).0, + }, }, event_handler: rooms::event_handler::Service, lazy_loading: rooms::lazy_loading::Service { diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index cf7a359..a6bc3d5 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -2,7 +2,7 @@ pub mod presence; pub mod read_receipt; pub mod typing; -pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {} +pub trait Data: presence::Data + read_receipt::Data + 'static {} pub struct Service { pub presence: presence::Service, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs deleted file mode 100644 index 3b1eecf..0000000 --- a/src/service/rooms/edus/typing/data.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::Result; -use ruma::{OwnedUserId, RoomId, UserId}; -use std::collections::HashSet; - -pub trait Data: Send + Sync { - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; - - /// Removes a user from typing before the timeout is reached. - fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; - - /// Makes sure that typing events with old timestamps get removed. 
- fn typings_maintain(&self, room_id: &RoomId) -> Result<()>; - - /// Returns the count of the last typing update in this room. - fn last_typing_update(&self, room_id: &RoomId) -> Result; - - /// Returns all user ids currently typing. - fn typings_all(&self, room_id: &RoomId) -> Result>; -} diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index f343674..7546aa8 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,31 +1,57 @@ -mod data; - -pub use data::Data; -use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId, OwnedRoomId, OwnedUserId}; -use tokio::sync::RwLock; +use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId}; use std::collections::BTreeMap; +use tokio::sync::{broadcast, RwLock}; -use crate::{utils, services, Result}; +use crate::{services, utils, Result}; pub struct Service { - pub db: &'static dyn Data, pub typing: RwLock>>, // u64 is unix timestamp of timeout pub last_typing_update: RwLock>, // timestamp of the last change to typing users + pub typing_update_sender: broadcast::Sender, } impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { - self.typing.write().await.entry(room_id.to_owned()).or_default().insert(user_id.to_owned(), timeout); - self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + self.typing + .write() + .await + .entry(room_id.to_owned()) + .or_default() + .insert(user_id.to_owned(), timeout); + self.last_typing_update + .write() + .await + .insert(room_id.to_owned(), services().globals.next_count()?); + let _ = self.typing_update_sender.send(room_id.to_owned()); Ok(()) } /// Removes a user from typing before the timeout is reached. pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.typing.write().await.entry(room_id.to_owned()).or_default().remove(user_id); - self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + self.typing + .write() + .await + .entry(room_id.to_owned()) + .or_default() + .remove(user_id); + self.last_typing_update + .write() + .await + .insert(room_id.to_owned(), services().globals.next_count()?); + let _ = self.typing_update_sender.send(room_id.to_owned()); + Ok(()) + } + + pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> { + let mut receiver = self.typing_update_sender.subscribe(); + while let Ok(next) = receiver.recv().await { + if next == room_id { + break; + } + } + Ok(()) } @@ -35,7 +61,9 @@ impl Service { let mut removable = Vec::new(); { let typing = self.typing.read().await; - let Some(room) = typing.get(room_id) else { return Ok(()); }; + let Some(room) = typing.get(room_id) else { + return Ok(()); + }; for (user, timeout) in room { if *timeout < current_timestamp { removable.push(user.clone()); @@ -49,7 +77,11 @@ impl Service { for user in removable { room.remove(&user); } - self.last_typing_update.write().await.insert(room_id.to_owned(), services().globals.next_count()?); + self.last_typing_update + .write() + .await + .insert(room_id.to_owned(), services().globals.next_count()?); + let _ = self.typing_update_sender.send(room_id.to_owned()); } Ok(()) } @@ -57,7 +89,13 @@ impl Service { /// Returns the count of the last typing update in this room. 
pub async fn last_typing_update(&self, room_id: &RoomId) -> Result { self.typings_maintain(room_id).await?; - Ok(self.last_typing_update.read().await.get(room_id).copied().unwrap_or(0)) + Ok(self + .last_typing_update + .read() + .await + .get(room_id) + .copied() + .unwrap_or(0)) } /// Returns a new typing EDU. @@ -67,7 +105,13 @@ impl Service { ) -> Result> { Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { - user_ids: self.typing.read().await.get(room_id).map(|m| m.keys().cloned().collect()).unwrap_or_default(), + user_ids: self + .typing + .read() + .await + .get(room_id) + .map(|m| m.keys().cloned().collect()) + .unwrap_or_default(), }, }) } From 0bb28f60cfc68beaa521bb7dacdec7a9f92c2288 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 22 Mar 2024 08:52:39 +0100 Subject: [PATCH 1595/1727] refactor: minor appservice code cleanup --- src/api/appservice_server.rs | 173 ++++++++++++++------------- src/api/client_server/alias.rs | 8 +- src/api/ruma_wrapper/axum.rs | 8 +- src/database/key_value/appservice.rs | 35 ++---- src/database/mod.rs | 19 --- src/service/appservice/mod.rs | 39 ++++-- src/service/mod.rs | 7 +- src/service/rooms/timeline/mod.rs | 10 +- 8 files changed, 136 insertions(+), 163 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index ab4da79..841c32a 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -17,95 +17,96 @@ pub(crate) async fn send_request( where T: Debug, { - if let Some(destination) = registration.url { - let hs_token = registration.hs_token.as_str(); + let Some(destination) = registration.url else { + return None; + }; - let mut http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(hs_token), - &[MatrixVersion::V1_0], - ) - .unwrap() - .map(|body| body.freeze()); + let hs_token = registration.hs_token.as_str(); - let mut parts = http_request.uri().clone().into_parts(); - let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains('?') { - "&" - } else { - "?" 
- }; + let mut http_request = request + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(hs_token), + &[MatrixVersion::V1_0], + ) + .unwrap() + .map(|body| body.freeze()); - parts.path_and_query = Some( - (old_path_and_query + symbol + "access_token=" + hs_token) - .parse() - .unwrap(), - ); - *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); - - let mut reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); - - let url = reqwest_request.url().clone(); - let mut response = match services() - .globals - .default_client() - .execute(reqwest_request) - .await - { - Ok(r) => r, - Err(e) => { - warn!( - "Could not send request to appservice {:?} at {}: {}", - registration.id, destination, e - ); - return Some(Err(e.into())); - } - }; - - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error: {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "Appservice returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - Some(response.map_err(|_| { - warn!( - "Appservice returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Server returned bad response.") - })) + let mut parts = http_request.uri().clone().into_parts(); + let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); + let symbol = if old_path_and_query.contains('?') { + "&" } else { - None + "?" 
+ }; + + parts.path_and_query = Some( + (old_path_and_query + symbol + "access_token=" + hs_token) + .parse() + .unwrap(), + ); + *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); + + let mut reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); + + let url = reqwest_request.url().clone(); + let mut response = match services() + .globals + .default_client() + .execute(reqwest_request) + .await + { + Ok(r) => r, + Err(e) => { + warn!( + "Could not send request to appservice {:?} at {}: {}", + registration.id, destination, e + ); + return Some(Err(e.into())); + } + }; + + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + warn!( + "Appservice returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + utils::string_from_bytes(&body) + ); } + + let response = T::IncomingResponse::try_from_http_response( + http_response_builder + .body(body) + .expect("reqwest body is valid http body"), + ); + + Some(response.map_err(|_| { + warn!( + "Appservice returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Server returned bad response.") + })) } diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index d3a6e39..00ee6c8 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -100,13 +100,7 @@ pub(crate) async fn get_alias_helper( match services().rooms.alias.resolve_local_alias(&room_alias)? 
{ Some(r) => room_id = Some(r), None => { - for appservice in services() - .appservice - .registration_info - .read() - .await - .values() - { + for appservice in services().appservice.all().await { if appservice.aliases.is_match(room_alias.as_str()) && if let Some(opt_result) = services() .sending diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index e841f13..6411ab9 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -80,19 +80,19 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let appservices = services().appservice.all().unwrap(); + let appservices = services().appservice.all().await; let appservice_registration = appservices .iter() - .find(|(_id, registration)| Some(registration.as_token.as_str()) == token); + .find(|info| Some(info.registration.as_token.as_str()) == token); let (sender_user, sender_device, sender_servername, from_appservice) = - if let Some((_id, registration)) = appservice_registration { + if let Some(info) = appservice_registration { match metadata.authentication { AuthScheme::AccessToken => { let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( - registration.sender_localpart.as_str(), + info.registration.sender_localpart.as_str(), services().globals.server_name(), ) .unwrap() diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index 3243183..b547e66 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -10,10 +10,6 @@ impl service::appservice::Data for KeyValueDatabase { id.as_bytes(), serde_yaml::to_string(&yaml).unwrap().as_bytes(), )?; - self.cached_registrations - .write() - .unwrap() - .insert(id.to_owned(), yaml.to_owned()); Ok(id.to_owned()) } @@ -26,33 +22,18 @@ impl service::appservice::Data for KeyValueDatabase { fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; - self.cached_registrations - .write() - .unwrap() - .remove(service_name); Ok(()) } fn get_registration(&self, id: &str) -> Result> { - self.cached_registrations - .read() - .unwrap() - .get(id) - .map_or_else( - || { - self.id_appserviceregistrations - .get(id.as_bytes())? - .map(|bytes| { - serde_yaml::from_slice(&bytes).map_err(|_| { - Error::bad_database( - "Invalid registration bytes in id_appserviceregistrations.", - ) - }) - }) - .transpose() - }, - |r| Ok(Some(r.clone())), - ) + self.id_appserviceregistrations + .get(id.as_bytes())? 
+ .map(|bytes| { + serde_yaml::from_slice(&bytes).map_err(|_| { + Error::bad_database("Invalid registration bytes in id_appserviceregistrations.") + }) + }) + .transpose() } fn iter_ids<'a>(&'a self) -> Result> + 'a>> { diff --git a/src/database/mod.rs b/src/database/mod.rs index 5b8588c..190e7e1 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -10,7 +10,6 @@ use directories::ProjectDirs; use lru_cache::LruCache; use ruma::{ - api::appservice::Registration, events::{ push_rules::{PushRulesEvent, PushRulesEventContent}, room::message::RoomMessageEventContent, @@ -164,7 +163,6 @@ pub struct KeyValueDatabase { //pub pusher: pusher::PushData, pub(super) senderkey_pusher: Arc, - pub(super) cached_registrations: Arc>>, pub(super) pdu_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, @@ -374,7 +372,6 @@ impl KeyValueDatabase { global: builder.open_tree("global")?, server_signingkeys: builder.open_tree("server_signingkeys")?, - cached_registrations: Arc::new(RwLock::new(HashMap::new())), pdu_cache: Mutex::new(LruCache::new( config .pdu_cache_capacity @@ -969,22 +966,6 @@ impl KeyValueDatabase { ); } - // Inserting registrations into cache - for appservice in services().appservice.all()? { - services() - .appservice - .registration_info - .write() - .await - .insert( - appservice.0, - appservice - .1 - .try_into() - .expect("Should be validated on registration"), - ); - } - // This data is probably outdated db.presenceid_presence.clear()?; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 40fa3ee..6b9e21f 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -10,7 +10,8 @@ use tokio::sync::RwLock; use crate::{services, Result}; -/// Compiled regular expressions for a namespace +/// Compiled regular expressions for a namespace. +#[derive(Clone, Debug)] pub struct NamespaceRegex { pub exclusive: Option, pub non_exclusive: Option, @@ -72,7 +73,8 @@ impl TryFrom> for NamespaceRegex { type Error = regex::Error; } -/// Compiled regular expressions for an appservice +/// Appservice registration combined with its compiled regular expressions. +#[derive(Clone, Debug)] pub struct RegistrationInfo { pub registration: Registration, pub users: NamespaceRegex, @@ -95,11 +97,29 @@ impl TryFrom for RegistrationInfo { pub struct Service { pub db: &'static dyn Data, - pub registration_info: RwLock>, + registration_info: RwLock>, } impl Service { - /// Registers an appservice and returns the ID to the caller + pub fn build(db: &'static dyn Data) -> Result { + let mut registration_info = HashMap::new(); + // Inserting registrations into cache + for appservice in db.all()? { + registration_info.insert( + appservice.0, + appservice + .1 + .try_into() + .expect("Should be validated on registration"), + ); + } + + Ok(Self { + db, + registration_info: RwLock::new(registration_info), + }) + } + /// Registers an appservice and returns the ID to the caller. pub async fn register_appservice(&self, yaml: Registration) -> Result { services() .appservice @@ -111,7 +131,7 @@ impl Service { self.db.register_appservice(yaml) } - /// Remove an appservice registration + /// Removes an appservice registration. 
/// /// # Arguments /// @@ -135,7 +155,12 @@ impl Service { self.db.iter_ids() } - pub fn all(&self) -> Result> { - self.db.all() + pub async fn all(&self) -> Vec { + self.registration_info + .read() + .await + .values() + .cloned() + .collect() } } diff --git a/src/service/mod.rs b/src/service/mod.rs index 045ccd1..0cbe6a8 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use lru_cache::LruCache; -use tokio::sync::{Mutex, RwLock}; +use tokio::sync::Mutex; use crate::{Config, Result}; @@ -56,10 +56,7 @@ impl Services { config: Config, ) -> Result { Ok(Self { - appservice: appservice::Service { - db, - registration_info: RwLock::new(HashMap::new()), - }, + appservice: appservice::Service::build(db)?, pusher: pusher::Service { db }, rooms: rooms::Service { alias: rooms::alias::Service { db }, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 1df1db5..035513d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -524,17 +524,11 @@ impl Service { } } - for appservice in services() - .appservice - .registration_info - .read() - .await - .values() - { + for appservice in services().appservice.all().await { if services() .rooms .state_cache - .appservice_in_room(&pdu.room_id, appservice)? + .appservice_in_room(&pdu.room_id, &appservice)? { services() .sending From 34e0e710cb5faaa4a7cad62bf956267661eed2c2 Mon Sep 17 00:00:00 2001 From: lafleur Date: Thu, 26 Oct 2023 16:51:54 +0800 Subject: [PATCH 1596/1727] add registration_token in default cfg and README --- conduit-example.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/conduit-example.toml b/conduit-example.toml index a52121a..1fb2741 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -38,6 +38,12 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true +# A static registration token that new users will have to provide when creating +# an account. YOU NEED TO EDIT THIS. +# - Insert a password that users will have to enter on registration +# - Comment out the line to remove the condition +registration_token = "" + allow_federation = true allow_check_for_updates = true From 1fb5bcf98fa7832ab6c5b78f3ef04caa54f5db1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Jan 2024 18:44:00 +0100 Subject: [PATCH 1597/1727] improvement: registration token now only works when registration is enabled --- conduit-example.toml | 2 +- debian/postinst | 13 +++++ docs/deploying/docker-compose.for-traefik.yml | 1 + .../deploying/docker-compose.with-traefik.yml | 3 +- src/api/client_server/account.rs | 47 ++++++++++++------- 5 files changed, 45 insertions(+), 21 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 1fb2741..c83bce7 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -41,7 +41,7 @@ allow_registration = true # A static registration token that new users will have to provide when creating # an account. YOU NEED TO EDIT THIS. # - Insert a password that users will have to enter on registration -# - Comment out the line to remove the condition +# - Start the line with '#' to remove the condition registration_token = "" allow_federation = true diff --git a/debian/postinst b/debian/postinst index 110f22d..0707b6f 100644 --- a/debian/postinst +++ b/debian/postinst @@ -72,9 +72,22 @@ max_request_size = 20_000_000 # in bytes # Enables registration. 
If set to false, no users can register on this server. allow_registration = true +# A static registration token that new users will have to provide when creating +# an account. +# - Insert a password that users will have to enter on registration +# - Start the line with '#' to remove the condition +#registration_token = "" + allow_federation = true allow_check_for_updates = true +# Enable the display name lightning bolt on registration. +enable_lightning_bolt = true + +# Servers listed here will be used to gather public keys of other servers. +# Generally, copying this exactly should be enough. (Currently, Conduit doesn't +# support batched key requests, so this list should only contain Synapse +# servers.) trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 82bb55b..c0bb042 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -28,6 +28,7 @@ services: CONDUIT_PORT: 6167 CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' + #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 5860327..8ce3ad4 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -31,14 +31,13 @@ services: ### Uncomment and change values as desired # CONDUIT_ADDRESS: 0.0.0.0 # CONDUIT_PORT: 6167 + # CONDUIT_REGISTRATION_TOKEN: '' # require password for registration # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'true' # CONDUIT_ALLOW_FEDERATION: 'true' # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB # We need some way to server the client and server .well-known json. 
The simplest way is to use a nginx container diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index d4529a4..b39e622 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -74,10 +74,7 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() - && !body.from_appservice - && services().globals.config.registration_token.is_none() - { + if !services().globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", @@ -122,21 +119,35 @@ pub async fn register_route(body: Ruma) -> Result Date: Fri, 22 Mar 2024 17:51:15 +0000 Subject: [PATCH 1598/1727] refactor: use BTreeMap for cached registration info --- src/service/appservice/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 6b9e21f..d9ab9eb 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,6 +1,6 @@ mod data; -use std::collections::HashMap; +use std::collections::BTreeMap; pub use data::Data; @@ -97,12 +97,12 @@ impl TryFrom for RegistrationInfo { pub struct Service { pub db: &'static dyn Data, - registration_info: RwLock>, + registration_info: RwLock>, } impl Service { pub fn build(db: &'static dyn Data) -> Result { - let mut registration_info = HashMap::new(); + let mut registration_info = BTreeMap::new(); // Inserting registrations into cache for appservice in db.all()? { registration_info.insert( From b20483aa13399822fe047349d1af7678f6777259 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 22 Mar 2024 18:27:14 +0000 Subject: [PATCH 1599/1727] refactor(appservices): avoid cloning frequently --- src/api/appservice_server.rs | 4 +--- src/api/client_server/alias.rs | 2 +- src/api/ruma_wrapper/axum.rs | 9 ++++---- src/service/admin/mod.rs | 26 ++++++---------------- src/service/appservice/mod.rs | 37 ++++++++++++++++++++++--------- src/service/rooms/timeline/mod.rs | 4 ++-- src/service/sending/mod.rs | 2 +- 7 files changed, 44 insertions(+), 40 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 841c32a..213e4c0 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -17,9 +17,7 @@ pub(crate) async fn send_request( where T: Debug, { - let Some(destination) = registration.url else { - return None; - }; + let destination = registration.url?; let hs_token = registration.hs_token.as_str(); diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 00ee6c8..bc3a5e2 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -100,7 +100,7 @@ pub(crate) async fn get_alias_helper( match services().rooms.alias.resolve_local_alias(&room_alias)? 
{ Some(r) => room_id = Some(r), None => { - for appservice in services().appservice.all().await { + for appservice in services().appservice.read().await.values() { if appservice.aliases.is_match(room_alias.as_str()) && if let Some(opt_result) = services() .sending diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 6411ab9..8ba9fa5 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -80,10 +80,11 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let appservices = services().appservice.all().await; - let appservice_registration = appservices - .iter() - .find(|info| Some(info.registration.as_token.as_str()) == token); + let appservice_registration = if let Some(token) = token { + services().appservice.find_from_token(token).await + } else { + None + }; let (sender_user, sender_device, sender_servername, from_appservice) = if let Some(info) = appservice_registration { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 12bc1cf..f2f60a7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -369,25 +369,13 @@ impl Service { )), }, AdminCommand::ListAppservices => { - if let Ok(appservices) = services() - .appservice - .iter_ids() - .map(|ids| ids.collect::>()) - { - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices - .into_iter() - .filter_map(|r| r.ok()) - .collect::>() - .join(", ") - ); - RoomMessageEventContent::text_plain(output) - } else { - RoomMessageEventContent::text_plain("Failed to get appservices.") - } + let appservices = services().appservice.iter_ids().await; + let output = format!( + "Appservices ({}): {}", + appservices.len(), + appservices.join(", ") + ); + RoomMessageEventContent::text_plain(output) } AdminCommand::ListRooms => { let room_ids = services().rooms.metadata.iter_ids(); diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index d9ab9eb..4bda896 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -4,6 +4,7 @@ use std::collections::BTreeMap; pub use data::Data; +use futures_util::Future; use regex::RegexSet; use ruma::api::appservice::{Namespace, Registration}; use tokio::sync::RwLock; @@ -147,20 +148,36 @@ impl Service { self.db.unregister_appservice(service_name) } - pub fn get_registration(&self, id: &str) -> Result> { - self.db.get_registration(id) - } - - pub fn iter_ids(&self) -> Result> + '_> { - self.db.iter_ids() - } - - pub async fn all(&self) -> Vec { + pub async fn get_registration(&self, id: &str) -> Option { self.registration_info .read() .await - .values() + .get(id) + .cloned() + .map(|info| info.registration) + } + + pub async fn iter_ids(&self) -> Vec { + self.registration_info + .read() + .await + .keys() .cloned() .collect() } + + pub async fn find_from_token(&self, token: &str) -> Option { + self.read() + .await + .values() + .find(|info| info.registration.as_token == token) + .cloned() + } + + pub fn read( + &self, + ) -> impl Future>> + { + self.registration_info.read() + } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 035513d..379d97f 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -524,11 +524,11 @@ impl Service { } } - for appservice in services().appservice.all().await { + for appservice in services().appservice.read().await.values() { if services() .rooms .state_cache - .appservice_in_room(&pdu.room_id, &appservice)? 
+ .appservice_in_room(&pdu.room_id, appservice)? { services() .sending diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index bbacfde..45cca17 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -488,7 +488,7 @@ impl Service { services() .appservice .get_registration(id) - .map_err(|e| (kind.clone(), e))? + .await .ok_or_else(|| { ( kind.clone(), From 8d70f69e621496796341eacf760918941728f0de Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 30 Mar 2024 12:40:58 +0000 Subject: [PATCH 1600/1727] fix: reject /register requests when there is no token and the type is appservice --- src/api/client_server/account.rs | 10 +++++++++- src/api/client_server/session.rs | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index d4529a4..9f98369 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -3,7 +3,8 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ account::{ - change_password, deactivate, get_3pids, get_username_availability, register, + change_password, deactivate, get_3pids, get_username_availability, + register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, ThirdPartyIdRemovalStatus, }, @@ -84,6 +85,13 @@ pub async fn register_route(body: Ruma) -> Result) -> Result { if !body.from_appservice { return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Forbidden login type.", + ErrorKind::MissingToken, + "Missing appservice token.", )); }; if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { From 5c634ceb6b755eaddeb1b854542d9e4a16d763f8 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 30 Mar 2024 11:02:39 +0000 Subject: [PATCH 1601/1727] fix: reject requests with authentication when not used --- Cargo.lock | 36 ++-- Cargo.toml | 2 +- src/api/ruma_wrapper/axum.rs | 322 +++++++++++++++++++---------------- 3 files changed, 198 insertions(+), 162 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d294c0..bd83f2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1992,7 +1992,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "assign", "js_int", @@ -2011,7 +2011,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "js_int", "ruma-common", @@ -2023,7 +2023,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "as_variant", "assign", @@ -2042,7 +2042,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.12.1" -source = 
"git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "as_variant", "base64", @@ -2061,16 +2061,18 @@ dependencies = [ "serde_html_form", "serde_json", "thiserror", + "time", "tracing", "url", "uuid", + "web-time", "wildmatch", ] [[package]] name = "ruma-events" version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "as_variant", "indexmap 2.2.5", @@ -2092,7 +2094,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "js_int", "ruma-common", @@ -2104,7 +2106,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "js_int", "thiserror", @@ -2113,7 +2115,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "js_int", "ruma-common", @@ -2123,7 +2125,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "once_cell", "proc-macro-crate", @@ -2138,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "js_int", "ruma-common", @@ -2150,7 +2152,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "base64", "ed25519-dalek", @@ -2166,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=1a1c61ee1e8f0936e956a3b69c931ce12ee28475#1a1c61ee1e8f0936e956a3b69c931ce12ee28475" +source = 
"git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "itertools 0.11.0", "js_int", @@ -3276,6 +3278,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "weezl" version = "0.1.8" diff --git a/Cargo.toml b/Cargo.toml index ac33472..d2b828a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index a5ef7f7..43e73c6 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -15,13 +15,20 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use http::{Request, StatusCode}; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, + CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{services, Error, Result}; +use crate::{service::appservice::RegistrationInfo, services, Error, Result}; + +enum Token { + Appservice(Box), + User((OwnedUserId, OwnedDeviceId)), + Invalid, + None, +} #[async_trait] impl FromRequest for Ruma @@ -78,177 +85,194 @@ where None => query_params.access_token.as_deref(), }; - let mut json_body = serde_json::from_slice::(&body).ok(); - - let appservice_registration = if let Some(token) = token { - services().appservice.find_from_token(token).await + let token = if let Some(token) = token { + if let Some(reg_info) = services().appservice.find_from_token(token).await { + Token::Appservice(Box::new(reg_info.clone())) + } else if let Some((user_id, device_id)) = 
services().users.find_from_token(token)? { + Token::User((user_id, OwnedDeviceId::from(device_id))) + } else { + Token::Invalid + } } else { - None + Token::None }; + let mut json_body = serde_json::from_slice::(&body).ok(); + let (sender_user, sender_device, sender_servername, from_appservice) = - if let Some(info) = appservice_registration { - match metadata.authentication { - AuthScheme::AccessToken => { - let user_id = query_params.user_id.map_or_else( + match (metadata.authentication, token) { + (_, Token::Invalid) => { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown access token.", + )) + } + ( + AuthScheme::AccessToken + | AuthScheme::AppserviceToken + | AuthScheme::AccessTokenOptional, + Token::Appservice(info), + ) => { + let user_id = query_params + .user_id + .map_or_else( || { UserId::parse_with_server_name( info.registration.sender_localpart.as_str(), services().globals.server_name(), ) - .unwrap() }, - |s| UserId::parse(s).unwrap(), - ); + UserId::parse, + ) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + if !services().users.exists(&user_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User does not exist.", + )); + } - if !services().users.exists(&user_id).unwrap() { + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, None, true) + } + (AuthScheme::AccessToken, Token::None) => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing access token.", + )); + } + ( + AuthScheme::AccessToken | AuthScheme::AccessTokenOptional, + Token::User((user_id, device_id)), + ) => (Some(user_id), Some(device_id), None, false), + (AuthScheme::ServerSignatures, Token::None) => { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let TypedHeader(Authorization(x_matrix)) = parts + .extract::>>() + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); + + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." 
+ } + _ => "Unknown header-related error", + }; + + Error::BadRequest(ErrorKind::Forbidden, msg) + })?; + + let origin_signatures = BTreeMap::from_iter([( + x_matrix.key.clone(), + CanonicalJsonValue::String(x_matrix.sig), + )]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let mut request_map = BTreeMap::from_iter([ + ( + "method".to_owned(), + CanonicalJsonValue::String(parts.method.to_string()), + ), + ( + "uri".to_owned(), + CanonicalJsonValue::String(parts.uri.to_string()), + ), + ( + "origin".to_owned(), + CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), + ), + ( + "destination".to_owned(), + CanonicalJsonValue::String( + services().globals.server_name().as_str().to_owned(), + ), + ), + ( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + }; + + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) + .await; + + let keys = match keys_result { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); return Err(Error::BadRequest( ErrorKind::Forbidden, - "User does not exist.", + "Failed to fetch signing keys.", )); } + }; - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, None, true) - } - AuthScheme::ServerSignatures => (None, None, None, true), - AuthScheme::None => (None, None, None, true), - } - } else { - match metadata.authentication { - AuthScheme::AccessToken => { - let token = match token { - Some(token) => token, - _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing access token.", - )) - } - }; + let pub_key_map = + BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); - match services().users.find_from_token(token).unwrap() { - None => { - return Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown access token.", - )) - } - Some((user_id, device_id)) => ( - Some(user_id), - Some(OwnedDeviceId::from(device_id)), - None, - false, - ), - } - } - AuthScheme::ServerSignatures => { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, Some(x_matrix.origin), false), + Err(e) => { + warn!( + "Failed to verify json request from {}: {}\n{:?}", + x_matrix.origin, e, request_map + ); - let TypedHeader(Authorization(x_matrix)) = parts - .extract::>>() - .await - .map_err(|e| { - warn!("Missing or invalid Authorization header: {}", e); - - let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => { - "Missing Authorization header." - } - TypedHeaderRejectionReason::Error(_) => { - "Invalid X-Matrix signatures." 
- } - _ => "Unknown header-related error", - }; - - Error::BadRequest(ErrorKind::Forbidden, msg) - })?; - - let origin_signatures = BTreeMap::from_iter([( - x_matrix.key.clone(), - CanonicalJsonValue::String(x_matrix.sig), - )]); - - let signatures = BTreeMap::from_iter([( - x_matrix.origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - )]); - - let mut request_map = BTreeMap::from_iter([ - ( - "method".to_owned(), - CanonicalJsonValue::String(parts.method.to_string()), - ), - ( - "uri".to_owned(), - CanonicalJsonValue::String(parts.uri.to_string()), - ), - ( - "origin".to_owned(), - CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), - ), - ( - "destination".to_owned(), - CanonicalJsonValue::String( - services().globals.server_name().as_str().to_owned(), - ), - ), - ( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ), - ]); - - if let Some(json_body) = &json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - let keys_result = services() - .rooms - .event_handler - .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) - .await; - - let keys = match keys_result { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Failed to fetch signing keys.", - )); - } - }; - - let pub_key_map = - BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(x_matrix.origin), false), - Err(e) => { + if parts.uri.to_string().contains('@') { warn!( - "Failed to verify json request from {}: {}\n{:?}", - x_matrix.origin, e, request_map - ); - - if parts.uri.to_string().contains('@') { - warn!( - "Request uri contained '@' character. Make sure your \ + "Request uri contained '@' character. 
Make sure your \ reverse proxy gives Conduit the raw uri (apache: use \ nocanon)" - ); - } - - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Failed to verify X-Matrix signatures.", - )); + ); } + + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to verify X-Matrix signatures.", + )); } } - AuthScheme::None => (None, None, None, false), + } + ( + AuthScheme::None + | AuthScheme::AppserviceToken + | AuthScheme::AccessTokenOptional, + Token::None, + ) => (None, None, None, false), + ( + AuthScheme::ServerSignatures | AuthScheme::None, + Token::Appservice(_) | Token::User(_), + ) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Access tokens should not be used on this endpoint.", + )); + } + (AuthScheme::AppserviceToken, Token::User(_)) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only appservice access tokens should be used on this endpoint.", + )); } }; From 11612e347d01788fec0a6f524f899724341b3293 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 31 Mar 2024 13:25:04 +0100 Subject: [PATCH 1602/1727] fix: return error when trying to unregister unknown appservice id --- src/service/appservice/mod.rs | 3 ++- src/utils/error.rs | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 4bda896..7d2d46b 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -143,7 +143,8 @@ impl Service { .registration_info .write() .await - .remove(service_name); + .remove(service_name) + .ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?; self.db.unregister_appservice(service_name) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 0439028..899dfde 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -85,6 +85,8 @@ pub enum Error { #[cfg(feature = "conduit_bin")] #[error("{0}")] PathError(#[from] axum::extract::rejection::PathRejection), + #[error("{0}")] + AdminCommand(&'static str), } impl Error { From 3ce3d13378dcb523c06ed914527ccc190c1d9457 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 31 Mar 2024 21:30:26 +0100 Subject: [PATCH 1603/1727] fix: do not expect that all http requests are valid reqwest requests --- src/api/appservice_server.rs | 18 +++++++++++------- src/api/client_server/alias.rs | 26 ++++++++++++-------------- src/api/server_server.rs | 3 +-- src/service/pusher/mod.rs | 3 +-- src/service/sending/mod.rs | 8 +++----- 5 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 213e4c0..3ec7a66 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -13,11 +13,16 @@ use tracing::warn; pub(crate) async fn send_request( registration: Registration, request: T, -) -> Option> +) -> Result> where T: Debug, { - let destination = registration.url?; + let destination = match registration.url { + Some(url) => url, + None => { + return Ok(None); + } + }; let hs_token = registration.hs_token.as_str(); @@ -45,8 +50,7 @@ where ); *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); - let mut reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); + let mut reqwest_request = reqwest::Request::try_from(http_request)?; *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); @@ -63,7 +67,7 @@ where "Could not send request to appservice {:?} at {}: {}", registration.id, destination, e ); - 
return Some(Err(e.into())); + return Err(e.into()); } }; @@ -100,11 +104,11 @@ where .expect("reqwest body is valid http body"), ); - Some(response.map_err(|_| { + response.map(Some).map_err(|_| { warn!( "Appservice returned invalid response bytes {}\n{}", destination, url ); Error::BadServerResponse("Server returned bad response.") - })) + }) } diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index bc3a5e2..b84c7c4 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -102,20 +102,18 @@ pub(crate) async fn get_alias_helper( None => { for appservice in services().appservice.read().await.values() { if appservice.aliases.is_match(room_alias.as_str()) - && if let Some(opt_result) = services() - .sending - .send_appservice_request( - appservice.registration.clone(), - appservice::query::query_room_alias::v1::Request { - room_alias: room_alias.clone(), - }, - ) - .await - { - opt_result.is_ok() - } else { - false - } + && matches!( + services() + .sending + .send_appservice_request( + appservice.registration.clone(), + appservice::query::query_room_alias::v1::Request { + room_alias: room_alias.clone(), + }, + ) + .await, + Ok(Some(_opt_result)) + ) { room_id = Some( services() diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0fdf22f..c116e79 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -233,8 +233,7 @@ where } } - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); + let reqwest_request = reqwest::Request::try_from(http_request)?; let url = reqwest_request.url().clone(); diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 418b7a8..6ca86be 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -66,8 +66,7 @@ impl Service { })? .map(|body| body.freeze()); - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); + let reqwest_request = reqwest::Request::try_from(http_request)?; // TODO: we could keep this very short and let expo backoff do it's thing... 
//*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 45cca17..7e54e8b 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -512,10 +512,8 @@ impl Service { ) .await { - None => Ok(kind.clone()), - Some(op_resp) => op_resp - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)), + Ok(_) => Ok(kind.clone()), + Err(e) => Err((kind.clone(), e)), }; drop(permit); @@ -710,7 +708,7 @@ impl Service { &self, registration: Registration, request: T, - ) -> Option> + ) -> Result> where T: Debug, { From 1c529529aa4c7f1f8d08f60b44487c0032cead62 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 1 Apr 2024 13:36:38 +0100 Subject: [PATCH 1604/1727] chore: upgrade nix to 0.28 needed for musl targets on s390x --- Cargo.lock | 24 ++++++++++-------------- Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd83f2d..cc614a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -314,6 +314,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "clang-sys" version = "1.7.0" @@ -1388,15 +1394,6 @@ version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" -[[package]] -name = "memoffset" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.17" @@ -1432,15 +1429,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "cfg-if", + "cfg_aliases", "libc", - "memoffset", - "pin-utils", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d2b828a..636f32e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,7 +125,7 @@ features = [ ] [target.'cfg(unix)'.dependencies] -nix = { version = "0.26.2", features = ["resource"] } +nix = { version = "0.28", features = ["resource"] } [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] From e38af9b7fc069a8929ead6542a8a16d2f1a48286 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 1 Apr 2024 20:55:13 +0100 Subject: [PATCH 1605/1727] feat: use _matrix-fed._tcp SRV record, fallback to _matrix._tcp --- src/api/server_server.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0fdf22f..fb0d31a 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -476,12 +476,11 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe (actual_destination, hostname) } -async fn query_srv_record(hostname: &'_ str) -> Option { - let hostname = hostname.trim_end_matches('.'); - if let Ok(Some(host_port)) = services() +async fn 
query_given_srv_record(record: &str) -> Option { + services() .globals .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{hostname}.")) + .srv_lookup(record) .await .map(|srv| { srv.iter().next().map(|result| { @@ -491,10 +490,17 @@ async fn query_srv_record(hostname: &'_ str) -> Option { ) }) }) + .unwrap_or(None) +} + +async fn query_srv_record(hostname: &'_ str) -> Option { + let hostname = hostname.trim_end_matches('.'); + + if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await { Some(host_port) } else { - None + query_given_srv_record(&format!("_matrix._tcp.{hostname}.")).await } } From 0d62c9de7ce96d90356210084dc36449df59af0d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 2 Apr 2024 17:19:59 +0100 Subject: [PATCH 1606/1727] fix: ignore access tokens where they are not needed --- src/api/ruma_wrapper/axum.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 43e73c6..895b601 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -110,7 +110,8 @@ where ( AuthScheme::AccessToken | AuthScheme::AppserviceToken - | AuthScheme::AccessTokenOptional, + | AuthScheme::AccessTokenOptional + | AuthScheme::None, Token::Appservice(info), ) => { let user_id = query_params @@ -144,7 +145,7 @@ where )); } ( - AuthScheme::AccessToken | AuthScheme::AccessTokenOptional, + AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, Token::User((user_id, device_id)), ) => (Some(user_id), Some(device_id), None, false), (AuthScheme::ServerSignatures, Token::None) => { @@ -259,13 +260,10 @@ where | AuthScheme::AccessTokenOptional, Token::None, ) => (None, None, None, false), - ( - AuthScheme::ServerSignatures | AuthScheme::None, - Token::Appservice(_) | Token::User(_), - ) => { + (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => { return Err(Error::BadRequest( ErrorKind::Unauthorized, - "Access tokens should not be used on this endpoint.", + "Only server signatures should be used on this endpoint.", )); } (AuthScheme::AppserviceToken, Token::User(_)) => { From 0f6b771cdd55633af3bcdb706d48139532106412 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 3 Apr 2024 22:42:14 +0100 Subject: [PATCH 1607/1727] fix(membership): remove join_authorized_via_users_server field on state update --- src/api/client_server/membership.rs | 2 ++ src/api/client_server/profile.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index bc84b26..86ac595 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -270,6 +270,7 @@ pub async fn ban_user_route(body: Ruma) -> Result Date: Fri, 5 Apr 2024 10:21:44 +0100 Subject: [PATCH 1608/1727] feat(membership): check if user already has the membership that is requested to be set --- src/api/client_server/membership.rs | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index bc84b26..beda8cf 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -187,6 +187,14 @@ pub async fn kick_user_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if let Ok(true) = services() + .rooms + .state_cache + .is_left(sender_user, &body.room_id) + { + return Ok(kick_user::v3::Response 
{}); + } + let mut event: RoomMemberEventContent = serde_json::from_str( services() .rooms @@ -247,6 +255,16 @@ pub async fn kick_user_route( pub async fn ban_user_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if let Ok(Some(membership_event)) = services() + .rooms + .state_accessor + .get_member(&body.room_id, sender_user) + { + if membership_event.membership == MembershipState::Ban { + return Ok(ban_user::v3::Response {}); + } + } + let event = services() .rooms .state_accessor @@ -317,6 +335,16 @@ pub async fn unban_user_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if let Ok(Some(membership_event)) = services() + .rooms + .state_accessor + .get_member(&body.room_id, sender_user) + { + if membership_event.membership != MembershipState::Ban { + return Ok(unban_user::v3::Response {}); + } + } + let mut event: RoomMemberEventContent = serde_json::from_str( services() .rooms @@ -497,6 +525,12 @@ async fn join_room_by_id_helper( ) -> Result { let sender_user = sender_user.expect("user is authenticated"); + if let Ok(true) = services().rooms.state_cache.is_joined(sender_user, room_id) { + return Ok(join_room_by_id::v3::Response { + room_id: room_id.into(), + }); + } + let mutex_state = Arc::clone( services() .globals From 110b7e10e68e21037799e5e492aabb6f605c1e56 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 1 Apr 2024 13:04:36 +0100 Subject: [PATCH 1609/1727] fix: do not allow administration of remote users --- src/service/admin/mod.rs | 87 ++++++++++++++++++++++++++++++++++------ 1 file changed, 74 insertions(+), 13 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index f2f60a7..408adf6 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -555,6 +555,13 @@ impl Service { } }; + // Checks if user is local + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "The specified user is not from this server!", + )); + }; + // Check if the specified user is valid if !services().users.exists(&user_id)? || user_id @@ -658,7 +665,15 @@ impl Service { user_id, } => { let user_id = Arc::::from(user_id); - if services().users.exists(&user_id)? { + if !services().users.exists(&user_id)? { + RoomMessageEventContent::text_plain(format!( + "User {user_id} doesn't exist on this server" + )) + } else if user_id.server_name() != services().globals.server_name() { + RoomMessageEventContent::text_plain(format!( + "User {user_id} is not from this server" + )) + } else { RoomMessageEventContent::text_plain(format!( "Making {user_id} leave all rooms before deactivation..." 
)); @@ -672,30 +687,76 @@ impl Service { RoomMessageEventContent::text_plain(format!( "User {user_id} has been deactivated" )) - } else { - RoomMessageEventContent::text_plain(format!( - "User {user_id} doesn't exist on this server" - )) } } AdminCommand::DeactivateAll { leave_rooms, force } => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + let users = body.clone().drain(1..body.len() - 1).collect::>(); - let mut user_ids: Vec<&UserId> = Vec::new(); + let mut user_ids = Vec::new(); + let mut remote_ids = Vec::new(); + let mut non_existant_ids = Vec::new(); + let mut invalid_users = Vec::new(); - for &username in &usernames { - match <&UserId>::try_from(username) { - Ok(user_id) => user_ids.push(user_id), + for &user in &users { + match <&UserId>::try_from(user) { + Ok(user_id) => { + if user_id.server_name() != services().globals.server_name() { + remote_ids.push(user_id) + } else if !services().users.exists(user_id)? { + non_existant_ids.push(user_id) + } else { + user_ids.push(user_id) + } + } Err(_) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username" - ))) + invalid_users.push(user); } } } + let mut markdown_message = String::new(); + let mut html_message = String::new(); + if !invalid_users.is_empty() { + markdown_message.push_str("The following user ids are not valid:\n```\n"); + html_message.push_str("The following user ids are not valid:\n
<pre>\n");
+                        for invalid_user in invalid_users {
+                            markdown_message.push_str(&format!("{invalid_user}\n"));
+                            html_message.push_str(&format!("{invalid_user}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>\n\n");
+                    }
+                    if !remote_ids.is_empty() {
+                        markdown_message
+                            .push_str("The following users are not from this server:\n```\n");
+                        html_message
+                            .push_str("The following users are not from this server:\n<pre>\n");
+                        for remote_id in remote_ids {
+                            markdown_message.push_str(&format!("{remote_id}\n"));
+                            html_message.push_str(&format!("{remote_id}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>\n\n");
+                    }
+                    if !non_existant_ids.is_empty() {
+                        markdown_message.push_str("The following users do not exist:\n```\n");
+                        html_message.push_str("The following users do not exist:\n<pre>\n");
+                        for non_existant_id in non_existant_ids {
+                            markdown_message.push_str(&format!("{non_existant_id}\n"));
+                            html_message.push_str(&format!("{non_existant_id}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>
                \n\n"); + } + if !markdown_message.is_empty() { + return Ok(RoomMessageEventContent::text_html( + markdown_message, + html_message, + )); + } + let mut deactivation_count = 0; let mut admins = Vec::new(); From 9497713a79243eb745a8e06032412a075efe2eec Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 6 Apr 2024 14:10:11 +0100 Subject: [PATCH 1610/1727] fix(membership): check if server is in room to decide whether to do remote leaves --- src/api/client_server/membership.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index bc84b26..c2ad61a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1398,8 +1398,10 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { // Ask a remote server if we don't have this room - if !services().rooms.metadata.exists(room_id)? - && room_id.server_name() != Some(services().globals.server_name()) + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? { if let Err(e) = remote_leave_room(user_id, room_id).await { warn!("Failed to leave room {} remotely: {}", user_id, e); From 2c73c3adbb1b326bdebcac856225bb93275287b8 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 6 Apr 2024 14:12:18 +0100 Subject: [PATCH 1611/1727] fix(sync): send phoney leave event where room state is unknown on invite rejection --- src/api/client_server/sync.rs | 46 ++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index da603dc..e0c6e0b 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,5 +1,6 @@ use crate::{ - service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse, + service::{pdu::EventHash, rooms::timeline::PduCount}, + services, utils, Error, PduEvent, Result, Ruma, RumaResponse, }; use ruma::{ @@ -21,7 +22,7 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::Raw, - uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, EventId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, }; use std::{ collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -296,8 +297,6 @@ async fn sync_helper( for result in all_left_rooms { let (room_id, _) = result?; - let mut left_state_events = Vec::new(); - { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( @@ -325,9 +324,48 @@ async fn sync_helper( if !services().rooms.metadata.exists(&room_id)? 
{ // This is just a rejected invite, not a room we know + let event = PduEvent { + event_id: EventId::new(services().globals.server_name()).into(), + sender: sender_user.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + kind: TimelineEventType::RoomMember, + content: serde_json::from_str(r#"{ "membership": "leave"}"#).unwrap(), + state_key: Some(sender_user.to_string()), + unsigned: None, + // The following keys are dropped on conversion + room_id: room_id.clone(), + prev_events: vec![], + depth: uint!(1), + auth_events: vec![], + redacts: None, + hashes: EventHash { + sha256: String::new(), + }, + signatures: None, + }; + + left_rooms.insert( + room_id, + LeftRoom { + account_data: RoomAccountData { events: Vec::new() }, + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: vec![event.to_sync_state_event()], + }, + }, + ); + continue; } + let mut left_state_events = Vec::new(); + let since_shortstatehash = services() .rooms .user From fe78cc8262d009a94efe8cdf11b2ad8fe10c48ad Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 4 Apr 2024 02:00:08 +0100 Subject: [PATCH 1612/1727] refactor(state_accessor): add method to check if a user can invite another user --- src/service/rooms/state_accessor/mod.rs | 40 +++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 903ad47..c287edc 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -18,9 +18,10 @@ use ruma::{ }, EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; -use tracing::error; +use serde_json::value::to_raw_value; +use tracing::{error, warn}; -use crate::{services, Error, PduEvent, Result}; +use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, @@ -301,6 +302,41 @@ impl Service { }) } + pub async fn user_can_invite( + &self, + room_id: &RoomId, + sender: &UserId, + target_user: &UserId, + ) -> Result { + let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) + .expect("Event content always serializes"); + + let new_event = PduBuilder { + event_type: ruma::events::TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(target_user.into()), + redacts: None, + }; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + Ok(services() + .rooms + .timeline + .create_hash_and_sign_event(new_event, sender, room_id, &state_lock) + .is_ok()) + } + pub fn get_member( &self, room_id: &RoomId, From e88d137bd71232d0cc9793757cf3d79a8fd8c998 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 11 Apr 2024 17:19:42 +0000 Subject: [PATCH 1613/1727] Replace panic!() with unreachable!() --- src/api/client_server/room.rs | 6 +++--- src/service/rooms/timeline/mod.rs | 12 ++---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 128f84f..ee89439 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -147,7 +147,7 @@ pub async fn create_room_route( ); } RoomVersionId::V11 => {} // V11 removed the "creator" key - _ => 
panic!("Unexpected room version {}", room_version), + _ => unreachable!("Validity of room version already checked"), } content.insert( @@ -172,7 +172,7 @@ pub async fn create_room_route( | RoomVersionId::V9 | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()), RoomVersionId::V11 => RoomCreateEventContent::new_v11(), - _ => panic!("Unexpected room version {}", room_version), + _ => unreachable!("Validity of room version already checked"), }; let mut content = serde_json::from_str::( to_raw_value(&content) @@ -633,7 +633,7 @@ pub async fn upgrade_room_route( // "creator" key no longer exists in V11 rooms create_event_content.remove("creator"); } - _ => panic!("Unexpected room version {}", body.new_version) + _ => unreachable!("Validity of room version already checked") } create_event_content.insert( "room_version".into(), diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index d0943c9..cb2e649 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -400,11 +400,7 @@ impl Service { } } RoomVersionId::V11 => { - #[derive(Deserialize)] - struct Redaction { - redacts: Option, - } - let content = serde_json::from_str::(pdu.content.get()) + let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| { Error::bad_database("Invalid content in redaction pdu.") })?; @@ -680,11 +676,7 @@ impl Service { .get_room_version(room_id) .or_else(|_| { if event_type == TimelineEventType::RoomCreate { - #[derive(Deserialize)] - struct RoomCreate { - room_version: RoomVersionId, - } - let content = serde_json::from_str::(content.get()) + let content = serde_json::from_str::(content.get()) .expect("Invalid content in RoomCreate pdu."); Ok(content.room_version) } else { From 9e6ce8326f88d12b6fe3d5cd7bfcc440fd684f9b Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 11 Apr 2024 17:21:00 +0000 Subject: [PATCH 1614/1727] Remove TODO --- src/api/client_server/room.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index ee89439..df32505 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -159,7 +159,6 @@ pub async fn create_room_route( content } None => { - // TODO: Add correct value for v11 let content = match room_version { RoomVersionId::V1 | RoomVersionId::V2 From 561a103140325a537f248f9121163a56cd275257 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 11 Apr 2024 22:55:18 +0100 Subject: [PATCH 1615/1727] chore(config): bump default room version to v10 --- src/config/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 4605855..3ad73bc 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -273,5 +273,5 @@ fn default_turn_ttl() -> u64 { // I know, it's a great name pub fn default_default_room_version() -> RoomVersionId { - RoomVersionId::V9 + RoomVersionId::V10 } From ab8592526fe6f061756f730c349a0e575072f627 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 12 Apr 2024 05:14:39 +0000 Subject: [PATCH 1616/1727] Replace panic!() with unreachable!() --- src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index c5f014c..484fc13 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -978,7 +978,7 @@ impl Service { | RoomVersionId::V9 | 
RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), RoomVersionId::V11 => RoomCreateEventContent::new_v11(), - _ => panic!("Unexpected room version {}", room_version), + _ => unreachable!("Validity of room version already checked"), }; content.federate = true; content.predecessor = None; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index cab61c6..3eb49e1 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -178,13 +178,13 @@ impl Service { RoomVersionId::V8, RoomVersionId::V9, RoomVersionId::V10, + RoomVersionId::V11, ]; // Experimental, partially supported room versions let unstable_room_versions = vec![ RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5, - RoomVersionId::V11, ]; let mut s = Self { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dff727a..2aeffa6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -407,7 +407,7 @@ impl Service { self.redact_pdu(redact_id, pdu)?; } } - _ => panic!("Unexpected room version {}", room_version_id) + _ => unreachable!("Validity of room version already checked") }; } TimelineEventType::SpaceChild => { From 92817213d5c828c7dc95920ded1562ec4b2003f3 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 12 Apr 2024 05:15:37 +0000 Subject: [PATCH 1617/1727] Add missing import --- src/service/rooms/timeline/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2aeffa6..2887fdd 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -15,7 +15,7 @@ use ruma::{ push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, encrypted::Relation, member::MembershipState, - power_levels::RoomPowerLevelsEventContent, + power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, GlobalAccountDataEventType, StateEventType, TimelineEventType, }, From 475a68cbb93da208fed175a2868c97d4c44a8839 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 12 Apr 2024 20:52:14 +0100 Subject: [PATCH 1618/1727] refactor: disable federation at the router level --- src/api/ruma_wrapper/axum.rs | 4 -- src/api/server_server.rs | 8 ---- src/main.rs | 76 +++++++++++++++++++++--------------- 3 files changed, 44 insertions(+), 44 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 895b601..906904b 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -149,10 +149,6 @@ where Token::User((user_id, device_id)), ) => (Some(user_id), Some(device_id), None, false), (AuthScheme::ServerSignatures, Token::None) => { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let TypedHeader(Authorization(x_matrix)) = parts .extract::>>() .await diff --git a/src/api/server_server.rs b/src/api/server_server.rs index fa7f131..b25b131 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -527,10 +527,6 @@ async fn request_well_known(destination: &str) -> Option { pub async fn get_server_version_route( _body: Ruma, ) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - Ok(get_server_version::v1::Response { server: Some(get_server_version::v1::Server { name: Some("Conduit".to_owned()), @@ -547,10 +543,6 @@ pub async fn get_server_version_route( /// forever. 
// Response type for this endpoint is Json because we need to calculate a signature for the response pub async fn get_server_keys_route() -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - let mut verify_keys: BTreeMap = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", services().globals.keypair().version()) diff --git a/src/main.rs b/src/main.rs index b5bf742..7beeb8b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, response::IntoResponse, - routing::{get, on, MethodFilter}, + routing::{any, get, on, MethodFilter}, Router, }; use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; @@ -188,7 +188,7 @@ async fn run_server() -> io::Result<()> { .expect("failed to convert max request size"), )); - let app = routes().layer(middlewares).into_make_service(); + let app = routes(config).layer(middlewares).into_make_service(); let handle = ServerHandle::new(); tokio::spawn(shutdown_signal(handle.clone())); @@ -249,8 +249,8 @@ async fn unrecognized_method( Ok(inner) } -fn routes() -> Router { - Router::new() +fn routes(config: &Config) -> Router { + let router = Router::new() .ruma_route(client_server::get_supported_versions_route) .ruma_route(client_server::get_register_available_route) .ruma_route(client_server::register_route) @@ -390,33 +390,6 @@ fn routes() -> Router { .ruma_route(client_server::get_relating_events_with_rel_type_route) .ruma_route(client_server::get_relating_events_route) .ruma_route(client_server::get_hierarchy_route) - .ruma_route(server_server::get_server_version_route) - .route( - "/_matrix/key/v2/server", - get(server_server::get_server_keys_route), - ) - .route( - "/_matrix/key/v2/server/:key_id", - get(server_server::get_server_keys_deprecated_route), - ) - .ruma_route(server_server::get_public_rooms_route) - .ruma_route(server_server::get_public_rooms_filtered_route) - .ruma_route(server_server::send_transaction_message_route) - .ruma_route(server_server::get_event_route) - .ruma_route(server_server::get_backfill_route) - .ruma_route(server_server::get_missing_events_route) - .ruma_route(server_server::get_event_authorization_route) - .ruma_route(server_server::get_room_state_route) - .ruma_route(server_server::get_room_state_ids_route) - .ruma_route(server_server::create_join_event_template_route) - .ruma_route(server_server::create_join_event_v1_route) - .ruma_route(server_server::create_join_event_v2_route) - .ruma_route(server_server::create_invite_route) - .ruma_route(server_server::get_devices_route) - .ruma_route(server_server::get_room_information_route) - .ruma_route(server_server::get_profile_information_route) - .ruma_route(server_server::get_keys_route) - .ruma_route(server_server::claim_keys_route) .route( "/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync), @@ -426,7 +399,42 @@ fn routes() -> Router { get(initial_sync), ) .route("/", get(it_works)) - .fallback(not_found) + .fallback(not_found); + + if config.allow_federation { + router + .ruma_route(server_server::get_server_version_route) + .route( + "/_matrix/key/v2/server", + get(server_server::get_server_keys_route), + ) + .route( + "/_matrix/key/v2/server/:key_id", + get(server_server::get_server_keys_deprecated_route), + ) + .ruma_route(server_server::get_public_rooms_route) + 
.ruma_route(server_server::get_public_rooms_filtered_route) + .ruma_route(server_server::send_transaction_message_route) + .ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_backfill_route) + .ruma_route(server_server::get_missing_events_route) + .ruma_route(server_server::get_event_authorization_route) + .ruma_route(server_server::get_room_state_route) + .ruma_route(server_server::get_room_state_ids_route) + .ruma_route(server_server::create_join_event_template_route) + .ruma_route(server_server::create_join_event_v1_route) + .ruma_route(server_server::create_join_event_v2_route) + .ruma_route(server_server::create_invite_route) + .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_room_information_route) + .ruma_route(server_server::get_profile_information_route) + .ruma_route(server_server::get_keys_route) + .ruma_route(server_server::claim_keys_route) + } else { + router + .route("/_matrix/federation/*path", any(federation_disabled)) + .route("/_matrix/key/*path", any(federation_disabled)) + } } async fn shutdown_signal(handle: ServerHandle) { @@ -463,6 +471,10 @@ async fn shutdown_signal(handle: ServerHandle) { let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); } +async fn federation_disabled(_: Uri) -> impl IntoResponse { + Error::bad_config("Federation is disabled.") +} + async fn not_found(uri: Uri) -> impl IntoResponse { warn!("Not found: {uri}"); Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") From 54e0e2a14c1446847d02b299d17f7c848abb2ead Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 15 Apr 2024 19:13:10 +0100 Subject: [PATCH 1619/1727] fix(appservices): don't use identity assertion on account management endpoints --- src/api/ruma_wrapper/axum.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 895b601..e721e80 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -108,10 +108,7 @@ where )) } ( - AuthScheme::AccessToken - | AuthScheme::AppserviceToken - | AuthScheme::AccessTokenOptional - | AuthScheme::None, + AuthScheme::AccessToken | AuthScheme::AccessTokenOptional, Token::Appservice(info), ) => { let user_id = query_params @@ -138,6 +135,9 @@ where // TODO: Check if appservice is allowed to be that user (Some(user_id), None, None, true) } + (AuthScheme::None | AuthScheme::AppserviceToken, Token::Appservice(_)) => { + (None, None, None, true) + } (AuthScheme::AccessToken, Token::None) => { return Err(Error::BadRequest( ErrorKind::MissingToken, From 7a7c09785e4895336bb3559c05f8accf0c4c23ac Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 16 Apr 2024 22:45:04 +0100 Subject: [PATCH 1620/1727] feat(pdu): copy top level redact to content and vice versa --- src/api/client_server/room.rs | 2 +- src/service/globals/mod.rs | 6 +---- src/service/pdu.rs | 40 ++++++++++++++++++++++++------- src/service/rooms/timeline/mod.rs | 16 ++++++------- 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 203a8b7..6f92e9f 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -671,7 +671,7 @@ pub async fn upgrade_room_route( // "creator" key no longer exists in V11 rooms create_event_content.remove("creator"); } - _ => unreachable!("Validity of room version already checked") + _ => unreachable!("Validity of room version already checked"), } 
create_event_content.insert( "room_version".into(), diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 3eb49e1..798c725 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -181,11 +181,7 @@ impl Service { RoomVersionId::V11, ]; // Experimental, partially supported room versions - let unstable_room_versions = vec![ - RoomVersionId::V3, - RoomVersionId::V4, - RoomVersionId::V5, - ]; + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { db, diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 0a9ea86..a10573b 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -2,7 +2,8 @@ use crate::Error; use ruma::{ canonical_json::redact_content_in_place, events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, + room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, + space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, }, @@ -25,7 +26,7 @@ pub struct EventHash { pub sha256: String, } -#[derive(Clone, Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Debug, Serialize)] pub struct PduEvent { pub event_id: Arc, pub room_id: OwnedRoomId, @@ -96,10 +97,31 @@ impl PduEvent { Ok(()) } + pub fn copy_redacts(&self) -> (Option>, Box) { + if self.kind == TimelineEventType::RoomRedaction { + if let Ok(mut content) = + serde_json::from_str::(self.content.get()) + { + if let Some(redacts) = content.redacts { + return (Some(redacts.into()), self.content.clone()); + } else if let Some(redacts) = self.redacts.clone() { + content.redacts = Some(redacts.into()); + return ( + self.redacts.clone(), + to_raw_value(&content).expect("Must be valid, we only added redacts field"), + ); + } + } + } + + (self.redacts.clone(), self.content.clone()) + } + #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -112,7 +134,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } @@ -146,8 +168,9 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -161,7 +184,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } @@ -170,8 +193,9 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_message_like_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -185,7 +209,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } 
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2887fdd..8f713c4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -399,15 +399,16 @@ impl Service { } } RoomVersionId::V11 => { - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid content in redaction pdu.") - })?; + let content = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in redaction pdu.") + })?; if let Some(redact_id) = &content.redacts { self.redact_pdu(redact_id, pdu)?; } } - _ => unreachable!("Validity of room version already checked") + _ => unreachable!("Validity of room version already checked"), }; } TimelineEventType::SpaceChild => { @@ -1015,10 +1016,7 @@ impl Service { let mut pdu = self .get_pdu_from_id(&pdu_id)? .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - let room_version_id = services() - .rooms - .state - .get_room_version(&pdu.room_id)?; + let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; pdu.redact(room_version_id, reason)?; self.replace_pdu( &pdu_id, From eb6801290b90e400b2c00c9275d6f90a2d3baad4 Mon Sep 17 00:00:00 2001 From: Valentin Lorentz Date: Wed, 17 Apr 2024 19:37:32 +0200 Subject: [PATCH 1621/1727] Document copy_redacts --- src/service/pdu.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index a10573b..b5c9598 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -97,6 +97,14 @@ impl PduEvent { Ok(()) } + /// Copies the `redacts` property of the event to the `content` dict + /// + /// This follows the specification's + /// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): + /// + /// > For backwards-compatibility with older clients, servers should add a redacts + /// > property to the top level of m.room.redaction events in when serving such events + /// > over the Client-Server API. pub fn copy_redacts(&self) -> (Option>, Box) { if self.kind == TimelineEventType::RoomRedaction { if let Ok(mut content) = From 2d8c551cd5f23c8ab7ab24b0bdde9dd413a5fa20 Mon Sep 17 00:00:00 2001 From: Valentin Lorentz Date: Wed, 17 Apr 2024 19:41:38 +0200 Subject: [PATCH 1622/1727] Fix doc --- src/service/pdu.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index b5c9598..a51d7ec 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -97,7 +97,7 @@ impl PduEvent { Ok(()) } - /// Copies the `redacts` property of the event to the `content` dict + /// Copies the `redacts` property of the event to the `content` dict and vice-versa. /// /// This follows the specification's /// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): @@ -105,6 +105,10 @@ impl PduEvent { /// > For backwards-compatibility with older clients, servers should add a redacts /// > property to the top level of m.room.redaction events in when serving such events /// > over the Client-Server API. + /// > + /// > For improved compatibility with newer clients, servers should add a redacts property + /// > to the content of m.room.redaction events in older room versions when serving + /// > such events over the Client-Server API. 
pub fn copy_redacts(&self) -> (Option>, Box) { if self.kind == TimelineEventType::RoomRedaction { if let Ok(mut content) = From 4b288fd22f826990dfd614ce6bc05baeda59651c Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 18 Apr 2024 20:49:50 +0100 Subject: [PATCH 1623/1727] chore: remove default database backend has been sqlite for far too long, and having a default for this is just asking for trouble --- src/config/mod.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 4605855..ff05754 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -21,7 +21,6 @@ pub struct Config { pub tls: Option, pub server_name: OwnedServerName, - #[serde(default = "default_database_backend")] pub database_backend: String, pub database_path: String, #[serde(default = "default_db_cache_capacity_mb")] @@ -223,10 +222,6 @@ fn default_port() -> u16 { 8000 } -fn default_database_backend() -> String { - "sqlite".to_owned() -} - fn default_db_cache_capacity_mb() -> f64 { 300.0 } From b48e1300f2eea997b06e439504625909da5b4e88 Mon Sep 17 00:00:00 2001 From: Awiteb Date: Thu, 18 Apr 2024 02:35:14 +0300 Subject: [PATCH 1624/1727] chore(docs): Rename configuration section to `Configuration` --- docs/SUMMARY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 30316e2..2caca3e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -2,7 +2,7 @@ - [Introduction](introduction.md) -- [Example configuration](configuration.md) +- [Configuration](configuration.md) - [Deploying](deploying.md) - [Generic](deploying/generic.md) - [Debian](deploying/debian.md) From 2656f6f43546c81d19c24741910551f302fb1688 Mon Sep 17 00:00:00 2001 From: Awiteb Date: Thu, 18 Apr 2024 02:35:52 +0300 Subject: [PATCH 1625/1727] feat(docs): Document all configuration options Fixes: https://gitlab.com/famedly/conduit/-/issues/435 Suggested-by: Matthias Ahouansou Helped-by: Matthias Ahouansou Signed-off-by: Awiteb --- docs/configuration.md | 111 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 108 insertions(+), 3 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index a47e5ff..efa080d 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,5 +1,110 @@ -# Example configuration +# Configuration -``` toml -{{#include ../conduit-example.toml}} +**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable. + +> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error. 
+ +> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect + +Conduit's configuration file is divided into the following sections: + +- [Global](#global) + - [TLS](#tls) + - [Proxy](#proxy) + + +## Global + +The `global` section contains the following fields: + +> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values + +| Field | Type | Description | Default | +| --- | --- | --- | --- | +| `address` | `string` | The address to bind to | `"127.0.0.1"` | +| `port` | `integer` | The port to bind to | `8000` | +| `tls` | `table` | See the [TLS configuration](#tls) | N/A | +| `server_name`_*_ | `string` | The server name | N/A | +| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A | +| `database_path`_*_ | `string` | The path to the database file/dir | N/A | +| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` | +| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` | +| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` | +| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` | +| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` | +| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` | +| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` | +| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) | +| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` | +| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` | +| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` | +| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A | +| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` | +| `allow_federation` | `boolean` | Allow federation with other servers | `true` | +| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` | +| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` | +| `default_room_version` | `string` | The default room version (`"6"`-`"10"`)| `"10"` | +| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` | +| `tracing_flame` | `boolean` | Enable flame tracing | `false` | +| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A | +| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login without it a 400 error will be returned | N/A | +| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` | +| `log` | `string` | The log verbosity to use | `"warn"` | +| `turn_username` | `string` | The TURN username | `""` | +| `turn_password` | `string` | The TURN password | `""` | +| `turn_uris` | `array` | The TURN URIs | `[]` | +| `turn_secret` | `string` | The TURN secret | `""` | +| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | +| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | + + +### TLS +The 
`tls` table contains the following fields: +- `certs`: The path to the public PEM certificate +- `key`: The path to the PEM private key + +#### Example +```toml +[global.tls] +certs = "/path/to/cert.pem" +key = "/path/to/key.pem" +``` + + +### Proxy +You can choose what requests conduit should proxy (if any). The `proxy` table contains the following fields + +#### Global +The global option will proxy all outgoing requests. The `global` table contains the following fields: +- `url`: The URL of the proxy server +##### Example +```toml +[global.proxy.global] +url = "https://example.com" +``` + +#### By domain +An array of tables that contain the following fields: +- `url`: The URL of the proxy server +- `include`: Domains that should be proxied (assumed to be `["*"]` if unset) +- `exclude`: Domains that should not be proxied (takes precedent over `include`) + +Both `include` and `exclude` allow for glob pattern matching. +##### Example +In this example, all requests to domains ending in `.onion` and `matrix.secretly-an-onion-domain.xyz` +will be proxied via `socks://localhost:9050`, except for domains ending in `.myspecial.onion`. You can add as many `by_domain` tables as you need. +```toml +[[global.proxy.by_domain]] +url = "socks5://localhost:9050" +include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"] +exclude = ["*.clearnet.onion"] +``` + +### Example + +> **Note:** The following example is a minimal configuration file. You should replace the values with your own. + +```toml +[global] +{{#include ../conduit-example.toml:22:}} ``` From 8c6ffb6bfc5a517c2531cfd83f9fba16cb5381fd Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 12:31:49 -0700 Subject: [PATCH 1626/1727] unpin crane because the bug was fixed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'crane': 'github:ipetkov/crane/2c653e4478476a52c6aa3ac0495e4dea7449ea0e?narHash=sha256-XoXRS%2B5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc%3D' (2024-02-11) → 'github:ipetkov/crane/55f4939ac59ff8f89c6a4029730a2d49ea09105f?narHash=sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU%3D' (2024-04-21) --- flake.lock | 8 ++++---- flake.nix | 7 +------ 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 1c2142f..1983d80 100644 --- a/flake.lock +++ b/flake.lock @@ -51,17 +51,17 @@ ] }, "locked": { - "lastModified": 1707685877, - "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=", + "lastModified": 1713721181, + "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=", "owner": "ipetkov", "repo": "crane", - "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", + "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f", "type": "github" }, "original": { "owner": "ipetkov", + "ref": "master", "repo": "crane", - "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" } }, diff --git a/flake.nix b/flake.nix index 4536f17..69dd520 100644 --- a/flake.nix +++ b/flake.nix @@ -13,12 +13,7 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - # Pin latest crane that's not affected by the following bugs: - # - # * - # * - # * - url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e"; + url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; }; attic.url = "github:zhaofengli/attic?ref=main"; From 61cb186b5b3e245f05928c54456489ca465c3437 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 12:39:27 -0700 Subject: [PATCH 1627/1727] update rocksdb --- 
Cargo.lock | 8 ++++---- Cargo.toml | 2 +- flake.nix | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc614a7..6e870c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2204,9 +2204,9 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.18.1+8.11.3" +version = "0.20.0+9.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef22f434855ceb8daf99073fcf909d957ad8705f5b530154e47978ae68e867c" +checksum = "48b14f4848d8574c074bb26445b43e63735d802ef2fc5cc40c1b015134baee0c" dependencies = [ "bindgen", "bzip2-sys", @@ -2220,9 +2220,9 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.22.7" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62223f035e676bb69da2ab9487e6f710da82be89473c658c51ed3b1a60c4b4a6" +checksum = "d36eae38b1d3d0018e273191f791343bd3eb030d7da63aaa20350e41c0182881" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index b5be505..cc5b37e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,7 +116,7 @@ sd-notify = { version = "0.4.1", optional = true } [dependencies.rocksdb] package = "rust-rocksdb" -version = "0.22.7" +version = "0.24.0" optional = true features = [ "multi-threaded-cf", diff --git a/flake.nix b/flake.nix index 69dd520..114e221 100644 --- a/flake.nix +++ b/flake.nix @@ -55,7 +55,7 @@ rocksdb' = pkgs: let - version = "8.11.3"; + version = "9.1.0"; in pkgs.rocksdb.overrideAttrs (old: { inherit version; @@ -63,7 +63,7 @@ owner = "facebook"; repo = "rocksdb"; rev = "v${version}"; - hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs="; + hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8="; }; }); From 6c9c1b5afe23e136d20bcb121a0258559ce70d2f Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 22 Apr 2024 10:32:33 +0100 Subject: [PATCH 1628/1727] fix(appservices): don't perform identity assertion when auth is optional --- src/api/ruma_wrapper/axum.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index b349472..af2dbeb 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -107,10 +107,7 @@ where "Unknown access token.", )) } - ( - AuthScheme::AccessToken | AuthScheme::AccessTokenOptional, - Token::Appservice(info), - ) => { + (AuthScheme::AccessToken, Token::Appservice(info)) => { let user_id = query_params .user_id .map_or_else( @@ -135,9 +132,12 @@ where // TODO: Check if appservice is allowed to be that user (Some(user_id), None, None, true) } - (AuthScheme::None | AuthScheme::AppserviceToken, Token::Appservice(_)) => { - (None, None, None, true) - } + ( + AuthScheme::None + | AuthScheme::AppserviceToken + | AuthScheme::AccessTokenOptional, + Token::Appservice(_), + ) => (None, None, None, true), (AuthScheme::AccessToken, Token::None) => { return Err(Error::BadRequest( ErrorKind::MissingToken, From 00d6aeddb6c43660326277a675fadb08a1617c10 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 23 Apr 2024 21:42:01 +0100 Subject: [PATCH 1629/1727] refactor(redactions): move checks inside conduit ruma was already accidentally performing these checks for us, but this shouldn't be the case --- src/service/rooms/event_handler/mod.rs | 15 +++++++- src/service/rooms/state_accessor/mod.rs | 51 +++++++++++++++++++++++++ src/service/rooms/timeline/mod.rs | 35 ++++++++++++++++- 3 files changed, 97 insertions(+), 4 deletions(-) diff --git 
a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 1547d40..ada289f 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -24,7 +24,7 @@ use ruma::{ }, events::{ room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, - StateEventType, + StateEventType, TimelineEventType, }, int, serde::Base64, @@ -796,7 +796,18 @@ impl Service { None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))? + || if let Some(redact_id) = &incoming_pdu.redacts { + incoming_pdu.kind == TimelineEventType::RoomRedaction + && !services().rooms.state_accessor.user_can_redact( + redact_id, + &incoming_pdu.sender, + &incoming_pdu.room_id, + true, + )? + } else { + false + }; // 13. Use state resolution to find new room state diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index c287edc..8ca1b77 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -13,9 +13,11 @@ use ruma::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, name::RoomNameEventContent, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, StateEventType, }, + state_res::Event, EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use serde_json::value::to_raw_value; @@ -351,4 +353,53 @@ impl Service { .map_err(|_| Error::bad_database("Invalid room member event in database.")) }) } + + /// Checks if a given user can redact a given event + /// + /// If federation is true, it allows redaction events from any user of the same server as the original event sender + pub fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, + ) -> Result { + self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .map(|e| { + serde_json::from_str(e.content.get()) + .map(|c: RoomPowerLevelsEventContent| c.into()) + .map(|e: RoomPowerLevels| { + e.user_can_redact_event_of_other(sender) + || e.user_can_redact_own_event(sender) + && if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) + { + if federation { + pdu.sender().server_name() == sender.server_name() + } else { + pdu.sender == sender + } + } else { + false + } + }) + .map_err(|_| { + Error::bad_database("Invalid m.room.power_levels event in database") + }) + }) + // Falling back on m.room.create to judge power levels + .unwrap_or_else(|| { + if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? { + Ok(pdu.sender == sender + || if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) { + pdu.sender == sender + } else { + false + }) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )) + } + }) + } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8f713c4..2752abe 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -395,7 +395,14 @@ impl Service { | RoomVersionId::V9 | RoomVersionId::V10 => { if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; + if services().rooms.state_accessor.user_can_redact( + redact_id, + &pdu.sender, + &pdu.room_id, + false, + )? 
{ + self.redact_pdu(redact_id, pdu)?; + } } } RoomVersionId::V11 => { @@ -405,7 +412,14 @@ impl Service { Error::bad_database("Invalid content in redaction pdu.") })?; if let Some(redact_id) = &content.redacts { - self.redact_pdu(redact_id, pdu)?; + if services().rooms.state_accessor.user_can_redact( + redact_id, + &pdu.sender, + &pdu.room_id, + false, + )? { + self.redact_pdu(redact_id, pdu)?; + } } } _ => unreachable!("Validity of room version already checked"), @@ -885,6 +899,23 @@ impl Service { } } + // If redaction event is not authorized, do not append it to the timeline + if let Some(redact_id) = &pdu.redacts { + if pdu.kind == TimelineEventType::RoomRedaction + && !services().rooms.state_accessor.user_can_redact( + redact_id, + &pdu.sender, + &pdu.room_id, + false, + )? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User cannot redact this event.", + )); + } + }; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = services().rooms.state.append_to_state(&pdu)?; From 89c1c2109cba0cef29b09201d6aa188889c2cfdd Mon Sep 17 00:00:00 2001 From: Valentin Lorentz Date: Wed, 24 Apr 2024 08:29:47 +0200 Subject: [PATCH 1630/1727] Link to the specification from user_can_redact's documentation --- src/service/rooms/state_accessor/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 8ca1b77..9a3580d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -356,7 +356,9 @@ impl Service { /// Checks if a given user can redact a given event /// - /// If federation is true, it allows redaction events from any user of the same server as the original event sender + /// If `federation` is `true`, it allows redaction events from any user of the same server + /// as the original event sender, [as required by room versions >= + /// v3](https://spec.matrix.org/v1.10/rooms/v11/#handling-redactions) pub fn user_can_redact( &self, redacts: &EventId, From 1c4ae8d268dc58829b5114ac2f0441c6f72e499d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 24 Apr 2024 09:28:16 +0100 Subject: [PATCH 1631/1727] fix(redaction): use content.redacts when checking v11 events --- src/service/rooms/event_handler/mod.rs | 60 ++++++++++++++++++----- src/service/rooms/timeline/mod.rs | 68 ++++++++++++++++++++------ 2 files changed, 102 insertions(+), 26 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ada289f..b7817e5 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -23,7 +23,10 @@ use ruma::{ }, }, events::{ - room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, + room::{ + create::RoomCreateEventContent, redaction::RoomRedactionEventContent, + server_acl::RoomServerAclEventContent, + }, StateEventType, TimelineEventType, }, int, @@ -797,17 +800,50 @@ impl Service { |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))? - || if let Some(redact_id) = &incoming_pdu.redacts { - incoming_pdu.kind == TimelineEventType::RoomRedaction - && !services().rooms.state_accessor.user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - )? 
- } else { - false - }; + || incoming_pdu.kind == TimelineEventType::RoomRedaction + && match room_version_id { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + if let Some(redact_id) = &incoming_pdu.redacts { + !services().rooms.state_accessor.user_can_redact( + redact_id, + &incoming_pdu.sender, + &incoming_pdu.room_id, + true, + )? + } else { + false + } + } + RoomVersionId::V11 => { + let content = serde_json::from_str::( + incoming_pdu.content.get(), + ) + .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; + + if let Some(redact_id) = &content.redacts { + !services().rooms.state_accessor.user_can_redact( + redact_id, + &incoming_pdu.sender, + &incoming_pdu.room_id, + true, + )? + } else { + false + } + } + _ => { + unreachable!("Validity of room version already checked") + } + }; // 13. Use state resolution to find new room state diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2752abe..acb00d0 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -900,21 +900,61 @@ impl Service { } // If redaction event is not authorized, do not append it to the timeline - if let Some(redact_id) = &pdu.redacts { - if pdu.kind == TimelineEventType::RoomRedaction - && !services().rooms.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "User cannot redact this event.", - )); + if pdu.kind == TimelineEventType::RoomRedaction { + match services().rooms.state.get_room_version(&pdu.room_id)? { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + if let Some(redact_id) = &pdu.redacts { + if !services().rooms.state_accessor.user_can_redact( + redact_id, + &pdu.sender, + &pdu.room_id, + false, + )? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User cannot redact this event.", + )); + } + }; + } + RoomVersionId::V11 => { + let content = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in redaction pdu.") + })?; + + if let Some(redact_id) = &content.redacts { + if !services().rooms.state_accessor.user_can_redact( + redact_id, + &pdu.sender, + &pdu.room_id, + false, + )? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User cannot redact this event.", + )); + } + } + } + _ => { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "Unsupported room version", + )); + } } - }; + } // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. From 0c0c9549b937b28058bba6408e21001a91c8ab94 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Wed, 24 Apr 2024 20:09:13 +0200 Subject: [PATCH 1632/1727] Sync up the generated Conduit config for Debian This applies changes made in the exampl config by commits dc89fbe and 844508b. 
--- debian/postinst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/debian/postinst b/debian/postinst index 0707b6f..6361af5 100644 --- a/debian/postinst +++ b/debian/postinst @@ -91,6 +91,11 @@ enable_lightning_bolt = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time + +# Controls the log verbosity. See also [here][0]. +# +# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives +#log = "..." EOF fi ;; From e40aed3a7d362733ef22fb7176634c804917b1af Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 24 Apr 2024 19:17:00 +0100 Subject: [PATCH 1633/1727] fix(state-accessor): hold the state_lock when checking if a user can invite --- src/service/rooms/state_accessor/mod.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 9a3580d..53e3176 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -21,6 +21,7 @@ use ruma::{ EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use serde_json::value::to_raw_value; +use tokio::sync::MutexGuard; use tracing::{error, warn}; use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result}; @@ -309,6 +310,7 @@ impl Service { room_id: &RoomId, sender: &UserId, target_user: &UserId, + state_lock: &MutexGuard<'_, ()>, ) -> Result { let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) .expect("Event content always serializes"); @@ -321,21 +323,10 @@ impl Service { redacts: None, }; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - Ok(services() .rooms .timeline - .create_hash_and_sign_event(new_event, sender, room_id, &state_lock) + .create_hash_and_sign_event(new_event, sender, room_id, state_lock) .is_ok()) } From 3086271139826902d05637682ae6e1ce6404627b Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 16 Apr 2024 15:53:38 +0100 Subject: [PATCH 1634/1727] feat(appservice): ensure users/aliases outside of namespaces are not accessed --- src/api/client_server/account.rs | 43 ++++++++++++------- src/api/client_server/alias.rs | 36 ++++++++++++++++ src/api/client_server/room.rs | 18 +++++++- src/api/client_server/session.rs | 73 +++++++++++++++++++++++++++----- src/api/ruma_wrapper/axum.rs | 25 +++++++---- src/api/ruma_wrapper/mod.rs | 4 +- src/service/appservice/mod.rs | 42 +++++++++++++++++- 7 files changed, 202 insertions(+), 39 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 9b6bb5f..0226abc 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -75,20 +75,13 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() && !body.from_appservice { + if !services().globals.allow_registration() && body.appservice_info.is_none() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", )); } - if body.body.login_type == Some(LoginType::ApplicationService) && 
!body.from_appservice { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); - } - let is_guest = body.kind == RegistrationKind::Guest; let user_id = match (&body.username, is_guest) { @@ -126,10 +119,30 @@ pub async fn register_route(body: Ruma) -> Result) -> Result) -> Result) -> Result) -> Result { if services() diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 54069c0..3e583fa 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -67,6 +67,13 @@ pub async fn login_route(body: Ruma) -> Result) -> Result) -> Result { - if !body.from_appservice { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); - }; - if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { UserId::parse_with_server_name( user_id.to_lowercase(), services().globals.server_name(), @@ -133,7 +145,23 @@ pub async fn login_route(body: Ruma) -> Result { warn!("Unsupported or unknown login type: {:?}", &body.login_info); @@ -199,6 +227,15 @@ pub async fn logout_route(body: Ruma) -> Result Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(sender_user) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + } + } else { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing appservice token.", + )); + } + for device_id in services().users.all_device_ids(sender_user).flatten() { services().users.remove_device(sender_user, &device_id)?; } diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index af2dbeb..649c1f5 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -99,7 +99,7 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let (sender_user, sender_device, sender_servername, from_appservice) = + let (sender_user, sender_device, sender_servername, appservice_info) = match (metadata.authentication, token) { (_, Token::Invalid) => { return Err(Error::BadRequest( @@ -122,6 +122,14 @@ where .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; + + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + } + if !services().users.exists(&user_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -129,15 +137,14 @@ where )); } - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, None, true) + (Some(user_id), None, None, Some(*info)) } ( AuthScheme::None | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, - Token::Appservice(_), - ) => (None, None, None, true), + Token::Appservice(info), + ) => (None, None, None, Some(*info)), (AuthScheme::AccessToken, Token::None) => { return Err(Error::BadRequest( ErrorKind::MissingToken, @@ -147,7 +154,7 @@ where ( AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, Token::User((user_id, device_id)), - ) => (Some(user_id), Some(device_id), None, false), + ) => (Some(user_id), Some(device_id), None, None), (AuthScheme::ServerSignatures, Token::None) => { let TypedHeader(Authorization(x_matrix)) = parts .extract::>>() @@ -228,7 +235,7 @@ where BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(x_matrix.origin), false), + Ok(()) => (None, None, Some(x_matrix.origin), None), Err(e) => { warn!( "Failed to verify json request from {}: {}\n{:?}", @@ -255,7 +262,7 @@ where | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, Token::None, - ) => (None, None, None, false), + ) => (None, None, None, None), (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => { return Err(Error::BadRequest( ErrorKind::Unauthorized, @@ -318,7 +325,7 @@ where sender_user, sender_device, sender_servername, - from_appservice, + appservice_info, json_body, }) } diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs index ac4c825..862da1d 100644 --- a/src/api/ruma_wrapper/mod.rs +++ b/src/api/ruma_wrapper/mod.rs @@ -1,4 +1,4 @@ -use crate::Error; +use crate::{service::appservice::RegistrationInfo, Error}; use ruma::{ api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, @@ -16,7 +16,7 @@ pub struct Ruma { pub sender_servername: Option, // This is None when body is not a valid string pub json_body: Option, - pub from_appservice: bool, + pub appservice_info: Option, } impl Deref for Ruma { diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 7d2d46b..9db6609 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -6,7 +6,10 @@ pub use data::Data; use futures_util::Future; use regex::RegexSet; -use ruma::api::appservice::{Namespace, Registration}; +use ruma::{ + api::appservice::{Namespace, Registration}, + RoomAliasId, RoomId, UserId, +}; use tokio::sync::RwLock; use crate::{services, Result}; @@ -83,6 +86,18 @@ pub struct RegistrationInfo { pub rooms: NamespaceRegex, } +impl RegistrationInfo { + pub fn is_user_match(&self, user_id: &UserId) -> bool { + self.users.is_match(user_id.as_str()) + || self.registration.sender_localpart == user_id.localpart() + } + + pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { + self.users.is_exclusive_match(user_id.as_str()) + || self.registration.sender_localpart == user_id.localpart() + } +} + impl TryFrom for RegistrationInfo { fn try_from(value: Registration) -> Result { Ok(RegistrationInfo { @@ -122,6 +137,7 @@ impl Service { } /// Registers an appservice and returns the ID to the caller. 
pub async fn register_appservice(&self, yaml: Registration) -> Result { + //TODO: Check for collisions between exclusive appservice namespaces services() .appservice .registration_info @@ -175,6 +191,30 @@ impl Service { .cloned() } + // Checks if a given user id matches any exclusive appservice regex + pub async fn is_exclusive_user_id(&self, user_id: &UserId) -> bool { + self.read() + .await + .values() + .any(|info| info.is_exclusive_user_match(user_id)) + } + + // Checks if a given room alias matches any exclusive appservice regex + pub async fn is_exclusive_alias(&self, alias: &RoomAliasId) -> bool { + self.read() + .await + .values() + .any(|info| info.aliases.is_exclusive_match(alias.as_str())) + } + + // Checks if a given room id matches any exclusive appservice regex + pub async fn is_exclusive_room_id(&self, room_id: &RoomId) -> bool { + self.read() + .await + .values() + .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) + } + pub fn read( &self, ) -> impl Future>> From 74db555336ca42684f985e6adf8fd30bea86453d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 3 Apr 2024 23:27:02 +0100 Subject: [PATCH 1635/1727] fix(membership): perform stricter checks when choosing an authorized user --- src/api/client_server/membership.rs | 87 ++++++++++------------------- 1 file changed, 30 insertions(+), 57 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 60e03e4..6fe1e0e 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -15,7 +15,6 @@ use ruma::{ room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, }, StateEventType, TimelineEventType, }, @@ -858,11 +857,6 @@ async fn join_room_by_id_helper( &StateEventType::RoomJoinRules, "", )?; - let power_levels_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomPowerLevels, - "", - )?; let join_rules_event_content: Option = join_rules_event .as_ref() @@ -873,15 +867,6 @@ async fn join_room_by_id_helper( }) }) .transpose()?; - let power_levels_event_content: Option = power_levels_event - .as_ref() - .map(|power_levels_event| { - serde_json::from_str(power_levels_event.content.get()).map_err(|e| { - warn!("Invalid power levels event: {}", e); - Error::bad_database("Invalid power levels event in db.") - }) - }) - .transpose()?; let restriction_rooms = match join_rules_event_content { Some(RoomJoinRulesEventContent { @@ -900,47 +885,37 @@ async fn join_room_by_id_helper( _ => Vec::new(), }; - let authorized_user = restriction_rooms - .iter() - .find_map(|restriction_room_id| { - if !services() - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - .ok()? 
+ let authorized_user = if restriction_rooms.iter().any(|restriction_room_id| { + services() + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .unwrap_or(false) + }) { + let mut auth_user = None; + for user in services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .collect::>() + { + if user.server_name() == services().globals.server_name() + && services() + .rooms + .state_accessor + .user_can_invite(room_id, &user, sender_user, &state_lock) + .await + .unwrap_or(false) { - return None; + auth_user = Some(user); + break; } - let authorized_user = power_levels_event_content - .as_ref() - .and_then(|c| { - c.users - .iter() - .filter(|(uid, i)| { - uid.server_name() == services().globals.server_name() - && **i > ruma::int!(0) - && services() - .rooms - .state_cache - .is_joined(uid, restriction_room_id) - .unwrap_or(false) - }) - .max_by_key(|(_, i)| *i) - .map(|(u, _)| u.to_owned()) - }) - .or_else(|| { - // TODO: Check here if user is actually allowed to invite. Currently the auth - // check will just fail in this case. - services() - .rooms - .state_cache - .room_members(restriction_room_id) - .filter_map(|r| r.ok()) - .find(|uid| uid.server_name() == services().globals.server_name()) - }); - Some(authorized_user) - }) - .flatten(); + } + auth_user + } else { + None + }; let event = RoomMemberEventContent { membership: MembershipState::Join, @@ -978,9 +953,7 @@ async fn join_room_by_id_helper( if !restriction_rooms.is_empty() && servers .iter() - .filter(|s| *s != services().globals.server_name()) - .count() - > 0 + .any(|s| *s != services().globals.server_name()) { info!( "We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements" From a854a46c24e23c6f56ed18a27e0fd6ec337bc053 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Apr 2024 23:21:36 +0200 Subject: [PATCH 1636/1727] Bump version to v0.7.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e870c0..0063a3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,7 +377,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.7.0-alpha" +version = "0.7.0" dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index cc5b37e..4dfc04f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.7.0-alpha" +version = "0.7.0" edition = "2021" # See also `rust-toolchain.toml` From 3b3466fd511ffbd3c21d0ed208bd7ad3b63fa582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 25 Apr 2024 09:19:44 +0200 Subject: [PATCH 1637/1727] Bump version to v0.8.0-alpha --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0063a3e..a26c1c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,7 +377,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.7.0" +version = "0.8.0-alpha" dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 4dfc04f..7d40b19 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = 
"README.md" -version = "0.7.0" +version = "0.8.0-alpha" edition = "2021" # See also `rust-toolchain.toml` From 779cebcd77b18e4ecb38ec973ece0a84db6ffc21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 25 Apr 2024 09:50:45 +0200 Subject: [PATCH 1638/1727] Update download links in documentation --- docs/deploying/generic.md | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 307de35..5710df8 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -10,27 +10,22 @@ Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore only offer Linux binaries. -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url: +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. For `arm`, you should use `aarch`. Now copy the appropriate url: -**Stable versions:** +**Stable/Main versions:** -| CPU Architecture | Download stable version | -| ------------------------------------------- | --------------------------------------------------------------- | -| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | -| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | -| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | +| Target | Type | Download | +|-|-|-| +| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | +| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) | +| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) | +| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) | +| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-arm64v8.tar.gz?job=artifacts) | These builds were created on and linked against the glibc version shipped with Debian bullseye. If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. 
-[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master -[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master -[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master -[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master -[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master -[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master - -**Latest versions:** +**Latest/Next versions:** | Target | Type | Download | |-|-|-| From 61cd2892b85cacafc9b6153aefb5f80b5ab03d6a Mon Sep 17 00:00:00 2001 From: Ossi Herrala Date: Sat, 27 Apr 2024 11:01:55 +0300 Subject: [PATCH 1639/1727] Remove unused dependencies --- Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7d40b19..5384c95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,9 +107,7 @@ futures-util = { version = "0.3.28", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.8", features = ["env", "toml"] } -tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } -lazy_static = "1.4.0" async-trait = "0.1.68" sd-notify = { version = "0.4.1", optional = true } @@ -134,7 +132,7 @@ backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] #backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] -jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] +jemalloc = ["tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] systemd = ["sd-notify"] From 3b6928ebcf95e68d16ef9d64b2c762260dd53239 Mon Sep 17 00:00:00 2001 From: Ossi Herrala Date: Sat, 27 Apr 2024 11:08:08 +0300 Subject: [PATCH 1640/1727] Update dependencies that don't need code changes --- Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5384c95..0a5cd0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] } bytes = "1.4.0" http = "0.2.9" # Used to find data directory for default db path -directories = "4.0.1" +directories = "5" # Used for ruma wrapper serde_json = { version = "1.0.96", features = ["raw_value"] } # Used for appservice registration files @@ -62,7 +62,7 @@ serde = { version = "1.0.163", features = ["rc"] } # Used for secure identifiers rand = "0.8.5" # Used to hash passwords -rust-argon2 = "1.0.0" +rust-argon2 = "2" # Used to send requests hyper = "0.14.26" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] } @@ -71,7 +71,7 @@ thiserror = "1.0.40" # Used to generate thumbnails for images image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key -base64 = "0.21.2" +base64 = "0.22" # Used when hashing the state ring = "0.17.7" # Used when querying the SRV record of other 
servers @@ -88,7 +88,7 @@ opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" -rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } +rusqlite = { version = "0.31", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } # crossbeam = { version = "0.8.2", optional = true } num_cpus = "1.15.0" @@ -114,7 +114,7 @@ sd-notify = { version = "0.4.1", optional = true } [dependencies.rocksdb] package = "rust-rocksdb" -version = "0.24.0" +version = "0.25" optional = true features = [ "multi-threaded-cf", From a56139549f1b4e09ce290c86485bff7b84e45280 Mon Sep 17 00:00:00 2001 From: Ossi Herrala Date: Sat, 27 Apr 2024 11:10:13 +0300 Subject: [PATCH 1641/1727] Trust-DNS has been renamed to Hickory-DNS --- Cargo.toml | 2 +- src/service/globals/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0a5cd0c..3b28202 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ base64 = "0.22" # Used when hashing the state ring = "0.17.7" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.22.0" +hickory-resolver = "0.24" # Used to find matching events for appservices regex = "1.8.1" # jwt jsonwebtokens diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 798c725..ab66ed4 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -9,6 +9,7 @@ use crate::api::server_server::FedDest; use crate::{services, Config, Error, Result}; use futures_util::FutureExt; +use hickory_resolver::TokioAsyncResolver; use hyper::{ client::connect::dns::{GaiResolver, Name}, service::Service as HyperService, @@ -37,7 +38,6 @@ use std::{ }; use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; use tracing::{error, info}; -use trust_dns_resolver::TokioAsyncResolver; use base64::{engine::general_purpose, Engine as _}; From aff97e4032f92dee179af91fbc3fb5710f25b39d Mon Sep 17 00:00:00 2001 From: Ossi Herrala Date: Sat, 27 Apr 2024 11:12:13 +0300 Subject: [PATCH 1642/1727] Update image crate --- Cargo.toml | 2 +- src/service/media/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3b28202..8838339 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ reqwest = { version = "0.11.18", default-features = false, features = ["rustls-t # Used for conduit::Error type thiserror = "1.0.40" # Used to generate thumbnails for images -image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.25", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.22" # Used when hashing the state diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index fc8fa56..0340ab4 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -192,7 +192,7 @@ impl Service { let mut thumbnail_bytes = Vec::new(); thumbnail.write_to( &mut Cursor::new(&mut thumbnail_bytes), - image::ImageOutputFormat::Png, + image::ImageFormat::Png, )?; // Save thumbnail in database so we don't have to generate it again next time From 2d3f64c1e5a4b4e2f68c2d80707c2f1873ded36c Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 10:45:53 +0100 Subject: [PATCH 1643/1727] chore: upgrade lockfile --- Cargo.lock | 269 
+++++++++++++++++++++++++---------------------------- 1 file changed, 127 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a26c1c8..b2a4739 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -88,7 +88,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -196,6 +196,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64ct" version = "1.6.0" @@ -219,7 +225,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn", ] [[package]] @@ -360,7 +366,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -382,18 +388,18 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64", + "base64 0.22.0", "bytes", "clap", "directories", "figment", "futures-util", + "hickory-resolver", "hmac", "http", "hyper", "image", "jsonwebtoken", - "lazy_static", "lru-cache", "nix", "num_cpus", @@ -418,7 +424,6 @@ dependencies = [ "thiserror", "thread_local", "threadpool", - "tikv-jemalloc-ctl", "tikv-jemallocator", "tokio", "tower", @@ -427,7 +432,6 @@ dependencies = [ "tracing-flame", "tracing-opentelemetry", "tracing-subscriber", - "trust-dns-resolver", ] [[package]] @@ -547,7 +551,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -601,22 +605,23 @@ dependencies = [ [[package]] name = "directories" -version = "4.0.1" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -661,14 +666,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn", ] [[package]] @@ -679,9 +684,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -809,7 +814,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.52", + "syn", ] [[package]] @@ -924,9 +929,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ "hashbrown 0.14.3", ] @@ -937,7 +942,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64", + "base64 0.21.7", "bytes", "headers-core", "http", @@ -967,6 +972,51 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hickory-proto" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1067,11 +1117,10 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] @@ -1088,17 +1137,18 @@ dependencies = [ [[package]] name = "image" -version = "0.24.9" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +checksum = "fd54d660e773627692c524beaad361aca785a4f9f5730ce91f42aabe5bce3d11" dependencies = [ "bytemuck", "byteorder", "color_quant", "gif", - "jpeg-decoder", "num-traits", "png", + "zune-core", + "zune-jpeg", ] [[package]] @@ -1185,12 +1235,6 @@ dependencies = [ "libc", ] -[[package]] -name = "jpeg-decoder" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" - [[package]] name = "js-sys" version = "0.3.69" @@ -1224,7 +1268,7 @@ version = "9.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" dependencies = [ - "base64", + "base64 0.21.7", "js-sys", "pem", "ring", @@ -1294,9 +1338,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.26.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" dependencies = [ 
"cc", "pkg-config", @@ -1376,12 +1420,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.3" @@ -1599,6 +1637,12 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "1.1.1" @@ -1637,12 +1681,6 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "paste" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" - [[package]] name = "pear" version = "0.2.8" @@ -1663,7 +1701,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -1672,7 +1710,7 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64", + "base64 0.21.7", "serde", ] @@ -1715,7 +1753,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -1804,7 +1842,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", "version_check", "yansi", ] @@ -1924,7 +1962,7 @@ version = "0.11.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" dependencies = [ - "base64", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -2041,7 +2079,7 @@ version = "0.12.1" source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ "as_variant", - "base64", + "base64 0.21.7", "bytes", "form_urlencoded", "http", @@ -2129,7 +2167,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.52", + "syn", "toml", ] @@ -2150,7 +2188,7 @@ name = "ruma-signatures" version = "0.14.0" source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" dependencies = [ - "base64", + "base64 0.21.7", "ed25519-dalek", "pkcs8", "rand", @@ -2178,9 +2216,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ "bitflags 2.4.2", "fallible-iterator", @@ -2192,21 +2230,20 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "1.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" +checksum = "9d9848531d60c9cbbcf9d166c885316c24bc0e2a9d3eba0956bb6cbbd79bc6e8" dependencies = [ - "base64", + "base64 0.21.7", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", ] [[package]] name = "rust-librocksdb-sys" -version = "0.20.0+9.1.0" +version = 
"0.21.0+9.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48b14f4848d8574c074bb26445b43e63735d802ef2fc5cc40c1b015134baee0c" +checksum = "75cb7b9cd5ce3b3ce0757ceab2240f7471826780b8700845c0cfd418cb7e398d" dependencies = [ "bindgen", "bzip2-sys", @@ -2220,9 +2257,9 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36eae38b1d3d0018e273191f791343bd3eb030d7da63aaa20350e41c0182881" +checksum = "2bcfb31b5bf2e3274686ebfdf9a946e9a327a3bc54adc7e5cda9f4fdcc4b55f1" dependencies = [ "libc", "rust-librocksdb-sys", @@ -2279,7 +2316,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64", + "base64 0.21.7", ] [[package]] @@ -2381,7 +2418,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -2592,17 +2629,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.52" @@ -2658,7 +2684,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -2693,17 +2719,6 @@ dependencies = [ "threadpool", ] -[[package]] -name = "tikv-jemalloc-ctl" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" -dependencies = [ - "libc", - "paste", - "tikv-jemalloc-sys", -] - [[package]] name = "tikv-jemalloc-sys" version = "0.5.4+5.3.0-patched" @@ -2796,7 +2811,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -2947,7 +2962,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -3025,51 +3040,6 @@ dependencies = [ "tracing-log 0.2.0", ] -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -3219,7 +3189,7 @@ 
dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn", "wasm-bindgen-shared", ] @@ -3253,7 +3223,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3498,7 +3468,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] @@ -3525,3 +3495,18 @@ dependencies = [ "cc", "pkg-config", ] + +[[package]] +name = "zune-core" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" + +[[package]] +name = "zune-jpeg" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec866b44a2a1fd6133d363f073ca1b179f438f99e7e5bfb1e33f7181facfe448" +dependencies = [ + "zune-core", +] From 5760d981920570232e8b0b4700abd011a6f52474 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 10:52:24 +0100 Subject: [PATCH 1644/1727] chore: upgrade rocksdb in flake --- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 114e221..162eb3e 100644 --- a/flake.nix +++ b/flake.nix @@ -55,7 +55,7 @@ rocksdb' = pkgs: let - version = "9.1.0"; + version = "9.1.1"; in pkgs.rocksdb.overrideAttrs (old: { inherit version; @@ -63,7 +63,7 @@ owner = "facebook"; repo = "rocksdb"; rev = "v${version}"; - hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8="; + hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc="; }; }); From a499c80d1bfedd5ed21510e862301892ec66f367 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 11:26:12 +0100 Subject: [PATCH 1645/1727] docs: add FAQ --- docs/SUMMARY.md | 1 + docs/faq.md | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 docs/faq.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 2caca3e..f874bb2 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -10,3 +10,4 @@ - [NixOS](deploying/nixos.md) - [TURN](turn.md) - [Appservices](appservices.md) +- [FAQ](faq.md) diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000..dbfd192 --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,25 @@ +# FAQ + +Here are some of the most frequently asked questions about Conduit, and their answers. + +## Why do I get a `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms? + +Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433). + +## How do I setup sliding sync? + +You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which points to Conduit. Here is an example: +```json +{ + "m.homeserver": { + "base_url": "https://matrix.example.org" + }, + "org.matrix.msc3575.proxy": { + "url": "https://matrix.example.org" + } +} +``` + +## Can I migrate from Synapse to Conduit? + +Not really. You can reuse the domain of your current server with Conduit, but you have to leave all federated rooms first. 
From f62db723f7f5f628fb84a7ca921deac385794af1 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 11:32:20 +0100 Subject: [PATCH 1646/1727] docs: fix STUN typo --- docs/turn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/turn.md b/docs/turn.md index a61f1b1..11a7180 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,4 +1,4 @@ -# Setting up TURN/STURN +# Setting up TURN/STUN ## General instructions From df0ad2d07c8cd5da8e18597117522cd041ffa495 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 20:41:28 +0100 Subject: [PATCH 1647/1727] fix(appservices): don't forward events relating to remote users, and forward events relating to remote aliases --- src/service/appservice/mod.rs | 2 ++ src/service/rooms/timeline/mod.rs | 51 +++++++++++++++++++++---------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 9db6609..f493588 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -87,11 +87,13 @@ pub struct RegistrationInfo { } impl RegistrationInfo { + /// Checks if a given user ID matches either the users namespace or the localpart specified in the appservice registration pub fn is_user_match(&self, user_id: &UserId) -> bool { self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() } + /// Checks if a given user ID exclusively matches either the users namespace or the localpart specified in the appservice registration pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index acb00d0..a3b1d57 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -14,7 +14,8 @@ use ruma::{ events::{ push_rules::PushRulesEvent, room::{ - create::RoomCreateEventContent, encrypted::Relation, member::MembershipState, + canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, + encrypted::Relation, member::MembershipState, power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, GlobalAccountDataEventType, StateEventType, TimelineEventType, @@ -32,10 +33,7 @@ use tracing::{error, info, warn}; use crate::{ api::server_server, - service::{ - appservice::NamespaceRegex, - pdu::{EventHash, PduBuilder}, - }, + service::pdu::{EventHash, PduBuilder}, services, utils, Error, PduEvent, Result, }; @@ -594,26 +592,47 @@ impl Service { } } - let matching_users = |users: &NamespaceRegex| { - appservice.users.is_match(pdu.sender.as_str()) + let matching_users = || { + services().globals.server_name() == pdu.sender.server_name() + && appservice.is_user_match(&pdu.sender) || pdu.kind == TimelineEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) + && pdu.state_key.as_ref().map_or(false, |state_key| { + UserId::parse(state_key).map_or(false, |user_id| { + services().globals.server_name() == user_id.server_name() + && appservice.is_user_match(&user_id) + }) + }) }; - let matching_aliases = |aliases: &NamespaceRegex| { + + let matching_aliases = || { services() .rooms .alias .local_aliases_for_room(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) + .filter_map(Result::ok) + .any(|room_alias| 
appservice.aliases.is_match(room_alias.as_str())) + || if let Ok(Some(pdu)) = services().rooms.state_accessor.room_state_get( + &pdu.room_id, + &StateEventType::RoomCanonicalAlias, + "", + ) { + serde_json::from_str::(pdu.content.get()) + .map_or(false, |content| { + content.alias.map_or(false, |alias| { + appservice.aliases.is_match(alias.as_str()) + }) || content + .alt_aliases + .iter() + .any(|alias| appservice.aliases.is_match(alias.as_str())) + }) + } else { + false + } }; - if matching_aliases(&appservice.aliases) + if matching_aliases() || appservice.rooms.is_match(pdu.room_id.as_str()) - || matching_users(&appservice.users) + || matching_users() { services() .sending From 2b5295aa29a7e20fe375e08476cc83594f4ac5a0 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 27 Apr 2024 20:55:19 +0100 Subject: [PATCH 1648/1727] docs(faq): correct answer about migration --- docs/faq.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/faq.md b/docs/faq.md index dbfd192..ca2b805 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -22,4 +22,5 @@ You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/c ## Can I migrate from Synapse to Conduit? -Not really. You can reuse the domain of your current server with Conduit, but you have to leave all federated rooms first. +Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically. +Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable. From c90e4816b71ee0632a24423cf42eb1df2a525a08 Mon Sep 17 00:00:00 2001 From: Daniel Lo Nigro Date: Tue, 23 Apr 2024 04:53:11 +0000 Subject: [PATCH 1649/1727] [docs] Update docker-compose commands --- docs/deploying/docker-compose.for-traefik.yml | 6 +++--- docs/deploying/docker-compose.override.yml | 2 +- docs/deploying/docker-compose.with-traefik.yml | 6 +++--- docs/deploying/docker-compose.yml | 4 ++-- docs/deploying/docker.md | 18 +++++++++--------- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 82bb55b..7c0f5e6 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -7,8 +7,8 @@ services: ### then you are ready to go. image: matrixconduit/matrix-conduit:latest ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d # build: # context: . # args: @@ -37,7 +37,7 @@ services: # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose override file. + # and in the docker compose override file. 
well-known: image: nginx:latest restart: unless-stopped diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index 9525078..042e363 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -18,7 +18,7 @@ services: # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose file. + # and in the docker compose file. well-known: labels: - "traefik.enable=true" diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 5860327..5676eea 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -7,8 +7,8 @@ services: ### then you are ready to go. image: matrixconduit/matrix-conduit:latest ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d # build: # context: . # args: @@ -43,7 +43,7 @@ services: # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose override file. + # and in the docker compose override file. well-known: image: nginx:latest restart: unless-stopped diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 97f91da..f378133 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -7,8 +7,8 @@ services: ### then you are ready to go. image: matrixconduit/matrix-conduit:latest ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d # build: # context: . # args: diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index c19ef51..f550c4b 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -67,7 +67,7 @@ docker run -d -p 8448:6167 \ --name conduit ``` -or you can use [docker-compose](#docker-compose). +or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md). You can pass in different env vars to change config values on the fly. 
You can even configure Conduit completely by using env vars, but for that you need @@ -75,9 +75,9 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. -### Docker-compose +### Docker compose -If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. +If the `docker run` command is not for you or your setup, you can also use one of the provided `docker compose` files. Depending on your proxy setup, you can use one of the following files; - If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) @@ -91,10 +91,10 @@ Additional info about deploying Conduit can be found [here](generic.md). ### Build -To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: +To build the Conduit image with docker compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker compose with: ```bash -docker-compose up +docker compose up ``` This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. @@ -104,7 +104,7 @@ This will also start the container right afterwards, so if want it to run in det If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: ```bash -docker-compose up -d +docker compose up -d ``` > **Note:** Don't forget to modify and adjust the compose file to your needs. @@ -157,7 +157,7 @@ So...step by step: } ``` -6. Run `docker-compose up -d` +6. Run `docker compose up -d` 7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. @@ -196,8 +196,8 @@ Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn ``` -or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml` -and run `docker-compose up -d` in the same directory. +or docker compose. For the latter, paste the following section into a file called `docker-compose.yml` +and run `docker compose up -d` in the same directory. ```yml version: 3 From 60fe238893edaba5f1c0ca83f872113d94808ae0 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 28 Apr 2024 08:27:48 +0100 Subject: [PATCH 1650/1727] docs(faq): add backup instructions --- docs/faq.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/faq.md b/docs/faq.md index ca2b805..ce84f81 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -6,6 +6,13 @@ Here are some of the most frequently asked questions about Conduit, and their an Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433). +## How do I backup my server? + +To backup your Conduit server, it's very easy. 
+You can simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again. + +> **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state. + ## How do I setup sliding sync? You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which points to Conduit. Here is an example: From a23bfdb3f0e79c4c4d5151d5520d6bc5dc0fa54d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 28 Apr 2024 11:48:42 +0100 Subject: [PATCH 1651/1727] docs(docker): don't use underscores for max request size --- docs/deploying/docker-compose.for-traefik.yml | 2 +- docs/deploying/docker-compose.with-traefik.yml | 2 +- docs/deploying/docker-compose.yml | 2 +- docs/deploying/docker.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index c7dd2d4..0f3cedd 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -26,7 +26,7 @@ services: CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ CONDUIT_DATABASE_BACKEND: rocksdb CONDUIT_PORT: 6167 - CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration CONDUIT_ALLOW_FEDERATION: 'true' diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 486a4a7..2bb5aff 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -38,7 +38,7 @@ services: # CONDUIT_ALLOW_FEDERATION: 'true' # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + # CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. 
If you want to use a different way, delete or comment the below service, here diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index f378133..7dcdeff 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -26,7 +26,7 @@ services: CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ CONDUIT_DATABASE_BACKEND: rocksdb CONDUIT_PORT: 6167 - CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index f550c4b..a45c670 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -61,7 +61,7 @@ docker run -d -p 8448:6167 \ -e CONDUIT_DATABASE_BACKEND="rocksdb" \ -e CONDUIT_ALLOW_REGISTRATION=true \ -e CONDUIT_ALLOW_FEDERATION=true \ - -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ + -e CONDUIT_MAX_REQUEST_SIZE="20000000" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ --name conduit From 3b7c00135074da4f0fa43ea11dbe0fc88a7b9fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 28 Apr 2024 18:10:25 +0200 Subject: [PATCH 1652/1727] ci: use attic.conduit.rs --- .gitlab-ci.yml | 4 ++-- bin/nix-build-and-cache | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8c880b9..b9dbd78 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,8 +21,8 @@ before_script: - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi # Add our own binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.conduit.rs/conduit" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ddcaWZiWm0l0IXZlO8FERRdWvEufwmd0Negl1P+c0Ns=" >> /etc/nix/nix.conf; fi # Add alternate binary cache - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 350e171..967c16a 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -12,7 +12,7 @@ if [ ! -z ${ATTIC_TOKEN+x} ]; then nix run --inputs-from . attic -- \ login \ conduit \ - "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \ + "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \ "$ATTIC_TOKEN" # Push the target installable and its build dependencies From 0b217232acf268377016c9517151acd0e8c0b7e8 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 1 May 2024 21:54:32 +0100 Subject: [PATCH 1653/1727] ci: push attic to binary cache, update nix script --- .gitlab-ci.yml | 3 +++ bin/nix-build-and-cache | 19 ++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b9dbd78..3aecebf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,6 +49,9 @@ ci: stage: ci image: nixos/nix:2.20.4 script: + # Cache attic client + - ./bin/nix-build-and-cache --inputs-from . 
attic + # Cache the inputs required for the devShell - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 967c16a..42c3770 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -2,11 +2,13 @@ set -euo pipefail -# The first argument must be the desired installable -INSTALLABLE="$1" - -# Build the installable and forward any other arguments too -nix build "$@" +# Build the installable and forward any other arguments too. Also, use +# nix-output-monitor instead if it's available. +if command -v nom &> /dev/null; then + nom build "$@" +else + nix build "$@" +fi if [ ! -z ${ATTIC_TOKEN+x} ]; then nix run --inputs-from . attic -- \ @@ -15,12 +17,15 @@ if [ ! -z ${ATTIC_TOKEN+x} ]; then "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \ "$ATTIC_TOKEN" + readarray -t outputs < <(nix path-info "$@") + readarray -t derivations < <(nix path-info "$@" --derivation) + # Push the target installable and its build dependencies nix run --inputs-from . attic -- \ push \ conduit \ - "$(nix path-info "$INSTALLABLE" --derivation)" \ - "$(nix path-info "$INSTALLABLE")" + "${outputs[@]}" \ + "${derivations[@]}" else echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" fi From dfe2916357888d5926b3237e5c7f360a2369d29d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 30 Apr 2024 09:31:44 +0100 Subject: [PATCH 1654/1727] feat(federation): add destination field to X-Matrix header --- src/api/server_server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b25b131..ef6ab4a 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -223,8 +223,9 @@ where http_request.headers_mut().insert( AUTHORIZATION, HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + "X-Matrix origin=\"{}\",destination=\"{}\",key=\"{}\",sig=\"{}\"", services().globals.server_name(), + destination, s.0, s.1 )) From 63ba157ef6d05b9485c56e7fcd7d99b16cb997a5 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 2 May 2024 07:14:44 +0100 Subject: [PATCH 1655/1727] feat(auth): check if X-Matrix destination is correct if present --- src/api/ruma_wrapper/axum.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 649c1f5..0e66769 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -175,6 +175,15 @@ where Error::BadRequest(ErrorKind::Forbidden, msg) })?; + if let Some(dest) = x_matrix.destination { + if dest != services().globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "X-Matrix destination field does not match server name.", + )); + } + }; + let origin_signatures = BTreeMap::from_iter([( x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig), @@ -332,6 +341,7 @@ where } struct XMatrix { + destination: Option, origin: OwnedServerName, key: String, // KeyName? 
sig: String, @@ -353,6 +363,7 @@ impl Credentials for XMatrix { let mut origin = None; let mut key = None; let mut sig = None; + let mut destination = None; for entry in parameters.split_terminator(',') { let (name, value) = entry.split_once('=')?; @@ -369,6 +380,7 @@ impl Credentials for XMatrix { "origin" => origin = Some(value.try_into().ok()?), "key" => key = Some(value.to_owned()), "sig" => sig = Some(value.to_owned()), + "destination" => destination = Some(value.try_into().ok()?), _ => debug!( "Unexpected field `{}` in X-Matrix Authorization header", name @@ -377,6 +389,7 @@ impl Credentials for XMatrix { } Some(Self { + destination, origin: origin?, key: key?, sig: sig?, From c1f695653bd991a9355fe1116f91b528e6a65969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Thu, 2 May 2024 09:26:43 +0100 Subject: [PATCH 1656/1727] feat: support hosting .well-known from Conduit Co-authored-by: Matthias Ahouansou --- Cargo.lock | 3 +++ Cargo.toml | 3 +++ conduit-example.toml | 9 ++++++- src/api/client_server/mod.rs | 2 ++ src/api/client_server/unversioned.rs | 20 ++------------- src/api/client_server/well_known.rs | 22 ++++++++++++++++ src/api/server_server.rs | 16 +++++++++++- src/config/mod.rs | 38 +++++++++++++++++++++++++++- src/main.rs | 3 +++ src/service/globals/mod.rs | 8 ++++-- 10 files changed, 101 insertions(+), 23 deletions(-) create mode 100644 src/api/client_server/well_known.rs diff --git a/Cargo.lock b/Cargo.lock index b2a4739..c5f2fa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -432,6 +432,8 @@ dependencies = [ "tracing-flame", "tracing-opentelemetry", "tracing-subscriber", + "trust-dns-resolver", + "url", ] [[package]] @@ -3124,6 +3126,7 @@ dependencies = [ "form_urlencoded", "idna 0.5.0", "percent-encoding", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 8838339..eb7463c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,9 @@ futures-util = { version = "0.3.28", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.8", features = ["env", "toml"] } +# Validating urls in config +url = { version = "2", features = ["serde"] } + tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } async-trait = "0.1.68" diff --git a/conduit-example.toml b/conduit-example.toml index c83bce7..ef7bd18 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -17,7 +17,7 @@ # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client # and # https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information +# for more information, or continue below to see how conduit can do this for you. # YOU NEED TO EDIT THIS #server_name = "your.server.name" @@ -65,3 +65,10 @@ trusted_servers = ["matrix.org"] address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. + +[global.well_known] +# Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host +# server_name and port 443 by default. 
+# If you want to override these defaults, uncomment and edit the following lines accordingly: +#server = your.server.name:443 +#client = https://your.server.name diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 54c99aa..afe5181 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -32,6 +32,7 @@ mod typing; mod unversioned; mod user_directory; mod voip; +mod well_known; pub use account::*; pub use alias::*; @@ -67,6 +68,7 @@ pub use typing::*; pub use unversioned::*; pub use user_directory::*; pub use voip::*; +pub use well_known::*; pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 32; diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 70e260e..7706afe 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -1,9 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use axum::{response::IntoResponse, Json}; -use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind}; +use ruma::api::client::discovery::get_supported_versions; -use crate::{services, Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/versions` /// @@ -33,18 +32,3 @@ pub async fn get_supported_versions_route( Ok(resp) } - -/// # `GET /.well-known/matrix/client` -pub async fn well_known_client_route( - _body: Ruma, -) -> Result { - let client_url = match services().globals.well_known_client() { - Some(url) => url.clone(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }; - - Ok(Json(serde_json::json!({ - "m.homeserver": {"base_url": client_url}, - "org.matrix.msc3575.proxy": {"url": client_url} - }))) -} diff --git a/src/api/client_server/well_known.rs b/src/api/client_server/well_known.rs new file mode 100644 index 0000000..e7bc2a4 --- /dev/null +++ b/src/api/client_server/well_known.rs @@ -0,0 +1,22 @@ +use ruma::api::client::discovery::discover_homeserver::{ + self, HomeserverInfo, SlidingSyncProxyInfo, +}; + +use crate::{services, Result, Ruma}; + +/// # `GET /.well-known/matrix/client` +/// +/// Returns the client server discovery information. +pub async fn well_known_client( + _body: Ruma, +) -> Result { + let client_url = services().globals.well_known_client(); + + Ok(discover_homeserver::Response { + homeserver: HomeserverInfo { + base_url: client_url.clone(), + }, + identity_server: None, + sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }), + }) +} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b25b131..6b86aca 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -17,7 +17,10 @@ use ruma::{ backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, + discovery::{ + discover_homeserver, get_server_keys, get_server_version, ServerSigningKeys, + VerifyKey, + }, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, membership::{create_invite, create_join_event, prepare_join_event}, @@ -1910,6 +1913,17 @@ pub async fn claim_keys_route( }) } +/// # `GET /.well-known/matrix/server` +/// +/// Returns the federation server discovery information. 
+pub async fn well_known_server( + _body: Ruma, +) -> Result { + Ok(discover_homeserver::Response { + server: services().globals.well_known_server(), + }) +} + #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; diff --git a/src/config/mod.rs b/src/config/mod.rs index fb1e2f3..652b3a4 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -7,6 +7,7 @@ use std::{ use ruma::{OwnedServerName, RoomVersionId}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; +use url::Url; mod proxy; @@ -56,7 +57,8 @@ pub struct Config { pub allow_unstable_room_versions: bool, #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, - pub well_known_client: Option, + #[serde(default)] + pub well_known: WellKnownConfig, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] @@ -91,6 +93,12 @@ pub struct TlsConfig { pub key: String, } +#[derive(Clone, Debug, Deserialize, Default)] +pub struct WellKnownConfig { + pub client: Option, + pub server: Option, +} + const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; impl Config { @@ -111,9 +119,35 @@ impl Config { } } +impl Config { + pub fn well_known_client(&self) -> String { + if let Some(url) = &self.well_known.client { + url.to_string() + } else { + format!("https://{}", self.server_name) + } + } + + pub fn well_known_server(&self) -> OwnedServerName { + match &self.well_known.server { + Some(server_name) => server_name.to_owned(), + None => { + if self.server_name.port().is_some() { + self.server_name.to_owned() + } else { + format!("{}:443", self.server_name.host()) + .try_into() + .expect("Host from valid hostname + :443 must be valid") + } + } + } + } +} + impl fmt::Display for Config { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Prepare a list of config values to show + let well_known_server = self.well_known_server(); let lines = [ ("Server name", self.server_name.host()), ("Database backend", &self.database_backend), @@ -194,6 +228,8 @@ impl fmt::Display for Config { } &lst.join(", ") }), + ("Well-known server name", well_known_server.as_str()), + ("Well-known client URL", &self.well_known_client()), ]; let mut msg: String = "Active config values:\n\n".to_owned(); diff --git a/src/main.rs b/src/main.rs index 7beeb8b..8446754 100644 --- a/src/main.rs +++ b/src/main.rs @@ -390,6 +390,7 @@ fn routes(config: &Config) -> Router { .ruma_route(client_server::get_relating_events_with_rel_type_route) .ruma_route(client_server::get_relating_events_route) .ruma_route(client_server::get_hierarchy_route) + .ruma_route(client_server::well_known_client) .route( "/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync), @@ -430,10 +431,12 @@ fn routes(config: &Config) -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .ruma_route(server_server::well_known_server) } else { router .route("/_matrix/federation/*path", any(federation_disabled)) .route("/_matrix/key/*path", any(federation_disabled)) + .route("/.well-known/matrix/server", any(federation_disabled)) } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index ab66ed4..263463d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -417,8 +417,12 @@ impl Service { r } - pub fn well_known_client(&self) -> &Option { - &self.config.well_known_client + pub fn well_known_server(&self) -> OwnedServerName { + 
self.config.well_known_server() + } + + pub fn well_known_client(&self) -> String { + self.config.well_known_client() } pub fn shutdown(&self) { From 6e913bfec40e941dc70bad1e4a75bcf77971eeb1 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 2 May 2024 09:27:14 +0100 Subject: [PATCH 1657/1727] docs: delegation --- book.toml | 4 +++ docs/SUMMARY.md | 1 + docs/configuration.md | 1 + docs/delegation.md | 69 +++++++++++++++++++++++++++++++++++++++++++ docs/faq.md | 12 +++++--- 5 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 docs/delegation.md diff --git a/book.toml b/book.toml index e25746c..700ecda 100644 --- a/book.toml +++ b/book.toml @@ -16,3 +16,7 @@ git-repository-icon = "fa-git-square" [output.html.search] limit-results = 15 + +[output.html.code.hidelines] +json = "~" + diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index f874bb2..afba3cc 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -3,6 +3,7 @@ - [Introduction](introduction.md) - [Configuration](configuration.md) +- [Delegation](delegation.md) - [Deploying](deploying.md) - [Generic](deploying/generic.md) - [Debian](deploying/debian.md) diff --git a/docs/configuration.md b/docs/configuration.md index efa080d..d903a21 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -56,6 +56,7 @@ The `global` section contains the following fields: | `turn_secret` | `string` | The TURN secret | `""` | | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | +| `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | ### TLS diff --git a/docs/delegation.md b/docs/delegation.md new file mode 100644 index 0000000..c8e5391 --- /dev/null +++ b/docs/delegation.md @@ -0,0 +1,69 @@ +# Delegation + +You can run Conduit on a separate domain than the actual server name (what shows up in user ids, aliases, etc.). +For example you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`, +while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation. + +## Automatic (recommended) + +Conduit has support for hosting delegation files by itself, and by default uses it to serve federation traffic on port 443. + +With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy. + +This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org) +as servers don't always seem to cache the response, leading to slower response times otherwise, but it should also work if you +are connected to the server running Conduit using something like a VPN. + +> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration + +To configure it, use the following options in the `global.well_known` table: +| Field | Type | Description | Default | +| --- | --- | --- | --- | +| `client` | `String` | The URL that clients should use to connect to Conduit | `https://` | +| `server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | + +### Example + +```toml +[global.well_known] +client = "https://matrix.example.org" +server = "matrix.example.org:443" +``` + +## Manual + +Alternatively you can serve static JSON files to inform clients and servers how to connect to Conduit. 
+ +### Servers + +For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`: + +```json +{ + "m.server": "matrix.example.org:443" +} +``` +Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at. + +### Clients + +For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`: +```json +{ + "m.homeserver": { + "base_url": "https://matrix.example.org" + } +} +``` +Where `matrix.example.org` is the URL Conduit is accessible at. + +To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint: +``` +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS +Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization +``` + +If you also want to be able to use [sliding sync][0], look [here](faq.md#how-do-i-setup-sliding-sync). + +[0]: https://matrix.org/blog/2023/09/matrix-2-0/#sliding-sync diff --git a/docs/faq.md b/docs/faq.md index ce84f81..4c23a25 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -15,12 +15,16 @@ You can simply stop Conduit, make a copy or file system snapshot of the database ## How do I setup sliding sync? -You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which points to Conduit. Here is an example: +If you use the [automatic method for delegation](delegation.md#automatic-recommended) or just proxy `.well-known/matrix/client` to Conduit, sliding sync should work with no extra configuration. +If you don't, continue below. + +You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which contains a url which Conduit is accessible behind. +Here is an example: ```json { - "m.homeserver": { - "base_url": "https://matrix.example.org" - }, +~ "m.homeserver": { +~ "base_url": "https://matrix.example.org" +~ }, "org.matrix.msc3575.proxy": { "url": "https://matrix.example.org" } From 3ecf835b50064f604940af5c48c8d138c0ee7dd8 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 2 May 2024 09:53:53 +0100 Subject: [PATCH 1658/1727] ci: cache attic on artifacts job --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3aecebf..7347931 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -45,13 +45,13 @@ before_script: # Set CARGO_HOME to a cacheable path - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" + # Cache attic client + - ./bin/nix-build-and-cache --inputs-from . attic + ci: stage: ci image: nixos/nix:2.20.4 script: - # Cache attic client - - ./bin/nix-build-and-cache --inputs-from . 
attic - # Cache the inputs required for the devShell - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation From 9db1f5a13c32a6a6aea86be65524623a527e0dbe Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 2 May 2024 10:45:04 +0100 Subject: [PATCH 1659/1727] fix(admin): don't allow creation of remote users --- Cargo.lock | 1 - src/service/admin/mod.rs | 8 ++++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c5f2fa2..8453335 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -432,7 +432,6 @@ dependencies = [ "tracing-flame", "tracing-opentelemetry", "tracing-subscriber", - "trust-dns-resolver", "url", ] diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 484fc13..ab677f6 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -605,6 +605,14 @@ impl Service { ))) } }; + + // Checks if user is local + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "The specified user is not from this server!", + )); + }; + if user_id.is_historical() { return Ok(RoomMessageEventContent::text_plain(format!( "Userid {user_id} is not allowed due to historical" From 5570f5f3da642a5dca3db8f12c14152832284c86 Mon Sep 17 00:00:00 2001 From: Samuel Meenzen Date: Thu, 2 May 2024 15:08:40 +0200 Subject: [PATCH 1660/1727] ci: faster cache and artifact handling --- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3aecebf..ea5f201 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,6 +6,10 @@ stages: variables: # Makes some things print in color TERM: ansi + # Faster cache and artifact compression / decompression + FF_USE_FASTZIP: true + # Print progress reports for cache and artifact transfers + TRANSFER_METER_FREQUENCY: 5s # Avoid duplicate pipelines # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines From c9ee4a920ead36d6986ae9aa24865ee116dde001 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 4 May 2024 10:03:49 +0100 Subject: [PATCH 1661/1727] always go through inputs This way we don't have to modify the destructuring of outputs' argument when adding or removing inputs. --- flake.nix | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/flake.nix b/flake.nix index 162eb3e..984398d 100644 --- a/flake.nix +++ b/flake.nix @@ -19,24 +19,15 @@ attic.url = "github:zhaofengli/attic?ref=main"; }; - outputs = - { self - , nixpkgs - , flake-utils - , nix-filter - - , fenix - , crane - , ... - }: flake-utils.lib.eachDefaultSystem (system: + outputs = inputs: inputs.flake-utils.lib.eachDefaultSystem (system: let - pkgsHost = nixpkgs.legacyPackages.${system}; + pkgsHost = inputs.nixpkgs.legacyPackages.${system}; # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); # The Rust toolchain to use - toolchain = fenix.packages.${system}.fromToolchainFile { + toolchain = inputs.fenix.packages.${system}.fromToolchainFile { file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` @@ -44,7 +35,7 @@ }; builder = pkgs: - ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + ((inputs.crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; nativeBuildInputs = pkgs: [ # bindgen needs the build platform's libclang. 
Apparently due to @@ -68,7 +59,8 @@ }); env = pkgs: { - CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev; + CONDUIT_VERSION_EXTRA = + inputs.self.shortRev or inputs.self.dirtyShortRev; ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; } @@ -161,7 +153,7 @@ )); package = pkgs: builder pkgs { - src = nix-filter { + src = inputs.nix-filter { root = ./.; include = [ "src" @@ -202,17 +194,17 @@ { packages = { default = package pkgsHost; - oci-image = mkOciImage pkgsHost self.packages.${system}.default; + oci-image = mkOciImage pkgsHost inputs.self.packages.${system}.default; book = let - package = self.packages.${system}.default; + package = inputs.self.packages.${system}.default; in pkgsHost.stdenv.mkDerivation { pname = "${package.pname}-book"; version = package.version; - src = nix-filter { + src = inputs.nix-filter { root = ./.; include = [ "book.toml" @@ -241,7 +233,7 @@ let binaryName = "static-${crossSystem}"; pkgsCrossStatic = - (import nixpkgs { + (import inputs.nixpkgs { inherit system; crossSystem = { config = crossSystem; @@ -260,7 +252,7 @@ name = "oci-image-${crossSystem}"; value = mkOciImage pkgsCrossStatic - self.packages.${system}.${binaryName}; + inputs.self.packages.${system}.${binaryName}; } ] ) @@ -285,7 +277,7 @@ # # This needs to come before `toolchain` in this list, otherwise # `$PATH` will have stable rustfmt instead. - fenix.packages.${system}.latest.rustfmt + inputs.fenix.packages.${system}.latest.rustfmt toolchain ] ++ (with pkgsHost; [ From 3336d3f8125539f20b5dd4b39eefad6ff7790c0c Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Tue, 30 Apr 2024 16:00:19 -0700 Subject: [PATCH 1662/1727] factor out nix code into new files via `makeScope` This makes the Nix code a lot easier to reason about. --- flake.nix | 347 +++++---------------- nix/pkgs/book/default.nix | 34 ++ nix/pkgs/default/cross-compilation-env.nix | 100 ++++++ nix/pkgs/default/default.nix | 64 ++++ nix/pkgs/oci-image/default.nix | 25 ++ nix/shell.nix | 55 ++++ 6 files changed, 357 insertions(+), 268 deletions(-) create mode 100644 nix/pkgs/book/default.nix create mode 100644 nix/pkgs/default/cross-compilation-env.nix create mode 100644 nix/pkgs/default/default.nix create mode 100644 nix/pkgs/oci-image/default.nix create mode 100644 nix/shell.nix diff --git a/flake.nix b/flake.nix index 984398d..09aa9e0 100644 --- a/flake.nix +++ b/flake.nix @@ -19,286 +19,97 @@ attic.url = "github:zhaofengli/attic?ref=main"; }; - outputs = inputs: inputs.flake-utils.lib.eachDefaultSystem (system: + outputs = inputs: let - pkgsHost = inputs.nixpkgs.legacyPackages.${system}; + # Keep sorted + mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { + craneLib = + (inputs.crane.mkLib pkgs).overrideToolchain self.toolchain; - # Nix-accessible `Cargo.toml` - cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); + default = self.callPackage ./nix/pkgs/default {}; - # The Rust toolchain to use - toolchain = inputs.fenix.packages.${system}.fromToolchainFile { - file = ./rust-toolchain.toml; + inherit inputs; - # See also `rust-toolchain.toml` - sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; - }; + oci-image = self.callPackage ./nix/pkgs/oci-image {}; - builder = pkgs: - ((inputs.crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + book = self.callPackage ./nix/pkgs/book {}; - nativeBuildInputs = pkgs: [ - # bindgen needs the build platform's libclang. 
Apparently due to - # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't - # quite do the right thing here. - pkgs.pkgsBuildHost.rustPlatform.bindgenHook - ]; + rocksdb = + let + version = "9.1.1"; + in + pkgs.rocksdb.overrideAttrs (old: { + inherit version; + src = pkgs.fetchFromGitHub { + owner = "facebook"; + repo = "rocksdb"; + rev = "v${version}"; + hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc="; + }; + }); - rocksdb' = pkgs: - let - version = "9.1.1"; - in - pkgs.rocksdb.overrideAttrs (old: { - inherit version; - src = pkgs.fetchFromGitHub { - owner = "facebook"; - repo = "rocksdb"; - rev = "v${version}"; - hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc="; - }; + shell = self.callPackage ./nix/shell.nix {}; + + # The Rust toolchain to use + toolchain = inputs + .fenix + .packages + .${pkgs.pkgsBuildHost.system} + .fromToolchainFile { + file = ./rust-toolchain.toml; + + # See also `rust-toolchain.toml` + sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; + }; }); + in + inputs.flake-utils.lib.eachDefaultSystem (system: + let + pkgs = inputs.nixpkgs.legacyPackages.${system}; + in + { + packages = { + default = (mkScope pkgs).default; + oci-image = (mkScope pkgs).oci-image; + book = (mkScope pkgs).book; + } + // + builtins.listToAttrs + (builtins.concatLists + (builtins.map + (crossSystem: + let + binaryName = "static-${crossSystem}"; + pkgsCrossStatic = + (import inputs.nixpkgs { + inherit system; + crossSystem = { + config = crossSystem; + }; + }).pkgsStatic; + in + [ + # An output for a statically-linked binary + { + name = binaryName; + value = (mkScope pkgsCrossStatic).default; + } - env = pkgs: { - CONDUIT_VERSION_EXTRA = - inputs.self.shortRev or inputs.self.dirtyShortRev; - ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; - ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; - } - // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { - ROCKSDB_STATIC = ""; - } - // { - CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in - lib.concatStringsSep " " ([] - ++ lib.optionals - # This disables PIE for static builds, which isn't great in terms - # of security. Unfortunately, my hand is forced because nixpkgs' - # `libstdc++.a` is built without `-fPIE`, which precludes us from - # leaving PIE enabled. - stdenv.hostPlatform.isStatic - ["-C" "relocation-model=static"] - ++ lib.optionals - (stdenv.buildPlatform.config != stdenv.hostPlatform.config) - ["-l" "c"] - ++ lib.optionals - # This check has to match the one [here][0]. We only need to set - # these flags when using a different linker. Don't ask me why, - # though, because I don't know. All I know is it breaks otherwise. - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 - ( - # Nixpkgs doesn't check for x86_64 here but we do, because I - # observed a failure building statically for x86_64 without - # including it here. Linkers are weird. - (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) - && stdenv.hostPlatform.isStatic - && !stdenv.isDarwin - && !stdenv.cc.bintools.isLLVM + # An output for an OCI image based on that binary + { + name = "oci-image-${crossSystem}"; + value = (mkScope pkgsCrossStatic).oci-image; + } + ] ) [ - "-l" - "stdc++" - "-L" - "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" - ] - ); - } - - # What follows is stolen from [here][0]. 
Its purpose is to properly - # configure compilers and linkers for various stages of the build, and - # even covers the case of build scripts that need native code compiled and - # run on the build platform (I think). - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 - // ( - let - inherit (pkgs.rust.lib) envVars; - in - pkgs.lib.optionalAttrs - (pkgs.stdenv.targetPlatform.rust.rustcTarget - != pkgs.stdenv.hostPlatform.rust.rustcTarget) - ( - let - inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = - envVars.linkerForTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForHost; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; - CARGO_BUILD_TARGET = rustcTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; - HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc"; - HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++"; - } - )); - - package = pkgs: builder pkgs { - src = inputs.nix-filter { - root = ./.; - include = [ - "src" - "Cargo.toml" - "Cargo.lock" - ]; - }; - - # This is redundant with CI - doCheck = false; - - env = env pkgs; - nativeBuildInputs = nativeBuildInputs pkgs; - - meta.mainProgram = cargoToml.package.name; - }; - - mkOciImage = pkgs: package: - pkgs.dockerTools.buildImage { - name = package.pname; - tag = "next"; - copyToRoot = [ - pkgs.dockerTools.caCertificates - ]; - config = { - # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT) - # are handled as expected - Entrypoint = [ - "${pkgs.lib.getExe' pkgs.tini "tini"}" - "--" - ]; - Cmd = [ - "${pkgs.lib.getExe package}" - ]; - }; - }; - in - { - packages = { - default = package pkgsHost; - oci-image = mkOciImage pkgsHost inputs.self.packages.${system}.default; - - book = - let - package = inputs.self.packages.${system}.default; - in - pkgsHost.stdenv.mkDerivation { - pname = "${package.pname}-book"; - version = package.version; - - src = inputs.nix-filter { - root = ./.; - include = [ - "book.toml" - "conduit-example.toml" - "README.md" - "debian/README.md" - "docs" - ]; - }; - - nativeBuildInputs = (with pkgsHost; [ - mdbook - ]); - - buildPhase = '' - mdbook build - mv public $out - ''; - }; - } - // - builtins.listToAttrs - (builtins.concatLists - (builtins.map - (crossSystem: - let - binaryName = "static-${crossSystem}"; - pkgsCrossStatic = - (import inputs.nixpkgs { - inherit system; - crossSystem = { - config = crossSystem; - }; - }).pkgsStatic; - in - [ - # An output for a statically-linked binary - { - name = binaryName; - value = package pkgsCrossStatic; - } - - # An output for an OCI image based on that binary - { - name = "oci-image-${crossSystem}"; - value = mkOciImage - pkgsCrossStatic - inputs.self.packages.${system}.${binaryName}; - } + "x86_64-unknown-linux-musl" + "aarch64-unknown-linux-musl" ] ) - [ - "x86_64-unknown-linux-musl" - "aarch64-unknown-linux-musl" - ] - ) - ); + ); - devShells.default = pkgsHost.mkShell { - env = env pkgsHost // { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so. The - # `rust-src` component is required in order for this to work. - RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; - }; - - # Development tools - nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ - # Always use nightly rustfmt because most of its options are unstable - # - # This needs to come before `toolchain` in this list, otherwise - # `$PATH` will have stable rustfmt instead. 
- inputs.fenix.packages.${system}.latest.rustfmt - - toolchain - ] ++ (with pkgsHost; [ - engage - - # Needed for producing Debian packages - cargo-deb - - # Needed for Complement - go - olm - - # Needed for our script for Complement - jq - - # Needed for finding broken markdown links - lychee - - # Useful for editing the book locally - mdbook - ]); - }; - }); + devShells.default = (mkScope pkgs).shell; + } + ); } diff --git a/nix/pkgs/book/default.nix b/nix/pkgs/book/default.nix new file mode 100644 index 0000000..cc0464d --- /dev/null +++ b/nix/pkgs/book/default.nix @@ -0,0 +1,34 @@ +# Keep sorted +{ default +, inputs +, mdbook +, stdenv +}: + +stdenv.mkDerivation { + pname = "${default.pname}-book"; + version = default.version; + + + src = let filter = inputs.nix-filter.lib; in filter { + root = inputs.self; + + # Keep sorted + include = [ + "book.toml" + "conduit-example.toml" + "debian/README.md" + "docs" + "README.md" + ]; + }; + + nativeBuildInputs = [ + mdbook + ]; + + buildPhase = '' + mdbook build + mv public $out + ''; +} diff --git a/nix/pkgs/default/cross-compilation-env.nix b/nix/pkgs/default/cross-compilation-env.nix new file mode 100644 index 0000000..fac85e0 --- /dev/null +++ b/nix/pkgs/default/cross-compilation-env.nix @@ -0,0 +1,100 @@ +{ lib +, pkgsBuildHost +, rust +, stdenv +}: + +lib.optionalAttrs stdenv.hostPlatform.isStatic { + ROCKSDB_STATIC = ""; +} +// +{ + CARGO_BUILD_RUSTFLAGS = + lib.concatStringsSep + " " + ([] + # This disables PIE for static builds, which isn't great in terms of + # security. Unfortunately, my hand is forced because nixpkgs' + # `libstdc++.a` is built without `-fPIE`, which precludes us from + # leaving PIE enabled. + ++ lib.optionals + stdenv.hostPlatform.isStatic + [ "-C" "relocation-model=static" ] + ++ lib.optionals + (stdenv.buildPlatform.config != stdenv.hostPlatform.config) + [ "-l" "c" ] + ++ lib.optionals + # This check has to match the one [here][0]. We only need to set + # these flags when using a different linker. Don't ask me why, though, + # because I don't know. All I know is it breaks otherwise. + # + # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 + ( + # Nixpkgs doesn't check for x86_64 here but we do, because I + # observed a failure building statically for x86_64 without + # including it here. Linkers are weird. + (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) + && stdenv.hostPlatform.isStatic + && !stdenv.isDarwin + && !stdenv.cc.bintools.isLLVM + ) + [ + "-l" + "stdc++" + "-L" + "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" + ] + ); +} + +# What follows is stolen from [here][0]. Its purpose is to properly configure +# compilers and linkers for various stages of the build, and even covers the +# case of build scripts that need native code compiled and run on the build +# platform (I think). 
+# +# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 +// +( + let + inherit (rust.lib) envVars; + in + lib.optionalAttrs + (stdenv.targetPlatform.rust.rustcTarget + != stdenv.hostPlatform.rust.rustcTarget) + ( + let + inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = + envVars.linkerForTarget; + } + ) + // + ( + let + inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForHost; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; + CARGO_BUILD_TARGET = rustcTarget; + } + ) + // + ( + let + inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; + HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc"; + HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++"; + } + ) +) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix new file mode 100644 index 0000000..75c87b4 --- /dev/null +++ b/nix/pkgs/default/default.nix @@ -0,0 +1,64 @@ +# Keep sorted +{ craneLib +, inputs +, lib +, pkgsBuildHost +, rocksdb +, rust +, stdenv +}: + +let + env = { + CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; + ROCKSDB_INCLUDE_DIR = "${rocksdb}/include"; + ROCKSDB_LIB_DIR = "${rocksdb}/lib"; + } + // + (import ./cross-compilation-env.nix { + # Keep sorted + inherit + lib + pkgsBuildHost + rust + stdenv; + }); +in + +craneLib.buildPackage rec { + inherit + (craneLib.crateNameFromCargoToml { + cargoToml = "${inputs.self}/Cargo.toml"; + }) + pname + version; + + src = let filter = inputs.nix-filter.lib; in filter { + root = inputs.self; + + # Keep sorted + include = [ + "Cargo.lock" + "Cargo.toml" + "src" + ]; + }; + + # This is redundant with CI + doCheck = false; + + nativeBuildInputs = [ + # bindgen needs the build platform's libclang. Apparently due to "splicing + # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the + # right thing here. + pkgsBuildHost.rustPlatform.bindgenHook + ]; + + inherit env; + + passthru = { + inherit env; + }; + + meta.mainProgram = pname; +} diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix new file mode 100644 index 0000000..8b359ce --- /dev/null +++ b/nix/pkgs/oci-image/default.nix @@ -0,0 +1,25 @@ +# Keep sorted +{ default +, dockerTools +, lib +, tini +}: + +dockerTools.buildImage { + name = default.pname; + tag = "next"; + copyToRoot = [ + dockerTools.caCertificates + ]; + config = { + # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) + # are handled as expected + Entrypoint = [ + "${lib.getExe' tini "tini"}" + "--" + ]; + Cmd = [ + "${lib.getExe default}" + ]; + }; +} diff --git a/nix/shell.nix b/nix/shell.nix new file mode 100644 index 0000000..8cfc1c4 --- /dev/null +++ b/nix/shell.nix @@ -0,0 +1,55 @@ +# Keep sorted +{ cargo-deb +, default +, engage +, go +, inputs +, jq +, lychee +, mdbook +, mkShell +, olm +, system +, toolchain +}: + +mkShell { + env = default.env // { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so. 
The + # `rust-src` component is required in order for this to work. + RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; + }; + + # Development tools + nativeBuildInputs = default.nativeBuildInputs ++ [ + # Always use nightly rustfmt because most of its options are unstable + # + # This needs to come before `toolchain` in this list, otherwise + # `$PATH` will have stable rustfmt instead. + inputs.fenix.packages.${system}.latest.rustfmt + + # Keep sorted + engage + toolchain + + # Needed for producing Debian packages + cargo-deb + + # Needed for our script for Complement + jq + + # Needed for Complement + go + olm + + # Needed for our script for Complement + jq + + # Needed for finding broken markdown links + lychee + + # Useful for editing the book locally + mdbook + ]; +} From d796fe7cd32b6ff12906092a4a013cfb4cfe634a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 1 May 2024 00:38:55 -0700 Subject: [PATCH 1663/1727] make it easy to configure cargo features from nix Users of the nix package can now just use `.override` to choose what features they want. This also makes RocksDB automatically use jemalloc when Conduit is configured to use jemalloc. --- nix/pkgs/default/default.nix | 49 +++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 75c87b4..3877151 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -1,4 +1,4 @@ -# Keep sorted +# Dependencies (keep sorted) { craneLib , inputs , lib @@ -6,23 +6,34 @@ , rocksdb , rust , stdenv + +# Options (keep sorted) +, default-features ? true +, features ? [] }: let - env = { - CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; - ROCKSDB_INCLUDE_DIR = "${rocksdb}/include"; - ROCKSDB_LIB_DIR = "${rocksdb}/lib"; - } - // - (import ./cross-compilation-env.nix { - # Keep sorted - inherit - lib - pkgsBuildHost - rust - stdenv; - }); + env = + let + rocksdb' = rocksdb.override { + enableJemalloc = builtins.elem "jemalloc" features; + }; + in + { + CONDUIT_VERSION_EXTRA = + inputs.self.shortRev or inputs.self.dirtyShortRev; + ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; + ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; + } + // + (import ./cross-compilation-env.nix { + # Keep sorted + inherit + lib + pkgsBuildHost + rust + stdenv; + }); in craneLib.buildPackage rec { @@ -44,6 +55,14 @@ craneLib.buildPackage rec { ]; }; + cargoExtraArgs = "--locked " + + lib.optionalString + (!default-features) + "--no-default-features " + + lib.optionalString + (features != []) + "--features " + (builtins.concatStringsSep "," features); + # This is redundant with CI doCheck = false; From 55259329a3c0371f63c74dae95982664b5699814 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 1 May 2024 12:08:42 -0700 Subject: [PATCH 1664/1727] make it easy to configure cargo profiles from nix This way you can easily build in debug mode with Nix. --- nix/pkgs/default/default.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 3877151..79510f5 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -10,6 +10,7 @@ # Options (keep sorted) , default-features ? true , features ? [] +, profile ? 
"release" }: let @@ -73,6 +74,8 @@ craneLib.buildPackage rec { pkgsBuildHost.rustPlatform.bindgenHook ]; + CARGO_PROFILE = profile; + inherit env; passthru = { From 96cc1f6abdc6fccb1dcf14966584f21b6725bdac Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 1 May 2024 01:05:24 -0700 Subject: [PATCH 1665/1727] only set `CONDUIT_VERSION_EXTRA` for final build This prevents us from needing to recompile the dependencies when that environment variable changes, which generally changes on every commit, which is far more frequently than the dependencies are actually changed. --- nix/pkgs/default/default.nix | 76 ++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 34 deletions(-) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 79510f5..560e310 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -14,15 +14,13 @@ }: let - env = + buildDepsOnlyEnv = let rocksdb' = rocksdb.override { enableJemalloc = builtins.elem "jemalloc" features; }; in { - CONDUIT_VERSION_EXTRA = - inputs.self.shortRev or inputs.self.dirtyShortRev; ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; } @@ -35,26 +33,45 @@ let rust stdenv; }); + + buildPackageEnv = { + CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; + } // buildDepsOnlyEnv; + + commonAttrs = { + inherit + (craneLib.crateNameFromCargoToml { + cargoToml = "${inputs.self}/Cargo.toml"; + }) + pname + version; + + src = let filter = inputs.nix-filter.lib; in filter { + root = inputs.self; + + # Keep sorted + include = [ + "Cargo.lock" + "Cargo.toml" + "src" + ]; + }; + + nativeBuildInputs = [ + # bindgen needs the build platform's libclang. Apparently due to "splicing + # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the + # right thing here. + pkgsBuildHost.rustPlatform.bindgenHook + ]; + + CARGO_PROFILE = profile; + }; in -craneLib.buildPackage rec { - inherit - (craneLib.crateNameFromCargoToml { - cargoToml = "${inputs.self}/Cargo.toml"; - }) - pname - version; - - src = let filter = inputs.nix-filter.lib; in filter { - root = inputs.self; - - # Keep sorted - include = [ - "Cargo.lock" - "Cargo.toml" - "src" - ]; - }; +craneLib.buildPackage ( commonAttrs // { + cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // { + env = buildDepsOnlyEnv; + }); cargoExtraArgs = "--locked " + lib.optionalString @@ -67,20 +84,11 @@ craneLib.buildPackage rec { # This is redundant with CI doCheck = false; - nativeBuildInputs = [ - # bindgen needs the build platform's libclang. Apparently due to "splicing - # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the - # right thing here. 
- pkgsBuildHost.rustPlatform.bindgenHook - ]; - - CARGO_PROFILE = profile; - - inherit env; + env = buildPackageEnv; passthru = { - inherit env; + env = buildPackageEnv; }; - meta.mainProgram = pname; -} + meta.mainProgram = commonAttrs.pname; +}) From 3d9d975a9fe508c64c14ce7377881649031ce98d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 4 May 2024 14:41:30 +0100 Subject: [PATCH 1666/1727] ci: avoid rebuilding bindgen and friends --- nix/pkgs/default/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 560e310..4577fea 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -21,6 +21,7 @@ let }; in { + NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; } From 2a2b9554c8eea2b64cb4f19f5418a90488b93dae Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 4 May 2024 21:50:37 +0100 Subject: [PATCH 1667/1727] ci: temporarily disable CONDUIT_VERSION_EXTRA --- nix/pkgs/default/default.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 4577fea..2595346 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -36,7 +36,8 @@ let }); buildPackageEnv = { - CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; + # Temporarily disabled, see https://gitlab.com/famedly/conduit/-/merge_requests/662#note_1892753424 + # CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; } // buildDepsOnlyEnv; commonAttrs = { From 70b07dfabfe8108c5eb4a62a1dbf69500be6dcab Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 08:53:53 +0100 Subject: [PATCH 1668/1727] ci: use sh instead of bash --- bin/nix-build-and-cache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 42c3770..5b22b85 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh set -euo pipefail From 358164f49d6162e45502aac8efc726159cc2d888 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 09:01:40 +0100 Subject: [PATCH 1669/1727] ci: only cache attic when nix is available --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f42fa7f..da109e5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -50,7 +50,7 @@ before_script: - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" # Cache attic client - - ./bin/nix-build-and-cache --inputs-from . attic + - if command -v nix > /dev/null; then ./bin/nix-build-and-cache --inputs-from . attic; fi ci: stage: ci From f9953c31fc6b84a601364be5598748bbdb92f04d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 09:28:48 +0100 Subject: [PATCH 1670/1727] Revert "ci: use sh instead of bash" This reverts commit 70b07dfabfe8108c5eb4a62a1dbf69500be6dcab. 
--- bin/nix-build-and-cache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 5b22b85..42c3770 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -1,4 +1,4 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash set -euo pipefail From a4c973e57eded43c850868c46768b7a9e30c2ab1 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 5 May 2024 09:25:39 +0100 Subject: [PATCH 1671/1727] build and cache all packages and CI dependencies This fixes the problem where some artifacts were not being cached when they should have been. The secret sauce is the `nix-store` command. --- bin/nix-build-and-cache | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 42c3770..6bd6266 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -17,15 +17,24 @@ if [ ! -z ${ATTIC_TOKEN+x} ]; then "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \ "$ATTIC_TOKEN" - readarray -t outputs < <(nix path-info "$@") readarray -t derivations < <(nix path-info "$@" --derivation) + for derivation in "${derivations[@]}"; do + cache+=( + "$(nix-store --query --requisites --include-outputs "$derivation")" + ) + done + + # Upload them to Attic + # + # Use `xargs` and a here-string because something would probably explode if + # several thousand arguments got passed to a command at once. Hopefully no + # store paths include a newline in them. + ( + IFS=$'\n' + nix shell --inputs-from . attic -c xargs \ + attic push conduit <<< "${cache[*]}" + ) - # Push the target installable and its build dependencies - nix run --inputs-from . attic -- \ - push \ - conduit \ - "${outputs[@]}" \ - "${derivations[@]}" else echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" fi From 57a24f234d23b6b69da07d6cc85643fdc641cc32 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 10:46:10 +0100 Subject: [PATCH 1672/1727] Revert "ci: temporarily disable CONDUIT_VERSION_EXTRA" This reverts commit 2a2b9554c8eea2b64cb4f19f5418a90488b93dae. 
--- nix/pkgs/default/default.nix | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix index 2595346..4577fea 100644 --- a/nix/pkgs/default/default.nix +++ b/nix/pkgs/default/default.nix @@ -36,8 +36,7 @@ let }); buildPackageEnv = { - # Temporarily disabled, see https://gitlab.com/famedly/conduit/-/merge_requests/662#note_1892753424 - # CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; + CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; } // buildDepsOnlyEnv; commonAttrs = { From 256dae983bdbffc964b76ae17aa718b855702c92 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 13:11:44 +0100 Subject: [PATCH 1673/1727] chore: bump rust and fix new lints that come with it --- Cargo.toml | 2 +- complement/Dockerfile | 2 +- flake.nix | 2 +- rust-toolchain.toml | 2 +- src/api/appservice_server.rs | 4 ++-- src/api/client_server/device.rs | 2 +- src/api/client_server/membership.rs | 4 ++-- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 4 ++-- src/database/abstraction.rs | 1 - src/database/abstraction/rocksdb.rs | 2 -- src/database/abstraction/sqlite.rs | 4 ++-- src/database/abstraction/watchers.rs | 2 +- src/database/mod.rs | 8 ++++---- src/main.rs | 6 +++--- src/service/globals/mod.rs | 6 +++--- src/service/pusher/mod.rs | 8 ++++---- src/service/rooms/spaces/mod.rs | 2 +- src/service/sending/mod.rs | 8 ++++---- src/service/users/mod.rs | 10 ++++++---- src/utils/mod.rs | 8 +++----- 21 files changed, 43 insertions(+), 46 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eb7463c..1ab0798 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ version = "0.8.0-alpha" edition = "2021" # See also `rust-toolchain.toml` -rust-version = "1.75.0" +rust-version = "1.78.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/complement/Dockerfile b/complement/Dockerfile index 813af10..341470a 100644 --- a/complement/Dockerfile +++ b/complement/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.75.0 +FROM rust:1.78.0 WORKDIR /workdir diff --git a/flake.nix b/flake.nix index 09aa9e0..9132544 100644 --- a/flake.nix +++ b/flake.nix @@ -59,7 +59,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; + sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU="; }; }); in diff --git a/rust-toolchain.toml b/rust-toolchain.toml index f7a9434..811d9ce 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -10,7 +10,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.75.0" +channel = "1.78.0" components = [ # For rust-analyzer "rust-src", diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 3ec7a66..6af31d8 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -10,12 +10,12 @@ use tracing::warn; /// /// Only returns None if there is no url specified in the appservice registration file #[tracing::instrument(skip(request))] -pub(crate) async fn send_request( +pub(crate) async fn send_request( registration: Registration, request: T, ) -> Result> where - T: Debug, + T: OutgoingRequest + Debug, { let destination = match registration.url { Some(url) => url, diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index aba061b..9a42f04 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -53,7 +53,7 @@ pub async fn update_device_route( .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - device.display_name = body.display_name.clone(); + device.display_name.clone_from(&body.display_name); services() .users diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 6fe1e0e..cb43545 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -213,7 +213,7 @@ pub async fn kick_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); + event.reason.clone_from(&body.reason); let mutex_state = Arc::clone( services() @@ -364,7 +364,7 @@ pub async fn unban_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); + event.reason.clone_from(&body.reason); let mutex_state = Arc::clone( services() diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 0e66769..a56ee35 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -286,7 +286,7 @@ where } }; - let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method); + let mut http_request = Request::builder().uri(parts.uri).method(parts.method); *http_request.headers_mut().unwrap() = parts.headers; if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { diff --git a/src/api/server_server.rs b/src/api/server_server.rs index d816a3e..6ca352b 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -116,12 +116,12 @@ impl FedDest { } #[tracing::instrument(skip(request))] -pub(crate) async fn send_request( +pub(crate) async fn send_request( destination: &ServerName, request: T, ) -> Result where - T: Debug, + T: OutgoingRequest + Debug, { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 0a32105..93660f9 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -38,7 +38,6 @@ pub trait KeyValueDatabaseEngine: Send + Sync { fn memory_usage(&self) -> Result { Ok("Current database engine does not support memory usage reporting.".to_owned()) } - fn clear_caches(&self) {} } pub trait KvTree: Send + Sync { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 447ee03..cf77e3d 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ 
-126,8 +126,6 @@ impl KeyValueDatabaseEngine for Arc { self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } - - fn clear_caches(&self) {} } impl RocksDbEngineTree<'_> { diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 222a843..b448c3b 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -13,8 +13,8 @@ use thread_local::ThreadLocal; use tracing::debug; thread_local! { - static READ_CONNECTION: RefCell> = RefCell::new(None); - static READ_CONNECTION_ITERATOR: RefCell> = RefCell::new(None); + static READ_CONNECTION: RefCell> = const { RefCell::new(None) }; + static READ_CONNECTION_ITERATOR: RefCell> = const { RefCell::new(None) }; } struct PreparedStatementIterator<'a> { diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 01156ab..eb5792b 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -20,7 +20,7 @@ impl Watchers { let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { hash_map::Entry::Occupied(o) => o.get().1.clone(), hash_map::Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(()); + let (tx, rx) = watch::channel(()); v.insert((tx, rx.clone())); rx } diff --git a/src/database/mod.rs b/src/database/mod.rs index 41da857..8d1b191 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -237,7 +237,7 @@ impl KeyValueDatabase { Self::check_db_setup(&config)?; if !Path::new(&config.database_path).exists() { - std::fs::create_dir_all(&config.database_path) + fs::create_dir_all(&config.database_path) .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } @@ -846,7 +846,7 @@ impl KeyValueDatabase { let rule = rules_list.content.get(content_rule_transformation[0]); if rule.is_some() { let mut rule = rule.unwrap().clone(); - rule.rule_id = content_rule_transformation[1].to_owned(); + content_rule_transformation[1].clone_into(&mut rule.rule_id); rules_list .content .shift_remove(content_rule_transformation[0]); @@ -871,7 +871,7 @@ impl KeyValueDatabase { let rule = rules_list.underride.get(transformation[0]); if let Some(rule) = rule { let mut rule = rule.clone(); - rule.rule_id = transformation[1].to_owned(); + transformation[1].clone_into(&mut rule.rule_id); rules_list.underride.shift_remove(transformation[0]); rules_list.underride.insert(rule); } @@ -918,7 +918,7 @@ impl KeyValueDatabase { let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); - let user_default_rules = ruma::push::Ruleset::server_default(&user); + let user_default_rules = Ruleset::server_default(&user); account_data .content .global diff --git a/src/main.rs b/src/main.rs index 8446754..5d60a6b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -217,7 +217,7 @@ async fn run_server() -> io::Result<()> { } async fn spawn_task( - req: axum::http::Request, + req: http::Request, next: axum::middleware::Next, ) -> std::result::Result { if services().globals.shutdown.load(atomic::Ordering::Relaxed) { @@ -229,13 +229,13 @@ async fn spawn_task( } async fn unrecognized_method( - req: axum::http::Request, + req: http::Request, next: axum::middleware::Next, ) -> std::result::Result { let method = req.method().clone(); let uri = req.uri().clone(); let inner = next.run(req).await; - if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { + if inner.status() == 
StatusCode::METHOD_NOT_ALLOWED { warn!("Method not allowed: {method} {uri}"); return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { body: ErrorBody::Standard { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 263463d..2373a27 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -80,12 +80,12 @@ pub struct Service { /// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. /// /// This is utilized to have sync workers return early and release read locks on the database. -pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); +pub struct RotationHandler(broadcast::Sender<()>); impl RotationHandler { pub fn new() -> Self { - let (s, r) = broadcast::channel(1); - Self(s, r) + let s = broadcast::channel(1).0; + Self(s) } pub fn watch(&self) -> impl Future { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 6ca86be..83127e6 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -44,13 +44,13 @@ impl Service { } #[tracing::instrument(skip(self, destination, request))] - pub async fn send_request( + pub async fn send_request( &self, destination: &str, request: T, ) -> Result where - T: Debug, + T: OutgoingRequest + Debug, { let destination = destination.replace("/_matrix/push/v1/notify", ""); @@ -231,11 +231,11 @@ impl Service { let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); device.data.default_payload = http.default_payload.clone(); - device.data.format = http.format.clone(); + device.data.format.clone_from(&http.format); // Tweaks are only added if the format is NOT event_id_only if !event_id_only { - device.tweaks = tweaks.clone(); + device.tweaks.clone_from(&tweaks); } let d = vec![device]; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 981d4a3..5addc6f 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -482,7 +482,7 @@ impl Service { match join_rule { JoinRule::Restricted(r) => { for rule in &r.allow { - if let join_rules::AllowRule::RoomMembership(rm) = rule { + if let AllowRule::RoomMembership(rm) = rule { if let Ok(true) = services() .rooms .state_cache diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 7e54e8b..fa14f12 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -675,13 +675,13 @@ impl Service { } #[tracing::instrument(skip(self, destination, request))] - pub async fn send_federation_request( + pub async fn send_federation_request( &self, destination: &ServerName, request: T, ) -> Result where - T: Debug, + T: OutgoingRequest + Debug, { debug!("Waiting for permit"); let permit = self.maximum_requests.acquire().await; @@ -704,13 +704,13 @@ impl Service { /// /// Only returns None if there is no url specified in the appservice registration file #[tracing::instrument(skip(self, registration, request))] - pub async fn send_appservice_request( + pub async fn send_appservice_request( &self, registration: Registration, request: T, ) -> Result> where - T: Debug, + T: OutgoingRequest + Debug, { let permit = self.maximum_requests.acquire().await; let response = appservice_server::send_request(registration, request).await; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fb983a4..9133166 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -86,11 +86,12 @@ impl Service { for (list_id, list) in &mut 
request.lists { if let Some(cached_list) = cached.lists.get(list_id) { if list.sort.is_empty() { - list.sort = cached_list.sort.clone(); + list.sort.clone_from(&cached_list.sort); }; if list.room_details.required_state.is_empty() { - list.room_details.required_state = - cached_list.room_details.required_state.clone(); + list.room_details + .required_state + .clone_from(&cached_list.room_details.required_state); }; list.room_details.timeline_limit = list .room_details @@ -132,7 +133,8 @@ impl Service { (_, _) => {} } if list.bump_event_types.is_empty() { - list.bump_event_types = cached_list.bump_event_types.clone(); + list.bump_event_types + .clone_from(&cached_list.bump_event_types); }; } cached.lists.insert(list_id.clone(), list.clone()); diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 0b5b1ae..d09a103 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -122,16 +122,14 @@ pub fn deserialize_from_str< 'de, D: serde::de::Deserializer<'de>, T: FromStr, - E: std::fmt::Display, + E: fmt::Display, >( deserializer: D, ) -> Result { struct Visitor, E>(std::marker::PhantomData); - impl<'de, T: FromStr, Err: std::fmt::Display> serde::de::Visitor<'de> - for Visitor - { + impl<'de, T: FromStr, Err: fmt::Display> serde::de::Visitor<'de> for Visitor { type Value = T; - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "a parsable string") } fn visit_str(self, v: &str) -> Result From eec9b9ed87ec0494f55c51685918a9af83272340 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 13:13:34 +0100 Subject: [PATCH 1674/1727] chore: bump nix --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index da109e5..90de602 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -54,7 +54,7 @@ before_script: ci: stage: ci - image: nixos/nix:2.20.4 + image: nixos/nix:2.22.0 script: # Cache the inputs required for the devShell - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation @@ -79,7 +79,7 @@ ci: artifacts: stage: artifacts - image: nixos/nix:2.20.4 + image: nixos/nix:2.22.0 script: - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl - cp result/bin/conduit x86_64-unknown-linux-musl From d8badaf64bd29735b80d1a0652b9073a74866c55 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 5 May 2024 15:28:18 +0100 Subject: [PATCH 1675/1727] fix(membership): always set reason & allow new events if reason changed --- src/api/client_server/membership.rs | 225 ++++++++++++++-------------- 1 file changed, 112 insertions(+), 113 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index cb43545..a673eaa 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -186,15 +186,7 @@ pub async fn kick_user_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(true) = services() - .rooms - .state_cache - .is_left(sender_user, &body.room_id) - { - return Ok(kick_user::v3::Response {}); - } - - let mut event: RoomMemberEventContent = serde_json::from_str( + let event: RoomMemberEventContent = serde_json::from_str( services() .rooms .state_accessor @@ -205,15 +197,26 @@ pub async fn kick_user_route( )? .ok_or(Error::BadRequest( ErrorKind::BadState, - "Cannot kick member that's not in the room.", + "Cannot kick a user who is not in the room.", ))? 
.content .get(), ) .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = MembershipState::Leave; - event.reason.clone_from(&body.reason); + // If they are already kicked and the reason is unchanged, there isn't any point in sending a new event. + if event.membership == MembershipState::Leave && event.reason == body.reason { + return Ok(kick_user::v3::Response {}); + } + + let event = RoomMemberEventContent { + is_direct: None, + membership: MembershipState::Leave, + third_party_invite: None, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + ..event + }; let mutex_state = Arc::clone( services() @@ -254,17 +257,7 @@ pub async fn kick_user_route( pub async fn ban_user_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(Some(membership_event)) = services() - .rooms - .state_accessor - .get_member(&body.room_id, sender_user) - { - if membership_event.membership == MembershipState::Ban { - return Ok(ban_user::v3::Response {}); - } - } - - let event = services() + let event = if let Some(event) = services() .rooms .state_accessor .room_state_get( @@ -272,27 +265,30 @@ pub async fn ban_user_route(body: Ruma) -> Result(event.content.get()).ok()) + { + // If they are already banned and the reason is unchanged, there isn't any point in sending a new event. + if event.membership == MembershipState::Ban && event.reason == body.reason { + return Ok(ban_user::v3::Response {}); + } + + RoomMemberEventContent { + membership: MembershipState::Ban, + join_authorized_via_users_server: None, + reason: body.reason.clone(), + third_party_invite: None, + is_direct: None, + avatar_url: event.avatar_url, + displayname: event.displayname, + blurhash: event.blurhash, + } + } else { + RoomMemberEventContent { + reason: body.reason.clone(), + ..RoomMemberEventContent::new(MembershipState::Ban) + } + }; let mutex_state = Arc::clone( services() @@ -335,17 +331,7 @@ pub async fn unban_user_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(Some(membership_event)) = services() - .rooms - .state_accessor - .get_member(&body.room_id, sender_user) - { - if membership_event.membership != MembershipState::Ban { - return Ok(unban_user::v3::Response {}); - } - } - - let mut event: RoomMemberEventContent = serde_json::from_str( + let event: RoomMemberEventContent = serde_json::from_str( services() .rooms .state_accessor @@ -363,8 +349,19 @@ pub async fn unban_user_route( ) .map_err(|_| Error::bad_database("Invalid member event in database."))?; - event.membership = MembershipState::Leave; - event.reason.clone_from(&body.reason); + // If they are already unbanned and the reason is unchanged, there isn't any point in sending a new event. + if event.membership == MembershipState::Leave && event.reason == body.reason { + return Ok(unban_user::v3::Response {}); + } + + let event = RoomMemberEventContent { + is_direct: None, + membership: MembershipState::Leave, + third_party_invite: None, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + ..event + }; let mutex_state = Arc::clone( services() @@ -1319,60 +1316,59 @@ pub(crate) async fn invite_helper<'a>( .filter(|server| &**server != services().globals.server_name()); services().sending.send_pdu(servers, &pdu_id)?; + } else { + if !services() + .rooms + .state_cache + .is_joined(sender_user, room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } - return Ok(()); - } + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; - if !services() - .rooms - .state_cache - .is_joined(sender_user, room_id)? - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - let mutex_state = Arc::clone( services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, + is_direct: Some(is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(user_id)?, + reason, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: services().users.displayname(user_id)?, - avatar_url: services().users.avatar_url(user_id)?, - is_direct: Some(is_direct), - third_party_invite: None, - blurhash: services().users.blurhash(user_id)?, - reason, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - ) - .await?; - - drop(state_lock); + // Critical point ends + drop(state_lock); + } Ok(()) } @@ -1470,12 +1466,15 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option e, }; - let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - event.reason = reason; - event.join_authorized_via_users_server = None; + let event = RoomMemberEventContent { + is_direct: None, + membership: MembershipState::Leave, + third_party_invite: None, + reason, + join_authorized_via_users_server: None, + ..serde_json::from_str(member_event.content.get()) + .map_err(|_| Error::bad_database("Invalid member event in database."))? + }; services() .rooms From 8876d54d78f1d68ab8134d606e297608cc4add97 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 3 May 2024 21:50:45 +0100 Subject: [PATCH 1676/1727] feat(admin): add hash-and-sign-event command --- src/service/admin/mod.rs | 46 +++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ab677f6..345268d 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -165,19 +165,15 @@ enum AdminCommand { /// Enables incoming federation handling for a room again. 
EnableRoom { room_id: Box }, - /// Verify json signatures - /// [commandbody]() - /// # ``` - /// # json here - /// # ``` + /// Sign a json object using Conduit's signing keys, putting the json in a codeblock SignJson, - /// Verify json signatures - /// [commandbody]() - /// # ``` - /// # json here - /// # ``` + /// Verify json signatures, putting the json in a codeblock VerifyJson, + + /// Parses a JSON object as an event then creates a hash and signs it, putting a room + /// version as an argument, and the json in a codeblock + HashAndSignEvent { room_version_id: RoomVersionId }, } #[derive(Debug)] @@ -860,6 +856,36 @@ impl Service { ) } } + AdminCommand::HashAndSignEvent { room_version_id } => { + if body.len() > 2 + // Language may be specified as part of the codeblock (e.g. "```json") + && body[0].trim().starts_with("```") + && body.last().unwrap().trim() == "```" + { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(mut value) => { + if let Err(e) = ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + &room_version_id, + ) { + RoomMessageEventContent::text_plain(format!("Invalid event: {e}")) + } else { + let json_text = serde_json::to_string_pretty(&value) + .expect("canonical json is valid json"); + RoomMessageEventContent::text_plain(json_text) + } + } + Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } }; Ok(reply_message_content) From 965b6df83ddb548b7943f39883cdb4a46db749e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 5 May 2024 09:35:49 +0200 Subject: [PATCH 1677/1727] fix: make media response match spec --- src/api/client_server/media.rs | 41 ++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 7fc65c2..a1bfab4 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -22,6 +22,14 @@ pub async fn get_media_config_route( }) } +fn sanitize_content_type(content_type: String) -> String { + if content_type == "image/jpeg" || content_type == "image/png" { + content_type + } else { + "application/octet-stream".to_owned() + } +} + /// # `POST /_matrix/media/r0/upload` /// /// Permanently save media in the server. @@ -100,20 +108,26 @@ pub async fn get_content_route( if let Some(FileMeta { content_disposition, - content_type, file, + .. }) = services().media.get(mxc.clone()).await? 
{ Ok(get_content::v3::Response { file, - content_type, + content_type: Some("application/octet-stream".to_owned()), content_disposition, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; - Ok(remote_content_response) + + Ok(get_content::v3::Response { + content_disposition: remote_content_response.content_disposition, + content_type: Some("application/octet-stream".to_owned()), + file: remote_content_response.file, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -129,15 +143,10 @@ pub async fn get_content_as_filename_route( ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); - if let Some(FileMeta { - content_disposition: _, - content_type, - file, - }) = services().media.get(mxc.clone()).await? - { + if let Some(FileMeta { file, .. }) = services().media.get(mxc.clone()).await? { Ok(get_content_as_filename::v3::Response { file, - content_type, + content_type: Some("application/octet-stream".to_owned()), content_disposition: Some(format!("inline; filename={}", body.filename)), cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -147,7 +156,7 @@ pub async fn get_content_as_filename_route( Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), - content_type: remote_content_response.content_type, + content_type: Some("application/octet-stream".to_owned()), file: remote_content_response.file, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -167,7 +176,7 @@ pub async fn get_content_thumbnail_route( let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { - content_type, file, .. + file, content_type, .. 
}) = services() .media .get_thumbnail( @@ -183,11 +192,11 @@ pub async fn get_content_thumbnail_route( { Ok(get_content_thumbnail::v3::Response { file, - content_type, + content_type: content_type.map(sanitize_content_type), cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { - let get_thumbnail_response = services() + let mut get_thumbnail_response = services() .sending .send_federation_request( &body.server_name, @@ -216,6 +225,10 @@ pub async fn get_content_thumbnail_route( ) .await?; + get_thumbnail_response.content_type = get_thumbnail_response + .content_type + .map(sanitize_content_type); + Ok(get_thumbnail_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) From e2d91e26d69bfbe1e64a15acce7d553e41705d1a Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 6 May 2024 19:43:13 +0100 Subject: [PATCH 1678/1727] style: format all toml with taplo --- Cargo.toml | 140 +++++++++++++++++++++++++++-------------- book.toml | 5 +- conduit-example.toml | 4 +- engage.toml | 29 +++++---- nix/shell.nix | 10 ++- rust-toolchain.toml | 10 +-- rustfmt.toml | 2 +- taplo.toml | 24 +++++++ tests/test-config.toml | 10 +-- 9 files changed, 158 insertions(+), 76 deletions(-) create mode 100644 taplo.toml diff --git a/Cargo.toml b/Cargo.toml index 1ab0798..3a5c264 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,24 +1,22 @@ -# Keep alphabetically sorted [workspace.lints.rust] explicit_outlives_requirements = "warn" unused_qualifications = "warn" -# Keep alphabetically sorted [workspace.lints.clippy] cloned_instead_of_copied = "warn" dbg_macro = "warn" str_to_string = "warn" [package] -name = "conduit" -description = "A Matrix homeserver written in Rust" -license = "Apache-2.0" authors = ["timokoesters "] -homepage = "https://conduit.rs" -repository = "https://gitlab.com/famedly/conduit" -readme = "README.md" -version = "0.8.0-alpha" +description = "A Matrix homeserver written in Rust" edition = "2021" +homepage = "https://conduit.rs" +license = "Apache-2.0" +name = "conduit" +readme = "README.md" +repository = "https://gitlab.com/famedly/conduit" +version = "0.8.0-alpha" # See also `rust-toolchain.toml` rust-version = "1.78.0" @@ -30,14 +28,40 @@ workspace = true [dependencies] # Web framework -axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum = { version = "0.6.18", default-features = false, features = [ + "form", + "headers", + "http1", + "http2", + "json", + "matched-path", +], optional = true } axum-server = { version = "0.5.1", features = ["tls-rustls"] } tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } +tower-http = { version = "0.4.1", features = [ + "add-extension", + "cors", + "sensitive-headers", + "trace", + "util", +] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", 
"ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = [ + "appservice-api-c", + "client-api", + "compat", + "federation-api", + "push-gateway-api-c", + "rand", + "ring-compat", + "state-res", + "unstable-exhaustive-types", + "unstable-msc2448", + "unstable-msc3575", + "unstable-unspecified", +] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } @@ -65,11 +89,18 @@ rand = "0.8.5" rust-argon2 = "2" # Used to send requests hyper = "0.14.26" -reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] } +reqwest = { version = "0.11.18", default-features = false, features = [ + "rustls-tls-native-roots", + "socks", +] } # Used for conduit::Error type thiserror = "1.0.40" # Used to generate thumbnails for images -image = { version = "0.25", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.25", default-features = false, features = [ + "gif", + "jpeg", + "png", +] } # Used to encode server public key base64 = "0.22" # Used when hashing the state @@ -81,15 +112,17 @@ regex = "1.8.1" # jwt jsonwebtokens jsonwebtoken = "9.2.0" # Performance measurements -tracing = { version = "0.1.37", features = [] } -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -tracing-flame = "0.2.0" opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } +tracing = { version = "0.1.37", features = [] } +tracing-flame = "0.2.0" tracing-opentelemetry = "0.18.0" +tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } + lru-cache = "0.1.2" -rusqlite = { version = "0.31", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } +rusqlite = { version = "0.31", optional = true, features = ["bundled"] } + # crossbeam = { version = "0.8.2", optional = true } num_cpus = "1.15.0" threadpool = "1.8.1" @@ -102,7 +135,14 @@ thread_local = "1.1.7" hmac = "0.12.1" sha-1 = "0.10.1" # used for conduit's CLI and admin room command parsing -clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] } +clap = { version = "4.3.0", default-features = false, features = [ + "derive", + "error-context", + "help", + "std", + "string", + "usage", +] } futures-util = { version = "0.3.28", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.8", features = ["env", "toml"] } @@ -110,34 +150,32 @@ figment = { version = "0.10.8", features = ["env", "toml"] } # Validating urls in config url = { version = "2", features = ["serde"] } -tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } async-trait = "0.1.68" +tikv-jemallocator = { version = "0.5.0", features = [ + 
"unprefixed_malloc_on_supported_platforms", +], optional = true } sd-notify = { version = "0.4.1", optional = true } [dependencies.rocksdb] +features = ["lz4", "multi-threaded-cf", "zstd"] +optional = true package = "rust-rocksdb" version = "0.25" -optional = true -features = [ - "multi-threaded-cf", - "zstd", - "lz4", -] [target.'cfg(unix)'.dependencies] nix = { version = "0.28", features = ["resource"] } [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] +default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"] #backend_sled = ["sled"] -backend_persy = ["persy", "parking_lot"] +backend_persy = ["parking_lot", "persy"] backend_sqlite = ["sqlite"] #backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] -jemalloc = ["tikv-jemallocator"] -sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] +jemalloc = ["tikv-jemallocator"] +sqlite = ["parking_lot", "rusqlite", "tokio/signal"] systemd = ["sd-notify"] [[bin]] @@ -150,35 +188,45 @@ name = "conduit" path = "src/lib.rs" [package.metadata.deb] -name = "matrix-conduit" -maintainer = "Paul van Tilburg " +assets = [ + [ + "README.md", + "usr/share/doc/matrix-conduit/", + "644", + ], + [ + "debian/README.md", + "usr/share/doc/matrix-conduit/README.Debian", + "644", + ], + [ + "target/release/conduit", + "usr/sbin/matrix-conduit", + "755", + ], +] +conf-files = ["/etc/matrix-conduit/conduit.toml"] copyright = "2020, Timo Kösters " -license-file = ["LICENSE", "3"] depends = "$auto, ca-certificates" extended-description = """\ A fast Matrix homeserver that is optimized for smaller, personal servers, \ instead of a server that has high scalability.""" -section = "net" -priority = "optional" -assets = [ - ["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"], - ["README.md", "usr/share/doc/matrix-conduit/", "644"], - ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], -] -conf-files = [ - "/etc/matrix-conduit/conduit.toml" -] +license-file = ["LICENSE", "3"] +maintainer = "Paul van Tilburg " maintainer-scripts = "debian/" +name = "matrix-conduit" +priority = "optional" +section = "net" systemd-units = { unit-name = "matrix-conduit" } [profile.dev] -lto = 'off' incremental = true +lto = 'off' [profile.release] -lto = 'thin' +codegen-units = 32 incremental = true -codegen-units=32 +lto = 'thin' # If you want to make flamegraphs, enable debug info: # debug = true diff --git a/book.toml b/book.toml index 700ecda..a80adab 100644 --- a/book.toml +++ b/book.toml @@ -1,22 +1,21 @@ [book] -title = "Conduit" description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol" language = "en" multilingual = false src = "docs" +title = "Conduit" [build] build-dir = "public" create-missing = true [output.html] -git-repository-url = "https://gitlab.com/famedly/conduit" edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}" git-repository-icon = "fa-git-square" +git-repository-url = "https://gitlab.com/famedly/conduit" [output.html.search] limit-results = 15 [output.html.code.hidelines] json = "~" - diff --git a/conduit-example.toml b/conduit-example.toml index ef7bd18..74cbb07 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -22,9 +22,9 @@ # YOU NEED TO EDIT THIS #server_name = "your.server.name" +database_backend = "rocksdb" # This is the only directory where Conduit will save its data database_path = "/var/lib/matrix-conduit/" -database_backend = "rocksdb" # The port Conduit will be running on. 
You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port @@ -44,8 +44,8 @@ allow_registration = true # - Start the line with '#' to remove the condition registration_token = "" -allow_federation = true allow_check_for_updates = true +allow_federation = true # Enable the display name lightning bolt on registration. enable_lightning_bolt = true diff --git a/engage.toml b/engage.toml index cb28416..9dc2b31 100644 --- a/engage.toml +++ b/engage.toml @@ -1,48 +1,48 @@ interpreter = ["bash", "-euo", "pipefail", "-c"] [[task]] -name = "engage" group = "versions" +name = "engage" script = "engage --version" [[task]] -name = "rustc" group = "versions" +name = "rustc" script = "rustc --version" [[task]] -name = "cargo" group = "versions" +name = "cargo" script = "cargo --version" [[task]] -name = "cargo-fmt" group = "versions" +name = "cargo-fmt" script = "cargo fmt --version" [[task]] -name = "rustdoc" group = "versions" +name = "rustdoc" script = "rustdoc --version" [[task]] -name = "cargo-clippy" group = "versions" +name = "cargo-clippy" script = "cargo clippy -- --version" [[task]] -name = "lychee" group = "versions" +name = "lychee" script = "lychee --version" [[task]] -name = "cargo-fmt" group = "lints" +name = "cargo-fmt" script = "cargo fmt --check -- --color=always" [[task]] -name = "cargo-doc" group = "lints" +name = "cargo-doc" script = """ RUSTDOCFLAGS="-D warnings" cargo doc \ --workspace \ @@ -52,18 +52,23 @@ RUSTDOCFLAGS="-D warnings" cargo doc \ """ [[task]] -name = "cargo-clippy" group = "lints" +name = "cargo-clippy" script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" [[task]] -name = "lychee" group = "lints" +name = "taplo-fmt" +script = "taplo fmt --check --colors always" + +[[task]] +group = "lints" +name = "lychee" script = "lychee --offline docs" [[task]] -name = "cargo" group = "tests" +name = "cargo" script = """ cargo test \ --workspace \ diff --git a/nix/shell.nix b/nix/shell.nix index 8cfc1c4..bd070fe 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -10,6 +10,7 @@ , mkShell , olm , system +, taplo , toolchain }: @@ -29,10 +30,15 @@ mkShell { # `$PATH` will have stable rustfmt instead. 
inputs.fenix.packages.${system}.latest.rustfmt - # Keep sorted - engage + # rust itself toolchain + # CI tests + engage + + # format toml files + taplo + # Needed for producing Debian packages cargo-deb diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 811d9ce..3ffd3a5 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -12,11 +12,11 @@ [toolchain] channel = "1.78.0" components = [ - # For rust-analyzer - "rust-src", + # For rust-analyzer + "rust-src", ] targets = [ - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-musl", + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", ] diff --git a/rustfmt.toml b/rustfmt.toml index 739b454..2f99016 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,2 @@ +imports_granularity = "Crate" unstable_features = true -imports_granularity="Crate" diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 0000000..04780b4 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,24 @@ +exclude = [".**/*.toml"] +include = ["**/*.toml"] +[formatting] +reorder_arrays = true +reorder_keys = true + +# Prevent breaking command and argument order +[[rule]] +include = ["engage.toml"] +# https://github.com/tamasfe/taplo/issues/608 +#keys = ["interpreter"] + +[rule.formatting] +reorder_arrays = false + +# Prevent breaking license file order +[[rule]] +include = ["Cargo.toml"] +# https://github.com/tamasfe/taplo/issues/608 +# keys = ["package.metadata.deb.license-file", "package.metadata.deb.assets"] +keys = ["package.metadata.deb", "package.metadata.deb.assets"] + +[rule.formatting] +reorder_arrays = false diff --git a/tests/test-config.toml b/tests/test-config.toml index c466687..10db140 100644 --- a/tests/test-config.toml +++ b/tests/test-config.toml @@ -7,9 +7,9 @@ server_name = "localhost" database_path = "/tmp" # All the other settings are left at their defaults: -port = 6167 -max_request_size = 20_000_000 -allow_registration = true -trusted_servers = ["matrix.org"] address = "127.0.0.1" -proxy = "none" \ No newline at end of file +allow_registration = true +max_request_size = 20_000_000 +port = 6167 +proxy = "none" +trusted_servers = ["matrix.org"] From e51f60e437cfcfaa220f0718c7f353c583fb749a Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 14 May 2024 21:20:16 +0100 Subject: [PATCH 1679/1727] docs(faq): add instructions on how to make a user admin --- docs/faq.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/faq.md b/docs/faq.md index 4c23a25..17c8c9d 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -35,3 +35,7 @@ Here is an example: Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically. Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable. + +## How do I make someone an admin? + +Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:` user. 
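As an illustrative note on the FAQ entry above (not part of the patch itself): admins interact with the server by sending messages that mention the server user in the admin room. Assuming a hypothetical `server_name` of `example.com`, that user would be `@conduit:example.com`, and with the `AllowRegistration` command introduced later in this series a message might look like `@conduit:example.com: allow-registration false` to temporarily disable registration. The exact command spelling is determined by the command parser and may differ between Conduit versions.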
From a888c7cb161058bba8596a078be15fdf0b588825 Mon Sep 17 00:00:00 2001 From: mikoto Date: Tue, 28 May 2024 00:22:11 +0200 Subject: [PATCH 1680/1727] OpenID routes Co-Authored-By: Matthias Ahouansou --- src/api/client_server/mod.rs | 2 ++ src/api/client_server/openid.rs | 23 ++++++++++++++++ src/api/ruma_wrapper/axum.rs | 13 ++++++--- src/api/server_server.rs | 20 ++++++++++++++ src/config/mod.rs | 6 +++++ src/database/key_value/users.rs | 47 +++++++++++++++++++++++++++++++++ src/database/mod.rs | 2 ++ src/main.rs | 2 ++ src/service/users/data.rs | 6 +++++ src/service/users/mod.rs | 10 +++++++ 10 files changed, 127 insertions(+), 4 deletions(-) create mode 100644 src/api/client_server/openid.rs diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index afe5181..a35d7a9 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -11,6 +11,7 @@ mod keys; mod media; mod membership; mod message; +mod openid; mod presence; mod profile; mod push; @@ -47,6 +48,7 @@ pub use keys::*; pub use media::*; pub use membership::*; pub use message::*; +pub use openid::*; pub use presence::*; pub use profile::*; pub use push::*; diff --git a/src/api/client_server/openid.rs b/src/api/client_server/openid.rs new file mode 100644 index 0000000..4216041 --- /dev/null +++ b/src/api/client_server/openid.rs @@ -0,0 +1,23 @@ +use std::time::Duration; + +use ruma::{api::client::account, authentication::TokenType}; + +use crate::{services, Result, Ruma}; + +/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token` +/// +/// Request an OpenID token to verify identity with third-party services. +/// +/// - The token generated is only valid for the OpenID API. +pub async fn create_openid_token_route( + body: Ruma, +) -> Result { + let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?; + + Ok(account::request_openid_token::v3::Response { + access_token, + token_type: TokenType::Bearer, + matrix_server_name: services().globals.server_name().to_owned(), + expires_in: Duration::from_secs(expires_in), + }) +} diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index a56ee35..f5ef605 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -102,10 +102,15 @@ where let (sender_user, sender_device, sender_servername, appservice_info) = match (metadata.authentication, token) { (_, Token::Invalid) => { - return Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown access token.", - )) + // OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec + if query_params.access_token.is_some() { + (None, None, None, None) + } else { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown access token.", + )); + } } (AuthScheme::AccessToken, Token::Appservice(info)) => { let user_id = query_params diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 6ca352b..13a6d64 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -24,6 +24,7 @@ use ruma::{ event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, membership::{create_invite, create_join_event, prepare_join_event}, + openid::get_openid_userinfo, query::{get_profile_information, get_room_information}, transactions::{ edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, @@ -1914,6 +1915,25 @@ pub async fn claim_keys_route( 
}) } +/// # `GET /_matrix/federation/v1/openid/userinfo` +/// +/// Get information about the user that generated the OpenID token. +pub async fn get_openid_userinfo_route( + body: Ruma, +) -> Result { + Ok(get_openid_userinfo::v1::Response::new( + services() + .users + .find_from_openid_token(&body.access_token)? + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::Unauthorized, + "OpenID token has expired or does not exist.", + ) + })?, + )) +} + /// # `GET /.well-known/matrix/server` /// /// Returns the federation server discovery information. diff --git a/src/config/mod.rs b/src/config/mod.rs index 652b3a4..378ab92 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -47,6 +47,8 @@ pub struct Config { #[serde(default = "false_fn")] pub allow_registration: bool, pub registration_token: Option, + #[serde(default = "default_openid_token_ttl")] + pub openid_token_ttl: u64, #[serde(default = "true_fn")] pub allow_encryption: bool, #[serde(default = "false_fn")] @@ -302,6 +304,10 @@ fn default_turn_ttl() -> u64 { 60 * 60 * 24 } +fn default_openid_token_ttl() -> u64 { + 60 * 60 +} + // I know, it's a great name pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 0e6db83..63321a4 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -11,6 +11,7 @@ use ruma::{ use tracing::warn; use crate::{ + api::client_server::TOKEN_LENGTH, database::KeyValueDatabase, service::{self, users::clean_signatures}, services, utils, Error, Result, @@ -943,6 +944,52 @@ impl service::users::Data for KeyValueDatabase { Ok(None) } } + + // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations) + fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> { + let token = utils::random_string(TOKEN_LENGTH); + + let expires_in = services().globals.config.openid_token_ttl; + let expires_at = utils::millis_since_unix_epoch() + .checked_add(expires_in * 1000) + .expect("time is valid"); + + let mut value = expires_at.to_be_bytes().to_vec(); + value.extend_from_slice(user_id.as_bytes()); + + self.openidtoken_expiresatuserid + .insert(token.as_bytes(), value.as_slice())?; + + Ok((token, expires_in)) + } + + /// Find out which user an OpenID access token belongs to. + fn find_from_openid_token(&self, token: &str) -> Result> { + let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else { + return Ok(None); + }; + let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len()); + + let expires_at = u64::from_be_bytes( + expires_at_bytes + .try_into() + .map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?, + ); + + if expires_at < utils::millis_since_unix_epoch() { + self.openidtoken_expiresatuserid.remove(token.as_bytes())?; + + return Ok(None); + } + + Some( + UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { + Error::bad_database("User ID in openid_userid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")), + ) + .transpose() + } } impl KeyValueDatabase {} diff --git a/src/database/mod.rs b/src/database/mod.rs index 8d1b191..f4740ff 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -57,6 +57,7 @@ pub struct KeyValueDatabase { pub(super) userid_masterkeyid: Arc, pub(super) userid_selfsigningkeyid: Arc, pub(super) userid_usersigningkeyid: Arc, + pub(super) openidtoken_expiresatuserid: Arc, // expiresatuserid = expiresat + userid pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId @@ -290,6 +291,7 @@ impl KeyValueDatabase { userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?, userfilterid_filter: builder.open_tree("userfilterid_filter")?, todeviceid_events: builder.open_tree("todeviceid_events")?, diff --git a/src/main.rs b/src/main.rs index 5d60a6b..6eeff9a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -277,6 +277,7 @@ fn routes(config: &Config) -> Router { .ruma_route(client_server::get_room_aliases_route) .ruma_route(client_server::get_filter_route) .ruma_route(client_server::create_filter_route) + .ruma_route(client_server::create_openid_token_route) .ruma_route(client_server::set_global_account_data_route) .ruma_route(client_server::set_room_account_data_route) .ruma_route(client_server::get_global_account_data_route) @@ -431,6 +432,7 @@ fn routes(config: &Config) -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .ruma_route(server_server::get_openid_userinfo_route) .ruma_route(server_server::well_known_server) } else { router diff --git a/src/service/users/data.rs b/src/service/users/data.rs index ddf941e..4566c36 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -211,4 +211,10 @@ pub trait Data: Send + Sync { fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result; fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result>; + + // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations) + fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)>; + + /// Find out which user an OpenID access token belongs to. + fn find_from_openid_token(&self, token: &str) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 9133166..c379958 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -598,6 +598,16 @@ impl Service { ) -> Result> { self.db.get_filter(user_id, filter_id) } + + // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations) + pub fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> { + self.db.create_openid_token(user_id) + } + + /// Find out which user an OpenID access token belongs to. 
+ pub fn find_from_openid_token(&self, token: &str) -> Result> { + self.db.find_from_openid_token(token) + } } /// Ensure that a user only sees signatures from themselves and the target user From 6bcc2f80b8f31a6d295aeb29ad402cd20fe3c09a Mon Sep 17 00:00:00 2001 From: tony Date: Sun, 4 Jun 2023 00:12:35 +0200 Subject: [PATCH 1681/1727] add command to set the allow registration status Co-Authored-By: Matthias Ahouansou --- src/api/client_server/account.rs | 2 +- src/service/admin/mod.rs | 21 +++++++++++++++++++++ src/service/globals/mod.rs | 13 +++++++++++-- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 0226abc..1027f14 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -75,7 +75,7 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() && body.appservice_info.is_none() { + if !services().globals.allow_registration().await && body.appservice_info.is_none() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 345268d..0abce70 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -160,6 +160,9 @@ enum AdminCommand { password: Option, }, + /// Temporarily toggle user registration by passing either true or false as an argument, does not persist between restarts + AllowRegistration { status: Option }, + /// Disables incoming federation handling for a room. DisableRoom { room_id: Box }, /// Enables incoming federation handling for a room again. 
@@ -656,6 +659,24 @@ impl Service { "Created user with user_id: {user_id} and password: {password}" )) } + AdminCommand::AllowRegistration { status } => { + if let Some(status) = status { + services().globals.set_registration(status).await; + RoomMessageEventContent::text_plain(if status { + "Registration is now enabled" + } else { + "Registration is now disabled" + }) + } else { + RoomMessageEventContent::text_plain( + if services().globals.allow_registration().await { + "Registration is currently enabled" + } else { + "Registration is currently disabled" + }, + ) + } + } AdminCommand::DisableRoom { room_id } => { services().rooms.metadata.disable_room(&room_id, true)?; RoomMessageEventContent::text_plain("Room disabled.") diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 2373a27..47c4f89 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -55,6 +55,7 @@ pub struct Service { pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, pub config: Config, + allow_registration: RwLock, keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option, @@ -184,6 +185,7 @@ impl Service { let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { + allow_registration: RwLock::new(config.allow_registration), db, config, keypair: Arc::new(keypair), @@ -285,8 +287,15 @@ impl Service { self.config.max_fetch_prev_events } - pub fn allow_registration(&self) -> bool { - self.config.allow_registration + /// Allows for the temporary (non-persistant) toggling of registration + pub async fn set_registration(&self, status: bool) { + let mut lock = self.allow_registration.write().await; + *lock = status; + } + + /// Checks whether user registration is allowed + pub async fn allow_registration(&self) -> bool { + *self.allow_registration.read().await } pub fn allow_encryption(&self) -> bool { From 59d7674b2a2cdf511be7aa7649a6226f4d60666a Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 29 May 2024 09:36:35 +0100 Subject: [PATCH 1682/1727] fix: clarify that 3pids are currently unsupported --- src/api/client_server/account.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 0226abc..1d86c65 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -475,7 +475,7 @@ pub async fn request_3pid_management_token_via_email_route( ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", + "Third party identifiers are currently unsupported by this server implementation", )) } @@ -489,6 +489,6 @@ pub async fn request_3pid_management_token_via_msisdn_route( ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", + "Third party identifiers are currently unsupported by this server implementation", )) } From 9374b74e7708939695ef7b51e106784a28911499 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 8 May 2024 16:51:07 +0100 Subject: [PATCH 1683/1727] refactor: let ruma-server-util handle X-Matrix parsing --- Cargo.lock | 18 +++++++++ Cargo.toml | 1 + src/api/ruma_wrapper/axum.rs | 78 +++++------------------------------- 3 files changed, 29 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8453335..a3b1559 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,6 +2039,7 @@ dependencies = [ "ruma-federation-api", 
"ruma-identity-service-api", "ruma-push-gateway-api", + "ruma-server-util", "ruma-signatures", "ruma-state-res", ] @@ -2184,6 +2185,17 @@ dependencies = [ "serde_json", ] +[[package]] +name = "ruma-server-util" +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +dependencies = [ + "headers", + "ruma-common", + "tracing", + "yap", +] + [[package]] name = "ruma-signatures" version = "0.14.0" @@ -3453,6 +3465,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" + [[package]] name = "zerocopy" version = "0.7.32" diff --git a/Cargo.toml b/Cargo.toml index 3a5c264..e23501e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7 "push-gateway-api-c", "rand", "ring-compat", + "server-util", "state-res", "unstable-exhaustive-types", "unstable-msc2448", diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index a56ee35..136f453 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -4,10 +4,7 @@ use axum::{ async_trait, body::{Full, HttpBody}, extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, - headers::{ - authorization::{Bearer, Credentials}, - Authorization, - }, + headers::{authorization::Bearer, Authorization}, response::{IntoResponse, Response}, BoxError, RequestExt, RequestPartsExt, }; @@ -15,7 +12,8 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use http::{Request, StatusCode}; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, + server_util::authorization::XMatrix, + CanonicalJsonValue, OwnedDeviceId, OwnedUserId, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -191,7 +189,12 @@ where let signatures = BTreeMap::from_iter([( x_matrix.origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), + CanonicalJsonValue::Object( + origin_signatures + .into_iter() + .map(|(k, v)| (k.to_string(), v)) + .collect(), + ), )]); let mut request_map = BTreeMap::from_iter([ @@ -226,7 +229,7 @@ where let keys_result = services() .rooms .event_handler - .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()]) .await; let keys = match keys_result { @@ -340,67 +343,6 @@ where } } -struct XMatrix { - destination: Option, - origin: OwnedServerName, - key: String, // KeyName? - sig: String, -} - -impl Credentials for XMatrix { - const SCHEME: &'static str = "X-Matrix"; - - fn decode(value: &http::HeaderValue) -> Option { - debug_assert!( - value.as_bytes().starts_with(b"X-Matrix "), - "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}", - ); - - let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) - .ok()? 
- .trim_start(); - - let mut origin = None; - let mut key = None; - let mut sig = None; - let mut destination = None; - - for entry in parameters.split_terminator(',') { - let (name, value) = entry.split_once('=')?; - - // It's not at all clear why some fields are quoted and others not in the spec, - // let's simply accept either form for every field. - let value = value - .strip_prefix('"') - .and_then(|rest| rest.strip_suffix('"')) - .unwrap_or(value); - - // FIXME: Catch multiple fields of the same name - match name { - "origin" => origin = Some(value.try_into().ok()?), - "key" => key = Some(value.to_owned()), - "sig" => sig = Some(value.to_owned()), - "destination" => destination = Some(value.try_into().ok()?), - _ => debug!( - "Unexpected field `{}` in X-Matrix Authorization header", - name - ), - } - } - - Some(Self { - destination, - origin: origin?, - key: key?, - sig: sig?, - }) - } - - fn encode(&self) -> http::HeaderValue { - todo!() - } -} - impl IntoResponse for RumaResponse { fn into_response(self) -> Response { match self.0.try_into_http_response::() { From 6c2eb4c78657e5c4ef68fbcdb29648cb7ae785fe Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 29 May 2024 17:38:13 +0100 Subject: [PATCH 1684/1727] feat(admin): remove alias command --- src/service/admin/mod.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0abce70..a6caaa2 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -77,6 +77,12 @@ enum AdminCommand { /// List all rooms we are currently handling an incoming pdu from IncomingFederation, + /// Removes an alias from the server + RemoveAlias { + /// The alias to be removed + alias: Box, + }, + /// Deactivate a user /// /// User will not be removed from all rooms by default. @@ -907,6 +913,23 @@ impl Service { ) } } + AdminCommand::RemoveAlias { alias } => { + if alias.server_name() != services().globals.server_name() { + RoomMessageEventContent::text_plain( + "Cannot remove alias which is not from this server", + ) + } else if services() + .rooms + .alias + .resolve_local_alias(&alias)? + .is_none() + { + RoomMessageEventContent::text_plain("No such alias exists") + } else { + services().rooms.alias.remove_alias(&alias)?; + RoomMessageEventContent::text_plain("Alias removed sucessfully") + } + } }; Ok(reply_message_content) From ec8dfc283ca1285c80056225116c8f2b935d94a5 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 27 May 2024 11:47:05 +0100 Subject: [PATCH 1685/1727] fix(membership): fallback to locally signed event if the join wasn't a restricted one on send_join response --- src/api/client_server/membership.rs | 53 ++++++++++++----------------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index a673eaa..a042614 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -981,6 +981,8 @@ async fn join_room_by_id_helper( .as_str() }) .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + let restricted_join = join_authorized_via_users_server.is_some(); + // TODO: Is origin needed? 
join_event_stub.insert( "origin".to_owned(), @@ -1027,7 +1029,7 @@ async fn join_room_by_id_helper( ruma::signatures::reference_hash(&join_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") ); - let event_id = <&EventId>::try_from(event_id.as_str()) + let event_id = OwnedEventId::try_from(event_id) .expect("ruma's reference hashes are valid event ids"); // Add event_id back @@ -1052,43 +1054,32 @@ async fn join_room_by_id_helper( ) .await?; - if let Some(signed_raw) = send_join_response.room_state.event { - let (signed_event_id, signed_value) = - match gen_event_id_canonical_json(&signed_raw, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; + let pdu = if let Some(signed_raw) = send_join_response.room_state.event { + let (signed_event_id, signed_pdu) = + gen_event_id_canonical_json(&signed_raw, &room_version_id)?; if signed_event_id != event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err(Error::BadServerResponse( "Server sent event with wrong event id", )); } - drop(state_lock); - let pub_key_map = RwLock::new(BTreeMap::new()); - services() - .rooms - .event_handler - .handle_incoming_pdu( - &remote_server, - &signed_event_id, - room_id, - signed_value, - true, - &pub_key_map, - ) - .await?; + signed_pdu + } else if restricted_join { + return Err(Error::BadServerResponse( + "No signed event was returned, despite just performing a restricted join", + )); } else { - return Err(error); - } + join_event + }; + + drop(state_lock); + let pub_key_map = RwLock::new(BTreeMap::new()); + services() + .rooms + .event_handler + .handle_incoming_pdu(&remote_server, &event_id, room_id, pdu, true, &pub_key_map) + .await?; } else { return Err(error); } From 19154a9f70f66cd6e789a8aab7c89ac5c0d531c2 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 31 May 2024 21:46:38 +0100 Subject: [PATCH 1686/1727] refactor: add server_user to globals --- src/database/mod.rs | 15 +++++------ src/service/admin/mod.rs | 43 ++++++++++++++----------------- src/service/globals/mod.rs | 7 +++++ src/service/rooms/timeline/mod.rs | 8 +++--- 4 files changed, 36 insertions(+), 37 deletions(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index f4740ff..1b178bd 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -406,11 +406,9 @@ impl KeyValueDatabase { // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. if services().users.count()? > 0 { - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = services().globals.server_user(); - if !services().users.exists(&conduit_user)? { + if !services().users.exists(conduit_user)? 
{ error!( "The {} server user does not exist, and the database is not new.", conduit_user @@ -1104,22 +1102,21 @@ impl KeyValueDatabase { /// Sets the emergency password and push rules for the @conduit account in case emergency password is set fn set_emergency_access() -> Result { - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is a valid UserId"); + let conduit_user = services().globals.server_user(); services().users.set_password( - &conduit_user, + conduit_user, services().globals.emergency_password().as_deref(), )?; let (ruleset, res) = match services().globals.emergency_password() { - Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), + Some(_) => (Ruleset::server_default(conduit_user), Ok(true)), None => (Ruleset::new(), Ok(false)), }; services().account_data.update( None, - &conduit_user, + conduit_user, GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index a6caaa2..90b00da 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -217,8 +217,7 @@ impl Service { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = services().globals.server_user(); if let Ok(Some(conduit_room)) = services().admin.get_admin_room() { loop { @@ -252,7 +251,7 @@ impl Service { state_key: None, redacts: None, }, - &conduit_user, + conduit_user, &conduit_room, &state_lock, ) @@ -1037,11 +1036,9 @@ impl Service { let state_lock = mutex_state.lock().await; // Create a user for the server - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = services().globals.server_user(); - services().users.create(&conduit_user, None)?; + services().users.create(conduit_user, None)?; let room_version = services().globals.default_room_version(); let mut content = match room_version { @@ -1054,7 +1051,7 @@ impl Service { | RoomVersionId::V7 | RoomVersionId::V8 | RoomVersionId::V9 - | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), + | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()), RoomVersionId::V11 => RoomCreateEventContent::new_v11(), _ => unreachable!("Validity of room version already checked"), }; @@ -1074,7 +1071,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1102,7 +1099,7 @@ impl Service { state_key: Some(conduit_user.to_string()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1110,7 +1107,7 @@ impl Service { // 3. 
Power levels let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); + users.insert(conduit_user.to_owned(), 100.into()); services() .rooms @@ -1127,7 +1124,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1146,7 +1143,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1167,7 +1164,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1188,7 +1185,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1208,7 +1205,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1228,7 +1225,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1254,7 +1251,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1301,9 +1298,7 @@ impl Service { let state_lock = mutex_state.lock().await; // Use the server user to grant the new admin's power level - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = services().globals.server_user(); // Invite and join the real user services() @@ -1327,7 +1322,7 @@ impl Service { state_key: Some(user_id.to_string()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1379,7 +1374,7 @@ impl Service { state_key: Some("".to_owned()), redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ) @@ -1398,7 +1393,7 @@ impl Service { state_key: None, redacts: None, }, - &conduit_user, + conduit_user, &room_id, &state_lock, ).await?; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 47c4f89..f597930 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -72,6 +72,7 @@ pub struct Service { pub roomid_mutex_state: RwLock>>>, pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub roomid_federationhandletime: RwLock>, + server_user: OwnedUserId, pub stateres_mutex: Arc>, pub rotate: RotationHandler, @@ -186,6 +187,8 @@ impl Service { let mut s = Self { allow_registration: RwLock::new(config.allow_registration), + server_user: UserId::parse(format!("@conduit:{}", &config.server_name)) + .expect("@conduit:server_name is valid"), db, config, keypair: Arc::new(keypair), @@ -279,6 +282,10 @@ impl Service { self.config.server_name.as_ref() } + pub fn server_user(&self) -> &UserId { + self.server_user.as_ref() + } + pub fn max_request_size(&self) -> u32 { self.config.max_request_size } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a3b1d57..2873331 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -483,16 +483,16 @@ impl Service { .search .index_pdu(shortroomid, &pdu_id, &body)?; - let server_user = format!("@conduit:{}", services().globals.server_name()); + let server_user = services().globals.server_user(); let to_conduit = body.starts_with(&format!("{server_user}: ")) || body.starts_with(&format!("{server_user} ")) || body == format!("{server_user}:") - || body == server_user; + || body == server_user.as_str(); // This 
will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit - let from_conduit = pdu.sender == server_user + let from_conduit = pdu.sender == *server_user && services().globals.emergency_password().is_none(); if let Some(admin_room) = services().admin.get_admin_room()? { @@ -857,7 +857,7 @@ impl Service { .filter(|v| v.starts_with('@')) .unwrap_or(sender.as_str()); let server_name = services().globals.server_name(); - let server_user = format!("@conduit:{}", server_name); + let server_user = services().globals.server_user().as_str(); let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid content in pdu."))?; From b46000fadc7450799a27154a6398276c95fa208b Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 1 Apr 2024 10:52:36 +0100 Subject: [PATCH 1687/1727] feat: recurse relationships --- Cargo.lock | 27 ++--- Cargo.toml | 2 +- src/api/client_server/relations.rs | 91 +++------------ src/service/rooms/pdu_metadata/mod.rs | 158 +++++++++++++++++--------- 4 files changed, 138 insertions(+), 140 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3b1559..834deaa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2027,7 +2027,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "assign", "js_int", @@ -2042,12 +2042,13 @@ dependencies = [ "ruma-server-util", "ruma-signatures", "ruma-state-res", + "web-time", ] [[package]] name = "ruma-appservice-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "js_int", "ruma-common", @@ -2059,7 +2060,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "as_variant", "assign", @@ -2078,7 +2079,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "as_variant", "base64 0.21.7", @@ -2108,7 +2109,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "as_variant", "indexmap 2.2.5", @@ -2130,7 +2131,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = 
"git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "js_int", "ruma-common", @@ -2142,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "js_int", "thiserror", @@ -2151,7 +2152,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "js_int", "ruma-common", @@ -2161,7 +2162,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "once_cell", "proc-macro-crate", @@ -2176,7 +2177,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "js_int", "ruma-common", @@ -2188,7 +2189,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "headers", "ruma-common", @@ -2199,7 +2200,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "base64 0.21.7", "ed25519-dalek", @@ -2215,7 +2216,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" dependencies = [ "itertools 0.11.0", "js_int", diff --git a/Cargo.toml b/Cargo.toml index e23501e..e0eb8c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ tower-http = { version = "0.4.1", features = [ # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = [ +ruma = { git = 
"https://github.com/ruma/ruma", rev = "c5f8137ba9741b2317313256b57e6e14b61fb419", features = [ "appservice-api-c", "client-api", "compat", diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index 124f131..27c0072 100644 --- a/src/api/client_server/relations.rs +++ b/src/api/client_server/relations.rs @@ -3,7 +3,7 @@ use ruma::api::client::relations::{ get_relating_events_with_rel_type_and_event_type, }; -use crate::{service::rooms::timeline::PduCount, services, Result, Ruma}; +use crate::{services, Result, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` pub async fn get_relating_events_with_rel_type_and_event_type_route( @@ -11,27 +11,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - let res = services() .rooms .pdu_metadata @@ -41,9 +20,11 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( &body.event_id, Some(body.event_type.clone()), Some(body.rel_type.clone()), - from, - to, - limit, + body.from.clone(), + body.to.clone(), + body.limit, + body.recurse, + &body.dir, )?; Ok( @@ -51,6 +32,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( chunk: res.chunk, next_batch: res.next_batch, prev_batch: res.prev_batch, + recursion_depth: res.recursion_depth, }, ) } @@ -61,27 +43,6 @@ pub async fn get_relating_events_with_rel_type_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - let res = services() .rooms .pdu_metadata @@ -91,15 +52,18 @@ pub async fn get_relating_events_with_rel_type_route( &body.event_id, None, Some(body.rel_type.clone()), - from, - to, - limit, + body.from.clone(), + body.to.clone(), + body.limit, + body.recurse, + &body.dir, )?; Ok(get_relating_events_with_rel_type::v1::Response { chunk: res.chunk, next_batch: res.next_batch, prev_batch: res.prev_batch, + recursion_depth: res.recursion_depth, }) } @@ -109,27 +73,6 @@ pub async fn get_relating_events_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => 
PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - services() .rooms .pdu_metadata @@ -139,8 +82,10 @@ pub async fn get_relating_events_route( &body.event_id, None, None, - from, - to, - limit, + body.from.clone(), + body.to.clone(), + body.limit, + body.recurse, + &body.dir, ) } diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 411f4f5..5ffe884 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -3,9 +3,9 @@ use std::sync::Arc; pub use data::Data; use ruma::{ - api::client::relations::get_relating_events, + api::{client::relations::get_relating_events, Direction}, events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UserId, + EventId, RoomId, UInt, UserId, }; use serde::Deserialize; @@ -48,37 +48,57 @@ impl Service { target: &EventId, filter_event_type: Option, filter_rel_type: Option, - from: PduCount, - to: Option, - limit: usize, + from: Option, + to: Option, + limit: Option, + recurse: bool, + dir: &Direction, ) -> Result { + let from = match from { + Some(from) => PduCount::try_from_string(&from)?, + None => match dir { + Direction::Forward => PduCount::min(), + Direction::Backward => PduCount::max(), + }, + }; + + let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = limit + .and_then(|u| u32::try_from(u).ok()) + .map_or(10_usize, |u| u as usize) + .min(100); + let next_token; - //TODO: Fix ruma: match body.dir { - match ruma::api::Direction::Backward { - ruma::api::Direction::Forward => { - let events_after: Vec<_> = services() - .rooms - .pdu_metadata - .relations_until(sender_user, room_id, target, from)? 
// TODO: should be relations_after - .filter(|r| { - r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) - } else { - false - } - }) + // Spec (v1.10) recommends depth of at least 3 + let depth: u8 = if recurse { 3 } else { 1 }; + + match dir { + Direction::Forward => { + let relations_until = &services().rooms.pdu_metadata.relations_until( + sender_user, + room_id, + target, + from, + depth, + )?; + let events_after: Vec<_> = relations_until // TODO: should be relations_after + .iter() + .filter(|(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } }) .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events .filter(|(_, pdu)| { services() .rooms @@ -86,7 +106,7 @@ impl Service { .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to` .collect(); next_token = events_after.last().map(|(count, _)| count).copied(); @@ -101,31 +121,32 @@ impl Service { chunk: events_after, next_batch: next_token.map(|t| t.stringify()), prev_batch: Some(from.stringify()), + recursion_depth: if recurse { Some(depth.into()) } else { None }, }) } - ruma::api::Direction::Backward => { - let events_before: Vec<_> = services() - .rooms - .pdu_metadata - .relations_until(sender_user, room_id, target, from)? - .filter(|r| { - r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) - } else { - false - } - }) + Direction::Backward => { + let relations_until = &services().rooms.pdu_metadata.relations_until( + sender_user, + room_id, + target, + from, + depth, + )?; + let events_before: Vec<_> = relations_until + .iter() + .filter(|(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } }) .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events .filter(|(_, pdu)| { services() .rooms @@ -133,7 +154,7 @@ impl Service { .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to` .collect(); next_token = events_before.last().map(|(count, _)| count).copied(); @@ -147,6 +168,7 @@ impl Service { chunk: events_before, next_batch: next_token.map(|t| t.stringify()), prev_batch: Some(from.stringify()), + recursion_depth: if recurse { Some(depth.into()) } else { None }, }) } } @@ -158,14 +180,44 @@ impl Service { room_id: &'a RoomId, target: &'a EventId, until: PduCount, - ) -> Result> + 'a> { + max_depth: u8, + ) -> Result> { let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; let target = match services().rooms.timeline.get_pdu_count(target)? 
{ Some(PduCount::Normal(c)) => c, // TODO: Support backfilled relations _ => 0, // This will result in an empty iterator }; - self.db.relations_until(user_id, room_id, target, until) + + self.db + .relations_until(user_id, room_id, target, until) + .map(|mut relations| { + let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect(); + let mut stack: Vec<_> = + pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect(); + + while let Some(stack_pdu) = stack.pop() { + let target = match stack_pdu.0 .0 { + PduCount::Normal(c) => c, + // TODO: Support backfilled relations + PduCount::Backfilled(_) => 0, // This will result in an empty iterator + }; + + if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until) + { + for relation in relations.flatten() { + if stack_pdu.1 < max_depth { + stack.push((relation.clone(), stack_pdu.1 + 1)); + } + + pdus.push(relation); + } + } + } + + pdus.sort_by(|a, b| a.0.cmp(&b.0)); + pdus + }) } #[tracing::instrument(skip(self, room_id, event_ids))] From 1dbb3433e06d5f721408b9bf5cdfa1b0828a5054 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 3 Jun 2024 21:35:20 +0100 Subject: [PATCH 1688/1727] fix(media): use csp instead of modifying content-type --- src/api/client_server/media.rs | 31 +++++++++++-------------------- src/main.rs | 17 +++++++++++++---- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index a1bfab4..5cd2b2f 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -22,14 +22,6 @@ pub async fn get_media_config_route( }) } -fn sanitize_content_type(content_type: String) -> String { - if content_type == "image/jpeg" || content_type == "image/png" { - content_type - } else { - "application/octet-stream".to_owned() - } -} - /// # `POST /_matrix/media/r0/upload` /// /// Permanently save media in the server. @@ -108,13 +100,13 @@ pub async fn get_content_route( if let Some(FileMeta { content_disposition, + content_type, file, - .. }) = services().media.get(mxc.clone()).await? { Ok(get_content::v3::Response { file, - content_type: Some("application/octet-stream".to_owned()), + content_type, content_disposition, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -124,7 +116,7 @@ pub async fn get_content_route( Ok(get_content::v3::Response { content_disposition: remote_content_response.content_disposition, - content_type: Some("application/octet-stream".to_owned()), + content_type: remote_content_response.content_type, file: remote_content_response.file, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -143,10 +135,13 @@ pub async fn get_content_as_filename_route( ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); - if let Some(FileMeta { file, .. }) = services().media.get(mxc.clone()).await? { + if let Some(FileMeta { + file, content_type, .. + }) = services().media.get(mxc.clone()).await? 
+ { Ok(get_content_as_filename::v3::Response { file, - content_type: Some("application/octet-stream".to_owned()), + content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -156,7 +151,7 @@ pub async fn get_content_as_filename_route( Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), - content_type: Some("application/octet-stream".to_owned()), + content_type: remote_content_response.content_type, file: remote_content_response.file, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) @@ -192,11 +187,11 @@ pub async fn get_content_thumbnail_route( { Ok(get_content_thumbnail::v3::Response { file, - content_type: content_type.map(sanitize_content_type), + content_type, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { - let mut get_thumbnail_response = services() + let get_thumbnail_response = services() .sending .send_federation_request( &body.server_name, @@ -225,10 +220,6 @@ pub async fn get_content_thumbnail_route( ) .await?; - get_thumbnail_response.content_type = get_thumbnail_response - .content_type - .map(sanitize_content_type); - Ok(get_thumbnail_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) diff --git a/src/main.rs b/src/main.rs index 6eeff9a..5fd248a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,8 @@ use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, - response::IntoResponse, + middleware::map_response, + response::{IntoResponse, Response}, routing::{any, get, on, MethodFilter}, Router, }; @@ -13,7 +14,7 @@ use figment::{ Figment, }; use http::{ - header::{self, HeaderName}, + header::{self, HeaderName, CONTENT_SECURITY_POLICY}, Method, StatusCode, Uri, }; use ruma::api::{ @@ -141,6 +142,13 @@ async fn main() { } } +/// Adds additional headers to prevent any potential XSS attacks via the media repo +async fn set_csp_header(response: Response) -> impl IntoResponse { + ( + [(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response + ) +} + async fn run_server() -> io::Result<()> { let config = &services().globals.config; let addr = SocketAddr::from((config.address, config.port)); @@ -181,6 +189,7 @@ async fn run_server() -> io::Result<()> { ]) .max_age(Duration::from_secs(86400)), ) + .layer(map_response(set_csp_header)) .layer(DefaultBodyLimit::max( config .max_request_size @@ -219,7 +228,7 @@ async fn run_server() -> io::Result<()> { async fn spawn_task( req: http::Request, next: axum::middleware::Next, -) -> std::result::Result { +) -> std::result::Result { if services().globals.shutdown.load(atomic::Ordering::Relaxed) { return Err(StatusCode::SERVICE_UNAVAILABLE); } @@ -231,7 +240,7 @@ async fn spawn_task( async fn unrecognized_method( req: http::Request, next: axum::middleware::Next, -) -> std::result::Result { +) -> std::result::Result { let method = req.method().clone(); let uri = req.uri().clone(); let inner = next.run(req).await; From 39b4932725ae88697f975070eae00822cc7f1dd7 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 6 Jun 2024 21:43:10 +0100 Subject: [PATCH 1689/1727] docs: add security disclosure instructions --- README.md | 7 
+++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 474a524..6851c5b 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,13 @@ If you have any questions, feel free to - Send an direct message to `@timokoesters:fachschaften.org` on Matrix - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) +#### Security + +If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs) +and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to +[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before +a fix is released publically. + #### Thanks to Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. From ba2a5a6115e3d948661c253852280339e35cbc19 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 7 Apr 2024 20:46:18 +0100 Subject: [PATCH 1690/1727] chore: bump all dependencies --- Cargo.lock | 1315 +++++++++++++++--------- Cargo.toml | 70 +- src/api/client_server/account.rs | 2 +- src/api/client_server/context.rs | 2 +- src/api/client_server/membership.rs | 6 +- src/api/client_server/message.rs | 2 +- src/api/client_server/room.rs | 6 +- src/api/client_server/search.rs | 2 +- src/api/client_server/session.rs | 8 +- src/api/client_server/state.rs | 10 +- src/api/client_server/sync.rs | 42 +- src/api/client_server/typing.rs | 2 +- src/api/ruma_wrapper/axum.rs | 109 +- src/api/server_server.rs | 14 +- src/database/key_value/uiaa.rs | 2 +- src/main.rs | 25 +- src/service/globals/mod.rs | 27 +- src/service/rooms/auth_chain/mod.rs | 5 +- src/service/rooms/event_handler/mod.rs | 6 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 16 +- src/service/uiaa/mod.rs | 4 +- src/utils/error.rs | 2 +- 23 files changed, 975 insertions(+), 704 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 834deaa..20013bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -31,30 +31,30 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] -name = "allocator-api2" -version = "0.2.16" +name = "anstyle" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] -name = "anstyle" -version = "1.0.6" +name = "anyhow" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "arc-swap" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayref" @@ -81,10 +81,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-trait" -version = "0.1.77" +name = "async-stream" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-trait" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", @@ -101,10 +123,16 @@ dependencies = [ ] [[package]] -name = "autocfg" -version = "1.1.0" +name = "atomic-waker" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" @@ -113,14 +141,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", "futures-util", - "headers", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + "async-trait", + "axum-core 0.4.3", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -132,7 +188,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.1", "tower", "tower-layer", "tower-service", @@ -147,8 +203,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "mime", "rustversion", "tower-layer", @@ -156,30 +212,77 @@ dependencies = [ ] [[package]] -name = "axum-server" -version = "0.5.1" +name = "axum-core" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + 
"bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-extra" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" +dependencies = [ + "axum 0.7.5", + "axum-core 0.4.3", + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "serde", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-server" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1ad46c3ec4e12f4a4b6835e173ba21c25e484c9d02b49770bf006ce5367c036" dependencies = [ "arc-swap", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "pin-project-lite", - "rustls", + "rustls 0.21.12", "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", + "tower", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", @@ -198,9 +301,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -214,10 +317,10 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools", "lazy_static", "lazycell", "proc-macro2", @@ -236,9 +339,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "blake2b_simd" @@ -262,15 +365,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" [[package]] name = "byteorder" @@ -280,9 +383,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bzip2-sys" @@ -297,12 +400,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.90" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -328,9 +432,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -339,9 +443,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.2" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -359,11 +463,11 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn", @@ -386,9 +490,10 @@ name = "conduit" version = "0.8.0-alpha" dependencies = [ "async-trait", - "axum", + "axum 0.7.5", + "axum-extra", "axum-server", - "base64 0.22.0", + "base64 0.22.1", "bytes", "clap", "directories", @@ -396,15 +501,18 @@ dependencies = [ "futures-util", "hickory-resolver", "hmac", - "http", - "hyper", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", "image", "jsonwebtoken", "lru-cache", "nix", "num_cpus", "opentelemetry", - "opentelemetry-jaeger", + "opentelemetry-jaeger-propagator", + "opentelemetry-otlp", + "opentelemetry_sdk", "parking_lot", "persy", "rand", @@ -428,6 +536,7 @@ dependencies = [ "tokio", "tower", "tower-http", + "tower-service", "tracing", "tracing-flame", "tracing-opentelemetry", @@ -480,9 +589,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] @@ -495,27 +604,27 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" @@ -556,29 +665,22 @@ dependencies = [ ] [[package]] -name = "dashmap" -version = "5.5.3" +name = "data-encoding" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if", - "hashbrown 0.14.3", - "lock_api", - "once_cell", - "parking_lot_core", -] +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] -name = "data-encoding" -version = "2.5.0" +name = "date_header" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "0c03c416ed1a30fbb027ef484ba6ab6f80e1eada675e1a2b92fd673c045a1f1d" [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -652,18 +754,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" - -[[package]] -name = "encoding_rs" -version = "0.8.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" -dependencies = [ - "cfg-if", -] +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "enum-as-inner" @@ -671,7 +764,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn", @@ -706,15 +799,15 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "figment" -version = "0.10.14" +version = "0.10.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b6e5bc7bd59d60d0d45a6ccab6cf0f4ce28698fb4e81e750ddf229c9b824026" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ "atomic", "pear", @@ -726,9 +819,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -759,21 +852,6 @@ dependencies = [ 
"winapi", ] -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - [[package]] name = "futures-channel" version = "0.3.30" @@ -781,7 +859,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", - "futures-sink", ] [[package]] @@ -836,13 +913,10 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ - "futures-channel", "futures-core", - "futures-io", "futures-macro", "futures-sink", "futures-task", - "memchr", "pin-project-lite", "pin-utils", "slab", @@ -860,9 +934,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -883,9 +957,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "glob" @@ -895,17 +969,36 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap 2.2.5", + "http 0.2.12", + "indexmap 2.2.6", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -920,33 +1013,32 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", - "allocator-api2", ] [[package]] name = "hashlink" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] name = "headers" -version = "0.3.9" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http", + "http 1.1.0", "httpdate", "mime", "sha1", @@ -954,11 +1046,11 @@ dependencies = [ [[package]] name = "headers-core" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ - "http", + "http 1.1.0", ] [[package]] @@ -967,6 +1059,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.9" @@ -1049,6 +1147,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -1056,15 +1165,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", "pin-project-lite", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +dependencies = [ + "bytes", + "futures-core", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite", +] [[package]] name = "httparse" @@ -1080,17 +1206,17 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -1103,17 +1229,73 @@ dependencies = [ ] [[package]] -name = "hyper-rustls" -version = "0.24.2" +name = "hyper" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + 
"pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http", - "hyper", - "rustls", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.25.0", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.29", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", ] [[package]] @@ -1164,12 +1346,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -1179,12 +1361,6 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" -[[package]] -name = "integer-encoding" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" - [[package]] name = "ipconfig" version = "0.3.2" @@ -1194,7 +1370,7 @@ dependencies = [ "socket2", "widestring", "windows-sys 0.48.0", - "winreg", + "winreg 0.50.0", ] [[package]] @@ -1203,15 +1379,6 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.12.1" @@ -1223,15 +1390,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -1265,9 +1432,9 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "9.2.0" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.7", "js-sys", @@ -1280,9 +1447,9 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" +checksum = "50a0ba6de5f7af397afff922f22c149ff605c766cd3269cf6c1cd5e466dbe3b9" dependencies = [ "const_panic", "konst_kernel", @@ -1291,9 +1458,9 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" +checksum = "be0a455a1719220fd6adf756088e1c69a85bf14b6a9e24537a5cc04f503edb2b" dependencies = [ "typewit", ] @@ -1312,9 +1479,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" @@ -1323,18 +1490,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "libc", - "redox_syscall", ] [[package]] @@ -1350,9 +1516,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "pkg-config", @@ -1367,9 +1533,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1429,9 +1595,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "mime" @@ -1447,9 +1613,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = 
"87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", "simd-adler32", @@ -1472,7 +1638,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "cfg_aliases", "libc", @@ -1500,11 +1666,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -1526,9 +1691,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -1545,9 +1710,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" dependencies = [ "memchr", ] @@ -1566,71 +1731,80 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.18.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" +checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", -] - -[[package]] -name = "opentelemetry-jaeger" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" -dependencies = [ - "async-trait", - "futures", - "futures-executor", - "once_cell", - "opentelemetry", - "opentelemetry-semantic-conventions", - "thiserror", - "thrift", - "tokio", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" -dependencies = [ - "opentelemetry", -] - -[[package]] -name = "opentelemetry_api" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" -dependencies = [ - "fnv", - "futures-channel", - "futures-util", - "indexmap 1.9.3", + "futures-core", + "futures-sink", "js-sys", "once_cell", "pin-project-lite", "thiserror", + "urlencoding", ] [[package]] -name = "opentelemetry_sdk" -version = "0.18.0" +name = "opentelemetry-jaeger-propagator" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +checksum = "beb4ec62efc537b60aaa89b92624f986f2523d3a609079f3511cc8ee73490826" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb" +dependencies = [ + "async-trait", + "futures-core", + "http 0.2.12", + "opentelemetry", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910" + +[[package]] +name = "opentelemetry_sdk" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e" dependencies = [ "async-trait", "crossbeam-channel", - "dashmap", - "fnv", "futures-channel", "futures-executor", "futures-util", + "glob", "once_cell", - "opentelemetry_api", + "opentelemetry", + "ordered-float", "percent-encoding", "rand", "thiserror", @@ -1646,9 +1820,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "1.1.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ "num-traits", ] @@ -1661,9 +1835,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -1671,22 +1845,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] name = "pear" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ccca0f6c17acc81df8e242ed473ec144cbf5c98037e69aa6d144780aad103c8" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" dependencies = [ "inlinable_string", "pear_codegen", @@ -1695,9 +1869,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e22670e8eb757cff11d6c199ca7b987f352f0346e0be4dd23869ec72cb53c77" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1707,11 +1881,11 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "serde", ] @@ -1759,9 +1933,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -1787,9 +1961,9 @@ checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "png" @@ -1818,19 +1992,18 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-crate" -version = "2.0.2" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_datetime", - "toml_edit", + "toml_edit 0.21.1", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -1848,6 +2021,29 @@ dependencies = [ "yansi", ] +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -1856,9 +2052,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -1895,18 +2091,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = 
"bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", "libredox", @@ -1915,14 +2111,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", "regex-automata 0.4.6", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -1942,7 +2138,7 @@ checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -1953,26 +2149,26 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" -version = "0.11.26" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bytes", - "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", "hyper-rustls", + "hyper-util", "ipnet", "js-sys", "log", @@ -1980,23 +2176,23 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.22.4", "rustls-native-certs", "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", - "system-configuration", + "sync_wrapper 0.1.2", "tokio", - "tokio-rustls", + "tokio-rustls 0.25.0", "tokio-socks", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.52.0", ] [[package]] @@ -2026,8 +2222,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.10.1" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "assign", "js_int", @@ -2047,8 +2243,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.10.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "js_int", "ruma-common", @@ -2059,13 +2255,14 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.18.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "as_variant", "assign", "bytes", - "http", + "date_header", + "http 1.1.0", "js_int", "js_option", "maplit", @@ -2074,19 +2271,22 @@ dependencies = [ "serde", "serde_html_form", "serde_json", + "thiserror", + "url", + 
"web-time", ] [[package]] name = "ruma-common" -version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.13.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "as_variant", - "base64 0.21.7", + "base64 0.22.1", "bytes", "form_urlencoded", - "http", - "indexmap 2.2.5", + "http 1.1.0", + "indexmap 2.2.6", "js_int", "konst", "percent-encoding", @@ -2108,11 +2308,11 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.28.1" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "as_variant", - "indexmap 2.2.5", + "indexmap 2.2.6", "js_int", "js_option", "percent-encoding", @@ -2130,8 +2330,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "js_int", "ruma-common", @@ -2142,8 +2342,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.9.5" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "js_int", "thiserror", @@ -2151,8 +2351,8 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "js_int", "ruma-common", @@ -2161,8 +2361,8 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.13.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "once_cell", "proc-macro-crate", @@ -2176,8 +2376,8 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "js_int", "ruma-common", @@ -2188,8 +2388,8 @@ dependencies = [ [[package]] name = "ruma-server-util" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ "headers", "ruma-common", @@ -2199,10 +2399,10 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.15.0" +source = 
"git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "ed25519-dalek", "pkcs8", "rand", @@ -2215,10 +2415,10 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=c5f8137ba9741b2317313256b57e6e14b61fb419#c5f8137ba9741b2317313256b57e6e14b61fb419" +version = "0.11.0" +source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" dependencies = [ - "itertools 0.11.0", + "itertools", "js_int", "ruma-common", "ruma-events", @@ -2234,7 +2434,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2281,9 +2481,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -2302,37 +2502,59 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] [[package]] -name = "rustls-native-certs" -version = "0.6.3" +name = "rustls" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", "rustls-pemfile", + "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -2344,16 +2566,27 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.14" +name = "rustls-webpki" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" 
+version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" @@ -2388,11 +2621,11 @@ checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -2401,9 +2634,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -2411,24 +2644,24 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", @@ -2437,12 +2670,12 @@ dependencies = [ [[package]] name = "serde_html_form" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" +checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.2.5", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -2450,9 +2683,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -2471,9 +2704,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", 
] @@ -2492,11 +2725,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.32" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -2553,9 +2786,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -2598,15 +2831,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2645,9 +2878,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.52" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -2661,40 +2894,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "system-configuration" -version = "0.5.1" +name = "sync_wrapper" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", @@ -2720,19 +2938,6 @@ dependencies = [ "num_cpus", ] -[[package]] 
-name = "thrift" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" -dependencies = [ - "byteorder", - "integer-encoding", - "log", - "ordered-float", - "threadpool", -] - [[package]] name = "tikv-jemalloc-sys" version = "0.5.4+5.3.0-patched" @@ -2755,9 +2960,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -2776,9 +2981,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -2801,9 +3006,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -2818,10 +3023,20 @@ dependencies = [ ] [[package]] -name = "tokio-macros" -version = "2.2.0" +name = "tokio-io-timeout" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", @@ -2834,7 +3049,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] @@ -2852,9 +3078,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -2863,50 +3089,87 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", 
"futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.8.2" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.22.14", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.22.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +dependencies = [ + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.11", +] + +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.6.20", + "base64 0.21.7", + "bytes", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", ] [[package]] @@ -2917,8 +3180,13 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -2926,17 +3194,15 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite", "tower", "tower-layer", @@ -3000,17 +3266,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "tracing-log" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -3024,16 +3279,20 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.18.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" 
+checksum = "a9be14ba1bbe4ab79e9229f7f89fab8d120b865859f10527f31c033e599d2284" dependencies = [ + "js-sys", "once_cell", "opentelemetry", + "opentelemetry_sdk", + "smallvec", "tracing", "tracing-core", - "tracing-log 0.1.4", + "tracing-log", "tracing-subscriber", + "web-time", ] [[package]] @@ -3051,7 +3310,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", ] [[package]] @@ -3113,9 +3372,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "unsigned-varint" @@ -3142,10 +3401,16 @@ dependencies = [ ] [[package]] -name = "uuid" -version = "1.7.0" +name = "urlencoding" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom", ] @@ -3277,15 +3542,15 @@ checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "wildmatch" -version = "2.3.1" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f0a8ed8331210d91b7a4c30d4edef8f21a65c02f2540496e2e79725f6d8a8" +checksum = "3928939971918220fed093266b809d1ee4ec6c1a2d72692ff6876898f3b16c19" [[package]] name = "winapi" @@ -3324,7 +3589,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3344,17 +3609,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3365,9 +3631,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" 
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -3377,9 +3643,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -3389,9 +3655,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -3401,9 +3673,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -3413,9 +3685,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -3425,9 +3697,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -3437,9 +3709,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -3450,6 +3722,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c52728401e1dc672a56e81e593e912aa54c78f40246869f78359a2bf24d29d" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -3460,6 +3741,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "yansi" version = "1.0.1" @@ -3468,24 +3759,24 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yap" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" +checksum = "bfe269e7b803a5e8e20cbd97860e136529cd83bf2c9c6d37b142467e7e1f051f" [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", @@ -3494,9 +3785,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zigzag" @@ -3509,9 +3800,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index e0eb8c9..66f6adb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,43 +28,24 @@ workspace = true [dependencies] # Web framework -axum = { version = "0.6.18", default-features = false, features = [ +axum = { version = "0.7", default-features = false, features = [ "form", - "headers", "http1", "http2", "json", "matched-path", ], optional = true } -axum-server = { version = "0.5.1", features = ["tls-rustls"] } +axum-extra = { version = "0.9", features = ["typed-header"] } +axum-server = { version = "0.6", features = ["tls-rustls"] } tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.1", features = [ +tower-http = { version = "0.5", features = [ "add-extension", "cors", "sensitive-headers", "trace", "util", ] } - -# Used for matrix spec type definitions and helpers -#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "c5f8137ba9741b2317313256b57e6e14b61fb419", features = [ - "appservice-api-c", - "client-api", - "compat", - "federation-api", - "push-gateway-api-c", - "rand", - "ring-compat", - "server-util", - "state-res", - "unstable-exhaustive-types", - "unstable-msc2448", - "unstable-msc3575", - "unstable-unspecified", -] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", 
"client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +tower-service = "0.3" # Async runtime and utilities tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] } @@ -75,7 +56,7 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.4.0" -http = "0.2.9" +http = "1" # Used to find data directory for default db path directories = "5" # Used for ruma wrapper @@ -89,8 +70,14 @@ rand = "0.8.5" # Used to hash passwords rust-argon2 = "2" # Used to send requests -hyper = "0.14.26" -reqwest = { version = "0.11.18", default-features = false, features = [ +hyper = "1.1" +hyper-util = { version = "0.1", features = [ + "client", + "client-legacy", + "http1", + "http2", +] } +reqwest = { version = "0.12", default-features = false, features = [ "rustls-tls-native-roots", "socks", ] } @@ -113,11 +100,13 @@ regex = "1.8.1" # jwt jsonwebtokens jsonwebtoken = "9.2.0" # Performance measurements -opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } -tracing = { version = "0.1.37", features = [] } +opentelemetry = "0.22" +opentelemetry-jaeger-propagator = "0.1" +opentelemetry-otlp = "0.15" +opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] } +tracing = "0.1.37" tracing-flame = "0.2.0" -tracing-opentelemetry = "0.18.0" +tracing-opentelemetry = "0.23" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } lru-cache = "0.1.2" @@ -158,6 +147,25 @@ tikv-jemallocator = { version = "0.5.0", features = [ sd-notify = { version = "0.4.1", optional = true } +# Used for matrix spec type definitions and helpers +[dependencies.ruma] +features = [ + "appservice-api-c", + "client-api", + "compat", + "federation-api", + "push-gateway-api-c", + "rand", + "ring-compat", + "server-util", + "state-res", + "unstable-exhaustive-types", + "unstable-msc2448", + "unstable-msc3575", + "unstable-unspecified", +] +git = "https://github.com/ruma/ruma" + [dependencies.rocksdb] features = ["lz4", "multi-threaded-cf", "zstd"] optional = true diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 8ee3013..36640b5 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -77,7 +77,7 @@ pub async fn get_register_available_route( pub async fn register_route(body: Ruma) -> Result { if !services().globals.allow_registration().await && body.appservice_info.is_none() { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Registration has been disabled.", )); } diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 8e193e6..a5edb5e 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -54,7 +54,7 @@ pub async fn get_context_route( .user_can_see_event(sender_user, &room_id, &body.event_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this event.", )); } diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index a042614..25a9061 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -451,7 +451,7 @@ pub async fn get_member_events_route( .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } @@ -486,7 +486,7 @@ pub async fn joined_members_route( .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } @@ -1314,7 +1314,7 @@ pub(crate) async fn invite_helper<'a>( .is_joined(sender_user, room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 89f3359..c9b39f1 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -43,7 +43,7 @@ pub async fn send_message_event_route( && !services().globals.allow_encryption() { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Encryption has been disabled", )); } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index e3e8a74..63e0cac 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -72,7 +72,7 @@ pub async fn create_room_route( && !services().users.is_admin(sender_user)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Room creation has been disabled.", )); } @@ -522,7 +522,7 @@ pub async fn get_room_event_route( &body.event_id, )? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this event.", )); } @@ -551,7 +551,7 @@ pub async fn get_room_aliases_route( .is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index e9fac36..d8b5842 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -43,7 +43,7 @@ pub async fn search_events_route( .is_joined(sender_user, &room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 3e583fa..0707832 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -63,7 +63,7 @@ pub async fn login_route(body: Ruma) -> Result) -> Result) -> Result) -> Result(Some(state_key.clone())) + Ok::<_, Error>(Some(user_id)) } else { Ok(None) } @@ -1572,7 +1572,7 @@ pub async fn sync_events_v4_route( sender_user.clone(), sender_device.clone(), conn_id.clone(), - body.room_subscriptions, + body.room_subscriptions.clone(), ); } @@ -1638,33 +1638,37 @@ pub async fn sync_events_v4_route( .get_member(room_id, &member) .ok() .flatten() - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) + .map(|memberevent| SlidingSyncRoomHero { + user_id: member, + name: memberevent.displayname, + avatar: memberevent.avatar_url, }) }) .take(5) .collect::>(); let name = match &heroes[..] { [] => None, - [only] => Some(only.0.clone()), + [only] => Some( + only.name + .clone() + .unwrap_or_else(|| only.user_id.to_string()), + ), [firsts @ .., last] => Some( firsts .iter() - .map(|h| h.0.clone()) + .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) .collect::>() .join(", ") + " and " - + &last.0, + + &last + .name + .clone() + .unwrap_or_else(|| last.user_id.to_string()), ), }; let avatar = if let [only] = &heroes[..] { - only.1.clone() + only.avatar.clone() } else { None }; @@ -1725,6 +1729,16 @@ pub async fn sync_events_v4_route( ), num_live: None, // Count events in timeline greater than global sync counter timestamp: None, + heroes: if body + .room_subscriptions + .get(room_id) + .map(|sub| sub.include_heroes.unwrap_or_default()) + .unwrap_or_default() + { + Some(heroes) + } else { + None + }, }, ); } diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index e9e9370..21b7a4b 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -17,7 +17,7 @@ pub async fn create_typing_event_route( .is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You are not in this room.", )); } diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 1ad2794..9411c53 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -2,13 +2,14 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, - body::{Full, HttpBody}, - extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, - headers::{authorization::Bearer, Authorization}, + body::Body, + extract::{FromRequest, Path}, response::{IntoResponse, Response}, - BoxError, RequestExt, RequestPartsExt, + RequestExt, RequestPartsExt, }; -use bytes::{Buf, BufMut, Bytes, BytesMut}; +use axum_extra::headers::authorization::Bearer; +use axum_extra::{headers::Authorization, typed_header::TypedHeaderRejectionReason, TypedHeader}; +use bytes::{BufMut, BytesMut}; use http::{Request, StatusCode}; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, @@ -29,37 +30,33 @@ enum Token { } #[async_trait] -impl FromRequest for Ruma +impl FromRequest for Ruma where T: IncomingRequest, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into, { type Rejection = Error; - async fn from_request(req: Request, _state: &S) -> Result { + async fn from_request(req: Request, _state: &S) -> Result { #[derive(Deserialize)] struct QueryParams { access_token: Option, user_id: Option, } - let (mut parts, mut body) = match req.with_limited_body() { - Ok(limited_req) => { - let (parts, body) = limited_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } - Err(original_req) => { - let (parts, body) = original_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } + let (mut parts, mut body) = { + let limited_req = req.with_limited_body(); + let (parts, body) = limited_req.into_parts(); + let body = axum::body::to_bytes( + body, + services() + .globals + .max_request_size() + .try_into() + .unwrap_or(usize::MAX), + ) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + (parts, body) }; let metadata = T::METADATA; @@ -135,7 +132,7 @@ where if !services().users.exists(&user_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "User does not exist.", )); } @@ -175,7 +172,7 @@ where _ => "Unknown header-related error", }; - Error::BadRequest(ErrorKind::Forbidden, msg) + Error::BadRequest(ErrorKind::forbidden(), msg) })?; if let Some(dest) = x_matrix.destination { @@ -242,7 +239,7 @@ where Err(e) => { warn!("Failed to fetch signing keys: {}", e); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Failed to fetch signing keys.", )); } @@ -268,7 +265,7 @@ where } return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Failed to verify X-Matrix signatures.", )); } @@ -351,60 +348,8 @@ where impl IntoResponse for RumaResponse { fn into_response(self) -> Response { match self.0.try_into_http_response::() { - Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(), Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), } } } - -// copied from hyper under the following license: -// Copyright (c) 2014-2021 Sean McArthur - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. -pub(crate) async fn to_bytes(body: T) -> Result -where - T: HttpBody, -{ - futures_util::pin_mut!(body); - - // If there's only 1 chunk, we can just return Buf::to_bytes() - let mut first = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(Bytes::new()); - }; - - let second = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(first.copy_to_bytes(first.remaining())); - }; - - // With more than 1 buf, we gotta flatten into a Vec first. - let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; - let mut vec = Vec::with_capacity(cap); - vec.put(first); - vec.put(second); - - while let Some(buf) = body.data().await { - vec.put(buf?); - } - - Ok(vec.into()) -} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 13a6d64..b3a72a9 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -978,7 +978,7 @@ pub async fn get_event_route( .server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room", )); } @@ -989,7 +989,7 @@ pub async fn get_event_route( &body.event_id, )? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not allowed to see event.", )); } @@ -1021,7 +1021,7 @@ pub async fn get_backfill_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1091,7 +1091,7 @@ pub async fn get_missing_events_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room", )); } @@ -1176,7 +1176,7 @@ pub async fn get_event_authorization_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1234,7 +1234,7 @@ pub async fn get_room_state_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1310,7 +1310,7 @@ pub async fn get_room_state_ids_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 5fd91b0..d6119a8 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase { .userdevicesessionid_uiaainfo .get(&userdevicesessionid)? .ok_or(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "UIAA session does not exist.", ))?, ) diff --git a/src/main.rs b/src/main.rs index 5fd248a..8d242c5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,7 @@ use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ + body::Body, extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, middleware::map_response, response::{IntoResponse, Response}, @@ -69,11 +70,13 @@ async fn main() { config.warn_deprecated(); if config.allow_jaeger { - opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_auto_split_batch(true) - .with_service_name("conduit") - .install_batch(opentelemetry::runtime::Tokio) + opentelemetry::global::set_text_map_propagator( + opentelemetry_jaeger_propagator::Propagator::new(), + ); + let tracer = opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .install_batch(opentelemetry_sdk::runtime::Tokio) .unwrap(); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); @@ -225,9 +228,9 @@ async fn run_server() -> io::Result<()> { Ok(()) } -async fn spawn_task( - req: http::Request, - next: axum::middleware::Next, +async fn spawn_task( + req: http::Request, + next: axum::middleware::Next, ) -> std::result::Result { if services().globals.shutdown.load(atomic::Ordering::Relaxed) { return Err(StatusCode::SERVICE_UNAVAILABLE); @@ -237,9 +240,9 @@ async fn spawn_task( .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -async fn unrecognized_method( - req: http::Request, - next: axum::middleware::Next, +async fn unrecognized_method( + req: http::Request, + next: axum::middleware::Next, ) -> std::result::Result { let method = req.method().clone(); let uri = req.uri().clone(); diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f597930..74bc9eb 100644 --- 
a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -10,11 +10,8 @@ use crate::api::server_server::FedDest; use crate::{services, Config, Error, Result}; use futures_util::FutureExt; use hickory_resolver::TokioAsyncResolver; -use hyper::{ - client::connect::dns::{GaiResolver, Name}, - service::Service as HyperService, -}; -use reqwest::dns::{Addrs, Resolve, Resolving}; +use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName}; +use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use ruma::{ api::{ client::sync::sync_events, @@ -22,6 +19,7 @@ use ruma::{ }, DeviceId, RoomVersionId, ServerName, UserId, }; +use std::str::FromStr; use std::{ collections::{BTreeMap, HashMap}, error::Error as StdError, @@ -37,6 +35,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; +use tower_service::Service as TowerService; use tracing::{error, info}; use base64::{engine::general_purpose, Engine as _}; @@ -139,11 +138,19 @@ impl Resolve for Resolver { }) .unwrap_or_else(|| { let this = &mut self.inner.clone(); - Box::pin(HyperService::::call(this, name).map(|result| { - result - .map(|addrs| -> Addrs { Box::new(addrs) }) - .map_err(|err| -> Box { Box::new(err) }) - })) + Box::pin( + TowerService::::call( + this, + // Beautiful hack, please remove this in the future. + HyperName::from_str(name.as_str()) + .expect("reqwest Name is just wrapper for hyper-util Name"), + ) + .map(|result| { + result + .map(|addrs| -> Addrs { Box::new(addrs) }) + .map_err(|err| -> Box { Box::new(err) }) + }), + ) }) } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index da1944e..1a8a3ad 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -133,7 +133,10 @@ impl Service { match services().rooms.timeline.get_pdu(&event_id) { Ok(Some(pdu)) => { if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Evil event in db", + )); } for auth_event in &pdu.auth_events { let sauthevent = services() diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index b7817e5..13d855d 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -90,7 +90,7 @@ impl Service { if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Federation of this room is currently disabled on this server.", )); } @@ -162,7 +162,7 @@ impl Service { // Check for disabled again because it might have changed if services().rooms.metadata.is_disabled(room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Federation of this room is currently disabled on this server.", )); } @@ -1668,7 +1668,7 @@ impl Service { server_name, room_id ); Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server was denied by room ACL", )) } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 5addc6f..a78296b 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -408,7 +408,7 @@ impl Service { debug!("User is not allowed to see room {room_id}"); // This error will be caught later return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "User is not allowed to see the room", )); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2873331..6fd86d4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -762,7 +762,7 @@ impl Service { if !auth_check { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Event is not authorized.", )); } @@ -842,7 +842,7 @@ impl Service { TimelineEventType::RoomEncryption => { warn!("Encryption is not allowed in the admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Encryption is not allowed in the admins room.", )); } @@ -865,7 +865,7 @@ impl Service { if target == server_user { warn!("Conduit user cannot leave from admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Conduit user cannot leave from admins room.", )); } @@ -881,7 +881,7 @@ impl Service { if count < 2 { warn!("Last admin cannot leave from admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Last admin cannot leave from admins room.", )); } @@ -891,7 +891,7 @@ impl Service { if target == server_user { warn!("Conduit user cannot be banned in admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Conduit user cannot be banned in admins room.", )); } @@ -907,7 +907,7 @@ impl Service { if count < 2 { warn!("Last admin cannot be banned in admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Last admin cannot be banned in admins room.", )); } @@ -939,7 +939,7 @@ impl Service { false, )? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "User cannot redact this event.", )); } @@ -960,7 +960,7 @@ impl Service { false, )? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "User cannot redact this event.", )); } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index ed39af9..696be95 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -86,7 +86,7 @@ impl Service { if !hash_matches { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::Forbidden, + kind: ErrorKind::forbidden(), message: "Invalid username or password.".to_owned(), }); return Ok((false, uiaainfo)); @@ -101,7 +101,7 @@ impl Service { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::Forbidden, + kind: ErrorKind::forbidden(), message: "Invalid registration token.".to_owned(), }); return Ok((false, uiaainfo)); diff --git a/src/utils/error.rs b/src/utils/error.rs index 448f066..1d81110 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -128,7 +128,7 @@ impl Error { kind.clone(), match kind { WrongRoomKeysVersion { .. } - | Forbidden + | Forbidden { .. } | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => StatusCode::FORBIDDEN, From 48c1f3bdba95d6f0522923ec0cea65393ee8bfd1 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 11 Jun 2024 16:06:30 +0200 Subject: [PATCH 1691/1727] fix: userid checks for incoming EDUs --- src/api/server_server.rs | 179 ++++++++++++++++++++------------------- 1 file changed, 91 insertions(+), 88 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b3a72a9..605a467 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -782,50 +782,53 @@ pub async fn send_transaction_message_route( Edu::Receipt(receipt) => { for (room_id, room_updates) in receipt.receipts { for (user_id, user_updates) in room_updates.read { - if let Some((event_id, _)) = user_updates - .event_ids - .iter() - .filter_map(|id| { + if user_id.server_name() == sender_servername { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; services() .rooms - .timeline - .get_pdu_count(id) - .ok() - .flatten() - .map(|r| (id, r)) - }) - .max_by_key(|(_, count)| *count) - { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); - - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - services() - .rooms - .edus - .read_receipt - .readreceipt_update(&user_id, &room_id, event)?; - } else { - // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); + .edus + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; + } else { + // TODO fetch missing events + debug!("No 
known event ids in read receipt: {:?}", user_updates); + } } } } } Edu::Typing(typing) => { - if services() - .rooms - .state_cache - .is_joined(&typing.user_id, &typing.room_id)? + if typing.user_id.server_name() == sender_servername + && services() + .rooms + .state_cache + .is_joined(&typing.user_id, &typing.room_id)? { if typing.typing { services() @@ -849,7 +852,9 @@ pub async fn send_transaction_message_route( } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - services().users.mark_device_key_update(&user_id)?; + if user_id.server_name() == sender_servername { + services().users.mark_device_key_update(&user_id)?; + } } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -857,77 +862,75 @@ pub async fn send_transaction_message_route( message_id, messages, }) => { - // Check if this is a new transaction id - if services() - .transaction_ids - .existing_txnid(&sender, None, &message_id)? - .is_some() + if sender.server_name() == sender_servername + // Check if this is a new transaction id + && services() + .transaction_ids + .existing_txnid(&sender, None, &message_id)? + .is_none() { - continue; - } - - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services().users.add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type.to_string(), - event.deserialize_as().map_err(|e| { - warn!("To-Device event is invalid: {event:?} {e}"); - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - )? - } - - DeviceIdOrAllDevices::AllDevices => { - for target_device_id in - services().users.all_device_ids(target_user_id) - { + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { services().users.add_to_device_event( &sender, target_user_id, - &target_device_id?, + target_device_id, &ev_type.to_string(), - event.deserialize_as().map_err(|_| { + event.deserialize_as().map_err(|e| { + warn!("To-Device event is invalid: {event:?} {e}"); Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", ) })?, - )?; + )? 
+ } + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in + services().users.all_device_ids(target_user_id) + { + services().users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + )?; + } } } } } - } - // Save transaction id with empty data - services() - .transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; + // Save transaction id with empty data + services() + .transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + } } Edu::SigningKeyUpdate(SigningKeyUpdateContent { user_id, master_key, self_signing_key, }) => { - if user_id.server_name() != sender_servername { - continue; - } - if let Some(master_key) = master_key { - services().users.add_cross_signing_keys( - &user_id, - &master_key, - &self_signing_key, - &None, - true, - )?; + if user_id.server_name() == sender_servername { + if let Some(master_key) = master_key { + services().users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + true, + )?; + } } } Edu::_Custom(_) => {} From 7b259272ce48f6946e853232a9af551413a97b8d Mon Sep 17 00:00:00 2001 From: Benjamin Lee Date: Tue, 11 Jun 2024 16:33:55 +0200 Subject: [PATCH 1692/1727] fix: do not return redacted events from search --- src/api/client_server/search.rs | 11 +++--- src/database/key_value/rooms/search.rs | 52 +++++++++++++++++--------- src/service/pdu.rs | 17 +++++++++ src/service/rooms/search/data.rs | 2 + src/service/rooms/search/mod.rs | 10 +++++ src/service/rooms/timeline/mod.rs | 25 +++++++++++-- 6 files changed, 92 insertions(+), 25 deletions(-) diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index d8b5842..bf31fb4 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -89,11 +89,12 @@ pub async fn search_events_route( .get_pdu_from_id(result) .ok()? .filter(|pdu| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .unwrap_or(false) + !pdu.is_redacted() + && services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .unwrap_or(false) }) .map(|pdu| pdu.to_room_event()) }) diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index ad573f0..8a2769b 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,24 +2,46 @@ use ruma::RoomId; use crate::{database::KeyValueDatabase, service, services, utils, Result}; +/// Splits a string into tokens used as keys in the search inverted index +/// +/// This may be used to tokenize both message bodies (for indexing) or search +/// queries (for querying). 
+fn tokenize(body: &str) -> impl Iterator + '_ { + body.split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) +} + impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { - let mut batch = message_body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here - (key, Vec::new()) - }); + let mut batch = tokenize(message_body).map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + (key, Vec::new()) + }); self.tokenids.insert_batch(&mut batch) } + fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + let batch = tokenize(message_body).map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + key + }); + + for token in batch { + self.tokenids.remove(&token)?; + } + + Ok(()) + } + fn search_pdus<'a>( &'a self, room_id: &RoomId, @@ -33,11 +55,7 @@ impl service::rooms::search::Data for KeyValueDatabase { .to_be_bytes() .to_vec(); - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); + let words: Vec<_> = tokenize(search_string).collect(); let iterators = words.clone().into_iter().map(move |word| { let mut prefix2 = prefix.clone(); diff --git a/src/service/pdu.rs b/src/service/pdu.rs index a51d7ec..6991a08 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -72,6 +72,23 @@ impl PduEvent { Ok(()) } + pub fn is_redacted(&self) -> bool { + #[derive(Deserialize)] + struct ExtractRedactedBecause { + redacted_because: Option, + } + + let Some(unsigned) = &self.unsigned else { + return false; + }; + + let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else { + return false; + }; + + unsigned.redacted_because.is_some() + } + pub fn remove_transaction_id(&mut self) -> crate::Result<()> { if let Some(unsigned) = &self.unsigned { let mut unsigned: BTreeMap> = diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 7ea7e3d..7dbfd56 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -4,6 +4,8 @@ use ruma::RoomId; pub trait Data: Send + Sync { fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + #[allow(clippy::type_complexity)] fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index b6f35e7..3b9de19 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -15,6 +15,16 @@ impl Service { self.db.index_pdu(shortroomid, pdu_id, message_body) } + #[tracing::instrument(skip(self))] + pub fn deindex_pdu<'a>( + &self, + shortroomid: u64, + pdu_id: &[u8], + message_body: &str, + ) -> 
Result<()> { + self.db.deindex_pdu(shortroomid, pdu_id, message_body) + } + #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 6fd86d4..9405f00 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -399,7 +399,7 @@ impl Service { &pdu.room_id, false, )? { - self.redact_pdu(redact_id, pdu)?; + self.redact_pdu(redact_id, pdu, shortroomid)?; } } } @@ -416,7 +416,7 @@ impl Service { &pdu.room_id, false, )? { - self.redact_pdu(redact_id, pdu)?; + self.redact_pdu(redact_id, pdu, shortroomid)?; } } } @@ -1100,14 +1100,33 @@ impl Service { /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { + pub fn redact_pdu( + &self, + event_id: &EventId, + reason: &PduEvent, + shortroomid: u64, + ) -> Result<()> { // TODO: Don't reserialize, keep original json if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; + + #[derive(Deserialize)] + struct ExtractBody { + body: String, + } + + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + services() + .rooms + .search + .deindex_pdu(shortroomid, &pdu_id, &content.body)?; + } + let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; pdu.redact(room_version_id, reason)?; + self.replace_pdu( &pdu_id, &utils::to_canonical_object(&pdu).expect("PDU is an object"), From 144d548ef739324ca97db12e8cada60ca3e43e09 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 11 Jun 2024 23:15:02 +0200 Subject: [PATCH 1693/1727] fix: permission checks for aliases --- src/api/client_server/alias.rs | 12 +++-- src/api/client_server/room.rs | 7 ++- src/database/key_value/rooms/alias.rs | 28 ++++++++-- src/database/mod.rs | 4 ++ src/service/admin/mod.rs | 42 +++++++++------ src/service/globals/mod.rs | 8 +++ src/service/rooms/alias/data.rs | 7 ++- src/service/rooms/alias/mod.rs | 78 ++++++++++++++++++++++++--- src/service/rooms/timeline/mod.rs | 9 +++- src/service/users/mod.rs | 24 ++++----- 10 files changed, 168 insertions(+), 51 deletions(-) diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7cbe9fa..06fcc18 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -18,6 +18,8 @@ use ruma::{ pub async fn create_alias_route( body: Ruma, ) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -55,7 +57,7 @@ pub async fn create_alias_route( services() .rooms .alias - .set_alias(&body.room_alias, &body.room_id)?; + .set_alias(&body.room_alias, &body.room_id, sender_user)?; Ok(create_alias::v3::Response::new()) } @@ -64,11 +66,12 @@ pub async fn create_alias_route( /// /// Deletes a room alias from this server. 
/// -/// - TODO: additional access control checks /// - TODO: Update canonical alias event pub async fn delete_alias_route( body: Ruma, ) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -94,7 +97,10 @@ pub async fn delete_alias_route( )); } - services().rooms.alias.remove_alias(&body.room_alias)?; + services() + .rooms + .alias + .remove_alias(&body.room_alias, sender_user)?; // TODO: update alt_aliases? diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 63e0cac..890ff9c 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -485,7 +485,10 @@ pub async fn create_room_route( // Homeserver specific stuff if let Some(alias) = alias { - services().rooms.alias.set_alias(&alias, &room_id)?; + services() + .rooms + .alias + .set_alias(&alias, &room_id, sender_user)?; } if body.visibility == room::Visibility::Public { @@ -815,7 +818,7 @@ pub async fn upgrade_room_route( services() .rooms .alias - .set_alias(&alias, &replacement_room)?; + .set_alias(&alias, &replacement_room, sender_user)?; } // Get the old room power levels diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 6f23032..2f7df78 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,9 +1,15 @@ -use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; +use ruma::{ + api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, + UserId, +}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::alias::Data for KeyValueDatabase { - fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { + // Comes first as we don't want a stuck alias + self.alias_userid + .insert(alias.alias().as_bytes(), user_id.as_bytes())?; self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); @@ -22,13 +28,13 @@ impl service::rooms::alias::Data for KeyValueDatabase { self.aliasid_alias.remove(&key)?; } self.alias_roomid.remove(alias.alias().as_bytes())?; + self.alias_userid.remove(alias.alias().as_bytes()) } else { - return Err(Error::BadRequest( + Err(Error::BadRequest( ErrorKind::NotFound, "Alias does not exist.", - )); + )) } - Ok(()) } fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { @@ -57,4 +63,16 @@ impl service::rooms::alias::Data for KeyValueDatabase { .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) })) } + + fn who_created_alias(&self, alias: &RoomAliasId) -> Result> { + self.alias_userid + .get(alias.alias().as_bytes())? + .map(|bytes| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("User ID in alias_userid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("User ID in alias_roomid is invalid.")) + }) + .transpose() + } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 1b178bd..5171d4b 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -101,6 +101,8 @@ pub struct KeyValueDatabase { pub(super) userroomid_leftstate: Arc, pub(super) roomuserid_leftcount: Arc, + pub(super) alias_userid: Arc, // User who created the alias + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId @@ -327,6 +329,8 @@ impl KeyValueDatabase { userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + alias_userid: builder.open_tree("alias_userid")?, + disabledroomids: builder.open_tree("disabledroomids")?, lazyloadedids: builder.open_tree("lazyloadedids")?, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 90b00da..b3b7a74 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,9 +1,4 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::Instant, -}; +use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant}; use clap::Parser; use regex::Regex; @@ -925,7 +920,15 @@ impl Service { { RoomMessageEventContent::text_plain("No such alias exists") } else { - services().rooms.alias.remove_alias(&alias)?; + // We execute this as the server user for two reasons + // 1. If the user can execute commands in the admin room, they can always remove the alias. + // 2. In the future, we are likely going to be able to allow users to execute commands via + // other methods, such as IPC, which would lead to us not knowing their user id + + services() + .rooms + .alias + .remove_alias(&alias, services().globals.server_user())?; RoomMessageEventContent::text_plain("Alias removed sucessfully") } } @@ -1232,9 +1235,7 @@ impl Service { .await?; // 6. Room alias - let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); + let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned(); services() .rooms @@ -1257,7 +1258,10 @@ impl Service { ) .await?; - services().rooms.alias.set_alias(&alias, &room_id)?; + services() + .rooms + .alias + .set_alias(&alias, &room_id, conduit_user)?; Ok(()) } @@ -1266,15 +1270,10 @@ impl Service { /// /// Errors are propagated from the database, and will have None if there is no admin room pub(crate) fn get_admin_room(&self) -> Result> { - let admin_room_alias: Box = - format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - services() .rooms .alias - .resolve_local_alias(&admin_room_alias) + .resolve_local_alias(services().globals.admin_alias()) } /// Invite the user to the conduit admin room. @@ -1400,6 +1399,15 @@ impl Service { } Ok(()) } + + /// Checks whether a given user is an admin of this server + pub fn user_is_admin(&self, user_id: &UserId) -> Result { + let Some(admin_room) = self.get_admin_room()? 
else { + return Ok(false); + }; + + services().rooms.state_cache.is_joined(user_id, &admin_room) + } } #[cfg(test)] diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 74bc9eb..caf2b3a 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -4,6 +4,7 @@ use ruma::{ serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, }; +use ruma::{OwnedRoomAliasId, RoomAliasId}; use crate::api::server_server::FedDest; @@ -72,6 +73,7 @@ pub struct Service { pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer pub roomid_federationhandletime: RwLock>, server_user: OwnedUserId, + admin_alias: OwnedRoomAliasId, pub stateres_mutex: Arc>, pub rotate: RotationHandler, @@ -194,6 +196,8 @@ impl Service { let mut s = Self { allow_registration: RwLock::new(config.allow_registration), + admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name)) + .expect("#admins:server_name is a valid alias name"), server_user: UserId::parse(format!("@conduit:{}", &config.server_name)) .expect("@conduit:server_name is valid"), db, @@ -293,6 +297,10 @@ impl Service { self.server_user.as_ref() } + pub fn admin_alias(&self) -> &RoomAliasId { + self.admin_alias.as_ref() + } + pub fn max_request_size(&self) -> u32 { self.config.max_request_size } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 629b1ee..dd51407 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,9 +1,12 @@ use crate::Result; -use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId}; pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. - fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>; + + /// Finds the user who assigned the given alias to a room + fn who_created_alias(&self, alias: &RoomAliasId) -> Result>; /// Forgets about an alias. Returns an error if the alias did not exist. 
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index d26030c..95d52ad 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,9 +1,17 @@ mod data; pub use data::Data; +use tracing::error; -use crate::Result; -use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; +use crate::{services, Error, Result}; +use ruma::{ + api::client::error::ErrorKind, + events::{ + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + StateEventType, + }, + OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId, +}; pub struct Service { pub db: &'static dyn Data, @@ -11,13 +19,71 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { - self.db.set_alias(alias, room_id) + pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { + if alias == services().globals.admin_alias() && user_id != services().globals.server_user() + { + Err(Error::BadRequest( + ErrorKind::forbidden(), + "Only the server user can set this alias", + )) + } else { + self.db.set_alias(alias, room_id, user_id) + } } #[tracing::instrument(skip(self))] - pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { - self.db.remove_alias(alias) + fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result { + let Some(room_id) = self.resolve_local_alias(alias)? else { + return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found.")); + }; + + // The creator of an alias can remove it + if self + .db + .who_created_alias(alias)? + .map(|user| user == user_id) + .unwrap_or_default() + // Server admins can remove any local alias + || services().admin.user_is_admin(user_id)? + // Always allow the Conduit user to remove the alias, since there may not be an admin room + || services().globals.server_user ()== user_id + { + Ok(true) + // Checking whether the user is able to change canonical aliases of the room + } else if let Some(event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomPowerLevels, + "", + )? { + serde_json::from_str(event.content.get()) + .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) + .map(|content: RoomPowerLevelsEventContent| { + RoomPowerLevels::from(content) + .user_can_send_state(user_id, StateEventType::RoomCanonicalAlias) + }) + // If there is no power levels event, only the room creator can change canonical aliases + } else if let Some(event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomCreate, + "", + )? { + Ok(event.sender == user_id) + } else { + error!("Room {} has no m.room.create event (VERY BAD)!", room_id); + Err(Error::bad_database("Room has no m.room.create event")) + } + } + + #[tracing::instrument(skip(self))] + pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> { + if self.user_can_remove_alias(alias, user_id)? 
{ + self.db.remove_alias(alias) + } else { + Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not permitted to remove this alias.", + )) + } } #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9405f00..5908a2e 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -496,7 +496,14 @@ impl Service { && services().globals.emergency_password().is_none(); if let Some(admin_room) = services().admin.get_admin_room()? { - if to_conduit && !from_conduit && admin_room == pdu.room_id { + if to_conduit + && !from_conduit + && admin_room == pdu.room_id + && services() + .rooms + .state_cache + .is_joined(server_user, &admin_room)? + { services().admin.process_message(body); } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index c379958..a5694a1 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -9,7 +9,6 @@ pub use data::Data; use ruma::{ api::client::{ device::Device, - error::ErrorKind, filter::FilterDefinition, sync::sync_events::{ self, @@ -20,7 +19,7 @@ use ruma::{ events::AnyToDeviceEvent, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, - OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId, + OwnedRoomId, OwnedUserId, UInt, UserId, }; use crate::{services, Error, Result}; @@ -262,19 +261,14 @@ impl Service { /// Check if a user is an admin pub fn is_admin(&self, user_id: &UserId) -> Result { - let admin_room_alias_id = - RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = services() - .rooms - .alias - .resolve_local_alias(&admin_room_alias_id)? - .unwrap(); - - services() - .rooms - .state_cache - .is_joined(user_id, &admin_room_id) + if let Some(admin_room_id) = services().admin.get_admin_room()? { + services() + .rooms + .state_cache + .is_joined(user_id, &admin_room_id) + } else { + Ok(false) + } } /// Create a new user account on this homeserver. 
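
The patch above replaces the unconditional alias removal with a layered permission check (`user_can_remove_alias`). What follows is a minimal, self-contained sketch of that cascade for illustration only; `Directory`, `PowerLevels`, `Error`, and every helper method here are placeholder stand-ins chosen for this sketch, not Conduit's actual service API.

// Sketch of the alias-removal permission cascade introduced in PATCH 1693.
// All types and helpers are illustrative placeholders, not Conduit APIs.

#[derive(Debug)]
enum Error {
    NotFound,
    BadDatabase(&'static str),
}

// Stand-in for the room's m.room.power_levels content.
struct PowerLevels;

impl PowerLevels {
    fn user_can_send_state(&self, _user_id: &str, _event_type: &str) -> bool {
        // The real check compares the sender's power level against the
        // requirement for the state event type; hard-coded here for the sketch.
        false
    }
}

// Stand-in for the services the real check consults (alias data, admin service, room state).
struct Directory {
    server_user: String,
}

impl Directory {
    fn resolve_local_alias(&self, _alias: &str) -> Result<Option<String>, Error> {
        Ok(Some("!room:example.org".into()))
    }
    fn who_created_alias(&self, _alias: &str) -> Result<Option<String>, Error> {
        Ok(None)
    }
    fn user_is_admin(&self, _user_id: &str) -> Result<bool, Error> {
        Ok(false)
    }
    fn room_power_levels(&self, _room_id: &str) -> Result<Option<PowerLevels>, Error> {
        Ok(Some(PowerLevels))
    }
    fn room_create_sender(&self, _room_id: &str) -> Result<Option<String>, Error> {
        Ok(None)
    }

    fn user_can_remove_alias(&self, alias: &str, user_id: &str) -> Result<bool, Error> {
        let room_id = self.resolve_local_alias(alias)?.ok_or(Error::NotFound)?;

        // 1. The creator of an alias can remove it,
        // 2. server admins can remove any local alias, and
        // 3. the server user is always allowed, since there may be no admin room.
        if self.who_created_alias(alias)?.as_deref() == Some(user_id)
            || self.user_is_admin(user_id)?
            || user_id == self.server_user
        {
            return Ok(true);
        }

        // 4. Otherwise fall back to room state: anyone permitted to send
        //    m.room.canonical_alias state events may also remove the alias.
        if let Some(levels) = self.room_power_levels(&room_id)? {
            return Ok(levels.user_can_send_state(user_id, "m.room.canonical_alias"));
        }

        // 5. With no power-levels event at all, only the room creator qualifies.
        match self.room_create_sender(&room_id)? {
            Some(creator) => Ok(creator == user_id),
            None => Err(Error::BadDatabase("room has no m.room.create event")),
        }
    }
}

fn main() -> Result<(), Error> {
    let dir = Directory {
        server_user: "@conduit:example.org".into(),
    };
    // The server user passes check 3 and may therefore remove any local alias.
    assert!(dir.user_can_remove_alias("#general:example.org", "@conduit:example.org")?);
    Ok(())
}

Checking the alias creator, admin status, and the server user before consulting room state keeps alias cleanup possible even when a power-levels event is missing or unreadable, which is why the cascade only falls back to m.room.power_levels (and finally m.room.create) as a last resort.
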
From c453d45598f1043b432149b714e2565f12f32360 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 12 Jun 2024 19:22:19 +0200 Subject: [PATCH 1694/1727] fix(keys): only use keys valid at the time of PDU or transaction, and actually refresh keys Previously, we only fetched keys once, only requesting them again if we have any missing, allowing for ancient keys to be used to sign PDUs and transactions Now we refresh keys that either have or are about to expire, preventing attacks that make use of leaked private keys of a homeserver We also ensure that when validating PDUs or transactions, that they are valid at the origin_server_ts or time of us receiving the transaction respectfully As to not break event authorization for old rooms, we need to keep old keys around We move verify_keys which we no longer see in direct requests to the origin to old_verify_keys We keep old_verify_keys indefinitely as mentioned above, as to not break event authorization (at least until a future MSC addresses this) --- src/api/client_server/membership.rs | 43 ++- src/api/ruma_wrapper/axum.rs | 19 +- src/database/key_value/globals.rs | 131 +++++---- src/service/admin/mod.rs | 44 ++- src/service/globals/data.rs | 92 +++++- src/service/globals/mod.rs | 107 +++++-- src/service/rooms/event_handler/mod.rs | 376 ++++++++++++++++--------- src/service/rooms/timeline/mod.rs | 8 +- 8 files changed, 584 insertions(+), 236 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 25a9061..1ca711e 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -18,9 +18,8 @@ use ruma::{ }, StateEventType, TimelineEventType, }, - serde::Base64, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -32,7 +31,10 @@ use tokio::sync::RwLock; use tracing::{debug, error, info, warn}; use crate::{ - service::pdu::{gen_event_id_canonical_json, PduBuilder}, + service::{ + globals::SigningKeys, + pdu::{gen_event_id_canonical_json, PduBuilder}, + }, services, utils, Error, PduEvent, Result, Ruma, }; @@ -1130,7 +1132,7 @@ async fn make_join_request( async fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -1177,8 +1179,35 @@ async fn validate_and_add_event_id( } } - if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) - { + let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| { + error!("Invalid PDU, no origin_server_ts field"); + Error::BadRequest( + ErrorKind::MissingParam, + "Invalid PDU, no origin_server_ts field", + ) + })?; + + let origin_server_ts: MilliSecondsSinceUnixEpoch = { + let ts = origin_server_ts.as_integer().ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "origin_server_ts must be an integer", + ) + })?; + + MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Time must be 
after the unix epoch") + })?) + }; + + let unfiltered_keys = (*pub_key_map.read().await).clone(); + + let keys = + services() + .globals + .filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version); + + if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) { warn!("Event {} failed verification {:?} {}", event_id, pdu, e); back_off(event_id).await; return Err(Error::BadServerResponse("Event failed verification.")); diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 9411c53..047f7dc 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -14,7 +14,7 @@ use http::{Request, StatusCode}; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, server_util::authorization::XMatrix, - CanonicalJsonValue, OwnedDeviceId, OwnedUserId, UserId, + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -231,7 +231,7 @@ where let keys_result = services() .rooms .event_handler - .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()]) + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false) .await; let keys = match keys_result { @@ -245,8 +245,19 @@ where } }; - let pub_key_map = - BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); + // Only verify_keys that are currently valid should be used for validating requests + // as per MSC4029 + let pub_key_map = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() { + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect() + } else { + BTreeMap::new() + }, + )]); match ruma::signatures::verify_json(&pub_key_map, &request_map) { Ok(()) => (None, None, Some(x_matrix.origin), None), diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 2851ce5..bd47cb4 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,15 +1,19 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, + api::federation::discovery::{OldVerifyKey, ServerSigningKeys}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, + DeviceId, ServerName, UserId, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, globals::SigningKeys}, + services, utils, Error, Result, +}; pub const COUNTER: &[u8] = b"c"; pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; @@ -237,64 +241,97 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n" self.global.remove(b"keypair") } - fn add_signing_key( + fn add_signing_key_from_trusted_server( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + ) -> Result { + let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); + Ok( 
+ if let Some(mut prev_keys) = + prev_keys.and_then(|keys| serde_json::from_slice::(&keys).ok()) + { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. - } = new_keys; + prev_keys.verify_keys.extend(verify_keys); + prev_keys.old_verify_keys.extend(old_verify_keys); + prev_keys.valid_until_ts = new_keys.valid_until_ts; - keys.verify_keys.extend(verify_keys); - keys.old_verify_keys.extend(old_verify_keys); + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"), + )?; - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; + prev_keys.into() + } else { + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"), + )?; - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); + new_keys.into() + }, + ) + } - Ok(tree) + fn add_signing_key_from_origin( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result { + let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; + + Ok( + if let Some(mut prev_keys) = + prev_keys.and_then(|keys| serde_json::from_slice::(&keys).ok()) + { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + // Moving `verify_keys` no longer present to `old_verify_keys` + for (key_id, key) in prev_keys.verify_keys { + if !verify_keys.contains_key(&key_id) { + prev_keys + .old_verify_keys + .insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key)); + } + } + + prev_keys.verify_keys = verify_keys; + prev_keys.old_verify_keys.extend(old_verify_keys); + prev_keys.valid_until_ts = new_keys.valid_until_ts; + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"), + )?; + + prev_keys.into() + } else { + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"), + )?; + + new_keys.into() + }, + ) } /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - fn signing_keys_for( - &self, - origin: &ServerName, - ) -> Result> { + fn signing_keys_for(&self, origin: &ServerName) -> Result> { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); + .and_then(|bytes| serde_json::from_slice::(&bytes).ok()); Ok(signingkeys) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b3b7a74..70c6338 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -19,7 +19,8 @@ use ruma::{ }, TimelineEventType, }, - EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, + RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, Mutex, RwLock}; @@ -858,15 +859,46 @@ impl Service { services() .rooms .event_handler + // Generally we shouldn't be checking against expired keys unless required, so in the admin + // room it might be best to not allow expired keys .fetch_required_signing_keys(&value, &pub_key_map) .await?; - let pub_key_map = pub_key_map.read().await; - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(_) => RoomMessageEventContent::text_plain("Signature correct"), - Err(e) => RoomMessageEventContent::text_plain(format!( + let mut expired_key_map = BTreeMap::new(); + let mut valid_key_map = BTreeMap::new(); + + for (server, keys) in pub_key_map.into_inner().into_iter() { + if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() { + valid_key_map.insert( + server, + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(), + ); + } else { + expired_key_map.insert( + server, + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(), + ); + } + } + + if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() { + RoomMessageEventContent::text_plain("Signature correct") + } else if let Err(e) = + ruma::signatures::verify_json(&expired_key_map, &value) + { + RoomMessageEventContent::text_plain(format!( "Signature verification failed: {e}" - )), + )) + } else { + RoomMessageEventContent::text_plain( + "Signature correct (with expired keys)", + ) } } Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 8a66751..167e823 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,13 +1,71 @@ -use std::collections::BTreeMap; - -use async_trait::async_trait; -use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, - signatures::Ed25519KeyPair, - DeviceId, OwnedServerSigningKeyId, ServerName, UserId, +use std::{ + collections::BTreeMap, + time::{Duration, SystemTime}, }; -use crate::Result; +use crate::{services, Result}; +use async_trait::async_trait; +use ruma::{ + api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey}, + serde::Base64, + signatures::Ed25519KeyPair, + DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId, +}; +use serde::Deserialize; + +/// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation +#[derive(Deserialize, Debug, Clone)] +pub struct SigningKeys { + pub verify_keys: BTreeMap, + pub old_verify_keys: BTreeMap, + pub valid_until_ts: MilliSecondsSinceUnixEpoch, +} + +impl SigningKeys { + /// Creates the SigningKeys struct, using the keys of the current server + pub fn 
load_own_keys() -> Self { + let mut keys = Self { + verify_keys: BTreeMap::new(), + old_verify_keys: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"), + }; + + keys.verify_keys.insert( + format!("ed25519:{}", services().globals.keypair().version()), + VerifyKey { + key: Base64::new(services().globals.keypair.public_key().to_vec()), + }, + ); + + keys + } +} + +impl From for SigningKeys { + fn from(value: ServerSigningKeys) -> Self { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + valid_until_ts, + .. + } = value; + + Self { + verify_keys: verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)) + .collect(), + old_verify_keys: old_verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)) + .collect(), + valid_until_ts, + } + } +} #[async_trait] pub trait Data: Send + Sync { @@ -21,17 +79,23 @@ pub trait Data: Send + Sync { fn clear_caches(&self, amount: u32); fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; - fn add_signing_key( + /// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly + /// recieve requests from the origin server, we want to be able to accept requests from them + fn add_signing_key_from_trusted_server( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result>; - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - fn signing_keys_for( + ) -> Result; + /// Extends cached keys, as well as moving verify_keys that are not present in these new keys to + /// old_verify_keys, so that potnetially comprimised keys cannot be used to make requests + fn add_signing_key_from_origin( &self, origin: &ServerName, - ) -> Result>; + new_keys: ServerSigningKeys, + ) -> Result; + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for(&self, origin: &ServerName) -> Result>; fn database_version(&self) -> Result; fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index caf2b3a..fc695f8 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,9 +1,8 @@ mod data; pub use data::Data; -use ruma::{ - serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedServerSigningKeyId, OwnedUserId, -}; +pub use data::SigningKeys; +use ruma::MilliSecondsSinceUnixEpoch; +use ruma::{serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId}; use ruma::{OwnedRoomAliasId, RoomAliasId}; use crate::api::server_server::FedDest; @@ -14,10 +13,7 @@ use hickory_resolver::TokioAsyncResolver; use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use ruma::{ - api::{ - client::sync::sync_events, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, + api::{client::sync::sync_events, federation::discovery::ServerSigningKeys}, DeviceId, RoomVersionId, ServerName, UserId, }; use std::str::FromStr; @@ -393,36 +389,89 @@ impl Service { room_versions } - /// TODO: the key valid until timestamp is only honored in room version > 4 - /// Remove the outdated keys and insert the new ones. - /// /// This doesn't actually check that the keys provided are newer than the old set. 
- pub fn add_signing_key( + pub fn add_signing_key_from_trusted_server( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { - self.db.add_signing_key(origin, new_keys) + ) -> Result { + self.db + .add_signing_key_from_trusted_server(origin, new_keys) } - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - pub fn signing_keys_for( + /// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_signing_keys + pub fn add_signing_key_from_origin( &self, origin: &ServerName, - ) -> Result> { - let mut keys = self.db.signing_keys_for(origin)?; - if origin == self.server_name() { - keys.insert( - format!("ed25519:{}", services().globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(self.keypair.public_key().to_vec()), - }, - ); - } + new_keys: ServerSigningKeys, + ) -> Result { + self.db.add_signing_key_from_origin(origin, new_keys) + } - Ok(keys) + /// This returns Ok(None) when there are no keys found for the server. + pub fn signing_keys_for(&self, origin: &ServerName) -> Result> { + Ok(self.db.signing_keys_for(origin)?.or_else(|| { + if origin == self.server_name() { + Some(SigningKeys::load_own_keys()) + } else { + None + } + })) + } + + /// Filters the key map of multiple servers down to keys that should be accepted given the expiry time, + /// room version, and timestamp of the paramters + pub fn filter_keys_server_map( + &self, + keys: BTreeMap, + timestamp: MilliSecondsSinceUnixEpoch, + room_version_id: &RoomVersionId, + ) -> BTreeMap> { + keys.into_iter() + .filter_map(|(server, keys)| { + self.filter_keys_single_server(keys, timestamp, room_version_id) + .map(|keys| (server, keys)) + }) + .collect() + } + + /// Filters the keys of a single server down to keys that should be accepted given the expiry time, + /// room version, and timestamp of the paramters + pub fn filter_keys_single_server( + &self, + keys: SigningKeys, + timestamp: MilliSecondsSinceUnixEpoch, + room_version_id: &RoomVersionId, + ) -> Option> { + if keys.valid_until_ts > timestamp + // valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4. 
+ // https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server + || matches!(room_version_id, RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V4 + | RoomVersionId::V3) + { + // Given that either the room version allows stale keys, or the valid_until_ts is + // in the future, all verify_keys are valid + let mut map: BTreeMap<_, _> = keys + .verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(); + + map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| { + // Even on old room versions, we don't allow old keys if they are expired + if key.expired_ts > timestamp { + Some((id, key.key)) + } else { + None + } + })); + + Some(map) + } else { + None + } } pub fn database_version(&self) -> Result { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 13d855d..0bdfd4a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -9,6 +9,7 @@ use std::{ }; use futures_util::{stream::FuturesUnordered, Future, StreamExt}; +use globals::SigningKeys; use ruma::{ api::{ client::error::ErrorKind, @@ -30,7 +31,6 @@ use ruma::{ StateEventType, TimelineEventType, }, int, - serde::Base64, state_res::{self, RoomVersion, StateMap}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName, @@ -78,7 +78,7 @@ impl Service { room_id: &'a RoomId, value: BTreeMap, is_timeline_event: bool, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>, ) -> Result>> { // 0. Check the server is in the room if !services().rooms.metadata.exists(room_id)? { @@ -304,19 +304,12 @@ impl Service { room_id: &'a RoomId, mut value: BTreeMap, auth_events_known: bool, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { // 1.1. Remove unsigned field value.remove("unsigned"); - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - self.fetch_required_signing_keys(&value, pub_key_map) - .await?; - // 2. Check signatures, otherwise drop // 3. 
check content hash, redact if doesn't match let create_event_content: RoomCreateEventContent = @@ -329,41 +322,80 @@ impl Service { let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - let guard = pub_key_map.read().await; - let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e,); - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Signature verification failed", - )); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - let obj = match ruma::canonical_json::redact(value, room_version_id, None) { - Ok(obj) => obj, - Err(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Redaction failed", - )) - } - }; + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - // Skip the PDU if it is redacted and we already have it as an outlier event - if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + self.fetch_required_signing_keys(&value, pub_key_map) + .await?; + + let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| { + error!("Invalid PDU, no origin_server_ts field"); + Error::BadRequest( + ErrorKind::MissingParam, + "Invalid PDU, no origin_server_ts field", + ) + })?; + + let origin_server_ts: MilliSecondsSinceUnixEpoch = { + let ts = origin_server_ts.as_integer().ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "origin_server_ts must be an integer", + ) + })?; + + MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch") + })?) 
+ }; + + let guard = pub_key_map.read().await; + + let pkey_map = (*guard).clone(); + + // Removing all the expired keys, unless the room version allows stale keys + let filtered_keys = services().globals.filter_keys_server_map( + pkey_map, + origin_server_ts, + room_version_id, + ); + + let mut val = + match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e,); return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Event was redacted and we already knew about it", + "Signature verification failed", )); } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + let obj = match ruma::canonical_json::redact(value, room_version_id, None) { + Ok(obj) => obj, + Err(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Redaction failed", + )) + } + }; - obj - } - Ok(ruma::signatures::Verified::All) => value, - }; + // Skip the PDU if it is redacted and we already have it as an outlier event + if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event was redacted and we already knew about it", + )); + } + + obj + } + Ok(ruma::signatures::Verified::All) => value, + }; drop(guard); @@ -487,7 +519,7 @@ impl Service { create_event: &PduEvent, origin: &ServerName, room_id: &RoomId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result>> { // Skip the PDU if we already have it as a timeline event if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { @@ -1097,7 +1129,7 @@ impl Service { create_event: &'a PduEvent, room_id: &'a RoomId, room_version_id: &'a RoomVersionId, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>, ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { Box::pin(async move { @@ -1280,7 +1312,7 @@ impl Service { create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, initial_set: Vec>, ) -> Result<( Vec>, @@ -1378,7 +1410,7 @@ impl Service { pub(crate) async fn fetch_required_signing_keys( &self, event: &BTreeMap, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result<()> { let signatures = event .get("signatures") @@ -1407,6 +1439,7 @@ impl Service { ) })?, signature_ids, + true, ) .await; @@ -1434,7 +1467,7 @@ impl Service { pdu: &RawJsonValue, servers: &mut BTreeMap>, room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>, ) -> Result<()> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -1485,8 +1518,18 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let contains_all_ids = |keys: &BTreeMap| { - signature_ids.iter().all(|id| keys.contains_key(id)) + let contains_all_ids = |keys: &SigningKeys| { + signature_ids.iter().all(|id| { + keys.verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id) + || keys + .old_verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id) + }) }; let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { @@ -1499,19 +1542,14 @@ impl Service { trace!("Loading signing keys for {}", origin); - let result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); + if let Some(result) = services().globals.signing_keys_for(origin)? { + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.to_owned(), BTreeMap::new()); + pub_key_map.insert(origin.to_string(), result); } - - pub_key_map.insert(origin.to_string(), result); } Ok(()) @@ -1521,7 +1559,7 @@ impl Service { &self, event: &create_join_event::v2::Response, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result<()> { let mut servers: BTreeMap< OwnedServerName, @@ -1584,10 +1622,7 @@ impl Service { let result = services() .globals - .add_signing_key(&k.server_name, k.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); + .add_signing_key_from_trusted_server(&k.server_name, k.clone())?; pkm.insert(k.server_name.to_string(), result); } @@ -1618,12 +1653,9 @@ impl Service { if let (Ok(get_keys_response), origin) = result { info!("Result is from {origin}"); if let Ok(key) = get_keys_response.server_key.deserialize() { - let result: BTreeMap<_, _> = services() + let result = services() .globals - .add_signing_key(&origin, key)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); + .add_signing_key_from_origin(&origin, key)?; pub_key_map.write().await.insert(origin.to_string(), result); } } @@ -1681,9 +1713,23 @@ impl Service { &self, origin: &ServerName, signature_ids: Vec, - ) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + // Whether to ask for keys from trusted servers. Should be false when getting + // keys for validating requests, as per MSC4029 + query_via_trusted_servers: bool, + ) -> Result { + let contains_all_ids = |keys: &SigningKeys| { + signature_ids.iter().all(|id| { + keys.verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id) + || keys + .old_verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id) + }) + }; let permit = services() .globals @@ -1744,94 +1790,172 @@ impl Service { trace!("Loading signing keys for {}", origin); - let mut result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); + let result = services().globals.signing_keys_for(origin)?; - if contains_all_ids(&result) { - return Ok(result); + let mut expires_soon_or_has_expired = false; + + if let Some(result) = result.clone() { + let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(30 * 60), + ) + .expect("Should be valid until year 500,000,000"); + + debug!( + "The treshhold is {:?}, found time is {:?} for server {}", + ts_threshold, result.valid_until_ts, origin + ); + + if contains_all_ids(&result) { + // We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them + if result.valid_until_ts > ts_threshold { + debug!( + "Keys for {} are deemed as valid, as they expire at {:?}", + &origin, &result.valid_until_ts + ); + return Ok(result); + } + + expires_soon_or_has_expired = true; + } } + let mut keys = result.unwrap_or_else(|| SigningKeys { + verify_keys: BTreeMap::new(), + old_verify_keys: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::now(), + }); + + // We want to set this to the max, and then lower it whenever we see older keys + keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"); + debug!("Fetching signing keys for {} over federation", origin); - if let Some(server_key) = services() + if let Some(mut server_key) = services() .sending .send_federation_request(origin, get_server_keys::v2::Request::new()) .await .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { + // Keys should only be valid for a maximum of seven days + server_key.valid_until_ts = server_key.valid_until_ts.min( + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"), + ); + services() .globals - .add_signing_key(origin, server_key.clone())?; + .add_signing_key_from_origin(origin, server_key.clone())?; - result.extend( + if keys.valid_until_ts > server_key.valid_until_ts { + keys.valid_until_ts = server_key.valid_until_ts; + } + + keys.verify_keys.extend( server_key .verify_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), + .map(|(id, key)| (id.to_string(), key)), ); - result.extend( + keys.old_verify_keys.extend( server_key .old_verify_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), + .map(|(id, key)| (id.to_string(), key)), ); - if contains_all_ids(&result) { - return Ok(result); + if contains_all_ids(&keys) { + return Ok(keys); } } - for server in services().globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin.to_owned(), - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) - { - trace!("Got signing keys: {:?}", server_keys); - for k in server_keys { - services().globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys + if query_via_trusted_servers { + for server in services().globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", 
server, origin); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin.to_owned(), + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for mut k in server_keys { + if k.valid_until_ts + // Half an hour should give plenty of time for the server to respond with keys that are still + // valid, given we requested keys which are valid at least an hour from now + < MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(30 * 60), + ) + .expect("Should be valid until year 500,000,000") + { + // Keys should only be valid for a maximum of seven days + k.valid_until_ts = k.valid_until_ts.min( + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"), + ); - if contains_all_ids(&result) { - return Ok(result); + if keys.valid_until_ts > k.valid_until_ts { + keys.valid_until_ts = k.valid_until_ts; + } + + services() + .globals + .add_signing_key_from_trusted_server(origin, k.clone())?; + keys.verify_keys.extend( + k.verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)), + ); + keys.old_verify_keys.extend( + k.old_verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)), + ); + } else { + warn!( + "Server {} gave us keys older than we requested, valid until: {:?}", + origin, k.valid_until_ts + ); + } + + if contains_all_ids(&keys) { + return Ok(keys); + } + } } } } + // We should return these keys if fresher keys were not found + if expires_soon_or_has_expired { + info!("Returning stale keys for {}", origin); + return Ok(keys); + } + drop(permit); back_off(signature_ids).await; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5908a2e..29d8339 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -21,7 +21,6 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - serde::Base64, state_res::{self, Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, @@ -33,7 +32,10 @@ use tracing::{error, info, warn}; use crate::{ api::server_server, - service::pdu::{EventHash, PduBuilder}, + service::{ + globals::SigningKeys, + pdu::{EventHash, PduBuilder}, + }, services, utils, Error, PduEvent, Result, }; @@ -1214,7 +1216,7 @@ impl Service { &self, origin: &ServerName, pdu: Box, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result<()> { let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; From 7a5b8930134cf7ea5ff9880e6fa468b2b3e05c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Jun 2024 19:43:18 +0200 Subject: [PATCH 1695/1727] Bump version --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20013bd..9e47fc4 100644 --- a/Cargo.lock 
+++ b/Cargo.lock @@ -487,7 +487,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.8.0-alpha" +version = "0.8.0" dependencies = [ "async-trait", "axum 0.7.5", diff --git a/Cargo.toml b/Cargo.toml index 66f6adb..9452b3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ license = "Apache-2.0" name = "conduit" readme = "README.md" repository = "https://gitlab.com/famedly/conduit" -version = "0.8.0-alpha" +version = "0.8.0" # See also `rust-toolchain.toml` rust-version = "1.78.0" From 16af8b58aec03fe9daef9a7f941ac6a2705ccc94 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 9 Jun 2024 11:19:23 +0100 Subject: [PATCH 1696/1727] ci: build for Debian aarch64 --- .gitlab-ci.yml | 6 ++++++ docs/deploying/generic.md | 2 ++ 2 files changed, 8 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 90de602..3346795 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -103,6 +103,11 @@ artifacts: - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl - cp result/bin/conduit aarch64-unknown-linux-musl + - mkdir -p target/aarch64-unknown-linux-musl/release + - cp result/bin/conduit target/aarch64-unknown-linux-musl/release + - direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl + - mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb + - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl - cp result oci-image-arm64v8.tar.gz @@ -114,6 +119,7 @@ artifacts: - x86_64-unknown-linux-musl - aarch64-unknown-linux-musl - x86_64-unknown-linux-musl.deb + - aarch64-unknown-linux-musl.deb - oci-image-amd64.tar.gz - oci-image-arm64v8.tar.gz - public diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 5710df8..8ff0c6b 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -17,6 +17,7 @@ You may simply download the binary that fits your machine. Run `uname -m` to see | Target | Type | Download | |-|-|-| | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | +| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) | | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) | @@ -30,6 +31,7 @@ If you use a system with an older glibc version (e.g. 
RHEL8), you might need to | Target | Type | Download | |-|-|-| | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | +| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) | | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) | From ba1138aaa338e64636d76c0cc8bca84fd0c17f28 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 14 Jun 2024 12:33:40 +0100 Subject: [PATCH 1697/1727] chore: bump version to 0.9.0-alpha --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e47fc4..ea84fc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,7 +487,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.8.0" +version = "0.9.0-alpha" dependencies = [ "async-trait", "axum 0.7.5", diff --git a/Cargo.toml b/Cargo.toml index 9452b3d..c74773a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ license = "Apache-2.0" name = "conduit" readme = "README.md" repository = "https://gitlab.com/famedly/conduit" -version = "0.8.0" +version = "0.9.0-alpha" # See also `rust-toolchain.toml` rust-version = "1.78.0" From fd19dda5cb30ee46d83225a4fdb23aa7c34ce24e Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 16 Jun 2024 17:28:05 +0100 Subject: [PATCH 1698/1727] ci: use nightly rustfmt we were using this before, but it broke when refactoring the flake out into separate files --- nix/shell.nix | 4 ++-- src/api/ruma_wrapper/axum.rs | 7 +++++-- src/service/globals/mod.rs | 12 ++++++------ 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/nix/shell.nix b/nix/shell.nix index bd070fe..584a6bb 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -23,7 +23,7 @@ mkShell { }; # Development tools - nativeBuildInputs = default.nativeBuildInputs ++ [ + nativeBuildInputs = [ # Always use nightly rustfmt because most of its options are unstable # # This needs to come before `toolchain` in this list, otherwise @@ -57,5 +57,5 @@ mkShell { # Useful for editing the book locally mdbook - ]; + ] ++ default.nativeBuildInputs ; } diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 047f7dc..aaabb1c 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -7,8 +7,11 @@ use axum::{ response::{IntoResponse, Response}, RequestExt, RequestPartsExt, }; -use axum_extra::headers::authorization::Bearer; -use axum_extra::{headers::Authorization, typed_header::TypedHeaderRejectionReason, TypedHeader}; +use axum_extra::{ + headers::{authorization::Bearer, Authorization}, + typed_header::TypedHeaderRejectionReason, + TypedHeader, +}; use bytes::{BufMut, BytesMut}; use http::{Request, StatusCode}; use ruma::{ diff --git a/src/service/globals/mod.rs 
b/src/service/globals/mod.rs index fc695f8..c22ffef 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,9 +1,9 @@ mod data; -pub use data::Data; -pub use data::SigningKeys; -use ruma::MilliSecondsSinceUnixEpoch; -use ruma::{serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId}; -use ruma::{OwnedRoomAliasId, RoomAliasId}; +pub use data::{Data, SigningKeys}; +use ruma::{ + serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, +}; use crate::api::server_server::FedDest; @@ -16,7 +16,6 @@ use ruma::{ api::{client::sync::sync_events, federation::discovery::ServerSigningKeys}, DeviceId, RoomVersionId, ServerName, UserId, }; -use std::str::FromStr; use std::{ collections::{BTreeMap, HashMap}, error::Error as StdError, @@ -25,6 +24,7 @@ use std::{ iter, net::{IpAddr, SocketAddr}, path::PathBuf, + str::FromStr, sync::{ atomic::{self, AtomicBool}, Arc, RwLock as StdRwLock, From 9014e43ce1eece1b712ec7236655578a85d38cf6 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Fri, 21 Jun 2024 08:29:33 +0100 Subject: [PATCH 1699/1727] chore: bump rust to 1.79.0 and apply new lints --- Cargo.toml | 2 +- complement/Dockerfile | 2 +- flake.nix | 2 +- rust-toolchain.toml | 3 +-- src/service/media/mod.rs | 14 ++++++-------- 5 files changed, 10 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c74773a..67128f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ repository = "https://gitlab.com/famedly/conduit" version = "0.9.0-alpha" # See also `rust-toolchain.toml` -rust-version = "1.78.0" +rust-version = "1.79.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/complement/Dockerfile b/complement/Dockerfile index 341470a..e7cde40 100644 --- a/complement/Dockerfile +++ b/complement/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.78.0 +FROM rust:1.79.0 WORKDIR /workdir diff --git a/flake.nix b/flake.nix index 9132544..f36f7e7 100644 --- a/flake.nix +++ b/flake.nix @@ -59,7 +59,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU="; + sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU="; }; }); in diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 3ffd3a5..957c8f4 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,7 +2,6 @@ # # Other files that need upkeep when this changes: # -# * `.gitlab-ci.yml` # * `Cargo.toml` # * `flake.nix` # @@ -10,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.78.0" +channel = "1.79.0" components = [ # For rust-analyzer "rust-src", diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 0340ab4..1a80400 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -166,22 +166,20 @@ impl Service { / u64::from(original_height) }; if use_width { - if intermediate <= u64::from(::std::u32::MAX) { + if intermediate <= u64::from(u32::MAX) { (width, intermediate as u32) } else { ( - (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ::std::u32::MAX, + (u64::from(width) * u64::from(u32::MAX) / intermediate) as u32, + u32::MAX, ) } - } else if intermediate <= u64::from(::std::u32::MAX) { + } else if intermediate <= u64::from(u32::MAX) { (intermediate as u32, height) } else { ( - ::std::u32::MAX, - (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) - as u32, + u32::MAX, + (u64::from(height) * u64::from(u32::MAX) / intermediate) as u32, ) } }; From 602c56cae9bbe33e820e0a5641eff9fde8ced968 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 25 Jun 2024 09:39:06 +0100 Subject: [PATCH 1700/1727] chore: bump ruma --- Cargo.lock | 45 ++++++++++++++++++-------------- src/api/client_server/account.rs | 12 +++++++-- src/api/client_server/media.rs | 6 ++++- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 9 ++++--- 5 files changed, 47 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea84fc0..30d951a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1158,6 +1158,15 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-auth" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643c9bbf6a4ea8a656d6b4cd53d34f79e3f841ad5203c1a55fb7d761923bc255" +dependencies = [ + "memchr", +] + [[package]] name = "http-body" version = "0.4.6" @@ -2223,7 +2232,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "assign", "js_int", @@ -2244,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "js_int", "ruma-common", @@ -2256,7 +2265,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "as_variant", "assign", @@ -2279,7 +2288,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "as_variant", "base64 0.22.1", @@ -2309,7 +2318,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "as_variant", "indexmap 2.2.6", @@ -2331,7 +2340,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "js_int", "ruma-common", @@ -2343,7 +2352,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "js_int", "thiserror", @@ -2352,7 +2361,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "js_int", "ruma-common", @@ -2362,7 +2371,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "once_cell", "proc-macro-crate", @@ -2377,7 +2386,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "js_int", "ruma-common", @@ -2389,18 +2398,20 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "headers", + "http 1.1.0", + "http-auth", "ruma-common", + "thiserror", "tracing", - "yap", ] [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -2416,7 +2427,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e" +source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" dependencies = [ "itertools", "js_int", @@ -3757,12 +3768,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" -[[package]] -name = "yap" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe269e7b803a5e8e20cbd97860e136529cd83bf2c9c6d37b142467e7e1f051f" - [[package]] name = "zerocopy" version = "0.7.34" diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 36640b5..47ccdc8 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -315,7 +315,11 @@ pub async fn register_route(body: Ruma) -> Result, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body + .sender_user + .as_ref() + // In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently + .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut 
uiaainfo = UiaaInfo { @@ -402,7 +406,11 @@ pub async fn whoami_route(body: Ruma) -> Result, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body + .sender_user + .as_ref() + // In the future password changes could be performed with UIA with SSO, but we don't support that currently + .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 5cd2b2f..10890f9 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,3 +1,6 @@ +// Unauthenticated media is deprecated +#![allow(deprecated)] + use std::time::Duration; use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; @@ -190,7 +193,7 @@ pub async fn get_content_thumbnail_route( content_type, cross_origin_resource_policy: Some("cross-origin".to_owned()), }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + } else if body.server_name != services().globals.server_name() && body.allow_remote { let get_thumbnail_response = services() .sending .send_federation_request( @@ -204,6 +207,7 @@ pub async fn get_content_thumbnail_route( media_id: body.media_id.clone(), timeout_ms: Duration::from_secs(20), allow_redirect: false, + animated: body.animated, }, ) .await?; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index aaabb1c..2c5da21 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -189,7 +189,7 @@ where let origin_signatures = BTreeMap::from_iter([( x_matrix.key.clone(), - CanonicalJsonValue::String(x_matrix.sig), + CanonicalJsonValue::String(x_matrix.sig.to_string()), )]); let signatures = BTreeMap::from_iter([( diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 605a467..9d63289 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -6,8 +6,9 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; +use axum_extra::headers::authorization::Credentials; use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION}; +use http::header::AUTHORIZATION; use ruma::{ api::{ @@ -44,6 +45,7 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::{Base64, JsonObject, Raw}, + server_util::authorization::XMatrix, to_device::DeviceIdOrAllDevices, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, @@ -226,14 +228,15 @@ where for s in signature_server { http_request.headers_mut().insert( AUTHORIZATION, - HeaderValue::from_str(&format!( + XMatrix::parse(&format!( "X-Matrix origin=\"{}\",destination=\"{}\",key=\"{}\",sig=\"{}\"", services().globals.server_name(), destination, s.0, s.1 )) - .unwrap(), + .expect("When Ruma signs JSON, it produces a valid base64 signature. 
All other types are valid ServerNames or OwnedKeyId") + .encode(), ); } } From 62f1da053f59d1b7ca98c22b548dcdaee27de31d Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 22 Jun 2024 11:50:39 +0100 Subject: [PATCH 1701/1727] feat(appservice): support timestamped messaging --- src/api/client_server/membership.rs | 7 +++++++ src/api/client_server/message.rs | 5 +++++ src/api/client_server/profile.rs | 2 ++ src/api/client_server/redact.rs | 1 + src/api/client_server/room.rs | 14 ++++++++++++++ src/api/client_server/state.rs | 14 +++++++++++++- src/api/server_server.rs | 1 + src/service/admin/mod.rs | 14 ++++++++++++++ src/service/pdu.rs | 4 ++++ src/service/rooms/state_accessor/mod.rs | 1 + src/service/rooms/timeline/mod.rs | 11 ++++++----- 11 files changed, 68 insertions(+), 6 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 1ca711e..5c0169d 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -241,6 +241,7 @@ pub async fn kick_user_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, @@ -313,6 +314,7 @@ pub async fn ban_user_route(body: Ruma) -> Result( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, room_id, @@ -1379,6 +1384,7 @@ pub(crate) async fn invite_helper<'a>( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, room_id, @@ -1506,6 +1512,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option, state_key: String, + timestamp: Option, ) -> Result> { let sender_user = sender; @@ -243,6 +254,7 @@ async fn send_state_event_for_key_helper( unsigned: None, state_key: Some(state_key), redacts: None, + timestamp, }, sender_user, room_id, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 605a467..8d205f5 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1445,6 +1445,7 @@ pub async fn create_join_event_template_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, &body.user_id, &body.room_id, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 70c6338..583bfcd 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -246,6 +246,7 @@ impl Service { unsigned: None, state_key: None, redacts: None, + timestamp: None, }, conduit_user, &conduit_room, @@ -1105,6 +1106,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1133,6 +1135,7 @@ impl Service { unsigned: None, state_key: Some(conduit_user.to_string()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1158,6 +1161,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1177,6 +1181,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1198,6 +1203,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1219,6 +1225,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1239,6 +1246,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, 
&room_id, @@ -1259,6 +1267,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1283,6 +1292,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1352,6 +1362,7 @@ impl Service { unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1378,6 +1389,7 @@ impl Service { unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, user_id, &room_id, @@ -1404,6 +1416,7 @@ impl Service { unsigned: None, state_key: Some("".to_owned()), redacts: None, + timestamp: None, }, conduit_user, &room_id, @@ -1423,6 +1436,7 @@ impl Service { unsigned: None, state_key: None, redacts: None, + timestamp: None, }, conduit_user, &room_id, diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 6991a08..dab7b6e 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -460,4 +460,8 @@ pub struct PduBuilder { pub unsigned: Option>, pub state_key: Option, pub redacts: Option>, + /// For timestamped messaging, should only be used for appservices + /// + /// Will be set to current time if None + pub timestamp: Option, } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 53e3176..aa654ae 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -321,6 +321,7 @@ impl Service { unsigned: None, state_key: Some(target_user.into()), redacts: None, + timestamp: None, }; Ok(services() diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 29d8339..6603ea6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -22,8 +22,8 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -665,6 +665,7 @@ impl Service { unsigned, state_key, redacts, + timestamp, } = pdu_builder; let prev_events: Vec<_> = services() @@ -734,9 +735,9 @@ impl Service { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), + origin_server_ts: timestamp + .map(|ts| ts.get()) + .unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()), kind: event_type, content, state_key, From 2f45a907f94eb288cef4ab22401af3ba0dff5f52 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 26 Jun 2024 22:05:44 +0100 Subject: [PATCH 1702/1727] fix: don't ignore ACLs when there is no content despite this being very bad behavior, it is required by the spec --- src/service/rooms/event_handler/mod.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0bdfd4a..002b8d7 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1687,11 +1687,6 @@ impl Service { } }; - if acl_event_content.allow.is_empty() { - // Ignore 
broken acl events - return Ok(()); - } - if acl_event_content.is_allowed(server_name) { Ok(()) } else { From ba8429cafe0ddb63adcb54729374e5424d6969e9 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Mon, 24 Jun 2024 22:12:54 +0100 Subject: [PATCH 1703/1727] fix: don't cache server name lookups indefinitely --- src/api/server_server.rs | 439 ++++++++++++++++++++++++------------- src/service/globals/mod.rs | 4 +- 2 files changed, 285 insertions(+), 158 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9d63289..29dd573 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -6,7 +6,7 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use axum_extra::headers::authorization::Credentials; +use axum_extra::headers::{authorization::Credentials, CacheControl, Header}; use get_profile_information::v1::ProfileField; use http::header::AUTHORIZATION; @@ -96,13 +96,6 @@ impl FedDest { } } - fn into_uri_string(self) -> String { - match self { - Self::Literal(addr) => addr.to_string(), - Self::Named(host, ref port) => host + port, - } - } - fn hostname(&self) -> String { match &self { Self::Literal(addr) => addr.ip().to_string(), @@ -138,8 +131,6 @@ where debug!("Preparing to send request to {destination}"); - let mut write_destination_to_cache = false; - let cached_result = services() .globals .actual_destination_cache @@ -148,14 +139,63 @@ where .get(destination) .cloned(); - let (actual_destination, host) = if let Some(result) = cached_result { - result + let actual_destination = if let Some(DestinationResponse { + actual_destination, + dest_type, + }) = cached_result + { + match dest_type { + DestType::IsIpOrHasPort => actual_destination, + DestType::LookupFailed { + well_known_retry, + well_known_backoff_mins, + } => { + if well_known_retry < Instant::now() { + find_actual_destination(destination, None, false, Some(well_known_backoff_mins)) + .await + } else { + actual_destination + } + } + + DestType::WellKnown { expires } => { + if expires < Instant::now() { + find_actual_destination(destination, None, false, None).await + } else { + actual_destination + } + } + DestType::WellKnownSrv { + srv_expires, + well_known_expires, + well_known_host, + } => { + if well_known_expires < Instant::now() { + find_actual_destination(destination, None, false, None).await + } else if srv_expires < Instant::now() { + find_actual_destination(destination, Some(well_known_host), true, None).await + } else { + actual_destination + } + } + DestType::Srv { + well_known_retry, + well_known_backoff_mins, + srv_expires, + } => { + if well_known_retry < Instant::now() { + find_actual_destination(destination, None, false, Some(well_known_backoff_mins)) + .await + } else if srv_expires < Instant::now() { + find_actual_destination(destination, None, true, Some(well_known_backoff_mins)) + .await + } else { + actual_destination + } + } + } } else { - write_destination_to_cache = true; - - let result = find_actual_destination(destination).await; - - (result.0, result.1.into_uri_string()) + find_actual_destination(destination, None, false, None).await }; let actual_destination_str = actual_destination.clone().into_https_string(); @@ -293,17 +333,6 @@ where if status == 200 { debug!("Parsing response bytes from {destination}"); let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - services() - .globals - .actual_destination_cache - .write() - 
.await - .insert( - OwnedServerName::from(destination), - (actual_destination, host), - ); - } response.map_err(|e| { warn!( @@ -348,142 +377,211 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { FedDest::Named(host.to_owned(), port.to_owned()) } -/// Returns: actual_destination, host header -/// Implemented according to the specification at +#[derive(Clone)] +pub struct DestinationResponse { + pub actual_destination: FedDest, + pub dest_type: DestType, +} + +#[derive(Clone)] +pub enum DestType { + WellKnownSrv { + srv_expires: Instant, + well_known_expires: Instant, + well_known_host: String, + }, + WellKnown { + expires: Instant, + }, + Srv { + srv_expires: Instant, + well_known_retry: Instant, + well_known_backoff_mins: u16, + }, + IsIpOrHasPort, + LookupFailed { + well_known_retry: Instant, + well_known_backoff_mins: u16, + }, +} + +/// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of specification -async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { +async fn find_actual_destination( + destination: &'_ ServerName, + // The host used to potentially lookup SRV records against, only used when only_request_srv is true + well_known_dest: Option, + // Should be used when only the SRV lookup has expired + only_request_srv: bool, + // The backoff time for the last well known failure, if any + well_known_backoff_mins: Option, +) -> FedDest { debug!("Finding actual destination for {destination}"); - let destination_str = destination.as_str().to_owned(); - let mut hostname = destination_str.clone(); - let actual_destination = match get_ip_with_port(&destination_str) { - Some(host_port) => { - debug!("1: IP literal with provided or default port"); - host_port - } - None => { - if let Some(pos) = destination_str.find(':') { - debug!("2: Hostname with included port"); - let (host, port) = destination_str.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) + let destination_str = destination.to_string(); + let next_backoff_mins = well_known_backoff_mins + // Errors are recommended to be cached for up to an hour + .map(|mins| (mins * 2).min(60)) + .unwrap_or(1); + + let (actual_destination, dest_type) = if only_request_srv { + let destination_str = well_known_dest.unwrap_or(destination_str); + let (dest, expires) = get_srv_destination(destination_str).await; + let well_known_retry = + Instant::now() + Duration::from_secs((60 * next_backoff_mins).into()); + ( + dest, + if let Some(expires) = expires { + DestType::Srv { + well_known_backoff_mins: next_backoff_mins, + srv_expires: expires, + + well_known_retry, + } } else { - debug!("Requesting well known for {destination}"); - match request_well_known(destination.as_str()).await { - Some(delegated_hostname) => { - debug!("3: A .well-known file is available"); - hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = delegated_hostname.find(':') { - debug!("3.2: Hostname with port in .well-known file"); - let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - debug!("Delegated hostname has no port in this branch"); - if let Some(hostname_override) = - query_srv_record(&delegated_hostname).await - { - debug!("3.3: SRV lookup successful"); - let force_port = hostname_override.port(); - - if 
let Ok(override_ip) = services() - .globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - services() - .globals - .tls_name_override - .write() - .unwrap() - .insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{port}")) - } else { - add_port_to_hostname(&delegated_hostname) - } + DestType::LookupFailed { + well_known_retry, + well_known_backoff_mins: next_backoff_mins, + } + }, + ) + } else { + match get_ip_with_port(&destination_str) { + Some(host_port) => { + debug!("1: IP literal with provided or default port"); + (host_port, DestType::IsIpOrHasPort) + } + None => { + if let Some(pos) = destination_str.find(':') { + debug!("2: Hostname with included port"); + let (host, port) = destination_str.split_at(pos); + ( + FedDest::Named(host.to_owned(), port.to_owned()), + DestType::IsIpOrHasPort, + ) + } else { + debug!("Requesting well known for {destination_str}"); + match request_well_known(destination_str.as_str()).await { + Some((delegated_hostname, timestamp)) => { + debug!("3: A .well-known file is available"); + match get_ip_with_port(&delegated_hostname) { + // 3.1: IP literal in .well-known file + Some(host_and_port) => { + (host_and_port, DestType::WellKnown { expires: timestamp }) + } + None => { + if let Some(pos) = delegated_hostname.find(':') { + debug!("3.2: Hostname with port in .well-known file"); + let (host, port) = delegated_hostname.split_at(pos); + ( + FedDest::Named(host.to_owned(), port.to_owned()), + DestType::WellKnown { expires: timestamp }, + ) } else { - debug!("3.4: No SRV records, just use the hostname from .well-known"); - add_port_to_hostname(&delegated_hostname) + debug!("Delegated hostname has no port in this branch"); + let (dest, srv_expires) = + get_srv_destination(delegated_hostname.clone()).await; + ( + dest, + if let Some(srv_expires) = srv_expires { + DestType::WellKnownSrv { + srv_expires, + well_known_expires: timestamp, + well_known_host: delegated_hostname, + } + } else { + DestType::WellKnown { expires: timestamp } + }, + ) } } } } - } - None => { - debug!("4: No .well-known or an error occured"); - match query_srv_record(&destination_str).await { - Some(hostname_override) => { - debug!("4: SRV record found"); - let force_port = hostname_override.port(); - - if let Ok(override_ip) = services() - .globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - services() - .globals - .tls_name_override - .write() - .unwrap() - .insert( - hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); + None => { + debug!("4: No .well-known or an error occured"); + let (dest, expires) = get_srv_destination(destination_str).await; + let well_known_retry = Instant::now() + + Duration::from_secs((60 * next_backoff_mins).into()); + ( + dest, + if let Some(expires) = expires { + DestType::Srv { + srv_expires: expires, + well_known_retry, + well_known_backoff_mins: next_backoff_mins, + } } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{port}")) - } else { - add_port_to_hostname(&hostname) - } - } - None => { - debug!("5: No SRV record found"); - add_port_to_hostname(&destination_str) - } + DestType::LookupFailed { + well_known_retry, + 
well_known_backoff_mins: next_backoff_mins, + } + }, + ) } } } } } }; + debug!("Actual destination: {actual_destination:?}"); - // Can't use get_ip_with_port here because we don't want to add a port - // to an IP address if it wasn't specified - let hostname = if let Ok(addr) = hostname.parse::() { - FedDest::Literal(addr) - } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_owned()) - } else if let Some(pos) = hostname.find(':') { - let (host, port) = hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - FedDest::Named(hostname, ":8448".to_owned()) + let response = DestinationResponse { + actual_destination, + dest_type, }; - (actual_destination, hostname) + + services() + .globals + .actual_destination_cache + .write() + .await + .insert(destination.to_owned(), response.clone()); + + response.actual_destination } -async fn query_given_srv_record(record: &str) -> Option { +/// Looks up the SRV records for federation usage +/// +/// If no timestamp is returned, that means no SRV record was found +async fn get_srv_destination(delegated_hostname: String) -> (FedDest, Option) { + if let Some((hostname_override, timestamp)) = query_srv_record(&delegated_hostname).await { + debug!("SRV lookup successful"); + let force_port = hostname_override.port(); + + if let Ok(override_ip) = services() + .globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + delegated_hostname.clone(), + (override_ip.iter().collect(), force_port.unwrap_or(8448)), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + ( + FedDest::Named(delegated_hostname, format!(":{port}")), + Some(timestamp), + ) + } else { + (add_port_to_hostname(&delegated_hostname), Some(timestamp)) + } + } else { + debug!("No SRV records found"); + (add_port_to_hostname(&delegated_hostname), None) + } +} + +async fn query_given_srv_record(record: &str) -> Option<(FedDest, Instant)> { services() .globals .dns_resolver() @@ -491,16 +589,19 @@ async fn query_given_srv_record(record: &str) -> Option { .await .map(|srv| { srv.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()), + ( + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()), + ), + srv.as_lookup().valid_until(), ) }) }) .unwrap_or(None) } -async fn query_srv_record(hostname: &'_ str) -> Option { +async fn query_srv_record(hostname: &'_ str) -> Option<(FedDest, Instant)> { let hostname = hostname.trim_end_matches('.'); if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await @@ -511,7 +612,7 @@ async fn query_srv_record(hostname: &'_ str) -> Option { } } -async fn request_well_known(destination: &str) -> Option { +async fn request_well_known(destination: &str) -> Option<(String, Instant)> { let response = services() .globals .default_client() @@ -519,14 +620,40 @@ async fn request_well_known(destination: &str) -> Option { .send() .await; debug!("Got well known response"); - if let Err(e) = &response { - debug!("Well known error: {e:?}"); - return None; - } - let text = response.ok()?.text().await; + let response = match response { + Err(e) => { + debug!("Well known error: {e:?}"); + return None; + } + Ok(r) => r, + }; + + let mut headers = response.headers().values(); 
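
As an aside on the retry policy introduced in PATCH 1703 above: when a `.well-known` lookup fails, `find_actual_destination` doubles the previous backoff and caps it at one hour before the next attempt. A minimal sketch of that schedule follows; the helper name and the driver loop are invented for illustration, only the arithmetic is taken from the patch.

```rust
// Sketch of the well-known retry backoff from PATCH 1703: the delay doubles on
// every consecutive failure and is capped at 60 minutes.
fn next_backoff_mins(previous: Option<u16>) -> u16 {
    previous.map(|mins| (mins * 2).min(60)).unwrap_or(1)
}

fn main() {
    let mut backoff = None;
    for failure in 1..=8 {
        let mins = next_backoff_mins(backoff);
        println!("failure {failure}: retry well-known in {mins} min");
        backoff = Some(mins); // 1, 2, 4, 8, 16, 32, 60, 60, ...
    }
}
```
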
+ + let cache_for = CacheControl::decode(&mut headers) + .ok() + .and_then(|cc| { + // Servers should respect the cache control headers present on the response, or use a sensible default when headers are not present. + if cc.no_store() || cc.no_cache() { + Some(Duration::ZERO) + } else { + cc.max_age() + // Servers should additionally impose a maximum cache time for responses: 48 hours is recommended. + .map(|age| age.min(Duration::from_secs(60 * 60 * 48))) + } + }) + // The recommended sensible default is 24 hours. + .unwrap_or_else(|| Duration::from_secs(60 * 60 * 24)); + + let text = response.text().await; debug!("Got well known response text"); - let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; - Some(body.get("m.server")?.as_str()?.to_owned()) + + let host = || { + let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; + body.get("m.server")?.as_str().map(ToOwned::to_owned) + }; + + host().map(|host| (host, Instant::now() + cache_for)) } /// # `GET /_matrix/federation/v1/version` diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index c22ffef..3325e51 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -5,7 +5,7 @@ use ruma::{ OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, }; -use crate::api::server_server::FedDest; +use crate::api::server_server::DestinationResponse; use crate::{services, Config, Error, Result}; use futures_util::FutureExt; @@ -37,7 +37,7 @@ use tracing::{error, info}; use base64::{engine::general_purpose, Engine as _}; -type WellKnownMap = HashMap; +type WellKnownMap = HashMap; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( From 35ed731a4680244bdacfe53a3b6ce3cc79682d11 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Tue, 2 Jul 2024 15:39:45 +0100 Subject: [PATCH 1704/1727] feat(config): split on __, allowing for setting individual values in a table --- docs/configuration.md | 2 ++ src/main.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/configuration.md b/docs/configuration.md index d903a21..2239ac5 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -6,6 +6,8 @@ > **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect +> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. 
Example: `CONDUIT_SERVER_NAME="example.org"` + Conduit's configuration file is divided into the following sections: - [Global](#global) diff --git a/src/main.rs b/src/main.rs index 8d242c5..7134564 100644 --- a/src/main.rs +++ b/src/main.rs @@ -57,7 +57,7 @@ async fn main() { )) .nested(), ) - .merge(Env::prefixed("CONDUIT_").global()); + .merge(Env::prefixed("CONDUIT_").global().split("__")); let config = match raw_config.extract::() { Ok(s) => s, From 11187b3fad47dca8677334e733370f19309a52e1 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 6 Jul 2024 17:06:11 +0100 Subject: [PATCH 1705/1727] fix: remove TLS name override when no SRV record is present this could have been an issue in cases where there was previously a SRV record, but later got removed --- src/api/server_server.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 6fd131c..ca402f8 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -564,6 +564,13 @@ async fn get_srv_destination(delegated_hostname: String) -> (FedDest, Option Date: Sat, 6 Jul 2024 17:31:31 +0100 Subject: [PATCH 1706/1727] fix: remove TLS name override when no SRV record is present (but properly) The previous attempt only did so when no IP could be resolved, which isn't enough --- src/api/server_server.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index ca402f8..60ded05 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -583,6 +583,13 @@ async fn get_srv_destination(delegated_hostname: String) -> (FedDest, Option Date: Sat, 6 Jul 2024 22:56:08 +0100 Subject: [PATCH 1707/1727] fix: don't fail the entire transaction if any PDU's format is invalid --- src/api/server_server.rs | 157 +++++++++++++++++++-------------------- 1 file changed, 75 insertions(+), 82 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index ca402f8..a205125 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -2,7 +2,10 @@ use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, - service::pdu::{gen_event_id_canonical_json, PduBuilder}, + service::{ + globals::SigningKeys, + pdu::{gen_event_id_canonical_json, PduBuilder}, + }, services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; @@ -800,17 +803,78 @@ pub fn parse_incoming_pdu( let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { Ok(t) => t, - Err(_) => { + Err(e) => { // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); + return Err(e); } }; Ok((event_id, value, room_id)) } +/// Attempts to parse and append PDU to timeline. +/// If no event ID is returned, then the PDU was failed to be parsed. +/// If the Ok(()) is returned, then the PDU was successfully appended to the timeline. +async fn handle_pdu_in_transaction( + origin: &ServerName, + pub_key_map: &RwLock>, + pdu: &RawJsonValue, +) -> (Option, Result<()>) { + let (event_id, value, room_id) = match parse_incoming_pdu(pdu) { + Ok(t) => t, + Err(e) => { + warn!("Could not parse PDU: {e}"); + warn!("Full PDU: {:?}", &pdu); + return (None, Err(Error::BadServerResponse("Could not parse PDU"))); + } + }; + + // Makes use of the m.room.create event. If we cannot fetch this event, + // we must have never been in that room. 
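
To make the `CONDUIT_{table_name}__{field_name}` convention from PATCH 1704 above concrete, here is a minimal sketch of how figment's `Env::prefixed(...).split("__")` maps such environment variables onto nested configuration tables. The `Config` and `Tls` structs and the variable values are invented for illustration only; Conduit's real configuration struct is different, and the `set_var` calls merely stand in for variables you would export in the shell.

```rust
use figment::{providers::Env, Figment};
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Tls {
    certs: String,
}

#[derive(Deserialize, Debug)]
struct Config {
    server_name: String,
    tls: Tls,
}

fn main() -> Result<(), figment::Error> {
    // Stand-ins for variables exported in the shell.
    std::env::set_var("CONDUIT_SERVER_NAME", "example.org");
    std::env::set_var("CONDUIT_TLS__CERTS", "/etc/ssl/conduit.pem");

    // Same idea as the src/main.rs change: strip the CONDUIT_ prefix, then
    // split the remaining key on "__" so TLS__CERTS becomes the nested key tls.certs.
    let config: Config = Figment::new()
        .merge(Env::prefixed("CONDUIT_").split("__"))
        .extract()?;

    println!("{config:?}");
    Ok(())
}
```
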
+ if services().rooms.state.get_room_version(&room_id).is_err() { + debug!("Room {room_id} is not known to this server"); + return ( + Some(event_id), + Err(Error::BadServerResponse("Room is not known to this server")), + ); + } + + // We do not add the event_id field to the pdu here because of signature and hashes checks + + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let start_time = Instant::now(); + + if let Err(e) = services() + .rooms + .event_handler + .handle_incoming_pdu(origin, &event_id, &room_id, value, true, pub_key_map) + .await + { + warn!("Error appending PDU to timeline: {}: {:?}", e, pdu); + return (Some(event_id), Err(e)); + } + + drop(mutex_lock); + + let elapsed = start_time.elapsed(); + debug!( + "Handling transaction of event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + + (Some(event_id), Ok(())) +} + /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. @@ -835,77 +899,11 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - let room_id: OwnedRoomId = value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid room id in pdu", - ))?; + let (event_id, result) = + handle_pdu_in_transaction(sender_servername, &pub_key_map, pdu).await; - if services().rooms.state.get_room_version(&room_id).is_err() { - debug!("Server is not in room {room_id}"); - continue; - } - - let r = parse_incoming_pdu(pdu); - let (event_id, value, room_id) = match r { - Ok(t) => t, - Err(e) => { - warn!("Could not parse PDU: {e}"); - warn!("Full PDU: {:?}", &pdu); - continue; - } - }; - // We do not add the event_id field to the pdu here because of signature and hashes checks - - let mutex = Arc::clone( - services() - .globals - .roomid_mutex_federation - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let start_time = Instant::now(); - resolved_map.insert( - event_id.clone(), - services() - .rooms - .event_handler - .handle_incoming_pdu( - sender_servername, - &event_id, - &room_id, - value, - true, - &pub_key_map, - ) - .await - .map(|_| ()), - ); - drop(mutex_lock); - - let elapsed = start_time.elapsed(); - debug!( - "Handling transaction of event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - - for pdu in &resolved_map { - if let Err(e) = pdu.1 { - if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { - warn!("Incoming PDU failed {:?}", pdu); - } + if let Some(event_id) = event_id { + resolved_map.insert(event_id.clone(), result.map_err(|e| e.sanitized_error())); } } @@ -1074,12 +1072,7 @@ pub async fn send_transaction_message_route( } } - Ok(send_transaction_message::v1::Response { - pdus: resolved_map - .into_iter() - .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error()))) - .collect(), - }) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } /// # `GET /_matrix/federation/v1/event/{eventId}` From 6455e918be9f42fb846df2df13a86b7f97615dc5 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sun, 7 Jul 
2024 13:30:53 +0100 Subject: [PATCH 1708/1727] fix: don't always assume ruma can generate reference hashes --- src/api/client_server/membership.rs | 6 +++--- src/api/server_server.rs | 2 +- src/service/pdu.rs | 3 ++- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 5c0169d..3f3d25d 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -627,7 +627,7 @@ async fn join_room_by_id_helper( let event_id = format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") + .expect("Event format validated when event was hashed") ); let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); @@ -1145,7 +1145,7 @@ async fn validate_and_add_event_id( let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? )) .expect("ruma's reference hashes are valid event ids"); @@ -1614,7 +1614,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") + .expect("Event format validated when event was hashed") )) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index a205125..9f5ed24 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1815,7 +1815,7 @@ pub async fn create_invite_route( let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") + .expect("Event format validated when event was hashed") )) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/service/pdu.rs b/src/service/pdu.rs index dab7b6e..7934909 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,5 +1,6 @@ use crate::Error; use ruma::{ + api::client::error::ErrorKind, canonical_json::redact_content_in_place, events::{ room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, @@ -443,7 +444,7 @@ pub(crate) fn gen_event_id_canonical_json( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash(&value, room_version_id) - .expect("ruma can calculate reference hashes") + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? ) .try_into() .expect("ruma's reference hashes are valid event ids"); diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 002b8d7..0dd405c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1477,7 +1477,7 @@ impl Service { let event_id = format!( "${}", ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? 
); let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 6603ea6..9f0e290 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -815,7 +815,7 @@ impl Service { pdu.event_id = EventId::parse_arc(format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") + .expect("Event format validated when event was hashed") )) .expect("ruma's reference hashes are valid event ids"); From 324e1beabf0f7e99c3124e7629bb62f80760aa05 Mon Sep 17 00:00:00 2001 From: Craft Date: Tue, 9 Jul 2024 21:49:55 +0000 Subject: [PATCH 1709/1727] Update docker.md specifying port so that others don't also run into trying to figure this out when following this md. --- docs/deploying/docker.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index a45c670..f914427 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -64,6 +64,7 @@ docker run -d -p 8448:6167 \ -e CONDUIT_MAX_REQUEST_SIZE="20000000" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ + -e CONDUIT_PORT="6167" \ --name conduit ``` From 75a0f683494b2460ea62580b83eb5abe71483446 Mon Sep 17 00:00:00 2001 From: Dawid Rejowski Date: Thu, 18 Jul 2024 19:58:27 +0000 Subject: [PATCH 1710/1727] Update docs to point at new Synapse location --- docs/turn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/turn.md b/docs/turn.md index 11a7180..94d32db 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -2,7 +2,7 @@ ## General instructions -* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). +* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md). 
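
For context on PATCH 1708 above: the event ID of a PDU is derived from its reference hash, and that hash computation can fail on a malformed event, which is why the patch replaces the `expect("ruma can calculate reference hashes")` calls on remote input with proper error handling. A minimal sketch of the derivation pattern, assuming the ruma revision pinned in the lockfile and a hypothetical helper name:

```rust
use ruma::{CanonicalJsonObject, EventId, OwnedEventId, RoomVersionId};

/// Derive the `$`-prefixed event ID from a PDU's reference hash, returning
/// None instead of panicking when the PDU is malformed.
fn event_id_from_pdu(
    pdu: &CanonicalJsonObject,
    room_version: &RoomVersionId,
) -> Option<OwnedEventId> {
    // reference_hash fails on invalid PDU formats; callers decide how to report that.
    let hash = ruma::signatures::reference_hash(pdu, room_version).ok()?;
    EventId::parse(format!("${hash}")).ok()
}
```
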
## Edit/Add a few settings to your existing conduit.toml From 423b0928d5815978303a97442a264eb558cd4fb8 Mon Sep 17 00:00:00 2001 From: avdb13 Date: Mon, 15 Jul 2024 13:35:37 +0200 Subject: [PATCH 1711/1727] use ruma content disposition type in place of string Co-Authored-By: Matthias Ahouansou --- Cargo.lock | 37 ++++++++++++++++++++------------- src/api/client_server/media.rs | 36 +++++++++++++++++++------------- src/database/key_value/media.rs | 25 +++++++--------------- src/service/media/data.rs | 6 ++++-- src/service/media/mod.rs | 23 +++++++++++++------- 5 files changed, 70 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30d951a..2008218 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1203,9 +1203,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -2232,7 +2232,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "assign", "js_int", @@ -2253,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "js_int", "ruma-common", @@ -2265,7 +2265,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "as_variant", "assign", @@ -2288,7 +2288,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "as_variant", "base64 0.22.1", @@ -2318,7 +2318,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "as_variant", "indexmap 2.2.6", @@ -2334,15 +2334,22 @@ dependencies = [ "thiserror", "tracing", "url", + "web-time", "wildmatch", ] [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ + "bytes", + "http 1.1.0", + "httparse", "js_int", + "memchr", + "mime", + "rand", "ruma-common", "ruma-events", "serde", @@ -2352,7 +2359,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "js_int", "thiserror", @@ -2361,7 +2368,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "js_int", "ruma-common", @@ -2371,7 +2378,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "once_cell", "proc-macro-crate", @@ -2386,7 +2393,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "js_int", "ruma-common", @@ -2398,7 +2405,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "headers", "http 1.1.0", @@ -2411,7 +2418,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -2427,7 +2434,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/ruma/ruma#fec2152d879a6c6c2bccce984d4b8f424f460cb2" +source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" dependencies = [ "itertools", "js_int", diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 10890f9..803d516 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -4,12 +4,15 @@ use std::time::Duration; use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, - get_media_config, +use ruma::{ + api::client::{ + error::ErrorKind, + media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, + get_media_config, + }, }, + http_headers::{ContentDisposition, ContentDispositionType}, }; const MXC_LENGTH: usize = 32; @@ -44,10 +47,10 @@ pub async fn create_content_route( .media .create( mxc.clone(), - body.filename - .as_ref() - .map(|filename| "inline; filename=".to_owned() + filename) - .as_deref(), + Some( + ContentDisposition::new(ContentDispositionType::Inline) + .with_filename(body.filename.clone()), + ), body.content_type.as_deref(), &body.file, ) @@ -82,7 +85,7 @@ pub async fn get_remote_content( .media .create( mxc.to_owned(), - content_response.content_disposition.as_deref(), + content_response.content_disposition.clone(), content_response.content_type.as_deref(), &content_response.file, ) @@ -110,7 +113,7 @@ pub async fn get_content_route( Ok(get_content::v3::Response { file, content_type, - content_disposition, + content_disposition: Some(content_disposition), cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { @@ -145,7 +148,10 @@ pub async fn get_content_as_filename_route( Ok(get_content_as_filename::v3::Response { file, content_type, - 
content_disposition: Some(format!("inline; filename={}", body.filename)), + content_disposition: Some( + ContentDisposition::new(ContentDispositionType::Inline) + .with_filename(Some(body.filename.clone())), + ), cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { @@ -153,7 +159,10 @@ pub async fn get_content_as_filename_route( get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; Ok(get_content_as_filename::v3::Response { - content_disposition: Some(format!("inline: filename={}", body.filename)), + content_disposition: Some( + ContentDisposition::new(ContentDispositionType::Inline) + .with_filename(Some(body.filename.clone())), + ), content_type: remote_content_response.content_type, file: remote_content_response.file, cross_origin_resource_policy: Some("cross-origin".to_owned()), @@ -216,7 +225,6 @@ pub async fn get_content_thumbnail_route( .media .upload_thumbnail( mxc, - None, get_thumbnail_response.content_type.as_deref(), body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 6abe5ba..52a8e79 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,4 +1,4 @@ -use ruma::api::client::error::ErrorKind; +use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition}; use crate::{database::KeyValueDatabase, service, utils, Error, Result}; @@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase { mxc: String, width: u32, height: u32, - content_disposition: Option<&str>, + content_disposition: &ContentDisposition, content_type: Option<&str>, ) -> Result> { let mut key = mxc.as_bytes().to_vec(); @@ -16,12 +16,7 @@ impl service::media::Data for KeyValueDatabase { key.extend_from_slice(&width.to_be_bytes()); key.extend_from_slice(&height.to_be_bytes()); key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); + key.extend_from_slice(content_disposition.to_string().as_bytes()); key.push(0xff); key.extend_from_slice( content_type @@ -40,7 +35,7 @@ impl service::media::Data for KeyValueDatabase { mxc: String, width: u32, height: u32, - ) -> Result<(Option, Option, Vec)> { + ) -> Result<(ContentDisposition, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&width.to_be_bytes()); @@ -68,15 +63,9 @@ impl service::media::Data for KeyValueDatabase { .next() .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in mediaid_file is invalid unicode.") - })?, - ) - }; + let content_disposition = content_disposition_bytes + .try_into() + .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid."))?; Ok((content_disposition, content_type, key)) } } diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 75a682c..844aa99 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,3 +1,5 @@ +use ruma::http_headers::ContentDisposition; + use crate::Result; pub trait Data: Send + Sync { @@ -6,7 +8,7 @@ pub trait Data: Send + Sync { mxc: String, width: u32, height: u32, - content_disposition: 
Option<&str>, + content_disposition: &ContentDisposition, content_type: Option<&str>, ) -> Result>; @@ -16,5 +18,5 @@ pub trait Data: Send + Sync { mxc: String, width: u32, height: u32, - ) -> Result<(Option, Option, Vec)>; + ) -> Result<(ContentDisposition, Option, Vec)>; } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 1a80400..a7ac9d5 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -2,6 +2,7 @@ mod data; use std::io::Cursor; pub use data::Data; +use ruma::http_headers::{ContentDisposition, ContentDispositionType}; use crate::{services, Result}; use image::imageops::FilterType; @@ -12,7 +13,7 @@ use tokio::{ }; pub struct FileMeta { - pub content_disposition: Option, + pub content_disposition: ContentDisposition, pub content_type: Option, pub file: Vec, } @@ -26,14 +27,17 @@ impl Service { pub async fn create( &self, mxc: String, - content_disposition: Option<&str>, + content_disposition: Option, content_type: Option<&str>, file: &[u8], ) -> Result<()> { + let content_disposition = + content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline)); + // Width, Height = 0 if it's not a thumbnail let key = self .db - .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; + .create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -46,15 +50,18 @@ impl Service { pub async fn upload_thumbnail( &self, mxc: String, - content_disposition: Option<&str>, content_type: Option<&str>, width: u32, height: u32, file: &[u8], ) -> Result<()> { - let key = - self.db - .create_file_metadata(mxc, width, height, content_disposition, content_type)?; + let key = self.db.create_file_metadata( + mxc, + width, + height, + &ContentDisposition::new(ContentDispositionType::Inline), + content_type, + )?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -198,7 +205,7 @@ impl Service { mxc, width, height, - content_disposition.as_deref(), + &content_disposition, content_type.as_deref(), )?; From a3716a7d5acda535359eb7ef9145b98edfbe8487 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Wed, 28 Aug 2024 11:24:51 +0100 Subject: [PATCH 1712/1727] chore: upgrade request client matrix version this is needed so that new endpoints use stable paths --- src/api/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b0cdfc9..6cc29be 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -207,7 +207,7 @@ where .try_into_http_request::>( &actual_destination_str, SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_4], + &[MatrixVersion::V1_11], ) .map_err(|e| { warn!( From 27d6d94355852fd4455de23005f0acfece87ae78 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 24 Aug 2024 10:27:03 +0100 Subject: [PATCH 1713/1727] feat: add support for authenticated media requests --- src/api/client_server/media.rs | 288 +++++++++++++++++++++++++-------- src/api/server_server.rs | 88 ++++++++++ src/main.rs | 6 + 3 files changed, 317 insertions(+), 65 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 803d516..0473c2a 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -4,15 +4,21 @@ use std::time::Duration; use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; +use 
http::header::{CONTENT_DISPOSITION, CONTENT_TYPE}; use ruma::{ - api::client::{ - error::ErrorKind, - media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, - get_media_config, + api::{ + client::{ + authenticated_media::{ + get_content, get_content_as_filename, get_content_thumbnail, get_media_config, + }, + error::ErrorKind, + media::{self, create_content}, }, + federation::authenticated_media::{self as federation_media, FileOrLocation}, }, http_headers::{ContentDisposition, ContentDispositionType}, + media::Method, + ServerName, UInt, }; const MXC_LENGTH: usize = 32; @@ -21,9 +27,20 @@ const MXC_LENGTH: usize = 32; /// /// Returns max upload size. pub async fn get_media_config_route( - _body: Ruma, -) -> Result { - Ok(get_media_config::v3::Response { + _body: Ruma, +) -> Result { + Ok(media::get_media_config::v3::Response { + upload_size: services().globals.max_request_size().into(), + }) +} + +/// # `GET /_matrix/client/v1/media/config` +/// +/// Returns max upload size. +pub async fn get_media_config_auth_route( + _body: Ruma, +) -> Result { + Ok(get_media_config::v1::Response { upload_size: services().globals.max_request_size().into(), }) } @@ -64,19 +81,24 @@ pub async fn create_content_route( pub async fn get_remote_content( mxc: &str, - server_name: &ruma::ServerName, + server_name: &ServerName, media_id: String, -) -> Result { - let content_response = services() +) -> Result { + let media::get_content::v3::Response { + file, + content_type, + content_disposition, + .. + } = services() .sending .send_federation_request( server_name, - get_content::v3::Request { - allow_remote: false, + media::get_content::v3::Request { server_name: server_name.to_owned(), media_id, timeout_ms: Duration::from_secs(20), - allow_redirect: false, + allow_remote: false, + allow_redirect: true, }, ) .await?; @@ -85,13 +107,17 @@ pub async fn get_remote_content( .media .create( mxc.to_owned(), - content_response.content_disposition.clone(), - content_response.content_type.as_deref(), - &content_response.file, + content_disposition.clone(), + content_type.as_deref(), + &file, ) .await?; - Ok(content_response) + Ok(get_content::v1::Response { + file, + content_type, + content_disposition, + }) } /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` @@ -100,9 +126,37 @@ pub async fn get_remote_content( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + body: Ruma, +) -> Result { + let get_content::v1::Response { + file, + content_disposition, + content_type, + } = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?; + + Ok(media::get_content::v3::Response { + file, + content_type, + content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) +} + +/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. +pub async fn get_content_auth_route( + body: Ruma, +) -> Result { + get_content(&body.server_name, body.media_id.clone(), true).await +} + +async fn get_content( + server_name: &ServerName, + media_id: String, + allow_remote: bool, +) -> Result { + let mxc = format!("mxc://{}/{}", server_name, media_id); if let Some(FileMeta { content_disposition, @@ -110,21 +164,19 @@ pub async fn get_content_route( file, }) = services().media.get(mxc.clone()).await? 
{ - Ok(get_content::v3::Response { + Ok(get_content::v1::Response { file, content_type, content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some("cross-origin".to_owned()), }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + } else if server_name != services().globals.server_name() && allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; + get_remote_content(&mxc, server_name, media_id.clone()).await?; - Ok(get_content::v3::Response { + Ok(get_content::v1::Response { content_disposition: remote_content_response.content_disposition, content_type: remote_content_response.content_type, file: remote_content_response.file, - cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -137,35 +189,74 @@ pub async fn get_content_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + body: Ruma, +) -> Result { + let get_content_as_filename::v1::Response { + file, + content_type, + content_disposition, + } = get_content_as_filename( + &body.server_name, + body.media_id.clone(), + body.filename.clone(), + body.allow_remote, + ) + .await?; + + Ok(media::get_content_as_filename::v3::Response { + file, + content_type, + content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) +} + +/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +pub async fn get_content_as_filename_auth_route( + body: Ruma, +) -> Result { + get_content_as_filename( + &body.server_name, + body.media_id.clone(), + body.filename.clone(), + true, + ) + .await +} + +async fn get_content_as_filename( + server_name: &ServerName, + media_id: String, + filename: String, + allow_remote: bool, +) -> Result { + let mxc = format!("mxc://{}/{}", server_name, media_id); if let Some(FileMeta { file, content_type, .. }) = services().media.get(mxc.clone()).await? 
{ - Ok(get_content_as_filename::v3::Response { + Ok(get_content_as_filename::v1::Response { file, content_type, content_disposition: Some( ContentDisposition::new(ContentDispositionType::Inline) - .with_filename(Some(body.filename.clone())), + .with_filename(Some(filename.clone())), ), - cross_origin_resource_policy: Some("cross-origin".to_owned()), }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + } else if server_name != services().globals.server_name() && allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; + get_remote_content(&mxc, server_name, media_id.clone()).await?; - Ok(get_content_as_filename::v3::Response { + Ok(get_content_as_filename::v1::Response { content_disposition: Some( ContentDisposition::new(ContentDispositionType::Inline) - .with_filename(Some(body.filename.clone())), + .with_filename(Some(filename.clone())), ), content_type: remote_content_response.content_type, file: remote_content_response.file, - cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -178,9 +269,54 @@ pub async fn get_content_as_filename_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + body: Ruma, +) -> Result { + let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail( + &body.server_name, + body.media_id.clone(), + body.height, + body.width, + body.method.clone(), + body.animated, + body.allow_remote, + ) + .await?; + + Ok(media::get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) +} + +/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. +pub async fn get_content_thumbnail_auth_route( + body: Ruma, +) -> Result { + get_content_thumbnail( + &body.server_name, + body.media_id.clone(), + body.height, + body.width, + body.method.clone(), + body.animated, + true, + ) + .await +} + +async fn get_content_thumbnail( + server_name: &ServerName, + media_id: String, + height: UInt, + width: UInt, + method: Option, + animated: Option, + allow_remote: bool, +) -> Result { + let mxc = format!("mxc://{}/{}", server_name, media_id); if let Some(FileMeta { file, content_type, .. @@ -188,52 +324,74 @@ pub async fn get_content_thumbnail_route( .media .get_thumbnail( mxc.clone(), - body.width + width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - body.height + height .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, ) .await? { - Ok(get_content_thumbnail::v3::Response { - file, - content_type, - cross_origin_resource_policy: Some("cross-origin".to_owned()), - }) - } else if body.server_name != services().globals.server_name() && body.allow_remote { - let get_thumbnail_response = services() + Ok(get_content_thumbnail::v1::Response { file, content_type }) + } else if server_name != services().globals.server_name() && allow_remote { + let media::get_content_thumbnail::v3::Response { + file, content_type, .. 
+ } = services() .sending .send_federation_request( - &body.server_name, - get_content_thumbnail::v3::Request { - allow_remote: false, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), + server_name, + media::get_content_thumbnail::v3::Request { + height, + width, + method: method.clone(), + server_name: server_name.to_owned(), + media_id: media_id.clone(), timeout_ms: Duration::from_secs(20), allow_redirect: false, - animated: body.animated, + animated, + allow_remote: false, }, ) .await?; - services() .media .upload_thumbnail( mxc, - get_thumbnail_response.content_type.as_deref(), - body.width.try_into().expect("all UInts are valid u32s"), - body.height.try_into().expect("all UInts are valid u32s"), - &get_thumbnail_response.file, + content_type.as_deref(), + width.try_into().expect("all UInts are valid u32s"), + height.try_into().expect("all UInts are valid u32s"), + &file, ) .await?; - Ok(get_thumbnail_response) + Ok(get_content_thumbnail::v1::Response { file, content_type }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } } + +async fn get_location_content(url: String) -> Result { + let client = services().globals.default_client(); + let response = client.get(url).send().await?; + let headers = response.headers(); + + let content_type = headers + .get(CONTENT_TYPE) + .and_then(|header| header.to_str().ok()) + .map(ToOwned::to_owned); + + let content_disposition = headers + .get(CONTENT_DISPOSITION) + .map(|header| header.as_bytes()) + .map(TryFrom::try_from) + .and_then(Result::ok); + + let file = response.bytes().await?.to_vec(); + + Ok(get_content::v1::Response { + file, + content_type, + content_disposition, + }) +} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 6cc29be..56dd74d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,6 +4,7 @@ use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, service::{ globals::SigningKeys, + media::FileMeta, pdu::{gen_event_id_canonical_json, PduBuilder}, }, services, utils, Error, PduEvent, Result, Ruma, @@ -17,6 +18,9 @@ use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, federation::{ + authenticated_media::{ + get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, + }, authorization::get_event_authorization, backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, @@ -1891,6 +1895,90 @@ pub async fn create_invite_route( }) } +/// # `GET /_matrix/federation/v1/media/download/{serverName}/{mediaId}` +/// +/// Load media from our server. +pub async fn get_content_route( + body: Ruma, +) -> Result { + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + body.media_id + ); + + if let Some(FileMeta { + content_disposition, + content_type, + file, + }) = services().media.get(mxc.clone()).await? + { + Ok(get_content::v1::Response::new( + ContentMetadata::new(), + FileOrLocation::File(Content { + file, + content_type, + content_disposition: Some(content_disposition), + }), + )) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/federation/v1/media/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. 
+pub async fn get_content_thumbnail_route( + body: Ruma, +) -> Result { + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + body.media_id + ); + + let Some(FileMeta { + file, + content_type, + content_disposition, + }) = services() + .media + .get_thumbnail( + mxc.clone(), + body.width + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + body.height + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + ) + .await? + else { + return Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")); + }; + + services() + .media + .upload_thumbnail( + mxc, + content_type.as_deref(), + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &file, + ) + .await?; + + Ok(get_content_thumbnail::v1::Response::new( + ContentMetadata::new(), + FileOrLocation::File(Content { + file, + content_type, + content_disposition: Some(content_disposition), + }), + )) +} + /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. diff --git a/src/main.rs b/src/main.rs index 8d242c5..d0793f2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -379,10 +379,14 @@ fn routes(config: &Config) -> Router { .ruma_route(client_server::turn_server_route) .ruma_route(client_server::send_event_to_device_route) .ruma_route(client_server::get_media_config_route) + .ruma_route(client_server::get_media_config_auth_route) .ruma_route(client_server::create_content_route) .ruma_route(client_server::get_content_route) + .ruma_route(client_server::get_content_auth_route) .ruma_route(client_server::get_content_as_filename_route) + .ruma_route(client_server::get_content_as_filename_auth_route) .ruma_route(client_server::get_content_thumbnail_route) + .ruma_route(client_server::get_content_thumbnail_auth_route) .ruma_route(client_server::get_devices_route) .ruma_route(client_server::get_device_route) .ruma_route(client_server::update_device_route) @@ -440,6 +444,8 @@ fn routes(config: &Config) -> Router { .ruma_route(server_server::create_join_event_v2_route) .ruma_route(server_server::create_invite_route) .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_content_route) + .ruma_route(server_server::get_content_thumbnail_route) .ruma_route(server_server::get_room_information_route) .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) From cbd3b07ca727637d3e62e9380f24b297d8c3085f Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 24 Aug 2024 10:36:03 +0100 Subject: [PATCH 1714/1727] feat(media): use authenticated endpoints when fetching remote media --- src/api/client_server/media.rs | 132 +++++++++++++++++++++++++-------- 1 file changed, 101 insertions(+), 31 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 0473c2a..ada2bd7 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -84,40 +84,70 @@ pub async fn get_remote_content( server_name: &ServerName, media_id: String, ) -> Result { - let media::get_content::v3::Response { - file, - content_type, - content_disposition, - .. 
- } = services() + let content_response = match services() .sending .send_federation_request( server_name, - media::get_content::v3::Request { - server_name: server_name.to_owned(), - media_id, + federation_media::get_content::v1::Request { + media_id: media_id.clone(), timeout_ms: Duration::from_secs(20), - allow_remote: false, - allow_redirect: true, }, ) - .await?; + .await + { + Ok(federation_media::get_content::v1::Response { + metadata: _, + content: FileOrLocation::File(content), + }) => get_content::v1::Response { + file: content.file, + content_type: content.content_type, + content_disposition: content.content_disposition, + }, + + Ok(federation_media::get_content::v1::Response { + metadata: _, + content: FileOrLocation::Location(url), + }) => get_location_content(url).await?, + Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => { + let media::get_content::v3::Response { + file, + content_type, + content_disposition, + .. + } = services() + .sending + .send_federation_request( + server_name, + media::get_content::v3::Request { + server_name: server_name.to_owned(), + media_id, + timeout_ms: Duration::from_secs(20), + allow_remote: false, + allow_redirect: true, + }, + ) + .await?; + + get_content::v1::Response { + file, + content_type, + content_disposition, + } + } + Err(e) => return Err(e), + }; services() .media .create( mxc.to_owned(), - content_disposition.clone(), - content_type.as_deref(), - &file, + content_response.content_disposition.clone(), + content_response.content_type.as_deref(), + &content_response.file, ) .await?; - Ok(get_content::v1::Response { - file, - content_type, - content_disposition, - }) + Ok(content_response) } /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` @@ -335,37 +365,77 @@ async fn get_content_thumbnail( { Ok(get_content_thumbnail::v1::Response { file, content_type }) } else if server_name != services().globals.server_name() && allow_remote { - let media::get_content_thumbnail::v3::Response { - file, content_type, .. - } = services() + let thumbnail_response = match services() .sending .send_federation_request( server_name, - media::get_content_thumbnail::v3::Request { + federation_media::get_content_thumbnail::v1::Request { height, width, method: method.clone(), - server_name: server_name.to_owned(), media_id: media_id.clone(), timeout_ms: Duration::from_secs(20), - allow_redirect: false, animated, - allow_remote: false, }, ) - .await?; + .await + { + Ok(federation_media::get_content_thumbnail::v1::Response { + metadata: _, + content: FileOrLocation::File(content), + }) => get_content_thumbnail::v1::Response { + file: content.file, + content_type: content.content_type, + }, + + Ok(federation_media::get_content_thumbnail::v1::Response { + metadata: _, + content: FileOrLocation::Location(url), + }) => { + let get_content::v1::Response { + file, content_type, .. + } = get_location_content(url).await?; + + get_content_thumbnail::v1::Response { file, content_type } + } + Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => { + let media::get_content_thumbnail::v3::Response { + file, content_type, .. 
+ } = services() + .sending + .send_federation_request( + server_name, + media::get_content_thumbnail::v3::Request { + height, + width, + method: method.clone(), + server_name: server_name.to_owned(), + media_id: media_id.clone(), + timeout_ms: Duration::from_secs(20), + allow_redirect: false, + animated, + allow_remote: false, + }, + ) + .await?; + + get_content_thumbnail::v1::Response { file, content_type } + } + Err(e) => return Err(e), + }; + services() .media .upload_thumbnail( mxc, - content_type.as_deref(), + thumbnail_response.content_type.as_deref(), width.try_into().expect("all UInts are valid u32s"), height.try_into().expect("all UInts are valid u32s"), - &file, + &thumbnail_response.file, ) .await?; - Ok(get_content_thumbnail::v1::Response { file, content_type }) + Ok(thumbnail_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } From cdd03dfec0cf077e674e3dcc9786b937861569cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Leonardo=20Jos=C3=A9?= Date: Thu, 12 Sep 2024 03:33:49 +0000 Subject: [PATCH 1715/1727] Fix parsing of CONFIG_WELL_KNOW_* env variables into the internal configuration object --- src/config/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 378ab92..7cac2ed 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -59,7 +59,7 @@ pub struct Config { pub allow_unstable_room_versions: bool, #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, - #[serde(default)] + #[serde(default, flatten)] pub well_known: WellKnownConfig, #[serde(default = "false_fn")] pub allow_jaeger: bool, @@ -97,7 +97,9 @@ pub struct TlsConfig { #[derive(Clone, Debug, Deserialize, Default)] pub struct WellKnownConfig { + #[serde(rename = "well_known_client")] pub client: Option, + #[serde(rename = "well_known_server")] pub server: Option, } From a6797ca0a2bbd12f8675b423b15403eabf290117 Mon Sep 17 00:00:00 2001 From: The one with the braid Date: Sat, 21 Sep 2024 10:53:57 +0200 Subject: [PATCH 1716/1727] fix: add missing msc3916 unstable feature in version response Fixes: #473 Signed-off-by: The one with the braid --- src/api/client_server/unversioned.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 7706afe..6ddc132 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -27,7 +27,10 @@ pub async fn get_supported_versions_route( "v1.4".to_owned(), "v1.5".to_owned(), ], - unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), + unstable_features: BTreeMap::from_iter([ + ("org.matrix.e2e_cross_signing".to_owned(), true), + ("org.matrix.msc3916.stable".to_owned(), true), + ]), }; Ok(resp) From 3df21e8257b5043b943630307e4110d2f188cc06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 4 Sep 2024 13:01:23 +0200 Subject: [PATCH 1717/1727] fix: old media used spaces in content disposition without quotes --- src/api/client_server/media.rs | 14 ++++---- src/database/mod.rs | 61 +++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 8 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index ada2bd7..3f483c2 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -188,11 +188,11 @@ async fn get_content( ) -> Result { let mxc = format!("mxc://{}/{}", server_name, media_id); - if let 
Some(FileMeta { + if let Ok(Some(FileMeta { content_disposition, content_type, file, - }) = services().media.get(mxc.clone()).await? + })) = services().media.get(mxc.clone()).await { Ok(get_content::v1::Response { file, @@ -264,9 +264,9 @@ async fn get_content_as_filename( ) -> Result { let mxc = format!("mxc://{}/{}", server_name, media_id); - if let Some(FileMeta { + if let Ok(Some(FileMeta { file, content_type, .. - }) = services().media.get(mxc.clone()).await? + })) = services().media.get(mxc.clone()).await { Ok(get_content_as_filename::v1::Response { file, @@ -348,9 +348,9 @@ async fn get_content_thumbnail( ) -> Result { let mxc = format!("mxc://{}/{}", server_name, media_id); - if let Some(FileMeta { + if let Ok(Some(FileMeta { file, content_type, .. - }) = services() + })) = services() .media .get_thumbnail( mxc.clone(), @@ -361,7 +361,7 @@ async fn get_content_thumbnail( .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, ) - .await? + .await { Ok(get_content_thumbnail::v1::Response { file, content_type }) } else if server_name != services().globals.server_name() && allow_remote { diff --git a/src/database/mod.rs b/src/database/mod.rs index 5171d4b..a8d4482 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -6,6 +6,7 @@ use crate::{ SERVICES, }; use abstraction::{KeyValueDatabaseEngine, KvTree}; +use base64::{engine::general_purpose, Engine}; use directories::ProjectDirs; use lru_cache::LruCache; @@ -424,7 +425,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 13; + let latest_database_version = 14; if services().users.count()? > 0 { // MIGRATIONS @@ -941,6 +942,64 @@ impl KeyValueDatabase { warn!("Migration: 12 -> 13 finished"); } + if services().globals.database_version()? < 14 { + // Reconstruct all media using the filesystem + db.mediaid_file.clear().unwrap(); + + for file in fs::read_dir(services().globals.get_media_folder()).unwrap() { + let file = file.unwrap(); + let mediaid = general_purpose::URL_SAFE_NO_PAD + .decode(file.file_name().into_string().unwrap()) + .unwrap(); + + let mut parts = mediaid.rsplit(|&b| b == 0xff); + + let mut removed_bytes = 0; + + let content_type_bytes = parts.next().unwrap(); + removed_bytes += content_type_bytes.len() + 1; + + let content_disposition_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; + removed_bytes += content_disposition_bytes.len(); + + let mut content_disposition = + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database("Content Disposition in mediaid_file is invalid.") + })?; + + if content_disposition.contains("filename=") + && !content_disposition.contains("filename=\"") + { + println!("{}", &content_disposition); + content_disposition = + content_disposition.replacen("filename=", "filename=\"", 1); + content_disposition.push('"'); + println!("{}", &content_disposition); + + let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec(); + assert!(*new_key.last().unwrap() == 0xff); + + new_key.extend_from_slice(content_disposition.to_string().as_bytes()); + new_key.push(0xff); + new_key.extend_from_slice(content_type_bytes); + + // Some file names are too long. Ignore those. 
+ let _ = fs::rename( + services().globals.get_media_file(&mediaid), + services().globals.get_media_file(&new_key), + ); + db.mediaid_file.insert(&new_key, &[])?; + } else { + db.mediaid_file.insert(&mediaid, &[])?; + } + } + services().globals.bump_database_version(14)?; + + warn!("Migration: 13 -> 14 finished"); + } + assert_eq!( services().globals.database_version().unwrap(), latest_database_version From a7405cddc020817614f1186903c0fc95c2403dce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 24 Sep 2024 18:48:48 +0200 Subject: [PATCH 1718/1727] fix: Matrix media repo --- Cargo.lock | 27 ++++++++++---------- src/api/client_server/membership.rs | 2 +- src/api/server_server.rs | 13 +++++----- src/database/mod.rs | 38 ++++++++++++++++++++++------- 4 files changed, 50 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2008218..19e75ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2232,7 +2232,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "assign", "js_int", @@ -2253,7 +2253,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "js_int", "ruma-common", @@ -2265,7 +2265,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "as_variant", "assign", @@ -2288,7 +2288,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "as_variant", "base64 0.22.1", @@ -2318,7 +2318,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "as_variant", "indexmap 2.2.6", @@ -2341,7 +2341,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "bytes", "http 1.1.0", @@ -2359,7 +2359,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "js_int", "thiserror", @@ -2368,7 +2368,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "js_int", "ruma-common", @@ -2378,8 +2378,9 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ + "cfg-if", "once_cell", "proc-macro-crate", "proc-macro2", @@ -2393,7 +2394,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "js_int", "ruma-common", @@ -2405,7 +2406,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "headers", "http 1.1.0", @@ -2418,7 +2419,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -2434,7 +2435,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/ruma/ruma#82417e394076440089cd8ada87485d9a44cc4ba0" +source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" dependencies = [ "itertools", "js_int", diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 3f3d25d..baf2f23 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -97,7 +97,7 @@ pub async fn join_room_by_id_or_alias_route( let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { Ok(room_id) => { - let mut servers = body.server_name.clone(); + let mut servers = body.via.clone(); servers.extend( services() .rooms diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 56dd74d..f6dc58f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -10,7 +10,7 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use axum_extra::headers::{authorization::Credentials, CacheControl, Header}; +use axum_extra::headers::{CacheControl, Header}; use get_profile_information::v1::ProfileField; use http::header::AUTHORIZATION; @@ -52,7 +52,6 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::{Base64, JsonObject, Raw}, - server_util::authorization::XMatrix, to_device::DeviceIdOrAllDevices, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, @@ -275,15 +274,15 @@ where for s in signature_server { http_request.headers_mut().insert( AUTHORIZATION, - XMatrix::parse(&format!( + format!( "X-Matrix origin=\"{}\",destination=\"{}\",key=\"{}\",sig=\"{}\"", services().globals.server_name(), destination, s.0, s.1 - )) - .expect("When Ruma signs JSON, it produces a valid base64 signature. 
All other types are valid ServerNames or OwnedKeyId") - .encode(), + ) + .try_into() + .unwrap(), ); } } @@ -343,7 +342,7 @@ where response.map_err(|e| { warn!( - "Invalid 200 response from {} on: {} {}", + "Invalid 200 response from {} on: {} {:?}", &destination, url, e ); Error::BadServerResponse("Server returned bad 200 response.") diff --git a/src/database/mod.rs b/src/database/mod.rs index a8d4482..1caf61c 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -425,7 +425,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 14; + let latest_database_version = 15; if services().users.count()? > 0 { // MIGRATIONS @@ -942,7 +942,7 @@ impl KeyValueDatabase { warn!("Migration: 12 -> 13 finished"); } - if services().globals.database_version()? < 14 { + if services().globals.database_version()? < 15 { // Reconstruct all media using the filesystem db.mediaid_file.clear().unwrap(); @@ -981,23 +981,43 @@ impl KeyValueDatabase { let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec(); assert!(*new_key.last().unwrap() == 0xff); + let mut shorter_key = new_key.clone(); + shorter_key.extend( + ruma::http_headers::ContentDisposition::new( + ruma::http_headers::ContentDispositionType::Inline, + ) + .to_string() + .as_bytes(), + ); + shorter_key.push(0xff); + shorter_key.extend_from_slice(content_type_bytes); + new_key.extend_from_slice(content_disposition.to_string().as_bytes()); new_key.push(0xff); new_key.extend_from_slice(content_type_bytes); // Some file names are too long. Ignore those. - let _ = fs::rename( + match fs::rename( services().globals.get_media_file(&mediaid), services().globals.get_media_file(&new_key), - ); - db.mediaid_file.insert(&new_key, &[])?; - } else { - db.mediaid_file.insert(&mediaid, &[])?; + ) { + Ok(_) => { + db.mediaid_file.insert(&mediaid, &[])?; + } + Err(_) => { + fs::rename( + services().globals.get_media_file(&mediaid), + services().globals.get_media_file(&shorter_key), + ) + .unwrap(); + db.mediaid_file.insert(&shorter_key, &[])?; + } + } } } - services().globals.bump_database_version(14)?; + services().globals.bump_database_version(15)?; - warn!("Migration: 13 -> 14 finished"); + warn!("Migration: 13 -> 15 finished"); } assert_eq!( From fea85b0894d616ae963b1a8f4319040001fa8005 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 24 Sep 2024 23:00:37 +0200 Subject: [PATCH 1719/1727] fix: Migration typo for media --- src/api/client_server/media.rs | 2 +- src/database/mod.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 3f483c2..03e4cba 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -359,7 +359,7 @@ async fn get_content_thumbnail( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, height .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?, ) .await { diff --git a/src/database/mod.rs b/src/database/mod.rs index 1caf61c..0035862 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -425,7 +425,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 15; + let latest_database_version = 16; if services().users.count()? 
> 0 { // MIGRATIONS @@ -942,7 +942,7 @@ impl KeyValueDatabase { warn!("Migration: 12 -> 13 finished"); } - if services().globals.database_version()? < 15 { + if services().globals.database_version()? < 16 { // Reconstruct all media using the filesystem db.mediaid_file.clear().unwrap(); @@ -1002,7 +1002,7 @@ impl KeyValueDatabase { services().globals.get_media_file(&new_key), ) { Ok(_) => { - db.mediaid_file.insert(&mediaid, &[])?; + db.mediaid_file.insert(&new_key, &[])?; } Err(_) => { fs::rename( @@ -1013,11 +1013,13 @@ impl KeyValueDatabase { db.mediaid_file.insert(&shorter_key, &[])?; } } + } else { + db.mediaid_file.insert(&mediaid, &[])?; } } - services().globals.bump_database_version(15)?; + services().globals.bump_database_version(16)?; - warn!("Migration: 13 -> 15 finished"); + warn!("Migration: 13 -> 16 finished"); } assert_eq!( From 65fe6b0ab5968e4b6c4fdb1a20070ddd4def08ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 25 Sep 2024 09:06:43 +0200 Subject: [PATCH 1720/1727] fix: Empty content dispositions could create problems --- src/database/key_value/media.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 52a8e79..99df009 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -63,9 +63,9 @@ impl service::media::Data for KeyValueDatabase { .next() .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - let content_disposition = content_disposition_bytes - .try_into() - .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid."))?; + let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| { + ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline) + }); Ok((content_disposition, content_type, key)) } } From 892fb8846a3759f9572d42e8b3d3f2baad2b8b2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 6 Oct 2024 14:18:54 +0200 Subject: [PATCH 1721/1727] Bump version --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19e75ce..9a98f2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,7 +487,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.9.0-alpha" +version = "0.9.0" dependencies = [ "async-trait", "axum 0.7.5", diff --git a/Cargo.toml b/Cargo.toml index 67128f0..fba2f59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ license = "Apache-2.0" name = "conduit" readme = "README.md" repository = "https://gitlab.com/famedly/conduit" -version = "0.9.0-alpha" +version = "0.9.0" # See also `rust-toolchain.toml` rust-version = "1.79.0" From 6767ca8bc8235d5a5d8adcb51f6c57a63e7bf211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 7 Oct 2024 00:02:36 +0200 Subject: [PATCH 1722/1727] fix: config options for well_known have changed --- Cargo.lock | 2 +- Cargo.toml | 2 +- docs/configuration.md | 3 ++- docs/delegation.md | 12 ++++++------ 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a98f2a..4b020ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,7 +487,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.9.0" +version = "0.10.0-alpha" dependencies = [ "async-trait", "axum 0.7.5", diff --git a/Cargo.toml b/Cargo.toml index 
fba2f59..0cdde4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ license = "Apache-2.0" name = "conduit" readme = "README.md" repository = "https://gitlab.com/famedly/conduit" -version = "0.9.0" +version = "0.10.0-alpha" # See also `rust-toolchain.toml` rust-version = "1.79.0" diff --git a/docs/configuration.md b/docs/configuration.md index 2239ac5..b2b4f3a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -58,7 +58,8 @@ The `global` section contains the following fields: | `turn_secret` | `string` | The TURN secret | `""` | | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | -| `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | +| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | +| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | ### TLS diff --git a/docs/delegation.md b/docs/delegation.md index c8e5391..28d962f 100644 --- a/docs/delegation.md +++ b/docs/delegation.md @@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN. > **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration -To configure it, use the following options in the `global.well_known` table: +To configure it, use the following options: | Field | Type | Description | Default | | --- | --- | --- | --- | -| `client` | `String` | The URL that clients should use to connect to Conduit | `https://` | -| `server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | +| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://` | +| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | ### Example ```toml -[global.well_known] -client = "https://matrix.example.org" -server = "matrix.example.org:443" +[global] +well_known_client = "https://matrix.example.org" +well_known_server = "matrix.example.org:443" ``` ## Manual From a2b72f48b9beaa9b0228f099ebaee0b953349371 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lauren=C8=9Biu=20Nicola?= Date: Tue, 8 Oct 2024 19:41:04 +0300 Subject: [PATCH 1723/1727] fix: remove content-disposition debug prints --- src/database/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/database/mod.rs b/src/database/mod.rs index 0035862..2317f7a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -972,11 +972,9 @@ impl KeyValueDatabase { if content_disposition.contains("filename=") && !content_disposition.contains("filename=\"") { - println!("{}", &content_disposition); content_disposition = content_disposition.replacen("filename=", "filename=\"", 1); content_disposition.push('"'); - println!("{}", &content_disposition); let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec(); assert!(*new_key.last().unwrap() == 0xff); From de323cbecb24bb4393db7271c5870ee6a8f7e8eb Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Thu, 10 Oct 2024 16:02:39 +0100 Subject: [PATCH 1724/1727] re-add well-known table, while still allowing individual values to be set with env vars without double underscores --- docs/configuration.md | 5 ++--- docs/delegation.md | 12 ++++++------ src/config/mod.rs | 4 +--- src/main.rs | 18 +++++++++++++++++- 4 files changed, 26 insertions(+), 13 deletions(-) 
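The hunks below re-introduce the nested `[global.well_known]` table while keeping the flat `CONDUIT_WELL_KNOWN_*` environment variables working, by remapping those variables onto nested Figment keys before the config is extracted. A minimal, self-contained sketch of that remapping follows; it mirrors the `src/main.rs` hunk further down, except that `"conduit.toml"` stands in for the config path normally taken from the `CONDUIT_CONFIG` environment variable, and the final `extract::<Config>()` step is only indicated in a comment.

```rust
use figment::{
    providers::{Env, Format, Toml},
    value::Uncased,
    Figment,
};

// Sub-tables whose fields may also be set via CONDUIT_<table>_<field> env vars.
static SUB_TABLES: [&str; 2] = ["well_known", "tls"];

fn main() {
    // "conduit.toml" is a placeholder path; the real code reads it from CONDUIT_CONFIG.
    let raw_config = Figment::new()
        .merge(Toml::file("conduit.toml").nested())
        .merge(Env::prefixed("CONDUIT_").global().map(|k| {
            let mut key: Uncased = k.into();

            for table in SUB_TABLES {
                // e.g. CONDUIT_WELL_KNOWN_CLIENT becomes the nested key "well_known.client"
                if k.starts_with(&(table.to_owned() + "_")) {
                    key = Uncased::from(
                        table.to_owned() + "." + k[table.len() + 1..k.len()].as_str(),
                    );
                    break;
                }
            }

            key
        }));

    // raw_config.extract::<Config>() would then deserialize into the Config struct.
    let _ = raw_config;
}
```

With this mapping in place, `CONDUIT_WELL_KNOWN_CLIENT="https://matrix.example.org"` and the TOML form `[global.well_known]` with `client = "https://matrix.example.org"` populate the same `well_known.client` field.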
diff --git a/docs/configuration.md b/docs/configuration.md index b2b4f3a..9687ead 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -6,7 +6,7 @@ > **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect -> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"` +> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}_{field_name}`. Example: `CONDUIT_WELL_KNOWN_CLIENT="https://matrix.example.org"` Conduit's configuration file is divided into the following sections: @@ -58,8 +58,7 @@ The `global` section contains the following fields: | `turn_secret` | `string` | The TURN secret | `""` | | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | -| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | -| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | +| `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | ### TLS diff --git a/docs/delegation.md b/docs/delegation.md index 28d962f..c8e5391 100644 --- a/docs/delegation.md +++ b/docs/delegation.md @@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN. > **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration -To configure it, use the following options: +To configure it, use the following options in the `global.well_known` table: | Field | Type | Description | Default | | --- | --- | --- | --- | -| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://` | -| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | +| `client` | `String` | The URL that clients should use to connect to Conduit | `https://` | +| `server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | ### Example ```toml -[global] -well_known_client = "https://matrix.example.org" -well_known_server = "matrix.example.org:443" +[global.well_known] +client = "https://matrix.example.org" +server = "matrix.example.org:443" ``` ## Manual diff --git a/src/config/mod.rs b/src/config/mod.rs index 7cac2ed..378ab92 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -59,7 +59,7 @@ pub struct Config { pub allow_unstable_room_versions: bool, #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, - #[serde(default, flatten)] + #[serde(default)] pub well_known: WellKnownConfig, #[serde(default = "false_fn")] pub allow_jaeger: bool, @@ -97,9 +97,7 @@ pub struct TlsConfig { #[derive(Clone, Debug, Deserialize, Default)] pub struct WellKnownConfig { - #[serde(rename = "well_known_client")] pub client: Option, - #[serde(rename = "well_known_server")] pub server: Option, } diff --git a/src/main.rs b/src/main.rs index c765401..9a65fe5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,6 +12,7 @@ use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerH use conduit::api::{client_server, server_server}; use figment::{ providers::{Env, Format, Toml}, + 
value::Uncased, Figment, }; use http::{ @@ -44,6 +45,8 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; +static SUB_TABLES: [&str; 2] = ["well_known", "tls"]; // Not doing `proxy` cause setting that with env vars would be a pain + #[tokio::main] async fn main() { clap::parse(); @@ -57,7 +60,20 @@ async fn main() { )) .nested(), ) - .merge(Env::prefixed("CONDUIT_").global().split("__")); + .merge(Env::prefixed("CONDUIT_").global().map(|k| { + let mut key: Uncased = k.into(); + + for table in SUB_TABLES { + if k.starts_with(&(table.to_owned() + "_")) { + key = Uncased::from( + table.to_owned() + "." + k[table.len() + 1..k.len()].as_str(), + ); + break; + } + } + + key + })); let config = match raw_config.extract::() { Ok(s) => s, From 56a51360e05c8b4baa4016f42d02186f8ca51079 Mon Sep 17 00:00:00 2001 From: Matthias Ahouansou Date: Sat, 2 Mar 2024 11:12:22 +0000 Subject: [PATCH 1725/1727] feat(spaces): hierarchy over federation fix(spaces): deal with hierarchy recursion fix(spaces): properly handle max_depth refactor(spaces): token scheme to prevent clients from modifying max_depth and suggested_only perf(spaces): use tokens to skip to room to start populating results at feat(spaces): request hierarchy from servers in via field of child event --- src/api/client_server/space.rs | 46 +- src/api/server_server.rs | 26 + src/main.rs | 1 + src/service/mod.rs | 14 +- src/service/rooms/spaces/mod.rs | 1198 ++++++++++++++++------- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/state_accessor/mod.rs | 71 +- src/service/rooms/state_cache/mod.rs | 2 + src/service/rooms/timeline/mod.rs | 2 +- 9 files changed, 973 insertions(+), 389 deletions(-) diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs index e2ea8c3..0bf9c56 100644 --- a/src/api/client_server/space.rs +++ b/src/api/client_server/space.rs @@ -1,5 +1,10 @@ -use crate::{services, Result, Ruma}; -use ruma::api::client::space::get_hierarchy; +use std::str::FromStr; + +use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, space::get_hierarchy}, + UInt, +}; /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`` /// @@ -9,25 +14,42 @@ pub async fn get_hierarchy_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let skip = body + let limit = body + .limit + .unwrap_or(UInt::from(10_u32)) + .min(UInt::from(100_u32)); + let max_depth = body + .max_depth + .unwrap_or(UInt::from(3_u32)) + .min(UInt::from(10_u32)); + + let key = body .from .as_ref() - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); + .and_then(|s| PagnationToken::from_str(s).ok()); - let limit = body.limit.map_or(10, u64::from).min(100) as usize; - - let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself + // Should prevent unexpected behaviour in (bad) clients + if let Some(token) = &key { + if token.suggested_only != body.suggested_only || token.max_depth != max_depth { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "suggested_only and max_depth cannot change on paginated requests", + )); + } + } services() .rooms .spaces - .get_hierarchy( + .get_client_hierarchy( sender_user, &body.room_id, - limit, - skip, - max_depth, + usize::try_from(limit) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Limit is too great"))?, + key.map_or(vec![], |token| token.short_room_ids), + usize::try_from(max_depth).map_err(|_| { 
+ Error::BadRequest(ErrorKind::InvalidParam, "Max depth is too great") + })?, body.suggested_only, ) .await diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f6dc58f..f8afcf3 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -34,6 +34,7 @@ use ruma::{ membership::{create_invite, create_join_event, prepare_join_event}, openid::get_openid_userinfo, query::{get_profile_information, get_room_information}, + space::get_hierarchy, transactions::{ edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, send_transaction_message, @@ -2162,6 +2163,31 @@ pub async fn get_openid_userinfo_route( )) } +/// # `GET /_matrix/federation/v1/hierarchy/{roomId}` +/// +/// Gets the space tree in a depth-first manner to locate child rooms of a given space. +pub async fn get_hierarchy_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if services().rooms.metadata.exists(&body.room_id)? { + services() + .rooms + .spaces + .get_federation_hierarchy(&body.room_id, sender_servername, body.suggested_only) + .await + } else { + Err(Error::BadRequest( + ErrorKind::NotFound, + "Room does not exist.", + )) + } +} + /// # `GET /.well-known/matrix/server` /// /// Returns the federation server discovery information. diff --git a/src/main.rs b/src/main.rs index c765401..4bfd7cd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -451,6 +451,7 @@ fn routes(config: &Config) -> Router { .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) .ruma_route(server_server::get_openid_userinfo_route) + .ruma_route(server_server::get_hierarchy_route) .ruma_route(server_server::well_known_server) } else { router diff --git a/src/service/mod.rs b/src/service/mod.rs index 4c11bc1..552c71a 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -105,7 +105,7 @@ impl Services { }, threads: rooms::threads::Service { db }, spaces: rooms::spaces::Service { - roomid_spacechunk_cache: Mutex::new(LruCache::new(200)), + roomid_spacehierarchy_cache: Mutex::new(LruCache::new(200)), }, user: rooms::user::Service { db }, }, @@ -154,7 +154,13 @@ impl Services { .lock() .await .len(); - let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len(); + let roomid_spacehierarchy_cache = self + .rooms + .spaces + .roomid_spacehierarchy_cache + .lock() + .await + .len(); format!( "\ @@ -163,7 +169,7 @@ server_visibility_cache: {server_visibility_cache} user_visibility_cache: {user_visibility_cache} stateinfo_cache: {stateinfo_cache} lasttimelinecount_cache: {lasttimelinecount_cache} -roomid_spacechunk_cache: {roomid_spacechunk_cache}\ +roomid_spacechunk_cache: {roomid_spacehierarchy_cache}\ " ) } @@ -211,7 +217,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\ if amount > 5 { self.rooms .spaces - .roomid_spacechunk_cache + .roomid_spacehierarchy_cache .lock() .await .clear(); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index a78296b..26a40f9 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,318 +1,430 @@ -use std::sync::Arc; +use std::{ + collections::VecDeque, + fmt::{Display, Formatter}, + str::FromStr, +}; use lru_cache::LruCache; use ruma::{ api::{ - client::{ - error::ErrorKind, - space::{get_hierarchy, SpaceHierarchyRoomsChunk}, + client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + federation::{ + self, + space::{SpaceHierarchyChildSummary, 
SpaceHierarchyParentSummary}, }, - federation, }, events::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{self, AllowRule, JoinRule, RoomJoinRulesEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, topic::RoomTopicEventContent, }, - space::child::SpaceChildEventContent, + space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, StateEventType, }, + serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, RoomId, UserId, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, }; use tokio::sync::Mutex; +use tracing::{debug, error, info, warn}; -use tracing::{debug, error, warn}; +use crate::{services, Error, Result}; -use crate::{services, Error, PduEvent, Result}; - -pub enum CachedJoinRule { - //Simplified(SpaceRoomJoinRule), - Full(JoinRule), +pub struct CachedSpaceHierarchySummary { + summary: SpaceHierarchyParentSummary, } -pub struct CachedSpaceChunk { - chunk: SpaceHierarchyRoomsChunk, - children: Vec, - join_rule: CachedJoinRule, +pub enum SummaryAccessibility { + Accessible(Box), + Inaccessible, +} + +// Note: perhaps use some better form of token rather than just room count +#[derive(Debug, PartialEq)] +pub struct PagnationToken { + /// Path down the hierarchy of the room to start the response at, + /// excluding the root space. + pub short_room_ids: Vec, + pub limit: UInt, + pub max_depth: UInt, + pub suggested_only: bool, +} + +impl FromStr for PagnationToken { + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); + + let mut pag_tok = || { + let mut rooms = vec![]; + + for room in values.next()?.split(',') { + rooms.push(u64::from_str(room).ok()?) + } + + Some(PagnationToken { + short_room_ids: rooms, + limit: UInt::from_str(values.next()?).ok()?, + max_depth: UInt::from_str(values.next()?).ok()?, + suggested_only: { + let slice = values.next()?; + + if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? + } + }, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } + + type Err = Error; +} + +impl Display for PagnationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}_{}_{}_{}", + self.short_room_ids + .iter() + .map(|b| b.to_string()) + .collect::>() + .join(","), + self.limit, + self.max_depth, + self.suggested_only + ) + } +} + +/// Identifier used to check if rooms are accessible +/// +/// None is used if you want to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), } pub struct Service { - pub roomid_spacechunk_cache: Mutex>>, + pub roomid_spacehierarchy_cache: + Mutex>>, +} + +// Here because cannot implement `From` across ruma-federation-api and ruma-client-api types +impl From for SpaceHierarchyRoomsChunk { + fn from(value: CachedSpaceHierarchySummary) -> Self { + let SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state, + .. 
+ } = value.summary; + + SpaceHierarchyRoomsChunk { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state, + } + } } impl Service { - pub async fn get_hierarchy( + /// Gets the response for the space hierarchy over federation request + /// + /// Panics if the room does not exist, so the caller should check that the room exists first + pub async fn get_federation_hierarchy( &self, - sender_user: &UserId, room_id: &RoomId, - limit: usize, - skip: usize, - max_depth: usize, + server_name: &ServerName, suggested_only: bool, - ) -> Result { - let mut left_to_skip = skip; + ) -> Result { + match self + .get_summary_and_children_local( + &room_id.to_owned(), + Identifier::ServerName(server_name), + ) + .await? + { + Some(SummaryAccessibility::Accessible(room)) => { + let mut children = Vec::new(); + let mut inaccessible_children = Vec::new(); - let mut rooms_in_path = Vec::new(); - let mut stack = vec![vec![room_id.to_owned()]]; - let mut results = Vec::new(); + for (child, _via) in get_parent_children_via(*room.clone(), suggested_only) { + match self + .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .await? + { + Some(SummaryAccessibility::Accessible(summary)) => { + children.push((*summary).into()); + } + Some(SummaryAccessibility::Inaccessible) => { + inaccessible_children.push(child); + } + None => (), + } + } - while let Some(current_room) = { - while stack.last().map_or(false, |s| s.is_empty()) { - stack.pop(); + Ok(federation::space::get_hierarchy::v1::Response { + room: *room, + children, + inaccessible_children, + }) } - if !stack.is_empty() { - stack.last_mut().and_then(|s| s.pop()) + Some(SummaryAccessibility::Inaccessible) => Err(Error::BadRequest( + ErrorKind::NotFound, + "The requested room is inaccessible", + )), + None => Err(Error::BadRequest( + ErrorKind::NotFound, + "The requested room was not found", + )), + } + } + + /// Gets the summary of a space using solely local information + async fn get_summary_and_children_local( + &self, + current_room: &OwnedRoomId, + identifier: Identifier<'_>, + ) -> Result> { + if let Some(cached) = self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(&current_room.to_owned()) + .as_ref() + { + return Ok(if let Some(cached) = cached { + if is_accessable_child( + current_room, + &cached.summary.join_rule, + &identifier, + &cached.summary.allowed_room_ids, + ) { + Some(SummaryAccessibility::Accessible(Box::new( + cached.summary.clone(), + ))) + } else { + Some(SummaryAccessibility::Inaccessible) + } } else { None - } - } { - rooms_in_path.push(current_room.clone()); - if results.len() >= limit { - break; - } + }); + } - if let Some(cached) = self - .roomid_spacechunk_cache - .lock() - .await - .get_mut(&current_room.to_owned()) - .as_ref() - { - if let Some(cached) = cached { - let allowed = match &cached.join_rule { - //CachedJoinRule::Simplified(s) => { - //self.handle_simplified_join_rule(s, sender_user, &current_room)? - //} - CachedJoinRule::Full(f) => { - self.handle_join_rule(f, sender_user, &current_room)? - } - }; - if allowed { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(cached.chunk.clone()); - } - if rooms_in_path.len() < max_depth { - stack.push(cached.children.clone()); - } - } - } - continue; - } - - if let Some(current_shortstatehash) = services() - .rooms - .state - .get_room_shortstatehash(&current_room)?
- { - let state = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut children_ids = Vec::new(); - let mut children_pdus = Vec::new(); - for (key, id) in state { - let (event_type, state_key) = - services().rooms.short.get_statekey_from_short(key)?; - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = services() - .rooms - .timeline - .get_pdu(&id)? - .ok_or_else(|| Error::bad_database("Event in space state not found"))?; - - if serde_json::from_str::(pdu.content.get()) - .ok() - .map(|c| c.via) - .map_or(true, |v| v.is_empty()) - { - continue; - } - - if let Ok(room_id) = OwnedRoomId::try_from(state_key) { - children_ids.push(room_id); - children_pdus.push(pdu); - } - } - - // TODO: Sort children - children_ids.reverse(); - - let chunk = self.get_room_chunk(sender_user, &current_room, children_pdus); - if let Ok(chunk) = chunk { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(chunk.clone()); - } - let join_rule = services() - .rooms - .state_accessor - .room_state_get(&current_room, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") - }) - }) - .transpose()? - .unwrap_or(JoinRule::Invite); - - self.roomid_spacechunk_cache.lock().await.insert( + Ok( + if let Some(children_pdus) = get_stripped_space_child_events(current_room).await? { + let summary = self.get_room_summary(current_room, children_pdus, identifier); + if let Ok(summary) = summary { + self.roomid_spacehierarchy_cache.lock().await.insert( current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children: children_ids.clone(), - join_rule: CachedJoinRule::Full(join_rule), + Some(CachedSpaceHierarchySummary { + summary: summary.clone(), }), ); - } - if rooms_in_path.len() < max_depth { - stack.push(children_ids); + Some(SummaryAccessibility::Accessible(Box::new(summary))) + } else { + None } } else { - let server = current_room - .server_name() - .expect("Room IDs should always have a server name"); - if server == services().globals.server_name() { - continue; - } - if !results.is_empty() { - // Early return so the client can see some data already - break; - } - debug!("Asking {server} for /hierarchy"); - if let Ok(response) = services() - .sending - .send_federation_request( - server, - federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }, - ) - .await - { - warn!("Got response from {server} for /hierarchy\n{response:?}"); - let chunk = SpaceHierarchyRoomsChunk { - canonical_alias: response.room.canonical_alias, - name: response.room.name, - num_joined_members: response.room.num_joined_members, - room_id: response.room.room_id, - topic: response.room.topic, - world_readable: response.room.world_readable, - guest_can_join: response.room.guest_can_join, - avatar_url: response.room.avatar_url, - join_rule: response.room.join_rule.clone(), - room_type: response.room.room_type, - children_state: response.room.children_state, - }; - let children = response - .children - .iter() - .map(|c| c.room_id.clone()) - .collect::>(); + None + }, + ) + } - let join_rule = match response.room.join_rule { - SpaceRoomJoinRule::Invite => JoinRule::Invite, - SpaceRoomJoinRule::Knock => JoinRule::Knock, - SpaceRoomJoinRule::Private => JoinRule::Private, -
SpaceRoomJoinRule::Restricted => { - JoinRule::Restricted(join_rules::Restricted { - allow: response - .room - .allowed_room_ids - .into_iter() - .map(AllowRule::room_membership) - .collect(), - }) - } - SpaceRoomJoinRule::KnockRestricted => { - JoinRule::KnockRestricted(join_rules::Restricted { - allow: response - .room - .allowed_room_ids - .into_iter() - .map(AllowRule::room_membership) - .collect(), - }) - } - SpaceRoomJoinRule::Public => JoinRule::Public, - _ => return Err(Error::BadServerResponse("Unknown join rule")), - }; - if self.handle_join_rule(&join_rule, sender_user, &current_room)? { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(chunk.clone()); - } - if rooms_in_path.len() < max_depth { - stack.push(children.clone()); - } - } + /// Gets the summary of a space using solely federation + async fn get_summary_and_children_federation( + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &Vec, + ) -> Result> { + for server in via { + info!("Asking {server} for /hierarchy"); + if let Ok(response) = services() + .sending + .send_federation_request( + server, + federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }, + ) + .await + { + info!("Got response from {server} for /hierarchy\n{response:?}"); + let summary = response.room.clone(); - self.roomid_spacechunk_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children, - join_rule: CachedJoinRule::Full(join_rule), - }), - ); + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.clone(), + Some(CachedSpaceHierarchySummary { + summary: summary.clone(), + }), + ); - /* TODO: - for child in response.children { - roomid_spacechunk_cache.insert( + for child in response.children { + let mut guard = self.roomid_spacehierarchy_cache.lock().await; + if !guard.contains_key(current_room) { + guard.insert( current_room.clone(), - CachedSpaceChunk { - chunk: child.chunk, - children, - join_rule, - }, + Some(CachedSpaceHierarchySummary { + summary: { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + } = child; + + SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + room_id: room_id.clone(), + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state: get_stripped_space_child_events(&room_id) + .await?
+ .unwrap(), + allowed_room_ids, + } + }, + }), ); } - */ + } + if is_accessable_child( + current_room, + &response.room.join_rule, + &Identifier::UserId(user_id), + &response.room.allowed_room_ids, + ) { + return Ok(Some(SummaryAccessibility::Accessible(Box::new( + summary.clone(), + )))); } else { - self.roomid_spacechunk_cache - .lock() - .await - .insert(current_room.clone(), None); + return Ok(Some(SummaryAccessibility::Inaccessible)); } } } - Ok(get_hierarchy::v1::Response { - next_batch: if results.is_empty() { - None - } else { - Some((skip + results.len()).to_string()) - }, - rooms: results, - }) + self.roomid_spacehierarchy_cache + .lock() + .await + .insert(current_room.clone(), None); + + Ok(None) } - fn get_room_chunk( + /// Gets the summary of a space using either local or remote (federation) sources + async fn get_summary_and_children_client( &self, - sender_user: &UserId, - room_id: &RoomId, - children: Vec>, - ) -> Result { - Ok(SpaceHierarchyRoomsChunk { + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &Vec, + ) -> Result> { + if let Ok(Some(response)) = self + .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) + .await + { + Ok(Some(response)) + } else { + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await + } + } + + fn get_room_summary( + &self, + current_room: &OwnedRoomId, + children_state: Vec>, + identifier: Identifier<'_>, + ) -> Result { + let room_id: &RoomId = current_room; + + let join_rule = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| c.join_rule) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .unwrap_or(JoinRule::Invite); + + let allowed_room_ids = services() + .rooms + .state_accessor + .allowed_room_ids(join_rule.clone()); + + if !is_accessable_child( + current_room, + &join_rule.clone().into(), + &identifier, + &allowed_room_ids, + ) { + debug!("User is not allowed to see room {room_id}"); + // This error will be caught later + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not allowed to see the room", + )); + } + + let join_rule = join_rule.into(); + + Ok(SpaceHierarchyParentSummary { canonical_alias: services() .rooms .state_accessor @@ -348,34 +460,8 @@ impl Service { Error::bad_database("Invalid room topic event in database.") }) })?, - world_readable: services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable - }) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - })?, - guest_can_join: services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? 
- .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomGuestAccessEventContent| { - c.guest_access == GuestAccess::CanJoin - }) - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - }) - })?, + world_readable: services().rooms.state_accessor.world_readable(room_id)?, + guest_can_join: services().rooms.state_accessor.guest_can_join(room_id)?, avatar_url: services() .rooms .state_accessor @@ -388,33 +474,7 @@ impl Service { .transpose()? // url is now an Option so we must flatten .flatten(), - join_rule: { - let join_rule = services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") - }) - }) - .transpose()? - .unwrap_or(JoinRule::Invite); - - if !self.handle_join_rule(&join_rule, sender_user, room_id)? { - debug!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to see the room", - )); - } - - self.translate_joinrule(&join_rule)? - }, + join_rule, room_type: services() .rooms .state_accessor @@ -427,79 +487,477 @@ impl Service { }) .transpose()? .and_then(|e| e.room_type), - children_state: children - .into_iter() - .map(|pdu| pdu.to_stripped_spacechild_state_event()) - .collect(), + children_state, + allowed_room_ids, }) } - fn translate_joinrule(&self, join_rule: &JoinRule) -> Result { - match join_rule { - JoinRule::Invite => Ok(SpaceRoomJoinRule::Invite), - JoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), - JoinRule::Private => Ok(SpaceRoomJoinRule::Private), - JoinRule::Restricted(_) => Ok(SpaceRoomJoinRule::Restricted), - JoinRule::KnockRestricted(_) => Ok(SpaceRoomJoinRule::KnockRestricted), - JoinRule::Public => Ok(SpaceRoomJoinRule::Public), - _ => Err(Error::BadServerResponse("Unknown join rule")), - } - } - - fn handle_simplified_join_rule( + pub async fn get_client_hierarchy( &self, - join_rule: &SpaceRoomJoinRule, sender_user: &UserId, room_id: &RoomId, - ) -> Result { - let allowed = match join_rule { - SpaceRoomJoinRule::Public => true, - SpaceRoomJoinRule::Knock => true, - SpaceRoomJoinRule::Invite => services() + limit: usize, + short_room_ids: Vec, + max_depth: usize, + suggested_only: bool, + ) -> Result { + let mut parents = VecDeque::new(); + + // Don't start populating the results if we have to start at a specific room. 
+ let mut populate_results = short_room_ids.is_empty(); + + let mut stack = vec![vec![( + room_id.to_owned(), + match room_id.server_name() { + Some(server_name) => vec![server_name.into()], + None => vec![], + }, + )]]; + + let mut results = Vec::new(); + + while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { + if limit > results.len() { + match ( + self.get_summary_and_children_client( + &current_room, + suggested_only, + sender_user, + &via, + ) + .await?, + current_room == room_id, + ) { + (Some(SummaryAccessibility::Accessible(summary)), _) => { + let mut children: Vec<(OwnedRoomId, Vec)> = + get_parent_children_via(*summary.clone(), suggested_only) + .into_iter() + .filter(|(room, _)| parents.iter().all(|parent| parent != room)) + .rev() + .collect(); + + if populate_results { + results.push(summary_to_chunk(*summary.clone())) + } else { + children = children + .into_iter() + .rev() + .skip_while(|(room, _)| { + if let Ok(short) = services().rooms.short.get_shortroomid(room) + { + short.as_ref() != short_room_ids.get(parents.len()) + } else { + false + } + }) + .collect::>() + // skip_while doesn't implement DoubleEndedIterator, which is needed for rev + .into_iter() + .rev() + .collect(); + + if children.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Short room ids in token were not found.", + )); + } + + // We have reached the room after where we last left off + if parents.len() + 1 == short_room_ids.len() { + populate_results = true; + } + } + + if !children.is_empty() && parents.len() < max_depth { + parents.push_back(current_room.clone()); + stack.push(children); + } + // Root room in the space hierarchy, we return an error if this one fails. + } + (Some(SummaryAccessibility::Inaccessible), true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room is inaccessible", + )); + } + (None, true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room was not found", + )); + } + // Just ignore other unavailable rooms + (None | Some(SummaryAccessibility::Inaccessible), false) => (), + } + } else { + break; + } + } + + Ok(client::space::get_hierarchy::v1::Response { + next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { + parents.pop_front(); + parents.push_back(room); + + let mut short_room_ids = vec![]; + + for room in parents { + short_room_ids.push(services().rooms.short.get_or_create_shortroomid(&room)?); + } + + Some( + PagnationToken { + short_room_ids, + limit: UInt::new(max_depth as u64) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth as u64) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string(), + ) + } else { + None + }, + rooms: results, + }) + } +} + +fn next_room_to_traverse( + stack: &mut Vec)>>, + parents: &mut VecDeque, +) -> Option<(OwnedRoomId, Vec)> { + while stack.last().map_or(false, |s| s.is_empty()) { + stack.pop(); + parents.pop_back(); + } + + stack.last_mut().and_then(|s| s.pop()) +} + +/// Simply returns the stripped m.space.child events of a room +async fn get_stripped_space_child_events( + room_id: &RoomId, +) -> Result>>, Error> { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)?
{ + let state = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let mut children_pdus = Vec::new(); + for (key, id) in state { + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?; + if event_type != StateEventType::SpaceChild { + continue; + } + + let pdu = services() + .rooms + .timeline + .get_pdu(&id)? + .ok_or_else(|| Error::bad_database("Event in space state not found"))?; + + if serde_json::from_str::(pdu.content.get()) + .ok() + .map(|c| c.via) + .map_or(true, |v| v.is_empty()) + { + continue; + } + + if OwnedRoomId::try_from(state_key).is_ok() { + children_pdus.push(pdu.to_stripped_spacechild_state_event()); + } + } + Ok(Some(children_pdus)) + } else { + Ok(None) + } +} + +/// With the given identifier, checks if a room is accessable +fn is_accessable_child( + current_room: &OwnedRoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, + allowed_room_ids: &Vec, +) -> bool { + // Note: unwrap_or_default for bool means false + match identifier { + Identifier::ServerName(server_name) => { + let room_id: &RoomId = current_room; + + // Checks if ACLs allow for the server to participate + if services() + .rooms + .event_handler + .acl_check(server_name, room_id) + .is_err() + { + return false; + } + } + Identifier::UserId(user_id) => { + if services() .rooms .state_cache - .is_joined(sender_user, room_id)?, - _ => false, - }; - - Ok(allowed) - } - - fn handle_join_rule( - &self, - join_rule: &JoinRule, - sender_user: &UserId, - room_id: &RoomId, - ) -> Result { - if self.handle_simplified_join_rule( - &self.translate_joinrule(join_rule)?, - sender_user, - room_id, - )? { - return Ok(true); + .is_joined(user_id, current_room) + .unwrap_or_default() + || services() + .rooms + .state_cache + .is_invited(user_id, current_room) + .unwrap_or_default() + { + return true; + } } - - match join_rule { - JoinRule::Restricted(r) => { - for rule in &r.allow { - if let AllowRule::RoomMembership(rm) = rule { - if let Ok(true) = services() + } // Takes care of joinrules + match join_rule { + SpaceRoomJoinRule::Restricted => { + for room in allowed_room_ids { + match identifier { + Identifier::UserId(user) => { + if services() .rooms .state_cache - .is_joined(sender_user, &rm.room_id) + .is_joined(user, room) + .unwrap_or_default() { - return Ok(true); + return true; + } + } + Identifier::ServerName(server) => { + if services() + .rooms + .state_cache + .server_in_room(server, room) + .unwrap_or_default() + { + return true; } } } - - Ok(false) } - JoinRule::KnockRestricted(_) => { - // TODO: Check rules - Ok(false) - } - _ => Ok(false), + false } + SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + SpaceRoomJoinRule::Invite | SpaceRoomJoinRule::Private => false, + // Custom join rule + _ => false, + } +} + +// Here because cannot implement `From` across ruma-federation-api and ruma-client-api types +fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { + let SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state, + .. 
+ } = summary; + + SpaceHierarchyRoomsChunk { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state, + } +} + +/// Returns the children of a SpaceHierarchyParentSummary, making use of the children_state field +fn get_parent_children_via( + parent: SpaceHierarchyParentSummary, + suggested_only: bool, +) -> Vec<(OwnedRoomId, Vec)> { + parent + .children_state + .iter() + .filter_map(|raw_ce| { + raw_ce.deserialize().map_or(None, |ce| { + if suggested_only && !ce.content.suggested { + None + } else { + Some((ce.state_key, ce.content.via)) + } + }) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use ruma::{ + api::federation::space::SpaceHierarchyParentSummaryInit, owned_room_id, owned_server_name, + }; + + use super::*; + + #[test] + fn get_summary_children() { + let summary: SpaceHierarchyParentSummary = SpaceHierarchyParentSummaryInit { + num_joined_members: UInt::from(1_u32), + room_id: owned_room_id!("!root:example.org"), + world_readable: true, + guest_can_join: true, + join_rule: SpaceRoomJoinRule::Public, + children_state: vec![ + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ], + "suggested": false + }, + "origin_server_ts": 1629413349153, + "sender": "@alice:example.org", + "state_key": "!foo:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ], + "suggested": true + }, + "origin_server_ts": 1629413349157, + "sender": "@alice:example.org", + "state_key": "!bar:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ] + }, + "origin_server_ts": 1629413349160, + "sender": "@alice:example.org", + "state_key": "!baz:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + ], + allowed_room_ids: vec![], + } + .into(); + + assert_eq!( + get_parent_children_via(summary.clone(), false), + vec![ + ( + owned_room_id!("!foo:example.org"), + vec![owned_server_name!("example.org")] + ), + ( + owned_room_id!("!bar:example.org"), + vec![owned_server_name!("example.org")] + ), + ( + owned_room_id!("!baz:example.org"), + vec![owned_server_name!("example.org")] + ) + ] + ); + assert_eq!( + get_parent_children_via(summary, true), + vec![( + owned_room_id!("!bar:example.org"), + vec![owned_server_name!("example.org")] + )] + ); + } + + #[test] + fn invalid_pagnation_tokens() { + fn token_is_err(token: &str) { + let token: Result = PagnationToken::from_str(token); + assert!(token.is_err()); + } + + token_is_err("231_2_noabool"); + token_is_err(""); + token_is_err("111_3_"); + token_is_err("foo_not_int"); + token_is_err("11_4_true_"); + token_is_err("___"); + token_is_err("__false"); + } + + #[test] + fn valid_pagnation_tokens() { + assert_eq!( + PagnationToken { + short_room_ids: vec![5383, 42934, 283, 423], + limit: UInt::from(20_u32), + max_depth: UInt::from(1_u32), + suggested_only: true + }, + PagnationToken::from_str("5383,42934,283,423_20_1_true").unwrap() + ); + + assert_eq!( + PagnationToken { + short_room_ids: vec![740], + limit: UInt::from(97_u32), + max_depth: UInt::from(10539_u32), + suggested_only: false + }, + PagnationToken::from_str("740_97_10539_false").unwrap() + ); + } + + #[test] + fn pagnation_token_to_string() { + assert_eq!( + PagnationToken { + short_room_ids: vec![740], + limit: UInt::from(97_u32), + max_depth: UInt::from(10539_u32), + suggested_only: false + } + 
.to_string(), + "740_97_10539_false" + ); + + assert_eq!( + PagnationToken { + short_room_ids: vec![9, 34], + limit: UInt::from(3_u32), + max_depth: UInt::from(1_u32), + suggested_only: true + } + .to_string(), + "9,34_3_1_true" + ); } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index f6581bb..f5bd7e9 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -93,7 +93,7 @@ impl Service { services() .rooms .spaces - .roomid_spacechunk_cache + .roomid_spacehierarchy_cache .lock() .await .remove(&pdu.room_id); diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index aa654ae..f1dcb3d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -10,15 +10,18 @@ use ruma::{ events::{ room::{ avatar::RoomAvatarEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, member::{MembershipState, RoomMemberEventContent}, name::RoomNameEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, StateEventType, }, + space::SpaceRoomJoinRule, state_res::Event, - EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventId, JsOption, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::MutexGuard; @@ -396,4 +399,70 @@ impl Service { } }) } + + /// Checks if guests are able to join a given room + pub fn guest_can_join(&self, room_id: &RoomId) -> Result { + self.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) + .map_err(|_| { + Error::bad_database("Invalid room guest access event in database.") + }) + }) + } + + /// Checks if guests are able to view room content without joining + pub fn world_readable(&self, room_id: &RoomId) -> Result { + self.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|_| { + Error::bad_database("Invalid room history visibility event in database.") + }) + }) + } + + /// Returns the join rule for a given room + pub fn get_join_rule( + &self, + current_room: &RoomId, + ) -> Result<(SpaceRoomJoinRule, Vec), Error> { + Ok(self + .room_state_get(current_room, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| { + ( + c.join_rule.clone().into(), + self.allowed_room_ids(c.join_rule), + ) + }) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? 
+ .unwrap_or((SpaceRoomJoinRule::Invite, vec![]))) + } + + /// Returns an empty vec if not a restricted room + pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { + let mut room_ids = vec![]; + if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { + for rule in r.allow { + if let AllowRule::RoomMembership(RoomMembership { + room_id: membership, + }) = rule + { + room_ids.push(membership.to_owned()); + } + } + } + room_ids + } } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index c108695..1604a14 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -248,11 +248,13 @@ impl Service { self.db.room_members(room_id) } + /// Returns the number of users which are currently in a room #[tracing::instrument(skip(self))] pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { self.db.room_joined_count(room_id) } + /// Returns the number of users which are currently invited to a room #[tracing::instrument(skip(self))] pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { self.db.room_invited_count(room_id) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9f0e290..8069066 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -430,7 +430,7 @@ impl Service { services() .rooms .spaces - .roomid_spacechunk_cache + .roomid_spacehierarchy_cache .lock() .await .remove(&pdu.room_id); From 98d3dcd37f613fe68a3332bb6262c9e156259df0 Mon Sep 17 00:00:00 2001 From: Aleksandr Date: Sat, 26 Oct 2024 14:08:26 +0300 Subject: [PATCH 1726/1727] Initial commit --- .gitignore | 22 ++++++++++++++++ LICENSE | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 3 +++ 3 files changed, 98 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ab951f8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +# ---> Rust +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb + +# RustRover +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..de03dbd --- /dev/null +++ b/LICENSE @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright 2024 nero + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..2cd5efe --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# tsuki + +conduit fork with useful features \ No newline at end of file From 354ba32c17f30cd5d5f0a00fd955d9cbd0815e66 Mon Sep 17 00:00:00 2001 From: Aleksandr Date: Sat, 26 Oct 2024 14:10:41 +0300 Subject: [PATCH 1727/1727] Initial commit --- LICENSE | 197 +++++++++++++----------------------------------------- README.md | 85 +---------------------- 2 files changed, 49 insertions(+), 233 deletions(-) diff --git a/LICENSE b/LICENSE index d9a10c0..de03dbd 100644 --- a/LICENSE +++ b/LICENSE @@ -1,176 +1,73 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - 1. Definitions. +1. Definitions. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
- "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. 
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
- END OF TERMS AND CONDITIONS
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+Copyright 2024 nero
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/README.md b/README.md
index 6851c5b..2cd5efe 100644
--- a/README.md
+++ b/README.md
@@ -1,84 +1,3 @@
-# Conduit
+# tsuki
-
-### A Matrix homeserver written in Rust
-
-
-Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
-Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
-
-
-#### What is Matrix?
-
-[Matrix](https://matrix.org) is an open network for secure and decentralized
-communication. Users from every Matrix homeserver can chat with users from all
-other Matrix servers. You can even use bridges (also called Matrix appservices)
-to communicate with users outside of Matrix, like a community on Discord.
-
-#### What is the goal?
-
-An efficient Matrix homeserver that's easy to set up and just works. You can install
-it on a mini-computer like the Raspberry Pi to host Matrix for your family,
-friends or company.
-
-#### Can I try it out?
-
-Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
-
-Server hosting for conduit.rs is donated by the Matrix.org Foundation.
-
-#### What is the current status?
-
-Conduit is Beta, meaning you can join and participate in most
-Matrix rooms, but not all features are supported and you might run into bugs
-from time to time.
-
-There are still a few important features missing:
-
-- E2EE emoji comparison over federation (E2EE chat works)
-- Outgoing read receipts, typing, presence over federation (incoming works)
-
-
-
-#### How can I contribute?
-
-1. Look for an issue you would like to work on and make sure no one else is currently working on it.
-2. Tell us that you are working on the issue (comment on the issue or chat in
- [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
-3. Fork the repo, create a new branch and push commits.
-4. Submit a MR
-
-#### Contact
-
-If you have any questions, feel free to
-- Ask in `#conduit:fachschaften.org` on Matrix
-- Write an E-Mail to `conduit@koesters.xyz`
-- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
-- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
-
-#### Security
-
-If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
-and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
-[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
-a fix is released publically.
-
-#### Thanks to
-
-Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
-
-Thanks to the contributors to Conduit and all libraries we use, for example:
-
-- Ruma: A clean library for the Matrix Spec in Rust
-- axum: A modular web framework
-
-#### Donate
-
-- Liberapay:
-- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
-
-#### Logo
-
-- Lightning Bolt Logo:
-- Logo License:
-
+conduit fork with useful features
\ No newline at end of file